• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import static android.media.AudioManager.AUDIO_SESSION_ID_GENERATE;
20 
21 import android.annotation.CallbackExecutor;
22 import android.annotation.FloatRange;
23 import android.annotation.IntDef;
24 import android.annotation.IntRange;
25 import android.annotation.NonNull;
26 import android.annotation.Nullable;
27 import android.annotation.RequiresPermission;
28 import android.annotation.SystemApi;
29 import android.annotation.TestApi;
30 import android.compat.annotation.UnsupportedAppUsage;
31 import android.content.AttributionSource;
32 import android.content.AttributionSource.ScopedParcelState;
33 import android.content.Context;
34 import android.media.audiopolicy.AudioMix;
35 import android.media.audiopolicy.AudioMixingRule;
36 import android.media.audiopolicy.AudioPolicy;
37 import android.media.metrics.LogSessionId;
38 import android.os.Binder;
39 import android.os.Build;
40 import android.os.Handler;
41 import android.os.HandlerThread;
42 import android.os.Looper;
43 import android.os.Message;
44 import android.os.Parcel;
45 import android.os.PersistableBundle;
46 import android.util.ArrayMap;
47 import android.util.Log;
48 
49 import com.android.internal.annotations.GuardedBy;
50 
51 import java.lang.annotation.Retention;
52 import java.lang.annotation.RetentionPolicy;
53 import java.lang.ref.WeakReference;
54 import java.nio.ByteBuffer;
55 import java.nio.ByteOrder;
56 import java.nio.NioUtils;
57 import java.util.LinkedList;
58 import java.util.Map;
59 import java.util.Objects;
60 import java.util.concurrent.Executor;
61 
62 /**
63  * The AudioTrack class manages and plays a single audio resource for Java applications.
64  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
65  * achieved by "pushing" the data to the AudioTrack object using one of the
66  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
67  *  and {@link #write(float[], int, int, int)} methods.
68  *
69  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
70  * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
71  * one of the {@code write()} methods. These are blocking and return when the data has been
72  * transferred from the Java layer to the native layer and queued for playback. The streaming
73  * mode is most useful when playing blocks of audio data that for instance are:
74  *
75  * <ul>
76  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
77  *   <li>too big to fit in memory because of the characteristics of the audio data
78  *         (high sampling rate, bits per sample ...)</li>
79  *   <li>received or generated while previously queued audio is playing.</li>
80  * </ul>
81  *
82  * The static mode should be chosen when dealing with short sounds that fit in memory and
83  * that need to be played with the smallest latency possible. The static mode will
84  * therefore be preferred for UI and game sounds that are played often, and with the
85  * smallest overhead possible.
86  *
87  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
88  * The size of this buffer, specified during the construction, determines how long an AudioTrack
89  * can play before running out of data.<br>
90  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
91  * be played from it.<br>
92  * For the streaming mode, data will be written to the audio sink in chunks of
93  * sizes less than or equal to the total buffer size.
94  *
95  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
96  */
97 public class AudioTrack extends PlayerBase
98                         implements AudioRouting
99                                  , VolumeAutomation
100 {
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
    /**
      * @hide
      * indicates AudioTrack state is stopping, waiting for NATIVE_EVENT_STREAM_END to
      * transition to PLAYSTATE_STOPPED.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_STOPPING = 4;
    /**
      * @hide
      * indicates AudioTrack state is paused from stopping state. Will transition to
      * PLAYSTATE_STOPPING if play() is called.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_PAUSED_STOPPING = 5;

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    // Restricts transfer-mode parameters to MODE_STATIC / MODE_STREAM at compile time.
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}
153 
    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    // Return codes below mirror the AudioSystem values so Java and native layers agree.
    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;
195 
    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    // Note: To avoid collisions with other event constants,
    // do not define an event here that is the same value as
    // AudioSystem.NATIVE_EVENT_ROUTING_CHANGE.

    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;
    /**
     * Callback for more data
     */
    private static final int NATIVE_EVENT_CAN_WRITE_MORE_DATA = 9;
    /**
     * IAudioTrack tear down for offloaded tracks
     * TODO: when received, java AudioTrack must be released
     */
    private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6;
    /**
     * Event id denotes when all the buffers queued in AF and HW are played
     * back (after stop is called) for an offloaded track.
     */
    private static final int NATIVE_EVENT_STREAM_END = 7;
    /**
     * Event id denotes when the codec format changes.
     *
     * Note: Similar to a device routing change (AudioSystem.NATIVE_EVENT_ROUTING_CHANGE),
     * this event comes from the AudioFlinger Thread / Output Stream management
     * (not from buffer indications as above).
     */
    private static final int NATIVE_EVENT_CODEC_FORMAT_CHANGE = 100;

    // Log tag for this class.
    private final static String TAG = "android.media.AudioTrack";
242 
    /** @hide */
    @IntDef({
        ENCAPSULATION_MODE_NONE,
        ENCAPSULATION_MODE_ELEMENTARY_STREAM,
        // ENCAPSULATION_MODE_HANDLE, @SystemApi
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMode {}

    // Important: The ENCAPSULATION_MODE values must be kept in sync with native header files.
    /**
     * This mode indicates no metadata encapsulation,
     * which is the default mode for sending audio data
     * through {@code AudioTrack}.
     */
    public static final int ENCAPSULATION_MODE_NONE = 0;
    /**
     * This mode indicates metadata encapsulation with an elementary stream payload.
     * Both compressed and PCM format is allowed.
     */
    public static final int ENCAPSULATION_MODE_ELEMENTARY_STREAM = 1;
    /**
     * This mode indicates metadata encapsulation with a handle payload
     * and is set through {@link Builder#setEncapsulationMode(int)}.
     * The handle is a 64 bit long, provided by the Tuner API
     * in {@link android.os.Build.VERSION_CODES#R}.
     * @hide
     */
    @SystemApi
    @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
    public static final int ENCAPSULATION_MODE_HANDLE = 2;

    /**
     * Enumeration of metadata types permitted for use by
     * encapsulation mode audio streams.
     * @hide
     */
    @IntDef(prefix = {"ENCAPSULATION_METADATA_TYPE_"},
            value =
                    {
                            ENCAPSULATION_METADATA_TYPE_NONE, /* reserved */
                            ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER,
                            ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR,
                            ENCAPSULATION_METADATA_TYPE_SUPPLEMENTARY_AUDIO_PLACEMENT,
                    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMetadataType {}

    /**
     * Reserved do not use.
     * @hide
     */
    public static final int ENCAPSULATION_METADATA_TYPE_NONE = 0; // reserved

    /**
     * Encapsulation metadata type for framework tuner information.
     *
     * Refer to the Android Media TV Tuner API for details.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER = 1;

    /**
     * Encapsulation metadata type for DVB AD descriptor.
     *
     * This metadata is formatted per ETSI TS 101 154 Table E.1: AD_descriptor.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR = 2;

    /**
     * Encapsulation metadata type for placement of supplementary audio.
     *
     * A 32 bit integer constant, one of {@link #SUPPLEMENTARY_AUDIO_PLACEMENT_NORMAL}, {@link
     * #SUPPLEMENTARY_AUDIO_PLACEMENT_LEFT}, {@link #SUPPLEMENTARY_AUDIO_PLACEMENT_RIGHT}.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_SUPPLEMENTARY_AUDIO_PLACEMENT = 3;

    /**
     * Enumeration of supplementary audio placement types.
     * @hide
     */
    @IntDef(prefix = {"SUPPLEMENTARY_AUDIO_PLACEMENT_"},
            value =
                    {
                            SUPPLEMENTARY_AUDIO_PLACEMENT_NORMAL,
                            SUPPLEMENTARY_AUDIO_PLACEMENT_LEFT,
                            SUPPLEMENTARY_AUDIO_PLACEMENT_RIGHT,
                    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface SupplementaryAudioPlacement {}
    // Important: The SUPPLEMENTARY_AUDIO_PLACEMENT values must be kept in sync with native header
    // files.

    /**
     * Supplementary audio placement normal.
     */
    public static final int SUPPLEMENTARY_AUDIO_PLACEMENT_NORMAL = 0;

    /**
     * Supplementary audio placement left.
     */
    public static final int SUPPLEMENTARY_AUDIO_PLACEMENT_LEFT = 1;

    /**
     * Supplementary audio placement right.
     */
    public static final int SUPPLEMENTARY_AUDIO_PLACEMENT_RIGHT = 2;
349 
    /* Dual Mono handling is used when a stereo audio stream
     * contains separate audio content on the left and right channels.
     * Such information about the content of the stream may be found, for example, in
     * ITU T-REC-J.94-201610 A.6.2.3 Component descriptor.
     */
    /** @hide */
    @IntDef({
        DUAL_MONO_MODE_OFF,
        DUAL_MONO_MODE_LR,
        DUAL_MONO_MODE_LL,
        DUAL_MONO_MODE_RR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface DualMonoMode {}
    // Important: The DUAL_MONO_MODE values must be kept in sync with native header files.
    /**
     * This mode disables any Dual Mono presentation effect.
     *
     */
    public static final int DUAL_MONO_MODE_OFF = 0;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left and right audio channels blended together
     * and delivered to both channels.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that the left-right stereo symmetric
     * channels are pairwise blended;
     * the other channels such as center are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LR = 1;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left audio channel replicated into the right audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the left channel position replicated
     * into the right channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LL = 2;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the right audio channel replicated into the left audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the right channel position replicated
     * into the left channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_RR = 3;
414 
    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    /** @hide */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}

    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
     * If the device supports it, this mode
     * enables a lower latency path through to the audio output sink.
     * Effects may no longer work with such an {@code AudioTrack} and
     * the sample rate must match that of the output sink.
     * <p>
     * Applications should be aware that low latency requires careful
     * buffer management, with smaller chunks of audio data written by each
     * {@code write()} call.
     * <p>
     * If this flag is used without specifying a {@code bufferSizeInBytes} then the
     * {@code AudioTrack}'s actual buffer size may be too small.
     * It is recommended that a fairly
     * large buffer should be specified when the {@code AudioTrack} is created.
     * Then the actual size can be reduced by calling
     * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
     * by lowering it after each {@code write()} call until the audio glitches,
     * which is detected by calling
     * {@link #getUnderrunCount()}. Then the buffer size can be increased
     * until there are no glitches.
     * This tuning step should be done while playing silence.
     * This technique provides a compromise between latency and glitch rate.
     */
    public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;

    /**
     * Power saving performance mode for an {@link AudioTrack}.
     * If the device supports it, this
     * mode will enable a lower power path to the audio output sink.
     * In addition, this lower power path typically will have
     * deeper internal buffers and better underrun resistance,
     * with a tradeoff of higher latency.
     * <p>
     * In this mode, applications should attempt to use a larger buffer size
     * and deliver larger chunks of audio data per {@code write()} call.
     * Use {@link #getBufferSizeInFrames()} to determine
     * the actual buffer size of the {@code AudioTrack} as it may have increased
     * to accommodate a deeper buffer.
     */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // keep in sync with system/media/audio/include/system/audio-base.h
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;

    // Size of HW_AV_SYNC track AV header.
    private static final float HEADER_V2_SIZE_BYTES = 20.0f;
504 
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     * NOTE(review): the hidden offload-only states PLAYSTATE_STOPPING and
     * PLAYSTATE_PAUSED_STOPPING presumably also apply here — confirm against the
     * play()/stop() implementations.
     */
    private int mPlayState = PLAYSTATE_STOPPED;

    /**
     * Indicates that we are expecting an end of stream callback following a call
     * to setOffloadEndOfStream() in a gapless track transition context. The native track
     * will be restarted automatically.
     */
    private boolean mOffloadEosPending = false;

    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    @UnsupportedAppUsage
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * The AudioAttributes used in configuration.
     */
    private AudioAttributes mConfiguredAudioAttributes;
    /**
     * Audio session ID
     */
    private int mSessionId = AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;
    /**
     * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
     */
    private int mOffset = 0;
    /**
     * Indicates whether the track is intended to play in offload mode.
     */
    private boolean mOffloaded = false;
    /**
     * When offloaded track: delay for decoder in frames
     */
    private int mOffloadDelayFrames = 0;
    /**
     * When offloaded track: padding for decoder in frames
     */
    private int mOffloadPaddingFrames = 0;

    /**
     * The log session id used for metrics.
     * {@link LogSessionId#LOG_SESSION_ID_NONE} here means it is not set.
     */
    @NonNull private LogSessionId mLogSessionId = LogSessionId.LOG_SESSION_ID_NONE;

    // Audio policy this track is registered with, if any.
    private AudioPolicy mAudioPolicy;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
    private long mJniData;
648 
649 
    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegate to the session-aware constructor, letting the system
        // generate a new audio session id for this track.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AUDIO_SESSION_ID_GENERATE);
    }
699 
700     /**
701      * Class constructor with audio session. Use this constructor when the AudioTrack must be
702      * attached to a particular audio session. The primary use of the audio session ID is to
703      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
704      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
705      * and media players in the same session and not to the output mix.
706      * When an AudioTrack is created without specifying a session, it will create its own session
707      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
708      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
709      * session
710      * with all other media players or audio tracks in the same session, otherwise a new session
711      * will be created for this track if none is supplied.
712      * @param streamType the type of the audio stream. See
713      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
714      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
715      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
716      * @param sampleRateInHz the initial source sample rate expressed in Hz.
717      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
718      *   which is usually the sample rate of the sink.
719      * @param channelConfig describes the configuration of the audio channels.
720      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
721      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
722      * @param audioFormat the format in which the audio data is represented.
723      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
724      *   {@link AudioFormat#ENCODING_PCM_8BIT},
725      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
726      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
727      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
728      *   <p> If the track's creation mode is {@link #MODE_STATIC},
729      *   this is the maximum length sample, or audio clip, that can be played by this instance.
730      *   <p> If the track's creation mode is {@link #MODE_STREAM},
731      *   this should be the desired buffer size
732      *   for the <code>AudioTrack</code> to satisfy the application's
733      *   latency requirements.
734      *   If <code>bufferSizeInBytes</code> is less than the
735      *   minimum buffer size for the output sink, it is increased to the minimum
736      *   buffer size.
737      *   The method {@link #getBufferSizeInFrames()} returns the
738      *   actual size in frames of the buffer created, which
739      *   determines the minimum frequency to write
740      *   to the streaming <code>AudioTrack</code> to avoid underrun.
741      *   You can write data into this buffer in smaller chunks than this size.
742      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
743      *   for an AudioTrack instance in streaming mode.
744      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
745      * @param sessionId Id of audio session the AudioTrack must be attached to
746      * @throws java.lang.IllegalArgumentException
747      * @deprecated use {@link Builder} or
748      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
749      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
750      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode, int sessionId)751     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
752             int bufferSizeInBytes, int mode, int sessionId)
753     throws IllegalArgumentException {
754         // mState already == STATE_UNINITIALIZED
755         this((new AudioAttributes.Builder())
756                     .setLegacyStreamType(streamType)
757                     .build(),
758                 (new AudioFormat.Builder())
759                     .setChannelMask(channelConfig)
760                     .setEncoding(audioFormat)
761                     .setSampleRate(sampleRateInHz)
762                     .build(),
763                 bufferSizeInBytes,
764                 mode, sessionId);
765         deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
766     }
767 
768     /**
769      * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
770      * @param attributes a non-null {@link AudioAttributes} instance.
771      * @param format a non-null {@link AudioFormat} instance describing the format of the data
772      *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
773      *     configuring the audio format parameters such as encoding, channel mask and sample rate.
774      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
775      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
776      *   <p> If the track's creation mode is {@link #MODE_STATIC},
777      *   this is the maximum length sample, or audio clip, that can be played by this instance.
778      *   <p> If the track's creation mode is {@link #MODE_STREAM},
779      *   this should be the desired buffer size
780      *   for the <code>AudioTrack</code> to satisfy the application's
781      *   latency requirements.
782      *   If <code>bufferSizeInBytes</code> is less than the
783      *   minimum buffer size for the output sink, it is increased to the minimum
784      *   buffer size.
785      *   The method {@link #getBufferSizeInFrames()} returns the
786      *   actual size in frames of the buffer created, which
787      *   determines the minimum frequency to write
788      *   to the streaming <code>AudioTrack</code> to avoid underrun.
789      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
790      *   for an AudioTrack instance in streaming mode.
791      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
792      * @param sessionId ID of audio session the AudioTrack must be attached to, or
793      *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
794      *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
795      *   construction.
796      * @throws IllegalArgumentException
797      */
AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId)798     public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
799             int mode, int sessionId)
800                     throws IllegalArgumentException {
801         this(null /* context */, attributes, format, bufferSizeInBytes, mode, sessionId,
802                 false /*offload*/, ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
803     }
804 
    /**
     * Common private constructor to which the other data-driven constructors delegate.
     *
     * @param context used to derive the {@link AttributionSource} and to resolve a
     *     device-specific playback session id; may be null.
     * @param attributes attributes describing the played content.
     * @param format non-null format of the audio data; null throws.
     * @param bufferSizeInBytes total size in bytes of the internal buffer.
     * @param mode {@link #MODE_STATIC} or {@link #MODE_STREAM}.
     * @param sessionId audio session id, or {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
     * @param offload true to request the offloaded playback path.
     * @param encapsulationMode metadata encapsulation mode, e.g. ENCAPSULATION_MODE_NONE.
     * @param tunerConfiguration tuner stream parameters, or null when not tuner-fed.
     * @throws IllegalArgumentException on invalid format, parameters or session id.
     */
    private AudioTrack(@Nullable Context context, AudioAttributes attributes, AudioFormat format,
            int bufferSizeInBytes, int mode, int sessionId, boolean offload, int encapsulationMode,
            @Nullable TunerConfiguration tunerConfiguration)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // mState already == STATE_UNINITIALIZED

        mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // Check if we should enable deep buffer mode
        if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
            mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // SAMPLE_RATE_UNSPECIFIED is conveyed to the layers below as 0.
        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        // Extract only the properties that were explicitly set on the AudioFormat.
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates the parameters and records them into the member fields used below.
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mOffloaded = offload;
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // In/out parameters for native_setup: the native layer may adjust the sample
        // rate and fills in the effective session id.
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = resolvePlaybackSessionId(context, sessionId);

        AttributionSource attributionSource = context == null
                ? AttributionSource.myAttributionSource() : context.getAttributionSource();

        // native initialization
        try (ScopedParcelState attributionSourceState = attributionSource.asScopedParcelState()) {
            int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                    sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                    mNativeBufferSizeInBytes, mDataLoadMode, session,
                    attributionSourceState.getParcel(), 0 /*nativeTrackInJavaObj*/, offload,
                    encapsulationMode, tunerConfiguration, getCurrentOpPackageName());
            if (initResult != SUCCESS) {
                loge("Error code " + initResult + " when initializing AudioTrack.");
                return; // with mState == STATE_UNINITIALIZED
            }
        }

        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.

        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
            // For HW A/V sync tracks, reserve a whole number of frames large enough to
            // hold the V2 header that precedes each write.
            int frameSizeInBytes;
            if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
            } else {
                frameSizeInBytes = 1;
            }
            // NOTE(review): the ceil() is only effective if HEADER_V2_SIZE_BYTES is a
            // floating-point constant; with an integer constant the division truncates
            // before ceil() runs — confirm its declaration.
            mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
        }

        if (mDataLoadMode == MODE_STATIC) {
            // Static tracks still need their audio data before they are usable.
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }

        baseRegisterPlayer(mSessionId);
        native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack.
    }
911 
912     /**
913      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
914      * the AudioTrackRoutingProxy subclass.
915      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
916      * (associated with an OpenSL ES player).
917      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
918      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
919      * it means that the OpenSL player interface hasn't been realized, so there is no native
920      * Audiotrack to connect to. In this case wait to call deferred_connect() until the
921      * OpenSLES interface is realized.
922      */
AudioTrack(long nativeTrackInJavaObj)923     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
924         super(new AudioAttributes.Builder().build(),
925                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
926         // "final"s
927         mNativeTrackInJavaObj = 0;
928         mJniData = 0;
929 
930         // remember which looper is associated with the AudioTrack instantiation
931         Looper looper;
932         if ((looper = Looper.myLooper()) == null) {
933             looper = Looper.getMainLooper();
934         }
935         mInitializationLooper = looper;
936 
937         // other initialization...
938         if (nativeTrackInJavaObj != 0) {
939             baseRegisterPlayer(AudioSystem.AUDIO_SESSION_ALLOCATE);
940             deferred_connect(nativeTrackInJavaObj);
941         } else {
942             mState = STATE_UNINITIALIZED;
943         }
944     }
945 
946     /**
947      * @hide
948      */
    @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
    /* package */ void deferred_connect(long nativeTrackInJavaObj) {
        // Attaches this Java object to an already created/initialized *native* AudioTrack.
        // No-op when this instance is already initialized.
        if (mState != STATE_INITIALIZED) {
            // Note that for this native_setup, we are providing an already created/initialized
            // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
            int[] session = { 0 };  // out-parameter: filled in by the native layer
            int[] rates = { 0 };    // placeholder; the sample rate is ignored in this path
            try (ScopedParcelState attributionSourceState =
                         AttributionSource.myAttributionSource().asScopedParcelState()) {
                int initResult = native_setup(new WeakReference<AudioTrack>(this),
                        null /*mAttributes - NA*/,
                        rates /*sampleRate - NA*/,
                        0 /*mChannelMask - NA*/,
                        0 /*mChannelIndexMask - NA*/,
                        0 /*mAudioFormat - NA*/,
                        0 /*mNativeBufferSizeInBytes - NA*/,
                        0 /*mDataLoadMode - NA*/,
                        session,
                        attributionSourceState.getParcel(),
                        nativeTrackInJavaObj,
                        false /*offload*/,
                        ENCAPSULATION_MODE_NONE,
                        null /* tunerConfiguration */,
                        "" /* opPackagename */);
                if (initResult != SUCCESS) {
                    loge("Error code " + initResult + " when initializing AudioTrack.");
                    return; // with mState == STATE_UNINITIALIZED
                }
            }

            mSessionId = session[0];

            mState = STATE_INITIALIZED;
        }
    }
984 
985     /**
986      * TunerConfiguration is used to convey tuner information
987      * from the android.media.tv.Tuner API to AudioTrack construction.
988      *
     * Use the {@link #TunerConfiguration(int, int)} constructor to create a
     * TunerConfiguration object, which is then used by the {@link AudioTrack.Builder}
     * to create an AudioTrack.
991      * @hide
992      */
993     @SystemApi
994     public static class TunerConfiguration {
995         private final int mContentId;
996         private final int mSyncId;
997 
998         /**
999          * A special content id for {@link #TunerConfiguration(int, int)}
1000          * indicating audio is delivered
1001          * from an {@code AudioTrack} write, not tunneled from the tuner stack.
1002          */
1003         public static final int CONTENT_ID_NONE = 0;
1004 
1005         /**
1006          * Constructs a TunerConfiguration instance for use in {@link AudioTrack.Builder}
1007          *
1008          * @param contentId selects the audio stream to use.
1009          *     The contentId may be obtained from
1010          *     {@link android.media.tv.tuner.filter.Filter#getId()},
1011          *     such obtained id is always a positive number.
1012          *     If audio is to be delivered through an {@code AudioTrack} write
1013          *     then {@code CONTENT_ID_NONE} may be used.
1014          * @param syncId selects the clock to use for synchronization
1015          *     of audio with other streams such as video.
1016          *     The syncId may be obtained from
1017          *     {@link android.media.tv.tuner.Tuner#getAvSyncHwId()}.
1018          *     This is always a positive number.
1019          */
1020         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
TunerConfiguration( @ntRangefrom = 0) int contentId, @IntRange(from = 1)int syncId)1021         public TunerConfiguration(
1022                 @IntRange(from = 0) int contentId, @IntRange(from = 1)int syncId) {
1023             if (contentId < 0) {
1024                 throw new IllegalArgumentException(
1025                         "contentId " + contentId + " must be positive or CONTENT_ID_NONE");
1026             }
1027             if (syncId < 1) {
1028                 throw new IllegalArgumentException("syncId " + syncId + " must be positive");
1029             }
1030             mContentId = contentId;
1031             mSyncId = syncId;
1032         }
1033 
1034         /**
1035          * Returns the contentId.
1036          */
1037         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getContentId()1038         public @IntRange(from = 1) int getContentId() {
1039             return mContentId; // The Builder ensures this is > 0.
1040         }
1041 
1042         /**
1043          * Returns the syncId.
1044          */
1045         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getSyncId()1046         public @IntRange(from = 1) int getSyncId() {
1047             return mSyncId;  // The Builder ensures this is > 0.
1048         }
1049     }
1050 
1051     /**
1052      * Builder class for {@link AudioTrack} objects.
1053      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
1054      * attributes and audio format parameters, you indicate which of those vary from the default
1055      * behavior on the device.
1056      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
1057      * parameters, to be used by a new <code>AudioTrack</code> instance:
1058      *
1059      * <pre class="prettyprint">
1060      * AudioTrack player = new AudioTrack.Builder()
1061      *         .setAudioAttributes(new AudioAttributes.Builder()
1062      *                  .setUsage(AudioAttributes.USAGE_ALARM)
1063      *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
1064      *                  .build())
1065      *         .setAudioFormat(new AudioFormat.Builder()
1066      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
1067      *                 .setSampleRate(44100)
1068      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
1069      *                 .build())
1070      *         .setBufferSizeInBytes(minBuffSize)
1071      *         .build();
1072      * </pre>
1073      * <p>
1074      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
1075      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
1076      * <br>If the audio format is not specified or is incomplete, its channel configuration will be
1077      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
1078      * {@link AudioFormat#ENCODING_PCM_16BIT}.
1079      * The sample rate will depend on the device actually selected for playback and can be queried
1080      * with {@link #getSampleRate()} method.
1081      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
1082      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
1083      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
1084      * <code>MODE_STREAM</code> will be used.
1085      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
1086      * be generated.
1087      * <br>Offload is false by default.
1088      */
1089     public static class Builder {
        // Configuration accumulated by the setter methods below; defaults as
        // described in the class documentation.
        private Context mContext; // optional; supplies AttributionSource / device session ids
        private AudioAttributes mAttributes; // null until setAudioAttributes() is called
        private AudioFormat mFormat; // null until setAudioFormat() is called
        private int mBufferSizeInBytes; // 0 until setBufferSizeInBytes() is called
        private int mEncapsulationMode = ENCAPSULATION_MODE_NONE;
        private int mSessionId = AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM; // transfer mode, see setTransferMode()
        private int mPerformanceMode = PERFORMANCE_MODE_NONE;
        private boolean mOffload = false; // true when offloaded playback is requested
        private TunerConfiguration mTunerConfiguration; // system API; null when unused
        private int mCallRedirectionMode = AudioManager.CALL_REDIRECT_NONE;
1101 
1102         /**
1103          * Constructs a new Builder with the default values as described above.
1104          */
        public Builder() {
            // All fields keep their declared defaults; see the class documentation.
        }
1107 
1108         /**
1109          * Sets the context the track belongs to. This context will be used to pull information,
1110          * such as {@link android.content.AttributionSource} and device specific audio session ids,
1111          * which will be associated with the {@link AudioTrack}. However, the context itself will
1112          * not be retained by the {@link AudioTrack}.
1113          * @param context a non-null {@link Context} instance
1114          * @return the same Builder instance.
1115          */
setContext(@onNull Context context)1116         public @NonNull Builder setContext(@NonNull Context context) {
1117             mContext = Objects.requireNonNull(context);
1118             return this;
1119         }
1120 
1121         /**
1122          * Sets the {@link AudioAttributes}.
1123          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
1124          *     data to be played.
1125          * @return the same Builder instance.
1126          * @throws IllegalArgumentException
1127          */
setAudioAttributes(@onNull AudioAttributes attributes)1128         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
1129                 throws IllegalArgumentException {
1130             if (attributes == null) {
1131                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1132             }
1133             // keep reference, we only copy the data when building
1134             mAttributes = attributes;
1135             return this;
1136         }
1137 
1138         /**
1139          * Sets the format of the audio data to be played by the {@link AudioTrack}.
1140          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
1141          * as encoding, channel mask and sample rate.
1142          * @param format a non-null {@link AudioFormat} instance.
1143          * @return the same Builder instance.
1144          * @throws IllegalArgumentException
1145          */
setAudioFormat(@onNull AudioFormat format)1146         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
1147                 throws IllegalArgumentException {
1148             if (format == null) {
1149                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
1150             }
1151             // keep reference, we only copy the data when building
1152             mFormat = format;
1153             return this;
1154         }
1155 
1156         /**
1157          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
1158          * If using the {@link AudioTrack} in streaming mode
1159          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
1160          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
1161          * the estimated minimum buffer size for the creation of an AudioTrack instance
1162          * in streaming mode.
1163          * <br>If using the <code>AudioTrack</code> in static mode (see
1164          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
1165          * played by this instance.
1166          * @param bufferSizeInBytes
1167          * @return the same Builder instance.
1168          * @throws IllegalArgumentException
1169          */
setBufferSizeInBytes(@ntRangefrom = 0) int bufferSizeInBytes)1170         public @NonNull Builder setBufferSizeInBytes(@IntRange(from = 0) int bufferSizeInBytes)
1171                 throws IllegalArgumentException {
1172             if (bufferSizeInBytes <= 0) {
1173                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
1174             }
1175             mBufferSizeInBytes = bufferSizeInBytes;
1176             return this;
1177         }
1178 
1179         /**
1180          * Sets the encapsulation mode.
1181          *
1182          * Encapsulation mode allows metadata to be sent together with
1183          * the audio data payload in a {@code ByteBuffer}.
1184          * This requires a compatible hardware audio codec.
1185          *
1186          * @param encapsulationMode one of {@link AudioTrack#ENCAPSULATION_MODE_NONE},
1187          *        or {@link AudioTrack#ENCAPSULATION_MODE_ELEMENTARY_STREAM}.
1188          * @return the same Builder instance.
1189          */
1190         // Note: with the correct permission {@code AudioTrack#ENCAPSULATION_MODE_HANDLE}
1191         // may be used as well.
setEncapsulationMode(@ncapsulationMode int encapsulationMode)1192         public @NonNull Builder setEncapsulationMode(@EncapsulationMode int encapsulationMode) {
1193             switch (encapsulationMode) {
1194                 case ENCAPSULATION_MODE_NONE:
1195                 case ENCAPSULATION_MODE_ELEMENTARY_STREAM:
1196                 case ENCAPSULATION_MODE_HANDLE:
1197                     mEncapsulationMode = encapsulationMode;
1198                     break;
1199                 default:
1200                     throw new IllegalArgumentException(
1201                             "Invalid encapsulation mode " + encapsulationMode);
1202             }
1203             return this;
1204         }
1205 
1206         /**
1207          * Sets the mode under which buffers of audio data are transferred from the
1208          * {@link AudioTrack} to the framework.
1209          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
1210          * @return the same Builder instance.
1211          * @throws IllegalArgumentException
1212          */
setTransferMode(@ransferMode int mode)1213         public @NonNull Builder setTransferMode(@TransferMode int mode)
1214                 throws IllegalArgumentException {
1215             switch(mode) {
1216                 case MODE_STREAM:
1217                 case MODE_STATIC:
1218                     mMode = mode;
1219                     break;
1220                 default:
1221                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
1222             }
1223             return this;
1224         }
1225 
1226         /**
1227          * Sets the session ID the {@link AudioTrack} will be attached to.
1228          *
         * Note that if there's a device specific session id associated with the context,
         * explicitly
1230          * setting a session id using this method will override it
1231          * (see {@link Builder#setContext(Context)}).
1232          * @param sessionId a strictly positive ID number retrieved from another
1233          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
1234          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
1235          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
1236          * @return the same Builder instance.
1237          * @throws IllegalArgumentException
1238          */
setSessionId(@ntRangefrom = 1) int sessionId)1239         public @NonNull Builder setSessionId(@IntRange(from = 1) int sessionId)
1240                 throws IllegalArgumentException {
1241             if ((sessionId != AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
1242                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
1243             }
1244             mSessionId = sessionId;
1245             return this;
1246         }
1247 
1248         /**
1249          * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
1250          * may not be supported by the particular device, and the framework is free
1251          * to ignore such request if it is incompatible with other requests or hardware.
1252          *
1253          * @param performanceMode one of
1254          * {@link AudioTrack#PERFORMANCE_MODE_NONE},
1255          * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
1256          * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
1257          * @return the same Builder instance.
1258          * @throws IllegalArgumentException if {@code performanceMode} is not valid.
1259          */
setPerformanceMode(@erformanceMode int performanceMode)1260         public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
1261             switch (performanceMode) {
1262                 case PERFORMANCE_MODE_NONE:
1263                 case PERFORMANCE_MODE_LOW_LATENCY:
1264                 case PERFORMANCE_MODE_POWER_SAVING:
1265                     mPerformanceMode = performanceMode;
1266                     break;
1267                 default:
1268                     throw new IllegalArgumentException(
1269                             "Invalid performance mode " + performanceMode);
1270             }
1271             return this;
1272         }
1273 
1274         /**
1275          * Sets whether this track will play through the offloaded audio path.
1276          * When set to true, at build time, the audio format will be checked against
1277          * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)}
1278          * to verify the audio format used by this track is supported on the device's offload
1279          * path (if any).
1280          * <br>Offload is only supported for media audio streams, and therefore requires that
1281          * the usage be {@link AudioAttributes#USAGE_MEDIA}.
1282          * @param offload true to require the offload path for playback.
1283          * @return the same Builder instance.
1284          */
        public @NonNull Builder setOffloadedPlayback(boolean offload) {
            // Stored as-is; the format/attributes are checked against offload support
            // at build time (see the method documentation above).
            mOffload = offload;
            return this;
        }
1289 
1290         /**
1291          * Sets the tuner configuration for the {@code AudioTrack}.
1292          *
1293          * The {@link AudioTrack.TunerConfiguration} consists of parameters obtained from
1294          * the Android TV tuner API which indicate the audio content stream id and the
1295          * synchronization id for the {@code AudioTrack}.
1296          *
         * @param tunerConfiguration obtained from the
         *     {@link AudioTrack.TunerConfiguration#TunerConfiguration(int, int)} constructor.
         * @return the same Builder instance.
1299          * @hide
1300          */
1301         @SystemApi
1302         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
setTunerConfiguration( @onNull TunerConfiguration tunerConfiguration)1303         public @NonNull Builder setTunerConfiguration(
1304                 @NonNull TunerConfiguration tunerConfiguration) {
1305             if (tunerConfiguration == null) {
1306                 throw new IllegalArgumentException("tunerConfiguration is null");
1307             }
1308             mTunerConfiguration = tunerConfiguration;
1309             return this;
1310         }
1311 
1312         /**
1313          * @hide
1314          * Sets the {@link AudioTrack} call redirection mode.
1315          * Used when creating an AudioTrack to inject audio to call uplink path. The mode
1316          * indicates if the call is a PSTN call or a VoIP call in which case a dynamic audio
1317          * policy is created to use this track as the source for all capture with voice
1318          * communication preset.
1319          *
1320          * @param callRedirectionMode one of
1321          * {@link AudioManager#CALL_REDIRECT_NONE},
1322          * {@link AudioManager#CALL_REDIRECT_PSTN},
1323          * or {@link AAudioManager#CALL_REDIRECT_VOIP}.
1324          * @return the same Builder instance.
1325          * @throws IllegalArgumentException if {@code callRedirectionMode} is not valid.
1326          */
setCallRedirectionMode( @udioManager.CallRedirectionMode int callRedirectionMode)1327         public @NonNull Builder setCallRedirectionMode(
1328                 @AudioManager.CallRedirectionMode int callRedirectionMode) {
1329             switch (callRedirectionMode) {
1330                 case AudioManager.CALL_REDIRECT_NONE:
1331                 case AudioManager.CALL_REDIRECT_PSTN:
1332                 case AudioManager.CALL_REDIRECT_VOIP:
1333                     mCallRedirectionMode = callRedirectionMode;
1334                     break;
1335                 default:
1336                     throw new IllegalArgumentException(
1337                             "Invalid call redirection mode " + callRedirectionMode);
1338             }
1339             return this;
1340         }
1341 
buildCallInjectionTrack()1342         private @NonNull AudioTrack buildCallInjectionTrack() {
1343             AudioMixingRule audioMixingRule = new AudioMixingRule.Builder()
1344                     .addMixRule(AudioMixingRule.RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET,
1345                             new AudioAttributes.Builder()
1346                                    .setCapturePreset(MediaRecorder.AudioSource.VOICE_COMMUNICATION)
1347                                    .setForCallRedirection()
1348                                    .build())
1349                     .setTargetMixRole(AudioMixingRule.MIX_ROLE_INJECTOR)
1350                     .build();
1351             AudioMix audioMix = new AudioMix.Builder(audioMixingRule)
1352                     .setFormat(mFormat)
1353                     .setRouteFlags(AudioMix.ROUTE_FLAG_LOOP_BACK)
1354                     .build();
1355             AudioPolicy audioPolicy =
1356                     new AudioPolicy.Builder(/*context=*/ null).addMix(audioMix).build();
1357             if (AudioManager.registerAudioPolicyStatic(audioPolicy) != 0) {
1358                 throw new UnsupportedOperationException("Error: could not register audio policy");
1359             }
1360             AudioTrack track = audioPolicy.createAudioTrackSource(audioMix);
1361             if (track == null) {
1362                 throw new UnsupportedOperationException("Cannot create injection AudioTrack");
1363             }
1364             track.unregisterAudioPolicyOnRelease(audioPolicy);
1365             return track;
1366         }
1367 
        /**
         * Builds an {@link AudioTrack} instance initialized with all the parameters set
         * on this <code>Builder</code>.
         * @return a new successfully initialized {@link AudioTrack} instance.
         * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
         *     were incompatible, or if they are not supported by the device,
         *     or if the device was not available.
         */
        public @NonNull AudioTrack build() throws UnsupportedOperationException {
            // Default attributes: media usage.
            if (mAttributes == null) {
                mAttributes = new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .build();
            }
            // Translate the requested performance mode into attribute flags; low latency
            // and deep buffer are mutually exclusive, so each case clears the other's flag.
            switch (mPerformanceMode) {
            case PERFORMANCE_MODE_LOW_LATENCY:
                mAttributes = new AudioAttributes.Builder(mAttributes)
                    .replaceFlags((mAttributes.getAllFlags()
                            | AudioAttributes.FLAG_LOW_LATENCY)
                            & ~AudioAttributes.FLAG_DEEP_BUFFER)
                    .build();
                break;
            case PERFORMANCE_MODE_NONE:
                if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
                    break; // do not enable deep buffer mode.
                }
                // permitted to fall through to enable deep buffer
            case PERFORMANCE_MODE_POWER_SAVING:
                mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
                break;
            }

            // Default format: stereo, default encoding, sample rate left unspecified.
            if (mFormat == null) {
                mFormat = new AudioFormat.Builder()
                        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                        //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
                        .setEncoding(AudioFormat.ENCODING_DEFAULT)
                        .build();
            }

            // VoIP call redirection uses a dedicated audio-policy-based construction path;
            // PSTN redirection only tags the attributes and continues normal construction.
            if (mCallRedirectionMode == AudioManager.CALL_REDIRECT_VOIP) {
                return buildCallInjectionTrack();
            } else if (mCallRedirectionMode == AudioManager.CALL_REDIRECT_PSTN) {
                mAttributes = new AudioAttributes.Builder(mAttributes)
                        .setForCallRedirection()
                        .build();
            }

            // Offload: reject the incompatible low-latency mode, then verify the device
            // actually supports direct playback of this format/attributes combination.
            if (mOffload) {
                if (mPerformanceMode == PERFORMANCE_MODE_LOW_LATENCY) {
                    throw new UnsupportedOperationException(
                            "Offload and low latency modes are incompatible");
                }
                if (AudioSystem.getDirectPlaybackSupport(mFormat, mAttributes)
                        == AudioSystem.DIRECT_NOT_SUPPORTED) {
                    throw new UnsupportedOperationException(
                            "Cannot create AudioTrack, offload format / attributes not supported");
                }
            }

            // TODO: Check mEncapsulationMode compatibility with MODE_STATIC, etc?

            // If the buffer size is not specified in streaming mode,
            // use a single frame for the buffer size and let the
            // native code figure out the minimum buffer size.
            if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
                int bytesPerSample = 1;
                if (AudioFormat.isEncodingLinearFrames(mFormat.getEncoding())) {
                    try {
                        bytesPerSample = mFormat.getBytesPerSample(mFormat.getEncoding());
                    } catch (IllegalArgumentException e) {
                        // do nothing: non-linear encodings report no per-sample size
                    }
                }
                mBufferSizeInBytes = mFormat.getChannelCount() * bytesPerSample;
            }

            // Constructor argument problems surface as IllegalArgumentException; the
            // Builder contract converts them to UnsupportedOperationException.
            try {
                final AudioTrack track = new AudioTrack(
                        mContext, mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId,
                        mOffload, mEncapsulationMode, mTunerConfiguration);
                if (track.getState() == STATE_UNINITIALIZED) {
                    // release is not necessary
                    throw new UnsupportedOperationException("Cannot create AudioTrack");
                }
                return track;
            } catch (IllegalArgumentException e) {
                throw new UnsupportedOperationException(e.getMessage());
            }
        }
1462     }
1463 
1464     /**
1465      * Sets an {@link AudioPolicy} to automatically unregister when the track is released.
1466      *
1467      * <p>This is to prevent users of the call audio injection API from having to manually
1468      * unregister the policy that was used to create the track.
1469      */
unregisterAudioPolicyOnRelease(AudioPolicy audioPolicy)1470     private void unregisterAudioPolicyOnRelease(AudioPolicy audioPolicy) {
1471         mAudioPolicy = audioPolicy;
1472     }
1473 
    /**
     * Configures the delay and padding values for the current compressed stream playing
     * in offload mode.
     * This can only be used on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. The unit is frames, where a
     * frame indicates the number of samples per channel, e.g. 100 frames for a stereo compressed
     * stream corresponds to 200 decoded interleaved PCM samples.
     * @param delayInFrames number of frames to be ignored at the beginning of the stream. A value
     *     of 0 indicates no delay is to be applied.
     * @param paddingInFrames number of frames to be ignored at the end of the stream. A value
     *     of 0 indicates no padding is to be applied.
     * @throws IllegalArgumentException if either argument is negative.
     * @throws IllegalStateException if the track is not offloaded or not initialized.
     */
    public void setOffloadDelayPadding(@IntRange(from = 0) int delayInFrames,
            @IntRange(from = 0) int paddingInFrames) {
        // Argument checks come before state checks; padding is validated first.
        if (paddingInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative padding");
        }
        if (delayInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative delay");
        }
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal use of delay/padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        // Cache the values for the getters, then push them to the native layer.
        mOffloadDelayFrames = delayInFrames;
        mOffloadPaddingFrames = paddingInFrames;
        native_set_delay_padding(delayInFrames, paddingInFrames);
    }
1504 
1505     /**
1506      * Return the decoder delay of an offloaded track, expressed in frames, previously set with
1507      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1508      * <p>This delay indicates the number of frames to be ignored at the beginning of the stream.
1509      * This value can only be queried on a track successfully initialized with
1510      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1511      * @return decoder delay expressed in frames.
1512      */
getOffloadDelay()1513     public @IntRange(from = 0) int getOffloadDelay() {
1514         if (!mOffloaded) {
1515             throw new IllegalStateException("Illegal query of delay on non-offloaded track");
1516         }
1517         if (mState == STATE_UNINITIALIZED) {
1518             throw new IllegalStateException("Illegal query of delay on uninitialized track");
1519         }
1520         return mOffloadDelayFrames;
1521     }
1522 
1523     /**
1524      * Return the decoder padding of an offloaded track, expressed in frames, previously set with
1525      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1526      * <p>This padding indicates the number of frames to be ignored at the end of the stream.
1527      * This value can only be queried on a track successfully initialized with
1528      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1529      * @return decoder padding expressed in frames.
1530      */
getOffloadPadding()1531     public @IntRange(from = 0) int getOffloadPadding() {
1532         if (!mOffloaded) {
1533             throw new IllegalStateException("Illegal query of padding on non-offloaded track");
1534         }
1535         if (mState == STATE_UNINITIALIZED) {
1536             throw new IllegalStateException("Illegal query of padding on uninitialized track");
1537         }
1538         return mOffloadPaddingFrames;
1539     }
1540 
    /**
     * Declares that the last write() operation on this track provided the last buffer of this
     * stream.
     * After the end of stream, previously set padding and delay values are ignored.
     * Can only be called if the AudioTrack is opened in offload mode
     * {@see Builder#setOffloadedPlayback(boolean)}.
     * Can only be called if the AudioTrack is in state {@link #PLAYSTATE_PLAYING}
     * {@see #getPlayState()}.
     * Use this method in the same thread as any write() operation.
     * @throws IllegalStateException if the track is not offloaded, not initialized,
     *     not playing, or has no registered StreamEventCallback.
     */
    public void setOffloadEndOfStream() {
        // Precondition checks: offloaded, initialized, and currently playing.
        if (!mOffloaded) {
            throw new IllegalStateException("EOS not supported on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        if (mPlayState != PLAYSTATE_PLAYING) {
            throw new IllegalStateException("EOS not supported if not playing");
        }
        // A StreamEventCallback must be registered; its list is guarded by its own lock.
        synchronized (mStreamEventCbLock) {
            if (mStreamEventCbInfoList.size() == 0) {
                throw new IllegalStateException("EOS not supported without StreamEventCallback");
            }
        }

        // native_stop() is invoked while holding mPlayStateLock so the STOPPING state and
        // the pending-EOS flag are updated atomically with the native call.
        synchronized (mPlayStateLock) {
            native_stop();
            mOffloadEosPending = true;
            mPlayState = PLAYSTATE_STOPPING;
        }
    }
1573 
1574     /**
1575      * Returns whether the track was built with {@link Builder#setOffloadedPlayback(boolean)} set
1576      * to {@code true}.
1577      * @return true if the track is using offloaded playback.
1578      */
isOffloadedPlayback()1579     public boolean isOffloadedPlayback() {
1580         return mOffloaded;
1581     }
1582 
1583     /**
1584      * Returns whether direct playback of an audio format with the provided attributes is
1585      * currently supported on the system.
1586      * <p>Direct playback means that the audio stream is not resampled or downmixed
1587      * by the framework. Checking for direct support can help the app select the representation
1588      * of audio content that most closely matches the capabilities of the device and peripherials
1589      * (e.g. A/V receiver) connected to it. Note that the provided stream can still be re-encoded
1590      * or mixed with other streams, if needed.
1591      * <p>Also note that this query only provides information about the support of an audio format.
1592      * It does not indicate whether the resources necessary for the playback are available
1593      * at that instant.
1594      * @param format a non-null {@link AudioFormat} instance describing the format of
1595      *   the audio data.
1596      * @param attributes a non-null {@link AudioAttributes} instance.
1597      * @return true if the given audio format can be played directly.
1598      * @deprecated Use {@link AudioManager#getDirectPlaybackSupport(AudioFormat, AudioAttributes)}
1599      *             instead.
1600      */
1601     @Deprecated
isDirectPlaybackSupported(@onNull AudioFormat format, @NonNull AudioAttributes attributes)1602     public static boolean isDirectPlaybackSupported(@NonNull AudioFormat format,
1603             @NonNull AudioAttributes attributes) {
1604         if (format == null) {
1605             throw new IllegalArgumentException("Illegal null AudioFormat argument");
1606         }
1607         if (attributes == null) {
1608             throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1609         }
1610         return native_is_direct_output_supported(format.getEncoding(), format.getSampleRate(),
1611                 format.getChannelMask(), format.getChannelIndexMask(),
1612                 attributes.getContentType(), attributes.getUsage(), attributes.getFlags());
1613     }
1614 
    /*
     * The MAX_LEVEL should be exactly representable by an IEEE 754-2008 base32 float.
     * This means fractions must be divisible by a power of 2. For example,
     * 10.25f is OK as 0.25 is 1/4, but 10.1f is NOT OK as 1/10 is not expressible by
     * a finite binary fraction.
     *
     * 48.f is the nominal max for API level {@link android.os.Build.VERSION_CODES#R}.
     * We use this to suggest a baseline range for implementation.
     *
     * The API contract specification allows increasing this value in a future
     * API release, but not decreasing this value.
     */
    private static final float MAX_AUDIO_DESCRIPTION_MIX_LEVEL = 48.f;
1628 
isValidAudioDescriptionMixLevel(float level)1629     private static boolean isValidAudioDescriptionMixLevel(float level) {
1630         return !(Float.isNaN(level) || level > MAX_AUDIO_DESCRIPTION_MIX_LEVEL);
1631     }
1632 
1633     /**
1634      * Sets the Audio Description mix level in dB.
1635      *
1636      * For AudioTracks incorporating a secondary Audio Description stream
1637      * (where such contents may be sent through an Encapsulation Mode
1638      * other than {@link #ENCAPSULATION_MODE_NONE}).
1639      * or internally by a HW channel),
1640      * the level of mixing of the Audio Description to the Main Audio stream
1641      * is controlled by this method.
1642      *
1643      * Such mixing occurs <strong>prior</strong> to overall volume scaling.
1644      *
1645      * @param level a floating point value between
1646      *     {@code Float.NEGATIVE_INFINITY} to {@code +48.f},
1647      *     where {@code Float.NEGATIVE_INFINITY} means the Audio Description is not mixed
1648      *     and a level of {@code 0.f} means the Audio Description is mixed without scaling.
1649      * @return true on success, false on failure.
1650      */
setAudioDescriptionMixLeveldB( @loatRangeto = 48.f, toInclusive = true) float level)1651     public boolean setAudioDescriptionMixLeveldB(
1652             @FloatRange(to = 48.f, toInclusive = true) float level) {
1653         if (!isValidAudioDescriptionMixLevel(level)) {
1654             throw new IllegalArgumentException("level is out of range" + level);
1655         }
1656         return native_set_audio_description_mix_level_db(level) == SUCCESS;
1657     }
1658 
    /**
     * Returns the Audio Description mix level in dB.
     *
     * If Audio Description mixing is unavailable from the hardware device,
     * a value of {@code Float.NEGATIVE_INFINITY} is returned.
     *
     * @return the current Audio Description Mix Level in dB.
     *     A value of {@code Float.NEGATIVE_INFINITY} means
     *     that the audio description is not mixed or
     *     the hardware is not available.
     *     This should reflect the <strong>true</strong> internal device mix level;
     *     hence the application might receive any floating value
     *     except {@code Float.NaN}.
     */
    public float getAudioDescriptionMixLeveldB() {
        // Out-parameter for the native query; default is "not mixed / unavailable".
        float[] level = { Float.NEGATIVE_INFINITY };
        try {
            final int status = native_get_audio_description_mix_level_db(level);
            // A failed query or a NaN result both map to "unavailable".
            if (status != SUCCESS || Float.isNaN(level[0])) {
                return Float.NEGATIVE_INFINITY;
            }
        } catch (Exception e) {
            // Best-effort query: any native-layer error is reported as "unavailable".
            return Float.NEGATIVE_INFINITY;
        }
        return level[0];
    }
1685 
isValidDualMonoMode(@ualMonoMode int dualMonoMode)1686     private static boolean isValidDualMonoMode(@DualMonoMode int dualMonoMode) {
1687         switch (dualMonoMode) {
1688             case DUAL_MONO_MODE_OFF:
1689             case DUAL_MONO_MODE_LR:
1690             case DUAL_MONO_MODE_LL:
1691             case DUAL_MONO_MODE_RR:
1692                 return true;
1693             default:
1694                 return false;
1695         }
1696     }
1697 
1698     /**
1699      * Sets the Dual Mono mode presentation on the output device.
1700      *
1701      * The Dual Mono mode is generally applied to stereo audio streams
1702      * where the left and right channels come from separate sources.
1703      *
1704      * For compressed audio, where the decoding is done in hardware,
1705      * Dual Mono presentation needs to be performed
1706      * by the hardware output device
1707      * as the PCM audio is not available to the framework.
1708      *
1709      * @param dualMonoMode one of {@link #DUAL_MONO_MODE_OFF},
1710      *     {@link #DUAL_MONO_MODE_LR},
1711      *     {@link #DUAL_MONO_MODE_LL},
1712      *     {@link #DUAL_MONO_MODE_RR}.
1713      *
1714      * @return true on success, false on failure if the output device
1715      *     does not support Dual Mono mode.
1716      */
setDualMonoMode(@ualMonoMode int dualMonoMode)1717     public boolean setDualMonoMode(@DualMonoMode int dualMonoMode) {
1718         if (!isValidDualMonoMode(dualMonoMode)) {
1719             throw new IllegalArgumentException(
1720                     "Invalid Dual Mono mode " + dualMonoMode);
1721         }
1722         return native_set_dual_mono_mode(dualMonoMode) == SUCCESS;
1723     }
1724 
    /**
     * Returns the Dual Mono mode presentation setting.
     *
     * If no Dual Mono presentation is available for the output device,
     * then {@link #DUAL_MONO_MODE_OFF} is returned.
     *
     * @return one of {@link #DUAL_MONO_MODE_OFF},
     *     {@link #DUAL_MONO_MODE_LR},
     *     {@link #DUAL_MONO_MODE_LL},
     *     {@link #DUAL_MONO_MODE_RR}.
     */
    public @DualMonoMode int getDualMonoMode() {
        // Out-parameter for the native query; default is "off".
        int[] dualMonoMode = { DUAL_MONO_MODE_OFF };
        try {
            final int status = native_get_dual_mono_mode(dualMonoMode);
            // A failed query or an out-of-range native value both map to "off".
            if (status != SUCCESS || !isValidDualMonoMode(dualMonoMode[0])) {
                return DUAL_MONO_MODE_OFF;
            }
        } catch (Exception e) {
            // Best-effort query: any native-layer error is reported as "off".
            return DUAL_MONO_MODE_OFF;
        }
        return dualMonoMode[0];
    }
1748 
    // mask of all the positional channels supported, however the allowed combinations
    // are further restricted by the matching left/right rule and
    // AudioSystem.OUT_CHANNEL_COUNT_MAX
    // (each bit below is one of the AudioFormat.CHANNEL_OUT_* positional definitions)
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY_2 |
            AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT;
1779 
1780     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
1781     // power saving to be automatically enabled for an AudioTrack. Returns false if
1782     // power saving is already enabled in the attributes parameter.
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)1783     private static boolean shouldEnablePowerSaving(
1784             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
1785             int bufferSizeInBytes, int mode) {
1786         // If no attributes, OK
1787         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
1788         // Only consider flags that are not compatible with FLAG_DEEP_BUFFER. We include
1789         // FLAG_DEEP_BUFFER because if set the request is explicit and
1790         // shouldEnablePowerSaving() should return false.
1791         final int flags = attributes.getAllFlags()
1792                 & (AudioAttributes.FLAG_DEEP_BUFFER | AudioAttributes.FLAG_LOW_LATENCY
1793                     | AudioAttributes.FLAG_HW_AV_SYNC | AudioAttributes.FLAG_BEACON);
1794 
1795         if (attributes != null &&
1796                 (flags != 0  // cannot have any special flags
1797                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
1798                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
1799                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
1800                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
1801             return false;
1802         }
1803 
1804         // Format must be fully specified and be linear pcm
1805         if (format == null
1806                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
1807                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
1808                 || !AudioFormat.isValidEncoding(format.getEncoding())
1809                 || format.getChannelCount() < 1) {
1810             return false;
1811         }
1812 
1813         // Mode must be streaming
1814         if (mode != MODE_STREAM) {
1815             return false;
1816         }
1817 
1818         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
1819         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
1820         if (bufferSizeInBytes != 0) {
1821             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
1822             final int MILLIS_PER_SECOND = 1000;
1823             final long bufferTargetSize =
1824                     BUFFER_TARGET_MODE_STREAM_MS
1825                     * format.getChannelCount()
1826                     * format.getBytesPerSample(format.getEncoding())
1827                     * format.getSampleRate()
1828                     / MILLIS_PER_SECOND;
1829             if (bufferSizeInBytes < bufferTargetSize) {
1830                 return false;
1831             }
1832         }
1833 
1834         return true;
1835     }
1836 
    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // SAMPLE_RATE_UNSPECIFIED is explicitly allowed in addition to the [MIN, MAX] range.
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 with a mask other than stereo or 8 channels is warned about, not rejected.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO
                && AudioFormat.channelCountFromOutChannelMask(channelConfig) != 8) {
            Log.w(TAG, "ENCODING_IEC61937 is configured with channel mask as " + channelConfig
                    + ", which is not 2 or 8 channels");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
                throw new IllegalArgumentException(
                        "Unsupported channel mask configuration " + channelConfig
                        + " for encoding " + audioFormat);
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // As of S, we accept up to 24 channel index mask.
            final int fullIndexMask = (1 << AudioSystem.FCC_24) - 1;
            final int channelIndexCount = Integer.bitCount(channelIndexMask);
            final boolean accepted = (channelIndexMask & ~fullIndexMask) == 0
                    && (!AudioFormat.isEncodingLinearFrames(audioFormat)  // compressed OK
                            || channelIndexCount <= AudioSystem.OUT_CHANNEL_COUNT_MAX); // PCM
            if (!accepted) {
                throw new IllegalArgumentException(
                        "Unsupported channel index mask configuration " + channelIndexMask
                        + " for encoding " + audioFormat);
            }
            // When both a positional mask and an index mask are given, their counts must agree.
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // Mode must be STREAM or STATIC; STATIC additionally requires a linear PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
1933 
    // Map of a human-readable pair name to the combined position mask of its
    // left/right channels. isMultichannelConfigSupported() iterates this map to
    // verify that paired channels are always requested together (e.g. never a
    // left channel without its matching right channel).
    private static final Map<String, Integer> CHANNEL_PAIR_MAP = Map.of(
            "front", AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT,
            "back", AudioFormat.CHANNEL_OUT_BACK_LEFT
                    | AudioFormat.CHANNEL_OUT_BACK_RIGHT,
            "front of center", AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER,
            "side", AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT,
            "top front", AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT,
            "top back", AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT
                    | AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT,
            "top side", AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT
                    | AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT,
            "bottom front", AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT,
            "front wide", AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT);
1953 
1954     /**
1955      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
1956      * @param channelConfig the mask to validate
1957      * @return false if the AudioTrack can't be used with such a mask
1958      */
isMultichannelConfigSupported(int channelConfig, int encoding)1959     private static boolean isMultichannelConfigSupported(int channelConfig, int encoding) {
1960         // check for unsupported channels
1961         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
1962             loge("Channel configuration features unsupported channels");
1963             return false;
1964         }
1965         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1966         final int channelCountLimit;
1967         try {
1968             channelCountLimit = AudioFormat.isEncodingLinearFrames(encoding)
1969                     ? AudioSystem.OUT_CHANNEL_COUNT_MAX  // PCM limited to OUT_CHANNEL_COUNT_MAX
1970                     : AudioSystem.FCC_24;                // Compressed limited to 24 channels
1971         } catch (IllegalArgumentException iae) {
1972             loge("Unsupported encoding " + iae);
1973             return false;
1974         }
1975         if (channelCount > channelCountLimit) {
1976             loge("Channel configuration contains too many channels for encoding "
1977                     + encoding + "(" + channelCount + " > " + channelCountLimit + ")");
1978             return false;
1979         }
1980         // check for unsupported multichannel combinations:
1981         // - FL/FR must be present
1982         // - L/R channels must be paired (e.g. no single L channel)
1983         final int frontPair =
1984                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
1985         if ((channelConfig & frontPair) != frontPair) {
1986                 loge("Front channels must be present in multichannel configurations");
1987                 return false;
1988         }
1989         // Check all pairs to see that they are matched (front duplicated here).
1990         for (Map.Entry<String, Integer> e : CHANNEL_PAIR_MAP.entrySet()) {
1991             final int positionPair = e.getValue();
1992             if ((channelConfig & positionPair) != 0
1993                     && (channelConfig & positionPair) != positionPair) {
1994                 loge("Channel pair (" + e.getKey() + ") cannot be used independently");
1995                 return false;
1996             }
1997         }
1998         return true;
1999     }
2000 
2001 
2002     // Convenience method for the constructor's audio buffer size check.
2003     // preconditions:
2004     //    mChannelCount is valid
2005     //    mAudioFormat is valid
2006     // postcondition:
2007     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)2008     private void audioBuffSizeCheck(int audioBufferSize) {
2009         // NB: this section is only valid with PCM or IEC61937 data.
2010         //     To update when supporting compressed formats
2011         int frameSizeInBytes;
2012         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
2013             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
2014         } else {
2015             frameSizeInBytes = 1;
2016         }
2017         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
2018             throw new IllegalArgumentException("Invalid audio buffer size.");
2019         }
2020 
2021         mNativeBufferSizeInBytes = audioBufferSize;
2022         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
2023     }
2024 
2025 
2026     /**
2027      * Releases the native AudioTrack resources.
2028      */
release()2029     public void release() {
2030         synchronized (mStreamEventCbLock){
2031             endStreamEventHandling();
2032         }
2033         // even though native_release() stops the native AudioTrack, we need to stop
2034         // AudioTrack subclasses too.
2035         try {
2036             stop();
2037         } catch(IllegalStateException ise) {
2038             // don't raise an exception, we're releasing the resources.
2039         }
2040         if (mAudioPolicy != null) {
2041             AudioManager.unregisterAudioPolicyAsyncStatic(mAudioPolicy);
2042             mAudioPolicy = null;
2043         }
2044 
2045         baseRelease();
2046         native_release();
2047         synchronized (mPlayStateLock) {
2048             mState = STATE_UNINITIALIZED;
2049             mPlayState = PLAYSTATE_STOPPED;
2050             mPlayStateLock.notify();
2051         }
2052     }
2053 
    /**
     * Finalizer: releases native resources in case {@link #release()} was
     * never called explicitly.
     */
    @Override
    protected void finalize() {
        // Detach the native routing callback first so no callback fires while
        // the native track is being finalized.
        tryToDisableNativeRoutingCallback();
        baseRelease();
        native_finalize();
    }
2060 
2061     //--------------------------------------------------------------------------
2062     // Getters
2063     //--------------------
2064     /**
2065      * Returns the minimum gain value, which is the constant 0.0.
2066      * Gain values less than 0.0 will be clamped to 0.0.
2067      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2068      * @return the minimum value, which is the constant 0.0.
2069      */
getMinVolume()2070     static public float getMinVolume() {
2071         return GAIN_MIN;
2072     }
2073 
2074     /**
2075      * Returns the maximum gain value, which is greater than or equal to 1.0.
2076      * Gain values greater than the maximum will be clamped to the maximum.
2077      * <p>The word "volume" in the API name is historical; this is actually a gain.
2078      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
2079      * corresponds to a gain of 0 dB (sample values left unmodified).
2080      * @return the maximum value, which is greater than or equal to 1.0.
2081      */
getMaxVolume()2082     static public float getMaxVolume() {
2083         return GAIN_MAX;
2084     }
2085 
2086     /**
2087      * Returns the configured audio source sample rate in Hz.
2088      * The initial source sample rate depends on the constructor parameters,
2089      * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
2090      * If the constructor had a specific sample rate, then the initial sink sample rate is that
2091      * value.
2092      * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
2093      * then the initial sink sample rate is a route-dependent default value based on the source [sic].
2094      */
getSampleRate()2095     public int getSampleRate() {
2096         return mSampleRate;
2097     }
2098 
2099     /**
2100      * Returns the current playback sample rate rate in Hz.
2101      */
getPlaybackRate()2102     public int getPlaybackRate() {
2103         return native_get_playback_rate();
2104     }
2105 
2106     /**
2107      * Returns the current playback parameters.
2108      * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
2109      * @return current {@link PlaybackParams}.
2110      * @throws IllegalStateException if track is not initialized.
2111      */
getPlaybackParams()2112     public @NonNull PlaybackParams getPlaybackParams() {
2113         return native_get_playback_params();
2114     }
2115 
2116     /**
2117      * Returns the {@link AudioAttributes} used in configuration.
2118      * If a {@code streamType} is used instead of an {@code AudioAttributes}
2119      * to configure the AudioTrack
2120      * (the use of {@code streamType} for configuration is deprecated),
2121      * then the {@code AudioAttributes}
2122      * equivalent to the {@code streamType} is returned.
2123      * @return The {@code AudioAttributes} used to configure the AudioTrack.
2124      * @throws IllegalStateException If the track is not initialized.
2125      */
getAudioAttributes()2126     public @NonNull AudioAttributes getAudioAttributes() {
2127         if (mState == STATE_UNINITIALIZED || mConfiguredAudioAttributes == null) {
2128             throw new IllegalStateException("track not initialized");
2129         }
2130         return mConfiguredAudioAttributes;
2131     }
2132 
2133     /**
2134      * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
2135      * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
2136      */
getAudioFormat()2137     public int getAudioFormat() {
2138         return mAudioFormat;
2139     }
2140 
2141     /**
2142      * Returns the volume stream type of this AudioTrack.
2143      * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
2144      * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
2145      * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
2146      * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
2147      * {@link AudioManager#STREAM_ACCESSIBILITY}.
2148      */
getStreamType()2149     public int getStreamType() {
2150         return mStreamType;
2151     }
2152 
2153     /**
2154      * Returns the configured channel position mask.
2155      * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
2156      * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
2157      * This method may return {@link AudioFormat#CHANNEL_INVALID} if
2158      * a channel index mask was used. Consider
2159      * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
2160      * which contains both the channel position mask and the channel index mask.
2161      */
getChannelConfiguration()2162     public int getChannelConfiguration() {
2163         return mChannelConfiguration;
2164     }
2165 
2166     /**
2167      * Returns the configured <code>AudioTrack</code> format.
2168      * @return an {@link AudioFormat} containing the
2169      * <code>AudioTrack</code> parameters at the time of configuration.
2170      */
getFormat()2171     public @NonNull AudioFormat getFormat() {
2172         AudioFormat.Builder builder = new AudioFormat.Builder()
2173             .setSampleRate(mSampleRate)
2174             .setEncoding(mAudioFormat);
2175         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
2176             builder.setChannelMask(mChannelConfiguration);
2177         }
2178         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
2179             builder.setChannelIndexMask(mChannelIndexMask);
2180         }
2181         return builder.build();
2182     }
2183 
2184     /**
2185      * Returns the configured number of channels.
2186      */
getChannelCount()2187     public int getChannelCount() {
2188         return mChannelCount;
2189     }
2190 
2191     /**
2192      * Returns the state of the AudioTrack instance. This is useful after the
2193      * AudioTrack instance has been created to check if it was initialized
2194      * properly. This ensures that the appropriate resources have been acquired.
2195      * @see #STATE_UNINITIALIZED
2196      * @see #STATE_INITIALIZED
2197      * @see #STATE_NO_STATIC_DATA
2198      */
getState()2199     public int getState() {
2200         return mState;
2201     }
2202 
2203     /**
2204      * Returns the playback state of the AudioTrack instance.
2205      * @see #PLAYSTATE_STOPPED
2206      * @see #PLAYSTATE_PAUSED
2207      * @see #PLAYSTATE_PLAYING
2208      */
getPlayState()2209     public int getPlayState() {
2210         synchronized (mPlayStateLock) {
2211             switch (mPlayState) {
2212                 case PLAYSTATE_STOPPING:
2213                     return PLAYSTATE_PLAYING;
2214                 case PLAYSTATE_PAUSED_STOPPING:
2215                     return PLAYSTATE_PAUSED;
2216                 default:
2217                     return mPlayState;
2218             }
2219         }
2220     }
2221 
2222 
2223     /**
2224      * Returns the effective size of the <code>AudioTrack</code> buffer
2225      * that the application writes to.
2226      * <p> This will be less than or equal to the result of
2227      * {@link #getBufferCapacityInFrames()}.
2228      * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
2229      * <p> If the track is subsequently routed to a different output sink, the buffer
2230      * size and capacity may enlarge to accommodate.
2231      * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
2232      * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
2233      * the size of the <code>AudioTrack</code> buffer in bytes.
2234      * <p> See also {@link AudioManager#getProperty(String)} for key
2235      * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
2236      * @return current size in frames of the <code>AudioTrack</code> buffer.
2237      * @throws IllegalStateException if track is not initialized.
2238      */
getBufferSizeInFrames()2239     public @IntRange (from = 0) int getBufferSizeInFrames() {
2240         return native_get_buffer_size_frames();
2241     }
2242 
2243     /**
2244      * Limits the effective size of the <code>AudioTrack</code> buffer
2245      * that the application writes to.
2246      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
2247      * If a blocking write is used then the write will block until the data
2248      * can fit within this limit.
2249      * <p>Changing this limit modifies the latency associated with
2250      * the buffer for this track. A smaller size will give lower latency
2251      * but there may be more glitches due to buffer underruns.
2252      * <p>The actual size used may not be equal to this requested size.
2253      * It will be limited to a valid range with a maximum of
2254      * {@link #getBufferCapacityInFrames()}.
2255      * It may also be adjusted slightly for internal reasons.
2256      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
2257      * will be returned.
2258      * <p>This method is supported for PCM audio at all API levels.
2259      * Compressed audio is supported in API levels 33 and above.
2260      * For compressed streams the size of a frame is considered to be exactly one byte.
2261      *
2262      * @param bufferSizeInFrames requested buffer size in frames
2263      * @return the actual buffer size in frames or an error code,
2264      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2265      * @throws IllegalStateException if track is not initialized.
2266      */
setBufferSizeInFrames(@ntRange from = 0) int bufferSizeInFrames)2267     public int setBufferSizeInFrames(@IntRange (from = 0) int bufferSizeInFrames) {
2268         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
2269             return ERROR_INVALID_OPERATION;
2270         }
2271         if (bufferSizeInFrames < 0) {
2272             return ERROR_BAD_VALUE;
2273         }
2274         return native_set_buffer_size_frames(bufferSizeInFrames);
2275     }
2276 
2277     /**
2278      *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
2279      *  <p> If the track's creation mode is {@link #MODE_STATIC},
2280      *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
2281      *  A static track's frame count will not change.
2282      *  <p> If the track's creation mode is {@link #MODE_STREAM},
2283      *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
2284      *  For streaming tracks, this value may be rounded up to a larger value if needed by
2285      *  the target output sink, and
2286      *  if the track is subsequently routed to a different output sink, the
2287      *  frame count may enlarge to accommodate.
2288      *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
2289      *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
2290      *  the size of the <code>AudioTrack</code> buffer in bytes.
2291      *  <p> See also {@link AudioManager#getProperty(String)} for key
2292      *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
2293      *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
2294      *  @throws IllegalStateException if track is not initialized.
2295      */
getBufferCapacityInFrames()2296     public @IntRange (from = 0) int getBufferCapacityInFrames() {
2297         return native_get_buffer_capacity_frames();
2298     }
2299 
2300     /**
2301      * Sets the streaming start threshold for an <code>AudioTrack</code>.
2302      * <p> The streaming start threshold is the buffer level that the written audio
2303      * data must reach for audio streaming to start after {@link #play()} is called.
2304      * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
2305      *
2306      * @param startThresholdInFrames the desired start threshold.
2307      * @return the actual start threshold in frames value. This is
2308      *         an integer between 1 to the buffer capacity
2309      *         (see {@link #getBufferCapacityInFrames()}),
2310      *         and might change if the output sink changes after track creation.
2311      * @throws IllegalStateException if the track is not initialized or the
2312      *         track transfer mode is not {@link #MODE_STREAM}.
2313      * @throws IllegalArgumentException if startThresholdInFrames is not positive.
2314      * @see #getStartThresholdInFrames()
2315      */
setStartThresholdInFrames( @ntRange from = 1) int startThresholdInFrames)2316     public @IntRange(from = 1) int setStartThresholdInFrames(
2317             @IntRange (from = 1) int startThresholdInFrames) {
2318         if (mState != STATE_INITIALIZED) {
2319             throw new IllegalStateException("AudioTrack is not initialized");
2320         }
2321         if (mDataLoadMode != MODE_STREAM) {
2322             throw new IllegalStateException("AudioTrack must be a streaming track");
2323         }
2324         if (startThresholdInFrames < 1) {
2325             throw new IllegalArgumentException("startThresholdInFrames "
2326                     + startThresholdInFrames + " must be positive");
2327         }
2328         return native_setStartThresholdInFrames(startThresholdInFrames);
2329     }
2330 
2331     /**
2332      * Returns the streaming start threshold of the <code>AudioTrack</code>.
2333      * <p> The streaming start threshold is the buffer level that the written audio
2334      * data must reach for audio streaming to start after {@link #play()} is called.
2335      * When an <code>AudioTrack</code> is created, the streaming start threshold
2336      * is the buffer capacity in frames. If the buffer size in frames is reduced
2337      * by {@link #setBufferSizeInFrames(int)} to a value smaller than the start threshold
2338      * then that value will be used instead for the streaming start threshold.
2339      * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
2340      *
2341      * @return the current start threshold in frames value. This is
2342      *         an integer between 1 to the buffer capacity
2343      *         (see {@link #getBufferCapacityInFrames()}),
2344      *         and might change if the  output sink changes after track creation.
2345      * @throws IllegalStateException if the track is not initialized or the
2346      *         track is not {@link #MODE_STREAM}.
2347      * @see #setStartThresholdInFrames(int)
2348      */
getStartThresholdInFrames()2349     public @IntRange (from = 1) int getStartThresholdInFrames() {
2350         if (mState != STATE_INITIALIZED) {
2351             throw new IllegalStateException("AudioTrack is not initialized");
2352         }
2353         if (mDataLoadMode != MODE_STREAM) {
2354             throw new IllegalStateException("AudioTrack must be a streaming track");
2355         }
2356         return native_getStartThresholdInFrames();
2357     }
2358 
2359     /**
2360      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
2361      *  @return current size in frames of the <code>AudioTrack</code> buffer.
2362      *  @throws IllegalStateException
2363      *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
2364      */
2365     @Deprecated
getNativeFrameCount()2366     protected int getNativeFrameCount() {
2367         return native_get_buffer_capacity_frames();
2368     }
2369 
2370     /**
2371      * Returns marker position expressed in frames.
2372      * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
2373      * or zero if marker is disabled.
2374      */
getNotificationMarkerPosition()2375     public int getNotificationMarkerPosition() {
2376         return native_get_marker_pos();
2377     }
2378 
2379     /**
2380      * Returns the notification update period expressed in frames.
2381      * Zero means that no position update notifications are being delivered.
2382      */
getPositionNotificationPeriod()2383     public int getPositionNotificationPeriod() {
2384         return native_get_pos_update_period();
2385     }
2386 
2387     /**
2388      * Returns the playback head position expressed in frames.
2389      * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
2390      * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
2391      * This is a continuously advancing counter.  It will wrap (overflow) periodically,
2392      * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
2393      * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
2394      * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
2395      * the total number of frames played since reset,
2396      * <i>not</i> the current offset within the buffer.
2397      */
getPlaybackHeadPosition()2398     public int getPlaybackHeadPosition() {
2399         return native_get_position();
2400     }
2401 
2402     /**
2403      * Returns this track's estimated latency in milliseconds. This includes the latency due
2404      * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
2405      *
2406      * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
2407      * a better solution.
2408      * @hide
2409      */
2410     @UnsupportedAppUsage(trackingBug = 130237544)
getLatency()2411     public int getLatency() {
2412         return native_get_latency();
2413     }
2414 
2415     /**
2416      * Returns the number of underrun occurrences in the application-level write buffer
2417      * since the AudioTrack was created.
2418      * An underrun occurs if the application does not write audio
2419      * data quickly enough, causing the buffer to underflow
2420      * and a potential audio glitch or pop.
2421      * <p>
2422      * Underruns are less likely when buffer sizes are large.
2423      * It may be possible to eliminate underruns by recreating the AudioTrack with
2424      * a larger buffer.
2425      * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
2426      * effective size of the buffer.
2427      */
getUnderrunCount()2428     public int getUnderrunCount() {
2429         return native_get_underrun_count();
2430     }
2431 
2432     /**
2433      * Returns the current performance mode of the {@link AudioTrack}.
2434      *
2435      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
2436      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
2437      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
2438      * Use {@link AudioTrack.Builder#setPerformanceMode}
2439      * in the {@link AudioTrack.Builder} to enable a performance mode.
2440      * @throws IllegalStateException if track is not initialized.
2441      */
getPerformanceMode()2442     public @PerformanceMode int getPerformanceMode() {
2443         final int flags = native_get_flags();
2444         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
2445             return PERFORMANCE_MODE_LOW_LATENCY;
2446         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
2447             return PERFORMANCE_MODE_POWER_SAVING;
2448         } else {
2449             return PERFORMANCE_MODE_NONE;
2450         }
2451     }
2452 
2453     /**
2454      *  Returns the output sample rate in Hz for the specified stream type.
2455      */
getNativeOutputSampleRate(int streamType)2456     static public int getNativeOutputSampleRate(int streamType) {
2457         return native_get_output_sample_rate(streamType);
2458     }
2459 
2460     /**
2461      * Returns the estimated minimum buffer size required for an AudioTrack
2462      * object to be created in the {@link #MODE_STREAM} mode.
2463      * The size is an estimate because it does not consider either the route or the sink,
2464      * since neither is known yet.  Note that this size doesn't
2465      * guarantee a smooth playback under load, and higher values should be chosen according to
2466      * the expected frequency at which the buffer will be refilled with additional data to play.
2467      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
2468      * to a higher value than the initial source sample rate, be sure to configure the buffer size
2469      * based on the highest planned sample rate.
2470      * @param sampleRateInHz the source sample rate expressed in Hz.
2471      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
2472      * @param channelConfig describes the configuration of the audio channels.
2473      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
2474      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
2475      * @param audioFormat the format in which the audio data is represented.
2476      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
2477      *   {@link AudioFormat#ENCODING_PCM_8BIT},
2478      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
2479      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
2480      *   or {@link #ERROR} if unable to query for output properties,
2481      *   or the minimum buffer size expressed in bytes.
2482      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)2483     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
2484         int channelCount = 0;
2485         switch(channelConfig) {
2486         case AudioFormat.CHANNEL_OUT_MONO:
2487         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
2488             channelCount = 1;
2489             break;
2490         case AudioFormat.CHANNEL_OUT_STEREO:
2491         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
2492             channelCount = 2;
2493             break;
2494         default:
2495             if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
2496                 loge("getMinBufferSize(): Invalid channel configuration.");
2497                 return ERROR_BAD_VALUE;
2498             } else {
2499                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
2500             }
2501         }
2502 
2503         if (!AudioFormat.isPublicEncoding(audioFormat)) {
2504             loge("getMinBufferSize(): Invalid audio format.");
2505             return ERROR_BAD_VALUE;
2506         }
2507 
2508         // sample rate, note these values are subject to change
2509         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
2510         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
2511                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
2512             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
2513             return ERROR_BAD_VALUE;
2514         }
2515 
2516         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
2517         if (size <= 0) {
2518             loge("getMinBufferSize(): error querying hardware");
2519             return ERROR;
2520         }
2521         else {
2522             return size;
2523         }
2524     }
2525 
2526     /**
2527      * Returns the audio session ID.
2528      *
2529      * @return the ID of the audio session this AudioTrack belongs to.
2530      */
getAudioSessionId()2531     public int getAudioSessionId() {
2532         return mSessionId;
2533     }
2534 
2535    /**
2536     * Poll for a timestamp on demand.
2537     * <p>
2538     * If you need to track timestamps during initial warmup or after a routing or mode change,
2539     * you should request a new timestamp periodically until the reported timestamps
2540     * show that the frame position is advancing, or until it becomes clear that
2541     * timestamps are unavailable for this route.
2542     * <p>
2543     * After the clock is advancing at a stable rate,
2544     * query for a new timestamp approximately once every 10 seconds to once per minute.
2545     * Calling this method more often is inefficient.
2546     * It is also counter-productive to call this method more often than recommended,
2547     * because the short-term differences between successive timestamp reports are not meaningful.
2548     * If you need a high-resolution mapping between frame position and presentation time,
2549     * consider implementing that at application level, based on low-resolution timestamps.
2550     * <p>
2551     * The audio data at the returned position may either already have been
2552     * presented, or may have not yet been presented but is committed to be presented.
2553     * It is not possible to request the time corresponding to a particular position,
2554     * or to request the (fractional) position corresponding to a particular time.
2555     * If you need such features, consider implementing them at application level.
2556     *
2557     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2558     *        and owned by caller.
2559     * @return true if a timestamp is available, or false if no timestamp is available.
2560     *         If a timestamp is available,
2561     *         the AudioTimestamp instance is filled in with a position in frame units, together
2562     *         with the estimated time when that frame was presented or is committed to
2563     *         be presented.
2564     *         In the case that no timestamp is available, any supplied instance is left unaltered.
2565     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
2566     *         or during and immediately after a route change.
2567     *         A timestamp is permanently unavailable for a given route if the route does not support
2568     *         timestamps.  In this case, the approximate frame position can be obtained
2569     *         using {@link #getPlaybackHeadPosition}.
2570     *         However, it may be useful to continue to query for
2571     *         timestamps occasionally, to recover after a route change.
2572     */
2573     // Add this text when the "on new timestamp" API is added:
2574     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)2575     public boolean getTimestamp(AudioTimestamp timestamp)
2576     {
2577         if (timestamp == null) {
2578             throw new IllegalArgumentException();
2579         }
2580         // It's unfortunate, but we have to either create garbage every time or use synchronized
2581         long[] longArray = new long[2];
2582         int ret = native_get_timestamp(longArray);
2583         if (ret != SUCCESS) {
2584             return false;
2585         }
2586         timestamp.framePosition = longArray[0];
2587         timestamp.nanoTime = longArray[1];
2588         return true;
2589     }
2590 
2591     /**
2592      * Poll for a timestamp on demand.
2593      * <p>
2594      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
2595      *
2596      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2597      *        and owned by caller.
2598      * @return {@link #SUCCESS} if a timestamp is available
2599      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
2600      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
2601      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
2602      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
2603      *         for the timestamp.
2604      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2605      *         needs to be recreated.
2606      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
2607      *         timestamps. In this case, the approximate frame position can be obtained
2608      *         using {@link #getPlaybackHeadPosition}.
2609      *
2610      *         The AudioTimestamp instance is filled in with a position in frame units, together
2611      *         with the estimated time when that frame was presented or is committed to
2612      *         be presented.
2613      * @hide
2614      */
2615      // Add this text when the "on new timestamp" API is added:
2616      //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestampWithStatus(AudioTimestamp timestamp)2617      public int getTimestampWithStatus(AudioTimestamp timestamp)
2618      {
2619          if (timestamp == null) {
2620              throw new IllegalArgumentException();
2621          }
2622          // It's unfortunate, but we have to either create garbage every time or use synchronized
2623          long[] longArray = new long[2];
2624          int ret = native_get_timestamp(longArray);
2625          timestamp.framePosition = longArray[0];
2626          timestamp.nanoTime = longArray[1];
2627          return ret;
2628      }
2629 
2630     /**
2631      *  Return Metrics data about the current AudioTrack instance.
2632      *
2633      * @return a {@link PersistableBundle} containing the set of attributes and values
2634      * available for the media being handled by this instance of AudioTrack
2635      * The attributes are descibed in {@link MetricsConstants}.
2636      *
2637      * Additional vendor-specific fields may also be present in
2638      * the return value.
2639      */
getMetrics()2640     public PersistableBundle getMetrics() {
2641         PersistableBundle bundle = native_getMetrics();
2642         return bundle;
2643     }
2644 
native_getMetrics()2645     private native PersistableBundle native_getMetrics();
2646 
2647     //--------------------------------------------------------------------------
2648     // Initialization / configuration
2649     //--------------------
    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
     * Notifications will be received in the same thread as the one in which the AudioTrack
     * instance was created.
     * @param listener the listener to notify, or null to clear a previously set listener.
     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Delegates with a null Handler, i.e. events are delivered on the creation thread.
        setPlaybackPositionUpdateListener(listener, null);
    }
2660 
2661     /**
2662      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2663      * for each periodic playback head position update.
2664      * Use this method to receive AudioTrack events in the Handler associated with another
2665      * thread than the one in which you created the AudioTrack instance.
2666      * @param listener
2667      * @param handler the Handler that will receive the event notification messages.
2668      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)2669     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
2670                                                     Handler handler) {
2671         if (listener != null) {
2672             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
2673         } else {
2674             mEventHandlerDelegate = null;
2675         }
2676     }
2677 
2678 
clampGainOrLevel(float gainOrLevel)2679     private static float clampGainOrLevel(float gainOrLevel) {
2680         if (Float.isNaN(gainOrLevel)) {
2681             throw new IllegalArgumentException();
2682         }
2683         if (gainOrLevel < GAIN_MIN) {
2684             gainOrLevel = GAIN_MIN;
2685         } else if (gainOrLevel > GAIN_MAX) {
2686             gainOrLevel = GAIN_MAX;
2687         }
2688         return gainOrLevel;
2689     }
2690 
2691 
2692      /**
2693      * Sets the specified left and right output gain values on the AudioTrack.
2694      * <p>Gain values are clamped to the closed interval [0.0, max] where
2695      * max is the value of {@link #getMaxVolume}.
2696      * A value of 0.0 results in zero gain (silence), and
2697      * a value of 1.0 means unity gain (signal unchanged).
2698      * The default value is 1.0 meaning unity gain.
2699      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2700      * @param leftGain output gain for the left channel.
2701      * @param rightGain output gain for the right channel
2702      * @return error code or success, see {@link #SUCCESS},
2703      *    {@link #ERROR_INVALID_OPERATION}
2704      * @deprecated Applications should use {@link #setVolume} instead, as it
2705      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2706      */
2707     @Deprecated
setStereoVolume(float leftGain, float rightGain)2708     public int setStereoVolume(float leftGain, float rightGain) {
2709         if (mState == STATE_UNINITIALIZED) {
2710             return ERROR_INVALID_OPERATION;
2711         }
2712 
2713         baseSetVolume(leftGain, rightGain);
2714         return SUCCESS;
2715     }
2716 
2717     @Override
playerSetVolume(boolean muting, float leftVolume, float rightVolume)2718     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
2719         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
2720         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
2721 
2722         native_setVolume(leftVolume, rightVolume);
2723     }
2724 
2725 
    /**
     * Sets the specified output gain value on all channels of this track.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>This API is preferred over {@link #setStereoVolume}, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param gain output gain for all channels.
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setVolume(float gain) {
        // Same gain on both channels; setStereoVolume performs the state check.
        return setStereoVolume(gain, gain);
    }
2743 
    /** Applies a {@link VolumeShaper} configuration and operation to the native track. */
    @Override
    /* package */ int playerApplyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation) {
        // Forwards directly to the native layer; the return value is a status code.
        return native_applyVolumeShaper(configuration, operation);
    }
2750 
    /** Queries the native layer for the state of the {@link VolumeShaper} with the given id. */
    @Override
    /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
        return native_getVolumeShaperState(id);
    }
2755 
    /** Creates a {@link VolumeShaper} bound to this player with the given configuration. */
    @Override
    public @NonNull VolumeShaper createVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration) {
        return new VolumeShaper(configuration, this);
    }
2761 
2762     /**
2763      * Sets the playback sample rate for this track. This sets the sampling rate at which
2764      * the audio data will be consumed and played back
2765      * (as set by the sampleRateInHz parameter in the
2766      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
2767      * not the original sampling rate of the
2768      * content. For example, setting it to half the sample rate of the content will cause the
2769      * playback to last twice as long, but will also result in a pitch shift down by one octave.
2770      * The valid sample rate range is from 1 Hz to twice the value returned by
2771      * {@link #getNativeOutputSampleRate(int)}.
2772      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
2773      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
2774      * for playback of content of differing sample rate,
2775      * but with identical encoding and channel mask.
2776      * @param sampleRateInHz the sample rate expressed in Hz
2777      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2778      *    {@link #ERROR_INVALID_OPERATION}
2779      */
setPlaybackRate(int sampleRateInHz)2780     public int setPlaybackRate(int sampleRateInHz) {
2781         if (mState != STATE_INITIALIZED) {
2782             return ERROR_INVALID_OPERATION;
2783         }
2784         if (sampleRateInHz <= 0) {
2785             return ERROR_BAD_VALUE;
2786         }
2787         return native_set_playback_rate(sampleRateInHz);
2788     }
2789 
2790 
    /**
     * Sets the playback parameters.
     * This method returns failure if it cannot apply the playback parameters.
     * One possible cause is that the parameters for speed or pitch are out of range.
     * Another possible cause is that the <code>AudioTrack</code> is streaming
     * (see {@link #MODE_STREAM}) and the
     * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
     * on configuration must be larger than the speed multiplied by the minimum size
     * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
     * @param params see {@link PlaybackParams}. In particular,
     * speed, pitch, and audio mode should be set.
     * @throws IllegalArgumentException if the parameters are invalid or not accepted.
     * @throws IllegalStateException if track is not initialized.
     */
    public void setPlaybackParams(@NonNull PlaybackParams params) {
        // Explicit null check despite @NonNull: the annotation is not enforced at runtime.
        if (params == null) {
            throw new IllegalArgumentException("params is null");
        }
        // NOTE(review): no mState check here; the IllegalStateException documented above
        // presumably originates in the native layer — confirm against JNI implementation.
        native_set_playback_params(params);
    }
2811 
2812 
2813     /**
2814      * Sets the position of the notification marker.  At most one marker can be active.
2815      * @param markerInFrames marker position in wrapping frame units similar to
2816      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
2817      * To set a marker at a position which would appear as zero due to wraparound,
2818      * a workaround is to use a non-zero position near zero, such as -1 or 1.
2819      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2820      *  {@link #ERROR_INVALID_OPERATION}
2821      */
setNotificationMarkerPosition(int markerInFrames)2822     public int setNotificationMarkerPosition(int markerInFrames) {
2823         if (mState == STATE_UNINITIALIZED) {
2824             return ERROR_INVALID_OPERATION;
2825         }
2826         return native_set_marker_pos(markerInFrames);
2827     }
2828 
2829 
2830     /**
2831      * Sets the period for the periodic notification event.
2832      * @param periodInFrames update period expressed in frames.
2833      * Zero period means no position updates.  A negative period is not allowed.
2834      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
2835      */
setPositionNotificationPeriod(int periodInFrames)2836     public int setPositionNotificationPeriod(int periodInFrames) {
2837         if (mState == STATE_UNINITIALIZED) {
2838             return ERROR_INVALID_OPERATION;
2839         }
2840         return native_set_pos_update_period(periodInFrames);
2841     }
2842 
2843 
2844     /**
2845      * Sets the playback head position within the static buffer.
2846      * The track must be stopped or paused for the position to be changed,
2847      * and must use the {@link #MODE_STATIC} mode.
2848      * @param positionInFrames playback head position within buffer, expressed in frames.
2849      * Zero corresponds to start of buffer.
2850      * The position must not be greater than the buffer size in frames, or negative.
2851      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
2852      * the position values have different meanings.
2853      * <br>
2854      * If looping is currently enabled and the new position is greater than or equal to the
2855      * loop end marker, the behavior varies by API level:
2856      * as of {@link android.os.Build.VERSION_CODES#M},
2857      * the looping is first disabled and then the position is set.
2858      * For earlier API levels, the behavior is unspecified.
2859      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2860      *    {@link #ERROR_INVALID_OPERATION}
2861      */
setPlaybackHeadPosition(@ntRange from = 0) int positionInFrames)2862     public int setPlaybackHeadPosition(@IntRange (from = 0) int positionInFrames) {
2863         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2864                 getPlayState() == PLAYSTATE_PLAYING) {
2865             return ERROR_INVALID_OPERATION;
2866         }
2867         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
2868             return ERROR_BAD_VALUE;
2869         }
2870         return native_set_position(positionInFrames);
2871     }
2872 
2873     /**
2874      * Sets the loop points and the loop count. The loop can be infinite.
2875      * Similarly to setPlaybackHeadPosition,
2876      * the track must be stopped or paused for the loop points to be changed,
2877      * and must use the {@link #MODE_STATIC} mode.
2878      * @param startInFrames loop start marker expressed in frames.
2879      * Zero corresponds to start of buffer.
2880      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
2881      * @param endInFrames loop end marker expressed in frames.
2882      * The total buffer size in frames corresponds to end of buffer.
2883      * The end marker must not be greater than the buffer size in frames.
2884      * For looping, the end marker must not be less than or equal to the start marker,
2885      * but to disable looping
2886      * it is permitted for start marker, end marker, and loop count to all be 0.
2887      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
2888      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
2889      * support,
2890      * {@link #ERROR_BAD_VALUE} is returned.
2891      * The loop range is the interval [startInFrames, endInFrames).
2892      * <br>
2893      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
2894      * unless it is greater than or equal to the loop end marker, in which case
2895      * it is forced to the loop start marker.
2896      * For earlier API levels, the effect on position is unspecified.
2897      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
2898      *    A value of -1 means infinite looping, and 0 disables looping.
2899      *    A value of positive N means to "loop" (go back) N times.  For example,
2900      *    a value of one means to play the region two times in total.
2901      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2902      *    {@link #ERROR_INVALID_OPERATION}
2903      */
setLoopPoints(@ntRange from = 0) int startInFrames, @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount)2904     public int setLoopPoints(@IntRange (from = 0) int startInFrames,
2905             @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount) {
2906         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2907                 getPlayState() == PLAYSTATE_PLAYING) {
2908             return ERROR_INVALID_OPERATION;
2909         }
2910         if (loopCount == 0) {
2911             ;   // explicitly allowed as an exception to the loop region range check
2912         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
2913                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
2914             return ERROR_BAD_VALUE;
2915         }
2916         return native_set_loop(startInFrames, endInFrames, loopCount);
2917     }
2918 
2919     /**
2920      * Sets the audio presentation.
2921      * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned.
2922      * If a multi-stream decoder (MSD) is not present, or the format does not support
2923      * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned.
2924      * {@link #ERROR} is returned in case of any other error.
2925      * @param presentation see {@link AudioPresentation}. In particular, id should be set.
2926      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR},
2927      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2928      * @throws IllegalArgumentException if the audio presentation is null.
2929      * @throws IllegalStateException if track is not initialized.
2930      */
setPresentation(@onNull AudioPresentation presentation)2931     public int setPresentation(@NonNull AudioPresentation presentation) {
2932         if (presentation == null) {
2933             throw new IllegalArgumentException("audio presentation is null");
2934         }
2935         return native_setPresentation(presentation.getPresentationId(),
2936                 presentation.getProgramId());
2937     }
2938 
    /**
     * Sets the initialization state of the instance. This method was originally intended to be used
     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
     * @param state the state of the AudioTrack instance
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     */
    @Deprecated
    protected void setState(int state) {
        // Overwrites mState directly with no validation; callers are trusted subclasses.
        mState = state;
    }
2950 
2951 
2952     //---------------------------------------------------------
2953     // Transport control methods
2954     //--------------------
    /**
     * Starts playing an AudioTrack.
     * <p>
     * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
     * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
     * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
     * play().
     * <p>
     * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
     * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
     * If you don't call write() first, or if you call write() but with an insufficient amount of
     * data, then the track will be in underrun state at play().  In this case,
     * playback will not actually start playing until the data path is filled to a
     * device-specific minimum level.  This requirement for the path to be filled
     * to a minimum level is also true when resuming audio playback after calling stop().
     * Similarly the buffer will need to be filled up again after
     * the track underruns due to failure to call write() in a timely manner with sufficient data.
     * For portability, an application should prime the data path to the maximum allowed
     * by writing data until the write() method returns a short transfer count.
     * This allows play() to start immediately, and reduces the chance of underrun.
     *<p>
     * As of {@link android.os.Build.VERSION_CODES#S} the minimum level to start playing
     * can be obtained using {@link #getStartThresholdInFrames()} and set with
     * {@link #setStartThresholdInFrames(int)}.
     *
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void play()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
        }
        //FIXME use lambda to pass startImpl to superclass
        final int delay = getStartDelayMs();
        if (delay == 0) {
            startImpl();
        } else {
            // Non-zero start delay: defer the actual start to a short-lived worker thread.
            new Thread() {
                public void run() {
                    try {
                        Thread.sleep(delay);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    // Clear the delay before starting so subsequent plays start immediately.
                    baseSetStartDelayMs(0);
                    try {
                        startImpl();
                    } catch (IllegalStateException e) {
                        // fail silently for a state exception when it is happening after
                        // a delayed start, as the player state could have changed between the
                        // call to start() and the execution of startImpl()
                    }
                }
            }.start();
        }
    }
3012 
    // Common start path for immediate and delayed play(): enables self routing monitoring
    // if not already enabled, then starts the native track and advances mPlayState
    // under mPlayStateLock.
    private void startImpl() {
        synchronized (mRoutingChangeListeners) {
            if (!mEnableSelfRoutingMonitor) {
                mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
            }
        }
        synchronized(mPlayStateLock) {
            baseStart(0); // unknown device at this point
            native_start();
            // FIXME see b/179218630
            //baseStart(native_getRoutedDeviceId());
            if (mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                // Resuming a track that was paused while stopping continues the stop sequence.
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PLAYING;
                mOffloadEosPending = false;
            }
        }
    }
3032 
    /**
     * Stops playing the audio data.
     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
     * after the last buffer that was written has been played. For an immediate stop, use
     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
     * back yet.
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            native_stop();
            baseStop();
            if (mOffloaded && mPlayState != PLAYSTATE_PAUSED_STOPPING) {
                // Offloaded tracks drain asynchronously: transition through STOPPING.
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_STOPPED;
                mOffloadEosPending = false;
                // Reset AV sync bookkeeping for the next play session.
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                // Wake any thread waiting on mPlayStateLock for the stopped state.
                mPlayStateLock.notify();
            }
        }
        tryToDisableNativeRoutingCallback();
    }
3063 
    /**
     * Pauses the playback of the audio data. Data that has not been played
     * back will not be discarded. Subsequent calls to {@link #play} will play
     * this data back. See {@link #flush()} to discard this data.
     *
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void pause()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
        }

        // pause playback
        synchronized(mPlayStateLock) {
            native_pause();
            basePause();
            if (mPlayState == PLAYSTATE_STOPPING) {
                // Pausing mid-stop keeps the pending stop: resume via play() re-enters STOPPING.
                mPlayState = PLAYSTATE_PAUSED_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PAUSED;
            }
        }
    }
3088 
3089 
3090     //---------------------------------------------------------
3091     // Audio data supply
3092     //--------------------
3093 
3094     /**
3095      * Flushes the audio data currently queued for playback. Any data that has
3096      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
3097      * or if the track's creation mode is not {@link #MODE_STREAM}.
3098      * <BR> Note that although data written but not yet presented is discarded, there is no
3099      * guarantee that all of the buffer space formerly used by that data
3100      * is available for a subsequent write.
3101      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
3102      * less than or equal to the total buffer size
3103      * may return a short actual transfer count.
3104      */
flush()3105     public void flush() {
3106         if (mState == STATE_INITIALIZED) {
3107             // flush the data in native layer
3108             native_flush();
3109             mAvSyncHeader = null;
3110             mAvSyncBytesRemaining = 0;
3111         }
3112 
3113     }
3114 
    /**
     * Writes the audio data to the audio sink for playback (streaming mode),
     * or copies audio data for later playback (static buffer mode).
     * The format specified in the AudioTrack constructor should be
     * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
     * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
     * <p>
     * In streaming mode, the write will normally block until all the data has been enqueued for
     * playback, and will return a full transfer count.  However, if the track is stopped or paused
     * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
     * occurs during the write, then the write may return a short transfer count.
     * <p>
     * In static buffer mode, copies the data to the buffer starting at offset 0.
     * Note that the actual playback of this data might occur after this function returns.
     *
     * @param audioData the array that holds the data to play.
     * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
     *    starts.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @param sizeInBytes the number of bytes to write in audioData after the offset.
     *    Must not be negative, or cause the data access to go out of bounds of the array.
     * @return zero or the positive number of bytes that were written, or one of the following
     *    error codes. The number of bytes will be a multiple of the frame size in bytes
     *    not to exceed sizeInBytes.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
     * set to  {@link #WRITE_BLOCKING}.
     */
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
        // Blocking convenience overload; delegates to the four-argument variant.
        return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
    }
3153 
3154     /**
3155      * Writes the audio data to the audio sink for playback (streaming mode),
3156      * or copies audio data for later playback (static buffer mode).
3157      * The format specified in the AudioTrack constructor should be
3158      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
3159      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
3160      * <p>
3161      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3162      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3163      * for playback, and will return a full transfer count.  However, if the write mode is
3164      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3165      * interrupts the write by calling stop or pause, or an I/O error
3166      * occurs during the write, then the write may return a short transfer count.
3167      * <p>
3168      * In static buffer mode, copies the data to the buffer starting at offset 0,
3169      * and the write mode is ignored.
3170      * Note that the actual playback of this data might occur after this function returns.
3171      *
3172      * @param audioData the array that holds the data to play.
3173      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
3174      *    starts.
3175      *    Must not be negative, or cause the data access to go out of bounds of the array.
3176      * @param sizeInBytes the number of bytes to write in audioData after the offset.
3177      *    Must not be negative, or cause the data access to go out of bounds of the array.
3178      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3179      *     effect in static mode.
3180      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3181      *         to the audio sink.
3182      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3183      *     queuing as much audio data for playback as possible without blocking.
3184      * @return zero or the positive number of bytes that were written, or one of the following
3185      *    error codes. The number of bytes will be a multiple of the frame size in bytes
3186      *    not to exceed sizeInBytes.
3187      * <ul>
3188      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3189      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3190      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3191      *    needs to be recreated. The dead object error code is not returned if some data was
3192      *    successfully transferred. In this case, the error is returned at the next write()</li>
3193      * <li>{@link #ERROR} in case of other error</li>
3194      * </ul>
3195      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)3196     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
3197             @WriteMode int writeMode) {
3198         // Note: we allow writes of extended integers and compressed formats from a byte array.
3199         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
3200             return ERROR_INVALID_OPERATION;
3201         }
3202 
3203         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3204             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3205             return ERROR_BAD_VALUE;
3206         }
3207 
3208         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
3209                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
3210                 || (offsetInBytes + sizeInBytes > audioData.length)) {
3211             return ERROR_BAD_VALUE;
3212         }
3213 
3214         if (!blockUntilOffloadDrain(writeMode)) {
3215             return 0;
3216         }
3217 
3218         final int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
3219                 writeMode == WRITE_BLOCKING);
3220 
3221         if ((mDataLoadMode == MODE_STATIC)
3222                 && (mState == STATE_NO_STATIC_DATA)
3223                 && (ret > 0)) {
3224             // benign race with respect to other APIs that read mState
3225             mState = STATE_INITIALIZED;
3226         }
3227 
3228         return ret;
3229     }
3230 
3231     /**
3232      * Writes the audio data to the audio sink for playback (streaming mode),
3233      * or copies audio data for later playback (static buffer mode).
3234      * The format specified in the AudioTrack constructor should be
3235      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3236      * <p>
3237      * In streaming mode, the write will normally block until all the data has been enqueued for
3238      * playback, and will return a full transfer count.  However, if the track is stopped or paused
3239      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
3240      * occurs during the write, then the write may return a short transfer count.
3241      * <p>
3242      * In static buffer mode, copies the data to the buffer starting at offset 0.
3243      * Note that the actual playback of this data might occur after this function returns.
3244      *
3245      * @param audioData the array that holds the data to play.
3246      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
3247      *     starts.
3248      *    Must not be negative, or cause the data access to go out of bounds of the array.
3249      * @param sizeInShorts the number of shorts to read in audioData after the offset.
3250      *    Must not be negative, or cause the data access to go out of bounds of the array.
3251      * @return zero or the positive number of shorts that were written, or one of the following
3252      *    error codes. The number of shorts will be a multiple of the channel count not to
3253      *    exceed sizeInShorts.
3254      * <ul>
3255      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3256      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3257      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3258      *    needs to be recreated. The dead object error code is not returned if some data was
3259      *    successfully transferred. In this case, the error is returned at the next write()</li>
3260      * <li>{@link #ERROR} in case of other error</li>
3261      * </ul>
3262      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
3263      * set to  {@link #WRITE_BLOCKING}.
3264      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts)3265     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
3266         return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
3267     }
3268 
3269     /**
3270      * Writes the audio data to the audio sink for playback (streaming mode),
3271      * or copies audio data for later playback (static buffer mode).
3272      * The format specified in the AudioTrack constructor should be
3273      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3274      * <p>
3275      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3276      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3277      * for playback, and will return a full transfer count.  However, if the write mode is
3278      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3279      * interrupts the write by calling stop or pause, or an I/O error
3280      * occurs during the write, then the write may return a short transfer count.
3281      * <p>
3282      * In static buffer mode, copies the data to the buffer starting at offset 0.
3283      * Note that the actual playback of this data might occur after this function returns.
3284      *
3285      * @param audioData the array that holds the data to write.
3286      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
3287      *     starts.
3288      *    Must not be negative, or cause the data access to go out of bounds of the array.
3289      * @param sizeInShorts the number of shorts to read in audioData after the offset.
3290      *    Must not be negative, or cause the data access to go out of bounds of the array.
3291      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3292      *     effect in static mode.
3293      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3294      *         to the audio sink.
3295      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3296      *     queuing as much audio data for playback as possible without blocking.
3297      * @return zero or the positive number of shorts that were written, or one of the following
3298      *    error codes. The number of shorts will be a multiple of the channel count not to
3299      *    exceed sizeInShorts.
3300      * <ul>
3301      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3302      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3303      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3304      *    needs to be recreated. The dead object error code is not returned if some data was
3305      *    successfully transferred. In this case, the error is returned at the next write()</li>
3306      * <li>{@link #ERROR} in case of other error</li>
3307      * </ul>
3308      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)3309     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
3310             @WriteMode int writeMode) {
3311 
3312         if (mState == STATE_UNINITIALIZED
3313                 || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT
3314                 // use ByteBuffer or byte[] instead for later encodings
3315                 || mAudioFormat > AudioFormat.ENCODING_LEGACY_SHORT_ARRAY_THRESHOLD) {
3316             return ERROR_INVALID_OPERATION;
3317         }
3318 
3319         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3320             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3321             return ERROR_BAD_VALUE;
3322         }
3323 
3324         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
3325                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
3326                 || (offsetInShorts + sizeInShorts > audioData.length)) {
3327             return ERROR_BAD_VALUE;
3328         }
3329 
3330         if (!blockUntilOffloadDrain(writeMode)) {
3331             return 0;
3332         }
3333 
3334         final int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
3335                 writeMode == WRITE_BLOCKING);
3336 
3337         if ((mDataLoadMode == MODE_STATIC)
3338                 && (mState == STATE_NO_STATIC_DATA)
3339                 && (ret > 0)) {
3340             // benign race with respect to other APIs that read mState
3341             mState = STATE_INITIALIZED;
3342         }
3343 
3344         return ret;
3345     }
3346 
3347     /**
3348      * Writes the audio data to the audio sink for playback (streaming mode),
3349      * or copies audio data for later playback (static buffer mode).
3350      * The format specified in the AudioTrack constructor should be
3351      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
3352      * <p>
3353      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3354      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3355      * for playback, and will return a full transfer count.  However, if the write mode is
3356      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3357      * interrupts the write by calling stop or pause, or an I/O error
3358      * occurs during the write, then the write may return a short transfer count.
3359      * <p>
3360      * In static buffer mode, copies the data to the buffer starting at offset 0,
3361      * and the write mode is ignored.
3362      * Note that the actual playback of this data might occur after this function returns.
3363      *
3364      * @param audioData the array that holds the data to write.
3365      *     The implementation does not clip for sample values within the nominal range
3366      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
3367      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
3368      *     that could add energy, such as reverb.  For the convenience of applications
3369      *     that compute samples using filters with non-unity gain,
3370      *     sample values +3 dB beyond the nominal range are permitted.
3371      *     However such values may eventually be limited or clipped, depending on various gains
3372      *     and later processing in the audio path.  Therefore applications are encouraged
3373      *     to provide samples values within the nominal range.
3374      * @param offsetInFloats the offset, expressed as a number of floats,
3375      *     in audioData where the data to write starts.
3376      *    Must not be negative, or cause the data access to go out of bounds of the array.
3377      * @param sizeInFloats the number of floats to write in audioData after the offset.
3378      *    Must not be negative, or cause the data access to go out of bounds of the array.
3379      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3380      *     effect in static mode.
3381      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3382      *         to the audio sink.
3383      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3384      *     queuing as much audio data for playback as possible without blocking.
3385      * @return zero or the positive number of floats that were written, or one of the following
3386      *    error codes. The number of floats will be a multiple of the channel count not to
3387      *    exceed sizeInFloats.
3388      * <ul>
3389      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3390      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3391      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3392      *    needs to be recreated. The dead object error code is not returned if some data was
3393      *    successfully transferred. In this case, the error is returned at the next write()</li>
3394      * <li>{@link #ERROR} in case of other error</li>
3395      * </ul>
3396      */
write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)3397     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
3398             @WriteMode int writeMode) {
3399 
3400         if (mState == STATE_UNINITIALIZED) {
3401             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3402             return ERROR_INVALID_OPERATION;
3403         }
3404 
3405         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
3406             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
3407             return ERROR_INVALID_OPERATION;
3408         }
3409 
3410         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3411             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3412             return ERROR_BAD_VALUE;
3413         }
3414 
3415         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
3416                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
3417                 || (offsetInFloats + sizeInFloats > audioData.length)) {
3418             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
3419             return ERROR_BAD_VALUE;
3420         }
3421 
3422         if (!blockUntilOffloadDrain(writeMode)) {
3423             return 0;
3424         }
3425 
3426         final int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
3427                 writeMode == WRITE_BLOCKING);
3428 
3429         if ((mDataLoadMode == MODE_STATIC)
3430                 && (mState == STATE_NO_STATIC_DATA)
3431                 && (ret > 0)) {
3432             // benign race with respect to other APIs that read mState
3433             mState = STATE_INITIALIZED;
3434         }
3435 
3436         return ret;
3437     }
3438 
3439 
3440     /**
3441      * Writes the audio data to the audio sink for playback (streaming mode),
3442      * or copies audio data for later playback (static buffer mode).
3443      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
3444      * <p>
3445      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3446      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3447      * for playback, and will return a full transfer count.  However, if the write mode is
3448      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3449      * interrupts the write by calling stop or pause, or an I/O error
3450      * occurs during the write, then the write may return a short transfer count.
3451      * <p>
3452      * In static buffer mode, copies the data to the buffer starting at offset 0,
3453      * and the write mode is ignored.
3454      * Note that the actual playback of this data might occur after this function returns.
3455      *
3456      * @param audioData the buffer that holds the data to write, starting at the position reported
3457      *     by <code>audioData.position()</code>.
3458      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
3459      *     have been advanced to reflect the amount of data that was successfully written to
3460      *     the AudioTrack.
3461      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
3462      *     that the number of bytes requested be a multiple of the frame size (sample size in
3463      *     bytes multiplied by the channel count).
3464      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
3465      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3466      *     effect in static mode.
3467      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3468      *         to the audio sink.
3469      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3470      *     queuing as much audio data for playback as possible without blocking.
3471      * @return zero or the positive number of bytes that were written, or one of the following
3472      *    error codes.
3473      * <ul>
3474      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3475      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3476      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3477      *    needs to be recreated. The dead object error code is not returned if some data was
3478      *    successfully transferred. In this case, the error is returned at the next write()</li>
3479      * <li>{@link #ERROR} in case of other error</li>
3480      * </ul>
3481      */
write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)3482     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
3483             @WriteMode int writeMode) {
3484 
3485         if (mState == STATE_UNINITIALIZED) {
3486             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3487             return ERROR_INVALID_OPERATION;
3488         }
3489 
3490         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3491             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3492             return ERROR_BAD_VALUE;
3493         }
3494 
3495         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
3496             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
3497             return ERROR_BAD_VALUE;
3498         }
3499 
3500         if (!blockUntilOffloadDrain(writeMode)) {
3501             return 0;
3502         }
3503 
3504         int ret = 0;
3505         if (audioData.isDirect()) {
3506             ret = native_write_native_bytes(audioData,
3507                     audioData.position(), sizeInBytes, mAudioFormat,
3508                     writeMode == WRITE_BLOCKING);
3509         } else {
3510             ret = native_write_byte(NioUtils.unsafeArray(audioData),
3511                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
3512                     sizeInBytes, mAudioFormat,
3513                     writeMode == WRITE_BLOCKING);
3514         }
3515 
3516         if ((mDataLoadMode == MODE_STATIC)
3517                 && (mState == STATE_NO_STATIC_DATA)
3518                 && (ret > 0)) {
3519             // benign race with respect to other APIs that read mState
3520             mState = STATE_INITIALIZED;
3521         }
3522 
3523         if (ret > 0) {
3524             audioData.position(audioData.position() + ret);
3525         }
3526 
3527         return ret;
3528     }
3529 
    /**
     * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
     * The blocking behavior will depend on the write mode.
     * <p>
     * The data is framed with an AV sync header carrying the size and timestamp of the
     * following audio payload; a partially-written header is resumed on subsequent calls
     * (see {@code mAvSyncHeader}/{@code mAvSyncBytesRemaining} state below).
     * @param audioData the buffer that holds the data to write, starting at the position reported
     *     by <code>audioData.position()</code>.
     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
     *     have been advanced to reflect the amount of data that was successfully written to
     *     the AudioTrack.
     * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
     *     that the number of bytes requested be a multiple of the frame size (sample size in
     *     bytes multiplied by the channel count).
     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *         to the audio sink.
     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @param timestamp The timestamp, in nanoseconds, of the first decodable audio frame in the
     *     provided audioData.
     * @return zero or the positive number of bytes that were written, or one of the following
     *    error codes.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamps only make sense for streaming tracks.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // A track without HW_AV_SYNC silently falls back to the plain write path.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // An offloaded track that is draining accepts no further data yet.
        if (!blockUntilOffloadDrain(writeMode)) {
            return 0;
        }

        // create timestamp header if none exists
        // The header is big-endian and starts with the marker word 0x55550002;
        // mOffset is the total header length (set elsewhere in this file).
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550002);
        }

        // Starting a new access unit: fill in payload size, timestamp, and header
        // length at fixed offsets, then rewind the header so it gets (re)written.
        // NOTE(review): offsets 4/8/16 presumably match the HAL's AV sync header
        // layout — confirm against the native definition before changing.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // Header write failed: discard sync state so the next call restarts cleanly.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Partial header written (e.g. non-blocking mode); report 0 payload
                // bytes and finish the header on the next call.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more payload than the current access unit has left.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }
3637 
3638 
3639     /**
3640      * Sets the playback head position within the static buffer to zero,
3641      * that is it rewinds to start of static buffer.
3642      * The track must be stopped or paused, and
3643      * the track's creation mode must be {@link #MODE_STATIC}.
3644      * <p>
3645      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
3646      * {@link #getPlaybackHeadPosition()} to zero.
3647      * For earlier API levels, the reset behavior is unspecified.
3648      * <p>
3649      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
3650      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
3651      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
3652      *  {@link #ERROR_INVALID_OPERATION}
3653      */
reloadStaticData()3654     public int reloadStaticData() {
3655         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
3656             return ERROR_INVALID_OPERATION;
3657         }
3658         return native_reload_static();
3659     }
3660 
    /**
     * When an AudioTrack in offload mode is in STOPPING play state, wait until event STREAM_END is
     * received if blocking write or return with 0 frames written if non blocking mode.
     *
     * @param writeMode {@link #WRITE_BLOCKING} or {@link #WRITE_NON_BLOCKING}
     * @return true when the track is writable (not draining, or drain completed);
     *         false when the write mode is non-blocking and the track is still draining,
     *         in which case the caller reports 0 frames written.
     */
    private boolean blockUntilOffloadDrain(int writeMode) {
        synchronized (mPlayStateLock) {
            // Loop guards against spurious wakeups; the state is re-checked after
            // every wait(). The lock is notified elsewhere when the play state changes.
            while (mPlayState == PLAYSTATE_STOPPING || mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                if (writeMode == WRITE_NON_BLOCKING) {
                    // Non-blocking callers must not be parked here.
                    return false;
                }
                try {
                    mPlayStateLock.wait();
                } catch (InterruptedException e) {
                    // Deliberately swallowed: the loop keeps waiting for the drain to
                    // complete. NOTE(review): the interrupt status is not re-asserted;
                    // re-interrupting here would make wait() throw immediately and spin.
                }
            }
            return true;
        }
    }
3679 
3680     //--------------------------------------------------------------------------
3681     // Audio effects management
3682     //--------------------
3683 
3684     /**
3685      * Attaches an auxiliary effect to the audio track. A typical auxiliary
3686      * effect is a reverberation effect which can be applied on any sound source
3687      * that directs a certain amount of its energy to this effect. This amount
3688      * is defined by setAuxEffectSendLevel().
3689      * {@see #setAuxEffectSendLevel(float)}.
3690      * <p>After creating an auxiliary effect (e.g.
3691      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
3692      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
3693      * this method to attach the audio track to the effect.
3694      * <p>To detach the effect from the audio track, call this method with a
3695      * null effect id.
3696      *
3697      * @param effectId system wide unique id of the effect to attach
3698      * @return error code or success, see {@link #SUCCESS},
3699      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
3700      */
attachAuxEffect(int effectId)3701     public int attachAuxEffect(int effectId) {
3702         if (mState == STATE_UNINITIALIZED) {
3703             return ERROR_INVALID_OPERATION;
3704         }
3705         return native_attachAuxEffect(effectId);
3706     }
3707 
3708     /**
3709      * Sets the send level of the audio track to the attached auxiliary effect
3710      * {@link #attachAuxEffect(int)}.  Effect levels
3711      * are clamped to the closed interval [0.0, max] where
3712      * max is the value of {@link #getMaxVolume}.
3713      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
3714      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
3715      * this method must be called for the effect to be applied.
3716      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
3717      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
3718      * so an appropriate conversion from linear UI input x to level is:
3719      * x == 0 -&gt; level = 0
3720      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
3721      *
3722      * @param level linear send level
3723      * @return error code or success, see {@link #SUCCESS},
3724      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
3725      */
setAuxEffectSendLevel(@loatRangefrom = 0.0) float level)3726     public int setAuxEffectSendLevel(@FloatRange(from = 0.0) float level) {
3727         if (mState == STATE_UNINITIALIZED) {
3728             return ERROR_INVALID_OPERATION;
3729         }
3730         return baseSetAuxEffectSendLevel(level);
3731     }
3732 
3733     @Override
playerSetAuxEffectSendLevel(boolean muting, float level)3734     int playerSetAuxEffectSendLevel(boolean muting, float level) {
3735         level = clampGainOrLevel(muting ? 0.0f : level);
3736         int err = native_setAuxEffectSendLevel(level);
3737         return err == 0 ? SUCCESS : ERROR;
3738     }
3739 
3740     //--------------------------------------------------------------------------
3741     // Explicit Routing
3742     //--------------------
    // Preferred routing target set by setPreferredDevice(); reads/writes guarded by
    // synchronized (this) — see setPreferredDevice()/getPreferredDevice().
    private AudioDeviceInfo mPreferredDevice = null;
3744 
3745     /**
3746      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
3747      * the output from this AudioTrack.
3748      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
3749      *  If deviceInfo is null, default routing is restored.
3750      * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
3751      * does not correspond to a valid audio output device.
3752      */
3753     @Override
setPreferredDevice(AudioDeviceInfo deviceInfo)3754     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
3755         // Do some validation....
3756         if (deviceInfo != null && !deviceInfo.isSink()) {
3757             return false;
3758         }
3759         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
3760         boolean status = native_setOutputDevice(preferredDeviceId);
3761         if (status == true) {
3762             synchronized (this) {
3763                 mPreferredDevice = deviceInfo;
3764             }
3765         }
3766         return status;
3767     }
3768 
3769     /**
3770      * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
3771      * is not guaranteed to correspond to the actual device being used for playback.
3772      */
3773     @Override
getPreferredDevice()3774     public AudioDeviceInfo getPreferredDevice() {
3775         synchronized (this) {
3776             return mPreferredDevice;
3777         }
3778     }
3779 
3780     /**
3781      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
3782      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
3783      * <code>getRoutedDevice()</code> will return null.
3784      */
3785     @Override
getRoutedDevice()3786     public AudioDeviceInfo getRoutedDevice() {
3787         int deviceId = native_getRoutedDeviceId();
3788         if (deviceId == 0) {
3789             return null;
3790         }
3791         return AudioManager.getDeviceForPortId(deviceId, AudioManager.GET_DEVICES_OUTPUTS);
3792     }
3793 
tryToDisableNativeRoutingCallback()3794     private void tryToDisableNativeRoutingCallback() {
3795         synchronized (mRoutingChangeListeners) {
3796             if (mEnableSelfRoutingMonitor) {
3797                 mEnableSelfRoutingMonitor = false;
3798                 testDisableNativeRoutingCallbacksLocked();
3799             }
3800         }
3801     }
3802 
3803     /**
3804      * Call BEFORE adding a routing callback handler and when enabling self routing listener
3805      * @return returns true for success, false otherwise.
3806      */
3807     @GuardedBy("mRoutingChangeListeners")
testEnableNativeRoutingCallbacksLocked()3808     private boolean testEnableNativeRoutingCallbacksLocked() {
3809         if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
3810             try {
3811                 native_enableDeviceCallback();
3812                 return true;
3813             } catch (IllegalStateException e) {
3814                 if (Log.isLoggable(TAG, Log.DEBUG)) {
3815                     Log.d(TAG, "testEnableNativeRoutingCallbacks failed", e);
3816                 }
3817             }
3818         }
3819         return false;
3820     }
3821 
3822     /*
3823      * Call AFTER removing a routing callback handler and when disabling self routing listener.
3824      */
3825     @GuardedBy("mRoutingChangeListeners")
testDisableNativeRoutingCallbacksLocked()3826     private void testDisableNativeRoutingCallbacksLocked() {
3827         if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
3828             try {
3829                 native_disableDeviceCallback();
3830             } catch (IllegalStateException e) {
3831                 // Fail silently as track state could have changed in between stop
3832                 // and disabling routing callback
3833             }
3834         }
3835     }
3836 
3837     //--------------------------------------------------------------------------
3838     // (Re)Routing Info
3839     //--------------------
    /**
     * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
     * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)})
     * by an app to receive (re)routing notifications.
     */
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();

    // True while the track itself monitors routing changes (independently of app listeners);
    // set from testEnableNativeRoutingCallbacksLocked() and cleared in
    // tryToDisableNativeRoutingCallback().
    @GuardedBy("mRoutingChangeListeners")
    private boolean mEnableSelfRoutingMonitor;
3851 
3852    /**
3853     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
3854     * changes on this AudioTrack.
3855     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
3856     * notifications of rerouting events.
3857     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3858     * the callback. If <code>null</code>, the {@link Handler} associated with the main
3859     * {@link Looper} will be used.
3860     */
3861     @Override
addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)3862     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
3863             Handler handler) {
3864         synchronized (mRoutingChangeListeners) {
3865             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
3866                 mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
3867                 mRoutingChangeListeners.put(
3868                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
3869                                 handler != null ? handler : new Handler(mInitializationLooper)));
3870             }
3871         }
3872     }
3873 
3874     /**
3875      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
3876      * to receive rerouting notifications.
3877      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
3878      * to remove.
3879      */
3880     @Override
removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)3881     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
3882         synchronized (mRoutingChangeListeners) {
3883             if (mRoutingChangeListeners.containsKey(listener)) {
3884                 mRoutingChangeListeners.remove(listener);
3885             }
3886             testDisableNativeRoutingCallbacksLocked();
3887         }
3888     }
3889 
3890     //--------------------------------------------------------------------------
3891     // (Re)Routing Info
3892     //--------------------
    /**
     * Defines the interface by which applications can receive notifications of
     * routing changes for the associated {@link AudioTrack}.
     *
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
        /**
         * Called when the routing of an AudioTrack changes from either an
         * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
         * retrieve the newly routed-to device.
         */
        public void onRoutingChanged(AudioTrack audioTrack);

        @Override
        default public void onRoutingChanged(AudioRouting router) {
            // Bridge the generic AudioRouting callback to the AudioTrack-typed legacy callback.
            if (router instanceof AudioTrack) {
                onRoutingChanged((AudioTrack) router);
            }
        }
    }
3916 
    /**
     * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
     * on this AudioTrack.
     * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
     * of rerouting events.
     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
     * the callback. If <code>null</code>, the {@link Handler} associated with the main
     * {@link Looper} will be used.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // Delegates to the AudioRouting variant; the legacy interface extends it.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }
3933 
    /**
     * Removes an {@link OnRoutingChangedListener} which has been previously added
     * to receive rerouting notifications.
     * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // Delegates to the AudioRouting variant; the legacy interface extends it.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }
3945 
3946     /**
3947      * Sends device list change notification to all listeners.
3948      */
broadcastRoutingChange()3949     private void broadcastRoutingChange() {
3950         AudioManager.resetAudioPortGeneration();
3951         baseUpdateDeviceId(getRoutedDevice());
3952         synchronized (mRoutingChangeListeners) {
3953             for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
3954                 delegate.notifyClient();
3955             }
3956         }
3957     }
3958 
3959     //--------------------------------------------------------------------------
3960     // Codec notifications
3961     //--------------------
3962 
3963     // OnCodecFormatChangedListener notifications uses an instance
3964     // of ListenerList to manage its listeners.
3965 
3966     private final Utils.ListenerList<AudioMetadataReadMap> mCodecFormatChangedListeners =
3967             new Utils.ListenerList();
3968 
    /**
     * Interface definition for a listener for codec format changes.
     */
    public interface OnCodecFormatChangedListener {
        /**
         * Called when the compressed codec format changes.
         *
         * @param audioTrack is the {@code AudioTrack} instance associated with the codec.
         * @param info is a {@link AudioMetadataReadMap} of values which contains decoded format
         *     changes reported by the codec.  Not all hardware
         *     codecs indicate codec format changes. Acceptable keys are taken from
         *     {@code AudioMetadata.Format.KEY_*} range, with the associated value type.
         *     May be null — see {@code @Nullable} on the parameter.
         */
        void onCodecFormatChanged(
                @NonNull AudioTrack audioTrack, @Nullable AudioMetadataReadMap info);
    }
3985 
3986     /**
3987      * Adds an {@link OnCodecFormatChangedListener} to receive notifications of
3988      * codec format change events on this {@code AudioTrack}.
3989      *
3990      * @param executor  Specifies the {@link Executor} object to control execution.
3991      *
3992      * @param listener The {@link OnCodecFormatChangedListener} interface to receive
3993      *     notifications of codec events.
3994      */
addOnCodecFormatChangedListener( @onNull @allbackExecutor Executor executor, @NonNull OnCodecFormatChangedListener listener)3995     public void addOnCodecFormatChangedListener(
3996             @NonNull @CallbackExecutor Executor executor,
3997             @NonNull OnCodecFormatChangedListener listener) { // NPE checks done by ListenerList.
3998         mCodecFormatChangedListeners.add(
3999                 listener, /* key for removal */
4000                 executor,
4001                 (int eventCode, AudioMetadataReadMap readMap) -> {
4002                     // eventCode is unused by this implementation.
4003                     listener.onCodecFormatChanged(this, readMap);
4004                 }
4005         );
4006     }
4007 
    /**
     * Removes an {@link OnCodecFormatChangedListener} which has been previously added
     * to receive codec format change events.
     *
     * @param listener The previously added {@link OnCodecFormatChangedListener} interface
     * to remove.
     */
    public void removeOnCodecFormatChangedListener(
            @NonNull OnCodecFormatChangedListener listener) {
        mCodecFormatChangedListeners.remove(listener);  // NPE checks done by ListenerList.
    }
4019 
4020     //---------------------------------------------------------
4021     // Interface definitions
4022     //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         *
         * @param track the AudioTrack whose marker was reached.
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         *
         * @param track the AudioTrack whose notification period elapsed.
         */
        void onPeriodicNotification(AudioTrack track);
    }
4040 
    /**
     * Abstract class to receive event notifications about the stream playback in offloaded mode.
     * See {@link AudioTrack#registerStreamEventCallback(Executor, StreamEventCallback)} to register
     * the callback on the given {@link AudioTrack} instance.
     * <p>All methods have empty default implementations, so subclasses only need to override
     * the events they care about.
     */
    public abstract static class StreamEventCallback {
        /**
         * Called when an offloaded track is no longer valid and has been discarded by the system.
         * An example of this happening is when an offloaded track has been paused too long, and
         * gets invalidated by the system to prevent any other offload.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onTearDown(@NonNull AudioTrack track) { }
        /**
         * Called when all the buffers of an offloaded track that were queued in the audio system
         * (e.g. the combination of the Android audio framework and the device's audio hardware)
         * have been played after {@link AudioTrack#stop()} has been called.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onPresentationEnded(@NonNull AudioTrack track) { }
        /**
         * Called when more audio data can be written without blocking on an offloaded track.
         * @param track the {@link AudioTrack} on which the event happened.
         * @param sizeInFrames the number of frames available to write without blocking.
         *   Note that the frame size of a compressed stream is 1 byte.
         */
        public void onDataRequest(@NonNull AudioTrack track, @IntRange(from = 0) int sizeInFrames) {
        }
    }
4070 
4071     /**
4072      * Registers a callback for the notification of stream events.
4073      * This callback can only be registered for instances operating in offloaded mode
4074      * (see {@link AudioTrack.Builder#setOffloadedPlayback(boolean)} and
4075      * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} for
4076      * more details).
4077      * @param executor {@link Executor} to handle the callbacks.
4078      * @param eventCallback the callback to receive the stream event notifications.
4079      */
registerStreamEventCallback(@onNull @allbackExecutor Executor executor, @NonNull StreamEventCallback eventCallback)4080     public void registerStreamEventCallback(@NonNull @CallbackExecutor Executor executor,
4081             @NonNull StreamEventCallback eventCallback) {
4082         if (eventCallback == null) {
4083             throw new IllegalArgumentException("Illegal null StreamEventCallback");
4084         }
4085         if (!mOffloaded) {
4086             throw new IllegalStateException(
4087                     "Cannot register StreamEventCallback on non-offloaded AudioTrack");
4088         }
4089         if (executor == null) {
4090             throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback");
4091         }
4092         synchronized (mStreamEventCbLock) {
4093             // check if eventCallback already in list
4094             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
4095                 if (seci.mStreamEventCb == eventCallback) {
4096                     throw new IllegalArgumentException(
4097                             "StreamEventCallback already registered");
4098                 }
4099             }
4100             beginStreamEventHandling();
4101             mStreamEventCbInfoList.add(new StreamEventCbInfo(executor, eventCallback));
4102         }
4103     }
4104 
4105     /**
4106      * Unregisters the callback for notification of stream events, previously registered
4107      * with {@link #registerStreamEventCallback(Executor, StreamEventCallback)}.
4108      * @param eventCallback the callback to unregister.
4109      */
unregisterStreamEventCallback(@onNull StreamEventCallback eventCallback)4110     public void unregisterStreamEventCallback(@NonNull StreamEventCallback eventCallback) {
4111         if (eventCallback == null) {
4112             throw new IllegalArgumentException("Illegal null StreamEventCallback");
4113         }
4114         if (!mOffloaded) {
4115             throw new IllegalStateException("No StreamEventCallback on non-offloaded AudioTrack");
4116         }
4117         synchronized (mStreamEventCbLock) {
4118             StreamEventCbInfo seciToRemove = null;
4119             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
4120                 if (seci.mStreamEventCb == eventCallback) {
4121                     // ok to remove while iterating over list as we exit iteration
4122                     mStreamEventCbInfoList.remove(seci);
4123                     if (mStreamEventCbInfoList.size() == 0) {
4124                         endStreamEventHandling();
4125                     }
4126                     return;
4127                 }
4128             }
4129             throw new IllegalArgumentException("StreamEventCallback was not registered");
4130         }
4131     }
4132 
4133     //---------------------------------------------------------
4134     // Offload
4135     //--------------------
4136     private static class StreamEventCbInfo {
4137         final Executor mStreamEventExec;
4138         final StreamEventCallback mStreamEventCb;
4139 
StreamEventCbInfo(Executor e, StreamEventCallback cb)4140         StreamEventCbInfo(Executor e, StreamEventCallback cb) {
4141             mStreamEventExec = e;
4142             mStreamEventCb = cb;
4143         }
4144     }
4145 
    // Guards the stream-event callback list and the handler-thread lifecycle below.
    private final Object mStreamEventCbLock = new Object();
    @GuardedBy("mStreamEventCbLock")
    @NonNull private LinkedList<StreamEventCbInfo> mStreamEventCbInfoList =
            new LinkedList<StreamEventCbInfo>();
    /**
     * Dedicated thread for handling the StreamEvent callbacks
     */
    private @Nullable HandlerThread mStreamEventHandlerThread;
    // Handler bound to mStreamEventHandlerThread's looper; volatile because it is read
    // without a lock from the native callback path (handleStreamEventFromNative()).
    private @Nullable volatile StreamEventHandler mStreamEventHandler;
4155 
4156     /**
4157      * Called from native AudioTrack callback thread, filter messages if necessary
4158      * and repost event on AudioTrack message loop to prevent blocking native thread.
4159      * @param what event code received from native
4160      * @param arg optional argument for event
4161      */
handleStreamEventFromNative(int what, int arg)4162     void handleStreamEventFromNative(int what, int arg) {
4163         if (mStreamEventHandler == null) {
4164             return;
4165         }
4166         switch (what) {
4167             case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
4168                 // replace previous CAN_WRITE_MORE_DATA messages with the latest value
4169                 mStreamEventHandler.removeMessages(NATIVE_EVENT_CAN_WRITE_MORE_DATA);
4170                 mStreamEventHandler.sendMessage(
4171                         mStreamEventHandler.obtainMessage(
4172                                 NATIVE_EVENT_CAN_WRITE_MORE_DATA, arg, 0/*ignored*/));
4173                 break;
4174             case NATIVE_EVENT_NEW_IAUDIOTRACK:
4175                 mStreamEventHandler.sendMessage(
4176                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_NEW_IAUDIOTRACK));
4177                 break;
4178             case NATIVE_EVENT_STREAM_END:
4179                 mStreamEventHandler.sendMessage(
4180                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_STREAM_END));
4181                 break;
4182         }
4183     }
4184 
    /**
     * Handler running on the dedicated stream-event thread; dispatches native stream events
     * (reposted by {@link #handleStreamEventFromNative}) to registered StreamEventCallbacks.
     */
    private class StreamEventHandler extends Handler {

        StreamEventHandler(Looper looper) {
            super(looper);
        }

        @Override
        public void handleMessage(Message msg) {
            final LinkedList<StreamEventCbInfo> cbInfoList;
            synchronized (mStreamEventCbLock) {
                if (msg.what == NATIVE_EVENT_STREAM_END) {
                    // STREAM_END drives the offload play-state machine before callbacks run.
                    synchronized (mPlayStateLock) {
                        if (mPlayState == PLAYSTATE_STOPPING) {
                            if (mOffloadEosPending) {
                                // More data was queued after stop(): restart playback.
                                native_start();
                                mPlayState = PLAYSTATE_PLAYING;
                            } else {
                                mAvSyncHeader = null;
                                mAvSyncBytesRemaining = 0;
                                mPlayState = PLAYSTATE_STOPPED;
                            }
                            mOffloadEosPending = false;
                            // Wake blockUntilOffloadDrain() waiters.
                            mPlayStateLock.notify();
                        }
                    }
                }
                if (mStreamEventCbInfoList.size() == 0) {
                    return;
                }
                // Snapshot the list so callbacks run outside the lock.
                cbInfoList = new LinkedList<StreamEventCbInfo>(mStreamEventCbInfoList);
            }

            // Clear the binder identity so app callbacks don't run with system credentials.
            final long identity = Binder.clearCallingIdentity();
            try {
                for (StreamEventCbInfo cbi : cbInfoList) {
                    switch (msg.what) {
                        case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onDataRequest(AudioTrack.this, msg.arg1));
                            break;
                        case NATIVE_EVENT_NEW_IAUDIOTRACK:
                            // TODO also release track as it's no longer usable
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onTearDown(AudioTrack.this));
                            break;
                        case NATIVE_EVENT_STREAM_END:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onPresentationEnded(AudioTrack.this));
                            break;
                    }
                }
            } finally {
                Binder.restoreCallingIdentity(identity);
            }
        }
    }
4241 
4242     @GuardedBy("mStreamEventCbLock")
beginStreamEventHandling()4243     private void beginStreamEventHandling() {
4244         if (mStreamEventHandlerThread == null) {
4245             mStreamEventHandlerThread = new HandlerThread(TAG + ".StreamEvent");
4246             mStreamEventHandlerThread.start();
4247             final Looper looper = mStreamEventHandlerThread.getLooper();
4248             if (looper != null) {
4249                 mStreamEventHandler = new StreamEventHandler(looper);
4250             }
4251         }
4252     }
4253 
4254     @GuardedBy("mStreamEventCbLock")
endStreamEventHandling()4255     private void endStreamEventHandling() {
4256         if (mStreamEventHandlerThread != null) {
4257             mStreamEventHandlerThread.quit();
4258             mStreamEventHandlerThread = null;
4259         }
4260     }
4261 
4262     /**
4263      * Sets a {@link LogSessionId} instance to this AudioTrack for metrics collection.
4264      *
4265      * @param logSessionId a {@link LogSessionId} instance which is used to
4266      *        identify this object to the metrics service. Proper generated
4267      *        Ids must be obtained from the Java metrics service and should
4268      *        be considered opaque. Use
4269      *        {@link LogSessionId#LOG_SESSION_ID_NONE} to remove the
4270      *        logSessionId association.
4271      * @throws IllegalStateException if AudioTrack not initialized.
4272      *
4273      */
setLogSessionId(@onNull LogSessionId logSessionId)4274     public void setLogSessionId(@NonNull LogSessionId logSessionId) {
4275         Objects.requireNonNull(logSessionId);
4276         if (mState == STATE_UNINITIALIZED) {
4277             throw new IllegalStateException("track not initialized");
4278         }
4279         String stringId = logSessionId.getStringId();
4280         native_setLogSessionId(stringId);
4281         mLogSessionId = logSessionId;
4282     }
4283 
    /**
     * Returns the {@link LogSessionId}.
     */
    @NonNull
    public LogSessionId getLogSessionId() {
        // Returns the id most recently stored by setLogSessionId().
        return mLogSessionId;
    }
4291 
4292     //---------------------------------------------------------
4293     // Inner classes
4294     //--------------------
    /**
     * Helper class to handle the forwarding of native events to the appropriate listener
     * (potentially) handled in a different thread
     */
    private class NativePositionEventHandlerDelegate {
        // Handler dispatching marker/period events to the listener; null when no looper exists.
        private final Handler mHandler;

        /**
         * @param track the AudioTrack the events relate to (forwarded to the listener).
         * @param listener receives marker/period notifications; may be null (events dropped).
         * @param handler determines the callback thread; falls back to the looper the
         *        AudioTrack was created on when null.
         */
        NativePositionEventHandlerDelegate(final AudioTrack track,
                                   final OnPlaybackPositionUpdateListener listener,
                                   Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                        case NATIVE_EVENT_MARKER:
                            if (listener != null) {
                                listener.onMarkerReached(track);
                            }
                            break;
                        case NATIVE_EVENT_NEW_POS:
                            if (listener != null) {
                                listener.onPeriodicNotification(track);
                            }
                            break;
                        default:
                            loge("Unknown native event type: " + msg.what);
                            break;
                        }
                    }
                };
            } else {
                // No usable looper: events for this delegate are silently dropped.
                mHandler = null;
            }
        }

        // Returns the dispatch handler, or null when no looper was available at construction.
        Handler getHandler() {
            return mHandler;
        }
    }
4349 
4350     //---------------------------------------------------------
4351     // Methods for IPlayer interface
4352     //--------------------
    @Override
    void playerStart() {
        // IPlayer interface entry point: delegates to the public play() method.
        play();
    }
4357 
    @Override
    void playerPause() {
        // IPlayer interface entry point: delegates to the public pause() method.
        pause();
    }
4362 
    @Override
    void playerStop() {
        // IPlayer interface entry point: delegates to the public stop() method.
        stop();
    }
4367 
4368     //---------------------------------------------------------
4369     // Java methods called from the native side
4370     //--------------------
4371     @SuppressWarnings("unused")
4372     @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
postEventFromNative(Object audiotrack_ref, int what, int arg1, int arg2, Object obj)4373     private static void postEventFromNative(Object audiotrack_ref,
4374             int what, int arg1, int arg2, Object obj) {
4375         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
4376         final AudioTrack track = (AudioTrack) ((WeakReference) audiotrack_ref).get();
4377         if (track == null) {
4378             return;
4379         }
4380 
4381         if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
4382             track.broadcastRoutingChange();
4383             return;
4384         }
4385 
4386         if (what == NATIVE_EVENT_CODEC_FORMAT_CHANGE) {
4387             ByteBuffer buffer = (ByteBuffer) obj;
4388             buffer.order(ByteOrder.nativeOrder());
4389             buffer.rewind();
4390             AudioMetadataReadMap audioMetaData = AudioMetadata.fromByteBuffer(buffer);
4391             if (audioMetaData == null) {
4392                 Log.e(TAG, "Unable to get audio metadata from byte buffer");
4393                 return;
4394             }
4395             track.mCodecFormatChangedListeners.notify(0 /* eventCode, unused */, audioMetaData);
4396             return;
4397         }
4398 
4399         if (what == NATIVE_EVENT_CAN_WRITE_MORE_DATA
4400                 || what == NATIVE_EVENT_NEW_IAUDIOTRACK
4401                 || what == NATIVE_EVENT_STREAM_END) {
4402             track.handleStreamEventFromNative(what, arg1);
4403             return;
4404         }
4405 
4406         NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
4407         if (delegate != null) {
4408             Handler handler = delegate.getHandler();
4409             if (handler != null) {
4410                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
4411                 handler.sendMessage(m);
4412             }
4413         }
4414     }
4415 
    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------
    // Implementations live in the JNI layer (android_media_AudioTrack.cpp).
    // NOTE(review): return-code conventions (success/error values) are defined
    // on the native side — confirm there before relying on specific values.

    // Capability query: whether the given encoding/rate/mask/attributes can be
    // played on a direct (non-mixed) output path.
    private static native boolean native_is_direct_output_supported(int encoding, int sampleRate,
            int channelMask, int channelIndexMask, int contentType, int usage, int flags);

    // Creates and configures the native AudioTrack backing this instance.
    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, @NonNull Parcel attributionSource,
            long nativeAudioTrack, boolean offload, int encapsulationMode,
            Object tunerConfiguration, @NonNull String opPackageName);

    // Called from finalize(): releases native resources.
    private native final void native_finalize();

    /**
     * Releases the native AudioTrack resources.
     * @hide
     */
    @UnsupportedAppUsage
    public native final void native_release();

    // Transport controls, forwarded by play()/stop()/pause()/flush().
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Write entry points — one per sample container type. The trailing flag
    // selects blocking vs. non-blocking behavior.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(ByteBuffer audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    // Rewinds a MODE_STATIC track so its buffer can be replayed.
    private native final int native_reload_static();

    // Buffer sizing, expressed in frames.
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    // Per-channel volume (stereo pair).
    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback sample rate (legacy int API) and full PlaybackParams API.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Position-notification configuration: one-shot marker and periodic updates.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    // Playback head position, in frames.
    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    // Loop region for MODE_STATIC playback.
    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not require a constructed track.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effect attachment and send level.
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Explicit routing: preferred device and routing-change callbacks.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    // VolumeShaper control.
    private native int native_applyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation);

    private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
    private native final int native_setPresentation(int presentationId, int programId);

    // Audio port id of this track in audioserver.
    private native int native_getPortId();

    // Offload-mode codec delay/padding, in frames.
    private native void native_set_delay_padding(int delayInFrames, int paddingInFrames);

    // Audio-description mix level and dual-mono mode; "get" variants write the
    // current value into the single-element output array.
    private native int native_set_audio_description_mix_level_db(float level);
    private native int native_get_audio_description_mix_level_db(float[] level);
    private native int native_set_dual_mono_mode(int dualMonoMode);
    private native int native_get_dual_mono_mode(int[] dualMonoMode);
    private native void native_setLogSessionId(@Nullable String logSessionId);
    private native int native_setStartThresholdInFrames(int startThresholdInFrames);
    private native int native_getStartThresholdInFrames();

    /**
     * Sets the audio service Player Interface Id.
     *
     * The playerIId does not change over the lifetime of the client
     * Java AudioTrack and is set automatically on creation.
     *
     * This call informs the native AudioTrack for metrics logging purposes.
     *
     * @param id the value reported by AudioManager when registering the track.
     *           A value of -1 indicates invalid - the playerIId was never set.
     * @throws IllegalStateException if AudioTrack not initialized.
     */
    private native void native_setPlayerIId(int playerIId);
4544 
4545     //---------------------------------------------------------
4546     // Utility methods
4547     //------------------
4548 
    // Debug-level logging helper using the class-wide TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
4552 
    // Error-level logging helper using the class-wide TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
4556 
    /**
     * Keys for the metrics bundle returned by {@link AudioTrack#getMetrics}.
     * All values are public API (or {@code @TestApi}); their string values must
     * never change, even where they look inconsistent (see notes below).
     */
    public final static class MetricsConstants
    {
        // Non-instantiable constants holder.
        private MetricsConstants() {}

        // MM_PREFIX is slightly different than TAG, used to avoid cut-n-paste errors.
        private static final String MM_PREFIX = "android.media.audiotrack.";

        /**
         * Key to extract the stream type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * This value may not exist in API level {@link android.os.Build.VERSION_CODES#P}.
         * The value is a {@code String}.
         */
        public static final String STREAMTYPE = MM_PREFIX + "streamtype";

        /**
         * Key to extract the attribute content type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String CONTENTTYPE = MM_PREFIX + "type";

        /**
         * Key to extract the attribute usage for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String USAGE = MM_PREFIX + "usage";

        /**
         * Key to extract the sample rate for this track in Hz
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is an {@code int}.
         * @deprecated This does not work. Use {@link AudioTrack#getSampleRate()} instead.
         */
        // NOTE(review): the "audiorecord" prefix (instead of MM_PREFIX) looks like a
        // historical copy-paste; the literal is public API and must stay as-is.
        @Deprecated
        public static final String SAMPLERATE = "android.media.audiorecord.samplerate";

        /**
         * Key to extract the native channel mask information for this track
         * from the {@link AudioTrack#getMetrics} return value.
         *
         * The value is a {@code long}.
         * @deprecated This does not work. Use {@link AudioTrack#getFormat()} and read from
         * the returned format instead.
         */
        // NOTE(review): same historical "audiorecord" prefix as SAMPLERATE; do not change.
        @Deprecated
        public static final String CHANNELMASK = "android.media.audiorecord.channelmask";

        /**
         * Use for testing only. Do not expose.
         * The current sample rate.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String SAMPLE_RATE = MM_PREFIX + "sampleRate";

        /**
         * Use for testing only. Do not expose.
         * The native channel mask.
         * The value is a {@code long}.
         * @hide
         */
        @TestApi
        public static final String CHANNEL_MASK = MM_PREFIX + "channelMask";

        /**
         * Use for testing only. Do not expose.
         * The output audio data encoding.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ENCODING = MM_PREFIX + "encoding";

        /**
         * Use for testing only. Do not expose.
         * The port id of this track port in audioserver.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String PORT_ID = MM_PREFIX + "portId";

        /**
         * Use for testing only. Do not expose.
         * The buffer frameCount.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String FRAME_COUNT = MM_PREFIX + "frameCount";

        /**
         * Use for testing only. Do not expose.
         * The actual track attributes used.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ATTRIBUTES = MM_PREFIX + "attributes";
    }
4660 }
4661