• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import android.annotation.CallbackExecutor;
20 import android.annotation.FloatRange;
21 import android.annotation.IntDef;
22 import android.annotation.IntRange;
23 import android.annotation.NonNull;
24 import android.annotation.Nullable;
25 import android.annotation.RequiresPermission;
26 import android.annotation.SystemApi;
27 import android.annotation.TestApi;
28 import android.compat.annotation.UnsupportedAppUsage;
29 import android.media.audiopolicy.AudioMix;
30 import android.media.audiopolicy.AudioMixingRule;
31 import android.media.audiopolicy.AudioPolicy;
32 import android.media.metrics.LogSessionId;
33 import android.os.Binder;
34 import android.os.Build;
35 import android.os.Handler;
36 import android.os.HandlerThread;
37 import android.os.Looper;
38 import android.os.Message;
39 import android.os.PersistableBundle;
40 import android.util.ArrayMap;
41 import android.util.Log;
42 
43 import com.android.internal.annotations.GuardedBy;
44 
45 import java.lang.annotation.Retention;
46 import java.lang.annotation.RetentionPolicy;
47 import java.lang.ref.WeakReference;
48 import java.nio.ByteBuffer;
49 import java.nio.ByteOrder;
50 import java.nio.NioUtils;
51 import java.util.HashMap;
52 import java.util.LinkedList;
53 import java.util.Objects;
54 import java.util.concurrent.Executor;
55 
/**
 * The AudioTrack class manages and plays a single audio resource for Java applications.
 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
 * achieved by "pushing" the data to the AudioTrack object using one of the
 *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
 *  and {@link #write(float[], int, int, int)} methods.
 *
 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
 * one of the {@code write()} methods. These are blocking and return when the data has been
 * transferred from the Java layer to the native layer and queued for playback. The streaming
 * mode is most useful when playing blocks of audio data that for instance are:
 *
 * <ul>
 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
 *   <li>too big to fit in memory because of the characteristics of the audio data
 *         (high sampling rate, bits per sample ...)</li>
 *   <li>received or generated while previously queued audio is playing.</li>
 * </ul>
 *
 * The static mode should be chosen when dealing with short sounds that fit in memory and
 * that need to be played with the smallest latency possible. The static mode will
 * therefore be preferred for UI and game sounds that are played often, and with the
 * smallest overhead possible.
 *
 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
 * The size of this buffer, specified during the construction, determines how long an AudioTrack
 * can play before running out of data.<br>
 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
 * be played from it.<br>
 * For the streaming mode, data will be written to the audio sink in chunks of
 * sizes less than or equal to the total buffer size.
 *
 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
 */
91 public class AudioTrack extends PlayerBase
92                         implements AudioRouting
93                                  , VolumeAutomation
94 {
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
    /**
      * @hide
      * indicates AudioTrack state is stopping waiting for NATIVE_EVENT_STREAM_END to
      * transition to PLAYSTATE_STOPPED.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_STOPPING = 4;
    /**
      * @hide
      * indicates AudioTrack state is paused from stopping state. Will transition to
      * PLAYSTATE_STOPPING if play() is called.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_PAUSED_STOPPING = 5;

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    // Note: To avoid collisions with other event constants,
    // do not define an event here that is the same value as
    // AudioSystem.NATIVE_EVENT_ROUTING_CHANGE.

    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;
    /**
     * Callback for more data
     */
    private static final int NATIVE_EVENT_CAN_WRITE_MORE_DATA = 9;
    /**
     * IAudioTrack tear down for offloaded tracks
     * TODO: when received, java AudioTrack must be released
     */
    private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6;
    /**
     * Event id denotes when all the buffers queued in AF and HW are played
     * back (after stop is called) for an offloaded track.
     */
    private static final int NATIVE_EVENT_STREAM_END = 7;
    /**
     * Event id denotes when the codec format changes.
     *
     * Note: Similar to a device routing change (AudioSystem.NATIVE_EVENT_ROUTING_CHANGE),
     * this event comes from the AudioFlinger Thread / Output Stream management
     * (not from buffer indications as above).
     */
    private static final int NATIVE_EVENT_CODEC_FORMAT_CHANGE = 100;

    /** Tag used for logging by this class. */
    private final static String TAG = "android.media.AudioTrack";
236 
    /** @hide */
    @IntDef({
        ENCAPSULATION_MODE_NONE,
        ENCAPSULATION_MODE_ELEMENTARY_STREAM,
        // ENCAPSULATION_MODE_HANDLE, @SystemApi
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMode {}

    // Important: The ENCAPSULATION_MODE values must be kept in sync with native header files.
    /**
     * This mode indicates no metadata encapsulation,
     * which is the default mode for sending audio data
     * through {@code AudioTrack}.
     */
    public static final int ENCAPSULATION_MODE_NONE = 0;
    /**
     * This mode indicates metadata encapsulation with an elementary stream payload.
     * Both compressed and PCM format is allowed.
     */
    public static final int ENCAPSULATION_MODE_ELEMENTARY_STREAM = 1;
    /**
     * This mode indicates metadata encapsulation with a handle payload
     * and is set through {@link Builder#setEncapsulationMode(int)}.
     * The handle is a 64 bit long, provided by the Tuner API
     * in {@link android.os.Build.VERSION_CODES#R}.
     * @hide
     */
    @SystemApi
    @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
    public static final int ENCAPSULATION_MODE_HANDLE = 2;

    /* Enumeration of metadata types permitted for use by
     * encapsulation mode audio streams.
     */
    /** @hide */
    @IntDef(prefix = { "ENCAPSULATION_METADATA_TYPE_" }, value = {
        ENCAPSULATION_METADATA_TYPE_NONE, /* reserved */
        ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER,
        ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMetadataType {}

    /**
     * Reserved do not use.
     * @hide
     */
    public static final int ENCAPSULATION_METADATA_TYPE_NONE = 0; // reserved

    /**
     * Encapsulation metadata type for framework tuner information.
     *
     * Refer to the Android Media TV Tuner API for details.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER = 1;

    /**
     * Encapsulation metadata type for DVB AD descriptor.
     *
     * This metadata is formatted per ETSI TS 101 154 Table E.1: AD_descriptor.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR = 2;

    /* Dual Mono handling is used when a stereo audio stream
     * contains separate audio content on the left and right channels.
     * Such information about the content of the stream may be found, for example, in
     * ITU T-REC-J.94-201610 A.6.2.3 Component descriptor.
     */
    /** @hide */
    @IntDef({
        DUAL_MONO_MODE_OFF,
        DUAL_MONO_MODE_LR,
        DUAL_MONO_MODE_LL,
        DUAL_MONO_MODE_RR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface DualMonoMode {}
    // Important: The DUAL_MONO_MODE values must be kept in sync with native header files.
    /**
     * This mode disables any Dual Mono presentation effect.
     *
     */
    public static final int DUAL_MONO_MODE_OFF = 0;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left and right audio channels blended together
     * and delivered to both channels.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that the left-right stereo symmetric
     * channels are pairwise blended;
     * the other channels such as center are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LR = 1;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left audio channel replicated into the right audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the left channel position replicated
     * into the right channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LL = 2;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the right audio channel replicated into the left audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the right channel position replicated
     * into the left channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_RR = 3;

    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    /** @hide */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}

    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
     * If the device supports it, this mode
     * enables a lower latency path through to the audio output sink.
     * Effects may no longer work with such an {@code AudioTrack} and
     * the sample rate must match that of the output sink.
     * <p>
     * Applications should be aware that low latency requires careful
     * buffer management, with smaller chunks of audio data written by each
     * {@code write()} call.
     * <p>
     * If this flag is used without specifying a {@code bufferSizeInBytes} then the
     * {@code AudioTrack}'s actual buffer size may be too small.
     * It is recommended that a fairly
     * large buffer should be specified when the {@code AudioTrack} is created.
     * Then the actual size can be reduced by calling
     * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
     * by lowering it after each {@code write()} call until the audio glitches,
     * which is detected by calling
     * {@link #getUnderrunCount()}. Then the buffer size can be increased
     * until there are no glitches.
     * This tuning step should be done while playing silence.
     * This technique provides a compromise between latency and glitch rate.
     */
    public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;

    /**
     * Power saving performance mode for an {@link AudioTrack}.
     * If the device supports it, this
     * mode will enable a lower power path to the audio output sink.
     * In addition, this lower power path typically will have
     * deeper internal buffers and better underrun resistance,
     * with a tradeoff of higher latency.
     * <p>
     * In this mode, applications should attempt to use a larger buffer size
     * and deliver larger chunks of audio data per {@code write()} call.
     * Use {@link #getBufferSizeInFrames()} to determine
     * the actual buffer size of the {@code AudioTrack} as it may have increased
     * to accommodate a deeper buffer.
     */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // keep in sync with system/media/audio/include/system/audio-base.h
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;

    // Size of HW_AV_SYNC track AV header.
    private static final float HEADER_V2_SIZE_BYTES = 20.0f;
455 
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;

    /**
     * Indicates that we are expecting an end of stream callback following a call
     * to setOffloadEndOfStream() in a gapless track transition context. The native track
     * will be restarted automatically.
     */
    private boolean mOffloadEosPending = false;

    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    @UnsupportedAppUsage
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * The AudioAttributes used in configuration.
     */
    private AudioAttributes mConfiguredAudioAttributes;
    /**
     * Audio session ID
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;
    /**
     * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
     */
    private int mOffset = 0;
    /**
     * Indicates whether the track is intended to play in offload mode.
     */
    private boolean mOffloaded = false;
    /**
     * When offloaded track: delay for decoder in frames
     */
    private int mOffloadDelayFrames = 0;
    /**
     * When offloaded track: padding for decoder in frames
     */
    private int mOffloadPaddingFrames = 0;

    /**
     * The log session id used for metrics.
     * {@link LogSessionId#LOG_SESSION_ID_NONE} here means it is not set.
     */
    @NonNull private LogSessionId mLogSessionId = LogSessionId.LOG_SESSION_ID_NONE;

    private AudioPolicy mAudioPolicy;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
    private long mJniData;
599 
600 
601     //--------------------------------------------------------------------------
602     // Constructor, Finalize
603     //--------------------
604     /**
605      * Class constructor.
606      * @param streamType the type of the audio stream. See
607      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
608      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
609      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
610      * @param sampleRateInHz the initial source sample rate expressed in Hz.
611      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
612      *   which is usually the sample rate of the sink.
613      *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
614      * @param channelConfig describes the configuration of the audio channels.
615      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
616      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
617      * @param audioFormat the format in which the audio data is represented.
618      *   See {@link AudioFormat#ENCODING_PCM_16BIT},
619      *   {@link AudioFormat#ENCODING_PCM_8BIT},
620      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
621      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
622      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
623      *   <p> If the track's creation mode is {@link #MODE_STATIC},
624      *   this is the maximum length sample, or audio clip, that can be played by this instance.
625      *   <p> If the track's creation mode is {@link #MODE_STREAM},
626      *   this should be the desired buffer size
627      *   for the <code>AudioTrack</code> to satisfy the application's
628      *   latency requirements.
629      *   If <code>bufferSizeInBytes</code> is less than the
630      *   minimum buffer size for the output sink, it is increased to the minimum
631      *   buffer size.
632      *   The method {@link #getBufferSizeInFrames()} returns the
633      *   actual size in frames of the buffer created, which
634      *   determines the minimum frequency to write
635      *   to the streaming <code>AudioTrack</code> to avoid underrun.
636      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
637      *   for an AudioTrack instance in streaming mode.
638      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
639      * @throws java.lang.IllegalArgumentException
640      * @deprecated use {@link Builder} or
641      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
642      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
643      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode)644     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
645             int bufferSizeInBytes, int mode)
646     throws IllegalArgumentException {
647         this(streamType, sampleRateInHz, channelConfig, audioFormat,
648                 bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
649     }
650 
651     /**
652      * Class constructor with audio session. Use this constructor when the AudioTrack must be
653      * attached to a particular audio session. The primary use of the audio session ID is to
654      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
655      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
656      * and media players in the same session and not to the output mix.
657      * When an AudioTrack is created without specifying a session, it will create its own session
658      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
659      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
660      * session
661      * with all other media players or audio tracks in the same session, otherwise a new session
662      * will be created for this track if none is supplied.
663      * @param streamType the type of the audio stream. See
664      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
665      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
666      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
667      * @param sampleRateInHz the initial source sample rate expressed in Hz.
668      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
669      *   which is usually the sample rate of the sink.
670      * @param channelConfig describes the configuration of the audio channels.
671      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
672      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
673      * @param audioFormat the format in which the audio data is represented.
674      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
675      *   {@link AudioFormat#ENCODING_PCM_8BIT},
676      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
677      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
678      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
679      *   <p> If the track's creation mode is {@link #MODE_STATIC},
680      *   this is the maximum length sample, or audio clip, that can be played by this instance.
681      *   <p> If the track's creation mode is {@link #MODE_STREAM},
682      *   this should be the desired buffer size
683      *   for the <code>AudioTrack</code> to satisfy the application's
684      *   latency requirements.
685      *   If <code>bufferSizeInBytes</code> is less than the
686      *   minimum buffer size for the output sink, it is increased to the minimum
687      *   buffer size.
688      *   The method {@link #getBufferSizeInFrames()} returns the
689      *   actual size in frames of the buffer created, which
690      *   determines the minimum frequency to write
691      *   to the streaming <code>AudioTrack</code> to avoid underrun.
692      *   You can write data into this buffer in smaller chunks than this size.
693      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
694      *   for an AudioTrack instance in streaming mode.
695      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
696      * @param sessionId Id of audio session the AudioTrack must be attached to
697      * @throws java.lang.IllegalArgumentException
698      * @deprecated use {@link Builder} or
699      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
700      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
701      */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Wrap the legacy stream type into AudioAttributes and the PCM parameters into an
        // AudioFormat, then delegate to the attributes-based public constructor.
        // Note: this() must be the first statement, so the wrapping happens inline.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
        // Record use of the deprecated stream-type construction path (this API is @deprecated
        // in favor of the AudioAttributes-based constructor).
        deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
    }
718 
719     /**
720      * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
721      * @param attributes a non-null {@link AudioAttributes} instance.
722      * @param format a non-null {@link AudioFormat} instance describing the format of the data
723      *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
724      *     configuring the audio format parameters such as encoding, channel mask and sample rate.
725      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
726      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
727      *   <p> If the track's creation mode is {@link #MODE_STATIC},
728      *   this is the maximum length sample, or audio clip, that can be played by this instance.
729      *   <p> If the track's creation mode is {@link #MODE_STREAM},
730      *   this should be the desired buffer size
731      *   for the <code>AudioTrack</code> to satisfy the application's
732      *   latency requirements.
733      *   If <code>bufferSizeInBytes</code> is less than the
734      *   minimum buffer size for the output sink, it is increased to the minimum
735      *   buffer size.
736      *   The method {@link #getBufferSizeInFrames()} returns the
737      *   actual size in frames of the buffer created, which
738      *   determines the minimum frequency to write
739      *   to the streaming <code>AudioTrack</code> to avoid underrun.
740      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
741      *   for an AudioTrack instance in streaming mode.
742      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
743      * @param sessionId ID of audio session the AudioTrack must be attached to, or
744      *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
745      *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
746      *   construction.
747      * @throws IllegalArgumentException
748      */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        // Delegate to the fully-parameterized private constructor with the defaults:
        // no offload, no metadata encapsulation, and no tuner configuration.
        this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/,
                ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
    }
755 
    /**
     * Full private constructor backing the public constructors and {@link Builder#build()}.
     * Validates the requested format, buffer size, transfer mode and session id, optionally
     * rewrites the attributes for power-saving (deep-buffer) playback, then performs the
     * native track setup. On native failure the constructor returns normally but leaves the
     * instance with {@code mState == STATE_UNINITIALIZED}.
     */
    private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId, boolean offload, int encapsulationMode,
            @Nullable TunerConfiguration tunerConfiguration)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // mState already == STATE_UNINITIALIZED

        mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // Check if we should enable deep buffer mode: swap FLAG_LOW_LATENCY for
        // FLAG_DEEP_BUFFER on the effective attributes (the configured copy above is kept).
        if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
            mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // SAMPLE_RATE_UNSPECIFIED is conveyed to the native layer as 0 (route-dependent rate).
        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        // Extract only the AudioFormat properties the caller explicitly set.
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates and stores the parameters into the m* fields (mSampleRate, mChannelMask,
        // mChannelIndexMask, mAudioFormat, mDataLoadMode); throws IllegalArgumentException.
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mOffloaded = offload;
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // One-element arrays are in/out parameters for native_setup: the native layer may
        // replace the requested sample rate and allocate/confirm the session id.
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
                offload, encapsulationMode, tunerConfiguration,
                getCurrentOpPackageName());
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.

        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
            int frameSizeInBytes;
            if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
            } else {
                frameSizeInBytes = 1;
            }
            // Round the AV sync header size up to a whole number of frames.
            // NOTE(review): this is only correct if HEADER_V2_SIZE_BYTES is declared as a
            // floating-point constant; if both operands are int, the division truncates and
            // Math.ceil is a no-op — confirm the declaration.
            mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
        }

        // Static tracks are not fully usable until the client writes the audio data.
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }

        baseRegisterPlayer(mSessionId);
        native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack.
    }
856 
857     /**
858      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
859      * the AudioTrackRoutingProxy subclass.
860      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
861      * (associated with an OpenSL ES player).
862      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
863      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
864      * it means that the OpenSL player interface hasn't been realized, so there is no native
     * AudioTrack to connect to. In this case wait to call deferred_connect() until the
866      * OpenSLES interface is realized.
867      */
AudioTrack(long nativeTrackInJavaObj)868     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
869         super(new AudioAttributes.Builder().build(),
870                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
871         // "final"s
872         mNativeTrackInJavaObj = 0;
873         mJniData = 0;
874 
875         // remember which looper is associated with the AudioTrack instantiation
876         Looper looper;
877         if ((looper = Looper.myLooper()) == null) {
878             looper = Looper.getMainLooper();
879         }
880         mInitializationLooper = looper;
881 
882         // other initialization...
883         if (nativeTrackInJavaObj != 0) {
884             baseRegisterPlayer(AudioSystem.AUDIO_SESSION_ALLOCATE);
885             deferred_connect(nativeTrackInJavaObj);
886         } else {
887             mState = STATE_UNINITIALIZED;
888         }
889     }
890 
891     /**
892      * @hide
893      */
894     @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
deferred_connect(long nativeTrackInJavaObj)895     /* package */ void deferred_connect(long nativeTrackInJavaObj) {
896         if (mState != STATE_INITIALIZED) {
897             // Note that for this native_setup, we are providing an already created/initialized
898             // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
899             int[] session = { 0 };
900             int[] rates = { 0 };
901             int initResult = native_setup(new WeakReference<AudioTrack>(this),
902                     null /*mAttributes - NA*/,
903                     rates /*sampleRate - NA*/,
904                     0 /*mChannelMask - NA*/,
905                     0 /*mChannelIndexMask - NA*/,
906                     0 /*mAudioFormat - NA*/,
907                     0 /*mNativeBufferSizeInBytes - NA*/,
908                     0 /*mDataLoadMode - NA*/,
909                     session,
910                     nativeTrackInJavaObj,
911                     false /*offload*/,
912                     ENCAPSULATION_MODE_NONE,
913                     null /* tunerConfiguration */,
914                     "" /* opPackagename */);
915             if (initResult != SUCCESS) {
916                 loge("Error code "+initResult+" when initializing AudioTrack.");
917                 return; // with mState == STATE_UNINITIALIZED
918             }
919 
920             mSessionId = session[0];
921 
922             mState = STATE_INITIALIZED;
923         }
924     }
925 
926     /**
927      * TunerConfiguration is used to convey tuner information
928      * from the android.media.tv.Tuner API to AudioTrack construction.
929      *
930      * Use the Builder to construct the TunerConfiguration object,
931      * which is then used by the {@link AudioTrack.Builder} to create an AudioTrack.
932      * @hide
933      */
934     @SystemApi
935     public static class TunerConfiguration {
936         private final int mContentId;
937         private final int mSyncId;
938 
939         /**
940          * A special content id for {@link #TunerConfiguration(int, int)}
941          * indicating audio is delivered
942          * from an {@code AudioTrack} write, not tunneled from the tuner stack.
943          */
944         public static final int CONTENT_ID_NONE = 0;
945 
946         /**
947          * Constructs a TunerConfiguration instance for use in {@link AudioTrack.Builder}
948          *
949          * @param contentId selects the audio stream to use.
950          *     The contentId may be obtained from
951          *     {@link android.media.tv.tuner.filter.Filter#getId()},
952          *     such obtained id is always a positive number.
953          *     If audio is to be delivered through an {@code AudioTrack} write
954          *     then {@code CONTENT_ID_NONE} may be used.
955          * @param syncId selects the clock to use for synchronization
956          *     of audio with other streams such as video.
957          *     The syncId may be obtained from
958          *     {@link android.media.tv.tuner.Tuner#getAvSyncHwId()}.
959          *     This is always a positive number.
960          */
961         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
TunerConfiguration( @ntRangefrom = 0) int contentId, @IntRange(from = 1)int syncId)962         public TunerConfiguration(
963                 @IntRange(from = 0) int contentId, @IntRange(from = 1)int syncId) {
964             if (contentId < 0) {
965                 throw new IllegalArgumentException(
966                         "contentId " + contentId + " must be positive or CONTENT_ID_NONE");
967             }
968             if (syncId < 1) {
969                 throw new IllegalArgumentException("syncId " + syncId + " must be positive");
970             }
971             mContentId = contentId;
972             mSyncId = syncId;
973         }
974 
975         /**
976          * Returns the contentId.
977          */
978         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getContentId()979         public @IntRange(from = 1) int getContentId() {
980             return mContentId; // The Builder ensures this is > 0.
981         }
982 
983         /**
984          * Returns the syncId.
985          */
986         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getSyncId()987         public @IntRange(from = 1) int getSyncId() {
988             return mSyncId;  // The Builder ensures this is > 0.
989         }
990     }
991 
992     /**
993      * Builder class for {@link AudioTrack} objects.
994      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
995      * attributes and audio format parameters, you indicate which of those vary from the default
996      * behavior on the device.
997      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
998      * parameters, to be used by a new <code>AudioTrack</code> instance:
999      *
1000      * <pre class="prettyprint">
1001      * AudioTrack player = new AudioTrack.Builder()
1002      *         .setAudioAttributes(new AudioAttributes.Builder()
1003      *                  .setUsage(AudioAttributes.USAGE_ALARM)
1004      *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
1005      *                  .build())
1006      *         .setAudioFormat(new AudioFormat.Builder()
1007      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
1008      *                 .setSampleRate(44100)
1009      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
1010      *                 .build())
1011      *         .setBufferSizeInBytes(minBuffSize)
1012      *         .build();
1013      * </pre>
1014      * <p>
1015      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
1016      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
1017      * <br>If the audio format is not specified or is incomplete, its channel configuration will be
1018      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
1019      * {@link AudioFormat#ENCODING_PCM_16BIT}.
1020      * The sample rate will depend on the device actually selected for playback and can be queried
1021      * with {@link #getSampleRate()} method.
1022      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
1023      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
1024      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
1025      * <code>MODE_STREAM</code> will be used.
1026      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
1027      * be generated.
1028      * <br>Offload is false by default.
1029      */
1030     public static class Builder {
        // Set via setAudioAttributes(); when left null, build() substitutes attributes
        // with USAGE_MEDIA.
        private AudioAttributes mAttributes;
        // Set via setAudioFormat(); incomplete fields get the defaults described in the
        // class javadoc (stereo, PCM 16-bit, route-dependent sample rate).
        private AudioFormat mFormat;
        // Set via setBufferSizeInBytes(); 0 means unset (the class javadoc states the
        // minimum buffer size is then used for MODE_STREAM).
        private int mBufferSizeInBytes;
        private int mEncapsulationMode = ENCAPSULATION_MODE_NONE;
        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM;
        private int mPerformanceMode = PERFORMANCE_MODE_NONE;
        private boolean mOffload = false;
        private TunerConfiguration mTunerConfiguration;
        // @hide call-injection mode; defaults to no call redirection.
        private int mCallRedirectionMode = AudioManager.CALL_REDIRECT_NONE;

        /**
         * Constructs a new Builder with the default values as described above.
         */
        public Builder() {
        }
1047 
1048         /**
1049          * Sets the {@link AudioAttributes}.
1050          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
1051          *     data to be played.
1052          * @return the same Builder instance.
1053          * @throws IllegalArgumentException
1054          */
setAudioAttributes(@onNull AudioAttributes attributes)1055         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
1056                 throws IllegalArgumentException {
1057             if (attributes == null) {
1058                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1059             }
1060             // keep reference, we only copy the data when building
1061             mAttributes = attributes;
1062             return this;
1063         }
1064 
1065         /**
1066          * Sets the format of the audio data to be played by the {@link AudioTrack}.
1067          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
1068          * as encoding, channel mask and sample rate.
1069          * @param format a non-null {@link AudioFormat} instance.
1070          * @return the same Builder instance.
1071          * @throws IllegalArgumentException
1072          */
setAudioFormat(@onNull AudioFormat format)1073         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
1074                 throws IllegalArgumentException {
1075             if (format == null) {
1076                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
1077             }
1078             // keep reference, we only copy the data when building
1079             mFormat = format;
1080             return this;
1081         }
1082 
1083         /**
1084          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
1085          * If using the {@link AudioTrack} in streaming mode
1086          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
1087          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
1088          * the estimated minimum buffer size for the creation of an AudioTrack instance
1089          * in streaming mode.
1090          * <br>If using the <code>AudioTrack</code> in static mode (see
1091          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
1092          * played by this instance.
1093          * @param bufferSizeInBytes
1094          * @return the same Builder instance.
1095          * @throws IllegalArgumentException
1096          */
setBufferSizeInBytes(@ntRangefrom = 0) int bufferSizeInBytes)1097         public @NonNull Builder setBufferSizeInBytes(@IntRange(from = 0) int bufferSizeInBytes)
1098                 throws IllegalArgumentException {
1099             if (bufferSizeInBytes <= 0) {
1100                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
1101             }
1102             mBufferSizeInBytes = bufferSizeInBytes;
1103             return this;
1104         }
1105 
1106         /**
1107          * Sets the encapsulation mode.
1108          *
1109          * Encapsulation mode allows metadata to be sent together with
1110          * the audio data payload in a {@code ByteBuffer}.
1111          * This requires a compatible hardware audio codec.
1112          *
1113          * @param encapsulationMode one of {@link AudioTrack#ENCAPSULATION_MODE_NONE},
1114          *        or {@link AudioTrack#ENCAPSULATION_MODE_ELEMENTARY_STREAM}.
1115          * @return the same Builder instance.
1116          */
1117         // Note: with the correct permission {@code AudioTrack#ENCAPSULATION_MODE_HANDLE}
1118         // may be used as well.
setEncapsulationMode(@ncapsulationMode int encapsulationMode)1119         public @NonNull Builder setEncapsulationMode(@EncapsulationMode int encapsulationMode) {
1120             switch (encapsulationMode) {
1121                 case ENCAPSULATION_MODE_NONE:
1122                 case ENCAPSULATION_MODE_ELEMENTARY_STREAM:
1123                 case ENCAPSULATION_MODE_HANDLE:
1124                     mEncapsulationMode = encapsulationMode;
1125                     break;
1126                 default:
1127                     throw new IllegalArgumentException(
1128                             "Invalid encapsulation mode " + encapsulationMode);
1129             }
1130             return this;
1131         }
1132 
1133         /**
1134          * Sets the mode under which buffers of audio data are transferred from the
1135          * {@link AudioTrack} to the framework.
1136          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
1137          * @return the same Builder instance.
1138          * @throws IllegalArgumentException
1139          */
setTransferMode(@ransferMode int mode)1140         public @NonNull Builder setTransferMode(@TransferMode int mode)
1141                 throws IllegalArgumentException {
1142             switch(mode) {
1143                 case MODE_STREAM:
1144                 case MODE_STATIC:
1145                     mMode = mode;
1146                     break;
1147                 default:
1148                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
1149             }
1150             return this;
1151         }
1152 
1153         /**
1154          * Sets the session ID the {@link AudioTrack} will be attached to.
1155          * @param sessionId a strictly positive ID number retrieved from another
1156          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
1157          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
1158          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
1159          * @return the same Builder instance.
1160          * @throws IllegalArgumentException
1161          */
setSessionId(@ntRangefrom = 1) int sessionId)1162         public @NonNull Builder setSessionId(@IntRange(from = 1) int sessionId)
1163                 throws IllegalArgumentException {
1164             if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
1165                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
1166             }
1167             mSessionId = sessionId;
1168             return this;
1169         }
1170 
1171         /**
1172          * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
1173          * may not be supported by the particular device, and the framework is free
1174          * to ignore such request if it is incompatible with other requests or hardware.
1175          *
1176          * @param performanceMode one of
1177          * {@link AudioTrack#PERFORMANCE_MODE_NONE},
1178          * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
1179          * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
1180          * @return the same Builder instance.
1181          * @throws IllegalArgumentException if {@code performanceMode} is not valid.
1182          */
setPerformanceMode(@erformanceMode int performanceMode)1183         public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
1184             switch (performanceMode) {
1185                 case PERFORMANCE_MODE_NONE:
1186                 case PERFORMANCE_MODE_LOW_LATENCY:
1187                 case PERFORMANCE_MODE_POWER_SAVING:
1188                     mPerformanceMode = performanceMode;
1189                     break;
1190                 default:
1191                     throw new IllegalArgumentException(
1192                             "Invalid performance mode " + performanceMode);
1193             }
1194             return this;
1195         }
1196 
1197         /**
1198          * Sets whether this track will play through the offloaded audio path.
1199          * When set to true, at build time, the audio format will be checked against
1200          * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)}
1201          * to verify the audio format used by this track is supported on the device's offload
1202          * path (if any).
1203          * <br>Offload is only supported for media audio streams, and therefore requires that
1204          * the usage be {@link AudioAttributes#USAGE_MEDIA}.
1205          * @param offload true to require the offload path for playback.
1206          * @return the same Builder instance.
1207          */
setOffloadedPlayback(boolean offload)1208         public @NonNull Builder setOffloadedPlayback(boolean offload) {
1209             mOffload = offload;
1210             return this;
1211         }
1212 
1213         /**
1214          * Sets the tuner configuration for the {@code AudioTrack}.
1215          *
1216          * The {@link AudioTrack.TunerConfiguration} consists of parameters obtained from
1217          * the Android TV tuner API which indicate the audio content stream id and the
1218          * synchronization id for the {@code AudioTrack}.
1219          *
1220          * @param tunerConfiguration obtained by {@link AudioTrack.TunerConfiguration.Builder}.
1221          * @return the same Builder instance.
1222          * @hide
1223          */
1224         @SystemApi
1225         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
setTunerConfiguration( @onNull TunerConfiguration tunerConfiguration)1226         public @NonNull Builder setTunerConfiguration(
1227                 @NonNull TunerConfiguration tunerConfiguration) {
1228             if (tunerConfiguration == null) {
1229                 throw new IllegalArgumentException("tunerConfiguration is null");
1230             }
1231             mTunerConfiguration = tunerConfiguration;
1232             return this;
1233         }
1234 
1235         /**
1236          * Sets the tuner configuration for the {@code AudioTrack}.
1237          *
1238          * The {@link AudioTrack.TunerConfiguration} consists of parameters obtained from
1239          * the Android TV tuner API which indicate the audio content stream id and the
1240          * synchronization id for the {@code AudioTrack}.
1241          *
1242          * @param tunerConfiguration obtained by {@link AudioTrack.TunerConfiguration.Builder}.
1243          * @return the same Builder instance.
1244          * @hide
1245          */
1246 
1247         /**
1248          * @hide
1249          * Sets the {@link AudioTrack} call redirection mode.
1250          * Used when creating an AudioTrack to inject audio to call uplink path. The mode
1251          * indicates if the call is a PSTN call or a VoIP call in which case a dynamic audio
1252          * policy is created to use this track as the source for all capture with voice
1253          * communication preset.
1254          *
1255          * @param callRedirectionMode one of
1256          * {@link AudioManager#CALL_REDIRECT_NONE},
1257          * {@link AudioManager#CALL_REDIRECT_PSTN},
1258          * or {@link AAudioManager#CALL_REDIRECT_VOIP}.
1259          * @return the same Builder instance.
1260          * @throws IllegalArgumentException if {@code callRedirectionMode} is not valid.
1261          */
setCallRedirectionMode( @udioManager.CallRedirectionMode int callRedirectionMode)1262         public @NonNull Builder setCallRedirectionMode(
1263                 @AudioManager.CallRedirectionMode int callRedirectionMode) {
1264             switch (callRedirectionMode) {
1265                 case AudioManager.CALL_REDIRECT_NONE:
1266                 case AudioManager.CALL_REDIRECT_PSTN:
1267                 case AudioManager.CALL_REDIRECT_VOIP:
1268                     mCallRedirectionMode = callRedirectionMode;
1269                     break;
1270                 default:
1271                     throw new IllegalArgumentException(
1272                             "Invalid call redirection mode " + callRedirectionMode);
1273             }
1274             return this;
1275         }
1276 
        /**
         * Builds an AudioTrack that injects audio into the call uplink path, used by
         * build() when a call redirection mode is set.
         * Registers a loop-back dynamic audio policy whose injector mix captures the
         * VOICE_COMMUNICATION preset, then creates the track as the source of that mix.
         * The policy is unregistered automatically when the returned track is released.
         * @throws UnsupportedOperationException if the policy cannot be registered or the
         *     injection track cannot be created.
         */
        private @NonNull AudioTrack buildCallInjectionTrack() {
            // Match all capture done with the voice-communication preset, marked for
            // call redirection.
            AudioMixingRule audioMixingRule = new AudioMixingRule.Builder()
                    .addMixRule(AudioMixingRule.RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET,
                            new AudioAttributes.Builder()
                                   .setCapturePreset(MediaRecorder.AudioSource.VOICE_COMMUNICATION)
                                   .setForCallRedirection()
                                   .build())
                    .setTargetMixRole(AudioMixingRule.MIX_ROLE_INJECTOR)
                    .build();
            // Loop-back mix: audio written to this track is injected into matching captures.
            AudioMix audioMix = new AudioMix.Builder(audioMixingRule)
                    .setFormat(mFormat)
                    .setRouteFlags(AudioMix.ROUTE_FLAG_LOOP_BACK)
                    .build();
            AudioPolicy audioPolicy =
                    new AudioPolicy.Builder(/*context=*/ null).addMix(audioMix).build();
            if (AudioManager.registerAudioPolicyStatic(audioPolicy) != 0) {
                throw new UnsupportedOperationException("Error: could not register audio policy");
            }
            AudioTrack track = audioPolicy.createAudioTrackSource(audioMix);
            if (track == null) {
                throw new UnsupportedOperationException("Cannot create injection AudioTrack");
            }
            // Tie the policy's lifetime to the track so it is cleaned up on release().
            track.unregisterAudioPolicyOnRelease(audioPolicy);
            return track;
        }
1302 
1303         /**
1304          * Builds an {@link AudioTrack} instance initialized with all the parameters set
1305          * on this <code>Builder</code>.
1306          * @return a new successfully initialized {@link AudioTrack} instance.
1307          * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
1308          *     were incompatible, or if they are not supported by the device,
1309          *     or if the device was not available.
1310          */
build()1311         public @NonNull AudioTrack build() throws UnsupportedOperationException {
1312             if (mAttributes == null) {
1313                 mAttributes = new AudioAttributes.Builder()
1314                         .setUsage(AudioAttributes.USAGE_MEDIA)
1315                         .build();
1316             }
1317             switch (mPerformanceMode) {
1318             case PERFORMANCE_MODE_LOW_LATENCY:
1319                 mAttributes = new AudioAttributes.Builder(mAttributes)
1320                     .replaceFlags((mAttributes.getAllFlags()
1321                             | AudioAttributes.FLAG_LOW_LATENCY)
1322                             & ~AudioAttributes.FLAG_DEEP_BUFFER)
1323                     .build();
1324                 break;
1325             case PERFORMANCE_MODE_NONE:
1326                 if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
1327                     break; // do not enable deep buffer mode.
1328                 }
1329                 // permitted to fall through to enable deep buffer
1330             case PERFORMANCE_MODE_POWER_SAVING:
1331                 mAttributes = new AudioAttributes.Builder(mAttributes)
1332                 .replaceFlags((mAttributes.getAllFlags()
1333                         | AudioAttributes.FLAG_DEEP_BUFFER)
1334                         & ~AudioAttributes.FLAG_LOW_LATENCY)
1335                 .build();
1336                 break;
1337             }
1338 
1339             if (mFormat == null) {
1340                 mFormat = new AudioFormat.Builder()
1341                         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
1342                         //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
1343                         .setEncoding(AudioFormat.ENCODING_DEFAULT)
1344                         .build();
1345             }
1346 
1347             if (mCallRedirectionMode == AudioManager.CALL_REDIRECT_VOIP) {
1348                 return buildCallInjectionTrack();
1349             } else if (mCallRedirectionMode == AudioManager.CALL_REDIRECT_PSTN) {
1350                 mAttributes = new AudioAttributes.Builder(mAttributes)
1351                         .setForCallRedirection()
1352                         .build();
1353             }
1354 
1355             if (mOffload) {
1356                 if (mPerformanceMode == PERFORMANCE_MODE_LOW_LATENCY) {
1357                     throw new UnsupportedOperationException(
1358                             "Offload and low latency modes are incompatible");
1359                 }
1360                 if (AudioSystem.getDirectPlaybackSupport(mFormat, mAttributes)
1361                         == AudioSystem.DIRECT_NOT_SUPPORTED) {
1362                     throw new UnsupportedOperationException(
1363                             "Cannot create AudioTrack, offload format / attributes not supported");
1364                 }
1365             }
1366 
1367             // TODO: Check mEncapsulationMode compatibility with MODE_STATIC, etc?
1368 
1369             // If the buffer size is not specified in streaming mode,
1370             // use a single frame for the buffer size and let the
1371             // native code figure out the minimum buffer size.
1372             if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
1373                 int bytesPerSample = 1;
1374                 if (AudioFormat.isEncodingLinearFrames(mFormat.getEncoding())) {
1375                     try {
1376                         bytesPerSample = mFormat.getBytesPerSample(mFormat.getEncoding());
1377                     } catch (IllegalArgumentException e) {
1378                         // do nothing
1379                     }
1380                 }
1381                 mBufferSizeInBytes = mFormat.getChannelCount() * bytesPerSample;
1382             }
1383 
1384             try {
1385                 final AudioTrack track = new AudioTrack(
1386                         mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId, mOffload,
1387                         mEncapsulationMode, mTunerConfiguration);
1388                 if (track.getState() == STATE_UNINITIALIZED) {
1389                     // release is not necessary
1390                     throw new UnsupportedOperationException("Cannot create AudioTrack");
1391                 }
1392                 return track;
1393             } catch (IllegalArgumentException e) {
1394                 throw new UnsupportedOperationException(e.getMessage());
1395             }
1396         }
1397     }
1398 
1399     /**
1400      * Sets an {@link AudioPolicy} to automatically unregister when the track is released.
1401      *
1402      * <p>This is to prevent users of the call audio injection API from having to manually
1403      * unregister the policy that was used to create the track.
1404      */
unregisterAudioPolicyOnRelease(AudioPolicy audioPolicy)1405     private void unregisterAudioPolicyOnRelease(AudioPolicy audioPolicy) {
1406         mAudioPolicy = audioPolicy;
1407     }
1408 
    /**
     * Configures the delay and padding values for the current compressed stream playing
     * in offload mode.
     * This can only be used on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. The unit is frames, where a
     * frame indicates the number of samples per channel, e.g. 100 frames for a stereo compressed
     * stream corresponds to 200 decoded interleaved PCM samples.
     * @param delayInFrames number of frames to be ignored at the beginning of the stream. A value
     *     of 0 indicates no delay is to be applied.
     * @param paddingInFrames number of frames to be ignored at the end of the stream. A value
     *     of 0 indicates no padding is to be applied.
     * @throws IllegalArgumentException if either argument is negative.
     * @throws IllegalStateException if the track is not offloaded or not initialized.
     */
    public void setOffloadDelayPadding(@IntRange(from = 0) int delayInFrames,
            @IntRange(from = 0) int paddingInFrames) {
        // Argument checks first (padding before delay — order determines which
        // exception message callers observe), then state checks.
        if (paddingInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative padding");
        }
        if (delayInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative delay");
        }
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal use of delay/padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        // Cache the values so getOffloadDelay()/getOffloadPadding() can report them,
        // then push them to the native layer.
        mOffloadDelayFrames = delayInFrames;
        mOffloadPaddingFrames = paddingInFrames;
        native_set_delay_padding(delayInFrames, paddingInFrames);
    }
1439 
1440     /**
1441      * Return the decoder delay of an offloaded track, expressed in frames, previously set with
1442      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1443      * <p>This delay indicates the number of frames to be ignored at the beginning of the stream.
1444      * This value can only be queried on a track successfully initialized with
1445      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1446      * @return decoder delay expressed in frames.
1447      */
getOffloadDelay()1448     public @IntRange(from = 0) int getOffloadDelay() {
1449         if (!mOffloaded) {
1450             throw new IllegalStateException("Illegal query of delay on non-offloaded track");
1451         }
1452         if (mState == STATE_UNINITIALIZED) {
1453             throw new IllegalStateException("Illegal query of delay on uninitialized track");
1454         }
1455         return mOffloadDelayFrames;
1456     }
1457 
1458     /**
1459      * Return the decoder padding of an offloaded track, expressed in frames, previously set with
1460      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1461      * <p>This padding indicates the number of frames to be ignored at the end of the stream.
1462      * This value can only be queried on a track successfully initialized with
1463      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1464      * @return decoder padding expressed in frames.
1465      */
getOffloadPadding()1466     public @IntRange(from = 0) int getOffloadPadding() {
1467         if (!mOffloaded) {
1468             throw new IllegalStateException("Illegal query of padding on non-offloaded track");
1469         }
1470         if (mState == STATE_UNINITIALIZED) {
1471             throw new IllegalStateException("Illegal query of padding on uninitialized track");
1472         }
1473         return mOffloadPaddingFrames;
1474     }
1475 
    /**
     * Declares that the last write() operation on this track provided the last buffer of this
     * stream.
     * After the end of stream, previously set padding and delay values are ignored.
     * Can only be called if the AudioTrack is opened in offload mode
     * {@link Builder#setOffloadedPlayback(boolean)}.
     * Can only be called if the AudioTrack is in state {@link #PLAYSTATE_PLAYING}
     * {@link #getPlayState()}.
     * Use this method in the same thread as any write() operation.
     * @throws IllegalStateException if the track is not offloaded, not initialized,
     *     not playing, or has no registered StreamEventCallback.
     */
    public void setOffloadEndOfStream() {
        if (!mOffloaded) {
            throw new IllegalStateException("EOS not supported on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        if (mPlayState != PLAYSTATE_PLAYING) {
            throw new IllegalStateException("EOS not supported if not playing");
        }
        // A StreamEventCallback is required so the app can observe the end-of-stream
        // transition; check under the callback-list lock.
        synchronized (mStreamEventCbLock) {
            if (mStreamEventCbInfoList.size() == 0) {
                throw new IllegalStateException("EOS not supported without StreamEventCallback");
            }
        }

        // NOTE: mStreamEventCbLock is released before mPlayStateLock is taken;
        // the stop and the state transition must happen atomically under mPlayStateLock.
        synchronized (mPlayStateLock) {
            native_stop();
            mOffloadEosPending = true;
            mPlayState = PLAYSTATE_STOPPING;
        }
    }
1508 
1509     /**
1510      * Returns whether the track was built with {@link Builder#setOffloadedPlayback(boolean)} set
1511      * to {@code true}.
1512      * @return true if the track is using offloaded playback.
1513      */
isOffloadedPlayback()1514     public boolean isOffloadedPlayback() {
1515         return mOffloaded;
1516     }
1517 
1518     /**
1519      * Returns whether direct playback of an audio format with the provided attributes is
1520      * currently supported on the system.
1521      * <p>Direct playback means that the audio stream is not resampled or downmixed
1522      * by the framework. Checking for direct support can help the app select the representation
1523      * of audio content that most closely matches the capabilities of the device and peripherials
1524      * (e.g. A/V receiver) connected to it. Note that the provided stream can still be re-encoded
1525      * or mixed with other streams, if needed.
1526      * <p>Also note that this query only provides information about the support of an audio format.
1527      * It does not indicate whether the resources necessary for the playback are available
1528      * at that instant.
1529      * @param format a non-null {@link AudioFormat} instance describing the format of
1530      *   the audio data.
1531      * @param attributes a non-null {@link AudioAttributes} instance.
1532      * @return true if the given audio format can be played directly.
1533      * @deprecated Use {@link AudioManager#getDirectPlaybackSupport(AudioFormat, AudioAttributes)}
1534      *             instead.
1535      */
1536     @Deprecated
isDirectPlaybackSupported(@onNull AudioFormat format, @NonNull AudioAttributes attributes)1537     public static boolean isDirectPlaybackSupported(@NonNull AudioFormat format,
1538             @NonNull AudioAttributes attributes) {
1539         if (format == null) {
1540             throw new IllegalArgumentException("Illegal null AudioFormat argument");
1541         }
1542         if (attributes == null) {
1543             throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1544         }
1545         return native_is_direct_output_supported(format.getEncoding(), format.getSampleRate(),
1546                 format.getChannelMask(), format.getChannelIndexMask(),
1547                 attributes.getContentType(), attributes.getUsage(), attributes.getFlags());
1548     }
1549 
1550     /*
1551      * The MAX_LEVEL should be exactly representable by an IEEE 754-2008 base32 float.
1552      * This means fractions must be divisible by a power of 2. For example,
1553      * 10.25f is OK as 0.25 is 1/4, but 10.1f is NOT OK as 1/10 is not expressable by
1554      * a finite binary fraction.
1555      *
1556      * 48.f is the nominal max for API level {@link android os.Build.VERSION_CODES#R}.
1557      * We use this to suggest a baseline range for implementation.
1558      *
1559      * The API contract specification allows increasing this value in a future
1560      * API release, but not decreasing this value.
1561      */
1562     private static final float MAX_AUDIO_DESCRIPTION_MIX_LEVEL = 48.f;
1563 
isValidAudioDescriptionMixLevel(float level)1564     private static boolean isValidAudioDescriptionMixLevel(float level) {
1565         return !(Float.isNaN(level) || level > MAX_AUDIO_DESCRIPTION_MIX_LEVEL);
1566     }
1567 
1568     /**
1569      * Sets the Audio Description mix level in dB.
1570      *
1571      * For AudioTracks incorporating a secondary Audio Description stream
1572      * (where such contents may be sent through an Encapsulation Mode
1573      * other than {@link #ENCAPSULATION_MODE_NONE}).
1574      * or internally by a HW channel),
1575      * the level of mixing of the Audio Description to the Main Audio stream
1576      * is controlled by this method.
1577      *
1578      * Such mixing occurs <strong>prior</strong> to overall volume scaling.
1579      *
1580      * @param level a floating point value between
1581      *     {@code Float.NEGATIVE_INFINITY} to {@code +48.f},
1582      *     where {@code Float.NEGATIVE_INFINITY} means the Audio Description is not mixed
1583      *     and a level of {@code 0.f} means the Audio Description is mixed without scaling.
1584      * @return true on success, false on failure.
1585      */
setAudioDescriptionMixLeveldB( @loatRangeto = 48.f, toInclusive = true) float level)1586     public boolean setAudioDescriptionMixLeveldB(
1587             @FloatRange(to = 48.f, toInclusive = true) float level) {
1588         if (!isValidAudioDescriptionMixLevel(level)) {
1589             throw new IllegalArgumentException("level is out of range" + level);
1590         }
1591         return native_set_audio_description_mix_level_db(level) == SUCCESS;
1592     }
1593 
1594     /**
1595      * Returns the Audio Description mix level in dB.
1596      *
1597      * If Audio Description mixing is unavailable from the hardware device,
1598      * a value of {@code Float.NEGATIVE_INFINITY} is returned.
1599      *
1600      * @return the current Audio Description Mix Level in dB.
1601      *     A value of {@code Float.NEGATIVE_INFINITY} means
1602      *     that the audio description is not mixed or
1603      *     the hardware is not available.
1604      *     This should reflect the <strong>true</strong> internal device mix level;
1605      *     hence the application might receive any floating value
1606      *     except {@code Float.NaN}.
1607      */
getAudioDescriptionMixLeveldB()1608     public float getAudioDescriptionMixLeveldB() {
1609         float[] level = { Float.NEGATIVE_INFINITY };
1610         try {
1611             final int status = native_get_audio_description_mix_level_db(level);
1612             if (status != SUCCESS || Float.isNaN(level[0])) {
1613                 return Float.NEGATIVE_INFINITY;
1614             }
1615         } catch (Exception e) {
1616             return Float.NEGATIVE_INFINITY;
1617         }
1618         return level[0];
1619     }
1620 
isValidDualMonoMode(@ualMonoMode int dualMonoMode)1621     private static boolean isValidDualMonoMode(@DualMonoMode int dualMonoMode) {
1622         switch (dualMonoMode) {
1623             case DUAL_MONO_MODE_OFF:
1624             case DUAL_MONO_MODE_LR:
1625             case DUAL_MONO_MODE_LL:
1626             case DUAL_MONO_MODE_RR:
1627                 return true;
1628             default:
1629                 return false;
1630         }
1631     }
1632 
1633     /**
1634      * Sets the Dual Mono mode presentation on the output device.
1635      *
1636      * The Dual Mono mode is generally applied to stereo audio streams
1637      * where the left and right channels come from separate sources.
1638      *
1639      * For compressed audio, where the decoding is done in hardware,
1640      * Dual Mono presentation needs to be performed
1641      * by the hardware output device
1642      * as the PCM audio is not available to the framework.
1643      *
1644      * @param dualMonoMode one of {@link #DUAL_MONO_MODE_OFF},
1645      *     {@link #DUAL_MONO_MODE_LR},
1646      *     {@link #DUAL_MONO_MODE_LL},
1647      *     {@link #DUAL_MONO_MODE_RR}.
1648      *
1649      * @return true on success, false on failure if the output device
1650      *     does not support Dual Mono mode.
1651      */
setDualMonoMode(@ualMonoMode int dualMonoMode)1652     public boolean setDualMonoMode(@DualMonoMode int dualMonoMode) {
1653         if (!isValidDualMonoMode(dualMonoMode)) {
1654             throw new IllegalArgumentException(
1655                     "Invalid Dual Mono mode " + dualMonoMode);
1656         }
1657         return native_set_dual_mono_mode(dualMonoMode) == SUCCESS;
1658     }
1659 
1660     /**
1661      * Returns the Dual Mono mode presentation setting.
1662      *
1663      * If no Dual Mono presentation is available for the output device,
1664      * then {@link #DUAL_MONO_MODE_OFF} is returned.
1665      *
1666      * @return one of {@link #DUAL_MONO_MODE_OFF},
1667      *     {@link #DUAL_MONO_MODE_LR},
1668      *     {@link #DUAL_MONO_MODE_LL},
1669      *     {@link #DUAL_MONO_MODE_RR}.
1670      */
getDualMonoMode()1671     public @DualMonoMode int getDualMonoMode() {
1672         int[] dualMonoMode = { DUAL_MONO_MODE_OFF };
1673         try {
1674             final int status = native_get_dual_mono_mode(dualMonoMode);
1675             if (status != SUCCESS || !isValidDualMonoMode(dualMonoMode[0])) {
1676                 return DUAL_MONO_MODE_OFF;
1677             }
1678         } catch (Exception e) {
1679             return DUAL_MONO_MODE_OFF;
1680         }
1681         return dualMonoMode[0];
1682     }
1683 
    // mask of all the positional channels supported, however the allowed combinations
    // are further restricted by the matching left/right rule and
    // AudioSystem.OUT_CHANNEL_COUNT_MAX
    // (both enforced by isMultichannelConfigSupported()).
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY_2 |
            AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT;
1714 
1715     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
1716     // power saving to be automatically enabled for an AudioTrack. Returns false if
1717     // power saving is already enabled in the attributes parameter.
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)1718     private static boolean shouldEnablePowerSaving(
1719             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
1720             int bufferSizeInBytes, int mode) {
1721         // If no attributes, OK
1722         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
1723         // Only consider flags that are not compatible with FLAG_DEEP_BUFFER. We include
1724         // FLAG_DEEP_BUFFER because if set the request is explicit and
1725         // shouldEnablePowerSaving() should return false.
1726         final int flags = attributes.getAllFlags()
1727                 & (AudioAttributes.FLAG_DEEP_BUFFER | AudioAttributes.FLAG_LOW_LATENCY
1728                     | AudioAttributes.FLAG_HW_AV_SYNC | AudioAttributes.FLAG_BEACON);
1729 
1730         if (attributes != null &&
1731                 (flags != 0  // cannot have any special flags
1732                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
1733                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
1734                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
1735                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
1736             return false;
1737         }
1738 
1739         // Format must be fully specified and be linear pcm
1740         if (format == null
1741                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
1742                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
1743                 || !AudioFormat.isValidEncoding(format.getEncoding())
1744                 || format.getChannelCount() < 1) {
1745             return false;
1746         }
1747 
1748         // Mode must be streaming
1749         if (mode != MODE_STREAM) {
1750             return false;
1751         }
1752 
1753         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
1754         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
1755         if (bufferSizeInBytes != 0) {
1756             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
1757             final int MILLIS_PER_SECOND = 1000;
1758             final long bufferTargetSize =
1759                     BUFFER_TARGET_MODE_STREAM_MS
1760                     * format.getChannelCount()
1761                     * format.getBytesPerSample(format.getEncoding())
1762                     * format.getSampleRate()
1763                     / MILLIS_PER_SECOND;
1764             if (bufferSizeInBytes < bufferTargetSize) {
1765                 return false;
1766             }
1767         }
1768 
1769         return true;
1770     }
1771 
    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // SAMPLE_RATE_UNSPECIFIED is accepted: the rate is then resolved elsewhere.
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 passthrough is expected to be 2- or 8-channel; warn (not throw)
        // on other configurations for app compatibility.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO
                && AudioFormat.channelCountFromOutChannelMask(channelConfig) != 8) {
            Log.w(TAG, "ENCODING_IEC61937 is configured with channel mask as " + channelConfig
                    + ", which is not 2 or 8 channels");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
                throw new IllegalArgumentException(
                        "Unsupported channel mask configuration " + channelConfig
                        + " for encoding " + audioFormat);
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // As of S, we accept up to 24 channel index mask.
            final int fullIndexMask = (1 << AudioSystem.FCC_24) - 1;
            final int channelIndexCount = Integer.bitCount(channelIndexMask);
            final boolean accepted = (channelIndexMask & ~fullIndexMask) == 0
                    && (!AudioFormat.isEncodingLinearFrames(audioFormat)  // compressed OK
                            || channelIndexCount <= AudioSystem.OUT_CHANNEL_COUNT_MAX); // PCM
            if (!accepted) {
                throw new IllegalArgumentException(
                        "Unsupported channel index mask configuration " + channelIndexMask
                        + " for encoding " + audioFormat);
            }
            // If both a position mask and an index mask were given, their counts must agree.
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // MODE_STATIC additionally requires a linear PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
1868 
1869     // General pair map
1870     private static final HashMap<String, Integer> CHANNEL_PAIR_MAP = new HashMap<>() {{
1871         put("front", AudioFormat.CHANNEL_OUT_FRONT_LEFT
1872                 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT);
1873         put("back", AudioFormat.CHANNEL_OUT_BACK_LEFT
1874                 | AudioFormat.CHANNEL_OUT_BACK_RIGHT);
1875         put("front of center", AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER
1876                 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER);
1877         put("side", AudioFormat.CHANNEL_OUT_SIDE_LEFT
1878                 | AudioFormat.CHANNEL_OUT_SIDE_RIGHT);
1879         put("top front", AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT
1880                 | AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT);
1881         put("top back", AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT
1882                 | AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT);
1883         put("top side", AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT
1884                 | AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT);
1885         put("bottom front", AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT
1886                 | AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT);
1887         put("front wide", AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT
1888                 | AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT);
1889     }};
1890 
    /**
     * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
     * @param channelConfig the mask to validate
     * @param encoding the audio encoding the mask is to be used with; limits the maximum
     *     channel count (PCM vs compressed)
     * @return false if the AudioTrack can't be used with such a mask
     */
    private static boolean isMultichannelConfigSupported(int channelConfig, int encoding) {
        // check for unsupported channels
        if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
            loge("Channel configuration features unsupported channels");
            return false;
        }
        final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        final int channelCountLimit;
        try {
            channelCountLimit = AudioFormat.isEncodingLinearFrames(encoding)
                    ? AudioSystem.OUT_CHANNEL_COUNT_MAX  // PCM limited to OUT_CHANNEL_COUNT_MAX
                    : AudioSystem.FCC_24;                // Compressed limited to 24 channels
        } catch (IllegalArgumentException iae) {
            loge("Unsupported encoding " + iae);
            return false;
        }
        if (channelCount > channelCountLimit) {
            loge("Channel configuration contains too many channels for encoding "
                    + encoding + "(" + channelCount + " > " + channelCountLimit + ")");
            return false;
        }
        // check for unsupported multichannel combinations:
        // - FL/FR must be present
        // - L/R channels must be paired (e.g. no single L channel)
        final int frontPair =
                AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        if ((channelConfig & frontPair) != frontPair) {
                loge("Front channels must be present in multichannel configurations");
                return false;
        }
        // Check all pairs to see that they are matched (front duplicated here).
        for (HashMap.Entry<String, Integer> e : CHANNEL_PAIR_MAP.entrySet()) {
            final int positionPair = e.getValue();
            // A pair is either fully absent or fully present; a lone half is rejected.
            if ((channelConfig & positionPair) != 0
                    && (channelConfig & positionPair) != positionPair) {
                loge("Channel pair (" + e.getKey() + ") cannot be used independently");
                return false;
            }
        }
        return true;
    }
1937 
1938 
1939     // Convenience method for the constructor's audio buffer size check.
1940     // preconditions:
1941     //    mChannelCount is valid
1942     //    mAudioFormat is valid
1943     // postcondition:
1944     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)1945     private void audioBuffSizeCheck(int audioBufferSize) {
1946         // NB: this section is only valid with PCM or IEC61937 data.
1947         //     To update when supporting compressed formats
1948         int frameSizeInBytes;
1949         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
1950             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
1951         } else {
1952             frameSizeInBytes = 1;
1953         }
1954         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
1955             throw new IllegalArgumentException("Invalid audio buffer size.");
1956         }
1957 
1958         mNativeBufferSizeInBytes = audioBufferSize;
1959         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
1960     }
1961 
1962 
    /**
     * Releases the native AudioTrack resources.
     */
    public void release() {
        // Stop stream-event delivery first so no callback runs against a
        // partially-released track.
        synchronized (mStreamEventCbLock){
            endStreamEventHandling();
        }
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch(IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        // Unregister the audio policy associated with this track, if one was set.
        if (mAudioPolicy != null) {
            AudioManager.unregisterAudioPolicyAsyncStatic(mAudioPolicy);
            mAudioPolicy = null;
        }

        baseRelease();
        native_release();
        // Mark the track unusable and wake any thread blocked on the play-state lock.
        synchronized (mPlayStateLock) {
            mState = STATE_UNINITIALIZED;
            mPlayState = PLAYSTATE_STOPPED;
            mPlayStateLock.notify();
        }
    }
1990 
    // Last-resort cleanup of native resources if release() was never called.
    @Override
    protected void finalize() {
        tryToDisableNativeRoutingCallback();
        baseRelease();
        native_finalize();
    }
1997 
1998     //--------------------------------------------------------------------------
1999     // Getters
2000     //--------------------
2001     /**
2002      * Returns the minimum gain value, which is the constant 0.0.
2003      * Gain values less than 0.0 will be clamped to 0.0.
2004      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2005      * @return the minimum value, which is the constant 0.0.
2006      */
getMinVolume()2007     static public float getMinVolume() {
2008         return GAIN_MIN;
2009     }
2010 
2011     /**
2012      * Returns the maximum gain value, which is greater than or equal to 1.0.
2013      * Gain values greater than the maximum will be clamped to the maximum.
2014      * <p>The word "volume" in the API name is historical; this is actually a gain.
2015      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
2016      * corresponds to a gain of 0 dB (sample values left unmodified).
2017      * @return the maximum value, which is greater than or equal to 1.0.
2018      */
getMaxVolume()2019     static public float getMaxVolume() {
2020         return GAIN_MAX;
2021     }
2022 
    /**
     * Returns the configured audio source sample rate in Hz.
     * The initial source sample rate depends on the constructor parameters,
     * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
     * If the constructor had a specific sample rate, then the initial sink sample rate is that
     * value.
     * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
     * then the initial sink sample rate is a route-dependent default value based on the source [sic].
     * @return the configured source sample rate in Hz
     */
    public int getSampleRate() {
        // Returns the cached value; not a native query.
        return mSampleRate;
    }
2035 
    /**
     * Returns the current playback sample rate in Hz.
     * @return the playback sample rate as reported by the native layer
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }
2042 
    /**
     * Returns the current playback parameters.
     * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
     * @return current {@link PlaybackParams}, queried from the native layer.
     * @throws IllegalStateException if track is not initialized.
     */
    public @NonNull PlaybackParams getPlaybackParams() {
        return native_get_playback_params();
    }
2052 
2053     /**
2054      * Returns the {@link AudioAttributes} used in configuration.
2055      * If a {@code streamType} is used instead of an {@code AudioAttributes}
2056      * to configure the AudioTrack
2057      * (the use of {@code streamType} for configuration is deprecated),
2058      * then the {@code AudioAttributes}
2059      * equivalent to the {@code streamType} is returned.
2060      * @return The {@code AudioAttributes} used to configure the AudioTrack.
2061      * @throws IllegalStateException If the track is not initialized.
2062      */
getAudioAttributes()2063     public @NonNull AudioAttributes getAudioAttributes() {
2064         if (mState == STATE_UNINITIALIZED || mConfiguredAudioAttributes == null) {
2065             throw new IllegalStateException("track not initialized");
2066         }
2067         return mConfiguredAudioAttributes;
2068     }
2069 
    /**
     * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
     * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @return the encoding captured at configuration time
     */
    public int getAudioFormat() {
        // Returns the cached value; not a native query.
        return mAudioFormat;
    }
2077 
    /**
     * Returns the volume stream type of this AudioTrack.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
     * {@link AudioManager#STREAM_ACCESSIBILITY}.
     * @return the stream type captured at configuration time
     */
    public int getStreamType() {
        return mStreamType;
    }
2089 
    /**
     * Returns the configured channel position mask.
     * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
     * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
     * This method may return {@link AudioFormat#CHANNEL_INVALID} if
     * a channel index mask was used. Consider
     * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
     * which contains both the channel position mask and the channel index mask.
     * @return the channel position mask captured at configuration time
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }
2102 
2103     /**
2104      * Returns the configured <code>AudioTrack</code> format.
2105      * @return an {@link AudioFormat} containing the
2106      * <code>AudioTrack</code> parameters at the time of configuration.
2107      */
getFormat()2108     public @NonNull AudioFormat getFormat() {
2109         AudioFormat.Builder builder = new AudioFormat.Builder()
2110             .setSampleRate(mSampleRate)
2111             .setEncoding(mAudioFormat);
2112         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
2113             builder.setChannelMask(mChannelConfiguration);
2114         }
2115         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
2116             builder.setChannelIndexMask(mChannelIndexMask);
2117         }
2118         return builder.build();
2119     }
2120 
    /**
     * Returns the configured number of channels.
     * @return the channel count captured at configuration time
     */
    public int getChannelCount() {
        return mChannelCount;
    }
2127 
    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_UNINITIALIZED
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     */
    public int getState() {
        // Unsynchronized read; mState is also reset under mPlayStateLock in release().
        return mState;
    }
2139 
2140     /**
2141      * Returns the playback state of the AudioTrack instance.
2142      * @see #PLAYSTATE_STOPPED
2143      * @see #PLAYSTATE_PAUSED
2144      * @see #PLAYSTATE_PLAYING
2145      */
getPlayState()2146     public int getPlayState() {
2147         synchronized (mPlayStateLock) {
2148             switch (mPlayState) {
2149                 case PLAYSTATE_STOPPING:
2150                     return PLAYSTATE_PLAYING;
2151                 case PLAYSTATE_PAUSED_STOPPING:
2152                     return PLAYSTATE_PAUSED;
2153                 default:
2154                     return mPlayState;
2155             }
2156         }
2157     }
2158 
2159 
    /**
     * Returns the effective size of the <code>AudioTrack</code> buffer
     * that the application writes to.
     * <p> This will be less than or equal to the result of
     * {@link #getBufferCapacityInFrames()}.
     * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
     * <p> If the track is subsequently routed to a different output sink, the buffer
     * size and capacity may enlarge to accommodate.
     * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
     * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
     * the size of the <code>AudioTrack</code> buffer in bytes.
     * <p> See also {@link AudioManager#getProperty(String)} for key
     * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     * @return current size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException if track is not initialized.
     */
    public @IntRange (from = 0) int getBufferSizeInFrames() {
        // Queried from the native layer each call; may change after rerouting.
        return native_get_buffer_size_frames();
    }
2179 
2180     /**
2181      * Limits the effective size of the <code>AudioTrack</code> buffer
2182      * that the application writes to.
2183      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
2184      * If a blocking write is used then the write will block until the data
2185      * can fit within this limit.
2186      * <p>Changing this limit modifies the latency associated with
2187      * the buffer for this track. A smaller size will give lower latency
2188      * but there may be more glitches due to buffer underruns.
2189      * <p>The actual size used may not be equal to this requested size.
2190      * It will be limited to a valid range with a maximum of
2191      * {@link #getBufferCapacityInFrames()}.
2192      * It may also be adjusted slightly for internal reasons.
2193      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
2194      * will be returned.
2195      * <p>This method is supported for PCM audio at all API levels.
2196      * Compressed audio is supported in API levels 33 and above.
2197      * For compressed streams the size of a frame is considered to be exactly one byte.
2198      *
2199      * @param bufferSizeInFrames requested buffer size in frames
2200      * @return the actual buffer size in frames or an error code,
2201      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2202      * @throws IllegalStateException if track is not initialized.
2203      */
setBufferSizeInFrames(@ntRange from = 0) int bufferSizeInFrames)2204     public int setBufferSizeInFrames(@IntRange (from = 0) int bufferSizeInFrames) {
2205         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
2206             return ERROR_INVALID_OPERATION;
2207         }
2208         if (bufferSizeInFrames < 0) {
2209             return ERROR_BAD_VALUE;
2210         }
2211         return native_set_buffer_size_frames(bufferSizeInFrames);
2212     }
2213 
    /**
     *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
     *  <p> If the track's creation mode is {@link #MODE_STATIC},
     *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
     *  A static track's frame count will not change.
     *  <p> If the track's creation mode is {@link #MODE_STREAM},
     *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
     *  For streaming tracks, this value may be rounded up to a larger value if needed by
     *  the target output sink, and
     *  if the track is subsequently routed to a different output sink, the
     *  frame count may enlarge to accommodate.
     *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
     *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
     *  the size of the <code>AudioTrack</code> buffer in bytes.
     *  <p> See also {@link AudioManager#getProperty(String)} for key
     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
     *  @throws IllegalStateException if track is not initialized.
     */
    public @IntRange (from = 0) int getBufferCapacityInFrames() {
        // Queried from the native layer each call; may change after rerouting.
        return native_get_buffer_capacity_frames();
    }
2236 
    /**
     * Sets the streaming start threshold for an <code>AudioTrack</code>.
     * <p> The streaming start threshold is the buffer level that the written audio
     * data must reach for audio streaming to start after {@link #play()} is called.
     * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
     *
     * @param startThresholdInFrames the desired start threshold.
     * @return the actual start threshold in frames value. This is
     *         an integer between 1 to the buffer capacity
     *         (see {@link #getBufferCapacityInFrames()}),
     *         and might change if the output sink changes after track creation.
     * @throws IllegalStateException if the track is not initialized or the
     *         track transfer mode is not {@link #MODE_STREAM}.
     * @throws IllegalArgumentException if startThresholdInFrames is not positive.
     * @see #getStartThresholdInFrames()
     */
    public @IntRange(from = 1) int setStartThresholdInFrames(
            @IntRange (from = 1) int startThresholdInFrames) {
        // Validation order is part of the contract: state, then mode, then value.
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("AudioTrack is not initialized");
        }
        if (mDataLoadMode != MODE_STREAM) {
            throw new IllegalStateException("AudioTrack must be a streaming track");
        }
        if (startThresholdInFrames < 1) {
            throw new IllegalArgumentException("startThresholdInFrames "
                    + startThresholdInFrames + " must be positive");
        }
        // The native layer clamps to [1, buffer capacity] and returns the applied value.
        return native_setStartThresholdInFrames(startThresholdInFrames);
    }
2267 
    /**
     * Returns the streaming start threshold of the <code>AudioTrack</code>.
     * <p> The streaming start threshold is the buffer level that the written audio
     * data must reach for audio streaming to start after {@link #play()} is called.
     * When an <code>AudioTrack</code> is created, the streaming start threshold
     * is the buffer capacity in frames. If the buffer size in frames is reduced
     * by {@link #setBufferSizeInFrames(int)} to a value smaller than the start threshold
     * then that value will be used instead for the streaming start threshold.
     * <p> For compressed streams, the size of a frame is considered to be exactly one byte.
     *
     * @return the current start threshold in frames value. This is
     *         an integer between 1 to the buffer capacity
     *         (see {@link #getBufferCapacityInFrames()}),
     *         and might change if the  output sink changes after track creation.
     * @throws IllegalStateException if the track is not initialized or the
     *         track is not {@link #MODE_STREAM}.
     * @see #setStartThresholdInFrames(int)
     */
    public @IntRange (from = 1) int getStartThresholdInFrames() {
        // Same precondition checks as setStartThresholdInFrames(int).
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("AudioTrack is not initialized");
        }
        if (mDataLoadMode != MODE_STREAM) {
            throw new IllegalStateException("AudioTrack must be a streaming track");
        }
        return native_getStartThresholdInFrames();
    }
2295 
    /**
     *  Returns the frame count of the native <code>AudioTrack</code> buffer.
     *  @return current size in frames of the <code>AudioTrack</code> buffer.
     *  @throws IllegalStateException
     *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
     */
    @Deprecated
    protected int getNativeFrameCount() {
        // NOTE(review): this calls the capacity query, matching
        // getBufferCapacityInFrames() rather than getBufferSizeInFrames() as the
        // @deprecated tag suggests — confirm which replacement is intended.
        return native_get_buffer_capacity_frames();
    }
2306 
    /**
     * Returns marker position expressed in frames.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }
2315 
    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     * @return the update period in frames, as reported by the native layer
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }
2323 
    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
     * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
     * the total number of frames played since reset,
     * <i>not</i> the current offset within the buffer.
     */
    public int getPlaybackHeadPosition() {
        return native_get_position();
    }
2338 
    /**
     * Returns this track's estimated latency in milliseconds. This includes the latency due
     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
     *
     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
     * a better solution.
     * @hide
     */
    @UnsupportedAppUsage(trackingBug = 130237544)
    public int getLatency() {
        return native_get_latency();
    }
2351 
    /**
     * Returns the number of underrun occurrences in the application-level write buffer
     * since the AudioTrack was created.
     * An underrun occurs if the application does not write audio
     * data quickly enough, causing the buffer to underflow
     * and a potential audio glitch or pop.
     * <p>
     * Underruns are less likely when buffer sizes are large.
     * It may be possible to eliminate underruns by recreating the AudioTrack with
     * a larger buffer.
     * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
     * effective size of the buffer.
     * @return cumulative underrun count, as reported by the native layer
     */
    public int getUnderrunCount() {
        return native_get_underrun_count();
    }
2368 
2369     /**
2370      * Returns the current performance mode of the {@link AudioTrack}.
2371      *
2372      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
2373      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
2374      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
2375      * Use {@link AudioTrack.Builder#setPerformanceMode}
2376      * in the {@link AudioTrack.Builder} to enable a performance mode.
2377      * @throws IllegalStateException if track is not initialized.
2378      */
getPerformanceMode()2379     public @PerformanceMode int getPerformanceMode() {
2380         final int flags = native_get_flags();
2381         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
2382             return PERFORMANCE_MODE_LOW_LATENCY;
2383         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
2384             return PERFORMANCE_MODE_POWER_SAVING;
2385         } else {
2386             return PERFORMANCE_MODE_NONE;
2387         }
2388     }
2389 
2390     /**
2391      *  Returns the output sample rate in Hz for the specified stream type.
2392      */
getNativeOutputSampleRate(int streamType)2393     static public int getNativeOutputSampleRate(int streamType) {
2394         return native_get_output_sample_rate(streamType);
2395     }
2396 
2397     /**
2398      * Returns the estimated minimum buffer size required for an AudioTrack
2399      * object to be created in the {@link #MODE_STREAM} mode.
2400      * The size is an estimate because it does not consider either the route or the sink,
2401      * since neither is known yet.  Note that this size doesn't
2402      * guarantee a smooth playback under load, and higher values should be chosen according to
2403      * the expected frequency at which the buffer will be refilled with additional data to play.
2404      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
2405      * to a higher value than the initial source sample rate, be sure to configure the buffer size
2406      * based on the highest planned sample rate.
2407      * @param sampleRateInHz the source sample rate expressed in Hz.
2408      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
2409      * @param channelConfig describes the configuration of the audio channels.
2410      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
2411      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
2412      * @param audioFormat the format in which the audio data is represented.
2413      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
2414      *   {@link AudioFormat#ENCODING_PCM_8BIT},
2415      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
2416      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
2417      *   or {@link #ERROR} if unable to query for output properties,
2418      *   or the minimum buffer size expressed in bytes.
2419      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)2420     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
2421         int channelCount = 0;
2422         switch(channelConfig) {
2423         case AudioFormat.CHANNEL_OUT_MONO:
2424         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
2425             channelCount = 1;
2426             break;
2427         case AudioFormat.CHANNEL_OUT_STEREO:
2428         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
2429             channelCount = 2;
2430             break;
2431         default:
2432             if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
2433                 loge("getMinBufferSize(): Invalid channel configuration.");
2434                 return ERROR_BAD_VALUE;
2435             } else {
2436                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
2437             }
2438         }
2439 
2440         if (!AudioFormat.isPublicEncoding(audioFormat)) {
2441             loge("getMinBufferSize(): Invalid audio format.");
2442             return ERROR_BAD_VALUE;
2443         }
2444 
2445         // sample rate, note these values are subject to change
2446         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
2447         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
2448                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
2449             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
2450             return ERROR_BAD_VALUE;
2451         }
2452 
2453         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
2454         if (size <= 0) {
2455             loge("getMinBufferSize(): error querying hardware");
2456             return ERROR;
2457         }
2458         else {
2459             return size;
2460         }
2461     }
2462 
    /**
     * Returns the audio session ID.
     *
     * @return the ID of the audio session this AudioTrack belongs to.
     */
    public int getAudioSessionId() {
        // Returns the cached value; not a native query.
        return mSessionId;
    }
2471 
2472    /**
2473     * Poll for a timestamp on demand.
2474     * <p>
2475     * If you need to track timestamps during initial warmup or after a routing or mode change,
2476     * you should request a new timestamp periodically until the reported timestamps
2477     * show that the frame position is advancing, or until it becomes clear that
2478     * timestamps are unavailable for this route.
2479     * <p>
2480     * After the clock is advancing at a stable rate,
2481     * query for a new timestamp approximately once every 10 seconds to once per minute.
2482     * Calling this method more often is inefficient.
2483     * It is also counter-productive to call this method more often than recommended,
2484     * because the short-term differences between successive timestamp reports are not meaningful.
2485     * If you need a high-resolution mapping between frame position and presentation time,
2486     * consider implementing that at application level, based on low-resolution timestamps.
2487     * <p>
2488     * The audio data at the returned position may either already have been
2489     * presented, or may have not yet been presented but is committed to be presented.
2490     * It is not possible to request the time corresponding to a particular position,
2491     * or to request the (fractional) position corresponding to a particular time.
2492     * If you need such features, consider implementing them at application level.
2493     *
2494     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2495     *        and owned by caller.
2496     * @return true if a timestamp is available, or false if no timestamp is available.
2497     *         If a timestamp is available,
2498     *         the AudioTimestamp instance is filled in with a position in frame units, together
2499     *         with the estimated time when that frame was presented or is committed to
2500     *         be presented.
2501     *         In the case that no timestamp is available, any supplied instance is left unaltered.
2502     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
2503     *         or during and immediately after a route change.
2504     *         A timestamp is permanently unavailable for a given route if the route does not support
2505     *         timestamps.  In this case, the approximate frame position can be obtained
2506     *         using {@link #getPlaybackHeadPosition}.
2507     *         However, it may be useful to continue to query for
2508     *         timestamps occasionally, to recover after a route change.
2509     */
2510     // Add this text when the "on new timestamp" API is added:
2511     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)2512     public boolean getTimestamp(AudioTimestamp timestamp)
2513     {
2514         if (timestamp == null) {
2515             throw new IllegalArgumentException();
2516         }
2517         // It's unfortunate, but we have to either create garbage every time or use synchronized
2518         long[] longArray = new long[2];
2519         int ret = native_get_timestamp(longArray);
2520         if (ret != SUCCESS) {
2521             return false;
2522         }
2523         timestamp.framePosition = longArray[0];
2524         timestamp.nanoTime = longArray[1];
2525         return true;
2526     }
2527 
2528     /**
2529      * Poll for a timestamp on demand.
2530      * <p>
2531      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
2532      *
2533      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2534      *        and owned by caller.
2535      * @return {@link #SUCCESS} if a timestamp is available
2536      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
2537      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
2538      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
2539      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
2540      *         for the timestamp.
2541      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2542      *         needs to be recreated.
2543      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
2544      *         timestamps. In this case, the approximate frame position can be obtained
2545      *         using {@link #getPlaybackHeadPosition}.
2546      *
2547      *         The AudioTimestamp instance is filled in with a position in frame units, together
2548      *         with the estimated time when that frame was presented or is committed to
2549      *         be presented.
2550      * @hide
2551      */
2552      // Add this text when the "on new timestamp" API is added:
2553      //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestampWithStatus(AudioTimestamp timestamp)2554      public int getTimestampWithStatus(AudioTimestamp timestamp)
2555      {
2556          if (timestamp == null) {
2557              throw new IllegalArgumentException();
2558          }
2559          // It's unfortunate, but we have to either create garbage every time or use synchronized
2560          long[] longArray = new long[2];
2561          int ret = native_get_timestamp(longArray);
2562          timestamp.framePosition = longArray[0];
2563          timestamp.nanoTime = longArray[1];
2564          return ret;
2565      }
2566 
2567     /**
2568      *  Return Metrics data about the current AudioTrack instance.
2569      *
2570      * @return a {@link PersistableBundle} containing the set of attributes and values
2571      * available for the media being handled by this instance of AudioTrack
     * The attributes are described in {@link MetricsConstants}.
2573      *
2574      * Additional vendor-specific fields may also be present in
2575      * the return value.
2576      */
getMetrics()2577     public PersistableBundle getMetrics() {
2578         PersistableBundle bundle = native_getMetrics();
2579         return bundle;
2580     }
2581 
    // Native counterpart of getMetrics(); returns the metrics bundle for this track.
    private native PersistableBundle native_getMetrics();
2583 
2584     //--------------------------------------------------------------------------
2585     // Initialization / configuration
2586     //--------------------
2587     /**
2588      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2589      * for each periodic playback head position update.
2590      * Notifications will be received in the same thread as the one in which the AudioTrack
2591      * instance was created.
     * @param listener the listener to register, or null to clear a previously set listener
2593      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener)2594     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
2595         setPlaybackPositionUpdateListener(listener, null);
2596     }
2597 
2598     /**
2599      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2600      * for each periodic playback head position update.
2601      * Use this method to receive AudioTrack events in the Handler associated with another
2602      * thread than the one in which you created the AudioTrack instance.
     * @param listener the listener to register, or null to clear a previously set listener
2604      * @param handler the Handler that will receive the event notification messages.
2605      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)2606     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
2607                                                     Handler handler) {
2608         if (listener != null) {
2609             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
2610         } else {
2611             mEventHandlerDelegate = null;
2612         }
2613     }
2614 
2615 
clampGainOrLevel(float gainOrLevel)2616     private static float clampGainOrLevel(float gainOrLevel) {
2617         if (Float.isNaN(gainOrLevel)) {
2618             throw new IllegalArgumentException();
2619         }
2620         if (gainOrLevel < GAIN_MIN) {
2621             gainOrLevel = GAIN_MIN;
2622         } else if (gainOrLevel > GAIN_MAX) {
2623             gainOrLevel = GAIN_MAX;
2624         }
2625         return gainOrLevel;
2626     }
2627 
2628 
2629      /**
2630      * Sets the specified left and right output gain values on the AudioTrack.
2631      * <p>Gain values are clamped to the closed interval [0.0, max] where
2632      * max is the value of {@link #getMaxVolume}.
2633      * A value of 0.0 results in zero gain (silence), and
2634      * a value of 1.0 means unity gain (signal unchanged).
2635      * The default value is 1.0 meaning unity gain.
2636      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2637      * @param leftGain output gain for the left channel.
2638      * @param rightGain output gain for the right channel
2639      * @return error code or success, see {@link #SUCCESS},
2640      *    {@link #ERROR_INVALID_OPERATION}
2641      * @deprecated Applications should use {@link #setVolume} instead, as it
2642      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2643      */
2644     @Deprecated
setStereoVolume(float leftGain, float rightGain)2645     public int setStereoVolume(float leftGain, float rightGain) {
2646         if (mState == STATE_UNINITIALIZED) {
2647             return ERROR_INVALID_OPERATION;
2648         }
2649 
2650         baseSetVolume(leftGain, rightGain);
2651         return SUCCESS;
2652     }
2653 
2654     @Override
playerSetVolume(boolean muting, float leftVolume, float rightVolume)2655     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
2656         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
2657         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
2658 
2659         native_setVolume(leftVolume, rightVolume);
2660     }
2661 
2662 
2663     /**
2664      * Sets the specified output gain value on all channels of this track.
2665      * <p>Gain values are clamped to the closed interval [0.0, max] where
2666      * max is the value of {@link #getMaxVolume}.
2667      * A value of 0.0 results in zero gain (silence), and
2668      * a value of 1.0 means unity gain (signal unchanged).
2669      * The default value is 1.0 meaning unity gain.
2670      * <p>This API is preferred over {@link #setStereoVolume}, as it
2671      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2672      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2673      * @param gain output gain for all channels.
2674      * @return error code or success, see {@link #SUCCESS},
2675      *    {@link #ERROR_INVALID_OPERATION}
2676      */
setVolume(float gain)2677     public int setVolume(float gain) {
2678         return setStereoVolume(gain, gain);
2679     }
2680 
2681     @Override
playerApplyVolumeShaper( @onNull VolumeShaper.Configuration configuration, @NonNull VolumeShaper.Operation operation)2682     /* package */ int playerApplyVolumeShaper(
2683             @NonNull VolumeShaper.Configuration configuration,
2684             @NonNull VolumeShaper.Operation operation) {
2685         return native_applyVolumeShaper(configuration, operation);
2686     }
2687 
2688     @Override
playerGetVolumeShaperState(int id)2689     /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
2690         return native_getVolumeShaperState(id);
2691     }
2692 
2693     @Override
createVolumeShaper( @onNull VolumeShaper.Configuration configuration)2694     public @NonNull VolumeShaper createVolumeShaper(
2695             @NonNull VolumeShaper.Configuration configuration) {
2696         return new VolumeShaper(configuration, this);
2697     }
2698 
2699     /**
2700      * Sets the playback sample rate for this track. This sets the sampling rate at which
2701      * the audio data will be consumed and played back
2702      * (as set by the sampleRateInHz parameter in the
2703      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
2704      * not the original sampling rate of the
2705      * content. For example, setting it to half the sample rate of the content will cause the
2706      * playback to last twice as long, but will also result in a pitch shift down by one octave.
2707      * The valid sample rate range is from 1 Hz to twice the value returned by
2708      * {@link #getNativeOutputSampleRate(int)}.
2709      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
2710      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
2711      * for playback of content of differing sample rate,
2712      * but with identical encoding and channel mask.
2713      * @param sampleRateInHz the sample rate expressed in Hz
2714      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2715      *    {@link #ERROR_INVALID_OPERATION}
2716      */
setPlaybackRate(int sampleRateInHz)2717     public int setPlaybackRate(int sampleRateInHz) {
2718         if (mState != STATE_INITIALIZED) {
2719             return ERROR_INVALID_OPERATION;
2720         }
2721         if (sampleRateInHz <= 0) {
2722             return ERROR_BAD_VALUE;
2723         }
2724         return native_set_playback_rate(sampleRateInHz);
2725     }
2726 
2727 
2728     /**
2729      * Sets the playback parameters.
2730      * This method returns failure if it cannot apply the playback parameters.
2731      * One possible cause is that the parameters for speed or pitch are out of range.
2732      * Another possible cause is that the <code>AudioTrack</code> is streaming
2733      * (see {@link #MODE_STREAM}) and the
2734      * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
2735      * on configuration must be larger than the speed multiplied by the minimum size
2736      * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
2737      * @param params see {@link PlaybackParams}. In particular,
2738      * speed, pitch, and audio mode should be set.
2739      * @throws IllegalArgumentException if the parameters are invalid or not accepted.
2740      * @throws IllegalStateException if track is not initialized.
2741      */
setPlaybackParams(@onNull PlaybackParams params)2742     public void setPlaybackParams(@NonNull PlaybackParams params) {
2743         if (params == null) {
2744             throw new IllegalArgumentException("params is null");
2745         }
2746         native_set_playback_params(params);
2747     }
2748 
2749 
2750     /**
2751      * Sets the position of the notification marker.  At most one marker can be active.
2752      * @param markerInFrames marker position in wrapping frame units similar to
2753      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
2754      * To set a marker at a position which would appear as zero due to wraparound,
2755      * a workaround is to use a non-zero position near zero, such as -1 or 1.
2756      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2757      *  {@link #ERROR_INVALID_OPERATION}
2758      */
setNotificationMarkerPosition(int markerInFrames)2759     public int setNotificationMarkerPosition(int markerInFrames) {
2760         if (mState == STATE_UNINITIALIZED) {
2761             return ERROR_INVALID_OPERATION;
2762         }
2763         return native_set_marker_pos(markerInFrames);
2764     }
2765 
2766 
2767     /**
2768      * Sets the period for the periodic notification event.
2769      * @param periodInFrames update period expressed in frames.
2770      * Zero period means no position updates.  A negative period is not allowed.
2771      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
2772      */
setPositionNotificationPeriod(int periodInFrames)2773     public int setPositionNotificationPeriod(int periodInFrames) {
2774         if (mState == STATE_UNINITIALIZED) {
2775             return ERROR_INVALID_OPERATION;
2776         }
2777         return native_set_pos_update_period(periodInFrames);
2778     }
2779 
2780 
2781     /**
2782      * Sets the playback head position within the static buffer.
2783      * The track must be stopped or paused for the position to be changed,
2784      * and must use the {@link #MODE_STATIC} mode.
2785      * @param positionInFrames playback head position within buffer, expressed in frames.
2786      * Zero corresponds to start of buffer.
2787      * The position must not be greater than the buffer size in frames, or negative.
2788      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
2789      * the position values have different meanings.
2790      * <br>
2791      * If looping is currently enabled and the new position is greater than or equal to the
2792      * loop end marker, the behavior varies by API level:
2793      * as of {@link android.os.Build.VERSION_CODES#M},
2794      * the looping is first disabled and then the position is set.
2795      * For earlier API levels, the behavior is unspecified.
2796      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2797      *    {@link #ERROR_INVALID_OPERATION}
2798      */
setPlaybackHeadPosition(@ntRange from = 0) int positionInFrames)2799     public int setPlaybackHeadPosition(@IntRange (from = 0) int positionInFrames) {
2800         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2801                 getPlayState() == PLAYSTATE_PLAYING) {
2802             return ERROR_INVALID_OPERATION;
2803         }
2804         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
2805             return ERROR_BAD_VALUE;
2806         }
2807         return native_set_position(positionInFrames);
2808     }
2809 
2810     /**
2811      * Sets the loop points and the loop count. The loop can be infinite.
2812      * Similarly to setPlaybackHeadPosition,
2813      * the track must be stopped or paused for the loop points to be changed,
2814      * and must use the {@link #MODE_STATIC} mode.
2815      * @param startInFrames loop start marker expressed in frames.
2816      * Zero corresponds to start of buffer.
2817      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
2818      * @param endInFrames loop end marker expressed in frames.
2819      * The total buffer size in frames corresponds to end of buffer.
2820      * The end marker must not be greater than the buffer size in frames.
2821      * For looping, the end marker must not be less than or equal to the start marker,
2822      * but to disable looping
2823      * it is permitted for start marker, end marker, and loop count to all be 0.
2824      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
2825      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
2826      * support,
2827      * {@link #ERROR_BAD_VALUE} is returned.
2828      * The loop range is the interval [startInFrames, endInFrames).
2829      * <br>
2830      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
2831      * unless it is greater than or equal to the loop end marker, in which case
2832      * it is forced to the loop start marker.
2833      * For earlier API levels, the effect on position is unspecified.
2834      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
2835      *    A value of -1 means infinite looping, and 0 disables looping.
2836      *    A value of positive N means to "loop" (go back) N times.  For example,
2837      *    a value of one means to play the region two times in total.
2838      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2839      *    {@link #ERROR_INVALID_OPERATION}
2840      */
setLoopPoints(@ntRange from = 0) int startInFrames, @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount)2841     public int setLoopPoints(@IntRange (from = 0) int startInFrames,
2842             @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount) {
2843         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2844                 getPlayState() == PLAYSTATE_PLAYING) {
2845             return ERROR_INVALID_OPERATION;
2846         }
2847         if (loopCount == 0) {
2848             ;   // explicitly allowed as an exception to the loop region range check
2849         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
2850                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
2851             return ERROR_BAD_VALUE;
2852         }
2853         return native_set_loop(startInFrames, endInFrames, loopCount);
2854     }
2855 
2856     /**
2857      * Sets the audio presentation.
2858      * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned.
2859      * If a multi-stream decoder (MSD) is not present, or the format does not support
2860      * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned.
2861      * {@link #ERROR} is returned in case of any other error.
2862      * @param presentation see {@link AudioPresentation}. In particular, id should be set.
2863      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR},
2864      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2865      * @throws IllegalArgumentException if the audio presentation is null.
2866      * @throws IllegalStateException if track is not initialized.
2867      */
setPresentation(@onNull AudioPresentation presentation)2868     public int setPresentation(@NonNull AudioPresentation presentation) {
2869         if (presentation == null) {
2870             throw new IllegalArgumentException("audio presentation is null");
2871         }
2872         return native_setPresentation(presentation.getPresentationId(),
2873                 presentation.getProgramId());
2874     }
2875 
2876     /**
2877      * Sets the initialization state of the instance. This method was originally intended to be used
2878      * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
2879      * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
2880      * @param state the state of the AudioTrack instance
2881      * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
2882      */
    @Deprecated
    protected void setState(int state) {
        // Directly overwrites the initialization state; no validation or synchronization.
        mState = state;
    }
2887 
2888 
2889     //---------------------------------------------------------
2890     // Transport control methods
2891     //--------------------
2892     /**
2893      * Starts playing an AudioTrack.
2894      * <p>
2895      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
2896      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
2897      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
2898      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
2899      * play().
2900      * <p>
2901      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
2902      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
2903      * If you don't call write() first, or if you call write() but with an insufficient amount of
2904      * data, then the track will be in underrun state at play().  In this case,
2905      * playback will not actually start playing until the data path is filled to a
2906      * device-specific minimum level.  This requirement for the path to be filled
2907      * to a minimum level is also true when resuming audio playback after calling stop().
2908      * Similarly the buffer will need to be filled up again after
2909      * the track underruns due to failure to call write() in a timely manner with sufficient data.
2910      * For portability, an application should prime the data path to the maximum allowed
2911      * by writing data until the write() method returns a short transfer count.
2912      * This allows play() to start immediately, and reduces the chance of underrun.
2913      *<p>
2914      * As of {@link android.os.Build.VERSION_CODES#S} the minimum level to start playing
2915      * can be obtained using {@link #getStartThresholdInFrames()} and set with
2916      * {@link #setStartThresholdInFrames(int)}.
2917      *
2918      * @throws IllegalStateException if the track isn't properly initialized
2919      */
play()2920     public void play()
2921     throws IllegalStateException {
2922         if (mState != STATE_INITIALIZED) {
2923             throw new IllegalStateException("play() called on uninitialized AudioTrack.");
2924         }
2925         //FIXME use lambda to pass startImpl to superclass
2926         final int delay = getStartDelayMs();
2927         if (delay == 0) {
2928             startImpl();
2929         } else {
2930             new Thread() {
2931                 public void run() {
2932                     try {
2933                         Thread.sleep(delay);
2934                     } catch (InterruptedException e) {
2935                         e.printStackTrace();
2936                     }
2937                     baseSetStartDelayMs(0);
2938                     try {
2939                         startImpl();
2940                     } catch (IllegalStateException e) {
2941                         // fail silently for a state exception when it is happening after
2942                         // a delayed start, as the player state could have changed between the
2943                         // call to start() and the execution of startImpl()
2944                     }
2945                 }
2946             }.start();
2947         }
2948     }
2949 
    // Common start path for both immediate and delayed play(); transitions the
    // play state under mPlayStateLock after starting the native track.
    private void startImpl() {
        // Enable self-routing monitoring at most once; the flag guards re-registration
        // across repeated starts.
        synchronized (mRoutingChangeListeners) {
            if (!mEnableSelfRoutingMonitor) {
                mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
            }
        }
        synchronized(mPlayStateLock) {
            baseStart(0); // unknown device at this point
            native_start();
            // FIXME see b/179218630
            //baseStart(native_getRoutedDeviceId());
            // A track paused while stopping resumes the stopping sequence rather than
            // entering normal playback.
            if (mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PLAYING;
                mOffloadEosPending = false;
            }
        }
    }
2969 
2970     /**
2971      * Stops playing the audio data.
2972      * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
2973      * after the last buffer that was written has been played. For an immediate stop, use
2974      * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
2975      * back yet.
2976      * @throws IllegalStateException
2977      */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            native_stop();
            baseStop();
            // An offloaded track keeps draining after stop(): report STOPPING unless it
            // was paused mid-drain (PAUSED_STOPPING state is preserved).
            if (mOffloaded && mPlayState != PLAYSTATE_PAUSED_STOPPING) {
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_STOPPED;
                mOffloadEosPending = false;
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                // NOTE(review): presumably wakes a writer waiting on mPlayStateLock
                // (e.g. blockUntilOffloadDrain) — confirm against the wait site.
                mPlayStateLock.notify();
            }
        }
        tryToDisableNativeRoutingCallback();
    }
3000 
3001     /**
3002      * Pauses the playback of the audio data. Data that has not been played
3003      * back will not be discarded. Subsequent calls to {@link #play} will play
3004      * this data back. See {@link #flush()} to discard this data.
3005      *
3006      * @throws IllegalStateException
3007      */
pause()3008     public void pause()
3009     throws IllegalStateException {
3010         if (mState != STATE_INITIALIZED) {
3011             throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
3012         }
3013 
3014         // pause playback
3015         synchronized(mPlayStateLock) {
3016             native_pause();
3017             basePause();
3018             if (mPlayState == PLAYSTATE_STOPPING) {
3019                 mPlayState = PLAYSTATE_PAUSED_STOPPING;
3020             } else {
3021                 mPlayState = PLAYSTATE_PAUSED;
3022             }
3023         }
3024     }
3025 
3026 
3027     //---------------------------------------------------------
3028     // Audio data supply
3029     //--------------------
3030 
3031     /**
3032      * Flushes the audio data currently queued for playback. Any data that has
3033      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
3034      * or if the track's creation mode is not {@link #MODE_STREAM}.
3035      * <BR> Note that although data written but not yet presented is discarded, there is no
3036      * guarantee that all of the buffer space formerly used by that data
3037      * is available for a subsequent write.
3038      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
3039      * less than or equal to the total buffer size
3040      * may return a short actual transfer count.
3041      */
flush()3042     public void flush() {
3043         if (mState == STATE_INITIALIZED) {
3044             // flush the data in native layer
3045             native_flush();
3046             mAvSyncHeader = null;
3047             mAvSyncBytesRemaining = 0;
3048         }
3049 
3050     }
3051 
3052     /**
3053      * Writes the audio data to the audio sink for playback (streaming mode),
3054      * or copies audio data for later playback (static buffer mode).
3055      * The format specified in the AudioTrack constructor should be
3056      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
3057      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
3058      * <p>
3059      * In streaming mode, the write will normally block until all the data has been enqueued for
3060      * playback, and will return a full transfer count.  However, if the track is stopped or paused
3061      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
3062      * occurs during the write, then the write may return a short transfer count.
3063      * <p>
3064      * In static buffer mode, copies the data to the buffer starting at offset 0.
3065      * Note that the actual playback of this data might occur after this function returns.
3066      *
3067      * @param audioData the array that holds the data to play.
3068      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
3069      *    starts.
3070      *    Must not be negative, or cause the data access to go out of bounds of the array.
3071      * @param sizeInBytes the number of bytes to write in audioData after the offset.
3072      *    Must not be negative, or cause the data access to go out of bounds of the array.
3073      * @return zero or the positive number of bytes that were written, or one of the following
3074      *    error codes. The number of bytes will be a multiple of the frame size in bytes
3075      *    not to exceed sizeInBytes.
3076      * <ul>
3077      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3078      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3079      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3080      *    needs to be recreated. The dead object error code is not returned if some data was
3081      *    successfully transferred. In this case, the error is returned at the next write()</li>
3082      * <li>{@link #ERROR} in case of other error</li>
3083      * </ul>
3084      * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
3085      * set to  {@link #WRITE_BLOCKING}.
3086      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes)3087     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
3088         return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
3089     }
3090 
3091     /**
3092      * Writes the audio data to the audio sink for playback (streaming mode),
3093      * or copies audio data for later playback (static buffer mode).
3094      * The format specified in the AudioTrack constructor should be
3095      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
3096      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
3097      * <p>
3098      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3099      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3100      * for playback, and will return a full transfer count.  However, if the write mode is
3101      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3102      * interrupts the write by calling stop or pause, or an I/O error
3103      * occurs during the write, then the write may return a short transfer count.
3104      * <p>
3105      * In static buffer mode, copies the data to the buffer starting at offset 0,
3106      * and the write mode is ignored.
3107      * Note that the actual playback of this data might occur after this function returns.
3108      *
3109      * @param audioData the array that holds the data to play.
3110      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
3111      *    starts.
3112      *    Must not be negative, or cause the data access to go out of bounds of the array.
3113      * @param sizeInBytes the number of bytes to write in audioData after the offset.
3114      *    Must not be negative, or cause the data access to go out of bounds of the array.
3115      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3116      *     effect in static mode.
3117      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3118      *         to the audio sink.
3119      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3120      *     queuing as much audio data for playback as possible without blocking.
3121      * @return zero or the positive number of bytes that were written, or one of the following
3122      *    error codes. The number of bytes will be a multiple of the frame size in bytes
3123      *    not to exceed sizeInBytes.
3124      * <ul>
3125      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3126      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3127      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3128      *    needs to be recreated. The dead object error code is not returned if some data was
3129      *    successfully transferred. In this case, the error is returned at the next write()</li>
3130      * <li>{@link #ERROR} in case of other error</li>
3131      * </ul>
3132      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)3133     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
3134             @WriteMode int writeMode) {
3135         // Note: we allow writes of extended integers and compressed formats from a byte array.
3136         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
3137             return ERROR_INVALID_OPERATION;
3138         }
3139 
3140         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3141             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3142             return ERROR_BAD_VALUE;
3143         }
3144 
3145         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
3146                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
3147                 || (offsetInBytes + sizeInBytes > audioData.length)) {
3148             return ERROR_BAD_VALUE;
3149         }
3150 
3151         if (!blockUntilOffloadDrain(writeMode)) {
3152             return 0;
3153         }
3154 
3155         final int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
3156                 writeMode == WRITE_BLOCKING);
3157 
3158         if ((mDataLoadMode == MODE_STATIC)
3159                 && (mState == STATE_NO_STATIC_DATA)
3160                 && (ret > 0)) {
3161             // benign race with respect to other APIs that read mState
3162             mState = STATE_INITIALIZED;
3163         }
3164 
3165         return ret;
3166     }
3167 
3168     /**
3169      * Writes the audio data to the audio sink for playback (streaming mode),
3170      * or copies audio data for later playback (static buffer mode).
3171      * The format specified in the AudioTrack constructor should be
3172      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3173      * <p>
3174      * In streaming mode, the write will normally block until all the data has been enqueued for
3175      * playback, and will return a full transfer count.  However, if the track is stopped or paused
3176      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
3177      * occurs during the write, then the write may return a short transfer count.
3178      * <p>
3179      * In static buffer mode, copies the data to the buffer starting at offset 0.
3180      * Note that the actual playback of this data might occur after this function returns.
3181      *
3182      * @param audioData the array that holds the data to play.
3183      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
3184      *     starts.
3185      *    Must not be negative, or cause the data access to go out of bounds of the array.
3186      * @param sizeInShorts the number of shorts to read in audioData after the offset.
3187      *    Must not be negative, or cause the data access to go out of bounds of the array.
3188      * @return zero or the positive number of shorts that were written, or one of the following
3189      *    error codes. The number of shorts will be a multiple of the channel count not to
3190      *    exceed sizeInShorts.
3191      * <ul>
3192      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3193      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3194      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3195      *    needs to be recreated. The dead object error code is not returned if some data was
3196      *    successfully transferred. In this case, the error is returned at the next write()</li>
3197      * <li>{@link #ERROR} in case of other error</li>
3198      * </ul>
3199      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
3200      * set to  {@link #WRITE_BLOCKING}.
3201      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts)3202     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
3203         return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
3204     }
3205 
3206     /**
3207      * Writes the audio data to the audio sink for playback (streaming mode),
3208      * or copies audio data for later playback (static buffer mode).
3209      * The format specified in the AudioTrack constructor should be
3210      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
3211      * <p>
3212      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3213      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3214      * for playback, and will return a full transfer count.  However, if the write mode is
3215      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3216      * interrupts the write by calling stop or pause, or an I/O error
3217      * occurs during the write, then the write may return a short transfer count.
3218      * <p>
3219      * In static buffer mode, copies the data to the buffer starting at offset 0.
3220      * Note that the actual playback of this data might occur after this function returns.
3221      *
3222      * @param audioData the array that holds the data to write.
3223      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
3224      *     starts.
3225      *    Must not be negative, or cause the data access to go out of bounds of the array.
3226      * @param sizeInShorts the number of shorts to read in audioData after the offset.
3227      *    Must not be negative, or cause the data access to go out of bounds of the array.
3228      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3229      *     effect in static mode.
3230      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3231      *         to the audio sink.
3232      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3233      *     queuing as much audio data for playback as possible without blocking.
3234      * @return zero or the positive number of shorts that were written, or one of the following
3235      *    error codes. The number of shorts will be a multiple of the channel count not to
3236      *    exceed sizeInShorts.
3237      * <ul>
3238      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3239      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3240      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3241      *    needs to be recreated. The dead object error code is not returned if some data was
3242      *    successfully transferred. In this case, the error is returned at the next write()</li>
3243      * <li>{@link #ERROR} in case of other error</li>
3244      * </ul>
3245      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)3246     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
3247             @WriteMode int writeMode) {
3248 
3249         if (mState == STATE_UNINITIALIZED
3250                 || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT
3251                 // use ByteBuffer or byte[] instead for later encodings
3252                 || mAudioFormat > AudioFormat.ENCODING_LEGACY_SHORT_ARRAY_THRESHOLD) {
3253             return ERROR_INVALID_OPERATION;
3254         }
3255 
3256         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3257             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3258             return ERROR_BAD_VALUE;
3259         }
3260 
3261         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
3262                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
3263                 || (offsetInShorts + sizeInShorts > audioData.length)) {
3264             return ERROR_BAD_VALUE;
3265         }
3266 
3267         if (!blockUntilOffloadDrain(writeMode)) {
3268             return 0;
3269         }
3270 
3271         final int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
3272                 writeMode == WRITE_BLOCKING);
3273 
3274         if ((mDataLoadMode == MODE_STATIC)
3275                 && (mState == STATE_NO_STATIC_DATA)
3276                 && (ret > 0)) {
3277             // benign race with respect to other APIs that read mState
3278             mState = STATE_INITIALIZED;
3279         }
3280 
3281         return ret;
3282     }
3283 
3284     /**
3285      * Writes the audio data to the audio sink for playback (streaming mode),
3286      * or copies audio data for later playback (static buffer mode).
3287      * The format specified in the AudioTrack constructor should be
3288      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
3289      * <p>
3290      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3291      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3292      * for playback, and will return a full transfer count.  However, if the write mode is
3293      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3294      * interrupts the write by calling stop or pause, or an I/O error
3295      * occurs during the write, then the write may return a short transfer count.
3296      * <p>
3297      * In static buffer mode, copies the data to the buffer starting at offset 0,
3298      * and the write mode is ignored.
3299      * Note that the actual playback of this data might occur after this function returns.
3300      *
3301      * @param audioData the array that holds the data to write.
3302      *     The implementation does not clip for sample values within the nominal range
3303      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
3304      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
3305      *     that could add energy, such as reverb.  For the convenience of applications
3306      *     that compute samples using filters with non-unity gain,
3307      *     sample values +3 dB beyond the nominal range are permitted.
3308      *     However such values may eventually be limited or clipped, depending on various gains
3309      *     and later processing in the audio path.  Therefore applications are encouraged
3310      *     to provide samples values within the nominal range.
3311      * @param offsetInFloats the offset, expressed as a number of floats,
3312      *     in audioData where the data to write starts.
3313      *    Must not be negative, or cause the data access to go out of bounds of the array.
3314      * @param sizeInFloats the number of floats to write in audioData after the offset.
3315      *    Must not be negative, or cause the data access to go out of bounds of the array.
3316      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3317      *     effect in static mode.
3318      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3319      *         to the audio sink.
3320      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3321      *     queuing as much audio data for playback as possible without blocking.
3322      * @return zero or the positive number of floats that were written, or one of the following
3323      *    error codes. The number of floats will be a multiple of the channel count not to
3324      *    exceed sizeInFloats.
3325      * <ul>
3326      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3327      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3328      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3329      *    needs to be recreated. The dead object error code is not returned if some data was
3330      *    successfully transferred. In this case, the error is returned at the next write()</li>
3331      * <li>{@link #ERROR} in case of other error</li>
3332      * </ul>
3333      */
write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)3334     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
3335             @WriteMode int writeMode) {
3336 
3337         if (mState == STATE_UNINITIALIZED) {
3338             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3339             return ERROR_INVALID_OPERATION;
3340         }
3341 
3342         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
3343             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
3344             return ERROR_INVALID_OPERATION;
3345         }
3346 
3347         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3348             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3349             return ERROR_BAD_VALUE;
3350         }
3351 
3352         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
3353                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
3354                 || (offsetInFloats + sizeInFloats > audioData.length)) {
3355             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
3356             return ERROR_BAD_VALUE;
3357         }
3358 
3359         if (!blockUntilOffloadDrain(writeMode)) {
3360             return 0;
3361         }
3362 
3363         final int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
3364                 writeMode == WRITE_BLOCKING);
3365 
3366         if ((mDataLoadMode == MODE_STATIC)
3367                 && (mState == STATE_NO_STATIC_DATA)
3368                 && (ret > 0)) {
3369             // benign race with respect to other APIs that read mState
3370             mState = STATE_INITIALIZED;
3371         }
3372 
3373         return ret;
3374     }
3375 
3376 
3377     /**
3378      * Writes the audio data to the audio sink for playback (streaming mode),
3379      * or copies audio data for later playback (static buffer mode).
3380      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
3381      * <p>
3382      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3383      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3384      * for playback, and will return a full transfer count.  However, if the write mode is
3385      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3386      * interrupts the write by calling stop or pause, or an I/O error
3387      * occurs during the write, then the write may return a short transfer count.
3388      * <p>
3389      * In static buffer mode, copies the data to the buffer starting at offset 0,
3390      * and the write mode is ignored.
3391      * Note that the actual playback of this data might occur after this function returns.
3392      *
3393      * @param audioData the buffer that holds the data to write, starting at the position reported
3394      *     by <code>audioData.position()</code>.
3395      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
3396      *     have been advanced to reflect the amount of data that was successfully written to
3397      *     the AudioTrack.
3398      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
3399      *     that the number of bytes requested be a multiple of the frame size (sample size in
3400      *     bytes multiplied by the channel count).
3401      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
3402      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3403      *     effect in static mode.
3404      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3405      *         to the audio sink.
3406      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3407      *     queuing as much audio data for playback as possible without blocking.
3408      * @return zero or the positive number of bytes that were written, or one of the following
3409      *    error codes.
3410      * <ul>
3411      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3412      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3413      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3414      *    needs to be recreated. The dead object error code is not returned if some data was
3415      *    successfully transferred. In this case, the error is returned at the next write()</li>
3416      * <li>{@link #ERROR} in case of other error</li>
3417      * </ul>
3418      */
write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)3419     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
3420             @WriteMode int writeMode) {
3421 
3422         if (mState == STATE_UNINITIALIZED) {
3423             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3424             return ERROR_INVALID_OPERATION;
3425         }
3426 
3427         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3428             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3429             return ERROR_BAD_VALUE;
3430         }
3431 
3432         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
3433             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
3434             return ERROR_BAD_VALUE;
3435         }
3436 
3437         if (!blockUntilOffloadDrain(writeMode)) {
3438             return 0;
3439         }
3440 
3441         int ret = 0;
3442         if (audioData.isDirect()) {
3443             ret = native_write_native_bytes(audioData,
3444                     audioData.position(), sizeInBytes, mAudioFormat,
3445                     writeMode == WRITE_BLOCKING);
3446         } else {
3447             ret = native_write_byte(NioUtils.unsafeArray(audioData),
3448                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
3449                     sizeInBytes, mAudioFormat,
3450                     writeMode == WRITE_BLOCKING);
3451         }
3452 
3453         if ((mDataLoadMode == MODE_STATIC)
3454                 && (mState == STATE_NO_STATIC_DATA)
3455                 && (ret > 0)) {
3456             // benign race with respect to other APIs that read mState
3457             mState = STATE_INITIALIZED;
3458         }
3459 
3460         if (ret > 0) {
3461             audioData.position(audioData.position() + ret);
3462         }
3463 
3464         return ret;
3465     }
3466 
    /**
     * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
     * The blocking behavior will depend on the write mode.
     * @param audioData the buffer that holds the data to write, starting at the position reported
     *     by <code>audioData.position()</code>.
     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
     *     have been advanced to reflect the amount of data that was successfully written to
     *     the AudioTrack.
     * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
     *     that the number of bytes requested be a multiple of the frame size (sample size in
     *     bytes multiplied by the channel count).
     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *         to the audio sink.
     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @param timestamp The timestamp, in nanoseconds, of the first decodable audio frame in the
     *     provided audioData.
     * @return zero or the positive number of bytes that were written, or one of the following
     *    error codes.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamps only apply to a continuously fed stream.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Without HW A/V sync the timestamp cannot be honored: fall back to a plain write.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // Non-blocking writes report 0 bytes while an offload drain is in progress.
        if (!blockUntilOffloadDrain(writeMode)) {
            return 0;
        }

        // create timestamp header if none exists
        // mOffset is the total header length and 0x55550002 the header version tag.
        // NOTE(review): the field layout is inferred from the put() offsets below
        // (magic @0, size @4, timestamp @8, header length @16) — confirm against the
        // HAL-side AV sync header definition.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550002);
        }

        // Starting a new access unit: record its size and timestamp, rewind the header so it
        // is (re)written ahead of the payload, and remember how many payload bytes it covers.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // Header write failed: reset AV sync state so the next call starts clean.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Header only partially queued (non-blocking path): report no payload written;
                // the remainder of the header goes out on the next call.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data, capped at what remains of the current access unit
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            // Payload write failed: reset AV sync state so the next call starts clean.
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }
3574 
3575 
3576     /**
3577      * Sets the playback head position within the static buffer to zero,
3578      * that is it rewinds to start of static buffer.
3579      * The track must be stopped or paused, and
3580      * the track's creation mode must be {@link #MODE_STATIC}.
3581      * <p>
3582      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
3583      * {@link #getPlaybackHeadPosition()} to zero.
3584      * For earlier API levels, the reset behavior is unspecified.
3585      * <p>
3586      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
3587      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
3588      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
3589      *  {@link #ERROR_INVALID_OPERATION}
3590      */
reloadStaticData()3591     public int reloadStaticData() {
3592         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
3593             return ERROR_INVALID_OPERATION;
3594         }
3595         return native_reload_static();
3596     }
3597 
3598     /**
3599      * When an AudioTrack in offload mode is in STOPPING play state, wait until event STREAM_END is
3600      * received if blocking write or return with 0 frames written if non blocking mode.
3601      */
blockUntilOffloadDrain(int writeMode)3602     private boolean blockUntilOffloadDrain(int writeMode) {
3603         synchronized (mPlayStateLock) {
3604             while (mPlayState == PLAYSTATE_STOPPING || mPlayState == PLAYSTATE_PAUSED_STOPPING) {
3605                 if (writeMode == WRITE_NON_BLOCKING) {
3606                     return false;
3607                 }
3608                 try {
3609                     mPlayStateLock.wait();
3610                 } catch (InterruptedException e) {
3611                 }
3612             }
3613             return true;
3614         }
3615     }
3616 
3617     //--------------------------------------------------------------------------
3618     // Audio effects management
3619     //--------------------
3620 
3621     /**
3622      * Attaches an auxiliary effect to the audio track. A typical auxiliary
3623      * effect is a reverberation effect which can be applied on any sound source
3624      * that directs a certain amount of its energy to this effect. This amount
3625      * is defined by setAuxEffectSendLevel().
3626      * {@see #setAuxEffectSendLevel(float)}.
3627      * <p>After creating an auxiliary effect (e.g.
3628      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
3629      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
3630      * this method to attach the audio track to the effect.
3631      * <p>To detach the effect from the audio track, call this method with a
3632      * null effect id.
3633      *
3634      * @param effectId system wide unique id of the effect to attach
3635      * @return error code or success, see {@link #SUCCESS},
3636      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
3637      */
attachAuxEffect(int effectId)3638     public int attachAuxEffect(int effectId) {
3639         if (mState == STATE_UNINITIALIZED) {
3640             return ERROR_INVALID_OPERATION;
3641         }
3642         return native_attachAuxEffect(effectId);
3643     }
3644 
3645     /**
3646      * Sets the send level of the audio track to the attached auxiliary effect
3647      * {@link #attachAuxEffect(int)}.  Effect levels
3648      * are clamped to the closed interval [0.0, max] where
3649      * max is the value of {@link #getMaxVolume}.
3650      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
3651      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
3652      * this method must be called for the effect to be applied.
3653      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
3654      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
3655      * so an appropriate conversion from linear UI input x to level is:
3656      * x == 0 -&gt; level = 0
3657      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
3658      *
3659      * @param level linear send level
3660      * @return error code or success, see {@link #SUCCESS},
3661      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
3662      */
setAuxEffectSendLevel(@loatRangefrom = 0.0) float level)3663     public int setAuxEffectSendLevel(@FloatRange(from = 0.0) float level) {
3664         if (mState == STATE_UNINITIALIZED) {
3665             return ERROR_INVALID_OPERATION;
3666         }
3667         return baseSetAuxEffectSendLevel(level);
3668     }
3669 
3670     @Override
playerSetAuxEffectSendLevel(boolean muting, float level)3671     int playerSetAuxEffectSendLevel(boolean muting, float level) {
3672         level = clampGainOrLevel(muting ? 0.0f : level);
3673         int err = native_setAuxEffectSendLevel(level);
3674         return err == 0 ? SUCCESS : ERROR;
3675     }
3676 
3677     //--------------------------------------------------------------------------
3678     // Explicit Routing
3679     //--------------------
    // Device explicitly requested by the app via setPreferredDevice(), or null for default
    // routing; reads and writes are guarded by synchronized (this).
    private AudioDeviceInfo mPreferredDevice = null;
3681 
3682     /**
3683      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
3684      * the output from this AudioTrack.
3685      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
3686      *  If deviceInfo is null, default routing is restored.
3687      * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
3688      * does not correspond to a valid audio output device.
3689      */
3690     @Override
setPreferredDevice(AudioDeviceInfo deviceInfo)3691     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
3692         // Do some validation....
3693         if (deviceInfo != null && !deviceInfo.isSink()) {
3694             return false;
3695         }
3696         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
3697         boolean status = native_setOutputDevice(preferredDeviceId);
3698         if (status == true) {
3699             synchronized (this) {
3700                 mPreferredDevice = deviceInfo;
3701             }
3702         }
3703         return status;
3704     }
3705 
    /**
     * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
     * is not guaranteed to correspond to the actual device being used for playback.
     */
    @Override
    public AudioDeviceInfo getPreferredDevice() {
        // Same monitor as the write in setPreferredDevice() to get a consistent read.
        synchronized (this) {
            return mPreferredDevice;
        }
    }
3716 
3717     /**
3718      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
3719      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
3720      * <code>getRoutedDevice()</code> will return null.
3721      */
3722     @Override
getRoutedDevice()3723     public AudioDeviceInfo getRoutedDevice() {
3724         int deviceId = native_getRoutedDeviceId();
3725         if (deviceId == 0) {
3726             return null;
3727         }
3728         return AudioManager.getDeviceForPortId(deviceId, AudioManager.GET_DEVICES_OUTPUTS);
3729     }
3730 
    // Turns off the track's own (self-monitoring) native routing callback if it was
    // enabled, then lets testDisableNativeRoutingCallbacksLocked() decide whether the
    // native callback can actually be disabled (i.e. no app listeners remain either).
    private void tryToDisableNativeRoutingCallback() {
        synchronized (mRoutingChangeListeners) {
            if (mEnableSelfRoutingMonitor) {
                mEnableSelfRoutingMonitor = false;
                testDisableNativeRoutingCallbacksLocked();
            }
        }
    }
3739 
    /**
     * Call BEFORE adding a routing callback handler and when enabling self routing listener.
     * Enables the native device callback only on the first interested party (no app
     * listeners registered yet and self-monitoring not already active).
     * @return returns true for success, false otherwise.
     */
    @GuardedBy("mRoutingChangeListeners")
    private boolean testEnableNativeRoutingCallbacksLocked() {
        if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
            try {
                native_enableDeviceCallback();
                return true;
            } catch (IllegalStateException e) {
                // Track may be in a state where the native callback cannot be enabled;
                // only log in debug builds to avoid spamming.
                if (Log.isLoggable(TAG, Log.DEBUG)) {
                    Log.d(TAG, "testEnableNativeRoutingCallbacks failed", e);
                }
            }
        }
        return false;
    }
3758 
    /**
     * Call AFTER removing a routing callback handler and when disabling self routing listener.
     * Disables the native device callback once no app listener and no self-monitor needs it.
     */
    @GuardedBy("mRoutingChangeListeners")
    private void testDisableNativeRoutingCallbacksLocked() {
        if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
            try {
                native_disableDeviceCallback();
            } catch (IllegalStateException e) {
                // Fail silently as track state could have changed in between stop
                // and disabling routing callback
            }
        }
    }
3773 
    //--------------------------------------------------------------------------
    // (Re)Routing Info
    //--------------------
    /**
     * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
     * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)}
     * by an app to receive (re)routing notifications.
     */
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();

    // True while the track itself (rather than an app listener) keeps the native
    // device callback enabled; cleared by tryToDisableNativeRoutingCallback().
    @GuardedBy("mRoutingChangeListeners")
    private boolean mEnableSelfRoutingMonitor;
3788 
3789    /**
3790     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
3791     * changes on this AudioTrack.
3792     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
3793     * notifications of rerouting events.
3794     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3795     * the callback. If <code>null</code>, the {@link Handler} associated with the main
3796     * {@link Looper} will be used.
3797     */
3798     @Override
addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)3799     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
3800             Handler handler) {
3801         synchronized (mRoutingChangeListeners) {
3802             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
3803                 mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
3804                 mRoutingChangeListeners.put(
3805                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
3806                                 handler != null ? handler : new Handler(mInitializationLooper)));
3807             }
3808         }
3809     }
3810 
3811     /**
3812      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
3813      * to receive rerouting notifications.
3814      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
3815      * to remove.
3816      */
3817     @Override
removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)3818     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
3819         synchronized (mRoutingChangeListeners) {
3820             if (mRoutingChangeListeners.containsKey(listener)) {
3821                 mRoutingChangeListeners.remove(listener);
3822             }
3823             testDisableNativeRoutingCallbacksLocked();
3824         }
3825     }
3826 
    //--------------------------------------------------------------------------
    // (Re)Routing Info
    //--------------------
    /**
     * Defines the interface by which applications can receive notifications of
     * routing changes for the associated {@link AudioTrack}.
     *
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
        /**
         * Called when the routing of an AudioTrack changes from either an
         * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
         * retrieve the newly routed-to device.
         */
        public void onRoutingChanged(AudioTrack audioTrack);

        // Bridges the generic AudioRouting callback to the AudioTrack-typed one above.
        @Override
        default public void onRoutingChanged(AudioRouting router) {
            if (router instanceof AudioTrack) {
                onRoutingChanged((AudioTrack) router);
            }
        }
    }
3853 
    /**
     * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
     * on this AudioTrack.
     * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
     * of rerouting events.
     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
     * the callback. If <code>null</code>, the {@link Handler} associated with the main
     * {@link Looper} will be used.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // Delegate to the AudioRouting-based overload; the deprecated listener type
        // extends AudioRouting.OnRoutingChangedListener so the cast is always valid.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }
3870 
    /**
     * Removes an {@link OnRoutingChangedListener} which has been previously added
     * to receive rerouting notifications.
     * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // Delegate to the AudioRouting-based overload.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }
3882 
    /**
     * Sends device list change notification to all listeners.
     * Invoked from postEventFromNative() when the native layer reports a routing change.
     */
    private void broadcastRoutingChange() {
        // Invalidate the cached port list so getRoutedDevice() sees fresh data.
        AudioManager.resetAudioPortGeneration();
        baseUpdateDeviceId(getRoutedDevice());
        synchronized (mRoutingChangeListeners) {
            // Each delegate reposts to its registered Handler's thread.
            for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
                delegate.notifyClient();
            }
        }
    }
3895 
3896     //--------------------------------------------------------------------------
3897     // Codec notifications
3898     //--------------------
3899 
3900     // OnCodecFormatChangedListener notifications uses an instance
3901     // of ListenerList to manage its listeners.
3902 
3903     private final Utils.ListenerList<AudioMetadataReadMap> mCodecFormatChangedListeners =
3904             new Utils.ListenerList();
3905 
    /**
     * Interface definition for a listener for codec format changes.
     */
    public interface OnCodecFormatChangedListener {
        /**
         * Called when the compressed codec format changes.
         *
         * @param audioTrack is the {@code AudioTrack} instance associated with the codec.
         * @param info is a {@link AudioMetadataReadMap} of values which contains decoded format
         *     changes reported by the codec.  Not all hardware
         *     codecs indicate codec format changes. Acceptable keys are taken from
         *     {@code AudioMetadata.Format.KEY_*} range, with the associated value type.
         */
        void onCodecFormatChanged(
                @NonNull AudioTrack audioTrack, @Nullable AudioMetadataReadMap info);
    }
3922 
3923     /**
3924      * Adds an {@link OnCodecFormatChangedListener} to receive notifications of
3925      * codec format change events on this {@code AudioTrack}.
3926      *
3927      * @param executor  Specifies the {@link Executor} object to control execution.
3928      *
3929      * @param listener The {@link OnCodecFormatChangedListener} interface to receive
3930      *     notifications of codec events.
3931      */
addOnCodecFormatChangedListener( @onNull @allbackExecutor Executor executor, @NonNull OnCodecFormatChangedListener listener)3932     public void addOnCodecFormatChangedListener(
3933             @NonNull @CallbackExecutor Executor executor,
3934             @NonNull OnCodecFormatChangedListener listener) { // NPE checks done by ListenerList.
3935         mCodecFormatChangedListeners.add(
3936                 listener, /* key for removal */
3937                 executor,
3938                 (int eventCode, AudioMetadataReadMap readMap) -> {
3939                     // eventCode is unused by this implementation.
3940                     listener.onCodecFormatChanged(this, readMap);
3941                 }
3942         );
3943     }
3944 
    /**
     * Removes an {@link OnCodecFormatChangedListener} which has been previously added
     * to receive codec format change events.
     *
     * @param listener The previously added {@link OnCodecFormatChangedListener} interface
     * to remove.
     */
    public void removeOnCodecFormatChangedListener(
            @NonNull OnCodecFormatChangedListener listener) {
        mCodecFormatChangedListeners.remove(listener);  // NPE checks done by ListenerList.
    }
3956 
    //---------------------------------------------------------
    // Interface definitions
    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         */
        void onPeriodicNotification(AudioTrack track);
    }
3977 
    /**
     * Abstract class to receive event notifications about the stream playback in offloaded mode.
     * See {@link AudioTrack#registerStreamEventCallback(Executor, StreamEventCallback)} to register
     * the callback on the given {@link AudioTrack} instance.
     *
     * <p>All methods have empty default implementations so subclasses only override
     * the events they care about.
     */
    public abstract static class StreamEventCallback {
        /**
         * Called when an offloaded track is no longer valid and has been discarded by the system.
         * An example of this happening is when an offloaded track has been paused too long, and
         * gets invalidated by the system to prevent any other offload.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onTearDown(@NonNull AudioTrack track) { }
        /**
         * Called when all the buffers of an offloaded track that were queued in the audio system
         * (e.g. the combination of the Android audio framework and the device's audio hardware)
         * have been played after {@link AudioTrack#stop()} has been called.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onPresentationEnded(@NonNull AudioTrack track) { }
        /**
         * Called when more audio data can be written without blocking on an offloaded track.
         * @param track the {@link AudioTrack} on which the event happened.
         * @param sizeInFrames the number of frames available to write without blocking.
         *   Note that the frame size of a compressed stream is 1 byte.
         */
        public void onDataRequest(@NonNull AudioTrack track, @IntRange(from = 0) int sizeInFrames) {
        }
    }
4007 
4008     /**
4009      * Registers a callback for the notification of stream events.
4010      * This callback can only be registered for instances operating in offloaded mode
4011      * (see {@link AudioTrack.Builder#setOffloadedPlayback(boolean)} and
4012      * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} for
4013      * more details).
4014      * @param executor {@link Executor} to handle the callbacks.
4015      * @param eventCallback the callback to receive the stream event notifications.
4016      */
registerStreamEventCallback(@onNull @allbackExecutor Executor executor, @NonNull StreamEventCallback eventCallback)4017     public void registerStreamEventCallback(@NonNull @CallbackExecutor Executor executor,
4018             @NonNull StreamEventCallback eventCallback) {
4019         if (eventCallback == null) {
4020             throw new IllegalArgumentException("Illegal null StreamEventCallback");
4021         }
4022         if (!mOffloaded) {
4023             throw new IllegalStateException(
4024                     "Cannot register StreamEventCallback on non-offloaded AudioTrack");
4025         }
4026         if (executor == null) {
4027             throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback");
4028         }
4029         synchronized (mStreamEventCbLock) {
4030             // check if eventCallback already in list
4031             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
4032                 if (seci.mStreamEventCb == eventCallback) {
4033                     throw new IllegalArgumentException(
4034                             "StreamEventCallback already registered");
4035                 }
4036             }
4037             beginStreamEventHandling();
4038             mStreamEventCbInfoList.add(new StreamEventCbInfo(executor, eventCallback));
4039         }
4040     }
4041 
4042     /**
4043      * Unregisters the callback for notification of stream events, previously registered
4044      * with {@link #registerStreamEventCallback(Executor, StreamEventCallback)}.
4045      * @param eventCallback the callback to unregister.
4046      */
unregisterStreamEventCallback(@onNull StreamEventCallback eventCallback)4047     public void unregisterStreamEventCallback(@NonNull StreamEventCallback eventCallback) {
4048         if (eventCallback == null) {
4049             throw new IllegalArgumentException("Illegal null StreamEventCallback");
4050         }
4051         if (!mOffloaded) {
4052             throw new IllegalStateException("No StreamEventCallback on non-offloaded AudioTrack");
4053         }
4054         synchronized (mStreamEventCbLock) {
4055             StreamEventCbInfo seciToRemove = null;
4056             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
4057                 if (seci.mStreamEventCb == eventCallback) {
4058                     // ok to remove while iterating over list as we exit iteration
4059                     mStreamEventCbInfoList.remove(seci);
4060                     if (mStreamEventCbInfoList.size() == 0) {
4061                         endStreamEventHandling();
4062                     }
4063                     return;
4064                 }
4065             }
4066             throw new IllegalArgumentException("StreamEventCallback was not registered");
4067         }
4068     }
4069 
    //---------------------------------------------------------
    // Offload
    //--------------------
    // Immutable pairing of a registered StreamEventCallback with the Executor
    // that must run its notifications.
    private static class StreamEventCbInfo {
        final Executor mStreamEventExec;
        final StreamEventCallback mStreamEventCb;

        StreamEventCbInfo(Executor e, StreamEventCallback cb) {
            mStreamEventExec = e;
            mStreamEventCb = cb;
        }
    }
4082 
    // Guards mStreamEventCbInfoList and the handler thread lifecycle
    // (beginStreamEventHandling()/endStreamEventHandling()).
    private final Object mStreamEventCbLock = new Object();
    @GuardedBy("mStreamEventCbLock")
    @NonNull private LinkedList<StreamEventCbInfo> mStreamEventCbInfoList =
            new LinkedList<StreamEventCbInfo>();
    /**
     * Dedicated thread for handling the StreamEvent callbacks
     */
    private @Nullable HandlerThread mStreamEventHandlerThread;
    // volatile: read without the lock by handleStreamEventFromNative() on the
    // native callback thread.
    private @Nullable volatile StreamEventHandler mStreamEventHandler;
4092 
    /**
     * Called from native AudioTrack callback thread, filter messages if necessary
     * and repost event on AudioTrack message loop to prevent blocking native thread.
     * @param what event code received from native
     * @param arg optional argument for event (frame count for CAN_WRITE_MORE_DATA)
     */
    void handleStreamEventFromNative(int what, int arg) {
        // No handler means no registered stream callbacks; drop the event.
        if (mStreamEventHandler == null) {
            return;
        }
        switch (what) {
            case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                // replace previous CAN_WRITE_MORE_DATA messages with the latest value
                mStreamEventHandler.removeMessages(NATIVE_EVENT_CAN_WRITE_MORE_DATA);
                mStreamEventHandler.sendMessage(
                        mStreamEventHandler.obtainMessage(
                                NATIVE_EVENT_CAN_WRITE_MORE_DATA, arg, 0/*ignored*/));
                break;
            case NATIVE_EVENT_NEW_IAUDIOTRACK:
                mStreamEventHandler.sendMessage(
                        mStreamEventHandler.obtainMessage(NATIVE_EVENT_NEW_IAUDIOTRACK));
                break;
            case NATIVE_EVENT_STREAM_END:
                mStreamEventHandler.sendMessage(
                        mStreamEventHandler.obtainMessage(NATIVE_EVENT_STREAM_END));
                break;
        }
    }
4121 
    /**
     * Handler running on the dedicated stream-event thread. Updates play state on
     * stream end and dispatches events to the registered StreamEventCallbacks via
     * their Executors.
     */
    private class StreamEventHandler extends Handler {

        StreamEventHandler(Looper looper) {
            super(looper);
        }

        @Override
        public void handleMessage(Message msg) {
            final LinkedList<StreamEventCbInfo> cbInfoList;
            synchronized (mStreamEventCbLock) {
                if (msg.what == NATIVE_EVENT_STREAM_END) {
                    synchronized (mPlayStateLock) {
                        if (mPlayState == PLAYSTATE_STOPPING) {
                            if (mOffloadEosPending) {
                                // A play() raced with the pending EOS: restart playback.
                                native_start();
                                mPlayState = PLAYSTATE_PLAYING;
                            } else {
                                // Presentation complete: reset A/V sync state and stop.
                                mAvSyncHeader = null;
                                mAvSyncBytesRemaining = 0;
                                mPlayState = PLAYSTATE_STOPPED;
                            }
                            mOffloadEosPending = false;
                            // Wake any thread blocked waiting for the state transition.
                            mPlayStateLock.notify();
                        }
                    }
                }
                if (mStreamEventCbInfoList.size() == 0) {
                    return;
                }
                // Snapshot the list so callbacks run without holding mStreamEventCbLock.
                cbInfoList = new LinkedList<StreamEventCbInfo>(mStreamEventCbInfoList);
            }

            // Don't attribute the app callbacks to the native caller's identity.
            final long identity = Binder.clearCallingIdentity();
            try {
                for (StreamEventCbInfo cbi : cbInfoList) {
                    switch (msg.what) {
                        case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onDataRequest(AudioTrack.this, msg.arg1));
                            break;
                        case NATIVE_EVENT_NEW_IAUDIOTRACK:
                            // TODO also release track as it's not longer usable
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onTearDown(AudioTrack.this));
                            break;
                        case NATIVE_EVENT_STREAM_END:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onPresentationEnded(AudioTrack.this));
                            break;
                    }
                }
            } finally {
                Binder.restoreCallingIdentity(identity);
            }
        }
    }
4178 
4179     @GuardedBy("mStreamEventCbLock")
beginStreamEventHandling()4180     private void beginStreamEventHandling() {
4181         if (mStreamEventHandlerThread == null) {
4182             mStreamEventHandlerThread = new HandlerThread(TAG + ".StreamEvent");
4183             mStreamEventHandlerThread.start();
4184             final Looper looper = mStreamEventHandlerThread.getLooper();
4185             if (looper != null) {
4186                 mStreamEventHandler = new StreamEventHandler(looper);
4187             }
4188         }
4189     }
4190 
4191     @GuardedBy("mStreamEventCbLock")
endStreamEventHandling()4192     private void endStreamEventHandling() {
4193         if (mStreamEventHandlerThread != null) {
4194             mStreamEventHandlerThread.quit();
4195             mStreamEventHandlerThread = null;
4196         }
4197     }
4198 
4199     /**
4200      * Sets a {@link LogSessionId} instance to this AudioTrack for metrics collection.
4201      *
4202      * @param logSessionId a {@link LogSessionId} instance which is used to
4203      *        identify this object to the metrics service. Proper generated
4204      *        Ids must be obtained from the Java metrics service and should
4205      *        be considered opaque. Use
4206      *        {@link LogSessionId#LOG_SESSION_ID_NONE} to remove the
4207      *        logSessionId association.
4208      * @throws IllegalStateException if AudioTrack not initialized.
4209      *
4210      */
setLogSessionId(@onNull LogSessionId logSessionId)4211     public void setLogSessionId(@NonNull LogSessionId logSessionId) {
4212         Objects.requireNonNull(logSessionId);
4213         if (mState == STATE_UNINITIALIZED) {
4214             throw new IllegalStateException("track not initialized");
4215         }
4216         String stringId = logSessionId.getStringId();
4217         native_setLogSessionId(stringId);
4218         mLogSessionId = logSessionId;
4219     }
4220 
    /**
     * Returns the {@link LogSessionId} last set with {@link #setLogSessionId}.
     */
    @NonNull
    public LogSessionId getLogSessionId() {
        return mLogSessionId;
    }
4228 
4229     //---------------------------------------------------------
4230     // Inner classes
4231     //--------------------
    /**
     * Helper class to handle the forwarding of native events to the appropriate listener
     * (potentially) handled in a different thread.
     * Wraps an OnPlaybackPositionUpdateListener in a Handler bound either to the
     * caller-supplied Handler's looper or to the looper the AudioTrack was created in.
     */
    private class NativePositionEventHandlerDelegate {
        // Null when no usable Looper was available at construction time.
        private final Handler mHandler;

        NativePositionEventHandlerDelegate(final AudioTrack track,
                                   final OnPlaybackPositionUpdateListener listener,
                                   Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                        case NATIVE_EVENT_MARKER:
                            if (listener != null) {
                                listener.onMarkerReached(track);
                            }
                            break;
                        case NATIVE_EVENT_NEW_POS:
                            if (listener != null) {
                                listener.onPeriodicNotification(track);
                            }
                            break;
                        default:
                            loge("Unknown native event type: " + msg.what);
                            break;
                        }
                    }
                };
            } else {
                mHandler = null;
            }
        }

        // May return null; postEventFromNative() checks before posting.
        Handler getHandler() {
            return mHandler;
        }
    }
4286 
4287     //---------------------------------------------------------
4288     // Methods for IPlayer interface
4289     //--------------------
    // PlayerBase hook: forwards to the public play() entry point.
    @Override
    void playerStart() {
        play();
    }
4294 
    // PlayerBase hook: forwards to the public pause() entry point.
    @Override
    void playerPause() {
        pause();
    }
4299 
    // PlayerBase hook: forwards to the public stop() entry point.
    @Override
    void playerStop() {
        stop();
    }
4304 
4305     //---------------------------------------------------------
4306     // Java methods called from the native side
4307     //--------------------
    // Entry point for all events posted by the native layer. Resolves the weak
    // reference to the Java AudioTrack and dispatches by event type: routing
    // changes, codec format changes, offload stream events, and finally
    // position marker/period events via the position event delegate.
    @SuppressWarnings("unused")
    @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
    private static void postEventFromNative(Object audiotrack_ref,
            int what, int arg1, int arg2, Object obj) {
        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
        final AudioTrack track = (AudioTrack) ((WeakReference) audiotrack_ref).get();
        if (track == null) {
            // The Java object was garbage collected; nothing to notify.
            return;
        }

        if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
            track.broadcastRoutingChange();
            return;
        }

        if (what == NATIVE_EVENT_CODEC_FORMAT_CHANGE) {
            // obj carries the serialized metadata; decode with native byte order.
            ByteBuffer buffer = (ByteBuffer) obj;
            buffer.order(ByteOrder.nativeOrder());
            buffer.rewind();
            AudioMetadataReadMap audioMetaData = AudioMetadata.fromByteBuffer(buffer);
            if (audioMetaData == null) {
                Log.e(TAG, "Unable to get audio metadata from byte buffer");
                return;
            }
            track.mCodecFormatChangedListeners.notify(0 /* eventCode, unused */, audioMetaData);
            return;
        }

        if (what == NATIVE_EVENT_CAN_WRITE_MORE_DATA
                || what == NATIVE_EVENT_NEW_IAUDIOTRACK
                || what == NATIVE_EVENT_STREAM_END) {
            // Offload stream events are reposted to the dedicated handler thread.
            track.handleStreamEventFromNative(what, arg1);
            return;
        }

        // Remaining events (marker / new position) go to the position update listener,
        // if one was registered.
        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
        if (delegate != null) {
            Handler handler = delegate.getHandler();
            if (handler != null) {
                Message m = handler.obtainMessage(what, arg1, arg2, obj);
                handler.sendMessage(m);
            }
        }
    }
4352 
4353     //---------------------------------------------------------
4354     // Native methods called from the Java side
4355     //--------------------
4356 
    // JNI-bound declarations; implementations live in the native AudioTrack layer.
    // NOTE(review): semantics below are inferred from names/parameters where not
    // otherwise documented — confirm against the native implementation.

    private static native boolean native_is_direct_output_supported(int encoding, int sampleRate,
            int channelMask, int channelIndexMask, int contentType, int usage, int flags);

    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack,
            boolean offload, int encapsulationMode, Object tunerConfiguration,
            @NonNull String opPackageName);

    private native final void native_finalize();

    /**
     * @hide
     */
    @UnsupportedAppUsage
    public native final void native_release();

    // Transport control.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Data writes; the boolean flag selects blocking vs. non-blocking behavior.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(ByteBuffer audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    // Buffer geometry (frames).
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback rate / params.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Marker and periodic position-update notification configuration.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries — these do not require an initialized track instance.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effect attachment and send level.
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Routing and device callbacks.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    private native int native_applyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation);

    private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
    private native final int native_setPresentation(int presentationId, int programId);

    private native int native_getPortId();

    // Offload / codec-specific configuration.
    private native void native_set_delay_padding(int delayInFrames, int paddingInFrames);

    private native int native_set_audio_description_mix_level_db(float level);
    private native int native_get_audio_description_mix_level_db(float[] level);
    private native int native_set_dual_mono_mode(int dualMonoMode);
    private native int native_get_dual_mono_mode(int[] dualMonoMode);
    private native void native_setLogSessionId(@Nullable String logSessionId);
    private native int native_setStartThresholdInFrames(int startThresholdInFrames);
    private native int native_getStartThresholdInFrames();

    /**
     * Sets the audio service Player Interface Id.
     *
     * The playerIId does not change over the lifetime of the client
     * Java AudioTrack and is set automatically on creation.
     *
     * This call informs the native AudioTrack for metrics logging purposes.
     *
     * @param id the value reported by AudioManager when registering the track.
     *           A value of -1 indicates invalid - the playerIId was never set.
     * @throws IllegalStateException if AudioTrack not initialized.
     */
    private native void native_setPlayerIId(int playerIId);
4481 
4482     //---------------------------------------------------------
4483     // Utility methods
4484     //------------------
4485 
    // Convenience wrapper: debug-level log line under this class's TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
4489 
    // Convenience wrapper: error-level log line under this class's TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
4493 
    /**
     * Key constants for interpreting the {@link AudioTrack#getMetrics} return bundle.
     * Non-instantiable constants holder.
     */
    public final static class MetricsConstants
    {
        private MetricsConstants() {}

        // MM_PREFIX is slightly different than TAG, used to avoid cut-n-paste errors.
        private static final String MM_PREFIX = "android.media.audiotrack.";

        /**
         * Key to extract the stream type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * This value may not exist in API level {@link android.os.Build.VERSION_CODES#P}.
         * The value is a {@code String}.
         */
        public static final String STREAMTYPE = MM_PREFIX + "streamtype";

        /**
         * Key to extract the attribute content type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String CONTENTTYPE = MM_PREFIX + "type";

        /**
         * Key to extract the attribute usage for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String USAGE = MM_PREFIX + "usage";

        /**
         * Key to extract the sample rate for this track in Hz
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is an {@code int}.
         * @deprecated This does not work. Use {@link AudioTrack#getSampleRate()} instead.
         */
        // NOTE(review): value uses the "audiorecord" prefix, not MM_PREFIX — consistent
        // with the @deprecated "does not work" warning; kept verbatim as it is public API.
        @Deprecated
        public static final String SAMPLERATE = "android.media.audiorecord.samplerate";

        /**
         * Key to extract the native channel mask information for this track
         * from the {@link AudioTrack#getMetrics} return value.
         *
         * The value is a {@code long}.
         * @deprecated This does not work. Use {@link AudioTrack#getFormat()} and read from
         * the returned format instead.
         */
        // NOTE(review): same mismatched "audiorecord" prefix as SAMPLERATE; kept verbatim.
        @Deprecated
        public static final String CHANNELMASK = "android.media.audiorecord.channelmask";

        /**
         * Use for testing only. Do not expose.
         * The current sample rate.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String SAMPLE_RATE = MM_PREFIX + "sampleRate";

        /**
         * Use for testing only. Do not expose.
         * The native channel mask.
         * The value is a {@code long}.
         * @hide
         */
        @TestApi
        public static final String CHANNEL_MASK = MM_PREFIX + "channelMask";

        /**
         * Use for testing only. Do not expose.
         * The output audio data encoding.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ENCODING = MM_PREFIX + "encoding";

        /**
         * Use for testing only. Do not expose.
         * The port id of this track port in audioserver.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String PORT_ID = MM_PREFIX + "portId";

        /**
         * Use for testing only. Do not expose.
         * The buffer frameCount.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String FRAME_COUNT = MM_PREFIX + "frameCount";

        /**
         * Use for testing only. Do not expose.
         * The actual track attributes used.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ATTRIBUTES = MM_PREFIX + "attributes";
    }
4597 }
4598