• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import android.annotation.CallbackExecutor;
20 import android.annotation.FloatRange;
21 import android.annotation.IntDef;
22 import android.annotation.IntRange;
23 import android.annotation.NonNull;
24 import android.annotation.Nullable;
25 import android.annotation.RequiresPermission;
26 import android.annotation.SystemApi;
27 import android.annotation.TestApi;
28 import android.compat.annotation.UnsupportedAppUsage;
29 import android.os.Binder;
30 import android.os.Handler;
31 import android.os.HandlerThread;
32 import android.os.Looper;
33 import android.os.Message;
34 import android.os.PersistableBundle;
35 import android.util.ArrayMap;
36 import android.util.Log;
37 
38 import com.android.internal.annotations.GuardedBy;
39 
40 import java.lang.annotation.Retention;
41 import java.lang.annotation.RetentionPolicy;
42 import java.lang.ref.WeakReference;
43 import java.nio.ByteBuffer;
44 import java.nio.ByteOrder;
45 import java.nio.NioUtils;
46 import java.util.LinkedList;
47 import java.util.concurrent.Executor;
48 
49 /**
50  * The AudioTrack class manages and plays a single audio resource for Java applications.
51  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
52  * achieved by "pushing" the data to the AudioTrack object using one of the
53  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
54  *  and {@link #write(float[], int, int, int)} methods.
55  *
56  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
57  * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
58  * one of the {@code write()} methods. These are blocking and return when the data has been
59  * transferred from the Java layer to the native layer and queued for playback. The streaming
60  * mode is most useful when playing blocks of audio data that for instance are:
61  *
62  * <ul>
63  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
64  *   <li>too big to fit in memory because of the characteristics of the audio data
65  *         (high sampling rate, bits per sample ...)</li>
66  *   <li>received or generated while previously queued audio is playing.</li>
67  * </ul>
68  *
69  * The static mode should be chosen when dealing with short sounds that fit in memory and
70  * that need to be played with the smallest latency possible. The static mode will
71  * therefore be preferred for UI and game sounds that are played often, and with the
72  * smallest overhead possible.
73  *
74  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
75  * The size of this buffer, specified during the construction, determines how long an AudioTrack
76  * can play before running out of data.<br>
77  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
78  * be played from it.<br>
79  * For the streaming mode, data will be written to the audio sink in chunks of
80  * sizes less than or equal to the total buffer size.
81  *
82  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
83  */
84 public class AudioTrack extends PlayerBase
85                         implements AudioRouting
86                                  , VolumeAutomation
87 {
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    // Play states. The public values deliberately mirror the OpenSL ES SL_PLAYSTATE_*
    // constants; the two @hide values below extend the state machine for offload mode only.
    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
    /**
      * @hide
      * indicates AudioTrack state is stopping waiting for NATIVE_EVENT_STREAM_END to
      * transition to PLAYSTATE_STOPPED.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_STOPPING = 4;
    /**
      * @hide
      * indicates AudioTrack state is paused from stopping state. Will transition to
      * PLAYSTATE_STOPPING if play() is called.
      * Only valid for offload mode.
      */
    private static final int PLAYSTATE_PAUSED_STOPPING = 5;
120 
    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide Restricts an int parameter to one of the MODE_* transfer-mode constants. */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    // Initialization states, as reported by getState().
    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;
154 
    // Result codes. The public values alias (and must stay equal to) the AudioSystem codes.
    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    // These private codes are reported by the native setup path only.
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    // Note: To avoid collisions with other event constants,
    // do not define an event here that is the same value as
    // AudioSystem.NATIVE_EVENT_ROUTING_CHANGE.
    // The values are intentionally non-contiguous; they mirror the native enum.

    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;
    /**
     * Callback for more data
     */
    private static final int NATIVE_EVENT_CAN_WRITE_MORE_DATA = 9;
    /**
     * IAudioTrack tear down for offloaded tracks
     * TODO: when received, java AudioTrack must be released
     */
    private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6;
    /**
     * Event id denotes when all the buffers queued in AF and HW are played
     * back (after stop is called) for an offloaded track.
     */
    private static final int NATIVE_EVENT_STREAM_END = 7;
    /**
     * Event id denotes when the codec format changes.
     *
     * Note: Similar to a device routing change (AudioSystem.NATIVE_EVENT_ROUTING_CHANGE),
     * this event comes from the AudioFlinger Thread / Output Stream management
     * (not from buffer indications as above).
     */
    private static final int NATIVE_EVENT_CODEC_FORMAT_CHANGE = 100;

    private final static String TAG = "android.media.AudioTrack";
229 
    /** @hide Restricts an int parameter to one of the public ENCAPSULATION_MODE_* values. */
    @IntDef({
        ENCAPSULATION_MODE_NONE,
        ENCAPSULATION_MODE_ELEMENTARY_STREAM,
        // ENCAPSULATION_MODE_HANDLE, @SystemApi -- deliberately excluded from the public IntDef
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMode {}

    // Important: The ENCAPSULATION_MODE values must be kept in sync with native header files.
    /**
     * This mode indicates no metadata encapsulation,
     * which is the default mode for sending audio data
     * through {@code AudioTrack}.
     */
    public static final int ENCAPSULATION_MODE_NONE = 0;
    /**
     * This mode indicates metadata encapsulation with an elementary stream payload.
     * Both compressed and PCM formats are allowed.
     */
    public static final int ENCAPSULATION_MODE_ELEMENTARY_STREAM = 1;
    /**
     * This mode indicates metadata encapsulation with a handle payload
     * and is set through {@link Builder#setEncapsulationMode(int)}.
     * The handle is a 64 bit long, provided by the Tuner API
     * in {@link android.os.Build.VERSION_CODES#R}.
     * @hide
     */
    @SystemApi
    @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
    public static final int ENCAPSULATION_MODE_HANDLE = 2;

    /* Enumeration of metadata types permitted for use by
     * encapsulation mode audio streams.
     */
    /** @hide */
    @IntDef(prefix = { "ENCAPSULATION_METADATA_TYPE_" }, value = {
        ENCAPSULATION_METADATA_TYPE_NONE, /* reserved */
        ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER,
        ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface EncapsulationMetadataType {}

    /**
     * Reserved; do not use.
     * @hide
     */
    public static final int ENCAPSULATION_METADATA_TYPE_NONE = 0; // reserved

    /**
     * Encapsulation metadata type for framework tuner information.
     *
     * Refer to the Android Media TV Tuner API for details.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER = 1;

    /**
     * Encapsulation metadata type for DVB AD descriptor.
     *
     * This metadata is formatted per ETSI TS 101 154 Table E.1: AD_descriptor.
     */
    public static final int ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR = 2;
293 
    /* Dual Mono handling is used when a stereo audio stream
     * contains separate audio content on the left and right channels.
     * Such information about the content of the stream may be found, for example, in
     * ITU T-REC-J.94-201610 A.6.2.3 Component descriptor.
     */
    /** @hide Restricts an int parameter to one of the DUAL_MONO_MODE_* constants. */
    @IntDef({
        DUAL_MONO_MODE_OFF,
        DUAL_MONO_MODE_LR,
        DUAL_MONO_MODE_LL,
        DUAL_MONO_MODE_RR,
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface DualMonoMode {}
    // Important: The DUAL_MONO_MODE values must be kept in sync with native header files.
    /**
     * This mode disables any Dual Mono presentation effect.
     * This is the default behavior.
     */
    public static final int DUAL_MONO_MODE_OFF = 0;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left and right audio channels blended together
     * and delivered to both channels.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that the left-right stereo symmetric
     * channels are pairwise blended;
     * the other channels such as center are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LR = 1;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the left audio channel replicated into the right audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the left channel position replicated
     * into the right channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_LL = 2;

    /**
     * This mode indicates that a stereo stream should be presented
     * with the right audio channel replicated into the left audio channel.
     *
     * Behavior for non-stereo streams is implementation defined.
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the right channel position replicated
     * into the left channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_RR = 3;
358 
    /** @hide Restricts an int parameter to WRITE_BLOCKING or WRITE_NON_BLOCKING. */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;
385 
    /** @hide Restricts an int parameter to one of the PERFORMANCE_MODE_* constants. */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}

    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
     * If the device supports it, this mode
     * enables a lower latency path through to the audio output sink.
     * Effects may no longer work with such an {@code AudioTrack} and
     * the sample rate must match that of the output sink.
     * <p>
     * Applications should be aware that low latency requires careful
     * buffer management, with smaller chunks of audio data written by each
     * {@code write()} call.
     * <p>
     * If this flag is used without specifying a {@code bufferSizeInBytes} then the
     * {@code AudioTrack}'s actual buffer size may be too small.
     * It is recommended that a fairly
     * large buffer should be specified when the {@code AudioTrack} is created.
     * Then the actual size can be reduced by calling
     * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
     * by lowering it after each {@code write()} call until the audio glitches,
     * which is detected by calling
     * {@link #getUnderrunCount()}. Then the buffer size can be increased
     * until there are no glitches.
     * This tuning step should be done while playing silence.
     * This technique provides a compromise between latency and glitch rate.
     */
    public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;

    /**
     * Power saving performance mode for an {@link AudioTrack}.
     * If the device supports it, this
     * mode will enable a lower power path to the audio output sink.
     * In addition, this lower power path typically will have
     * deeper internal buffers and better underrun resistance,
     * with a tradeoff of higher latency.
     * <p>
     * In this mode, applications should attempt to use a larger buffer size
     * and deliver larger chunks of audio data per {@code write()} call.
     * Use {@link #getBufferSizeInFrames()} to determine
     * the actual buffer size of the {@code AudioTrack} as it may have increased
     * to accommodate a deeper buffer.
     */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // keep in sync with system/media/audio/include/system/audio-base.h
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;

    // Size of HW_AV_SYNC track AV header.
    // NOTE(review): declared as float, presumably to force floating-point arithmetic at
    // its use sites (not visible in this chunk) -- confirm before changing the type.
    private static final float HEADER_V2_SIZE_BYTES = 20.0f;
448 
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     * (In offload mode this presumably may also hold the hidden PLAYSTATE_STOPPING /
     * PLAYSTATE_PAUSED_STOPPING values declared above -- confirm against the full file.)
     */
    private int mPlayState = PLAYSTATE_STOPPED;

    /**
     * Indicates that we are expecting an end of stream callback following a call
     * to setOffloadEndOfStream() in a gapless track transition context. The native track
     * will be restarted automatically.
     */
    private boolean mOffloadEosPending = false;

    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
502 
    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    @UnsupportedAppUsage
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * The AudioAttributes used in configuration.
     */
    private AudioAttributes mConfiguredAudioAttributes;
    /**
     * Audio session ID; defaults to a system-generated session id.
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;
    /**
     * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
     */
    private int mOffset = 0;
    /**
     * Indicates whether the track is intended to play in offload mode.
     */
    private boolean mOffloaded = false;
    /**
     * When offloaded track: delay for decoder in frames
     */
    private int mOffloadDelayFrames = 0;
    /**
     * When offloaded track: padding for decoder in frames
     */
    private int mOffloadPaddingFrames = 0;
566 
    //--------------------------------
    // Used exclusively by native code
    //--------------------
    // Do not rename these fields: native methods access them, so JNI must be able to
    // look them up by their exact names.
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    private long mJniData;
584 
585 
586     //--------------------------------------------------------------------------
587     // Constructor, Finalize
588     //--------------------
589     /**
590      * Class constructor.
591      * @param streamType the type of the audio stream. See
592      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
593      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
594      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
595      * @param sampleRateInHz the initial source sample rate expressed in Hz.
596      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
597      *   which is usually the sample rate of the sink.
598      *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
599      * @param channelConfig describes the configuration of the audio channels.
600      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
601      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
602      * @param audioFormat the format in which the audio data is represented.
603      *   See {@link AudioFormat#ENCODING_PCM_16BIT},
604      *   {@link AudioFormat#ENCODING_PCM_8BIT},
605      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
606      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
607      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
608      *   <p> If the track's creation mode is {@link #MODE_STATIC},
609      *   this is the maximum length sample, or audio clip, that can be played by this instance.
610      *   <p> If the track's creation mode is {@link #MODE_STREAM},
611      *   this should be the desired buffer size
612      *   for the <code>AudioTrack</code> to satisfy the application's
613      *   latency requirements.
614      *   If <code>bufferSizeInBytes</code> is less than the
615      *   minimum buffer size for the output sink, it is increased to the minimum
616      *   buffer size.
617      *   The method {@link #getBufferSizeInFrames()} returns the
618      *   actual size in frames of the buffer created, which
619      *   determines the minimum frequency to write
620      *   to the streaming <code>AudioTrack</code> to avoid underrun.
621      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
622      *   for an AudioTrack instance in streaming mode.
623      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
624      * @throws java.lang.IllegalArgumentException
625      * @deprecated use {@link Builder} or
626      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
627      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
628      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode)629     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
630             int bufferSizeInBytes, int mode)
631     throws IllegalArgumentException {
632         this(streamType, sampleRateInHz, channelConfig, audioFormat,
633                 bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
634     }
635 
636     /**
637      * Class constructor with audio session. Use this constructor when the AudioTrack must be
638      * attached to a particular audio session. The primary use of the audio session ID is to
639      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
640      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
641      * and media players in the same session and not to the output mix.
642      * When an AudioTrack is created without specifying a session, it will create its own session
643      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
644      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
645      * session
646      * with all other media players or audio tracks in the same session, otherwise a new session
647      * will be created for this track if none is supplied.
648      * @param streamType the type of the audio stream. See
649      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
650      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
651      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
652      * @param sampleRateInHz the initial source sample rate expressed in Hz.
653      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
654      *   which is usually the sample rate of the sink.
655      * @param channelConfig describes the configuration of the audio channels.
656      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
657      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
662      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
663      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
664      *   <p> If the track's creation mode is {@link #MODE_STATIC},
665      *   this is the maximum length sample, or audio clip, that can be played by this instance.
666      *   <p> If the track's creation mode is {@link #MODE_STREAM},
667      *   this should be the desired buffer size
668      *   for the <code>AudioTrack</code> to satisfy the application's
669      *   latency requirements.
670      *   If <code>bufferSizeInBytes</code> is less than the
671      *   minimum buffer size for the output sink, it is increased to the minimum
672      *   buffer size.
673      *   The method {@link #getBufferSizeInFrames()} returns the
674      *   actual size in frames of the buffer created, which
675      *   determines the minimum frequency to write
676      *   to the streaming <code>AudioTrack</code> to avoid underrun.
677      *   You can write data into this buffer in smaller chunks than this size.
678      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
679      *   for an AudioTrack instance in streaming mode.
680      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
681      * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException if the parameters don't resolve to valid data
     *   and usages.
683      * @deprecated use {@link Builder} or
684      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
685      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
686      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode, int sessionId)687     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
688             int bufferSizeInBytes, int mode, int sessionId)
689     throws IllegalArgumentException {
690         // mState already == STATE_UNINITIALIZED
691         this((new AudioAttributes.Builder())
692                     .setLegacyStreamType(streamType)
693                     .build(),
694                 (new AudioFormat.Builder())
695                     .setChannelMask(channelConfig)
696                     .setEncoding(audioFormat)
697                     .setSampleRate(sampleRateInHz)
698                     .build(),
699                 bufferSizeInBytes,
700                 mode, sessionId);
701         deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
702     }
703 
704     /**
705      * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
706      * @param attributes a non-null {@link AudioAttributes} instance.
707      * @param format a non-null {@link AudioFormat} instance describing the format of the data
708      *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
709      *     configuring the audio format parameters such as encoding, channel mask and sample rate.
710      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
711      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
712      *   <p> If the track's creation mode is {@link #MODE_STATIC},
713      *   this is the maximum length sample, or audio clip, that can be played by this instance.
714      *   <p> If the track's creation mode is {@link #MODE_STREAM},
715      *   this should be the desired buffer size
716      *   for the <code>AudioTrack</code> to satisfy the application's
717      *   latency requirements.
718      *   If <code>bufferSizeInBytes</code> is less than the
719      *   minimum buffer size for the output sink, it is increased to the minimum
720      *   buffer size.
721      *   The method {@link #getBufferSizeInFrames()} returns the
722      *   actual size in frames of the buffer created, which
723      *   determines the minimum frequency to write
724      *   to the streaming <code>AudioTrack</code> to avoid underrun.
725      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
726      *   for an AudioTrack instance in streaming mode.
727      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
728      * @param sessionId ID of audio session the AudioTrack must be attached to, or
729      *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
730      *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
731      *   construction.
     * @throws IllegalArgumentException if the parameters don't resolve to valid data and usages.
733      */
AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId)734     public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
735             int mode, int sessionId)
736                     throws IllegalArgumentException {
737         this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/,
738                 ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
739     }
740 
AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId, boolean offload, int encapsulationMode, @Nullable TunerConfiguration tunerConfiguration)741     private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
742             int mode, int sessionId, boolean offload, int encapsulationMode,
743             @Nullable TunerConfiguration tunerConfiguration)
744                     throws IllegalArgumentException {
745         super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
746         // mState already == STATE_UNINITIALIZED
747 
748         mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.
749 
750         if (format == null) {
751             throw new IllegalArgumentException("Illegal null AudioFormat");
752         }
753 
754         // Check if we should enable deep buffer mode
755         if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
756             mAttributes = new AudioAttributes.Builder(mAttributes)
757                 .replaceFlags((mAttributes.getAllFlags()
758                         | AudioAttributes.FLAG_DEEP_BUFFER)
759                         & ~AudioAttributes.FLAG_LOW_LATENCY)
760                 .build();
761         }
762 
763         // remember which looper is associated with the AudioTrack instantiation
764         Looper looper;
765         if ((looper = Looper.myLooper()) == null) {
766             looper = Looper.getMainLooper();
767         }
768 
769         int rate = format.getSampleRate();
770         if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
771             rate = 0;
772         }
773 
774         int channelIndexMask = 0;
775         if ((format.getPropertySetMask()
776                 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
777             channelIndexMask = format.getChannelIndexMask();
778         }
779         int channelMask = 0;
780         if ((format.getPropertySetMask()
781                 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
782             channelMask = format.getChannelMask();
783         } else if (channelIndexMask == 0) { // if no masks at all, use stereo
784             channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
785                     | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
786         }
787         int encoding = AudioFormat.ENCODING_DEFAULT;
788         if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
789             encoding = format.getEncoding();
790         }
791         audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
792         mOffloaded = offload;
793         mStreamType = AudioSystem.STREAM_DEFAULT;
794 
795         audioBuffSizeCheck(bufferSizeInBytes);
796 
797         mInitializationLooper = looper;
798 
799         if (sessionId < 0) {
800             throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
801         }
802 
803         int[] sampleRate = new int[] {mSampleRate};
804         int[] session = new int[1];
805         session[0] = sessionId;
806         // native initialization
807         int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
808                 sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
809                 mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
810                 offload, encapsulationMode, tunerConfiguration,
811                 getCurrentOpPackageName());
812         if (initResult != SUCCESS) {
813             loge("Error code "+initResult+" when initializing AudioTrack.");
814             return; // with mState == STATE_UNINITIALIZED
815         }
816 
817         mSampleRate = sampleRate[0];
818         mSessionId = session[0];
819 
820         // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.
821 
822         if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
823             int frameSizeInBytes;
824             if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
825                 frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
826             } else {
827                 frameSizeInBytes = 1;
828             }
829             mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
830         }
831 
832         if (mDataLoadMode == MODE_STATIC) {
833             mState = STATE_NO_STATIC_DATA;
834         } else {
835             mState = STATE_INITIALIZED;
836         }
837 
838         baseRegisterPlayer();
839     }
840 
841     /**
842      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
843      * the AudioTrackRoutingProxy subclass.
844      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
845      * (associated with an OpenSL ES player).
846      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
847      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
848      * it means that the OpenSL player interface hasn't been realized, so there is no native
     * AudioTrack to connect to. In this case wait to call deferred_connect() until the
850      * OpenSLES interface is realized.
851      */
AudioTrack(long nativeTrackInJavaObj)852     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
853         super(new AudioAttributes.Builder().build(),
854                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
855         // "final"s
856         mNativeTrackInJavaObj = 0;
857         mJniData = 0;
858 
859         // remember which looper is associated with the AudioTrack instantiation
860         Looper looper;
861         if ((looper = Looper.myLooper()) == null) {
862             looper = Looper.getMainLooper();
863         }
864         mInitializationLooper = looper;
865 
866         // other initialization...
867         if (nativeTrackInJavaObj != 0) {
868             baseRegisterPlayer();
869             deferred_connect(nativeTrackInJavaObj);
870         } else {
871             mState = STATE_UNINITIALIZED;
872         }
873     }
874 
875     /**
876      * @hide
877      */
878     @UnsupportedAppUsage
deferred_connect(long nativeTrackInJavaObj)879     /* package */ void deferred_connect(long nativeTrackInJavaObj) {
880         if (mState != STATE_INITIALIZED) {
881             // Note that for this native_setup, we are providing an already created/initialized
882             // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
883             int[] session = { 0 };
884             int[] rates = { 0 };
885             int initResult = native_setup(new WeakReference<AudioTrack>(this),
886                     null /*mAttributes - NA*/,
887                     rates /*sampleRate - NA*/,
888                     0 /*mChannelMask - NA*/,
889                     0 /*mChannelIndexMask - NA*/,
890                     0 /*mAudioFormat - NA*/,
891                     0 /*mNativeBufferSizeInBytes - NA*/,
892                     0 /*mDataLoadMode - NA*/,
893                     session,
894                     nativeTrackInJavaObj,
895                     false /*offload*/,
896                     ENCAPSULATION_MODE_NONE,
897                     null /* tunerConfiguration */,
898                     "" /* opPackagename */);
899             if (initResult != SUCCESS) {
900                 loge("Error code "+initResult+" when initializing AudioTrack.");
901                 return; // with mState == STATE_UNINITIALIZED
902             }
903 
904             mSessionId = session[0];
905 
906             mState = STATE_INITIALIZED;
907         }
908     }
909 
910     /**
911      * TunerConfiguration is used to convey tuner information
912      * from the android.media.tv.Tuner API to AudioTrack construction.
913      *
914      * Use the Builder to construct the TunerConfiguration object,
915      * which is then used by the {@link AudioTrack.Builder} to create an AudioTrack.
916      * @hide
917      */
918     @SystemApi
919     public static class TunerConfiguration {
920         private final int mContentId;
921         private final int mSyncId;
922 
923         /**
924          * Constructs a TunerConfiguration instance for use in {@link AudioTrack.Builder}
925          *
926          * @param contentId selects the audio stream to use.
927          *     The contentId may be obtained from
928          *     {@link android.media.tv.tuner.filter.Filter#getId()}.
929          *     This is always a positive number.
930          * @param syncId selects the clock to use for synchronization
931          *     of audio with other streams such as video.
932          *     The syncId may be obtained from
933          *     {@link android.media.tv.tuner.Tuner#getAvSyncHwId()}.
934          *     This is always a positive number.
935          */
936         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
TunerConfiguration( @ntRangefrom = 1) int contentId, @IntRange(from = 1)int syncId)937         public TunerConfiguration(
938                 @IntRange(from = 1) int contentId, @IntRange(from = 1)int syncId) {
939             if (contentId < 1) {
940                 throw new IllegalArgumentException(
941                         "contentId " + contentId + " must be positive");
942             }
943             if (syncId < 1) {
944                 throw new IllegalArgumentException("syncId " + syncId + " must be positive");
945             }
946             mContentId = contentId;
947             mSyncId = syncId;
948         }
949 
950         /**
951          * Returns the contentId.
952          */
953         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getContentId()954         public @IntRange(from = 1) int getContentId() {
955             return mContentId; // The Builder ensures this is > 0.
956         }
957 
958         /**
959          * Returns the syncId.
960          */
961         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
getSyncId()962         public @IntRange(from = 1) int getSyncId() {
963             return mSyncId;  // The Builder ensures this is > 0.
964         }
965     }
966 
967     /**
968      * Builder class for {@link AudioTrack} objects.
969      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
970      * attributes and audio format parameters, you indicate which of those vary from the default
971      * behavior on the device.
972      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
973      * parameters, to be used by a new <code>AudioTrack</code> instance:
974      *
975      * <pre class="prettyprint">
976      * AudioTrack player = new AudioTrack.Builder()
977      *         .setAudioAttributes(new AudioAttributes.Builder()
978      *                  .setUsage(AudioAttributes.USAGE_ALARM)
979      *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
980      *                  .build())
981      *         .setAudioFormat(new AudioFormat.Builder()
982      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
983      *                 .setSampleRate(44100)
984      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
985      *                 .build())
986      *         .setBufferSizeInBytes(minBuffSize)
987      *         .build();
988      * </pre>
989      * <p>
990      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
991      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
992      * <br>If the audio format is not specified or is incomplete, its channel configuration will be
993      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
994      * {@link AudioFormat#ENCODING_PCM_16BIT}.
995      * The sample rate will depend on the device actually selected for playback and can be queried
996      * with {@link #getSampleRate()} method.
997      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
998      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
999      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
1000      * <code>MODE_STREAM</code> will be used.
1001      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
1002      * be generated.
1003      * <br>Offload is false by default.
1004      */
1005     public static class Builder {
1006         private AudioAttributes mAttributes;
1007         private AudioFormat mFormat;
1008         private int mBufferSizeInBytes;
1009         private int mEncapsulationMode = ENCAPSULATION_MODE_NONE;
1010         private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
1011         private int mMode = MODE_STREAM;
1012         private int mPerformanceMode = PERFORMANCE_MODE_NONE;
1013         private boolean mOffload = false;
1014         private TunerConfiguration mTunerConfiguration;
1015 
1016         /**
1017          * Constructs a new Builder with the default values as described above.
1018          */
Builder()1019         public Builder() {
1020         }
1021 
1022         /**
1023          * Sets the {@link AudioAttributes}.
1024          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
1025          *     data to be played.
1026          * @return the same Builder instance.
1027          * @throws IllegalArgumentException
1028          */
setAudioAttributes(@onNull AudioAttributes attributes)1029         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
1030                 throws IllegalArgumentException {
1031             if (attributes == null) {
1032                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1033             }
1034             // keep reference, we only copy the data when building
1035             mAttributes = attributes;
1036             return this;
1037         }
1038 
1039         /**
1040          * Sets the format of the audio data to be played by the {@link AudioTrack}.
1041          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
1042          * as encoding, channel mask and sample rate.
1043          * @param format a non-null {@link AudioFormat} instance.
1044          * @return the same Builder instance.
1045          * @throws IllegalArgumentException
1046          */
setAudioFormat(@onNull AudioFormat format)1047         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
1048                 throws IllegalArgumentException {
1049             if (format == null) {
1050                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
1051             }
1052             // keep reference, we only copy the data when building
1053             mFormat = format;
1054             return this;
1055         }
1056 
1057         /**
1058          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
1059          * If using the {@link AudioTrack} in streaming mode
1060          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
1061          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
1062          * the estimated minimum buffer size for the creation of an AudioTrack instance
1063          * in streaming mode.
1064          * <br>If using the <code>AudioTrack</code> in static mode (see
1065          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
1066          * played by this instance.
1067          * @param bufferSizeInBytes
1068          * @return the same Builder instance.
1069          * @throws IllegalArgumentException
1070          */
setBufferSizeInBytes(@ntRangefrom = 0) int bufferSizeInBytes)1071         public @NonNull Builder setBufferSizeInBytes(@IntRange(from = 0) int bufferSizeInBytes)
1072                 throws IllegalArgumentException {
1073             if (bufferSizeInBytes <= 0) {
1074                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
1075             }
1076             mBufferSizeInBytes = bufferSizeInBytes;
1077             return this;
1078         }
1079 
1080         /**
1081          * Sets the encapsulation mode.
1082          *
1083          * Encapsulation mode allows metadata to be sent together with
1084          * the audio data payload in a {@code ByteBuffer}.
1085          * This requires a compatible hardware audio codec.
1086          *
1087          * @param encapsulationMode one of {@link AudioTrack#ENCAPSULATION_MODE_NONE},
1088          *        or {@link AudioTrack#ENCAPSULATION_MODE_ELEMENTARY_STREAM}.
1089          * @return the same Builder instance.
1090          */
1091         // Note: with the correct permission {@code AudioTrack#ENCAPSULATION_MODE_HANDLE}
1092         // may be used as well.
setEncapsulationMode(@ncapsulationMode int encapsulationMode)1093         public @NonNull Builder setEncapsulationMode(@EncapsulationMode int encapsulationMode) {
1094             switch (encapsulationMode) {
1095                 case ENCAPSULATION_MODE_NONE:
1096                 case ENCAPSULATION_MODE_ELEMENTARY_STREAM:
1097                 case ENCAPSULATION_MODE_HANDLE:
1098                     mEncapsulationMode = encapsulationMode;
1099                     break;
1100                 default:
1101                     throw new IllegalArgumentException(
1102                             "Invalid encapsulation mode " + encapsulationMode);
1103             }
1104             return this;
1105         }
1106 
1107         /**
1108          * Sets the mode under which buffers of audio data are transferred from the
1109          * {@link AudioTrack} to the framework.
1110          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
1111          * @return the same Builder instance.
1112          * @throws IllegalArgumentException
1113          */
setTransferMode(@ransferMode int mode)1114         public @NonNull Builder setTransferMode(@TransferMode int mode)
1115                 throws IllegalArgumentException {
1116             switch(mode) {
1117                 case MODE_STREAM:
1118                 case MODE_STATIC:
1119                     mMode = mode;
1120                     break;
1121                 default:
1122                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
1123             }
1124             return this;
1125         }
1126 
1127         /**
1128          * Sets the session ID the {@link AudioTrack} will be attached to.
1129          * @param sessionId a strictly positive ID number retrieved from another
1130          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
1131          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
1132          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
1133          * @return the same Builder instance.
1134          * @throws IllegalArgumentException
1135          */
setSessionId(@ntRangefrom = 1) int sessionId)1136         public @NonNull Builder setSessionId(@IntRange(from = 1) int sessionId)
1137                 throws IllegalArgumentException {
1138             if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
1139                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
1140             }
1141             mSessionId = sessionId;
1142             return this;
1143         }
1144 
1145         /**
1146          * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
1147          * may not be supported by the particular device, and the framework is free
1148          * to ignore such request if it is incompatible with other requests or hardware.
1149          *
1150          * @param performanceMode one of
1151          * {@link AudioTrack#PERFORMANCE_MODE_NONE},
1152          * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
1153          * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
1154          * @return the same Builder instance.
1155          * @throws IllegalArgumentException if {@code performanceMode} is not valid.
1156          */
setPerformanceMode(@erformanceMode int performanceMode)1157         public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
1158             switch (performanceMode) {
1159                 case PERFORMANCE_MODE_NONE:
1160                 case PERFORMANCE_MODE_LOW_LATENCY:
1161                 case PERFORMANCE_MODE_POWER_SAVING:
1162                     mPerformanceMode = performanceMode;
1163                     break;
1164                 default:
1165                     throw new IllegalArgumentException(
1166                             "Invalid performance mode " + performanceMode);
1167             }
1168             return this;
1169         }
1170 
1171         /**
1172          * Sets whether this track will play through the offloaded audio path.
1173          * When set to true, at build time, the audio format will be checked against
1174          * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)}
1175          * to verify the audio format used by this track is supported on the device's offload
1176          * path (if any).
1177          * <br>Offload is only supported for media audio streams, and therefore requires that
1178          * the usage be {@link AudioAttributes#USAGE_MEDIA}.
1179          * @param offload true to require the offload path for playback.
1180          * @return the same Builder instance.
1181          */
setOffloadedPlayback(boolean offload)1182         public @NonNull Builder setOffloadedPlayback(boolean offload) {
1183             mOffload = offload;
1184             return this;
1185         }
1186 
1187         /**
1188          * Sets the tuner configuration for the {@code AudioTrack}.
1189          *
1190          * The {@link AudioTrack.TunerConfiguration} consists of parameters obtained from
1191          * the Android TV tuner API which indicate the audio content stream id and the
1192          * synchronization id for the {@code AudioTrack}.
1193          *
1194          * @param tunerConfiguration obtained by {@link AudioTrack.TunerConfiguration.Builder}.
1195          * @return the same Builder instance.
1196          * @hide
1197          */
1198         @SystemApi
1199         @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING)
setTunerConfiguration( @onNull TunerConfiguration tunerConfiguration)1200         public @NonNull Builder setTunerConfiguration(
1201                 @NonNull TunerConfiguration tunerConfiguration) {
1202             if (tunerConfiguration == null) {
1203                 throw new IllegalArgumentException("tunerConfiguration is null");
1204             }
1205             mTunerConfiguration = tunerConfiguration;
1206             return this;
1207         }
1208 
1209         /**
1210          * Builds an {@link AudioTrack} instance initialized with all the parameters set
1211          * on this <code>Builder</code>.
1212          * @return a new successfully initialized {@link AudioTrack} instance.
1213          * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
1214          *     were incompatible, or if they are not supported by the device,
1215          *     or if the device was not available.
1216          */
build()1217         public @NonNull AudioTrack build() throws UnsupportedOperationException {
1218             if (mAttributes == null) {
1219                 mAttributes = new AudioAttributes.Builder()
1220                         .setUsage(AudioAttributes.USAGE_MEDIA)
1221                         .build();
1222             }
1223             switch (mPerformanceMode) {
1224             case PERFORMANCE_MODE_LOW_LATENCY:
1225                 mAttributes = new AudioAttributes.Builder(mAttributes)
1226                     .replaceFlags((mAttributes.getAllFlags()
1227                             | AudioAttributes.FLAG_LOW_LATENCY)
1228                             & ~AudioAttributes.FLAG_DEEP_BUFFER)
1229                     .build();
1230                 break;
1231             case PERFORMANCE_MODE_NONE:
1232                 if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
1233                     break; // do not enable deep buffer mode.
1234                 }
1235                 // permitted to fall through to enable deep buffer
1236             case PERFORMANCE_MODE_POWER_SAVING:
1237                 mAttributes = new AudioAttributes.Builder(mAttributes)
1238                 .replaceFlags((mAttributes.getAllFlags()
1239                         | AudioAttributes.FLAG_DEEP_BUFFER)
1240                         & ~AudioAttributes.FLAG_LOW_LATENCY)
1241                 .build();
1242                 break;
1243             }
1244 
1245             if (mFormat == null) {
1246                 mFormat = new AudioFormat.Builder()
1247                         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
1248                         //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
1249                         .setEncoding(AudioFormat.ENCODING_DEFAULT)
1250                         .build();
1251             }
1252 
1253             if (mOffload) {
1254                 if (mPerformanceMode == PERFORMANCE_MODE_LOW_LATENCY) {
1255                     throw new UnsupportedOperationException(
1256                             "Offload and low latency modes are incompatible");
1257                 }
1258                 if (!AudioSystem.isOffloadSupported(mFormat, mAttributes)) {
1259                     throw new UnsupportedOperationException(
1260                             "Cannot create AudioTrack, offload format / attributes not supported");
1261                 }
1262             }
1263 
1264             // TODO: Check mEncapsulationMode compatibility with MODE_STATIC, etc?
1265 
1266             try {
1267                 // If the buffer size is not specified in streaming mode,
1268                 // use a single frame for the buffer size and let the
1269                 // native code figure out the minimum buffer size.
1270                 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
1271                     mBufferSizeInBytes = mFormat.getChannelCount()
1272                             * mFormat.getBytesPerSample(mFormat.getEncoding());
1273                 }
1274                 final AudioTrack track = new AudioTrack(
1275                         mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId, mOffload,
1276                         mEncapsulationMode, mTunerConfiguration);
1277                 if (track.getState() == STATE_UNINITIALIZED) {
1278                     // release is not necessary
1279                     throw new UnsupportedOperationException("Cannot create AudioTrack");
1280                 }
1281                 return track;
1282             } catch (IllegalArgumentException e) {
1283                 throw new UnsupportedOperationException(e.getMessage());
1284             }
1285         }
1286     }
1287 
    /**
     * Configures the delay and padding values for the current compressed stream playing
     * in offload mode.
     * This can only be used on a track successfully initialized with
     * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. The unit is frames, where a
     * frame indicates the number of samples per channel, e.g. 100 frames for a stereo compressed
     * stream corresponds to 200 decoded interleaved PCM samples.
     * @param delayInFrames number of frames to be ignored at the beginning of the stream. A value
     *     of 0 indicates no delay is to be applied.
     * @param paddingInFrames number of frames to be ignored at the end of the stream. A value
     *     of 0 indicates no padding is to be applied.
     * @throws IllegalArgumentException if either argument is negative.
     * @throws IllegalStateException if the track is not offloaded or not initialized.
     */
    public void setOffloadDelayPadding(@IntRange(from = 0) int delayInFrames,
            @IntRange(from = 0) int paddingInFrames) {
        // Explicit runtime checks: @IntRange is a compile-time hint only.
        if (paddingInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative padding");
        }
        if (delayInFrames < 0) {
            throw new IllegalArgumentException("Illegal negative delay");
        }
        if (!mOffloaded) {
            throw new IllegalStateException("Illegal use of delay/padding on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        // Cache the values for the getters, then push them to the native track.
        mOffloadDelayFrames = delayInFrames;
        mOffloadPaddingFrames = paddingInFrames;
        native_set_delay_padding(delayInFrames, paddingInFrames);
    }
1318 
1319     /**
1320      * Return the decoder delay of an offloaded track, expressed in frames, previously set with
1321      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1322      * <p>This delay indicates the number of frames to be ignored at the beginning of the stream.
1323      * This value can only be queried on a track successfully initialized with
1324      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1325      * @return decoder delay expressed in frames.
1326      */
getOffloadDelay()1327     public @IntRange(from = 0) int getOffloadDelay() {
1328         if (!mOffloaded) {
1329             throw new IllegalStateException("Illegal query of delay on non-offloaded track");
1330         }
1331         if (mState == STATE_UNINITIALIZED) {
1332             throw new IllegalStateException("Illegal query of delay on uninitialized track");
1333         }
1334         return mOffloadDelayFrames;
1335     }
1336 
1337     /**
1338      * Return the decoder padding of an offloaded track, expressed in frames, previously set with
1339      * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified.
1340      * <p>This padding indicates the number of frames to be ignored at the end of the stream.
1341      * This value can only be queried on a track successfully initialized with
1342      * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}.
1343      * @return decoder padding expressed in frames.
1344      */
getOffloadPadding()1345     public @IntRange(from = 0) int getOffloadPadding() {
1346         if (!mOffloaded) {
1347             throw new IllegalStateException("Illegal query of padding on non-offloaded track");
1348         }
1349         if (mState == STATE_UNINITIALIZED) {
1350             throw new IllegalStateException("Illegal query of padding on uninitialized track");
1351         }
1352         return mOffloadPaddingFrames;
1353     }
1354 
    /**
     * Declares that the last write() operation on this track provided the last buffer of this
     * stream.
     * After the end of stream, previously set padding and delay values are ignored.
     * Can only be called if the AudioTrack is opened in offload mode
     * {@see Builder#setOffloadedPlayback(boolean)}.
     * Can only be called if the AudioTrack is in state {@link #PLAYSTATE_PLAYING}
     * {@see #getPlayState()}.
     * Use this method in the same thread as any write() operation.
     * @throws IllegalStateException if the track is not offloaded, not initialized, not
     *     playing, or has no registered StreamEventCallback.
     */
    public void setOffloadEndOfStream() {
        if (!mOffloaded) {
            throw new IllegalStateException("EOS not supported on non-offloaded track");
        }
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("Uninitialized track");
        }
        if (mPlayState != PLAYSTATE_PLAYING) {
            throw new IllegalStateException("EOS not supported if not playing");
        }
        // End-of-stream completion is reported through a StreamEventCallback, so at least
        // one must be registered before initiating the stop.
        synchronized (mStreamEventCbLock) {
            if (mStreamEventCbInfoList.size() == 0) {
                throw new IllegalStateException("EOS not supported without StreamEventCallback");
            }
        }

        // Transition to the transient STOPPING state under the play-state lock; the native
        // track keeps draining the already-written data after native_stop().
        synchronized (mPlayStateLock) {
            native_stop();
            mOffloadEosPending = true;
            mPlayState = PLAYSTATE_STOPPING;
        }
    }
1387 
    /**
     * Returns whether the track was built with {@link Builder#setOffloadedPlayback(boolean)} set
     * to {@code true}.
     * @return true if the track is using offloaded playback.
     */
    public boolean isOffloadedPlayback() {
        // Fixed at construction time (passed from the Builder); never changes afterwards.
        return mOffloaded;
    }
1396 
1397     /**
1398      * Returns whether direct playback of an audio format with the provided attributes is
1399      * currently supported on the system.
1400      * <p>Direct playback means that the audio stream is not resampled or downmixed
1401      * by the framework. Checking for direct support can help the app select the representation
1402      * of audio content that most closely matches the capabilities of the device and peripherials
1403      * (e.g. A/V receiver) connected to it. Note that the provided stream can still be re-encoded
1404      * or mixed with other streams, if needed.
1405      * <p>Also note that this query only provides information about the support of an audio format.
1406      * It does not indicate whether the resources necessary for the playback are available
1407      * at that instant.
1408      * @param format a non-null {@link AudioFormat} instance describing the format of
1409      *   the audio data.
1410      * @param attributes a non-null {@link AudioAttributes} instance.
1411      * @return true if the given audio format can be played directly.
1412      */
isDirectPlaybackSupported(@onNull AudioFormat format, @NonNull AudioAttributes attributes)1413     public static boolean isDirectPlaybackSupported(@NonNull AudioFormat format,
1414             @NonNull AudioAttributes attributes) {
1415         if (format == null) {
1416             throw new IllegalArgumentException("Illegal null AudioFormat argument");
1417         }
1418         if (attributes == null) {
1419             throw new IllegalArgumentException("Illegal null AudioAttributes argument");
1420         }
1421         return native_is_direct_output_supported(format.getEncoding(), format.getSampleRate(),
1422                 format.getChannelMask(), format.getChannelIndexMask(),
1423                 attributes.getContentType(), attributes.getUsage(), attributes.getFlags());
1424     }
1425 
    /*
     * The MAX_LEVEL should be exactly representable by an IEEE 754-2008 binary32 float.
     * This means fractions must be divisible by a power of 2. For example,
     * 10.25f is OK as 0.25 is 1/4, but 10.1f is NOT OK as 1/10 is not expressible by
     * a finite binary fraction.
     *
     * 48.f is the nominal max for API level {@link android.os.Build.VERSION_CODES#R}.
     * We use this to suggest a baseline range for implementation.
     *
     * The API contract specification allows increasing this value in a future
     * API release, but not decreasing this value.
     */
    private static final float MAX_AUDIO_DESCRIPTION_MIX_LEVEL = 48.f;
1439 
isValidAudioDescriptionMixLevel(float level)1440     private static boolean isValidAudioDescriptionMixLevel(float level) {
1441         return !(Float.isNaN(level) || level > MAX_AUDIO_DESCRIPTION_MIX_LEVEL);
1442     }
1443 
1444     /**
1445      * Sets the Audio Description mix level in dB.
1446      *
1447      * For AudioTracks incorporating a secondary Audio Description stream
1448      * (where such contents may be sent through an Encapsulation Mode
1449      * other than {@link #ENCAPSULATION_MODE_NONE}).
1450      * or internally by a HW channel),
1451      * the level of mixing of the Audio Description to the Main Audio stream
1452      * is controlled by this method.
1453      *
1454      * Such mixing occurs <strong>prior</strong> to overall volume scaling.
1455      *
1456      * @param level a floating point value between
1457      *     {@code Float.NEGATIVE_INFINITY} to {@code +48.f},
1458      *     where {@code Float.NEGATIVE_INFINITY} means the Audio Description is not mixed
1459      *     and a level of {@code 0.f} means the Audio Description is mixed without scaling.
1460      * @return true on success, false on failure.
1461      */
setAudioDescriptionMixLeveldB( @loatRangeto = 48.f, toInclusive = true) float level)1462     public boolean setAudioDescriptionMixLeveldB(
1463             @FloatRange(to = 48.f, toInclusive = true) float level) {
1464         if (!isValidAudioDescriptionMixLevel(level)) {
1465             throw new IllegalArgumentException("level is out of range" + level);
1466         }
1467         return native_set_audio_description_mix_level_db(level) == SUCCESS;
1468     }
1469 
1470     /**
1471      * Returns the Audio Description mix level in dB.
1472      *
1473      * If Audio Description mixing is unavailable from the hardware device,
1474      * a value of {@code Float.NEGATIVE_INFINITY} is returned.
1475      *
1476      * @return the current Audio Description Mix Level in dB.
1477      *     A value of {@code Float.NEGATIVE_INFINITY} means
1478      *     that the audio description is not mixed or
1479      *     the hardware is not available.
1480      *     This should reflect the <strong>true</strong> internal device mix level;
1481      *     hence the application might receive any floating value
1482      *     except {@code Float.NaN}.
1483      */
getAudioDescriptionMixLeveldB()1484     public float getAudioDescriptionMixLeveldB() {
1485         float[] level = { Float.NEGATIVE_INFINITY };
1486         try {
1487             final int status = native_get_audio_description_mix_level_db(level);
1488             if (status != SUCCESS || Float.isNaN(level[0])) {
1489                 return Float.NEGATIVE_INFINITY;
1490             }
1491         } catch (Exception e) {
1492             return Float.NEGATIVE_INFINITY;
1493         }
1494         return level[0];
1495     }
1496 
isValidDualMonoMode(@ualMonoMode int dualMonoMode)1497     private static boolean isValidDualMonoMode(@DualMonoMode int dualMonoMode) {
1498         switch (dualMonoMode) {
1499             case DUAL_MONO_MODE_OFF:
1500             case DUAL_MONO_MODE_LR:
1501             case DUAL_MONO_MODE_LL:
1502             case DUAL_MONO_MODE_RR:
1503                 return true;
1504             default:
1505                 return false;
1506         }
1507     }
1508 
1509     /**
1510      * Sets the Dual Mono mode presentation on the output device.
1511      *
1512      * The Dual Mono mode is generally applied to stereo audio streams
1513      * where the left and right channels come from separate sources.
1514      *
1515      * For compressed audio, where the decoding is done in hardware,
1516      * Dual Mono presentation needs to be performed
1517      * by the hardware output device
1518      * as the PCM audio is not available to the framework.
1519      *
1520      * @param dualMonoMode one of {@link #DUAL_MONO_MODE_OFF},
1521      *     {@link #DUAL_MONO_MODE_LR},
1522      *     {@link #DUAL_MONO_MODE_LL},
1523      *     {@link #DUAL_MONO_MODE_RR}.
1524      *
1525      * @return true on success, false on failure if the output device
1526      *     does not support Dual Mono mode.
1527      */
setDualMonoMode(@ualMonoMode int dualMonoMode)1528     public boolean setDualMonoMode(@DualMonoMode int dualMonoMode) {
1529         if (!isValidDualMonoMode(dualMonoMode)) {
1530             throw new IllegalArgumentException(
1531                     "Invalid Dual Mono mode " + dualMonoMode);
1532         }
1533         return native_set_dual_mono_mode(dualMonoMode) == SUCCESS;
1534     }
1535 
1536     /**
1537      * Returns the Dual Mono mode presentation setting.
1538      *
1539      * If no Dual Mono presentation is available for the output device,
1540      * then {@link #DUAL_MONO_MODE_OFF} is returned.
1541      *
1542      * @return one of {@link #DUAL_MONO_MODE_OFF},
1543      *     {@link #DUAL_MONO_MODE_LR},
1544      *     {@link #DUAL_MONO_MODE_LL},
1545      *     {@link #DUAL_MONO_MODE_RR}.
1546      */
getDualMonoMode()1547     public @DualMonoMode int getDualMonoMode() {
1548         int[] dualMonoMode = { DUAL_MONO_MODE_OFF };
1549         try {
1550             final int status = native_get_dual_mono_mode(dualMonoMode);
1551             if (status != SUCCESS || !isValidDualMonoMode(dualMonoMode[0])) {
1552                 return DUAL_MONO_MODE_OFF;
1553             }
1554         } catch (Exception e) {
1555             return DUAL_MONO_MODE_OFF;
1556         }
1557         return dualMonoMode[0];
1558     }
1559 
    // Mask of all the positional output channels supported by AudioTrack; the allowed
    // combinations are further restricted by the matching left/right pairing rule
    // (see isMultichannelConfigSupported) and AudioSystem.OUT_CHANNEL_COUNT_MAX.
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
1573 
1574     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
1575     // power saving to be automatically enabled for an AudioTrack. Returns false if
1576     // power saving is already enabled in the attributes parameter.
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)1577     private static boolean shouldEnablePowerSaving(
1578             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
1579             int bufferSizeInBytes, int mode) {
1580         // If no attributes, OK
1581         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
1582         // Only consider flags that are not compatible with FLAG_DEEP_BUFFER. We include
1583         // FLAG_DEEP_BUFFER because if set the request is explicit and
1584         // shouldEnablePowerSaving() should return false.
1585         final int flags = attributes.getAllFlags()
1586                 & (AudioAttributes.FLAG_DEEP_BUFFER | AudioAttributes.FLAG_LOW_LATENCY
1587                     | AudioAttributes.FLAG_HW_AV_SYNC | AudioAttributes.FLAG_BEACON);
1588 
1589         if (attributes != null &&
1590                 (flags != 0  // cannot have any special flags
1591                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
1592                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
1593                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
1594                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
1595             return false;
1596         }
1597 
1598         // Format must be fully specified and be linear pcm
1599         if (format == null
1600                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
1601                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
1602                 || !AudioFormat.isValidEncoding(format.getEncoding())
1603                 || format.getChannelCount() < 1) {
1604             return false;
1605         }
1606 
1607         // Mode must be streaming
1608         if (mode != MODE_STREAM) {
1609             return false;
1610         }
1611 
1612         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
1613         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
1614         if (bufferSizeInBytes != 0) {
1615             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
1616             final int MILLIS_PER_SECOND = 1000;
1617             final long bufferTargetSize =
1618                     BUFFER_TARGET_MODE_STREAM_MS
1619                     * format.getChannelCount()
1620                     * format.getBytesPerSample(format.getEncoding())
1621                     * format.getSampleRate()
1622                     / MILLIS_PER_SECOND;
1623             if (bufferSizeInBytes < bufferTargetSize) {
1624                 return false;
1625             }
1626         }
1627 
1628         return true;
1629     }
1630 
    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // SAMPLE_RATE_UNSPECIFIED is accepted: the actual rate is then chosen at routing time.
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 is based on stereo. We could coerce it to stereo.
        // But the application needs to know the stream is stereo so that
        // it is encoded and played correctly. So better to just reject it.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO) {
            throw new IllegalArgumentException(
                    "ENCODING_IEC61937 must be configured as CHANNEL_OUT_STEREO");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            final int indexMask = (1 << AudioSystem.OUT_CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // a position mask and an index mask were both given; their counts must agree
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // MODE_STATIC additionally requires a linear PCM encoding (second clause).
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
1724 
1725     /**
1726      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
1727      * @param channelConfig the mask to validate
1728      * @return false if the AudioTrack can't be used with such a mask
1729      */
isMultichannelConfigSupported(int channelConfig)1730     private static boolean isMultichannelConfigSupported(int channelConfig) {
1731         // check for unsupported channels
1732         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
1733             loge("Channel configuration features unsupported channels");
1734             return false;
1735         }
1736         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1737         if (channelCount > AudioSystem.OUT_CHANNEL_COUNT_MAX) {
1738             loge("Channel configuration contains too many channels " +
1739                     channelCount + ">" + AudioSystem.OUT_CHANNEL_COUNT_MAX);
1740             return false;
1741         }
1742         // check for unsupported multichannel combinations:
1743         // - FL/FR must be present
1744         // - L/R channels must be paired (e.g. no single L channel)
1745         final int frontPair =
1746                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
1747         if ((channelConfig & frontPair) != frontPair) {
1748                 loge("Front channels must be present in multichannel configurations");
1749                 return false;
1750         }
1751         final int backPair =
1752                 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
1753         if ((channelConfig & backPair) != 0) {
1754             if ((channelConfig & backPair) != backPair) {
1755                 loge("Rear channels can't be used independently");
1756                 return false;
1757             }
1758         }
1759         final int sidePair =
1760                 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
1761         if ((channelConfig & sidePair) != 0
1762                 && (channelConfig & sidePair) != sidePair) {
1763             loge("Side channels can't be used independently");
1764             return false;
1765         }
1766         return true;
1767     }
1768 
1769 
1770     // Convenience method for the constructor's audio buffer size check.
1771     // preconditions:
1772     //    mChannelCount is valid
1773     //    mAudioFormat is valid
1774     // postcondition:
1775     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)1776     private void audioBuffSizeCheck(int audioBufferSize) {
1777         // NB: this section is only valid with PCM or IEC61937 data.
1778         //     To update when supporting compressed formats
1779         int frameSizeInBytes;
1780         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
1781             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
1782         } else {
1783             frameSizeInBytes = 1;
1784         }
1785         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
1786             throw new IllegalArgumentException("Invalid audio buffer size.");
1787         }
1788 
1789         mNativeBufferSizeInBytes = audioBufferSize;
1790         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
1791     }
1792 
1793 
    /**
     * Releases the native AudioTrack resources.
     * After this call the track is in {@link #STATE_UNINITIALIZED} and must not be reused.
     */
    public void release() {
        // Stop delivering stream events before tearing anything down.
        synchronized (mStreamEventCbLock){
            endStreamEventHandling();
        }
        // even though native_release() stops the native AudioTrack, we need to stop
        // AudioTrack subclasses too.
        try {
            stop();
        } catch(IllegalStateException ise) {
            // don't raise an exception, we're releasing the resources.
        }
        baseRelease();
        native_release();
        synchronized (mPlayStateLock) {
            mState = STATE_UNINITIALIZED;
            mPlayState = PLAYSTATE_STOPPED;
            // wake any thread blocked waiting for a play-state change
            mPlayStateLock.notify();
        }
    }
1816 
    @Override
    protected void finalize() {
        // Safety net: release the PlayerBase and native resources if the
        // application never called release().
        baseRelease();
        native_finalize();
    }
1822 
1823     //--------------------------------------------------------------------------
1824     // Getters
1825     //--------------------
1826     /**
1827      * Returns the minimum gain value, which is the constant 0.0.
1828      * Gain values less than 0.0 will be clamped to 0.0.
1829      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1830      * @return the minimum value, which is the constant 0.0.
1831      */
getMinVolume()1832     static public float getMinVolume() {
1833         return GAIN_MIN;
1834     }
1835 
1836     /**
1837      * Returns the maximum gain value, which is greater than or equal to 1.0.
1838      * Gain values greater than the maximum will be clamped to the maximum.
1839      * <p>The word "volume" in the API name is historical; this is actually a gain.
1840      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
1841      * corresponds to a gain of 0 dB (sample values left unmodified).
1842      * @return the maximum value, which is greater than or equal to 1.0.
1843      */
getMaxVolume()1844     static public float getMaxVolume() {
1845         return GAIN_MAX;
1846     }
1847 
    /**
     * Returns the configured audio source sample rate in Hz.
     * The initial source sample rate depends on the constructor parameters,
     * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
     * If the constructor had a specific sample rate, then the initial sink sample rate is that
     * value.
     * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
     * then the initial sink sample rate is a route-dependent default value based on the source.
     * @return the source sample rate in Hz.
     */
    public int getSampleRate() {
        return mSampleRate;
    }
1860 
    /**
     * Returns the current playback sample rate in Hz, as reported by the native layer.
     * @return the playback sample rate in Hz.
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }
1867 
    /**
     * Returns the current playback parameters.
     * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters.
     * @return current {@link PlaybackParams}.
     * @throws IllegalStateException if track is not initialized.
     */
    public @NonNull PlaybackParams getPlaybackParams() {
        return native_get_playback_params();
    }
1877 
1878     /**
1879      * Returns the {@link AudioAttributes} used in configuration.
1880      * If a {@code streamType} is used instead of an {@code AudioAttributes}
1881      * to configure the AudioTrack
1882      * (the use of {@code streamType} for configuration is deprecated),
1883      * then the {@code AudioAttributes}
1884      * equivalent to the {@code streamType} is returned.
1885      * @return The {@code AudioAttributes} used to configure the AudioTrack.
1886      * @throws IllegalStateException If the track is not initialized.
1887      */
getAudioAttributes()1888     public @NonNull AudioAttributes getAudioAttributes() {
1889         if (mState == STATE_UNINITIALIZED || mConfiguredAudioAttributes == null) {
1890             throw new IllegalStateException("track not initialized");
1891         }
1892         return mConfiguredAudioAttributes;
1893     }
1894 
    /**
     * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
     * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @return the audio data encoding constant.
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }
1902 
    /**
     * Returns the volume stream type of this AudioTrack.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
     * {@link AudioManager#STREAM_ACCESSIBILITY}.
     * @return the volume stream type constant.
     */
    public int getStreamType() {
        return mStreamType;
    }
1914 
    /**
     * Returns the configured channel position mask.
     * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
     * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
     * This method may return {@link AudioFormat#CHANNEL_INVALID} if
     * a channel index mask was used. Consider
     * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
     * which contains both the channel position mask and the channel index mask.
     * @return the channel position mask.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }
1927 
1928     /**
1929      * Returns the configured <code>AudioTrack</code> format.
1930      * @return an {@link AudioFormat} containing the
1931      * <code>AudioTrack</code> parameters at the time of configuration.
1932      */
getFormat()1933     public @NonNull AudioFormat getFormat() {
1934         AudioFormat.Builder builder = new AudioFormat.Builder()
1935             .setSampleRate(mSampleRate)
1936             .setEncoding(mAudioFormat);
1937         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
1938             builder.setChannelMask(mChannelConfiguration);
1939         }
1940         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
1941             builder.setChannelIndexMask(mChannelIndexMask);
1942         }
1943         return builder.build();
1944     }
1945 
1946     /**
1947      * Returns the configured number of channels.
1948      */
    public int getChannelCount() {
        // Number of channels configured for this track.
        return mChannelCount;
    }
1952 
1953     /**
1954      * Returns the state of the AudioTrack instance. This is useful after the
1955      * AudioTrack instance has been created to check if it was initialized
1956      * properly. This ensures that the appropriate resources have been acquired.
1957      * @see #STATE_UNINITIALIZED
1958      * @see #STATE_INITIALIZED
1959      * @see #STATE_NO_STATIC_DATA
1960      */
    public int getState() {
        // One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
        return mState;
    }
1964 
1965     /**
1966      * Returns the playback state of the AudioTrack instance.
1967      * @see #PLAYSTATE_STOPPED
1968      * @see #PLAYSTATE_PAUSED
1969      * @see #PLAYSTATE_PLAYING
1970      */
getPlayState()1971     public int getPlayState() {
1972         synchronized (mPlayStateLock) {
1973             switch (mPlayState) {
1974                 case PLAYSTATE_STOPPING:
1975                     return PLAYSTATE_PLAYING;
1976                 case PLAYSTATE_PAUSED_STOPPING:
1977                     return PLAYSTATE_PAUSED;
1978                 default:
1979                     return mPlayState;
1980             }
1981         }
1982     }
1983 
1984 
1985     /**
1986      * Returns the effective size of the <code>AudioTrack</code> buffer
1987      * that the application writes to.
1988      * <p> This will be less than or equal to the result of
1989      * {@link #getBufferCapacityInFrames()}.
1990      * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
1991      * <p> If the track is subsequently routed to a different output sink, the buffer
1992      * size and capacity may enlarge to accommodate.
1993      * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
1994      * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
1995      * the size of the <code>AudioTrack</code> buffer in bytes.
1996      * <p> See also {@link AudioManager#getProperty(String)} for key
1997      * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
1998      * @return current size in frames of the <code>AudioTrack</code> buffer.
1999      * @throws IllegalStateException if track is not initialized.
2000      */
    public @IntRange (from = 0) int getBufferSizeInFrames() {
        // Queried from the native layer each call; per the javadoc above, the size
        // may grow if the track is re-routed to a different output sink.
        return native_get_buffer_size_frames();
    }
2004 
2005     /**
2006      * Limits the effective size of the <code>AudioTrack</code> buffer
2007      * that the application writes to.
2008      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
2009      * If a blocking write is used then the write will block until the data
2010      * can fit within this limit.
2011      * <p>Changing this limit modifies the latency associated with
2012      * the buffer for this track. A smaller size will give lower latency
2013      * but there may be more glitches due to buffer underruns.
2014      * <p>The actual size used may not be equal to this requested size.
2015      * It will be limited to a valid range with a maximum of
2016      * {@link #getBufferCapacityInFrames()}.
2017      * It may also be adjusted slightly for internal reasons.
2018      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
2019      * will be returned.
2020      * <p>This method is only supported for PCM audio.
2021      * It is not supported for compressed audio tracks.
2022      *
2023      * @param bufferSizeInFrames requested buffer size in frames
2024      * @return the actual buffer size in frames or an error code,
2025      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2026      * @throws IllegalStateException if track is not initialized.
2027      */
setBufferSizeInFrames(@ntRange from = 0) int bufferSizeInFrames)2028     public int setBufferSizeInFrames(@IntRange (from = 0) int bufferSizeInFrames) {
2029         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
2030             return ERROR_INVALID_OPERATION;
2031         }
2032         if (bufferSizeInFrames < 0) {
2033             return ERROR_BAD_VALUE;
2034         }
2035         return native_set_buffer_size_frames(bufferSizeInFrames);
2036     }
2037 
2038     /**
2039      *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
2040      *  <p> If the track's creation mode is {@link #MODE_STATIC},
2041      *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
2042      *  A static track's frame count will not change.
2043      *  <p> If the track's creation mode is {@link #MODE_STREAM},
2044      *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
2045      *  For streaming tracks, this value may be rounded up to a larger value if needed by
2046      *  the target output sink, and
2047      *  if the track is subsequently routed to a different output sink, the
2048      *  frame count may enlarge to accommodate.
2049      *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
2050      *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
2051      *  the size of the <code>AudioTrack</code> buffer in bytes.
2052      *  <p> See also {@link AudioManager#getProperty(String)} for key
2053      *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
2054      *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
2055      *  @throws IllegalStateException if track is not initialized.
2056      */
    public @IntRange (from = 0) int getBufferCapacityInFrames() {
        // Maximum buffer size in frames, as reported by the native layer.
        return native_get_buffer_capacity_frames();
    }
2060 
2061     /**
2062      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
2063      *  @return current size in frames of the <code>AudioTrack</code> buffer.
2064      *  @throws IllegalStateException
2065      *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
2066      */
    @Deprecated
    protected int getNativeFrameCount() {
        // Kept for backward compatibility; delegates to the same native query as
        // getBufferCapacityInFrames().
        return native_get_buffer_capacity_frames();
    }
2071 
2072     /**
2073      * Returns marker position expressed in frames.
2074      * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
2075      * or zero if marker is disabled.
2076      */
    public int getNotificationMarkerPosition() {
        // Zero means the marker is disabled.
        return native_get_marker_pos();
    }
2080 
2081     /**
2082      * Returns the notification update period expressed in frames.
2083      * Zero means that no position update notifications are being delivered.
2084      */
    public int getPositionNotificationPeriod() {
        // Zero means periodic position notifications are disabled.
        return native_get_pos_update_period();
    }
2088 
2089     /**
2090      * Returns the playback head position expressed in frames.
2091      * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
2092      * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
2093      * This is a continuously advancing counter.  It will wrap (overflow) periodically,
2094      * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
2095      * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
2096      * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
2097      * the total number of frames played since reset,
2098      * <i>not</i> the current offset within the buffer.
2099      */
    public int getPlaybackHeadPosition() {
        // Wrapping 32-bit frame counter; callers should reinterpret the int as
        // unsigned (see the javadoc above for wraparound and reset semantics).
        return native_get_position();
    }
2103 
2104     /**
2105      * Returns this track's estimated latency in milliseconds. This includes the latency due
2106      * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
2107      *
2108      * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
2109      * a better solution.
2110      * @hide
2111      */
    @UnsupportedAppUsage(trackingBug = 130237544)
    public int getLatency() {
        // Hidden API (see javadoc: DO NOT UNHIDE); estimated latency in milliseconds.
        return native_get_latency();
    }
2116 
2117     /**
2118      * Returns the number of underrun occurrences in the application-level write buffer
2119      * since the AudioTrack was created.
2120      * An underrun occurs if the application does not write audio
2121      * data quickly enough, causing the buffer to underflow
2122      * and a potential audio glitch or pop.
2123      * <p>
2124      * Underruns are less likely when buffer sizes are large.
2125      * It may be possible to eliminate underruns by recreating the AudioTrack with
2126      * a larger buffer.
2127      * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
2128      * effective size of the buffer.
2129      */
    public int getUnderrunCount() {
        // Cumulative count since track creation, maintained by the native layer.
        return native_get_underrun_count();
    }
2133 
2134     /**
2135      * Returns the current performance mode of the {@link AudioTrack}.
2136      *
2137      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
2138      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
2139      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
2140      * Use {@link AudioTrack.Builder#setPerformanceMode}
2141      * in the {@link AudioTrack.Builder} to enable a performance mode.
2142      * @throws IllegalStateException if track is not initialized.
2143      */
getPerformanceMode()2144     public @PerformanceMode int getPerformanceMode() {
2145         final int flags = native_get_flags();
2146         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
2147             return PERFORMANCE_MODE_LOW_LATENCY;
2148         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
2149             return PERFORMANCE_MODE_POWER_SAVING;
2150         } else {
2151             return PERFORMANCE_MODE_NONE;
2152         }
2153     }
2154 
2155     /**
2156      *  Returns the output sample rate in Hz for the specified stream type.
2157      */
getNativeOutputSampleRate(int streamType)2158     static public int getNativeOutputSampleRate(int streamType) {
2159         return native_get_output_sample_rate(streamType);
2160     }
2161 
2162     /**
2163      * Returns the estimated minimum buffer size required for an AudioTrack
2164      * object to be created in the {@link #MODE_STREAM} mode.
2165      * The size is an estimate because it does not consider either the route or the sink,
2166      * since neither is known yet.  Note that this size doesn't
2167      * guarantee a smooth playback under load, and higher values should be chosen according to
2168      * the expected frequency at which the buffer will be refilled with additional data to play.
2169      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
2170      * to a higher value than the initial source sample rate, be sure to configure the buffer size
2171      * based on the highest planned sample rate.
2172      * @param sampleRateInHz the source sample rate expressed in Hz.
2173      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
2174      * @param channelConfig describes the configuration of the audio channels.
2175      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
2176      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
2177      * @param audioFormat the format in which the audio data is represented.
2178      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
2179      *   {@link AudioFormat#ENCODING_PCM_8BIT},
2180      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
2181      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
2182      *   or {@link #ERROR} if unable to query for output properties,
2183      *   or the minimum buffer size expressed in bytes.
2184      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)2185     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
2186         int channelCount = 0;
2187         switch(channelConfig) {
2188         case AudioFormat.CHANNEL_OUT_MONO:
2189         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
2190             channelCount = 1;
2191             break;
2192         case AudioFormat.CHANNEL_OUT_STEREO:
2193         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
2194             channelCount = 2;
2195             break;
2196         default:
2197             if (!isMultichannelConfigSupported(channelConfig)) {
2198                 loge("getMinBufferSize(): Invalid channel configuration.");
2199                 return ERROR_BAD_VALUE;
2200             } else {
2201                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
2202             }
2203         }
2204 
2205         if (!AudioFormat.isPublicEncoding(audioFormat)) {
2206             loge("getMinBufferSize(): Invalid audio format.");
2207             return ERROR_BAD_VALUE;
2208         }
2209 
2210         // sample rate, note these values are subject to change
2211         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
2212         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
2213                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
2214             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
2215             return ERROR_BAD_VALUE;
2216         }
2217 
2218         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
2219         if (size <= 0) {
2220             loge("getMinBufferSize(): error querying hardware");
2221             return ERROR;
2222         }
2223         else {
2224             return size;
2225         }
2226     }
2227 
2228     /**
2229      * Returns the audio session ID.
2230      *
2231      * @return the ID of the audio session this AudioTrack belongs to.
2232      */
    public int getAudioSessionId() {
        // Session ID assigned when the track was created.
        return mSessionId;
    }
2236 
2237    /**
2238     * Poll for a timestamp on demand.
2239     * <p>
2240     * If you need to track timestamps during initial warmup or after a routing or mode change,
2241     * you should request a new timestamp periodically until the reported timestamps
2242     * show that the frame position is advancing, or until it becomes clear that
2243     * timestamps are unavailable for this route.
2244     * <p>
2245     * After the clock is advancing at a stable rate,
2246     * query for a new timestamp approximately once every 10 seconds to once per minute.
2247     * Calling this method more often is inefficient.
2248     * It is also counter-productive to call this method more often than recommended,
2249     * because the short-term differences between successive timestamp reports are not meaningful.
2250     * If you need a high-resolution mapping between frame position and presentation time,
2251     * consider implementing that at application level, based on low-resolution timestamps.
2252     * <p>
2253     * The audio data at the returned position may either already have been
2254     * presented, or may have not yet been presented but is committed to be presented.
2255     * It is not possible to request the time corresponding to a particular position,
2256     * or to request the (fractional) position corresponding to a particular time.
2257     * If you need such features, consider implementing them at application level.
2258     *
2259     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2260     *        and owned by caller.
2261     * @return true if a timestamp is available, or false if no timestamp is available.
2262     *         If a timestamp is available,
2263     *         the AudioTimestamp instance is filled in with a position in frame units, together
2264     *         with the estimated time when that frame was presented or is committed to
2265     *         be presented.
2266     *         In the case that no timestamp is available, any supplied instance is left unaltered.
2267     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
2268     *         or during and immediately after a route change.
2269     *         A timestamp is permanently unavailable for a given route if the route does not support
2270     *         timestamps.  In this case, the approximate frame position can be obtained
2271     *         using {@link #getPlaybackHeadPosition}.
2272     *         However, it may be useful to continue to query for
2273     *         timestamps occasionally, to recover after a route change.
2274     */
2275     // Add this text when the "on new timestamp" API is added:
2276     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)2277     public boolean getTimestamp(AudioTimestamp timestamp)
2278     {
2279         if (timestamp == null) {
2280             throw new IllegalArgumentException();
2281         }
2282         // It's unfortunate, but we have to either create garbage every time or use synchronized
2283         long[] longArray = new long[2];
2284         int ret = native_get_timestamp(longArray);
2285         if (ret != SUCCESS) {
2286             return false;
2287         }
2288         timestamp.framePosition = longArray[0];
2289         timestamp.nanoTime = longArray[1];
2290         return true;
2291     }
2292 
2293     /**
2294      * Poll for a timestamp on demand.
2295      * <p>
2296      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
2297      *
2298      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
2299      *        and owned by caller.
2300      * @return {@link #SUCCESS} if a timestamp is available
2301      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
2302      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
2303      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
2304      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
2305      *         for the timestamp.
2306      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2307      *         needs to be recreated.
2308      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
2309      *         timestamps. In this case, the approximate frame position can be obtained
2310      *         using {@link #getPlaybackHeadPosition}.
2311      *
2312      *         The AudioTimestamp instance is filled in with a position in frame units, together
2313      *         with the estimated time when that frame was presented or is committed to
2314      *         be presented.
2315      * @hide
2316      */
2317      // Add this text when the "on new timestamp" API is added:
2318      //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestampWithStatus(AudioTimestamp timestamp)2319      public int getTimestampWithStatus(AudioTimestamp timestamp)
2320      {
2321          if (timestamp == null) {
2322              throw new IllegalArgumentException();
2323          }
2324          // It's unfortunate, but we have to either create garbage every time or use synchronized
2325          long[] longArray = new long[2];
2326          int ret = native_get_timestamp(longArray);
2327          timestamp.framePosition = longArray[0];
2328          timestamp.nanoTime = longArray[1];
2329          return ret;
2330      }
2331 
2332     /**
2333      *  Return Metrics data about the current AudioTrack instance.
2334      *
2335      * @return a {@link PersistableBundle} containing the set of attributes and values
2336      * available for the media being handled by this instance of AudioTrack
     * The attributes are described in {@link MetricsConstants}.
2338      *
2339      * Additional vendor-specific fields may also be present in
2340      * the return value.
2341      */
getMetrics()2342     public PersistableBundle getMetrics() {
2343         PersistableBundle bundle = native_getMetrics();
2344         return bundle;
2345     }
2346 
    // Native backing for getMetrics(); returns the attribute/value bundle for this track.
    private native PersistableBundle native_getMetrics();
2348 
2349     //--------------------------------------------------------------------------
2350     // Initialization / configuration
2351     //--------------------
2352     /**
2353      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2354      * for each periodic playback head position update.
2355      * Notifications will be received in the same thread as the one in which the AudioTrack
2356      * instance was created.
2357      * @param listener
2358      */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // A null handler routes notifications to the thread that created this AudioTrack.
        setPlaybackPositionUpdateListener(listener, null);
    }
2362 
2363     /**
2364      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
2365      * for each periodic playback head position update.
2366      * Use this method to receive AudioTrack events in the Handler associated with another
2367      * thread than the one in which you created the AudioTrack instance.
2368      * @param listener
2369      * @param handler the Handler that will receive the event notification messages.
2370      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)2371     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
2372                                                     Handler handler) {
2373         if (listener != null) {
2374             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
2375         } else {
2376             mEventHandlerDelegate = null;
2377         }
2378     }
2379 
2380 
clampGainOrLevel(float gainOrLevel)2381     private static float clampGainOrLevel(float gainOrLevel) {
2382         if (Float.isNaN(gainOrLevel)) {
2383             throw new IllegalArgumentException();
2384         }
2385         if (gainOrLevel < GAIN_MIN) {
2386             gainOrLevel = GAIN_MIN;
2387         } else if (gainOrLevel > GAIN_MAX) {
2388             gainOrLevel = GAIN_MAX;
2389         }
2390         return gainOrLevel;
2391     }
2392 
2393 
2394      /**
2395      * Sets the specified left and right output gain values on the AudioTrack.
2396      * <p>Gain values are clamped to the closed interval [0.0, max] where
2397      * max is the value of {@link #getMaxVolume}.
2398      * A value of 0.0 results in zero gain (silence), and
2399      * a value of 1.0 means unity gain (signal unchanged).
2400      * The default value is 1.0 meaning unity gain.
2401      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2402      * @param leftGain output gain for the left channel.
2403      * @param rightGain output gain for the right channel
2404      * @return error code or success, see {@link #SUCCESS},
2405      *    {@link #ERROR_INVALID_OPERATION}
2406      * @deprecated Applications should use {@link #setVolume} instead, as it
2407      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2408      */
2409     @Deprecated
setStereoVolume(float leftGain, float rightGain)2410     public int setStereoVolume(float leftGain, float rightGain) {
2411         if (mState == STATE_UNINITIALIZED) {
2412             return ERROR_INVALID_OPERATION;
2413         }
2414 
2415         baseSetVolume(leftGain, rightGain);
2416         return SUCCESS;
2417     }
2418 
2419     @Override
playerSetVolume(boolean muting, float leftVolume, float rightVolume)2420     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
2421         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
2422         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
2423 
2424         native_setVolume(leftVolume, rightVolume);
2425     }
2426 
2427 
2428     /**
2429      * Sets the specified output gain value on all channels of this track.
2430      * <p>Gain values are clamped to the closed interval [0.0, max] where
2431      * max is the value of {@link #getMaxVolume}.
2432      * A value of 0.0 results in zero gain (silence), and
2433      * a value of 1.0 means unity gain (signal unchanged).
2434      * The default value is 1.0 meaning unity gain.
2435      * <p>This API is preferred over {@link #setStereoVolume}, as it
2436      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
2437      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
2438      * @param gain output gain for all channels.
2439      * @return error code or success, see {@link #SUCCESS},
2440      *    {@link #ERROR_INVALID_OPERATION}
2441      */
    public int setVolume(float gain) {
        // Apply the same linear gain to both channels.
        return setStereoVolume(gain, gain);
    }
2445 
    @Override
    /* package */ int playerApplyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation) {
        // Forwards the shaper configuration/operation straight to the native track.
        return native_applyVolumeShaper(configuration, operation);
    }
2452 
    @Override
    /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
        // May return null (per @Nullable), depending on native-side shaper state.
        return native_getVolumeShaperState(id);
    }
2457 
    @Override
    public @NonNull VolumeShaper createVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration) {
        // The returned shaper is bound to this track with the given configuration.
        return new VolumeShaper(configuration, this);
    }
2463 
2464     /**
2465      * Sets the playback sample rate for this track. This sets the sampling rate at which
2466      * the audio data will be consumed and played back
2467      * (as set by the sampleRateInHz parameter in the
2468      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
2469      * not the original sampling rate of the
2470      * content. For example, setting it to half the sample rate of the content will cause the
2471      * playback to last twice as long, but will also result in a pitch shift down by one octave.
2472      * The valid sample rate range is from 1 Hz to twice the value returned by
2473      * {@link #getNativeOutputSampleRate(int)}.
2474      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
2475      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
2476      * for playback of content of differing sample rate,
2477      * but with identical encoding and channel mask.
2478      * @param sampleRateInHz the sample rate expressed in Hz
2479      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2480      *    {@link #ERROR_INVALID_OPERATION}
2481      */
setPlaybackRate(int sampleRateInHz)2482     public int setPlaybackRate(int sampleRateInHz) {
2483         if (mState != STATE_INITIALIZED) {
2484             return ERROR_INVALID_OPERATION;
2485         }
2486         if (sampleRateInHz <= 0) {
2487             return ERROR_BAD_VALUE;
2488         }
2489         return native_set_playback_rate(sampleRateInHz);
2490     }
2491 
2492 
2493     /**
2494      * Sets the playback parameters.
2495      * This method returns failure if it cannot apply the playback parameters.
2496      * One possible cause is that the parameters for speed or pitch are out of range.
2497      * Another possible cause is that the <code>AudioTrack</code> is streaming
2498      * (see {@link #MODE_STREAM}) and the
2499      * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
2500      * on configuration must be larger than the speed multiplied by the minimum size
2501      * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
2502      * @param params see {@link PlaybackParams}. In particular,
2503      * speed, pitch, and audio mode should be set.
2504      * @throws IllegalArgumentException if the parameters are invalid or not accepted.
2505      * @throws IllegalStateException if track is not initialized.
2506      */
    public void setPlaybackParams(@NonNull PlaybackParams params) {
        // Explicit runtime guard in addition to the @NonNull annotation.
        if (params == null) {
            throw new IllegalArgumentException("params is null");
        }
        native_set_playback_params(params);
    }
2513 
2514 
2515     /**
2516      * Sets the position of the notification marker.  At most one marker can be active.
2517      * @param markerInFrames marker position in wrapping frame units similar to
2518      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
2519      * To set a marker at a position which would appear as zero due to wraparound,
2520      * a workaround is to use a non-zero position near zero, such as -1 or 1.
2521      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2522      *  {@link #ERROR_INVALID_OPERATION}
2523      */
setNotificationMarkerPosition(int markerInFrames)2524     public int setNotificationMarkerPosition(int markerInFrames) {
2525         if (mState == STATE_UNINITIALIZED) {
2526             return ERROR_INVALID_OPERATION;
2527         }
2528         return native_set_marker_pos(markerInFrames);
2529     }
2530 
2531 
2532     /**
2533      * Sets the period for the periodic notification event.
2534      * @param periodInFrames update period expressed in frames.
2535      * Zero period means no position updates.  A negative period is not allowed.
2536      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
2537      */
setPositionNotificationPeriod(int periodInFrames)2538     public int setPositionNotificationPeriod(int periodInFrames) {
2539         if (mState == STATE_UNINITIALIZED) {
2540             return ERROR_INVALID_OPERATION;
2541         }
2542         return native_set_pos_update_period(periodInFrames);
2543     }
2544 
2545 
2546     /**
2547      * Sets the playback head position within the static buffer.
2548      * The track must be stopped or paused for the position to be changed,
2549      * and must use the {@link #MODE_STATIC} mode.
2550      * @param positionInFrames playback head position within buffer, expressed in frames.
2551      * Zero corresponds to start of buffer.
2552      * The position must not be greater than the buffer size in frames, or negative.
2553      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
2554      * the position values have different meanings.
2555      * <br>
2556      * If looping is currently enabled and the new position is greater than or equal to the
2557      * loop end marker, the behavior varies by API level:
2558      * as of {@link android.os.Build.VERSION_CODES#M},
2559      * the looping is first disabled and then the position is set.
2560      * For earlier API levels, the behavior is unspecified.
2561      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2562      *    {@link #ERROR_INVALID_OPERATION}
2563      */
setPlaybackHeadPosition(@ntRange from = 0) int positionInFrames)2564     public int setPlaybackHeadPosition(@IntRange (from = 0) int positionInFrames) {
2565         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2566                 getPlayState() == PLAYSTATE_PLAYING) {
2567             return ERROR_INVALID_OPERATION;
2568         }
2569         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
2570             return ERROR_BAD_VALUE;
2571         }
2572         return native_set_position(positionInFrames);
2573     }
2574 
2575     /**
2576      * Sets the loop points and the loop count. The loop can be infinite.
2577      * Similarly to setPlaybackHeadPosition,
2578      * the track must be stopped or paused for the loop points to be changed,
2579      * and must use the {@link #MODE_STATIC} mode.
2580      * @param startInFrames loop start marker expressed in frames.
2581      * Zero corresponds to start of buffer.
2582      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
2583      * @param endInFrames loop end marker expressed in frames.
2584      * The total buffer size in frames corresponds to end of buffer.
2585      * The end marker must not be greater than the buffer size in frames.
2586      * For looping, the end marker must not be less than or equal to the start marker,
2587      * but to disable looping
2588      * it is permitted for start marker, end marker, and loop count to all be 0.
2589      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
2590      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
2591      * support,
2592      * {@link #ERROR_BAD_VALUE} is returned.
2593      * The loop range is the interval [startInFrames, endInFrames).
2594      * <br>
2595      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
2596      * unless it is greater than or equal to the loop end marker, in which case
2597      * it is forced to the loop start marker.
2598      * For earlier API levels, the effect on position is unspecified.
2599      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
2600      *    A value of -1 means infinite looping, and 0 disables looping.
2601      *    A value of positive N means to "loop" (go back) N times.  For example,
2602      *    a value of one means to play the region two times in total.
2603      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2604      *    {@link #ERROR_INVALID_OPERATION}
2605      */
setLoopPoints(@ntRange from = 0) int startInFrames, @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount)2606     public int setLoopPoints(@IntRange (from = 0) int startInFrames,
2607             @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount) {
2608         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
2609                 getPlayState() == PLAYSTATE_PLAYING) {
2610             return ERROR_INVALID_OPERATION;
2611         }
2612         if (loopCount == 0) {
2613             ;   // explicitly allowed as an exception to the loop region range check
2614         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
2615                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
2616             return ERROR_BAD_VALUE;
2617         }
2618         return native_set_loop(startInFrames, endInFrames, loopCount);
2619     }
2620 
2621     /**
2622      * Sets the audio presentation.
2623      * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned.
2624      * If a multi-stream decoder (MSD) is not present, or the format does not support
2625      * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned.
2626      * {@link #ERROR} is returned in case of any other error.
2627      * @param presentation see {@link AudioPresentation}. In particular, id should be set.
2628      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR},
2629      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
2630      * @throws IllegalArgumentException if the audio presentation is null.
2631      * @throws IllegalStateException if track is not initialized.
2632      */
setPresentation(@onNull AudioPresentation presentation)2633     public int setPresentation(@NonNull AudioPresentation presentation) {
2634         if (presentation == null) {
2635             throw new IllegalArgumentException("audio presentation is null");
2636         }
2637         return native_setPresentation(presentation.getPresentationId(),
2638                 presentation.getProgramId());
2639     }
2640 
2641     /**
2642      * Sets the initialization state of the instance. This method was originally intended to be used
2643      * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
2644      * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
2645      * @param state the state of the AudioTrack instance
2646      * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
2647      */
    @Deprecated
    protected void setState(int state) {
        // Direct, unsynchronized write — preserves the historical behavior
        // that legacy AudioTrack subclasses relied on. No validation of state.
        mState = state;
    }
2652 
2653 
2654     //---------------------------------------------------------
2655     // Transport control methods
2656     //--------------------
2657     /**
2658      * Starts playing an AudioTrack.
2659      * <p>
2660      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
2661      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
2662      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
2663      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
2664      * play().
2665      * <p>
2666      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
2667      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
2668      * If you don't call write() first, or if you call write() but with an insufficient amount of
2669      * data, then the track will be in underrun state at play().  In this case,
2670      * playback will not actually start playing until the data path is filled to a
2671      * device-specific minimum level.  This requirement for the path to be filled
2672      * to a minimum level is also true when resuming audio playback after calling stop().
2673      * Similarly the buffer will need to be filled up again after
2674      * the track underruns due to failure to call write() in a timely manner with sufficient data.
2675      * For portability, an application should prime the data path to the maximum allowed
2676      * by writing data until the write() method returns a short transfer count.
2677      * This allows play() to start immediately, and reduces the chance of underrun.
2678      *
2679      * @throws IllegalStateException if the track isn't properly initialized
2680      */
play()2681     public void play()
2682     throws IllegalStateException {
2683         if (mState != STATE_INITIALIZED) {
2684             throw new IllegalStateException("play() called on uninitialized AudioTrack.");
2685         }
2686         //FIXME use lambda to pass startImpl to superclass
2687         final int delay = getStartDelayMs();
2688         if (delay == 0) {
2689             startImpl();
2690         } else {
2691             new Thread() {
2692                 public void run() {
2693                     try {
2694                         Thread.sleep(delay);
2695                     } catch (InterruptedException e) {
2696                         e.printStackTrace();
2697                     }
2698                     baseSetStartDelayMs(0);
2699                     try {
2700                         startImpl();
2701                     } catch (IllegalStateException e) {
2702                         // fail silently for a state exception when it is happening after
2703                         // a delayed start, as the player state could have changed between the
2704                         // call to start() and the execution of startImpl()
2705                     }
2706                 }
2707             }.start();
2708         }
2709     }
2710 
    // Performs the actual start under mPlayStateLock: updates the base player
    // state, starts the native track, then publishes the Java-side play state.
    private void startImpl() {
        synchronized(mPlayStateLock) {
            baseStart();
            native_start();
            if (mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                // A stop was requested while paused (see pause()/stop()):
                // resuming must continue the stop sequence, not normal playback.
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PLAYING;
                // A fresh start cancels any pending offload end-of-stream state.
                mOffloadEosPending = false;
            }
        }
    }
2723 
2724     /**
2725      * Stops playing the audio data.
2726      * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
2727      * after the last buffer that was written has been played. For an immediate stop, use
2728      * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
2729      * back yet.
2730      * @throws IllegalStateException
2731      */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            native_stop();
            baseStop();
            if (mOffloaded && mPlayState != PLAYSTATE_PAUSED_STOPPING) {
                // Offloaded tracks keep draining queued data after stop(); stay
                // in STOPPING until the drain completes rather than jumping
                // straight to STOPPED.
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_STOPPED;
                mOffloadEosPending = false;
                // Reset A/V sync bookkeeping so a later write starts clean.
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                // Wake a thread waiting on mPlayStateLock for the stop to
                // complete (presumably blockUntilOffloadDrain — confirm).
                mPlayStateLock.notify();
            }
        }
    }
2753 
2754     /**
2755      * Pauses the playback of the audio data. Data that has not been played
2756      * back will not be discarded. Subsequent calls to {@link #play} will play
2757      * this data back. See {@link #flush()} to discard this data.
2758      *
2759      * @throws IllegalStateException
2760      */
pause()2761     public void pause()
2762     throws IllegalStateException {
2763         if (mState != STATE_INITIALIZED) {
2764             throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
2765         }
2766 
2767         // pause playback
2768         synchronized(mPlayStateLock) {
2769             native_pause();
2770             basePause();
2771             if (mPlayState == PLAYSTATE_STOPPING) {
2772                 mPlayState = PLAYSTATE_PAUSED_STOPPING;
2773             } else {
2774                 mPlayState = PLAYSTATE_PAUSED;
2775             }
2776         }
2777     }
2778 
2779 
2780     //---------------------------------------------------------
2781     // Audio data supply
2782     //--------------------
2783 
2784     /**
2785      * Flushes the audio data currently queued for playback. Any data that has
2786      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
2787      * or if the track's creation mode is not {@link #MODE_STREAM}.
2788      * <BR> Note that although data written but not yet presented is discarded, there is no
2789      * guarantee that all of the buffer space formerly used by that data
2790      * is available for a subsequent write.
2791      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
2792      * less than or equal to the total buffer size
2793      * may return a short actual transfer count.
2794      */
flush()2795     public void flush() {
2796         if (mState == STATE_INITIALIZED) {
2797             // flush the data in native layer
2798             native_flush();
2799             mAvSyncHeader = null;
2800             mAvSyncBytesRemaining = 0;
2801         }
2802 
2803     }
2804 
2805     /**
2806      * Writes the audio data to the audio sink for playback (streaming mode),
2807      * or copies audio data for later playback (static buffer mode).
2808      * The format specified in the AudioTrack constructor should be
2809      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
2810      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
2811      * <p>
2812      * In streaming mode, the write will normally block until all the data has been enqueued for
2813      * playback, and will return a full transfer count.  However, if the track is stopped or paused
2814      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
2815      * occurs during the write, then the write may return a short transfer count.
2816      * <p>
2817      * In static buffer mode, copies the data to the buffer starting at offset 0.
2818      * Note that the actual playback of this data might occur after this function returns.
2819      *
2820      * @param audioData the array that holds the data to play.
2821      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
2822      *    starts.
2823      *    Must not be negative, or cause the data access to go out of bounds of the array.
2824      * @param sizeInBytes the number of bytes to write in audioData after the offset.
2825      *    Must not be negative, or cause the data access to go out of bounds of the array.
2826      * @return zero or the positive number of bytes that were written, or one of the following
2827      *    error codes. The number of bytes will be a multiple of the frame size in bytes
2828      *    not to exceed sizeInBytes.
2829      * <ul>
2830      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2831      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2832      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2833      *    needs to be recreated. The dead object error code is not returned if some data was
2834      *    successfully transferred. In this case, the error is returned at the next write()</li>
2835      * <li>{@link #ERROR} in case of other error</li>
2836      * </ul>
2837      * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
2838      * set to  {@link #WRITE_BLOCKING}.
2839      */
    // Convenience overload: byte-array write with blocking semantics.
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
        return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
    }
2843 
2844     /**
2845      * Writes the audio data to the audio sink for playback (streaming mode),
2846      * or copies audio data for later playback (static buffer mode).
2847      * The format specified in the AudioTrack constructor should be
2848      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
2849      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
2850      * <p>
2851      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2852      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2853      * for playback, and will return a full transfer count.  However, if the write mode is
2854      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2855      * interrupts the write by calling stop or pause, or an I/O error
2856      * occurs during the write, then the write may return a short transfer count.
2857      * <p>
2858      * In static buffer mode, copies the data to the buffer starting at offset 0,
2859      * and the write mode is ignored.
2860      * Note that the actual playback of this data might occur after this function returns.
2861      *
2862      * @param audioData the array that holds the data to play.
2863      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
2864      *    starts.
2865      *    Must not be negative, or cause the data access to go out of bounds of the array.
2866      * @param sizeInBytes the number of bytes to write in audioData after the offset.
2867      *    Must not be negative, or cause the data access to go out of bounds of the array.
2868      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2869      *     effect in static mode.
2870      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2871      *         to the audio sink.
2872      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2873      *     queuing as much audio data for playback as possible without blocking.
2874      * @return zero or the positive number of bytes that were written, or one of the following
2875      *    error codes. The number of bytes will be a multiple of the frame size in bytes
2876      *    not to exceed sizeInBytes.
2877      * <ul>
2878      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2879      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2880      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2881      *    needs to be recreated. The dead object error code is not returned if some data was
2882      *    successfully transferred. In this case, the error is returned at the next write()</li>
2883      * <li>{@link #ERROR} in case of other error</li>
2884      * </ul>
2885      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)2886     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
2887             @WriteMode int writeMode) {
2888 
2889         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
2890             return ERROR_INVALID_OPERATION;
2891         }
2892 
2893         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2894             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2895             return ERROR_BAD_VALUE;
2896         }
2897 
2898         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
2899                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
2900                 || (offsetInBytes + sizeInBytes > audioData.length)) {
2901             return ERROR_BAD_VALUE;
2902         }
2903 
2904         if (!blockUntilOffloadDrain(writeMode)) {
2905             return 0;
2906         }
2907 
2908         final int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
2909                 writeMode == WRITE_BLOCKING);
2910 
2911         if ((mDataLoadMode == MODE_STATIC)
2912                 && (mState == STATE_NO_STATIC_DATA)
2913                 && (ret > 0)) {
2914             // benign race with respect to other APIs that read mState
2915             mState = STATE_INITIALIZED;
2916         }
2917 
2918         return ret;
2919     }
2920 
2921     /**
2922      * Writes the audio data to the audio sink for playback (streaming mode),
2923      * or copies audio data for later playback (static buffer mode).
2924      * The format specified in the AudioTrack constructor should be
2925      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
2926      * <p>
2927      * In streaming mode, the write will normally block until all the data has been enqueued for
2928      * playback, and will return a full transfer count.  However, if the track is stopped or paused
2929      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
2930      * occurs during the write, then the write may return a short transfer count.
2931      * <p>
2932      * In static buffer mode, copies the data to the buffer starting at offset 0.
2933      * Note that the actual playback of this data might occur after this function returns.
2934      *
2935      * @param audioData the array that holds the data to play.
2936      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
2937      *     starts.
2938      *    Must not be negative, or cause the data access to go out of bounds of the array.
2939      * @param sizeInShorts the number of shorts to read in audioData after the offset.
2940      *    Must not be negative, or cause the data access to go out of bounds of the array.
2941      * @return zero or the positive number of shorts that were written, or one of the following
2942      *    error codes. The number of shorts will be a multiple of the channel count not to
2943      *    exceed sizeInShorts.
2944      * <ul>
2945      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2946      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2947      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2948      *    needs to be recreated. The dead object error code is not returned if some data was
2949      *    successfully transferred. In this case, the error is returned at the next write()</li>
2950      * <li>{@link #ERROR} in case of other error</li>
2951      * </ul>
2952      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
2953      * set to  {@link #WRITE_BLOCKING}.
2954      */
    // Convenience overload: short-array write with blocking semantics.
    public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
        return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
    }
2958 
2959     /**
2960      * Writes the audio data to the audio sink for playback (streaming mode),
2961      * or copies audio data for later playback (static buffer mode).
2962      * The format specified in the AudioTrack constructor should be
2963      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
2964      * <p>
2965      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2966      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2967      * for playback, and will return a full transfer count.  However, if the write mode is
2968      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2969      * interrupts the write by calling stop or pause, or an I/O error
2970      * occurs during the write, then the write may return a short transfer count.
2971      * <p>
2972      * In static buffer mode, copies the data to the buffer starting at offset 0.
2973      * Note that the actual playback of this data might occur after this function returns.
2974      *
2975      * @param audioData the array that holds the data to write.
2976      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
2977      *     starts.
2978      *    Must not be negative, or cause the data access to go out of bounds of the array.
2979      * @param sizeInShorts the number of shorts to read in audioData after the offset.
2980      *    Must not be negative, or cause the data access to go out of bounds of the array.
2981      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2982      *     effect in static mode.
2983      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2984      *         to the audio sink.
2985      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2986      *     queuing as much audio data for playback as possible without blocking.
2987      * @return zero or the positive number of shorts that were written, or one of the following
2988      *    error codes. The number of shorts will be a multiple of the channel count not to
2989      *    exceed sizeInShorts.
2990      * <ul>
2991      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2992      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2993      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2994      *    needs to be recreated. The dead object error code is not returned if some data was
2995      *    successfully transferred. In this case, the error is returned at the next write()</li>
2996      * <li>{@link #ERROR} in case of other error</li>
2997      * </ul>
2998      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)2999     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
3000             @WriteMode int writeMode) {
3001 
3002         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
3003             return ERROR_INVALID_OPERATION;
3004         }
3005 
3006         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3007             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3008             return ERROR_BAD_VALUE;
3009         }
3010 
3011         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
3012                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
3013                 || (offsetInShorts + sizeInShorts > audioData.length)) {
3014             return ERROR_BAD_VALUE;
3015         }
3016 
3017         if (!blockUntilOffloadDrain(writeMode)) {
3018             return 0;
3019         }
3020 
3021         final int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
3022                 writeMode == WRITE_BLOCKING);
3023 
3024         if ((mDataLoadMode == MODE_STATIC)
3025                 && (mState == STATE_NO_STATIC_DATA)
3026                 && (ret > 0)) {
3027             // benign race with respect to other APIs that read mState
3028             mState = STATE_INITIALIZED;
3029         }
3030 
3031         return ret;
3032     }
3033 
3034     /**
3035      * Writes the audio data to the audio sink for playback (streaming mode),
3036      * or copies audio data for later playback (static buffer mode).
3037      * The format specified in the AudioTrack constructor should be
3038      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
3039      * <p>
3040      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3041      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3042      * for playback, and will return a full transfer count.  However, if the write mode is
3043      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3044      * interrupts the write by calling stop or pause, or an I/O error
3045      * occurs during the write, then the write may return a short transfer count.
3046      * <p>
3047      * In static buffer mode, copies the data to the buffer starting at offset 0,
3048      * and the write mode is ignored.
3049      * Note that the actual playback of this data might occur after this function returns.
3050      *
3051      * @param audioData the array that holds the data to write.
3052      *     The implementation does not clip for sample values within the nominal range
3053      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
3054      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
3055      *     that could add energy, such as reverb.  For the convenience of applications
3056      *     that compute samples using filters with non-unity gain,
3057      *     sample values +3 dB beyond the nominal range are permitted.
3058      *     However such values may eventually be limited or clipped, depending on various gains
3059      *     and later processing in the audio path.  Therefore applications are encouraged
3060      *     to provide samples values within the nominal range.
3061      * @param offsetInFloats the offset, expressed as a number of floats,
3062      *     in audioData where the data to write starts.
3063      *    Must not be negative, or cause the data access to go out of bounds of the array.
3064      * @param sizeInFloats the number of floats to write in audioData after the offset.
3065      *    Must not be negative, or cause the data access to go out of bounds of the array.
3066      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3067      *     effect in static mode.
3068      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3069      *         to the audio sink.
3070      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3071      *     queuing as much audio data for playback as possible without blocking.
3072      * @return zero or the positive number of floats that were written, or one of the following
3073      *    error codes. The number of floats will be a multiple of the channel count not to
3074      *    exceed sizeInFloats.
3075      * <ul>
3076      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3077      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3078      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3079      *    needs to be recreated. The dead object error code is not returned if some data was
3080      *    successfully transferred. In this case, the error is returned at the next write()</li>
3081      * <li>{@link #ERROR} in case of other error</li>
3082      * </ul>
3083      */
write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)3084     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
3085             @WriteMode int writeMode) {
3086 
3087         if (mState == STATE_UNINITIALIZED) {
3088             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3089             return ERROR_INVALID_OPERATION;
3090         }
3091 
3092         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
3093             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
3094             return ERROR_INVALID_OPERATION;
3095         }
3096 
3097         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3098             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3099             return ERROR_BAD_VALUE;
3100         }
3101 
3102         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
3103                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
3104                 || (offsetInFloats + sizeInFloats > audioData.length)) {
3105             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
3106             return ERROR_BAD_VALUE;
3107         }
3108 
3109         if (!blockUntilOffloadDrain(writeMode)) {
3110             return 0;
3111         }
3112 
3113         final int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
3114                 writeMode == WRITE_BLOCKING);
3115 
3116         if ((mDataLoadMode == MODE_STATIC)
3117                 && (mState == STATE_NO_STATIC_DATA)
3118                 && (ret > 0)) {
3119             // benign race with respect to other APIs that read mState
3120             mState = STATE_INITIALIZED;
3121         }
3122 
3123         return ret;
3124     }
3125 
3126 
3127     /**
3128      * Writes the audio data to the audio sink for playback (streaming mode),
3129      * or copies audio data for later playback (static buffer mode).
3130      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
3131      * <p>
3132      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
3133      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
3134      * for playback, and will return a full transfer count.  However, if the write mode is
3135      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
3136      * interrupts the write by calling stop or pause, or an I/O error
3137      * occurs during the write, then the write may return a short transfer count.
3138      * <p>
3139      * In static buffer mode, copies the data to the buffer starting at offset 0,
3140      * and the write mode is ignored.
3141      * Note that the actual playback of this data might occur after this function returns.
3142      *
3143      * @param audioData the buffer that holds the data to write, starting at the position reported
3144      *     by <code>audioData.position()</code>.
3145      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
3146      *     have been advanced to reflect the amount of data that was successfully written to
3147      *     the AudioTrack.
3148      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
3149      *     that the number of bytes requested be a multiple of the frame size (sample size in
3150      *     bytes multiplied by the channel count).
3151      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
3152      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
3153      *     effect in static mode.
3154      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3155      *         to the audio sink.
3156      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3157      *     queuing as much audio data for playback as possible without blocking.
3158      * @return zero or the positive number of bytes that were written, or one of the following
3159      *    error codes.
3160      * <ul>
3161      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3162      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3163      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3164      *    needs to be recreated. The dead object error code is not returned if some data was
3165      *    successfully transferred. In this case, the error is returned at the next write()</li>
3166      * <li>{@link #ERROR} in case of other error</li>
3167      * </ul>
3168      */
write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)3169     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
3170             @WriteMode int writeMode) {
3171 
3172         if (mState == STATE_UNINITIALIZED) {
3173             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
3174             return ERROR_INVALID_OPERATION;
3175         }
3176 
3177         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
3178             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
3179             return ERROR_BAD_VALUE;
3180         }
3181 
3182         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
3183             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
3184             return ERROR_BAD_VALUE;
3185         }
3186 
3187         if (!blockUntilOffloadDrain(writeMode)) {
3188             return 0;
3189         }
3190 
3191         int ret = 0;
3192         if (audioData.isDirect()) {
3193             ret = native_write_native_bytes(audioData,
3194                     audioData.position(), sizeInBytes, mAudioFormat,
3195                     writeMode == WRITE_BLOCKING);
3196         } else {
3197             ret = native_write_byte(NioUtils.unsafeArray(audioData),
3198                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
3199                     sizeInBytes, mAudioFormat,
3200                     writeMode == WRITE_BLOCKING);
3201         }
3202 
3203         if ((mDataLoadMode == MODE_STATIC)
3204                 && (mState == STATE_NO_STATIC_DATA)
3205                 && (ret > 0)) {
3206             // benign race with respect to other APIs that read mState
3207             mState = STATE_INITIALIZED;
3208         }
3209 
3210         if (ret > 0) {
3211             audioData.position(audioData.position() + ret);
3212         }
3213 
3214         return ret;
3215     }
3216 
3217     /**
3218      * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
3219      * The blocking behavior will depend on the write mode.
3220      * @param audioData the buffer that holds the data to write, starting at the position reported
3221      *     by <code>audioData.position()</code>.
3222      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
3223      *     have been advanced to reflect the amount of data that was successfully written to
3224      *     the AudioTrack.
3225      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
3226      *     that the number of bytes requested be a multiple of the frame size (sample size in
3227      *     bytes multiplied by the channel count).
3228      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
3229      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
3230      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
3231      *         to the audio sink.
3232      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
3233      *     queuing as much audio data for playback as possible without blocking.
3234      * @param timestamp The timestamp, in nanoseconds, of the first decodable audio frame in the
3235      *     provided audioData.
3236      * @return zero or the positive number of bytes that were written, or one of the following
3237      *    error codes.
3238      * <ul>
3239      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
3240      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
3241      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
3242      *    needs to be recreated. The dead object error code is not returned if some data was
3243      *    successfully transferred. In this case, the error is returned at the next write()</li>
3244      * <li>{@link #ERROR} in case of other error</li>
3245      * </ul>
3246      */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamps are only meaningful for streaming data.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Without FLAG_HW_AV_SYNC the timestamp cannot be consumed downstream;
        // fall back to a plain write and drop the pts.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // An offloaded track that is draining after stop() cannot accept new data yet.
        if (!blockUntilOffloadDrain(writeMode)) {
            return 0;
        }

        // create timestamp header if none exists
        // Big-endian layout: marker word 0x55550002 at offset 0, payload size at 4,
        // timestamp at 8, header length at 16. NOTE(review): presumably the v2 AV sync
        // header format expected by the audio HAL — confirm against the HAL definition.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550002);
        }

        // Start of a new access unit: record its size and timestamp, rewind the header
        // so it gets (re)written ahead of the payload.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // Header write failed: reset AV sync state and surface the error.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Non-blocking write could not take the whole header; report 0 payload
                // bytes written so the caller retries, header state is kept for next call.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more than the current access unit declared in the header.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }
3324 
3325 
3326     /**
3327      * Sets the playback head position within the static buffer to zero,
3328      * that is it rewinds to start of static buffer.
3329      * The track must be stopped or paused, and
3330      * the track's creation mode must be {@link #MODE_STATIC}.
3331      * <p>
3332      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
3333      * {@link #getPlaybackHeadPosition()} to zero.
3334      * For earlier API levels, the reset behavior is unspecified.
3335      * <p>
3336      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
3337      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
3338      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
3339      *  {@link #ERROR_INVALID_OPERATION}
3340      */
reloadStaticData()3341     public int reloadStaticData() {
3342         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
3343             return ERROR_INVALID_OPERATION;
3344         }
3345         return native_reload_static();
3346     }
3347 
3348     /**
3349      * When an AudioTrack in offload mode is in STOPPING play state, wait until event STREAM_END is
3350      * received if blocking write or return with 0 frames written if non blocking mode.
3351      */
    private boolean blockUntilOffloadDrain(int writeMode) {
        synchronized (mPlayStateLock) {
            // While the offloaded stream drains (STOPPING or PAUSED_STOPPING),
            // no new data may be queued.
            while (mPlayState == PLAYSTATE_STOPPING || mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                if (writeMode == WRITE_NON_BLOCKING) {
                    // Caller asked not to block: report "wrote nothing" (returns false).
                    return false;
                }
                try {
                    // Woken when the play state changes (e.g. STREAM_END received).
                    mPlayStateLock.wait();
                } catch (InterruptedException e) {
                    // Deliberately swallowed so a blocking write keeps waiting for the
                    // drain to finish. NOTE(review): the thread's interrupt status is
                    // not restored — confirm callers do not rely on it.
                }
            }
            return true;
        }
    }
3366 
3367     //--------------------------------------------------------------------------
3368     // Audio effects management
3369     //--------------------
3370 
3371     /**
3372      * Attaches an auxiliary effect to the audio track. A typical auxiliary
3373      * effect is a reverberation effect which can be applied on any sound source
3374      * that directs a certain amount of its energy to this effect. This amount
3375      * is defined by setAuxEffectSendLevel().
3376      * {@see #setAuxEffectSendLevel(float)}.
3377      * <p>After creating an auxiliary effect (e.g.
3378      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
3379      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
3380      * this method to attach the audio track to the effect.
3381      * <p>To detach the effect from the audio track, call this method with a
3382      * null effect id.
3383      *
3384      * @param effectId system wide unique id of the effect to attach
3385      * @return error code or success, see {@link #SUCCESS},
3386      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
3387      */
attachAuxEffect(int effectId)3388     public int attachAuxEffect(int effectId) {
3389         if (mState == STATE_UNINITIALIZED) {
3390             return ERROR_INVALID_OPERATION;
3391         }
3392         return native_attachAuxEffect(effectId);
3393     }
3394 
3395     /**
3396      * Sets the send level of the audio track to the attached auxiliary effect
3397      * {@link #attachAuxEffect(int)}.  Effect levels
3398      * are clamped to the closed interval [0.0, max] where
3399      * max is the value of {@link #getMaxVolume}.
3400      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
3401      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
3402      * this method must be called for the effect to be applied.
3403      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
3404      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
3405      * so an appropriate conversion from linear UI input x to level is:
3406      * x == 0 -&gt; level = 0
3407      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
3408      *
3409      * @param level linear send level
3410      * @return error code or success, see {@link #SUCCESS},
3411      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
3412      */
setAuxEffectSendLevel(@loatRangefrom = 0.0) float level)3413     public int setAuxEffectSendLevel(@FloatRange(from = 0.0) float level) {
3414         if (mState == STATE_UNINITIALIZED) {
3415             return ERROR_INVALID_OPERATION;
3416         }
3417         return baseSetAuxEffectSendLevel(level);
3418     }
3419 
3420     @Override
playerSetAuxEffectSendLevel(boolean muting, float level)3421     int playerSetAuxEffectSendLevel(boolean muting, float level) {
3422         level = clampGainOrLevel(muting ? 0.0f : level);
3423         int err = native_setAuxEffectSendLevel(level);
3424         return err == 0 ? SUCCESS : ERROR;
3425     }
3426 
3427     //--------------------------------------------------------------------------
3428     // Explicit Routing
3429     //--------------------
3430     private AudioDeviceInfo mPreferredDevice = null;
3431 
3432     /**
3433      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
3434      * the output from this AudioTrack.
3435      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
3436      *  If deviceInfo is null, default routing is restored.
3437      * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
3438      * does not correspond to a valid audio output device.
3439      */
3440     @Override
setPreferredDevice(AudioDeviceInfo deviceInfo)3441     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
3442         // Do some validation....
3443         if (deviceInfo != null && !deviceInfo.isSink()) {
3444             return false;
3445         }
3446         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
3447         boolean status = native_setOutputDevice(preferredDeviceId);
3448         if (status == true) {
3449             synchronized (this) {
3450                 mPreferredDevice = deviceInfo;
3451             }
3452         }
3453         return status;
3454     }
3455 
3456     /**
3457      * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
3458      * is not guaranteed to correspond to the actual device being used for playback.
3459      */
3460     @Override
getPreferredDevice()3461     public AudioDeviceInfo getPreferredDevice() {
3462         synchronized (this) {
3463             return mPreferredDevice;
3464         }
3465     }
3466 
3467     /**
3468      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
3469      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
3470      * <code>getRoutedDevice()</code> will return null.
3471      */
3472     @Override
getRoutedDevice()3473     public AudioDeviceInfo getRoutedDevice() {
3474         int deviceId = native_getRoutedDeviceId();
3475         if (deviceId == 0) {
3476             return null;
3477         }
3478         AudioDeviceInfo[] devices =
3479                 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS);
3480         for (int i = 0; i < devices.length; i++) {
3481             if (devices[i].getId() == deviceId) {
3482                 return devices[i];
3483             }
3484         }
3485         return null;
3486     }
3487 
3488     /*
3489      * Call BEFORE adding a routing callback handler.
3490      */
3491     @GuardedBy("mRoutingChangeListeners")
testEnableNativeRoutingCallbacksLocked()3492     private void testEnableNativeRoutingCallbacksLocked() {
3493         if (mRoutingChangeListeners.size() == 0) {
3494             native_enableDeviceCallback();
3495         }
3496     }
3497 
3498     /*
3499      * Call AFTER removing a routing callback handler.
3500      */
3501     @GuardedBy("mRoutingChangeListeners")
testDisableNativeRoutingCallbacksLocked()3502     private void testDisableNativeRoutingCallbacksLocked() {
3503         if (mRoutingChangeListeners.size() == 0) {
3504             native_disableDeviceCallback();
3505         }
3506     }
3507 
3508     //--------------------------------------------------------------------------
3509     // (Re)Routing Info
3510     //--------------------
    /**
     * The list of {@link AudioRouting.OnRoutingChangedListener} interfaces added (with
     * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)})
     * by an app to receive (re)routing notifications.
     */
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();
3519 
3520    /**
3521     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
3522     * changes on this AudioTrack.
3523     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
3524     * notifications of rerouting events.
3525     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3526     * the callback. If <code>null</code>, the {@link Handler} associated with the main
3527     * {@link Looper} will be used.
3528     */
3529     @Override
addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)3530     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
3531             Handler handler) {
3532         synchronized (mRoutingChangeListeners) {
3533             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
3534                 testEnableNativeRoutingCallbacksLocked();
3535                 mRoutingChangeListeners.put(
3536                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
3537                                 handler != null ? handler : new Handler(mInitializationLooper)));
3538             }
3539         }
3540     }
3541 
3542     /**
3543      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
3544      * to receive rerouting notifications.
3545      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
3546      * to remove.
3547      */
3548     @Override
removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)3549     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
3550         synchronized (mRoutingChangeListeners) {
3551             if (mRoutingChangeListeners.containsKey(listener)) {
3552                 mRoutingChangeListeners.remove(listener);
3553             }
3554             testDisableNativeRoutingCallbacksLocked();
3555         }
3556     }
3557 
3558     //--------------------------------------------------------------------------
3559     // (Re)Routing Info
3560     //--------------------
3561     /**
3562      * Defines the interface by which applications can receive notifications of
3563      * routing changes for the associated {@link AudioTrack}.
3564      *
3565      * @deprecated users should switch to the general purpose
3566      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
3567      */
    @Deprecated
    public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
        /**
         * Called when the routing of an AudioTrack changes from either an
         * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
         * retrieve the newly routed-to device.
         */
        public void onRoutingChanged(AudioTrack audioTrack);

        @Override
        default public void onRoutingChanged(AudioRouting router) {
            // Adapt the generic AudioRouting callback to the AudioTrack-typed one.
            if (router instanceof AudioTrack) {
                onRoutingChanged((AudioTrack) router);
            }
        }
    }
3584 
3585     /**
3586      * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
3587      * on this AudioTrack.
3588      * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
3589      * of rerouting events.
3590      * @param handler  Specifies the {@link Handler} object for the thread on which to execute
3591      * the callback. If <code>null</code>, the {@link Handler} associated with the main
3592      * {@link Looper} will be used.
3593      * @deprecated users should switch to the general purpose
3594      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
3595      */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // Thin adapter: delegates to the AudioRouting-based overload.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }
3601 
3602     /**
3603      * Removes an {@link OnRoutingChangedListener} which has been previously added
3604      * to receive rerouting notifications.
3605      * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
3606      * @deprecated users should switch to the general purpose
3607      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
3608      */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // Thin adapter: delegates to the AudioRouting-based overload.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }
3613 
3614     /**
3615      * Sends device list change notification to all listeners.
3616      */
broadcastRoutingChange()3617     private void broadcastRoutingChange() {
3618         AudioManager.resetAudioPortGeneration();
3619         synchronized (mRoutingChangeListeners) {
3620             for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
3621                 delegate.notifyClient();
3622             }
3623         }
3624     }
3625 
3626     //--------------------------------------------------------------------------
3627     // Codec notifications
3628     //--------------------
3629 
3630     // OnCodecFormatChangedListener notifications uses an instance
3631     // of ListenerList to manage its listeners.
3632 
3633     private final Utils.ListenerList<AudioMetadataReadMap> mCodecFormatChangedListeners =
3634             new Utils.ListenerList();
3635 
3636     /**
3637      * Interface definition for a listener for codec format changes.
3638      */
    public interface OnCodecFormatChangedListener {
        /**
         * Called when the compressed codec format changes.
         *
         * @param audioTrack is the {@code AudioTrack} instance associated with the codec.
         * @param info is a {@link AudioMetadataReadMap} of values which contains decoded format
         *     changes reported by the codec.  Not all hardware
         *     codecs indicate codec format changes. Acceptable keys are taken from
         *     {@code AudioMetadata.Format.KEY_*} range, with the associated value type.
         */
        void onCodecFormatChanged(
                @NonNull AudioTrack audioTrack, @Nullable AudioMetadataReadMap info);
    }
3652 
3653     /**
3654      * Adds an {@link OnCodecFormatChangedListener} to receive notifications of
3655      * codec format change events on this {@code AudioTrack}.
3656      *
3657      * @param executor  Specifies the {@link Executor} object to control execution.
3658      *
3659      * @param listener The {@link OnCodecFormatChangedListener} interface to receive
3660      *     notifications of codec events.
3661      */
addOnCodecFormatChangedListener( @onNull @allbackExecutor Executor executor, @NonNull OnCodecFormatChangedListener listener)3662     public void addOnCodecFormatChangedListener(
3663             @NonNull @CallbackExecutor Executor executor,
3664             @NonNull OnCodecFormatChangedListener listener) { // NPE checks done by ListenerList.
3665         mCodecFormatChangedListeners.add(
3666                 listener, /* key for removal */
3667                 executor,
3668                 (int eventCode, AudioMetadataReadMap readMap) -> {
3669                     // eventCode is unused by this implementation.
3670                     listener.onCodecFormatChanged(this, readMap);
3671                 }
3672         );
3673     }
3674 
3675     /**
3676      * Removes an {@link OnCodecFormatChangedListener} which has been previously added
3677      * to receive codec format change events.
3678      *
3679      * @param listener The previously added {@link OnCodecFormatChangedListener} interface
3680      * to remove.
3681      */
    public void removeOnCodecFormatChangedListener(
            @NonNull OnCodecFormatChangedListener listener) {
        // Removal is keyed by listener identity, matching addOnCodecFormatChangedListener().
        mCodecFormatChangedListeners.remove(listener);  // NPE checks done by ListenerList.
    }
3686 
3687     //---------------------------------------------------------
3688     // Interface definitions
3689     //--------------------
3690     /**
3691      * Interface definition for a callback to be invoked when the playback head position of
3692      * an AudioTrack has reached a notification marker or has increased by a certain period.
3693      */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         */
        void onPeriodicNotification(AudioTrack track);
    }
3707 
3708     /**
3709      * Abstract class to receive event notifications about the stream playback in offloaded mode.
3710      * See {@link AudioTrack#registerStreamEventCallback(Executor, StreamEventCallback)} to register
3711      * the callback on the given {@link AudioTrack} instance.
3712      */
    public abstract static class StreamEventCallback {
        /**
         * Called when an offloaded track is no longer valid and has been discarded by the system.
         * An example of this happening is when an offloaded track has been paused too long, and
         * gets invalidated by the system to prevent any other offload.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onTearDown(@NonNull AudioTrack track) { }
        /**
         * Called when all the buffers of an offloaded track that were queued in the audio system
         * (e.g. the combination of the Android audio framework and the device's audio hardware)
         * have been played after {@link AudioTrack#stop()} has been called.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onPresentationEnded(@NonNull AudioTrack track) { }
        /**
         * Called when more audio data can be written without blocking on an offloaded track.
         * @param track the {@link AudioTrack} on which the event happened.
         * @param sizeInFrames the number of frames available to write without blocking.
         *   Note that the frame size of a compressed stream is 1 byte.
         */
        public void onDataRequest(@NonNull AudioTrack track, @IntRange(from = 0) int sizeInFrames) {
        }
    }
3737 
3738     /**
3739      * Registers a callback for the notification of stream events.
3740      * This callback can only be registered for instances operating in offloaded mode
3741      * (see {@link AudioTrack.Builder#setOffloadedPlayback(boolean)} and
3742      * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} for
3743      * more details).
3744      * @param executor {@link Executor} to handle the callbacks.
3745      * @param eventCallback the callback to receive the stream event notifications.
3746      */
registerStreamEventCallback(@onNull @allbackExecutor Executor executor, @NonNull StreamEventCallback eventCallback)3747     public void registerStreamEventCallback(@NonNull @CallbackExecutor Executor executor,
3748             @NonNull StreamEventCallback eventCallback) {
3749         if (eventCallback == null) {
3750             throw new IllegalArgumentException("Illegal null StreamEventCallback");
3751         }
3752         if (!mOffloaded) {
3753             throw new IllegalStateException(
3754                     "Cannot register StreamEventCallback on non-offloaded AudioTrack");
3755         }
3756         if (executor == null) {
3757             throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback");
3758         }
3759         synchronized (mStreamEventCbLock) {
3760             // check if eventCallback already in list
3761             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
3762                 if (seci.mStreamEventCb == eventCallback) {
3763                     throw new IllegalArgumentException(
3764                             "StreamEventCallback already registered");
3765                 }
3766             }
3767             beginStreamEventHandling();
3768             mStreamEventCbInfoList.add(new StreamEventCbInfo(executor, eventCallback));
3769         }
3770     }
3771 
3772     /**
3773      * Unregisters the callback for notification of stream events, previously registered
3774      * with {@link #registerStreamEventCallback(Executor, StreamEventCallback)}.
3775      * @param eventCallback the callback to unregister.
3776      */
unregisterStreamEventCallback(@onNull StreamEventCallback eventCallback)3777     public void unregisterStreamEventCallback(@NonNull StreamEventCallback eventCallback) {
3778         if (eventCallback == null) {
3779             throw new IllegalArgumentException("Illegal null StreamEventCallback");
3780         }
3781         if (!mOffloaded) {
3782             throw new IllegalStateException("No StreamEventCallback on non-offloaded AudioTrack");
3783         }
3784         synchronized (mStreamEventCbLock) {
3785             StreamEventCbInfo seciToRemove = null;
3786             for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
3787                 if (seci.mStreamEventCb == eventCallback) {
3788                     // ok to remove while iterating over list as we exit iteration
3789                     mStreamEventCbInfoList.remove(seci);
3790                     if (mStreamEventCbInfoList.size() == 0) {
3791                         endStreamEventHandling();
3792                     }
3793                     return;
3794                 }
3795             }
3796             throw new IllegalArgumentException("StreamEventCallback was not registered");
3797         }
3798     }
3799 
3800     //---------------------------------------------------------
3801     // Offload
3802     //--------------------
3803     private static class StreamEventCbInfo {
3804         final Executor mStreamEventExec;
3805         final StreamEventCallback mStreamEventCb;
3806 
StreamEventCbInfo(Executor e, StreamEventCallback cb)3807         StreamEventCbInfo(Executor e, StreamEventCallback cb) {
3808             mStreamEventExec = e;
3809             mStreamEventCb = cb;
3810         }
3811     }
3812 
    // Guards the registered stream event callbacks and the lifecycle of their handler thread.
    private final Object mStreamEventCbLock = new Object();
    @GuardedBy("mStreamEventCbLock")
    @NonNull private LinkedList<StreamEventCbInfo> mStreamEventCbInfoList =
            new LinkedList<StreamEventCbInfo>();
    /**
     * Dedicated thread for handling the StreamEvent callbacks
     */
    private @Nullable HandlerThread mStreamEventHandlerThread;
    // volatile: read from the native callback thread in handleStreamEventFromNative() without
    // holding mStreamEventCbLock, while assignments happen under that lock.
    private @Nullable volatile StreamEventHandler mStreamEventHandler;
3822 
3823     /**
3824      * Called from native AudioTrack callback thread, filter messages if necessary
3825      * and repost event on AudioTrack message loop to prevent blocking native thread.
3826      * @param what event code received from native
3827      * @param arg optional argument for event
3828      */
handleStreamEventFromNative(int what, int arg)3829     void handleStreamEventFromNative(int what, int arg) {
3830         if (mStreamEventHandler == null) {
3831             return;
3832         }
3833         switch (what) {
3834             case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
3835                 // replace previous CAN_WRITE_MORE_DATA messages with the latest value
3836                 mStreamEventHandler.removeMessages(NATIVE_EVENT_CAN_WRITE_MORE_DATA);
3837                 mStreamEventHandler.sendMessage(
3838                         mStreamEventHandler.obtainMessage(
3839                                 NATIVE_EVENT_CAN_WRITE_MORE_DATA, arg, 0/*ignored*/));
3840                 break;
3841             case NATIVE_EVENT_NEW_IAUDIOTRACK:
3842                 mStreamEventHandler.sendMessage(
3843                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_NEW_IAUDIOTRACK));
3844                 break;
3845             case NATIVE_EVENT_STREAM_END:
3846                 mStreamEventHandler.sendMessage(
3847                         mStreamEventHandler.obtainMessage(NATIVE_EVENT_STREAM_END));
3848                 break;
3849         }
3850     }
3851 
    /**
     * Handler running on the dedicated stream event thread. Receives the events reposted by
     * {@link #handleStreamEventFromNative} and dispatches them to the registered
     * {@link StreamEventCallback}s on their executors.
     */
    private class StreamEventHandler extends Handler {

        StreamEventHandler(Looper looper) {
            super(looper);
        }

        @Override
        public void handleMessage(Message msg) {
            final LinkedList<StreamEventCbInfo> cbInfoList;
            synchronized (mStreamEventCbLock) {
                if (msg.what == NATIVE_EVENT_STREAM_END) {
                    synchronized (mPlayStateLock) {
                        // End of stream reached while a stop() is in flight.
                        if (mPlayState == PLAYSTATE_STOPPING) {
                            if (mOffloadEosPending) {
                                // Presumably a new start was requested while draining —
                                // resume playback instead of completing the stop.
                                // TODO confirm against where mOffloadEosPending is set.
                                native_start();
                                mPlayState = PLAYSTATE_PLAYING;
                            } else {
                                mAvSyncHeader = null;
                                mAvSyncBytesRemaining = 0;
                                mPlayState = PLAYSTATE_STOPPED;
                            }
                            mOffloadEosPending = false;
                            // Wake any thread blocked on the play state transition.
                            mPlayStateLock.notify();
                        }
                    }
                }
                if (mStreamEventCbInfoList.size() == 0) {
                    return;
                }
                // Copy the callback list so the callbacks are dispatched below without
                // holding mStreamEventCbLock (app code must not run under our lock).
                cbInfoList = new LinkedList<StreamEventCbInfo>(mStreamEventCbInfoList);
            }

            // Dispatch with the binder calling identity cleared so app callbacks do not run
            // with any identity inherited by this thread; always restore it afterwards.
            final long identity = Binder.clearCallingIdentity();
            try {
                for (StreamEventCbInfo cbi : cbInfoList) {
                    switch (msg.what) {
                        case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onDataRequest(AudioTrack.this, msg.arg1));
                            break;
                        case NATIVE_EVENT_NEW_IAUDIOTRACK:
                            // TODO also release track as it's not longer usable
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onTearDown(AudioTrack.this));
                            break;
                        case NATIVE_EVENT_STREAM_END:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onPresentationEnded(AudioTrack.this));
                            break;
                    }
                }
            } finally {
                Binder.restoreCallingIdentity(identity);
            }
        }
    }
3908 
3909     @GuardedBy("mStreamEventCbLock")
beginStreamEventHandling()3910     private void beginStreamEventHandling() {
3911         if (mStreamEventHandlerThread == null) {
3912             mStreamEventHandlerThread = new HandlerThread(TAG + ".StreamEvent");
3913             mStreamEventHandlerThread.start();
3914             final Looper looper = mStreamEventHandlerThread.getLooper();
3915             if (looper != null) {
3916                 mStreamEventHandler = new StreamEventHandler(looper);
3917             }
3918         }
3919     }
3920 
3921     @GuardedBy("mStreamEventCbLock")
endStreamEventHandling()3922     private void endStreamEventHandling() {
3923         if (mStreamEventHandlerThread != null) {
3924             mStreamEventHandlerThread.quit();
3925             mStreamEventHandlerThread = null;
3926         }
3927     }
3928 
3929     //---------------------------------------------------------
3930     // Inner classes
3931     //--------------------
3932     /**
3933      * Helper class to handle the forwarding of native events to the appropriate listener
3934      * (potentially) handled in a different thread
3935      */
3936     private class NativePositionEventHandlerDelegate {
3937         private final Handler mHandler;
3938 
NativePositionEventHandlerDelegate(final AudioTrack track, final OnPlaybackPositionUpdateListener listener, Handler handler)3939         NativePositionEventHandlerDelegate(final AudioTrack track,
3940                                    final OnPlaybackPositionUpdateListener listener,
3941                                    Handler handler) {
3942             // find the looper for our new event handler
3943             Looper looper;
3944             if (handler != null) {
3945                 looper = handler.getLooper();
3946             } else {
3947                 // no given handler, use the looper the AudioTrack was created in
3948                 looper = mInitializationLooper;
3949             }
3950 
3951             // construct the event handler with this looper
3952             if (looper != null) {
3953                 // implement the event handler delegate
3954                 mHandler = new Handler(looper) {
3955                     @Override
3956                     public void handleMessage(Message msg) {
3957                         if (track == null) {
3958                             return;
3959                         }
3960                         switch(msg.what) {
3961                         case NATIVE_EVENT_MARKER:
3962                             if (listener != null) {
3963                                 listener.onMarkerReached(track);
3964                             }
3965                             break;
3966                         case NATIVE_EVENT_NEW_POS:
3967                             if (listener != null) {
3968                                 listener.onPeriodicNotification(track);
3969                             }
3970                             break;
3971                         default:
3972                             loge("Unknown native event type: " + msg.what);
3973                             break;
3974                         }
3975                     }
3976                 };
3977             } else {
3978                 mHandler = null;
3979             }
3980         }
3981 
getHandler()3982         Handler getHandler() {
3983             return mHandler;
3984         }
3985     }
3986 
3987     //---------------------------------------------------------
3988     // Methods for IPlayer interface
3989     //--------------------
    @Override
    void playerStart() {
        // IPlayer entry point: delegate to the public play() API.
        play();
    }
3994 
    @Override
    void playerPause() {
        // IPlayer entry point: delegate to the public pause() API.
        pause();
    }
3999 
    @Override
    void playerStop() {
        // IPlayer entry point: delegate to the public stop() API.
        stop();
    }
4004 
4005     //---------------------------------------------------------
4006     // Java methods called from the native side
4007     //--------------------
    /**
     * Entry point for all events posted by the native layer; runs on a native callback thread.
     * Routes each event class to its dedicated path: routing changes, codec format changes,
     * offload stream events, and position (marker/period) notifications.
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    private static void postEventFromNative(Object audiotrack_ref,
            int what, int arg1, int arg2, Object obj) {
        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
        // Resolve the AudioTrack from the weak reference handed to native in native_setup();
        // the Java object may already have been garbage collected.
        final AudioTrack track = (AudioTrack) ((WeakReference) audiotrack_ref).get();
        if (track == null) {
            return;
        }

        if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
            track.broadcastRoutingChange();
            return;
        }

        if (what == NATIVE_EVENT_CODEC_FORMAT_CHANGE) {
            // Codec format metadata arrives as a ByteBuffer; decode it in native byte order
            // and fan it out to the registered codec-format listeners.
            ByteBuffer buffer = (ByteBuffer) obj;
            buffer.order(ByteOrder.nativeOrder());
            buffer.rewind();
            AudioMetadataReadMap audioMetaData = AudioMetadata.fromByteBuffer(buffer);
            if (audioMetaData == null) {
                Log.e(TAG, "Unable to get audio metadata from byte buffer");
                return;
            }
            track.mCodecFormatChangedListeners.notify(0 /* eventCode, unused */, audioMetaData);
            return;
        }

        // Offload stream events are filtered and reposted on the dedicated stream event
        // thread so the native callback thread is never blocked.
        if (what == NATIVE_EVENT_CAN_WRITE_MORE_DATA
                || what == NATIVE_EVENT_NEW_IAUDIOTRACK
                || what == NATIVE_EVENT_STREAM_END) {
            track.handleStreamEventFromNative(what, arg1);
            return;
        }

        // Everything else goes to the position-event delegate (marker/period listeners), if set.
        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
        if (delegate != null) {
            Handler handler = delegate.getHandler();
            if (handler != null) {
                Message m = handler.obtainMessage(what, arg1, arg2, obj);
                handler.sendMessage(m);
            }
        }
    }
4052 
4053     //---------------------------------------------------------
4054     // Native methods called from the Java side
4055     //--------------------
4056 
    private static native boolean native_is_direct_output_supported(int encoding, int sampleRate,
            int channelMask, int channelIndexMask, int contentType, int usage, int flags);

    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack,
            boolean offload, int encapsulationMode, Object tunerConfiguration,
            @NonNull String opPackageName);

    private native final void native_finalize();

    /**
     * @hide
     */
    @UnsupportedAppUsage
    public native final void native_release();

    // Transport control.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Data writes for the various supported container types.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(ByteBuffer audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    // Buffer sizing.
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback rate / params.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Playback position: markers, periodic notifications, head position.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries (no track instance required).
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effects.
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Routing.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    private native int native_applyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation);

    private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
    private native final int native_setPresentation(int presentationId, int programId);

    private native int native_getPortId();

    private native void native_set_delay_padding(int delayInFrames, int paddingInFrames);

    // Audio description mix level and dual mono mode (out-params returned via arrays).
    private native int native_set_audio_description_mix_level_db(float level);
    private native int native_get_audio_description_mix_level_db(float[] level);
    private native int native_set_dual_mono_mode(int dualMonoMode);
    private native int native_get_dual_mono_mode(int[] dualMonoMode);
4164 
4165     //---------------------------------------------------------
4166     // Utility methods
4167     //------------------
4168 
    /** Logs a debug message under the class tag. */
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
4172 
    /** Logs an error message under the class tag. */
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
4176 
    /**
     * Keys used to extract metrics values from the {@link AudioTrack#getMetrics} return value.
     */
    public final static class MetricsConstants
    {
        // Key holder only; not instantiable.
        private MetricsConstants() {}

        // MM_PREFIX is slightly different than TAG, used to avoid cut-n-paste errors.
        private static final String MM_PREFIX = "android.media.audiotrack.";

        /**
         * Key to extract the stream type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * This value may not exist in API level {@link android.os.Build.VERSION_CODES#P}.
         * The value is a {@code String}.
         */
        public static final String STREAMTYPE = MM_PREFIX + "streamtype";

        /**
         * Key to extract the attribute content type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String CONTENTTYPE = MM_PREFIX + "type";

        /**
         * Key to extract the attribute usage for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String USAGE = MM_PREFIX + "usage";

        /**
         * Key to extract the sample rate for this track in Hz
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is an {@code int}.
         * @deprecated This does not work. Use {@link AudioTrack#getSampleRate()} instead.
         */
        // NOTE(review): value keeps the legacy "android.media.audiorecord." prefix; it is a
        // public constant, so the published value must not be "fixed" to the audiotrack prefix.
        @Deprecated
        public static final String SAMPLERATE = "android.media.audiorecord.samplerate";

        /**
         * Key to extract the native channel mask information for this track
         * from the {@link AudioTrack#getMetrics} return value.
         *
         * The value is a {@code long}.
         * @deprecated This does not work. Use {@link AudioTrack#getFormat()} and read from
         * the returned format instead.
         */
        // NOTE(review): same legacy "audiorecord" prefix as SAMPLERATE above — kept as-is.
        @Deprecated
        public static final String CHANNELMASK = "android.media.audiorecord.channelmask";

        /**
         * Use for testing only. Do not expose.
         * The current sample rate.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String SAMPLE_RATE = MM_PREFIX + "sampleRate";

        /**
         * Use for testing only. Do not expose.
         * The native channel mask.
         * The value is a {@code long}.
         * @hide
         */
        @TestApi
        public static final String CHANNEL_MASK = MM_PREFIX + "channelMask";

        /**
         * Use for testing only. Do not expose.
         * The output audio data encoding.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ENCODING = MM_PREFIX + "encoding";

        /**
         * Use for testing only. Do not expose.
         * The port id of this track port in audioserver.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String PORT_ID = MM_PREFIX + "portId";

        /**
         * Use for testing only. Do not expose.
         * The buffer frameCount.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String FRAME_COUNT = MM_PREFIX + "frameCount";

        /**
         * Use for testing only. Do not expose.
         * The actual track attributes used.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ATTRIBUTES = MM_PREFIX + "attributes";
    }
4280 }
4281