• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import java.lang.annotation.Retention;
20 import java.lang.annotation.RetentionPolicy;
21 import java.lang.ref.WeakReference;
22 import java.nio.ByteBuffer;
23 import java.nio.NioUtils;
24 import java.util.Iterator;
25 import java.util.Set;
26 
27 import android.annotation.IntDef;
28 import android.app.ActivityThread;
29 import android.app.AppOpsManager;
30 import android.content.Context;
31 import android.os.Handler;
32 import android.os.IBinder;
33 import android.os.Looper;
34 import android.os.Message;
35 import android.os.Process;
36 import android.os.RemoteException;
37 import android.os.ServiceManager;
38 import android.util.Log;
39 
40 import com.android.internal.app.IAppOpsService;
41 
42 
43 /**
44  * The AudioTrack class manages and plays a single audio resource for Java applications.
45  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
46  * achieved by "pushing" the data to the AudioTrack object using one of the
47  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
48  *  and {@link #write(float[], int, int, int)} methods.
49  *
50  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
51  * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
52  * one of the {@code write()} methods. These are blocking and return when the data has been
53  * transferred from the Java layer to the native layer and queued for playback. The streaming
54  * mode is most useful when playing blocks of audio data that for instance are:
55  *
56  * <ul>
57  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
58  *   <li>too big to fit in memory because of the characteristics of the audio data
59  *         (high sampling rate, bits per sample ...)</li>
60  *   <li>received or generated while previously queued audio is playing.</li>
61  * </ul>
62  *
63  * The static mode should be chosen when dealing with short sounds that fit in memory and
64  * that need to be played with the smallest latency possible. The static mode will
65  * therefore be preferred for UI and game sounds that are played often, and with the
66  * smallest overhead possible.
67  *
68  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
69  * The size of this buffer, specified during the construction, determines how long an AudioTrack
70  * can play before running out of data.<br>
71  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
72  * be played from it.<br>
73  * For the streaming mode, data will be written to the audio sink in chunks of
74  * sizes less than or equal to the total buffer size.
75  *
76  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
77  */
78 public class AudioTrack
79 {
80     //---------------------------------------------------------
81     // Constants
82     //--------------------
83     /** Minimum value for a linear gain or auxiliary effect level.
84      *  This value must be exactly equal to 0.0f; do not change it.
85      */
86     private static final float GAIN_MIN = 0.0f;
87     /** Maximum value for a linear gain or auxiliary effect level.
88      *  This value must be greater than or equal to 1.0f.
89      */
90     private static final float GAIN_MAX = 1.0f;
91 
92     /** Minimum value for sample rate */
93     private static final int SAMPLE_RATE_HZ_MIN = 4000;
94     /** Maximum value for sample rate */
95     private static final int SAMPLE_RATE_HZ_MAX = 96000;
96 
97     /** Maximum value for AudioTrack channel count */
98     private static final int CHANNEL_COUNT_MAX = 8;
99 
100     /** indicates AudioTrack state is stopped */
101     public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
102     /** indicates AudioTrack state is paused */
103     public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
104     /** indicates AudioTrack state is playing */
105     public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
106 
107     // keep these values in sync with android_media_AudioTrack.cpp
108     /**
109      * Creation mode where audio data is transferred from Java to the native layer
110      * only once before the audio starts playing.
111      */
112     public static final int MODE_STATIC = 0;
113     /**
114      * Creation mode where audio data is streamed from Java to the native layer
115      * as the audio is playing.
116      */
117     public static final int MODE_STREAM = 1;
118 
119     /**
120      * State of an AudioTrack that was not successfully initialized upon creation.
121      */
122     public static final int STATE_UNINITIALIZED = 0;
123     /**
124      * State of an AudioTrack that is ready to be used.
125      */
126     public static final int STATE_INITIALIZED   = 1;
127     /**
128      * State of a successfully initialized AudioTrack that uses static data,
129      * but that hasn't received that data yet.
130      */
131     public static final int STATE_NO_STATIC_DATA = 2;
132 
133     /**
134      * Denotes a successful operation.
135      */
136     public  static final int SUCCESS                               = AudioSystem.SUCCESS;
137     /**
138      * Denotes a generic operation failure.
139      */
140     public  static final int ERROR                                 = AudioSystem.ERROR;
141     /**
142      * Denotes a failure due to the use of an invalid value.
143      */
144     public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
145     /**
146      * Denotes a failure due to the improper use of a method.
147      */
148     public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
149 
150     // Error codes:
151     // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
152     private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
153     private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
154     private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
155     private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
156     private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;
157 
158     // Events:
159     // to keep in sync with frameworks/av/include/media/AudioTrack.h
160     /**
161      * Event id denotes when playback head has reached a previously set marker.
162      */
163     private static final int NATIVE_EVENT_MARKER  = 3;
164     /**
165      * Event id denotes when previously set update period has elapsed during playback.
166      */
167     private static final int NATIVE_EVENT_NEW_POS = 4;
168 
169     private final static String TAG = "android.media.AudioTrack";
170 
171 
172     /** @hide */
173     @IntDef({
174         WRITE_BLOCKING,
175         WRITE_NON_BLOCKING
176     })
177     @Retention(RetentionPolicy.SOURCE)
178     public @interface WriteMode {}
179 
180     /**
181      * The write mode indicating the write operation will block until all data has been written,
182      * to be used in {@link #write(ByteBuffer, int, int)}
183      */
184     public final static int WRITE_BLOCKING = 0;
185     /**
186      * The write mode indicating the write operation will return immediately after
187      * queuing as much audio data for playback as possible without blocking, to be used in
188      * {@link #write(ByteBuffer, int, int)}.
189      */
190     public final static int WRITE_NON_BLOCKING = 1;
191 
192     //--------------------------------------------------------------------------
193     // Member variables
194     //--------------------
195     /**
196      * Indicates the state of the AudioTrack instance.
197      */
198     private int mState = STATE_UNINITIALIZED;
199     /**
200      * Indicates the play state of the AudioTrack instance.
201      */
202     private int mPlayState = PLAYSTATE_STOPPED;
203     /**
204      * Lock to make sure mPlayState updates are reflecting the actual state of the object.
205      */
206     private final Object mPlayStateLock = new Object();
207     /**
208      * Sizes of the native audio buffer.
209      */
210     private int mNativeBufferSizeInBytes = 0;
211     private int mNativeBufferSizeInFrames = 0;
212     /**
213      * Handler for events coming from the native code.
214      */
215     private NativeEventHandlerDelegate mEventHandlerDelegate;
216     /**
217      * Looper associated with the thread that creates the AudioTrack instance.
218      */
219     private final Looper mInitializationLooper;
220     /**
221      * The audio data source sampling rate in Hz.
222      */
223     private int mSampleRate; // initialized by all constructors
224     /**
225      * The number of audio output channels (1 is mono, 2 is stereo).
226      */
227     private int mChannelCount = 1;
228     /**
229      * The audio channel mask.
230      */
231     private int mChannels = AudioFormat.CHANNEL_OUT_MONO;
232 
233     /**
234      * The type of the audio stream to play. See
235      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
236      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
237      *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
238      *   {@link AudioManager#STREAM_DTMF}.
239      */
240     private int mStreamType = AudioManager.STREAM_MUSIC;
241 
242     private final AudioAttributes mAttributes;
243     /**
244      * The way audio is consumed by the audio sink, streaming or static.
245      */
246     private int mDataLoadMode = MODE_STREAM;
247     /**
248      * The current audio channel configuration.
249      */
250     private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
251     /**
252      * The encoding of the audio samples.
253      * @see AudioFormat#ENCODING_PCM_8BIT
254      * @see AudioFormat#ENCODING_PCM_16BIT
255      * @see AudioFormat#ENCODING_PCM_FLOAT
256      */
257     private int mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
258     /**
259      * Audio session ID
260      */
261     private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
262     /**
263      * Reference to the app-ops service.
264      */
265     private final IAppOpsService mAppOps;
266 
267     //--------------------------------
268     // Used exclusively by native code
269     //--------------------
270     /**
271      * Accessed by native methods: provides access to C++ AudioTrack object.
272      */
273     @SuppressWarnings("unused")
274     private long mNativeTrackInJavaObj;
275     /**
276      * Accessed by native methods: provides access to the JNI data (i.e. resources used by
277      * the native AudioTrack object, but not stored in it).
278      */
279     @SuppressWarnings("unused")
280     private long mJniData;
281 
282 
283     //--------------------------------------------------------------------------
284     // Constructor, Finalize
285     //--------------------
286     /**
287      * Class constructor.
288      * @param streamType the type of the audio stream. See
289      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
290      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
291      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
292      * @param sampleRateInHz the initial source sample rate expressed in Hz.
293      * @param channelConfig describes the configuration of the audio channels.
294      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
295      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
296      * @param audioFormat the format in which the audio data is represented.
297      *   See {@link AudioFormat#ENCODING_PCM_16BIT},
298      *   {@link AudioFormat#ENCODING_PCM_8BIT},
299      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
300      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
301      *   read from for playback.
302      *   If track's creation mode is {@link #MODE_STREAM}, you can write data into
303      *   this buffer in chunks less than or equal to this size, and it is typical to use
304      *   chunks of 1/2 of the total size to permit double-buffering.
305      *   If the track's creation mode is {@link #MODE_STATIC},
306      *   this is the maximum length sample, or audio clip, that can be played by this instance.
307      *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
308      *   for the successful creation of an AudioTrack instance in streaming mode. Using values
309      *   smaller than getMinBufferSize() will result in an initialization failure.
310      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
311      * @throws java.lang.IllegalArgumentException
312      */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegate to the session-aware constructor; AUDIO_SESSION_ALLOCATE asks
        // the native layer to allocate a new session ID for this track.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
    }
319 
320     /**
321      * Class constructor with audio session. Use this constructor when the AudioTrack must be
322      * attached to a particular audio session. The primary use of the audio session ID is to
323      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
324      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
325      * and media players in the same session and not to the output mix.
326      * When an AudioTrack is created without specifying a session, it will create its own session
327      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
328      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
329      * session
330      * with all other media players or audio tracks in the same session, otherwise a new session
331      * will be created for this track if none is supplied.
332      * @param streamType the type of the audio stream. See
333      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
334      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
335      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
336      * @param sampleRateInHz the initial source sample rate expressed in Hz.
337      * @param channelConfig describes the configuration of the audio channels.
338      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
339      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
340      * @param audioFormat the format in which the audio data is represented.
 341      *   See {@link AudioFormat#ENCODING_PCM_16BIT},
 342      *   {@link AudioFormat#ENCODING_PCM_8BIT},
343      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
344      * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
345      *   from for playback. If using the AudioTrack in streaming mode, you can write data into
346      *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
347      *   this is the maximum size of the sound that will be played for this instance.
348      *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
349      *   for the successful creation of an AudioTrack instance in streaming mode. Using values
350      *   smaller than getMinBufferSize() will result in an initialization failure.
351      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
352      * @param sessionId Id of audio session the AudioTrack must be attached to
353      * @throws java.lang.IllegalArgumentException
354      */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Bridge the legacy stream-type API onto the AudioAttributes/AudioFormat
        // based constructor: the stream type maps to attributes, and the
        // rate/mask/encoding triple maps to an AudioFormat.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
    }
370 
371     /**
372      * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
373      * @param attributes a non-null {@link AudioAttributes} instance.
374      * @param format a non-null {@link AudioFormat} instance describing the format of the data
375      *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
376      *     configuring the audio format parameters such as encoding, channel mask and sample rate.
377      * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
378      *   from for playback. If using the AudioTrack in streaming mode, you can write data into
379      *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
380      *   this is the maximum size of the sound that will be played for this instance.
381      *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
382      *   for the successful creation of an AudioTrack instance in streaming mode. Using values
383      *   smaller than getMinBufferSize() will result in an initialization failure.
384      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
385      * @param sessionId ID of audio session the AudioTrack must be attached to, or
386      *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
387      *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
388      *   construction.
389      * @throws IllegalArgumentException
390      */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        if (attributes == null) {
            throw new IllegalArgumentException("Illegal null AudioAttributes");
        }
        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            // no looper on the calling thread: fall back to the main looper
            looper = Looper.getMainLooper();
        }

        // Fill in defaults for any property the caller did not set on the format,
        // as reported by the format's property-set mask.
        int rate = 0;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
        {
            rate = format.getSampleRate();
        } else {
            // default to the primary output rate, or 44.1 kHz if it is unknown
            rate = AudioSystem.getPrimaryOutputSamplingRate();
            if (rate <= 0) {
                rate = 44100;
            }
        }
        int channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0)
        {
            channelMask = format.getChannelMask();
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates rate/mask/encoding/mode and stores them into the matching
        // fields; throws IllegalArgumentException on any invalid value.
        audioParamCheck(rate, channelMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        // Depends on mChannelCount and mAudioFormat set by audioParamCheck() above.
        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;
        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        // a new instance is built from the caller's attributes (defensive copy)
        mAttributes = (new AudioAttributes.Builder(attributes).build());

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // session[] acts as an in/out parameter: the requested ID goes in, and
        // mSessionId is read back from it after native_setup() succeeds.
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                mSampleRate, mChannels, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSessionId = session[0];

        // A static track is not fully usable until its audio data is written.
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }
462 
    // Mask of all the channel positions supported by this implementation:
    // front left/right/center, LFE, back left/right/center, and side left/right.
    // isMultichannelConfigSupported() rejects masks with any other bit set.
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
474 
475     // Convenience method for the constructor's parameter checks.
476     // This is where constructor IllegalArgumentException-s are thrown
477     // postconditions:
478     //    mChannelCount is valid
479     //    mChannels is valid
480     //    mAudioFormat is valid
481     //    mSampleRate is valid
482     //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz,
                                 int channelConfig, int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        if (sampleRateInHz < SAMPLE_RATE_HZ_MIN || sampleRateInHz > SAMPLE_RATE_HZ_MAX) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        // Both the CHANNEL_OUT_* masks and the older CHANNEL_CONFIGURATION_*
        // constants are accepted for mono/stereo; any other value is treated as
        // a multichannel mask and validated bit-by-bit below.
        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannels = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannels = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannels = channelConfig;
            mChannelCount = Integer.bitCount(channelConfig);
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            // ENCODING_DEFAULT resolves to 16-bit PCM
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isValidEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // MODE_STATIC (i.e. any non-STREAM mode) additionally requires a linear
        // PCM encoding.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
537 
538     /**
539      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
540      * @param channelConfig the mask to validate
541      * @return false if the AudioTrack can't be used with such a mask
542      */
isMultichannelConfigSupported(int channelConfig)543     private static boolean isMultichannelConfigSupported(int channelConfig) {
544         // check for unsupported channels
545         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
546             loge("Channel configuration features unsupported channels");
547             return false;
548         }
549         final int channelCount = Integer.bitCount(channelConfig);
550         if (channelCount > CHANNEL_COUNT_MAX) {
551             loge("Channel configuration contains too many channels " +
552                     channelCount + ">" + CHANNEL_COUNT_MAX);
553             return false;
554         }
555         // check for unsupported multichannel combinations:
556         // - FL/FR must be present
557         // - L/R channels must be paired (e.g. no single L channel)
558         final int frontPair =
559                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
560         if ((channelConfig & frontPair) != frontPair) {
561                 loge("Front channels must be present in multichannel configurations");
562                 return false;
563         }
564         final int backPair =
565                 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
566         if ((channelConfig & backPair) != 0) {
567             if ((channelConfig & backPair) != backPair) {
568                 loge("Rear channels can't be used independently");
569                 return false;
570             }
571         }
572         final int sidePair =
573                 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
574         if ((channelConfig & sidePair) != 0
575                 && (channelConfig & sidePair) != sidePair) {
576             loge("Side channels can't be used independently");
577             return false;
578         }
579         return true;
580     }
581 
582 
583     // Convenience method for the constructor's audio buffer size check.
584     // preconditions:
585     //    mChannelCount is valid
586     //    mAudioFormat is valid
587     // postcondition:
588     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)589     private void audioBuffSizeCheck(int audioBufferSize) {
590         // NB: this section is only valid with PCM data.
591         //     To update when supporting compressed formats
592         int frameSizeInBytes;
593         if (AudioFormat.isEncodingLinearPcm(mAudioFormat)) {
594             frameSizeInBytes = mChannelCount
595                     * (AudioFormat.getBytesPerSample(mAudioFormat));
596         } else {
597             frameSizeInBytes = 1;
598         }
599         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
600             throw new IllegalArgumentException("Invalid audio buffer size.");
601         }
602 
603         mNativeBufferSizeInBytes = audioBufferSize;
604         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
605     }
606 
607 
608     /**
609      * Releases the native AudioTrack resources.
610      */
release()611     public void release() {
612         // even though native_release() stops the native AudioTrack, we need to stop
613         // AudioTrack subclasses too.
614         try {
615             stop();
616         } catch(IllegalStateException ise) {
617             // don't raise an exception, we're releasing the resources.
618         }
619         native_release();
620         mState = STATE_UNINITIALIZED;
621     }
622 
623     @Override
finalize()624     protected void finalize() {
625         native_finalize();
626     }
627 
628     //--------------------------------------------------------------------------
629     // Getters
630     //--------------------
    /**
     * Returns the minimum gain value, which is the constant 0.0.
     * Gain values less than 0.0 will be clamped to 0.0.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @return the minimum value, which is the constant 0.0.
     * @see #getMaxVolume()
     */
    static public float getMinVolume() {
        return GAIN_MIN;
    }
640 
    /**
     * Returns the maximum gain value, which is greater than or equal to 1.0.
     * Gain values greater than the maximum will be clamped to the maximum.
     * <p>The word "volume" in the API name is historical; this is actually a gain.
     * expressed as a linear multiplier on sample values, where a maximum value of 1.0
     * corresponds to a gain of 0 dB (sample values left unmodified).
     * @return the maximum value, which is greater than or equal to 1.0.
     * @see #getMinVolume()
     */
    static public float getMaxVolume() {
        return GAIN_MAX;
    }
652 
    /**
     * Returns the configured audio data sample rate in Hz,
     * as validated and stored at construction time.
     * @return the source sample rate in Hz
     */
    public int getSampleRate() {
        return mSampleRate;
    }
659 
    /**
     * Returns the current playback rate in Hz.
     * <p>Unlike {@link #getSampleRate()}, this value is queried from the native
     * AudioTrack on every call.
     * @return the current playback rate in Hz
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }
666 
    /**
     * Returns the configured audio data format. See {@link AudioFormat#ENCODING_PCM_16BIT},
     * {@link AudioFormat#ENCODING_PCM_8BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @return the encoding of the audio samples
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }
674 
    /**
     * Returns the type of audio stream this AudioTrack is configured for.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
     * <p>Note: tracks built with the {@link AudioAttributes}-based constructor
     * report {@code AudioSystem.STREAM_DEFAULT} instead of a legacy stream type.
     * @return the stream type
     */
    public int getStreamType() {
        return mStreamType;
    }
685 
    /**
     * Returns the configured channel position mask.
     * See {@link AudioFormat#CHANNEL_OUT_MONO}
     * and {@link AudioFormat#CHANNEL_OUT_STEREO}.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }
694 
    /**
     * Returns the configured number of channels.
     */
    public int getChannelCount() {
        return mChannelCount;
    }
701 
    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     * @see #STATE_UNINITIALIZED
     */
    public int getState() {
        // Unsynchronized read; mState transitions are documented elsewhere as benign races.
        return mState;
    }
713 
    /**
     * Returns the playback state of the AudioTrack instance.
     * Read under mPlayStateLock so it is consistent with concurrent
     * play()/pause()/stop() transitions.
     * @see #PLAYSTATE_STOPPED
     * @see #PLAYSTATE_PAUSED
     * @see #PLAYSTATE_PLAYING
     */
    public int getPlayState() {
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }
725 
    /**
     *  Returns the "native frame count", derived from the bufferSizeInBytes specified at
     *  creation time and converted to frame units.
     *  If track's creation mode is {@link #MODE_STATIC},
     *  it is equal to the specified bufferSizeInBytes converted to frame units.
     *  If track's creation mode is {@link #MODE_STREAM},
     *  it is typically greater than or equal to the specified bufferSizeInBytes converted to frame
     *  units; it may be rounded up to a larger value if needed by the target device implementation.
     *  @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     *  See {@link AudioManager#getProperty(String)} for key
     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     */
    @Deprecated
    protected int getNativeFrameCount() {
        return native_get_native_frame_count();
    }
742 
    /**
     * Returns marker position expressed in frames.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }
751 
    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }
759 
    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by flush(), reload(), and stop().
     */
    public int getPlaybackHeadPosition() {
        return native_get_position();
    }
771 
    /**
     * Returns this track's estimated latency in milliseconds. This includes the latency due
     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
     *
     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
     * a better solution.
     * @hide
     */
    public int getLatency() {
        return native_get_latency();
    }
783 
784     /**
785      *  Returns the output sample rate in Hz for the specified stream type.
786      */
getNativeOutputSampleRate(int streamType)787     static public int getNativeOutputSampleRate(int streamType) {
788         return native_get_output_sample_rate(streamType);
789     }
790 
791     /**
792      * Returns the minimum buffer size required for the successful creation of an AudioTrack
793      * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
794      * guarantee a smooth playback under load, and higher values should be chosen according to
795      * the expected frequency at which the buffer will be refilled with additional data to play.
796      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
797      * to a higher value than the initial source sample rate, be sure to configure the buffer size
798      * based on the highest planned sample rate.
799      * @param sampleRateInHz the source sample rate expressed in Hz.
800      * @param channelConfig describes the configuration of the audio channels.
801      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
802      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
803      * @param audioFormat the format in which the audio data is represented.
804      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
805      *   {@link AudioFormat#ENCODING_PCM_8BIT},
806      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
807      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
808      *   or {@link #ERROR} if unable to query for output properties,
809      *   or the minimum buffer size expressed in bytes.
810      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)811     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
812         int channelCount = 0;
813         switch(channelConfig) {
814         case AudioFormat.CHANNEL_OUT_MONO:
815         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
816             channelCount = 1;
817             break;
818         case AudioFormat.CHANNEL_OUT_STEREO:
819         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
820             channelCount = 2;
821             break;
822         default:
823             if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
824                 // input channel configuration features unsupported channels
825                 loge("getMinBufferSize(): Invalid channel configuration.");
826                 return ERROR_BAD_VALUE;
827             } else {
828                 channelCount = Integer.bitCount(channelConfig);
829             }
830         }
831 
832         if (!AudioFormat.isValidEncoding(audioFormat)) {
833             loge("getMinBufferSize(): Invalid audio format.");
834             return ERROR_BAD_VALUE;
835         }
836 
837         // sample rate, note these values are subject to change
838         if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
839             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
840             return ERROR_BAD_VALUE;
841         }
842 
843         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
844         if (size <= 0) {
845             loge("getMinBufferSize(): error querying hardware");
846             return ERROR;
847         }
848         else {
849             return size;
850         }
851     }
852 
    /**
     * Returns the audio session ID.
     *
     * @return the ID of the audio session this AudioTrack belongs to.
     */
    public int getAudioSessionId() {
        return mSessionId;
    }
861 
862    /**
863     * Poll for a timestamp on demand.
864     * <p>
865     * If you need to track timestamps during initial warmup or after a routing or mode change,
866     * you should request a new timestamp once per second until the reported timestamps
867     * show that the audio clock is stable.
868     * Thereafter, query for a new timestamp approximately once every 10 seconds to once per minute.
869     * Calling this method more often is inefficient.
870     * It is also counter-productive to call this method more often than recommended,
871     * because the short-term differences between successive timestamp reports are not meaningful.
872     * If you need a high-resolution mapping between frame position and presentation time,
873     * consider implementing that at application level, based on low-resolution timestamps.
874     * <p>
875     * The audio data at the returned position may either already have been
876     * presented, or may have not yet been presented but is committed to be presented.
877     * It is not possible to request the time corresponding to a particular position,
878     * or to request the (fractional) position corresponding to a particular time.
879     * If you need such features, consider implementing them at application level.
880     *
881     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
882     *        and owned by caller.
883     * @return true if a timestamp is available, or false if no timestamp is available.
884     *         If a timestamp if available,
885     *         the AudioTimestamp instance is filled in with a position in frame units, together
886     *         with the estimated time when that frame was presented or is committed to
887     *         be presented.
888     *         In the case that no timestamp is available, any supplied instance is left unaltered.
889     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
890     *         or during and immediately after a route change.
891     */
892     // Add this text when the "on new timestamp" API is added:
893     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)894     public boolean getTimestamp(AudioTimestamp timestamp)
895     {
896         if (timestamp == null) {
897             throw new IllegalArgumentException();
898         }
899         // It's unfortunate, but we have to either create garbage every time or use synchronized
900         long[] longArray = new long[2];
901         int ret = native_get_timestamp(longArray);
902         if (ret != SUCCESS) {
903             return false;
904         }
905         timestamp.framePosition = longArray[0];
906         timestamp.nanoTime = longArray[1];
907         return true;
908     }
909 
910 
911     //--------------------------------------------------------------------------
912     // Initialization / configuration
913     //--------------------
    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
     * Notifications will be received in the same thread as the one in which the AudioTrack
     * instance was created.
     * @param listener the listener to notify, or null to clear a previously set listener.
     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Delegate to the two-argument overload with no explicit Handler.
        setPlaybackPositionUpdateListener(listener, null);
    }
924 
925     /**
926      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
927      * for each periodic playback head position update.
928      * Use this method to receive AudioTrack events in the Handler associated with another
929      * thread than the one in which you created the AudioTrack instance.
930      * @param listener
931      * @param handler the Handler that will receive the event notification messages.
932      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)933     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
934                                                     Handler handler) {
935         if (listener != null) {
936             mEventHandlerDelegate = new NativeEventHandlerDelegate(this, listener, handler);
937         } else {
938             mEventHandlerDelegate = null;
939         }
940     }
941 
942 
clampGainOrLevel(float gainOrLevel)943     private static float clampGainOrLevel(float gainOrLevel) {
944         if (Float.isNaN(gainOrLevel)) {
945             throw new IllegalArgumentException();
946         }
947         if (gainOrLevel < GAIN_MIN) {
948             gainOrLevel = GAIN_MIN;
949         } else if (gainOrLevel > GAIN_MAX) {
950             gainOrLevel = GAIN_MAX;
951         }
952         return gainOrLevel;
953     }
954 
955 
956      /**
957      * Sets the specified left and right output gain values on the AudioTrack.
958      * <p>Gain values are clamped to the closed interval [0.0, max] where
959      * max is the value of {@link #getMaxVolume}.
960      * A value of 0.0 results in zero gain (silence), and
961      * a value of 1.0 means unity gain (signal unchanged).
962      * The default value is 1.0 meaning unity gain.
963      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
964      * @param leftGain output gain for the left channel.
965      * @param rightGain output gain for the right channel
966      * @return error code or success, see {@link #SUCCESS},
967      *    {@link #ERROR_INVALID_OPERATION}
968      * @deprecated Applications should use {@link #setVolume} instead, as it
969      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
970      */
setStereoVolume(float leftGain, float rightGain)971     public int setStereoVolume(float leftGain, float rightGain) {
972         if (isRestricted()) {
973             return SUCCESS;
974         }
975         if (mState == STATE_UNINITIALIZED) {
976             return ERROR_INVALID_OPERATION;
977         }
978 
979         leftGain = clampGainOrLevel(leftGain);
980         rightGain = clampGainOrLevel(rightGain);
981 
982         native_setVolume(leftGain, rightGain);
983 
984         return SUCCESS;
985     }
986 
987 
    /**
     * Sets the specified output gain value on all channels of this track.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>This API is preferred over {@link #setStereoVolume}, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param gain output gain for all channels.
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setVolume(float gain) {
        // Apply the same gain to both stereo channels.
        return setStereoVolume(gain, gain);
    }
1005 
1006 
    /**
     * Sets the playback sample rate for this track. This sets the sampling rate at which
     * the audio data will be consumed and played back
     * (as set by the sampleRateInHz parameter in the
     * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
     * not the original sampling rate of the
     * content. For example, setting it to half the sample rate of the content will cause the
     * playback to last twice as long, but will also result in a pitch shift down by one octave.
     * The valid sample rate range is from 1 Hz to twice the value returned by
     * {@link #getNativeOutputSampleRate(int)}.
     * @param sampleRateInHz the sample rate expressed in Hz
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setPlaybackRate(int sampleRateInHz) {
        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        // Upper bound is enforced by the native layer; only reject non-positive rates here.
        if (sampleRateInHz <= 0) {
            return ERROR_BAD_VALUE;
        }
        return native_set_playback_rate(sampleRateInHz);
    }
1030 
1031 
    /**
     * Sets the position of the notification marker.  At most one marker can be active.
     * @param markerInFrames marker position in wrapping frame units similar to
     * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
     * To set a marker at a position which would appear as zero due to wraparound,
     * a workaround is to use a non-zero position near zero, such as -1 or 1.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
     *  {@link #ERROR_INVALID_OPERATION}
     */
    public int setNotificationMarkerPosition(int markerInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_set_marker_pos(markerInFrames);
    }
1047 
1048 
    /**
     * Sets the period for the periodic notification event.
     * @param periodInFrames update period expressed in frames
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
     */
    public int setPositionNotificationPeriod(int periodInFrames) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_set_pos_update_period(periodInFrames);
    }
1060 
1061 
1062     /**
1063      * Sets the playback head position.
1064      * The track must be stopped or paused for the position to be changed,
1065      * and must use the {@link #MODE_STATIC} mode.
1066      * @param positionInFrames playback head position expressed in frames
1067      * Zero corresponds to start of buffer.
1068      * The position must not be greater than the buffer size in frames, or negative.
1069      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1070      *    {@link #ERROR_INVALID_OPERATION}
1071      */
setPlaybackHeadPosition(int positionInFrames)1072     public int setPlaybackHeadPosition(int positionInFrames) {
1073         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1074                 getPlayState() == PLAYSTATE_PLAYING) {
1075             return ERROR_INVALID_OPERATION;
1076         }
1077         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
1078             return ERROR_BAD_VALUE;
1079         }
1080         return native_set_position(positionInFrames);
1081     }
1082 
1083     /**
1084      * Sets the loop points and the loop count. The loop can be infinite.
1085      * Similarly to setPlaybackHeadPosition,
1086      * the track must be stopped or paused for the loop points to be changed,
1087      * and must use the {@link #MODE_STATIC} mode.
1088      * @param startInFrames loop start marker expressed in frames
1089      * Zero corresponds to start of buffer.
1090      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1091      * @param endInFrames loop end marker expressed in frames
1092      * The total buffer size in frames corresponds to end of buffer.
1093      * The end marker must not be greater than the buffer size in frames.
1094      * For looping, the end marker must not be less than or equal to the start marker,
1095      * but to disable looping
1096      * it is permitted for start marker, end marker, and loop count to all be 0.
1097      * @param loopCount the number of times the loop is looped.
1098      *    A value of -1 means infinite looping, and 0 disables looping.
1099      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1100      *    {@link #ERROR_INVALID_OPERATION}
1101      */
setLoopPoints(int startInFrames, int endInFrames, int loopCount)1102     public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1103         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1104                 getPlayState() == PLAYSTATE_PLAYING) {
1105             return ERROR_INVALID_OPERATION;
1106         }
1107         if (loopCount == 0) {
1108             ;   // explicitly allowed as an exception to the loop region range check
1109         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1110                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1111             return ERROR_BAD_VALUE;
1112         }
1113         return native_set_loop(startInFrames, endInFrames, loopCount);
1114     }
1115 
    /**
     * Sets the initialization state of the instance. This method was originally intended to be used
     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
     * @param state the state of the AudioTrack instance
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     */
    @Deprecated
    protected void setState(int state) {
        mState = state;
    }
1127 
1128 
1129     //---------------------------------------------------------
1130     // Transport control methods
1131     //--------------------
    /**
     * Starts playing an AudioTrack.
     * If track's creation mode is {@link #MODE_STATIC}, you must have called write() prior.
     *
     * @throws IllegalStateException if the track is not in {@link #STATE_INITIALIZED}
     */
    public void play()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
        }
        // App-ops-restricted tracks are muted rather than blocked from starting.
        if (isRestricted()) {
            setVolume(0);
        }
        // Start natively and update mPlayState atomically with respect to getPlayState().
        synchronized(mPlayStateLock) {
            native_start();
            mPlayState = PLAYSTATE_PLAYING;
        }
    }
1151 
    /**
     * Checks with the app-ops service whether audio playback for this track's
     * stream type is currently disallowed for the calling app.
     * @return true if playback should be restricted (muted); false if allowed,
     *         or if the app-ops service could not be reached.
     */
    private boolean isRestricted() {
        try {
            final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType);
            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage,
                    Process.myUid(), ActivityThread.currentPackageName());
            return mode != AppOpsManager.MODE_ALLOWED;
        } catch (RemoteException e) {
            // Fail open: if the service is unreachable, do not restrict playback.
            return false;
        }
    }
1162 
    /**
     * Stops playing the audio data.
     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
     * after the last buffer that was written has been played. For an immediate stop, use
     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
     * back yet.
     * @throws IllegalStateException if the track is not in {@link #STATE_INITIALIZED}
     */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // Stop natively and update mPlayState atomically with respect to getPlayState().
        synchronized(mPlayStateLock) {
            native_stop();
            mPlayState = PLAYSTATE_STOPPED;
        }
    }
1183 
    /**
     * Pauses the playback of the audio data. Data that has not been played
     * back will not be discarded. Subsequent calls to {@link #play} will play
     * this data back. See {@link #flush()} to discard this data.
     *
     * @throws IllegalStateException if the track is not in {@link #STATE_INITIALIZED}
     */
    public void pause()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
        }

        // Pause natively and update mPlayState atomically with respect to getPlayState().
        synchronized(mPlayStateLock) {
            native_pause();
            mPlayState = PLAYSTATE_PAUSED;
        }
    }
1204 
1205 
1206     //---------------------------------------------------------
1207     // Audio data supply
1208     //--------------------
1209 
1210     /**
1211      * Flushes the audio data currently queued for playback. Any data that has
1212      * not been played back will be discarded.  No-op if not stopped or paused,
1213      * or if the track's creation mode is not {@link #MODE_STREAM}.
1214      */
flush()1215     public void flush() {
1216         if (mState == STATE_INITIALIZED) {
1217             // flush the data in native layer
1218             native_flush();
1219         }
1220 
1221     }
1222 
1223     /**
1224      * Writes the audio data to the audio sink for playback (streaming mode),
1225      * or copies audio data for later playback (static buffer mode).
1226      * In streaming mode, will block until all data has been written to the audio sink.
1227      * In static buffer mode, copies the data to the buffer starting at offset 0.
1228      * Note that the actual playback of this data might occur after this function
1229      * returns. This function is thread safe with respect to {@link #stop} calls,
1230      * in which case all of the specified data might not be written to the audio sink.
1231      *
1232      * @param audioData the array that holds the data to play.
1233      * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
1234      *    starts.
1235      * @param sizeInBytes the number of bytes to read in audioData after the offset.
1236      * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
1237      *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1238      *    the parameters don't resolve to valid data and indexes, or
1239      *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1240      *    needs to be recreated.
1241      */
1242 
write(byte[] audioData, int offsetInBytes, int sizeInBytes)1243     public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
1244 
1245         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1246             return ERROR_INVALID_OPERATION;
1247         }
1248 
1249         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
1250                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
1251                 || (offsetInBytes + sizeInBytes > audioData.length)) {
1252             return ERROR_BAD_VALUE;
1253         }
1254 
1255         int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
1256                 true /*isBlocking*/);
1257 
1258         if ((mDataLoadMode == MODE_STATIC)
1259                 && (mState == STATE_NO_STATIC_DATA)
1260                 && (ret > 0)) {
1261             // benign race with respect to other APIs that read mState
1262             mState = STATE_INITIALIZED;
1263         }
1264 
1265         return ret;
1266     }
1267 
1268 
1269     /**
1270      * Writes the audio data to the audio sink for playback (streaming mode),
1271      * or copies audio data for later playback (static buffer mode).
1272      * In streaming mode, will block until all data has been written to the audio sink.
1273      * In static buffer mode, copies the data to the buffer starting at offset 0.
1274      * Note that the actual playback of this data might occur after this function
1275      * returns. This function is thread safe with respect to {@link #stop} calls,
1276      * in which case all of the specified data might not be written to the audio sink.
1277      *
1278      * @param audioData the array that holds the data to play.
1279      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
1280      *     starts.
1281      * @param sizeInShorts the number of shorts to read in audioData after the offset.
1282      * @return the number of shorts that were written or {@link #ERROR_INVALID_OPERATION}
1283      *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1284      *    the parameters don't resolve to valid data and indexes.
1285      */
1286 
write(short[] audioData, int offsetInShorts, int sizeInShorts)1287     public int write(short[] audioData, int offsetInShorts, int sizeInShorts) {
1288 
1289         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
1290             return ERROR_INVALID_OPERATION;
1291         }
1292 
1293         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
1294                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
1295                 || (offsetInShorts + sizeInShorts > audioData.length)) {
1296             return ERROR_BAD_VALUE;
1297         }
1298 
1299         int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat);
1300 
1301         if ((mDataLoadMode == MODE_STATIC)
1302                 && (mState == STATE_NO_STATIC_DATA)
1303                 && (ret > 0)) {
1304             // benign race with respect to other APIs that read mState
1305             mState = STATE_INITIALIZED;
1306         }
1307 
1308         return ret;
1309     }
1310 
1311 
1312     /**
1313      * Writes the audio data to the audio sink for playback (streaming mode),
1314      * or copies audio data for later playback (static buffer mode).
1315      * In static buffer mode, copies the data to the buffer starting at offset 0,
1316      * and the write mode is ignored.
1317      * In streaming mode, the blocking behavior will depend on the write mode.
1318      * <p>
1319      * Note that the actual playback of this data might occur after this function
1320      * returns. This function is thread safe with respect to {@link #stop} calls,
1321      * in which case all of the specified data might not be written to the audio sink.
1322      * <p>
1323      * @param audioData the array that holds the data to play.
1324      *     The implementation does not clip for sample values within the nominal range
1325      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
1326      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
1327      *     that could add energy, such as reverb.  For the convenience of applications
1328      *     that compute samples using filters with non-unity gain,
1329      *     sample values +3 dB beyond the nominal range are permitted.
1330      *     However such values may eventually be limited or clipped, depending on various gains
1331      *     and later processing in the audio path.  Therefore applications are encouraged
1332      *     to provide samples values within the nominal range.
1333      * @param offsetInFloats the offset, expressed as a number of floats,
1334      *     in audioData where the data to play starts.
1335      * @param sizeInFloats the number of floats to read in audioData after the offset.
1336      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1337      *     effect in static mode.
1338      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1339      *         to the audio sink.
1340      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1341      *     queuing as much audio data for playback as possible without blocking.
1342      * @return the number of floats that were written, or {@link #ERROR_INVALID_OPERATION}
1343      *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
1344      *    the parameters don't resolve to valid data and indexes.
1345      */
write(float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)1346     public int write(float[] audioData, int offsetInFloats, int sizeInFloats,
1347             @WriteMode int writeMode) {
1348 
1349         if (mState == STATE_UNINITIALIZED) {
1350             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1351             return ERROR_INVALID_OPERATION;
1352         }
1353 
1354         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
1355             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
1356             return ERROR_INVALID_OPERATION;
1357         }
1358 
1359         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1360             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1361             return ERROR_BAD_VALUE;
1362         }
1363 
1364         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
1365                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
1366                 || (offsetInFloats + sizeInFloats > audioData.length)) {
1367             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
1368             return ERROR_BAD_VALUE;
1369         }
1370 
1371         int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
1372                 writeMode == WRITE_BLOCKING);
1373 
1374         if ((mDataLoadMode == MODE_STATIC)
1375                 && (mState == STATE_NO_STATIC_DATA)
1376                 && (ret > 0)) {
1377             // benign race with respect to other APIs that read mState
1378             mState = STATE_INITIALIZED;
1379         }
1380 
1381         return ret;
1382     }
1383 
1384 
1385     /**
1386      * Writes the audio data to the audio sink for playback (streaming mode),
1387      * or copies audio data for later playback (static buffer mode).
1388      * In static buffer mode, copies the data to the buffer starting at its 0 offset, and the write
1389      * mode is ignored.
1390      * In streaming mode, the blocking behavior will depend on the write mode.
1391      * @param audioData the buffer that holds the data to play, starting at the position reported
1392      *     by <code>audioData.position()</code>.
1393      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
1394      *     have been advanced to reflect the amount of data that was successfully written to
1395      *     the AudioTrack.
1396      * @param sizeInBytes number of bytes to write.
1397      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
1398      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
1399      *     effect in static mode.
1400      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
1401      *         to the audio sink.
1402      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
1403      *     queuing as much audio data for playback as possible without blocking.
1404      * @return 0 or a positive number of bytes that were written, or
1405      *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1406      */
write(ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)1407     public int write(ByteBuffer audioData, int sizeInBytes,
1408             @WriteMode int writeMode) {
1409 
1410         if (mState == STATE_UNINITIALIZED) {
1411             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
1412             return ERROR_INVALID_OPERATION;
1413         }
1414 
1415         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
1416             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
1417             return ERROR_BAD_VALUE;
1418         }
1419 
1420         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
1421             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
1422             return ERROR_BAD_VALUE;
1423         }
1424 
1425         int ret = 0;
1426         if (audioData.isDirect()) {
1427             ret = native_write_native_bytes(audioData,
1428                     audioData.position(), sizeInBytes, mAudioFormat,
1429                     writeMode == WRITE_BLOCKING);
1430         } else {
1431             ret = native_write_byte(NioUtils.unsafeArray(audioData),
1432                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
1433                     sizeInBytes, mAudioFormat,
1434                     writeMode == WRITE_BLOCKING);
1435         }
1436 
1437         if ((mDataLoadMode == MODE_STATIC)
1438                 && (mState == STATE_NO_STATIC_DATA)
1439                 && (ret > 0)) {
1440             // benign race with respect to other APIs that read mState
1441             mState = STATE_INITIALIZED;
1442         }
1443 
1444         if (ret > 0) {
1445             audioData.position(audioData.position() + ret);
1446         }
1447 
1448         return ret;
1449     }
1450 
1451     /**
1452      * Notifies the native resource to reuse the audio data already loaded in the native
1453      * layer, that is to rewind to start of buffer.
1454      * The track's creation mode must be {@link #MODE_STATIC}.
1455      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1456      *  {@link #ERROR_INVALID_OPERATION}
1457      */
reloadStaticData()1458     public int reloadStaticData() {
1459         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
1460             return ERROR_INVALID_OPERATION;
1461         }
1462         return native_reload_static();
1463     }
1464 
1465     //--------------------------------------------------------------------------
1466     // Audio effects management
1467     //--------------------
1468 
1469     /**
1470      * Attaches an auxiliary effect to the audio track. A typical auxiliary
1471      * effect is a reverberation effect which can be applied on any sound source
1472      * that directs a certain amount of its energy to this effect. This amount
1473      * is defined by setAuxEffectSendLevel().
1474      * {@see #setAuxEffectSendLevel(float)}.
1475      * <p>After creating an auxiliary effect (e.g.
1476      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
1477      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
1478      * this method to attach the audio track to the effect.
1479      * <p>To detach the effect from the audio track, call this method with a
1480      * null effect id.
1481      *
1482      * @param effectId system wide unique id of the effect to attach
1483      * @return error code or success, see {@link #SUCCESS},
1484      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
1485      */
attachAuxEffect(int effectId)1486     public int attachAuxEffect(int effectId) {
1487         if (mState == STATE_UNINITIALIZED) {
1488             return ERROR_INVALID_OPERATION;
1489         }
1490         return native_attachAuxEffect(effectId);
1491     }
1492 
1493     /**
1494      * Sets the send level of the audio track to the attached auxiliary effect
1495      * {@link #attachAuxEffect(int)}.  Effect levels
1496      * are clamped to the closed interval [0.0, max] where
1497      * max is the value of {@link #getMaxVolume}.
1498      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
1499      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
1500      * this method must be called for the effect to be applied.
1501      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
1502      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
1503      * so an appropriate conversion from linear UI input x to level is:
1504      * x == 0 -&gt; level = 0
1505      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
1506      *
1507      * @param level linear send level
1508      * @return error code or success, see {@link #SUCCESS},
1509      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
1510      */
setAuxEffectSendLevel(float level)1511     public int setAuxEffectSendLevel(float level) {
1512         if (isRestricted()) {
1513             return SUCCESS;
1514         }
1515         if (mState == STATE_UNINITIALIZED) {
1516             return ERROR_INVALID_OPERATION;
1517         }
1518         level = clampGainOrLevel(level);
1519         int err = native_setAuxEffectSendLevel(level);
1520         return err == 0 ? SUCCESS : ERROR;
1521     }
1522 
1523     //---------------------------------------------------------
1524     // Interface definitions
1525     //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         *
         * @param track the AudioTrack whose playback head reached the marker
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         *
         * @param track the AudioTrack whose playback head advanced by another period
         */
        void onPeriodicNotification(AudioTrack track);
    }
1543 
1544     //---------------------------------------------------------
1545     // Inner classes
1546     //--------------------
1547     /**
1548      * Helper class to handle the forwarding of native events to the appropriate listener
1549      * (potentially) handled in a different thread
1550      */
1551     private class NativeEventHandlerDelegate {
1552         private final Handler mHandler;
1553 
NativeEventHandlerDelegate(final AudioTrack track, final OnPlaybackPositionUpdateListener listener, Handler handler)1554         NativeEventHandlerDelegate(final AudioTrack track,
1555                                    final OnPlaybackPositionUpdateListener listener,
1556                                    Handler handler) {
1557             // find the looper for our new event handler
1558             Looper looper;
1559             if (handler != null) {
1560                 looper = handler.getLooper();
1561             } else {
1562                 // no given handler, use the looper the AudioTrack was created in
1563                 looper = mInitializationLooper;
1564             }
1565 
1566             // construct the event handler with this looper
1567             if (looper != null) {
1568                 // implement the event handler delegate
1569                 mHandler = new Handler(looper) {
1570                     @Override
1571                     public void handleMessage(Message msg) {
1572                         if (track == null) {
1573                             return;
1574                         }
1575                         switch(msg.what) {
1576                         case NATIVE_EVENT_MARKER:
1577                             if (listener != null) {
1578                                 listener.onMarkerReached(track);
1579                             }
1580                             break;
1581                         case NATIVE_EVENT_NEW_POS:
1582                             if (listener != null) {
1583                                 listener.onPeriodicNotification(track);
1584                             }
1585                             break;
1586                         default:
1587                             loge("Unknown native event type: " + msg.what);
1588                             break;
1589                         }
1590                     }
1591                 };
1592             } else {
1593                 mHandler = null;
1594             }
1595         }
1596 
getHandler()1597         Handler getHandler() {
1598             return mHandler;
1599         }
1600     }
1601 
1602 
1603     //---------------------------------------------------------
1604     // Java methods called from the native side
1605     //--------------------
1606     @SuppressWarnings("unused")
postEventFromNative(Object audiotrack_ref, int what, int arg1, int arg2, Object obj)1607     private static void postEventFromNative(Object audiotrack_ref,
1608             int what, int arg1, int arg2, Object obj) {
1609         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
1610         AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
1611         if (track == null) {
1612             return;
1613         }
1614 
1615         NativeEventHandlerDelegate delegate = track.mEventHandlerDelegate;
1616         if (delegate != null) {
1617             Handler handler = delegate.getHandler();
1618             if (handler != null) {
1619                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
1620                 handler.sendMessage(m);
1621             }
1622         }
1623 
1624     }
1625 
1626 
1627     //---------------------------------------------------------
1628     // Native methods called from the Java side
1629     //--------------------
1630 
    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int sampleRate, int channelMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    // Lifecycle teardown of the native peer.
    private native final void native_finalize();

    private native final void native_release();

    // Playback transport control.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Data path: these back the public write() overloads. Per the write() javadoc,
    // each returns the number of elements (bytes/shorts/floats) written, or a
    // negative error code.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    // Rewinds a MODE_STATIC track's buffer; see reloadStaticData().
    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Paired native getters/setters mirrored by the public API; units are defined by
    // the corresponding public methods (not all visible in this section).
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not require a constructed track.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary-effect plumbing; see attachAuxEffect() / setAuxEffectSendLevel().
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);
1698 
1699     //---------------------------------------------------------
1700     // Utility methods
1701     //------------------
1702 
logd(String msg)1703     private static void logd(String msg) {
1704         Log.d(TAG, msg);
1705     }
1706 
loge(String msg)1707     private static void loge(String msg) {
1708         Log.e(TAG, msg);
1709     }
1710 
1711 }
1712