1 /* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 package android.media; 18 19 import android.annotation.CallbackExecutor; 20 import android.annotation.FloatRange; 21 import android.annotation.IntDef; 22 import android.annotation.IntRange; 23 import android.annotation.NonNull; 24 import android.annotation.Nullable; 25 import android.annotation.TestApi; 26 import android.annotation.UnsupportedAppUsage; 27 import android.os.Binder; 28 import android.os.Handler; 29 import android.os.HandlerThread; 30 import android.os.Looper; 31 import android.os.Message; 32 import android.os.PersistableBundle; 33 import android.util.ArrayMap; 34 import android.util.Log; 35 36 import com.android.internal.annotations.GuardedBy; 37 38 import java.lang.annotation.Retention; 39 import java.lang.annotation.RetentionPolicy; 40 import java.lang.ref.WeakReference; 41 import java.nio.ByteBuffer; 42 import java.nio.ByteOrder; 43 import java.nio.NioUtils; 44 import java.util.LinkedList; 45 import java.util.concurrent.Executor; 46 47 /** 48 * The AudioTrack class manages and plays a single audio resource for Java applications. 49 * It allows streaming of PCM audio buffers to the audio sink for playback. 
This is 50 * achieved by "pushing" the data to the AudioTrack object using one of the 51 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)}, 52 * and {@link #write(float[], int, int, int)} methods. 53 * 54 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br> 55 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using 56 * one of the {@code write()} methods. These are blocking and return when the data has been 57 * transferred from the Java layer to the native layer and queued for playback. The streaming 58 * mode is most useful when playing blocks of audio data that for instance are: 59 * 60 * <ul> 61 * <li>too big to fit in memory because of the duration of the sound to play,</li> 62 * <li>too big to fit in memory because of the characteristics of the audio data 63 * (high sampling rate, bits per sample ...)</li> 64 * <li>received or generated while previously queued audio is playing.</li> 65 * </ul> 66 * 67 * The static mode should be chosen when dealing with short sounds that fit in memory and 68 * that need to be played with the smallest latency possible. The static mode will 69 * therefore be preferred for UI and game sounds that are played often, and with the 70 * smallest overhead possible. 71 * 72 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer. 73 * The size of this buffer, specified during the construction, determines how long an AudioTrack 74 * can play before running out of data.<br> 75 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can 76 * be played from it.<br> 77 * For the streaming mode, data will be written to the audio sink in chunks of 78 * sizes less than or equal to the total buffer size. 79 * 80 * AudioTrack is not final and thus permits subclasses, but such use is not recommended. 
 */
public class AudioTrack extends PlayerBase
                        implements AudioRouting
                                 , VolumeAutomation
{
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING
    /**
     * @hide
     * indicates AudioTrack state is stopping waiting for NATIVE_EVENT_STREAM_END to
     * transition to PLAYSTATE_STOPPED.
     * Only valid for offload mode.
     */
    private static final int PLAYSTATE_STOPPING = 4;
    /**
     * @hide
     * indicates AudioTrack state is paused from stopping state. Will transition to
     * PLAYSTATE_STOPPING if play() is called.
     * Only valid for offload mode.
     */
    private static final int PLAYSTATE_PAUSED_STOPPING = 5;

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public static final int SUCCESS = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public static final int ERROR = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public static final int ERROR_BAD_VALUE = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public static final int ERROR_INVALID_OPERATION = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public static final int ERROR_DEAD_OBJECT = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public static final int ERROR_WOULD_BLOCK = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;
    /**
     * Callback for more data
     */
    private static final int NATIVE_EVENT_CAN_WRITE_MORE_DATA = 9;
    /**
     * IAudioTrack tear down for offloaded tracks
     * TODO: when received, java AudioTrack must be released
     */
    private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6;
    /**
     * Event id denotes when all the buffers queued in AF and HW are played
     * back (after stop is called) for an offloaded track.
211 */ 212 private static final int NATIVE_EVENT_STREAM_END = 7; 213 214 private final static String TAG = "android.media.AudioTrack"; 215 216 217 /** @hide */ 218 @IntDef({ 219 WRITE_BLOCKING, 220 WRITE_NON_BLOCKING 221 }) 222 @Retention(RetentionPolicy.SOURCE) 223 public @interface WriteMode {} 224 225 /** 226 * The write mode indicating the write operation will block until all data has been written, 227 * to be used as the actual value of the writeMode parameter in 228 * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)}, 229 * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and 230 * {@link #write(ByteBuffer, int, int, long)}. 231 */ 232 public final static int WRITE_BLOCKING = 0; 233 234 /** 235 * The write mode indicating the write operation will return immediately after 236 * queuing as much audio data for playback as possible without blocking, 237 * to be used as the actual value of the writeMode parameter in 238 * {@link #write(ByteBuffer, int, int)}, {@link #write(short[], int, int, int)}, 239 * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and 240 * {@link #write(ByteBuffer, int, int, long)}. 241 */ 242 public final static int WRITE_NON_BLOCKING = 1; 243 244 /** @hide */ 245 @IntDef({ 246 PERFORMANCE_MODE_NONE, 247 PERFORMANCE_MODE_LOW_LATENCY, 248 PERFORMANCE_MODE_POWER_SAVING 249 }) 250 @Retention(RetentionPolicy.SOURCE) 251 public @interface PerformanceMode {} 252 253 /** 254 * Default performance mode for an {@link AudioTrack}. 255 */ 256 public static final int PERFORMANCE_MODE_NONE = 0; 257 258 /** 259 * Low latency performance mode for an {@link AudioTrack}. 260 * If the device supports it, this mode 261 * enables a lower latency path through to the audio output sink. 262 * Effects may no longer work with such an {@code AudioTrack} and 263 * the sample rate must match that of the output sink. 
264 * <p> 265 * Applications should be aware that low latency requires careful 266 * buffer management, with smaller chunks of audio data written by each 267 * {@code write()} call. 268 * <p> 269 * If this flag is used without specifying a {@code bufferSizeInBytes} then the 270 * {@code AudioTrack}'s actual buffer size may be too small. 271 * It is recommended that a fairly 272 * large buffer should be specified when the {@code AudioTrack} is created. 273 * Then the actual size can be reduced by calling 274 * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized 275 * by lowering it after each {@code write()} call until the audio glitches, 276 * which is detected by calling 277 * {@link #getUnderrunCount()}. Then the buffer size can be increased 278 * until there are no glitches. 279 * This tuning step should be done while playing silence. 280 * This technique provides a compromise between latency and glitch rate. 281 */ 282 public static final int PERFORMANCE_MODE_LOW_LATENCY = 1; 283 284 /** 285 * Power saving performance mode for an {@link AudioTrack}. 286 * If the device supports it, this 287 * mode will enable a lower power path to the audio output sink. 288 * In addition, this lower power path typically will have 289 * deeper internal buffers and better underrun resistance, 290 * with a tradeoff of higher latency. 291 * <p> 292 * In this mode, applications should attempt to use a larger buffer size 293 * and deliver larger chunks of audio data per {@code write()} call. 294 * Use {@link #getBufferSizeInFrames()} to determine 295 * the actual buffer size of the {@code AudioTrack} as it may have increased 296 * to accommodate a deeper buffer. 
 */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // keep in sync with system/media/audio/include/system/audio-base.h
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;

    // Size of HW_AV_SYNC track AV header.
    private static final float HEADER_V2_SIZE_BYTES = 20.0f;

    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;

    /**
     * Indicates that we are expecting an end of stream callback following a call
     * to setOffloadEndOfStream() in a gapless track transition context. The native track
     * will be restarted automatically.
     */
    private boolean mOffloadEosPending = false;

    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     * {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     * {@link AudioManager#STREAM_DTMF}.
     */
    @UnsupportedAppUsage
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat; // initialized by all constructors via audioParamCheck()
    /**
     * The AudioAttributes used in configuration.
 */
    private AudioAttributes mConfiguredAudioAttributes;
    /**
     * Audio session ID
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;
    /**
     * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
     */
    private int mOffset = 0;
    /**
     * Indicates whether the track is intended to play in offload mode.
     */
    private boolean mOffloaded = false;
    /**
     * When offloaded track: delay for decoder in frames
     */
    private int mOffloadDelayFrames = 0;
    /**
     * When offloaded track: padding for decoder in frames
     */
    private int mOffloadPaddingFrames = 0;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    private long mJniData;


    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer.
     *   See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegates to the session-aware constructor with a generated session ID.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
    }

    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session
     * with all other media players or audio tracks in the same session, otherwise a new session
     * will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   You can write data into this buffer in smaller chunks than this size.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer.
     *   See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Translate the legacy stream type into AudioAttributes, and the loose
        // sample rate / channel / encoding ints into an AudioFormat, then delegate.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
        deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
    }

    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/);
    }

    private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId, boolean offload)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // mState already == STATE_UNINITIALIZED

        mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // Check if we should enable deep buffer mode
        if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
            mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0)
        {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mOffloaded = offload;
        mStreamType = AudioSystem.STREAM_DEFAULT;

        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // Session and sample rate are passed as single-element arrays so the
        // native layer can write back the values it actually selected.
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
                offload);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
            int frameSizeInBytes;
            if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
            } else {
                frameSizeInBytes = 1;
            }
            // Round the AV sync header size up to a whole number of frames.
            mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
        }

        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }

        baseRegisterPlayer();
    }

    /**
     * A constructor which explicitly connects a Native (C++)
     * AudioTrack. For use by
     * the AudioTrackRoutingProxy subclass.
     * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
     * (associated with an OpenSL ES player).
     * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
     * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
     * it means that the OpenSL player interface hasn't been realized, so there is no native
     * Audiotrack to connect to. In this case wait to call deferred_connect() until the
     * OpenSLES interface is realized.
     */
    /*package*/ AudioTrack(long nativeTrackInJavaObj) {
        super(new AudioAttributes.Builder().build(),
                AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // "final"s
        mNativeTrackInJavaObj = 0;
        mJniData = 0;

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }
        mInitializationLooper = looper;

        // other initialization...
        if (nativeTrackInJavaObj != 0) {
            baseRegisterPlayer();
            deferred_connect(nativeTrackInJavaObj);
        } else {
            mState = STATE_UNINITIALIZED;
        }
    }

    /**
     * @hide
     */
    @UnsupportedAppUsage
    /* package */ void deferred_connect(long nativeTrackInJavaObj) {
        if (mState != STATE_INITIALIZED) {
            // Note that for this native_setup, we are providing an already created/initialized
            // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
            int[] session = { 0 };
            int[] rates = { 0 };
            int initResult = native_setup(new WeakReference<AudioTrack>(this),
                    null /*mAttributes - NA*/,
                    rates /*sampleRate - NA*/,
                    0 /*mChannelMask - NA*/,
                    0 /*mChannelIndexMask - NA*/,
                    0 /*mAudioFormat - NA*/,
                    0 /*mNativeBufferSizeInBytes - NA*/,
                    0 /*mDataLoadMode - NA*/,
                    session,
                    nativeTrackInJavaObj,
                    false /*offload*/);
            if (initResult != SUCCESS) {
                loge("Error code "+initResult+" when initializing AudioTrack.");
                return; // with mState == STATE_UNINITIALIZED
            }

            mSessionId = session[0];

            mState = STATE_INITIALIZED;
        }
    }

    /**
     * Builder class for {@link AudioTrack} objects.
     * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
     * attributes and audio format parameters, you indicate which of those vary from the default
     * behavior on the device.
     * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
     * parameters, to be used by a new <code>AudioTrack</code> instance:
     *
     * <pre class="prettyprint">
     * AudioTrack player = new AudioTrack.Builder()
     *         .setAudioAttributes(new AudioAttributes.Builder()
     *                  .setUsage(AudioAttributes.USAGE_ALARM)
     *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
     *                  .build())
     *         .setAudioFormat(new AudioFormat.Builder()
     *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     *                 .setSampleRate(44100)
     *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
     *                 .build())
     *         .setBufferSizeInBytes(minBuffSize)
     *         .build();
     * </pre>
     * <p>
     * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
     * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
785 * <br>If the audio format is not specified or is incomplete, its channel configuration will be 786 * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be 787 * {@link AudioFormat#ENCODING_PCM_16BIT}. 788 * The sample rate will depend on the device actually selected for playback and can be queried 789 * with {@link #getSampleRate()} method. 790 * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)}, 791 * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used. 792 * <br>If the transfer mode is not specified with {@link #setTransferMode(int)}, 793 * <code>MODE_STREAM</code> will be used. 794 * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will 795 * be generated. 796 * <br>Offload is false by default. 797 */ 798 public static class Builder { 799 private AudioAttributes mAttributes; 800 private AudioFormat mFormat; 801 private int mBufferSizeInBytes; 802 private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE; 803 private int mMode = MODE_STREAM; 804 private int mPerformanceMode = PERFORMANCE_MODE_NONE; 805 private boolean mOffload = false; 806 807 /** 808 * Constructs a new Builder with the default values as described above. 809 */ Builder()810 public Builder() { 811 } 812 813 /** 814 * Sets the {@link AudioAttributes}. 815 * @param attributes a non-null {@link AudioAttributes} instance that describes the audio 816 * data to be played. 817 * @return the same Builder instance. 
818 * @throws IllegalArgumentException 819 */ setAudioAttributes(@onNull AudioAttributes attributes)820 public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes) 821 throws IllegalArgumentException { 822 if (attributes == null) { 823 throw new IllegalArgumentException("Illegal null AudioAttributes argument"); 824 } 825 // keep reference, we only copy the data when building 826 mAttributes = attributes; 827 return this; 828 } 829 830 /** 831 * Sets the format of the audio data to be played by the {@link AudioTrack}. 832 * See {@link AudioFormat.Builder} for configuring the audio format parameters such 833 * as encoding, channel mask and sample rate. 834 * @param format a non-null {@link AudioFormat} instance. 835 * @return the same Builder instance. 836 * @throws IllegalArgumentException 837 */ setAudioFormat(@onNull AudioFormat format)838 public @NonNull Builder setAudioFormat(@NonNull AudioFormat format) 839 throws IllegalArgumentException { 840 if (format == null) { 841 throw new IllegalArgumentException("Illegal null AudioFormat argument"); 842 } 843 // keep reference, we only copy the data when building 844 mFormat = format; 845 return this; 846 } 847 848 /** 849 * Sets the total size (in bytes) of the buffer where audio data is read from for playback. 850 * If using the {@link AudioTrack} in streaming mode 851 * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller 852 * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine 853 * the estimated minimum buffer size for the creation of an AudioTrack instance 854 * in streaming mode. 855 * <br>If using the <code>AudioTrack</code> in static mode (see 856 * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be 857 * played by this instance. 858 * @param bufferSizeInBytes 859 * @return the same Builder instance. 
860 * @throws IllegalArgumentException 861 */ setBufferSizeInBytes(@ntRangefrom = 0) int bufferSizeInBytes)862 public @NonNull Builder setBufferSizeInBytes(@IntRange(from = 0) int bufferSizeInBytes) 863 throws IllegalArgumentException { 864 if (bufferSizeInBytes <= 0) { 865 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes); 866 } 867 mBufferSizeInBytes = bufferSizeInBytes; 868 return this; 869 } 870 871 /** 872 * Sets the mode under which buffers of audio data are transferred from the 873 * {@link AudioTrack} to the framework. 874 * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}. 875 * @return the same Builder instance. 876 * @throws IllegalArgumentException 877 */ setTransferMode(@ransferMode int mode)878 public @NonNull Builder setTransferMode(@TransferMode int mode) 879 throws IllegalArgumentException { 880 switch(mode) { 881 case MODE_STREAM: 882 case MODE_STATIC: 883 mMode = mode; 884 break; 885 default: 886 throw new IllegalArgumentException("Invalid transfer mode " + mode); 887 } 888 return this; 889 } 890 891 /** 892 * Sets the session ID the {@link AudioTrack} will be attached to. 893 * @param sessionId a strictly positive ID number retrieved from another 894 * <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by 895 * {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or 896 * {@link AudioManager#AUDIO_SESSION_ID_GENERATE}. 897 * @return the same Builder instance. 
898 * @throws IllegalArgumentException 899 */ setSessionId(@ntRangefrom = 1) int sessionId)900 public @NonNull Builder setSessionId(@IntRange(from = 1) int sessionId) 901 throws IllegalArgumentException { 902 if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) { 903 throw new IllegalArgumentException("Invalid audio session ID " + sessionId); 904 } 905 mSessionId = sessionId; 906 return this; 907 } 908 909 /** 910 * Sets the {@link AudioTrack} performance mode. This is an advisory request which 911 * may not be supported by the particular device, and the framework is free 912 * to ignore such request if it is incompatible with other requests or hardware. 913 * 914 * @param performanceMode one of 915 * {@link AudioTrack#PERFORMANCE_MODE_NONE}, 916 * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY}, 917 * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}. 918 * @return the same Builder instance. 919 * @throws IllegalArgumentException if {@code performanceMode} is not valid. 920 */ setPerformanceMode(@erformanceMode int performanceMode)921 public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) { 922 switch (performanceMode) { 923 case PERFORMANCE_MODE_NONE: 924 case PERFORMANCE_MODE_LOW_LATENCY: 925 case PERFORMANCE_MODE_POWER_SAVING: 926 mPerformanceMode = performanceMode; 927 break; 928 default: 929 throw new IllegalArgumentException( 930 "Invalid performance mode " + performanceMode); 931 } 932 return this; 933 } 934 935 /** 936 * Sets whether this track will play through the offloaded audio path. 937 * When set to true, at build time, the audio format will be checked against 938 * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} 939 * to verify the audio format used by this track is supported on the device's offload 940 * path (if any). 941 * <br>Offload is only supported for media audio streams, and therefore requires that 942 * the usage be {@link AudioAttributes#USAGE_MEDIA}. 
943 * @param offload true to require the offload path for playback. 944 * @return the same Builder instance. 945 */ setOffloadedPlayback(boolean offload)946 public @NonNull Builder setOffloadedPlayback(boolean offload) { 947 mOffload = offload; 948 return this; 949 } 950 951 /** 952 * Builds an {@link AudioTrack} instance initialized with all the parameters set 953 * on this <code>Builder</code>. 954 * @return a new successfully initialized {@link AudioTrack} instance. 955 * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code> 956 * were incompatible, or if they are not supported by the device, 957 * or if the device was not available. 958 */ build()959 public @NonNull AudioTrack build() throws UnsupportedOperationException { 960 if (mAttributes == null) { 961 mAttributes = new AudioAttributes.Builder() 962 .setUsage(AudioAttributes.USAGE_MEDIA) 963 .build(); 964 } 965 switch (mPerformanceMode) { 966 case PERFORMANCE_MODE_LOW_LATENCY: 967 mAttributes = new AudioAttributes.Builder(mAttributes) 968 .replaceFlags((mAttributes.getAllFlags() 969 | AudioAttributes.FLAG_LOW_LATENCY) 970 & ~AudioAttributes.FLAG_DEEP_BUFFER) 971 .build(); 972 break; 973 case PERFORMANCE_MODE_NONE: 974 if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) { 975 break; // do not enable deep buffer mode. 
976 } 977 // permitted to fall through to enable deep buffer 978 case PERFORMANCE_MODE_POWER_SAVING: 979 mAttributes = new AudioAttributes.Builder(mAttributes) 980 .replaceFlags((mAttributes.getAllFlags() 981 | AudioAttributes.FLAG_DEEP_BUFFER) 982 & ~AudioAttributes.FLAG_LOW_LATENCY) 983 .build(); 984 break; 985 } 986 987 if (mFormat == null) { 988 mFormat = new AudioFormat.Builder() 989 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 990 //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED) 991 .setEncoding(AudioFormat.ENCODING_DEFAULT) 992 .build(); 993 } 994 995 if (mOffload) { 996 if (mPerformanceMode == PERFORMANCE_MODE_LOW_LATENCY) { 997 throw new UnsupportedOperationException( 998 "Offload and low latency modes are incompatible"); 999 } 1000 if (!AudioSystem.isOffloadSupported(mFormat, mAttributes)) { 1001 throw new UnsupportedOperationException( 1002 "Cannot create AudioTrack, offload format / attributes not supported"); 1003 } 1004 } 1005 1006 try { 1007 // If the buffer size is not specified in streaming mode, 1008 // use a single frame for the buffer size and let the 1009 // native code figure out the minimum buffer size. 1010 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) { 1011 mBufferSizeInBytes = mFormat.getChannelCount() 1012 * mFormat.getBytesPerSample(mFormat.getEncoding()); 1013 } 1014 final AudioTrack track = new AudioTrack( 1015 mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId, mOffload); 1016 if (track.getState() == STATE_UNINITIALIZED) { 1017 // release is not necessary 1018 throw new UnsupportedOperationException("Cannot create AudioTrack"); 1019 } 1020 return track; 1021 } catch (IllegalArgumentException e) { 1022 throw new UnsupportedOperationException(e.getMessage()); 1023 } 1024 } 1025 } 1026 1027 /** 1028 * Configures the delay and padding values for the current compressed stream playing 1029 * in offload mode. 
1030 * This can only be used on a track successfully initialized with 1031 * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. The unit is frames, where a 1032 * frame indicates the number of samples per channel, e.g. 100 frames for a stereo compressed 1033 * stream corresponds to 200 decoded interleaved PCM samples. 1034 * @param delayInFrames number of frames to be ignored at the beginning of the stream. A value 1035 * of 0 indicates no delay is to be applied. 1036 * @param paddingInFrames number of frames to be ignored at the end of the stream. A value of 0 1037 * of 0 indicates no padding is to be applied. 1038 */ setOffloadDelayPadding(@ntRangefrom = 0) int delayInFrames, @IntRange(from = 0) int paddingInFrames)1039 public void setOffloadDelayPadding(@IntRange(from = 0) int delayInFrames, 1040 @IntRange(from = 0) int paddingInFrames) { 1041 if (paddingInFrames < 0) { 1042 throw new IllegalArgumentException("Illegal negative padding"); 1043 } 1044 if (delayInFrames < 0) { 1045 throw new IllegalArgumentException("Illegal negative delay"); 1046 } 1047 if (!mOffloaded) { 1048 throw new IllegalStateException("Illegal use of delay/padding on non-offloaded track"); 1049 } 1050 if (mState == STATE_UNINITIALIZED) { 1051 throw new IllegalStateException("Uninitialized track"); 1052 } 1053 mOffloadDelayFrames = delayInFrames; 1054 mOffloadPaddingFrames = paddingInFrames; 1055 native_set_delay_padding(delayInFrames, paddingInFrames); 1056 } 1057 1058 /** 1059 * Return the decoder delay of an offloaded track, expressed in frames, previously set with 1060 * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified. 1061 * <p>This delay indicates the number of frames to be ignored at the beginning of the stream. 1062 * This value can only be queried on a track successfully initialized with 1063 * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. 1064 * @return decoder delay expressed in frames. 
1065 */ getOffloadDelay()1066 public @IntRange(from = 0) int getOffloadDelay() { 1067 if (!mOffloaded) { 1068 throw new IllegalStateException("Illegal query of delay on non-offloaded track"); 1069 } 1070 if (mState == STATE_UNINITIALIZED) { 1071 throw new IllegalStateException("Illegal query of delay on uninitialized track"); 1072 } 1073 return mOffloadDelayFrames; 1074 } 1075 1076 /** 1077 * Return the decoder padding of an offloaded track, expressed in frames, previously set with 1078 * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified. 1079 * <p>This padding indicates the number of frames to be ignored at the end of the stream. 1080 * This value can only be queried on a track successfully initialized with 1081 * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. 1082 * @return decoder padding expressed in frames. 1083 */ getOffloadPadding()1084 public @IntRange(from = 0) int getOffloadPadding() { 1085 if (!mOffloaded) { 1086 throw new IllegalStateException("Illegal query of padding on non-offloaded track"); 1087 } 1088 if (mState == STATE_UNINITIALIZED) { 1089 throw new IllegalStateException("Illegal query of padding on uninitialized track"); 1090 } 1091 return mOffloadPaddingFrames; 1092 } 1093 1094 /** 1095 * Declares that the last write() operation on this track provided the last buffer of this 1096 * stream. 1097 * After the end of stream, previously set padding and delay values are ignored. 1098 * Can only be called only if the AudioTrack is opened in offload mode 1099 * {@see Builder#setOffloadedPlayback(boolean)}. 1100 * Can only be called only if the AudioTrack is in state {@link #PLAYSTATE_PLAYING} 1101 * {@see #getPlaystate()}. 1102 * Use this method in the same thread as any write() operation. 
1103 */ setOffloadEndOfStream()1104 public void setOffloadEndOfStream() { 1105 if (!mOffloaded) { 1106 throw new IllegalStateException("EOS not supported on non-offloaded track"); 1107 } 1108 if (mState == STATE_UNINITIALIZED) { 1109 throw new IllegalStateException("Uninitialized track"); 1110 } 1111 if (mPlayState != PLAYSTATE_PLAYING) { 1112 throw new IllegalStateException("EOS not supported if not playing"); 1113 } 1114 synchronized (mStreamEventCbLock) { 1115 if (mStreamEventCbInfoList.size() == 0) { 1116 throw new IllegalStateException("EOS not supported without StreamEventCallback"); 1117 } 1118 } 1119 1120 synchronized (mPlayStateLock) { 1121 native_stop(); 1122 mOffloadEosPending = true; 1123 mPlayState = PLAYSTATE_STOPPING; 1124 } 1125 } 1126 1127 /** 1128 * Returns whether the track was built with {@link Builder#setOffloadedPlayback(boolean)} set 1129 * to {@code true}. 1130 * @return true if the track is using offloaded playback. 1131 */ isOffloadedPlayback()1132 public boolean isOffloadedPlayback() { 1133 return mOffloaded; 1134 } 1135 1136 /** 1137 * Returns whether direct playback of an audio format with the provided attributes is 1138 * currently supported on the system. 1139 * <p>Direct playback means that the audio stream is not resampled or downmixed 1140 * by the framework. Checking for direct support can help the app select the representation 1141 * of audio content that most closely matches the capabilities of the device and peripherials 1142 * (e.g. A/V receiver) connected to it. Note that the provided stream can still be re-encoded 1143 * or mixed with other streams, if needed. 1144 * <p>Also note that this query only provides information about the support of an audio format. 1145 * It does not indicate whether the resources necessary for the playback are available 1146 * at that instant. 1147 * @param format a non-null {@link AudioFormat} instance describing the format of 1148 * the audio data. 
1149 * @param attributes a non-null {@link AudioAttributes} instance. 1150 * @return true if the given audio format can be played directly. 1151 */ isDirectPlaybackSupported(@onNull AudioFormat format, @NonNull AudioAttributes attributes)1152 public static boolean isDirectPlaybackSupported(@NonNull AudioFormat format, 1153 @NonNull AudioAttributes attributes) { 1154 if (format == null) { 1155 throw new IllegalArgumentException("Illegal null AudioFormat argument"); 1156 } 1157 if (attributes == null) { 1158 throw new IllegalArgumentException("Illegal null AudioAttributes argument"); 1159 } 1160 return native_is_direct_output_supported(format.getEncoding(), format.getSampleRate(), 1161 format.getChannelMask(), format.getChannelIndexMask(), 1162 attributes.getContentType(), attributes.getUsage(), attributes.getFlags()); 1163 } 1164 1165 // mask of all the positional channels supported, however the allowed combinations 1166 // are further restricted by the matching left/right rule and 1167 // AudioSystem.OUT_CHANNEL_COUNT_MAX 1168 private static final int SUPPORTED_OUT_CHANNELS = 1169 AudioFormat.CHANNEL_OUT_FRONT_LEFT | 1170 AudioFormat.CHANNEL_OUT_FRONT_RIGHT | 1171 AudioFormat.CHANNEL_OUT_FRONT_CENTER | 1172 AudioFormat.CHANNEL_OUT_LOW_FREQUENCY | 1173 AudioFormat.CHANNEL_OUT_BACK_LEFT | 1174 AudioFormat.CHANNEL_OUT_BACK_RIGHT | 1175 AudioFormat.CHANNEL_OUT_BACK_CENTER | 1176 AudioFormat.CHANNEL_OUT_SIDE_LEFT | 1177 AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 1178 1179 // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow 1180 // power saving to be automatically enabled for an AudioTrack. Returns false if 1181 // power saving is already enabled in the attributes parameter. 
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)1182 private static boolean shouldEnablePowerSaving( 1183 @Nullable AudioAttributes attributes, @Nullable AudioFormat format, 1184 int bufferSizeInBytes, int mode) { 1185 // If no attributes, OK 1186 // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE. 1187 if (attributes != null && 1188 (attributes.getAllFlags() != 0 // cannot have any special flags 1189 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA 1190 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN 1191 && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC 1192 && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) { 1193 return false; 1194 } 1195 1196 // Format must be fully specified and be linear pcm 1197 if (format == null 1198 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED 1199 || !AudioFormat.isEncodingLinearPcm(format.getEncoding()) 1200 || !AudioFormat.isValidEncoding(format.getEncoding()) 1201 || format.getChannelCount() < 1) { 1202 return false; 1203 } 1204 1205 // Mode must be streaming 1206 if (mode != MODE_STREAM) { 1207 return false; 1208 } 1209 1210 // A buffer size of 0 is always compatible with deep buffer (when called from the Builder) 1211 // but for app compatibility we only use deep buffer power saving for large buffer sizes. 1212 if (bufferSizeInBytes != 0) { 1213 final long BUFFER_TARGET_MODE_STREAM_MS = 100; 1214 final int MILLIS_PER_SECOND = 1000; 1215 final long bufferTargetSize = 1216 BUFFER_TARGET_MODE_STREAM_MS 1217 * format.getChannelCount() 1218 * format.getBytesPerSample(format.getEncoding()) 1219 * format.getSampleRate() 1220 / MILLIS_PER_SECOND; 1221 if (bufferSizeInBytes < bufferTargetSize) { 1222 return false; 1223 } 1224 } 1225 1226 return true; 1227 } 1228 1229 // Convenience method for the constructor's parameter checks. 
1230 // This is where constructor IllegalArgumentException-s are thrown 1231 // postconditions: 1232 // mChannelCount is valid 1233 // mChannelMask is valid 1234 // mAudioFormat is valid 1235 // mSampleRate is valid 1236 // mDataLoadMode is valid audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask, int audioFormat, int mode)1237 private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask, 1238 int audioFormat, int mode) { 1239 //-------------- 1240 // sample rate, note these values are subject to change 1241 if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN || 1242 sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) && 1243 sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) { 1244 throw new IllegalArgumentException(sampleRateInHz 1245 + "Hz is not a supported sample rate."); 1246 } 1247 mSampleRate = sampleRateInHz; 1248 1249 // IEC61937 is based on stereo. We could coerce it to stereo. 1250 // But the application needs to know the stream is stereo so that 1251 // it is encoded and played correctly. So better to just reject it. 
1252 if (audioFormat == AudioFormat.ENCODING_IEC61937 1253 && channelConfig != AudioFormat.CHANNEL_OUT_STEREO) { 1254 throw new IllegalArgumentException( 1255 "ENCODING_IEC61937 must be configured as CHANNEL_OUT_STEREO"); 1256 } 1257 1258 //-------------- 1259 // channel config 1260 mChannelConfiguration = channelConfig; 1261 1262 switch (channelConfig) { 1263 case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT 1264 case AudioFormat.CHANNEL_OUT_MONO: 1265 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1266 mChannelCount = 1; 1267 mChannelMask = AudioFormat.CHANNEL_OUT_MONO; 1268 break; 1269 case AudioFormat.CHANNEL_OUT_STEREO: 1270 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1271 mChannelCount = 2; 1272 mChannelMask = AudioFormat.CHANNEL_OUT_STEREO; 1273 break; 1274 default: 1275 if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) { 1276 mChannelCount = 0; 1277 break; // channel index configuration only 1278 } 1279 if (!isMultichannelConfigSupported(channelConfig)) { 1280 // input channel configuration features unsupported channels 1281 throw new IllegalArgumentException("Unsupported channel configuration."); 1282 } 1283 mChannelMask = channelConfig; 1284 mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1285 } 1286 // check the channel index configuration (if present) 1287 mChannelIndexMask = channelIndexMask; 1288 if (mChannelIndexMask != 0) { 1289 // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2 1290 final int indexMask = (1 << AudioSystem.OUT_CHANNEL_COUNT_MAX) - 1; 1291 if ((channelIndexMask & ~indexMask) != 0) { 1292 throw new IllegalArgumentException("Unsupported channel index configuration " 1293 + channelIndexMask); 1294 } 1295 int channelIndexCount = Integer.bitCount(channelIndexMask); 1296 if (mChannelCount == 0) { 1297 mChannelCount = channelIndexCount; 1298 } else if (mChannelCount != channelIndexCount) { 1299 throw new IllegalArgumentException("Channel 
count must match"); 1300 } 1301 } 1302 1303 //-------------- 1304 // audio format 1305 if (audioFormat == AudioFormat.ENCODING_DEFAULT) { 1306 audioFormat = AudioFormat.ENCODING_PCM_16BIT; 1307 } 1308 1309 if (!AudioFormat.isPublicEncoding(audioFormat)) { 1310 throw new IllegalArgumentException("Unsupported audio encoding."); 1311 } 1312 mAudioFormat = audioFormat; 1313 1314 //-------------- 1315 // audio load mode 1316 if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) || 1317 ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) { 1318 throw new IllegalArgumentException("Invalid mode."); 1319 } 1320 mDataLoadMode = mode; 1321 } 1322 1323 /** 1324 * Convenience method to check that the channel configuration (a.k.a channel mask) is supported 1325 * @param channelConfig the mask to validate 1326 * @return false if the AudioTrack can't be used with such a mask 1327 */ isMultichannelConfigSupported(int channelConfig)1328 private static boolean isMultichannelConfigSupported(int channelConfig) { 1329 // check for unsupported channels 1330 if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) { 1331 loge("Channel configuration features unsupported channels"); 1332 return false; 1333 } 1334 final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1335 if (channelCount > AudioSystem.OUT_CHANNEL_COUNT_MAX) { 1336 loge("Channel configuration contains too many channels " + 1337 channelCount + ">" + AudioSystem.OUT_CHANNEL_COUNT_MAX); 1338 return false; 1339 } 1340 // check for unsupported multichannel combinations: 1341 // - FL/FR must be present 1342 // - L/R channels must be paired (e.g. 
no single L channel) 1343 final int frontPair = 1344 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT; 1345 if ((channelConfig & frontPair) != frontPair) { 1346 loge("Front channels must be present in multichannel configurations"); 1347 return false; 1348 } 1349 final int backPair = 1350 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT; 1351 if ((channelConfig & backPair) != 0) { 1352 if ((channelConfig & backPair) != backPair) { 1353 loge("Rear channels can't be used independently"); 1354 return false; 1355 } 1356 } 1357 final int sidePair = 1358 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT; 1359 if ((channelConfig & sidePair) != 0 1360 && (channelConfig & sidePair) != sidePair) { 1361 loge("Side channels can't be used independently"); 1362 return false; 1363 } 1364 return true; 1365 } 1366 1367 1368 // Convenience method for the constructor's audio buffer size check. 1369 // preconditions: 1370 // mChannelCount is valid 1371 // mAudioFormat is valid 1372 // postcondition: 1373 // mNativeBufferSizeInBytes is valid (multiple of frame size, positive) audioBuffSizeCheck(int audioBufferSize)1374 private void audioBuffSizeCheck(int audioBufferSize) { 1375 // NB: this section is only valid with PCM or IEC61937 data. 1376 // To update when supporting compressed formats 1377 int frameSizeInBytes; 1378 if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) { 1379 frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat); 1380 } else { 1381 frameSizeInBytes = 1; 1382 } 1383 if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) { 1384 throw new IllegalArgumentException("Invalid audio buffer size."); 1385 } 1386 1387 mNativeBufferSizeInBytes = audioBufferSize; 1388 mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes; 1389 } 1390 1391 1392 /** 1393 * Releases the native AudioTrack resources. 
1394 */ release()1395 public void release() { 1396 synchronized (mStreamEventCbLock){ 1397 endStreamEventHandling(); 1398 } 1399 // even though native_release() stops the native AudioTrack, we need to stop 1400 // AudioTrack subclasses too. 1401 try { 1402 stop(); 1403 } catch(IllegalStateException ise) { 1404 // don't raise an exception, we're releasing the resources. 1405 } 1406 baseRelease(); 1407 native_release(); 1408 synchronized (mPlayStateLock) { 1409 mState = STATE_UNINITIALIZED; 1410 mPlayState = PLAYSTATE_STOPPED; 1411 mPlayStateLock.notify(); 1412 } 1413 } 1414 1415 @Override finalize()1416 protected void finalize() { 1417 baseRelease(); 1418 native_finalize(); 1419 } 1420 1421 //-------------------------------------------------------------------------- 1422 // Getters 1423 //-------------------- 1424 /** 1425 * Returns the minimum gain value, which is the constant 0.0. 1426 * Gain values less than 0.0 will be clamped to 0.0. 1427 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 1428 * @return the minimum value, which is the constant 0.0. 1429 */ getMinVolume()1430 static public float getMinVolume() { 1431 return GAIN_MIN; 1432 } 1433 1434 /** 1435 * Returns the maximum gain value, which is greater than or equal to 1.0. 1436 * Gain values greater than the maximum will be clamped to the maximum. 1437 * <p>The word "volume" in the API name is historical; this is actually a gain. 1438 * expressed as a linear multiplier on sample values, where a maximum value of 1.0 1439 * corresponds to a gain of 0 dB (sample values left unmodified). 1440 * @return the maximum value, which is greater than or equal to 1.0. 1441 */ getMaxVolume()1442 static public float getMaxVolume() { 1443 return GAIN_MAX; 1444 } 1445 1446 /** 1447 * Returns the configured audio source sample rate in Hz. 
1448 * The initial source sample rate depends on the constructor parameters, 1449 * but the source sample rate may change if {@link #setPlaybackRate(int)} is called. 1450 * If the constructor had a specific sample rate, then the initial sink sample rate is that 1451 * value. 1452 * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}, 1453 * then the initial sink sample rate is a route-dependent default value based on the source [sic]. 1454 */ getSampleRate()1455 public int getSampleRate() { 1456 return mSampleRate; 1457 } 1458 1459 /** 1460 * Returns the current playback sample rate rate in Hz. 1461 */ getPlaybackRate()1462 public int getPlaybackRate() { 1463 return native_get_playback_rate(); 1464 } 1465 1466 /** 1467 * Returns the current playback parameters. 1468 * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters 1469 * @return current {@link PlaybackParams}. 1470 * @throws IllegalStateException if track is not initialized. 1471 */ getPlaybackParams()1472 public @NonNull PlaybackParams getPlaybackParams() { 1473 return native_get_playback_params(); 1474 } 1475 1476 /** 1477 * Returns the {@link AudioAttributes} used in configuration. 1478 * If a {@code streamType} is used instead of an {@code AudioAttributes} 1479 * to configure the AudioTrack 1480 * (the use of {@code streamType} for configuration is deprecated), 1481 * then the {@code AudioAttributes} 1482 * equivalent to the {@code streamType} is returned. 1483 * @return The {@code AudioAttributes} used to configure the AudioTrack. 1484 * @throws IllegalStateException If the track is not initialized. 1485 */ getAudioAttributes()1486 public @NonNull AudioAttributes getAudioAttributes() { 1487 if (mState == STATE_UNINITIALIZED || mConfiguredAudioAttributes == null) { 1488 throw new IllegalStateException("track not initialized"); 1489 } 1490 return mConfiguredAudioAttributes; 1491 } 1492 1493 /** 1494 * Returns the configured audio data encoding. 
See {@link AudioFormat#ENCODING_PCM_8BIT}, 1495 * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}. 1496 */ getAudioFormat()1497 public int getAudioFormat() { 1498 return mAudioFormat; 1499 } 1500 1501 /** 1502 * Returns the volume stream type of this AudioTrack. 1503 * Compare the result against {@link AudioManager#STREAM_VOICE_CALL}, 1504 * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING}, 1505 * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM}, 1506 * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or 1507 * {@link AudioManager#STREAM_ACCESSIBILITY}. 1508 */ getStreamType()1509 public int getStreamType() { 1510 return mStreamType; 1511 } 1512 1513 /** 1514 * Returns the configured channel position mask. 1515 * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO}, 1516 * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}. 1517 * This method may return {@link AudioFormat#CHANNEL_INVALID} if 1518 * a channel index mask was used. Consider 1519 * {@link #getFormat()} instead, to obtain an {@link AudioFormat}, 1520 * which contains both the channel position mask and the channel index mask. 1521 */ getChannelConfiguration()1522 public int getChannelConfiguration() { 1523 return mChannelConfiguration; 1524 } 1525 1526 /** 1527 * Returns the configured <code>AudioTrack</code> format. 1528 * @return an {@link AudioFormat} containing the 1529 * <code>AudioTrack</code> parameters at the time of configuration. 
1530 */ getFormat()1531 public @NonNull AudioFormat getFormat() { 1532 AudioFormat.Builder builder = new AudioFormat.Builder() 1533 .setSampleRate(mSampleRate) 1534 .setEncoding(mAudioFormat); 1535 if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) { 1536 builder.setChannelMask(mChannelConfiguration); 1537 } 1538 if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) { 1539 builder.setChannelIndexMask(mChannelIndexMask); 1540 } 1541 return builder.build(); 1542 } 1543 1544 /** 1545 * Returns the configured number of channels. 1546 */ getChannelCount()1547 public int getChannelCount() { 1548 return mChannelCount; 1549 } 1550 1551 /** 1552 * Returns the state of the AudioTrack instance. This is useful after the 1553 * AudioTrack instance has been created to check if it was initialized 1554 * properly. This ensures that the appropriate resources have been acquired. 1555 * @see #STATE_UNINITIALIZED 1556 * @see #STATE_INITIALIZED 1557 * @see #STATE_NO_STATIC_DATA 1558 */ getState()1559 public int getState() { 1560 return mState; 1561 } 1562 1563 /** 1564 * Returns the playback state of the AudioTrack instance. 1565 * @see #PLAYSTATE_STOPPED 1566 * @see #PLAYSTATE_PAUSED 1567 * @see #PLAYSTATE_PLAYING 1568 */ getPlayState()1569 public int getPlayState() { 1570 synchronized (mPlayStateLock) { 1571 switch (mPlayState) { 1572 case PLAYSTATE_STOPPING: 1573 return PLAYSTATE_PLAYING; 1574 case PLAYSTATE_PAUSED_STOPPING: 1575 return PLAYSTATE_PAUSED; 1576 default: 1577 return mPlayState; 1578 } 1579 } 1580 } 1581 1582 1583 /** 1584 * Returns the effective size of the <code>AudioTrack</code> buffer 1585 * that the application writes to. 1586 * <p> This will be less than or equal to the result of 1587 * {@link #getBufferCapacityInFrames()}. 1588 * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called. 1589 * <p> If the track is subsequently routed to a different output sink, the buffer 1590 * size and capacity may enlarge to accommodate. 
1591 * <p> If the <code>AudioTrack</code> encoding indicates compressed data, 1592 * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is 1593 * the size of the <code>AudioTrack</code> buffer in bytes. 1594 * <p> See also {@link AudioManager#getProperty(String)} for key 1595 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}. 1596 * @return current size in frames of the <code>AudioTrack</code> buffer. 1597 * @throws IllegalStateException if track is not initialized. 1598 */ getBufferSizeInFrames()1599 public @IntRange (from = 0) int getBufferSizeInFrames() { 1600 return native_get_buffer_size_frames(); 1601 } 1602 1603 /** 1604 * Limits the effective size of the <code>AudioTrack</code> buffer 1605 * that the application writes to. 1606 * <p> A write to this AudioTrack will not fill the buffer beyond this limit. 1607 * If a blocking write is used then the write will block until the data 1608 * can fit within this limit. 1609 * <p>Changing this limit modifies the latency associated with 1610 * the buffer for this track. A smaller size will give lower latency 1611 * but there may be more glitches due to buffer underruns. 1612 * <p>The actual size used may not be equal to this requested size. 1613 * It will be limited to a valid range with a maximum of 1614 * {@link #getBufferCapacityInFrames()}. 1615 * It may also be adjusted slightly for internal reasons. 1616 * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE} 1617 * will be returned. 1618 * <p>This method is only supported for PCM audio. 1619 * It is not supported for compressed audio tracks. 1620 * 1621 * @param bufferSizeInFrames requested buffer size in frames 1622 * @return the actual buffer size in frames or an error code, 1623 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION} 1624 * @throws IllegalStateException if track is not initialized. 
1625 */ setBufferSizeInFrames(@ntRange from = 0) int bufferSizeInFrames)1626 public int setBufferSizeInFrames(@IntRange (from = 0) int bufferSizeInFrames) { 1627 if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) { 1628 return ERROR_INVALID_OPERATION; 1629 } 1630 if (bufferSizeInFrames < 0) { 1631 return ERROR_BAD_VALUE; 1632 } 1633 return native_set_buffer_size_frames(bufferSizeInFrames); 1634 } 1635 1636 /** 1637 * Returns the maximum size of the <code>AudioTrack</code> buffer in frames. 1638 * <p> If the track's creation mode is {@link #MODE_STATIC}, 1639 * it is equal to the specified bufferSizeInBytes on construction, converted to frame units. 1640 * A static track's frame count will not change. 1641 * <p> If the track's creation mode is {@link #MODE_STREAM}, 1642 * it is greater than or equal to the specified bufferSizeInBytes converted to frame units. 1643 * For streaming tracks, this value may be rounded up to a larger value if needed by 1644 * the target output sink, and 1645 * if the track is subsequently routed to a different output sink, the 1646 * frame count may enlarge to accommodate. 1647 * <p> If the <code>AudioTrack</code> encoding indicates compressed data, 1648 * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is 1649 * the size of the <code>AudioTrack</code> buffer in bytes. 1650 * <p> See also {@link AudioManager#getProperty(String)} for key 1651 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}. 1652 * @return maximum size in frames of the <code>AudioTrack</code> buffer. 1653 * @throws IllegalStateException if track is not initialized. 1654 */ getBufferCapacityInFrames()1655 public @IntRange (from = 0) int getBufferCapacityInFrames() { 1656 return native_get_buffer_capacity_frames(); 1657 } 1658 1659 /** 1660 * Returns the frame count of the native <code>AudioTrack</code> buffer. 1661 * @return current size in frames of the <code>AudioTrack</code> buffer. 
1662 * @throws IllegalStateException 1663 * @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead. 1664 */ 1665 @Deprecated getNativeFrameCount()1666 protected int getNativeFrameCount() { 1667 return native_get_buffer_capacity_frames(); 1668 } 1669 1670 /** 1671 * Returns marker position expressed in frames. 1672 * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition}, 1673 * or zero if marker is disabled. 1674 */ getNotificationMarkerPosition()1675 public int getNotificationMarkerPosition() { 1676 return native_get_marker_pos(); 1677 } 1678 1679 /** 1680 * Returns the notification update period expressed in frames. 1681 * Zero means that no position update notifications are being delivered. 1682 */ getPositionNotificationPeriod()1683 public int getPositionNotificationPeriod() { 1684 return native_get_pos_update_period(); 1685 } 1686 1687 /** 1688 * Returns the playback head position expressed in frames. 1689 * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is 1690 * unsigned 32-bits. That is, the next position after 0x7FFFFFFF is (int) 0x80000000. 1691 * This is a continuously advancing counter. It will wrap (overflow) periodically, 1692 * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz. 1693 * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}. 1694 * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates 1695 * the total number of frames played since reset, 1696 * <i>not</i> the current offset within the buffer. 1697 */ getPlaybackHeadPosition()1698 public int getPlaybackHeadPosition() { 1699 return native_get_position(); 1700 } 1701 1702 /** 1703 * Returns this track's estimated latency in milliseconds. This includes the latency due 1704 * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver. 1705 * 1706 * DO NOT UNHIDE. 
The existing approach for doing A/V sync has too many problems. We need 1707 * a better solution. 1708 * @hide 1709 */ 1710 @UnsupportedAppUsage(trackingBug = 130237544) getLatency()1711 public int getLatency() { 1712 return native_get_latency(); 1713 } 1714 1715 /** 1716 * Returns the number of underrun occurrences in the application-level write buffer 1717 * since the AudioTrack was created. 1718 * An underrun occurs if the application does not write audio 1719 * data quickly enough, causing the buffer to underflow 1720 * and a potential audio glitch or pop. 1721 * <p> 1722 * Underruns are less likely when buffer sizes are large. 1723 * It may be possible to eliminate underruns by recreating the AudioTrack with 1724 * a larger buffer. 1725 * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the 1726 * effective size of the buffer. 1727 */ getUnderrunCount()1728 public int getUnderrunCount() { 1729 return native_get_underrun_count(); 1730 } 1731 1732 /** 1733 * Returns the current performance mode of the {@link AudioTrack}. 1734 * 1735 * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE}, 1736 * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY}, 1737 * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}. 1738 * Use {@link AudioTrack.Builder#setPerformanceMode} 1739 * in the {@link AudioTrack.Builder} to enable a performance mode. 1740 * @throws IllegalStateException if track is not initialized. 1741 */ getPerformanceMode()1742 public @PerformanceMode int getPerformanceMode() { 1743 final int flags = native_get_flags(); 1744 if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) { 1745 return PERFORMANCE_MODE_LOW_LATENCY; 1746 } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) { 1747 return PERFORMANCE_MODE_POWER_SAVING; 1748 } else { 1749 return PERFORMANCE_MODE_NONE; 1750 } 1751 } 1752 1753 /** 1754 * Returns the output sample rate in Hz for the specified stream type. 
1755 */ getNativeOutputSampleRate(int streamType)1756 static public int getNativeOutputSampleRate(int streamType) { 1757 return native_get_output_sample_rate(streamType); 1758 } 1759 1760 /** 1761 * Returns the estimated minimum buffer size required for an AudioTrack 1762 * object to be created in the {@link #MODE_STREAM} mode. 1763 * The size is an estimate because it does not consider either the route or the sink, 1764 * since neither is known yet. Note that this size doesn't 1765 * guarantee a smooth playback under load, and higher values should be chosen according to 1766 * the expected frequency at which the buffer will be refilled with additional data to play. 1767 * For example, if you intend to dynamically set the source sample rate of an AudioTrack 1768 * to a higher value than the initial source sample rate, be sure to configure the buffer size 1769 * based on the highest planned sample rate. 1770 * @param sampleRateInHz the source sample rate expressed in Hz. 1771 * {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted. 1772 * @param channelConfig describes the configuration of the audio channels. 1773 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 1774 * {@link AudioFormat#CHANNEL_OUT_STEREO} 1775 * @param audioFormat the format in which the audio data is represented. 1776 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 1777 * {@link AudioFormat#ENCODING_PCM_8BIT}, 1778 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 1779 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed, 1780 * or {@link #ERROR} if unable to query for output properties, 1781 * or the minimum buffer size expressed in bytes. 
1782 */ getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)1783 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) { 1784 int channelCount = 0; 1785 switch(channelConfig) { 1786 case AudioFormat.CHANNEL_OUT_MONO: 1787 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1788 channelCount = 1; 1789 break; 1790 case AudioFormat.CHANNEL_OUT_STEREO: 1791 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1792 channelCount = 2; 1793 break; 1794 default: 1795 if (!isMultichannelConfigSupported(channelConfig)) { 1796 loge("getMinBufferSize(): Invalid channel configuration."); 1797 return ERROR_BAD_VALUE; 1798 } else { 1799 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1800 } 1801 } 1802 1803 if (!AudioFormat.isPublicEncoding(audioFormat)) { 1804 loge("getMinBufferSize(): Invalid audio format."); 1805 return ERROR_BAD_VALUE; 1806 } 1807 1808 // sample rate, note these values are subject to change 1809 // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed 1810 if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) || 1811 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) { 1812 loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate."); 1813 return ERROR_BAD_VALUE; 1814 } 1815 1816 int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat); 1817 if (size <= 0) { 1818 loge("getMinBufferSize(): error querying hardware"); 1819 return ERROR; 1820 } 1821 else { 1822 return size; 1823 } 1824 } 1825 1826 /** 1827 * Returns the audio session ID. 1828 * 1829 * @return the ID of the audio session this AudioTrack belongs to. 1830 */ getAudioSessionId()1831 public int getAudioSessionId() { 1832 return mSessionId; 1833 } 1834 1835 /** 1836 * Poll for a timestamp on demand. 
1837 * <p> 1838 * If you need to track timestamps during initial warmup or after a routing or mode change, 1839 * you should request a new timestamp periodically until the reported timestamps 1840 * show that the frame position is advancing, or until it becomes clear that 1841 * timestamps are unavailable for this route. 1842 * <p> 1843 * After the clock is advancing at a stable rate, 1844 * query for a new timestamp approximately once every 10 seconds to once per minute. 1845 * Calling this method more often is inefficient. 1846 * It is also counter-productive to call this method more often than recommended, 1847 * because the short-term differences between successive timestamp reports are not meaningful. 1848 * If you need a high-resolution mapping between frame position and presentation time, 1849 * consider implementing that at application level, based on low-resolution timestamps. 1850 * <p> 1851 * The audio data at the returned position may either already have been 1852 * presented, or may have not yet been presented but is committed to be presented. 1853 * It is not possible to request the time corresponding to a particular position, 1854 * or to request the (fractional) position corresponding to a particular time. 1855 * If you need such features, consider implementing them at application level. 1856 * 1857 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1858 * and owned by caller. 1859 * @return true if a timestamp is available, or false if no timestamp is available. 1860 * If a timestamp is available, 1861 * the AudioTimestamp instance is filled in with a position in frame units, together 1862 * with the estimated time when that frame was presented or is committed to 1863 * be presented. 1864 * In the case that no timestamp is available, any supplied instance is left unaltered. 1865 * A timestamp may be temporarily unavailable while the audio clock is stabilizing, 1866 * or during and immediately after a route change. 
1867 * A timestamp is permanently unavailable for a given route if the route does not support 1868 * timestamps. In this case, the approximate frame position can be obtained 1869 * using {@link #getPlaybackHeadPosition}. 1870 * However, it may be useful to continue to query for 1871 * timestamps occasionally, to recover after a route change. 1872 */ 1873 // Add this text when the "on new timestamp" API is added: 1874 // Use if you need to get the most recent timestamp outside of the event callback handler. getTimestamp(AudioTimestamp timestamp)1875 public boolean getTimestamp(AudioTimestamp timestamp) 1876 { 1877 if (timestamp == null) { 1878 throw new IllegalArgumentException(); 1879 } 1880 // It's unfortunate, but we have to either create garbage every time or use synchronized 1881 long[] longArray = new long[2]; 1882 int ret = native_get_timestamp(longArray); 1883 if (ret != SUCCESS) { 1884 return false; 1885 } 1886 timestamp.framePosition = longArray[0]; 1887 timestamp.nanoTime = longArray[1]; 1888 return true; 1889 } 1890 1891 /** 1892 * Poll for a timestamp on demand. 1893 * <p> 1894 * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code. 1895 * 1896 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 1897 * and owned by caller. 1898 * @return {@link #SUCCESS} if a timestamp is available 1899 * {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called 1900 * immediately after start/ACTIVE, when the number of frames consumed is less than the 1901 * overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll 1902 * again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time 1903 * for the timestamp. 1904 * {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 1905 * needs to be recreated. 1906 * {@link #ERROR_INVALID_OPERATION} if current route does not support 1907 * timestamps. 
In this case, the approximate frame position can be obtained 1908 * using {@link #getPlaybackHeadPosition}. 1909 * 1910 * The AudioTimestamp instance is filled in with a position in frame units, together 1911 * with the estimated time when that frame was presented or is committed to 1912 * be presented. 1913 * @hide 1914 */ 1915 // Add this text when the "on new timestamp" API is added: 1916 // Use if you need to get the most recent timestamp outside of the event callback handler. getTimestampWithStatus(AudioTimestamp timestamp)1917 public int getTimestampWithStatus(AudioTimestamp timestamp) 1918 { 1919 if (timestamp == null) { 1920 throw new IllegalArgumentException(); 1921 } 1922 // It's unfortunate, but we have to either create garbage every time or use synchronized 1923 long[] longArray = new long[2]; 1924 int ret = native_get_timestamp(longArray); 1925 timestamp.framePosition = longArray[0]; 1926 timestamp.nanoTime = longArray[1]; 1927 return ret; 1928 } 1929 1930 /** 1931 * Return Metrics data about the current AudioTrack instance. 1932 * 1933 * @return a {@link PersistableBundle} containing the set of attributes and values 1934 * available for the media being handled by this instance of AudioTrack 1935 * The attributes are descibed in {@link MetricsConstants}. 1936 * 1937 * Additional vendor-specific fields may also be present in 1938 * the return value. 1939 */ getMetrics()1940 public PersistableBundle getMetrics() { 1941 PersistableBundle bundle = native_getMetrics(); 1942 return bundle; 1943 } 1944 native_getMetrics()1945 private native PersistableBundle native_getMetrics(); 1946 1947 //-------------------------------------------------------------------------- 1948 // Initialization / configuration 1949 //-------------------- 1950 /** 1951 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1952 * for each periodic playback head position update. 
1953 * Notifications will be received in the same thread as the one in which the AudioTrack 1954 * instance was created. 1955 * @param listener 1956 */ setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener)1957 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) { 1958 setPlaybackPositionUpdateListener(listener, null); 1959 } 1960 1961 /** 1962 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 1963 * for each periodic playback head position update. 1964 * Use this method to receive AudioTrack events in the Handler associated with another 1965 * thread than the one in which you created the AudioTrack instance. 1966 * @param listener 1967 * @param handler the Handler that will receive the event notification messages. 1968 */ setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)1969 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, 1970 Handler handler) { 1971 if (listener != null) { 1972 mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler); 1973 } else { 1974 mEventHandlerDelegate = null; 1975 } 1976 } 1977 1978 clampGainOrLevel(float gainOrLevel)1979 private static float clampGainOrLevel(float gainOrLevel) { 1980 if (Float.isNaN(gainOrLevel)) { 1981 throw new IllegalArgumentException(); 1982 } 1983 if (gainOrLevel < GAIN_MIN) { 1984 gainOrLevel = GAIN_MIN; 1985 } else if (gainOrLevel > GAIN_MAX) { 1986 gainOrLevel = GAIN_MAX; 1987 } 1988 return gainOrLevel; 1989 } 1990 1991 1992 /** 1993 * Sets the specified left and right output gain values on the AudioTrack. 1994 * <p>Gain values are clamped to the closed interval [0.0, max] where 1995 * max is the value of {@link #getMaxVolume}. 1996 * A value of 0.0 results in zero gain (silence), and 1997 * a value of 1.0 means unity gain (signal unchanged). 1998 * The default value is 1.0 meaning unity gain. 
1999 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 2000 * @param leftGain output gain for the left channel. 2001 * @param rightGain output gain for the right channel 2002 * @return error code or success, see {@link #SUCCESS}, 2003 * {@link #ERROR_INVALID_OPERATION} 2004 * @deprecated Applications should use {@link #setVolume} instead, as it 2005 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 2006 */ 2007 @Deprecated setStereoVolume(float leftGain, float rightGain)2008 public int setStereoVolume(float leftGain, float rightGain) { 2009 if (mState == STATE_UNINITIALIZED) { 2010 return ERROR_INVALID_OPERATION; 2011 } 2012 2013 baseSetVolume(leftGain, rightGain); 2014 return SUCCESS; 2015 } 2016 2017 @Override playerSetVolume(boolean muting, float leftVolume, float rightVolume)2018 void playerSetVolume(boolean muting, float leftVolume, float rightVolume) { 2019 leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume); 2020 rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume); 2021 2022 native_setVolume(leftVolume, rightVolume); 2023 } 2024 2025 2026 /** 2027 * Sets the specified output gain value on all channels of this track. 2028 * <p>Gain values are clamped to the closed interval [0.0, max] where 2029 * max is the value of {@link #getMaxVolume}. 2030 * A value of 0.0 results in zero gain (silence), and 2031 * a value of 1.0 means unity gain (signal unchanged). 2032 * The default value is 1.0 meaning unity gain. 2033 * <p>This API is preferred over {@link #setStereoVolume}, as it 2034 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 2035 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 2036 * @param gain output gain for all channels. 
2037 * @return error code or success, see {@link #SUCCESS}, 2038 * {@link #ERROR_INVALID_OPERATION} 2039 */ setVolume(float gain)2040 public int setVolume(float gain) { 2041 return setStereoVolume(gain, gain); 2042 } 2043 2044 @Override playerApplyVolumeShaper( @onNull VolumeShaper.Configuration configuration, @NonNull VolumeShaper.Operation operation)2045 /* package */ int playerApplyVolumeShaper( 2046 @NonNull VolumeShaper.Configuration configuration, 2047 @NonNull VolumeShaper.Operation operation) { 2048 return native_applyVolumeShaper(configuration, operation); 2049 } 2050 2051 @Override playerGetVolumeShaperState(int id)2052 /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) { 2053 return native_getVolumeShaperState(id); 2054 } 2055 2056 @Override createVolumeShaper( @onNull VolumeShaper.Configuration configuration)2057 public @NonNull VolumeShaper createVolumeShaper( 2058 @NonNull VolumeShaper.Configuration configuration) { 2059 return new VolumeShaper(configuration, this); 2060 } 2061 2062 /** 2063 * Sets the playback sample rate for this track. This sets the sampling rate at which 2064 * the audio data will be consumed and played back 2065 * (as set by the sampleRateInHz parameter in the 2066 * {@link #AudioTrack(int, int, int, int, int, int)} constructor), 2067 * not the original sampling rate of the 2068 * content. For example, setting it to half the sample rate of the content will cause the 2069 * playback to last twice as long, but will also result in a pitch shift down by one octave. 2070 * The valid sample rate range is from 1 Hz to twice the value returned by 2071 * {@link #getNativeOutputSampleRate(int)}. 2072 * Use {@link #setPlaybackParams(PlaybackParams)} for speed control. 2073 * <p> This method may also be used to repurpose an existing <code>AudioTrack</code> 2074 * for playback of content of differing sample rate, 2075 * but with identical encoding and channel mask. 
2076 * @param sampleRateInHz the sample rate expressed in Hz 2077 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 2078 * {@link #ERROR_INVALID_OPERATION} 2079 */ setPlaybackRate(int sampleRateInHz)2080 public int setPlaybackRate(int sampleRateInHz) { 2081 if (mState != STATE_INITIALIZED) { 2082 return ERROR_INVALID_OPERATION; 2083 } 2084 if (sampleRateInHz <= 0) { 2085 return ERROR_BAD_VALUE; 2086 } 2087 return native_set_playback_rate(sampleRateInHz); 2088 } 2089 2090 2091 /** 2092 * Sets the playback parameters. 2093 * This method returns failure if it cannot apply the playback parameters. 2094 * One possible cause is that the parameters for speed or pitch are out of range. 2095 * Another possible cause is that the <code>AudioTrack</code> is streaming 2096 * (see {@link #MODE_STREAM}) and the 2097 * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer 2098 * on configuration must be larger than the speed multiplied by the minimum size 2099 * {@link #getMinBufferSize(int, int, int)}) to allow proper playback. 2100 * @param params see {@link PlaybackParams}. In particular, 2101 * speed, pitch, and audio mode should be set. 2102 * @throws IllegalArgumentException if the parameters are invalid or not accepted. 2103 * @throws IllegalStateException if track is not initialized. 2104 */ setPlaybackParams(@onNull PlaybackParams params)2105 public void setPlaybackParams(@NonNull PlaybackParams params) { 2106 if (params == null) { 2107 throw new IllegalArgumentException("params is null"); 2108 } 2109 native_set_playback_params(params); 2110 } 2111 2112 2113 /** 2114 * Sets the position of the notification marker. At most one marker can be active. 2115 * @param markerInFrames marker position in wrapping frame units similar to 2116 * {@link #getPlaybackHeadPosition}, or zero to disable the marker. 
2117 * To set a marker at a position which would appear as zero due to wraparound, 2118 * a workaround is to use a non-zero position near zero, such as -1 or 1. 2119 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 2120 * {@link #ERROR_INVALID_OPERATION} 2121 */ setNotificationMarkerPosition(int markerInFrames)2122 public int setNotificationMarkerPosition(int markerInFrames) { 2123 if (mState == STATE_UNINITIALIZED) { 2124 return ERROR_INVALID_OPERATION; 2125 } 2126 return native_set_marker_pos(markerInFrames); 2127 } 2128 2129 2130 /** 2131 * Sets the period for the periodic notification event. 2132 * @param periodInFrames update period expressed in frames. 2133 * Zero period means no position updates. A negative period is not allowed. 2134 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION} 2135 */ setPositionNotificationPeriod(int periodInFrames)2136 public int setPositionNotificationPeriod(int periodInFrames) { 2137 if (mState == STATE_UNINITIALIZED) { 2138 return ERROR_INVALID_OPERATION; 2139 } 2140 return native_set_pos_update_period(periodInFrames); 2141 } 2142 2143 2144 /** 2145 * Sets the playback head position within the static buffer. 2146 * The track must be stopped or paused for the position to be changed, 2147 * and must use the {@link #MODE_STATIC} mode. 2148 * @param positionInFrames playback head position within buffer, expressed in frames. 2149 * Zero corresponds to start of buffer. 2150 * The position must not be greater than the buffer size in frames, or negative. 2151 * Though this method and {@link #getPlaybackHeadPosition()} have similar names, 2152 * the position values have different meanings. 2153 * <br> 2154 * If looping is currently enabled and the new position is greater than or equal to the 2155 * loop end marker, the behavior varies by API level: 2156 * as of {@link android.os.Build.VERSION_CODES#M}, 2157 * the looping is first disabled and then the position is set. 
2158 * For earlier API levels, the behavior is unspecified. 2159 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 2160 * {@link #ERROR_INVALID_OPERATION} 2161 */ setPlaybackHeadPosition(@ntRange from = 0) int positionInFrames)2162 public int setPlaybackHeadPosition(@IntRange (from = 0) int positionInFrames) { 2163 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 2164 getPlayState() == PLAYSTATE_PLAYING) { 2165 return ERROR_INVALID_OPERATION; 2166 } 2167 if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) { 2168 return ERROR_BAD_VALUE; 2169 } 2170 return native_set_position(positionInFrames); 2171 } 2172 2173 /** 2174 * Sets the loop points and the loop count. The loop can be infinite. 2175 * Similarly to setPlaybackHeadPosition, 2176 * the track must be stopped or paused for the loop points to be changed, 2177 * and must use the {@link #MODE_STATIC} mode. 2178 * @param startInFrames loop start marker expressed in frames. 2179 * Zero corresponds to start of buffer. 2180 * The start marker must not be greater than or equal to the buffer size in frames, or negative. 2181 * @param endInFrames loop end marker expressed in frames. 2182 * The total buffer size in frames corresponds to end of buffer. 2183 * The end marker must not be greater than the buffer size in frames. 2184 * For looping, the end marker must not be less than or equal to the start marker, 2185 * but to disable looping 2186 * it is permitted for start marker, end marker, and loop count to all be 0. 2187 * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}. 2188 * If the loop period (endInFrames - startInFrames) is too small for the implementation to 2189 * support, 2190 * {@link #ERROR_BAD_VALUE} is returned. 2191 * The loop range is the interval [startInFrames, endInFrames). 
2192 * <br> 2193 * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged, 2194 * unless it is greater than or equal to the loop end marker, in which case 2195 * it is forced to the loop start marker. 2196 * For earlier API levels, the effect on position is unspecified. 2197 * @param loopCount the number of times the loop is looped; must be greater than or equal to -1. 2198 * A value of -1 means infinite looping, and 0 disables looping. 2199 * A value of positive N means to "loop" (go back) N times. For example, 2200 * a value of one means to play the region two times in total. 2201 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 2202 * {@link #ERROR_INVALID_OPERATION} 2203 */ setLoopPoints(@ntRange from = 0) int startInFrames, @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount)2204 public int setLoopPoints(@IntRange (from = 0) int startInFrames, 2205 @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount) { 2206 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 2207 getPlayState() == PLAYSTATE_PLAYING) { 2208 return ERROR_INVALID_OPERATION; 2209 } 2210 if (loopCount == 0) { 2211 ; // explicitly allowed as an exception to the loop region range check 2212 } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames && 2213 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) { 2214 return ERROR_BAD_VALUE; 2215 } 2216 return native_set_loop(startInFrames, endInFrames, loopCount); 2217 } 2218 2219 /** 2220 * Sets the audio presentation. 2221 * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned. 2222 * If a multi-stream decoder (MSD) is not present, or the format does not support 2223 * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned. 2224 * {@link #ERROR} is returned in case of any other error. 2225 * @param presentation see {@link AudioPresentation}. 
In particular, id should be set. 2226 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR}, 2227 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION} 2228 * @throws IllegalArgumentException if the audio presentation is null. 2229 * @throws IllegalStateException if track is not initialized. 2230 */ setPresentation(@onNull AudioPresentation presentation)2231 public int setPresentation(@NonNull AudioPresentation presentation) { 2232 if (presentation == null) { 2233 throw new IllegalArgumentException("audio presentation is null"); 2234 } 2235 return native_setPresentation(presentation.getPresentationId(), 2236 presentation.getProgramId()); 2237 } 2238 2239 /** 2240 * Sets the initialization state of the instance. This method was originally intended to be used 2241 * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state. 2242 * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete. 2243 * @param state the state of the AudioTrack instance 2244 * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack. 2245 */ 2246 @Deprecated setState(int state)2247 protected void setState(int state) { 2248 mState = state; 2249 } 2250 2251 2252 //--------------------------------------------------------- 2253 // Transport control methods 2254 //-------------------- 2255 /** 2256 * Starts playing an AudioTrack. 2257 * <p> 2258 * If track's creation mode is {@link #MODE_STATIC}, you must have called one of 2259 * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)}, 2260 * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)}, 2261 * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to 2262 * play(). 
2263 * <p> 2264 * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to 2265 * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor). 2266 * If you don't call write() first, or if you call write() but with an insufficient amount of 2267 * data, then the track will be in underrun state at play(). In this case, 2268 * playback will not actually start playing until the data path is filled to a 2269 * device-specific minimum level. This requirement for the path to be filled 2270 * to a minimum level is also true when resuming audio playback after calling stop(). 2271 * Similarly the buffer will need to be filled up again after 2272 * the track underruns due to failure to call write() in a timely manner with sufficient data. 2273 * For portability, an application should prime the data path to the maximum allowed 2274 * by writing data until the write() method returns a short transfer count. 2275 * This allows play() to start immediately, and reduces the chance of underrun. 
2276 * 2277 * @throws IllegalStateException if the track isn't properly initialized 2278 */ play()2279 public void play() 2280 throws IllegalStateException { 2281 if (mState != STATE_INITIALIZED) { 2282 throw new IllegalStateException("play() called on uninitialized AudioTrack."); 2283 } 2284 //FIXME use lambda to pass startImpl to superclass 2285 final int delay = getStartDelayMs(); 2286 if (delay == 0) { 2287 startImpl(); 2288 } else { 2289 new Thread() { 2290 public void run() { 2291 try { 2292 Thread.sleep(delay); 2293 } catch (InterruptedException e) { 2294 e.printStackTrace(); 2295 } 2296 baseSetStartDelayMs(0); 2297 try { 2298 startImpl(); 2299 } catch (IllegalStateException e) { 2300 // fail silently for a state exception when it is happening after 2301 // a delayed start, as the player state could have changed between the 2302 // call to start() and the execution of startImpl() 2303 } 2304 } 2305 }.start(); 2306 } 2307 } 2308 startImpl()2309 private void startImpl() { 2310 synchronized(mPlayStateLock) { 2311 baseStart(); 2312 native_start(); 2313 if (mPlayState == PLAYSTATE_PAUSED_STOPPING) { 2314 mPlayState = PLAYSTATE_STOPPING; 2315 } else { 2316 mPlayState = PLAYSTATE_PLAYING; 2317 mOffloadEosPending = false; 2318 } 2319 } 2320 } 2321 2322 /** 2323 * Stops playing the audio data. 2324 * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing 2325 * after the last buffer that was written has been played. For an immediate stop, use 2326 * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played 2327 * back yet. 
2328 * @throws IllegalStateException 2329 */ stop()2330 public void stop() 2331 throws IllegalStateException { 2332 if (mState != STATE_INITIALIZED) { 2333 throw new IllegalStateException("stop() called on uninitialized AudioTrack."); 2334 } 2335 2336 // stop playing 2337 synchronized(mPlayStateLock) { 2338 native_stop(); 2339 baseStop(); 2340 if (mOffloaded && mPlayState != PLAYSTATE_PAUSED_STOPPING) { 2341 mPlayState = PLAYSTATE_STOPPING; 2342 } else { 2343 mPlayState = PLAYSTATE_STOPPED; 2344 mOffloadEosPending = false; 2345 mAvSyncHeader = null; 2346 mAvSyncBytesRemaining = 0; 2347 mPlayStateLock.notify(); 2348 } 2349 } 2350 } 2351 2352 /** 2353 * Pauses the playback of the audio data. Data that has not been played 2354 * back will not be discarded. Subsequent calls to {@link #play} will play 2355 * this data back. See {@link #flush()} to discard this data. 2356 * 2357 * @throws IllegalStateException 2358 */ pause()2359 public void pause() 2360 throws IllegalStateException { 2361 if (mState != STATE_INITIALIZED) { 2362 throw new IllegalStateException("pause() called on uninitialized AudioTrack."); 2363 } 2364 2365 // pause playback 2366 synchronized(mPlayStateLock) { 2367 native_pause(); 2368 basePause(); 2369 if (mPlayState == PLAYSTATE_STOPPING) { 2370 mPlayState = PLAYSTATE_PAUSED_STOPPING; 2371 } else { 2372 mPlayState = PLAYSTATE_PAUSED; 2373 } 2374 } 2375 } 2376 2377 2378 //--------------------------------------------------------- 2379 // Audio data supply 2380 //-------------------- 2381 2382 /** 2383 * Flushes the audio data currently queued for playback. Any data that has 2384 * been written but not yet presented will be discarded. No-op if not stopped or paused, 2385 * or if the track's creation mode is not {@link #MODE_STREAM}. 2386 * <BR> Note that although data written but not yet presented is discarded, there is no 2387 * guarantee that all of the buffer space formerly used by that data 2388 * is available for a subsequent write. 
2389 * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code> 2390 * less than or equal to the total buffer size 2391 * may return a short actual transfer count. 2392 */ flush()2393 public void flush() { 2394 if (mState == STATE_INITIALIZED) { 2395 // flush the data in native layer 2396 native_flush(); 2397 mAvSyncHeader = null; 2398 mAvSyncBytesRemaining = 0; 2399 } 2400 2401 } 2402 2403 /** 2404 * Writes the audio data to the audio sink for playback (streaming mode), 2405 * or copies audio data for later playback (static buffer mode). 2406 * The format specified in the AudioTrack constructor should be 2407 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 2408 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 2409 * <p> 2410 * In streaming mode, the write will normally block until all the data has been enqueued for 2411 * playback, and will return a full transfer count. However, if the track is stopped or paused 2412 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 2413 * occurs during the write, then the write may return a short transfer count. 2414 * <p> 2415 * In static buffer mode, copies the data to the buffer starting at offset 0. 2416 * Note that the actual playback of this data might occur after this function returns. 2417 * 2418 * @param audioData the array that holds the data to play. 2419 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 2420 * starts. 2421 * Must not be negative, or cause the data access to go out of bounds of the array. 2422 * @param sizeInBytes the number of bytes to write in audioData after the offset. 2423 * Must not be negative, or cause the data access to go out of bounds of the array. 2424 * @return zero or the positive number of bytes that were written, or one of the following 2425 * error codes. 
The number of bytes will be a multiple of the frame size in bytes 2426 * not to exceed sizeInBytes. 2427 * <ul> 2428 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 2429 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 2430 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2431 * needs to be recreated. The dead object error code is not returned if some data was 2432 * successfully transferred. In this case, the error is returned at the next write()</li> 2433 * <li>{@link #ERROR} in case of other error</li> 2434 * </ul> 2435 * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code> 2436 * set to {@link #WRITE_BLOCKING}. 2437 */ write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes)2438 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) { 2439 return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING); 2440 } 2441 2442 /** 2443 * Writes the audio data to the audio sink for playback (streaming mode), 2444 * or copies audio data for later playback (static buffer mode). 2445 * The format specified in the AudioTrack constructor should be 2446 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 2447 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 2448 * <p> 2449 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2450 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2451 * for playback, and will return a full transfer count. However, if the write mode is 2452 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2453 * interrupts the write by calling stop or pause, or an I/O error 2454 * occurs during the write, then the write may return a short transfer count. 
2455 * <p> 2456 * In static buffer mode, copies the data to the buffer starting at offset 0, 2457 * and the write mode is ignored. 2458 * Note that the actual playback of this data might occur after this function returns. 2459 * 2460 * @param audioData the array that holds the data to play. 2461 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 2462 * starts. 2463 * Must not be negative, or cause the data access to go out of bounds of the array. 2464 * @param sizeInBytes the number of bytes to write in audioData after the offset. 2465 * Must not be negative, or cause the data access to go out of bounds of the array. 2466 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2467 * effect in static mode. 2468 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2469 * to the audio sink. 2470 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2471 * queuing as much audio data for playback as possible without blocking. 2472 * @return zero or the positive number of bytes that were written, or one of the following 2473 * error codes. The number of bytes will be a multiple of the frame size in bytes 2474 * not to exceed sizeInBytes. 2475 * <ul> 2476 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 2477 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 2478 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2479 * needs to be recreated. The dead object error code is not returned if some data was 2480 * successfully transferred. 
In this case, the error is returned at the next write()</li> 2481 * <li>{@link #ERROR} in case of other error</li> 2482 * </ul> 2483 */ write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)2484 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes, 2485 @WriteMode int writeMode) { 2486 2487 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 2488 return ERROR_INVALID_OPERATION; 2489 } 2490 2491 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2492 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2493 return ERROR_BAD_VALUE; 2494 } 2495 2496 if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0) 2497 || (offsetInBytes + sizeInBytes < 0) // detect integer overflow 2498 || (offsetInBytes + sizeInBytes > audioData.length)) { 2499 return ERROR_BAD_VALUE; 2500 } 2501 2502 if (!blockUntilOffloadDrain(writeMode)) { 2503 return 0; 2504 } 2505 2506 final int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat, 2507 writeMode == WRITE_BLOCKING); 2508 2509 if ((mDataLoadMode == MODE_STATIC) 2510 && (mState == STATE_NO_STATIC_DATA) 2511 && (ret > 0)) { 2512 // benign race with respect to other APIs that read mState 2513 mState = STATE_INITIALIZED; 2514 } 2515 2516 return ret; 2517 } 2518 2519 /** 2520 * Writes the audio data to the audio sink for playback (streaming mode), 2521 * or copies audio data for later playback (static buffer mode). 2522 * The format specified in the AudioTrack constructor should be 2523 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 2524 * <p> 2525 * In streaming mode, the write will normally block until all the data has been enqueued for 2526 * playback, and will return a full transfer count. 
However, if the track is stopped or paused 2527 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 2528 * occurs during the write, then the write may return a short transfer count. 2529 * <p> 2530 * In static buffer mode, copies the data to the buffer starting at offset 0. 2531 * Note that the actual playback of this data might occur after this function returns. 2532 * 2533 * @param audioData the array that holds the data to play. 2534 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 2535 * starts. 2536 * Must not be negative, or cause the data access to go out of bounds of the array. 2537 * @param sizeInShorts the number of shorts to read in audioData after the offset. 2538 * Must not be negative, or cause the data access to go out of bounds of the array. 2539 * @return zero or the positive number of shorts that were written, or one of the following 2540 * error codes. The number of shorts will be a multiple of the channel count not to 2541 * exceed sizeInShorts. 2542 * <ul> 2543 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 2544 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 2545 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2546 * needs to be recreated. The dead object error code is not returned if some data was 2547 * successfully transferred. In this case, the error is returned at the next write()</li> 2548 * <li>{@link #ERROR} in case of other error</li> 2549 * </ul> 2550 * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code> 2551 * set to {@link #WRITE_BLOCKING}. 
2552 */ write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts)2553 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) { 2554 return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING); 2555 } 2556 2557 /** 2558 * Writes the audio data to the audio sink for playback (streaming mode), 2559 * or copies audio data for later playback (static buffer mode). 2560 * The format specified in the AudioTrack constructor should be 2561 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 2562 * <p> 2563 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2564 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2565 * for playback, and will return a full transfer count. However, if the write mode is 2566 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2567 * interrupts the write by calling stop or pause, or an I/O error 2568 * occurs during the write, then the write may return a short transfer count. 2569 * <p> 2570 * In static buffer mode, copies the data to the buffer starting at offset 0. 2571 * Note that the actual playback of this data might occur after this function returns. 2572 * 2573 * @param audioData the array that holds the data to write. 2574 * @param offsetInShorts the offset expressed in shorts in audioData where the data to write 2575 * starts. 2576 * Must not be negative, or cause the data access to go out of bounds of the array. 2577 * @param sizeInShorts the number of shorts to read in audioData after the offset. 2578 * Must not be negative, or cause the data access to go out of bounds of the array. 2579 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2580 * effect in static mode. 2581 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2582 * to the audio sink. 
2583 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2584 * queuing as much audio data for playback as possible without blocking. 2585 * @return zero or the positive number of shorts that were written, or one of the following 2586 * error codes. The number of shorts will be a multiple of the channel count not to 2587 * exceed sizeInShorts. 2588 * <ul> 2589 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 2590 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 2591 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2592 * needs to be recreated. The dead object error code is not returned if some data was 2593 * successfully transferred. In this case, the error is returned at the next write()</li> 2594 * <li>{@link #ERROR} in case of other error</li> 2595 * </ul> 2596 */ write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)2597 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts, 2598 @WriteMode int writeMode) { 2599 2600 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 2601 return ERROR_INVALID_OPERATION; 2602 } 2603 2604 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2605 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2606 return ERROR_BAD_VALUE; 2607 } 2608 2609 if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0) 2610 || (offsetInShorts + sizeInShorts < 0) // detect integer overflow 2611 || (offsetInShorts + sizeInShorts > audioData.length)) { 2612 return ERROR_BAD_VALUE; 2613 } 2614 2615 if (!blockUntilOffloadDrain(writeMode)) { 2616 return 0; 2617 } 2618 2619 final int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat, 2620 writeMode == WRITE_BLOCKING); 2621 2622 if ((mDataLoadMode == MODE_STATIC) 2623 && (mState == 
STATE_NO_STATIC_DATA) 2624 && (ret > 0)) { 2625 // benign race with respect to other APIs that read mState 2626 mState = STATE_INITIALIZED; 2627 } 2628 2629 return ret; 2630 } 2631 2632 /** 2633 * Writes the audio data to the audio sink for playback (streaming mode), 2634 * or copies audio data for later playback (static buffer mode). 2635 * The format specified in the AudioTrack constructor should be 2636 * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array. 2637 * <p> 2638 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2639 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2640 * for playback, and will return a full transfer count. However, if the write mode is 2641 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2642 * interrupts the write by calling stop or pause, or an I/O error 2643 * occurs during the write, then the write may return a short transfer count. 2644 * <p> 2645 * In static buffer mode, copies the data to the buffer starting at offset 0, 2646 * and the write mode is ignored. 2647 * Note that the actual playback of this data might occur after this function returns. 2648 * 2649 * @param audioData the array that holds the data to write. 2650 * The implementation does not clip for sample values within the nominal range 2651 * [-1.0f, 1.0f], provided that all gains in the audio pipeline are 2652 * less than or equal to unity (1.0f), and in the absence of post-processing effects 2653 * that could add energy, such as reverb. For the convenience of applications 2654 * that compute samples using filters with non-unity gain, 2655 * sample values +3 dB beyond the nominal range are permitted. 2656 * However such values may eventually be limited or clipped, depending on various gains 2657 * and later processing in the audio path. 
Therefore applications are encouraged 2658 * to provide samples values within the nominal range. 2659 * @param offsetInFloats the offset, expressed as a number of floats, 2660 * in audioData where the data to write starts. 2661 * Must not be negative, or cause the data access to go out of bounds of the array. 2662 * @param sizeInFloats the number of floats to write in audioData after the offset. 2663 * Must not be negative, or cause the data access to go out of bounds of the array. 2664 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2665 * effect in static mode. 2666 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2667 * to the audio sink. 2668 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2669 * queuing as much audio data for playback as possible without blocking. 2670 * @return zero or the positive number of floats that were written, or one of the following 2671 * error codes. The number of floats will be a multiple of the channel count not to 2672 * exceed sizeInFloats. 2673 * <ul> 2674 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 2675 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 2676 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2677 * needs to be recreated. The dead object error code is not returned if some data was 2678 * successfully transferred. 
In this case, the error is returned at the next write()</li> 2679 * <li>{@link #ERROR} in case of other error</li> 2680 * </ul> 2681 */ write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)2682 public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats, 2683 @WriteMode int writeMode) { 2684 2685 if (mState == STATE_UNINITIALIZED) { 2686 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2687 return ERROR_INVALID_OPERATION; 2688 } 2689 2690 if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) { 2691 Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT"); 2692 return ERROR_INVALID_OPERATION; 2693 } 2694 2695 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2696 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2697 return ERROR_BAD_VALUE; 2698 } 2699 2700 if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0) 2701 || (offsetInFloats + sizeInFloats < 0) // detect integer overflow 2702 || (offsetInFloats + sizeInFloats > audioData.length)) { 2703 Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size"); 2704 return ERROR_BAD_VALUE; 2705 } 2706 2707 if (!blockUntilOffloadDrain(writeMode)) { 2708 return 0; 2709 } 2710 2711 final int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat, 2712 writeMode == WRITE_BLOCKING); 2713 2714 if ((mDataLoadMode == MODE_STATIC) 2715 && (mState == STATE_NO_STATIC_DATA) 2716 && (ret > 0)) { 2717 // benign race with respect to other APIs that read mState 2718 mState = STATE_INITIALIZED; 2719 } 2720 2721 return ret; 2722 } 2723 2724 2725 /** 2726 * Writes the audio data to the audio sink for playback (streaming mode), 2727 * or copies audio data for later playback (static buffer mode). 2728 * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor. 
2729 * <p> 2730 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 2731 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 2732 * for playback, and will return a full transfer count. However, if the write mode is 2733 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 2734 * interrupts the write by calling stop or pause, or an I/O error 2735 * occurs during the write, then the write may return a short transfer count. 2736 * <p> 2737 * In static buffer mode, copies the data to the buffer starting at offset 0, 2738 * and the write mode is ignored. 2739 * Note that the actual playback of this data might occur after this function returns. 2740 * 2741 * @param audioData the buffer that holds the data to write, starting at the position reported 2742 * by <code>audioData.position()</code>. 2743 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 2744 * have been advanced to reflect the amount of data that was successfully written to 2745 * the AudioTrack. 2746 * @param sizeInBytes number of bytes to write. It is recommended but not enforced 2747 * that the number of bytes requested be a multiple of the frame size (sample size in 2748 * bytes multiplied by the channel count). 2749 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 2750 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 2751 * effect in static mode. 2752 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 2753 * to the audio sink. 2754 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 2755 * queuing as much audio data for playback as possible without blocking. 2756 * @return zero or the positive number of bytes that were written, or one of the following 2757 * error codes. 
2758 * <ul> 2759 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 2760 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 2761 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2762 * needs to be recreated. The dead object error code is not returned if some data was 2763 * successfully transferred. In this case, the error is returned at the next write()</li> 2764 * <li>{@link #ERROR} in case of other error</li> 2765 * </ul> 2766 */ write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)2767 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 2768 @WriteMode int writeMode) { 2769 2770 if (mState == STATE_UNINITIALIZED) { 2771 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 2772 return ERROR_INVALID_OPERATION; 2773 } 2774 2775 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 2776 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 2777 return ERROR_BAD_VALUE; 2778 } 2779 2780 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 2781 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 2782 return ERROR_BAD_VALUE; 2783 } 2784 2785 if (!blockUntilOffloadDrain(writeMode)) { 2786 return 0; 2787 } 2788 2789 int ret = 0; 2790 if (audioData.isDirect()) { 2791 ret = native_write_native_bytes(audioData, 2792 audioData.position(), sizeInBytes, mAudioFormat, 2793 writeMode == WRITE_BLOCKING); 2794 } else { 2795 ret = native_write_byte(NioUtils.unsafeArray(audioData), 2796 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 2797 sizeInBytes, mAudioFormat, 2798 writeMode == WRITE_BLOCKING); 2799 } 2800 2801 if ((mDataLoadMode == MODE_STATIC) 2802 && (mState == STATE_NO_STATIC_DATA) 2803 && (ret > 0)) { 2804 // benign race with respect to other APIs that read mState 2805 mState = 
STATE_INITIALIZED;
        }

        // Advance the caller-visible buffer position by the number of bytes consumed.
        if (ret > 0) {
            audioData.position(audioData.position() + ret);
        }

        return ret;
    }

    /**
     * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
     * The blocking behavior will depend on the write mode.
     * @param audioData the buffer that holds the data to write, starting at the position reported
     *     by <code>audioData.position()</code>.
     *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
     *     have been advanced to reflect the amount of data that was successfully written to
     *     the AudioTrack.
     * @param sizeInBytes number of bytes to write. It is recommended but not enforced
     *     that the number of bytes requested be a multiple of the frame size (sample size in
     *     bytes multiplied by the channel count).
     *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
     * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
     *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
     *     to the audio sink.
     *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
     *     queuing as much audio data for playback as possible without blocking.
     * @param timestamp The timestamp, in nanoseconds, of the first decodable audio frame in the
     *     provided audioData.
     * @return zero or the positive number of bytes that were written, or one of the following
     *    error codes.
     * <ul>
     * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
     * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
     * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
     *    needs to be recreated. The dead object error code is not returned if some data was
     *    successfully transferred. In this case, the error is returned at the next write()</li>
     * <li>{@link #ERROR} in case of other error</li>
     * </ul>
     */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Tracks without FLAG_HW_AV_SYNC cannot consume the timestamp; fall back to the
        // plain streaming write and drop the pts.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // While an offloaded track is draining (STOPPING), a non-blocking caller writes nothing.
        if (!blockUntilOffloadDrain(writeMode)) {
            return 0;
        }

        // Lazily create the timestamp header buffer; it is reused across writes.
        // 0x55550002 is the AV sync header magic expected by the HAL.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550002);
        }

        // Starting a new audio chunk: fill in the header with payload size, pts and offset.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // Write the timestamp header first if it has not been fully written yet.
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            // Header only partially written (non-blocking mode): report 0 payload bytes;
            // the remainder of the header goes out on the next call.
            if (mAvSyncHeader.remaining() > 0) {
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // Header complete: write at most the bytes still belonging to the current chunk.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }


    /**
     * Sets the playback head position within the static buffer to zero,
     * that is it rewinds to start of static buffer.
2927 * The track must be stopped or paused, and 2928 * the track's creation mode must be {@link #MODE_STATIC}. 2929 * <p> 2930 * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by 2931 * {@link #getPlaybackHeadPosition()} to zero. 2932 * For earlier API levels, the reset behavior is unspecified. 2933 * <p> 2934 * Use {@link #setPlaybackHeadPosition(int)} with a zero position 2935 * if the reset of <code>getPlaybackHeadPosition()</code> is not needed. 2936 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 2937 * {@link #ERROR_INVALID_OPERATION} 2938 */ reloadStaticData()2939 public int reloadStaticData() { 2940 if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) { 2941 return ERROR_INVALID_OPERATION; 2942 } 2943 return native_reload_static(); 2944 } 2945 2946 /** 2947 * When an AudioTrack in offload mode is in STOPPING play state, wait until event STREAM_END is 2948 * received if blocking write or return with 0 frames written if non blocking mode. 2949 */ blockUntilOffloadDrain(int writeMode)2950 private boolean blockUntilOffloadDrain(int writeMode) { 2951 synchronized (mPlayStateLock) { 2952 while (mPlayState == PLAYSTATE_STOPPING || mPlayState == PLAYSTATE_PAUSED_STOPPING) { 2953 if (writeMode == WRITE_NON_BLOCKING) { 2954 return false; 2955 } 2956 try { 2957 mPlayStateLock.wait(); 2958 } catch (InterruptedException e) { 2959 } 2960 } 2961 return true; 2962 } 2963 } 2964 2965 //-------------------------------------------------------------------------- 2966 // Audio effects management 2967 //-------------------- 2968 2969 /** 2970 * Attaches an auxiliary effect to the audio track. A typical auxiliary 2971 * effect is a reverberation effect which can be applied on any sound source 2972 * that directs a certain amount of its energy to this effect. This amount 2973 * is defined by setAuxEffectSendLevel(). 2974 * {@see #setAuxEffectSendLevel(float)}. 
2975 * <p>After creating an auxiliary effect (e.g. 2976 * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with 2977 * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling 2978 * this method to attach the audio track to the effect. 2979 * <p>To detach the effect from the audio track, call this method with a 2980 * null effect id. 2981 * 2982 * @param effectId system wide unique id of the effect to attach 2983 * @return error code or success, see {@link #SUCCESS}, 2984 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE} 2985 */ attachAuxEffect(int effectId)2986 public int attachAuxEffect(int effectId) { 2987 if (mState == STATE_UNINITIALIZED) { 2988 return ERROR_INVALID_OPERATION; 2989 } 2990 return native_attachAuxEffect(effectId); 2991 } 2992 2993 /** 2994 * Sets the send level of the audio track to the attached auxiliary effect 2995 * {@link #attachAuxEffect(int)}. Effect levels 2996 * are clamped to the closed interval [0.0, max] where 2997 * max is the value of {@link #getMaxVolume}. 2998 * A value of 0.0 results in no effect, and a value of 1.0 is full send. 2999 * <p>By default the send level is 0.0f, so even if an effect is attached to the player 3000 * this method must be called for the effect to be applied. 3001 * <p>Note that the passed level value is a linear scalar. 
UI controls should be scaled 3002 * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB, 3003 * so an appropriate conversion from linear UI input x to level is: 3004 * x == 0 -> level = 0 3005 * 0 < x <= R -> level = 10^(72*(x-R)/20/R) 3006 * 3007 * @param level linear send level 3008 * @return error code or success, see {@link #SUCCESS}, 3009 * {@link #ERROR_INVALID_OPERATION}, {@link #ERROR} 3010 */ setAuxEffectSendLevel(@loatRangefrom = 0.0) float level)3011 public int setAuxEffectSendLevel(@FloatRange(from = 0.0) float level) { 3012 if (mState == STATE_UNINITIALIZED) { 3013 return ERROR_INVALID_OPERATION; 3014 } 3015 return baseSetAuxEffectSendLevel(level); 3016 } 3017 3018 @Override playerSetAuxEffectSendLevel(boolean muting, float level)3019 int playerSetAuxEffectSendLevel(boolean muting, float level) { 3020 level = clampGainOrLevel(muting ? 0.0f : level); 3021 int err = native_setAuxEffectSendLevel(level); 3022 return err == 0 ? SUCCESS : ERROR; 3023 } 3024 3025 //-------------------------------------------------------------------------- 3026 // Explicit Routing 3027 //-------------------- 3028 private AudioDeviceInfo mPreferredDevice = null; 3029 3030 /** 3031 * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route 3032 * the output from this AudioTrack. 3033 * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink. 3034 * If deviceInfo is null, default routing is restored. 3035 * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and 3036 * does not correspond to a valid audio output device. 3037 */ 3038 @Override setPreferredDevice(AudioDeviceInfo deviceInfo)3039 public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) { 3040 // Do some validation.... 3041 if (deviceInfo != null && !deviceInfo.isSink()) { 3042 return false; 3043 } 3044 int preferredDeviceId = deviceInfo != null ? 
deviceInfo.getId() : 0; 3045 boolean status = native_setOutputDevice(preferredDeviceId); 3046 if (status == true) { 3047 synchronized (this) { 3048 mPreferredDevice = deviceInfo; 3049 } 3050 } 3051 return status; 3052 } 3053 3054 /** 3055 * Returns the selected output specified by {@link #setPreferredDevice}. Note that this 3056 * is not guaranteed to correspond to the actual device being used for playback. 3057 */ 3058 @Override getPreferredDevice()3059 public AudioDeviceInfo getPreferredDevice() { 3060 synchronized (this) { 3061 return mPreferredDevice; 3062 } 3063 } 3064 3065 /** 3066 * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack. 3067 * Note: The query is only valid if the AudioTrack is currently playing. If it is not, 3068 * <code>getRoutedDevice()</code> will return null. 3069 */ 3070 @Override getRoutedDevice()3071 public AudioDeviceInfo getRoutedDevice() { 3072 int deviceId = native_getRoutedDeviceId(); 3073 if (deviceId == 0) { 3074 return null; 3075 } 3076 AudioDeviceInfo[] devices = 3077 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS); 3078 for (int i = 0; i < devices.length; i++) { 3079 if (devices[i].getId() == deviceId) { 3080 return devices[i]; 3081 } 3082 } 3083 return null; 3084 } 3085 3086 /* 3087 * Call BEFORE adding a routing callback handler. 3088 */ 3089 @GuardedBy("mRoutingChangeListeners") testEnableNativeRoutingCallbacksLocked()3090 private void testEnableNativeRoutingCallbacksLocked() { 3091 if (mRoutingChangeListeners.size() == 0) { 3092 native_enableDeviceCallback(); 3093 } 3094 } 3095 3096 /* 3097 * Call AFTER removing a routing callback handler. 
3098 */ 3099 @GuardedBy("mRoutingChangeListeners") testDisableNativeRoutingCallbacksLocked()3100 private void testDisableNativeRoutingCallbacksLocked() { 3101 if (mRoutingChangeListeners.size() == 0) { 3102 native_disableDeviceCallback(); 3103 } 3104 } 3105 3106 //-------------------------------------------------------------------------- 3107 // (Re)Routing Info 3108 //-------------------- 3109 /** 3110 * The list of AudioRouting.OnRoutingChangedListener interfaces added (with 3111 * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)} 3112 * by an app to receive (re)routing notifications. 3113 */ 3114 @GuardedBy("mRoutingChangeListeners") 3115 private ArrayMap<AudioRouting.OnRoutingChangedListener, 3116 NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>(); 3117 3118 /** 3119 * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing 3120 * changes on this AudioTrack. 3121 * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive 3122 * notifications of rerouting events. 3123 * @param handler Specifies the {@link Handler} object for the thread on which to execute 3124 * the callback. If <code>null</code>, the {@link Handler} associated with the main 3125 * {@link Looper} will be used. 3126 */ 3127 @Override addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)3128 public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, 3129 Handler handler) { 3130 synchronized (mRoutingChangeListeners) { 3131 if (listener != null && !mRoutingChangeListeners.containsKey(listener)) { 3132 testEnableNativeRoutingCallbacksLocked(); 3133 mRoutingChangeListeners.put( 3134 listener, new NativeRoutingEventHandlerDelegate(this, listener, 3135 handler != null ? 
handler : new Handler(mInitializationLooper))); 3136 } 3137 } 3138 } 3139 3140 /** 3141 * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added 3142 * to receive rerouting notifications. 3143 * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface 3144 * to remove. 3145 */ 3146 @Override removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)3147 public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) { 3148 synchronized (mRoutingChangeListeners) { 3149 if (mRoutingChangeListeners.containsKey(listener)) { 3150 mRoutingChangeListeners.remove(listener); 3151 } 3152 testDisableNativeRoutingCallbacksLocked(); 3153 } 3154 } 3155 3156 //-------------------------------------------------------------------------- 3157 // (Re)Routing Info 3158 //-------------------- 3159 /** 3160 * Defines the interface by which applications can receive notifications of 3161 * routing changes for the associated {@link AudioTrack}. 3162 * 3163 * @deprecated users should switch to the general purpose 3164 * {@link AudioRouting.OnRoutingChangedListener} class instead. 3165 */ 3166 @Deprecated 3167 public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener { 3168 /** 3169 * Called when the routing of an AudioTrack changes from either and 3170 * explicit or policy rerouting. Use {@link #getRoutedDevice()} to 3171 * retrieve the newly routed-to device. 3172 */ onRoutingChanged(AudioTrack audioTrack)3173 public void onRoutingChanged(AudioTrack audioTrack); 3174 3175 @Override onRoutingChanged(AudioRouting router)3176 default public void onRoutingChanged(AudioRouting router) { 3177 if (router instanceof AudioTrack) { 3178 onRoutingChanged((AudioTrack) router); 3179 } 3180 } 3181 } 3182 3183 /** 3184 * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes 3185 * on this AudioTrack. 
     * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
     * of rerouting events.
     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
     * the callback. If <code>null</code>, the {@link Handler} associated with the main
     * {@link Looper} will be used.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // Forward to the AudioRouting-based implementation; the deprecated listener type
        // extends AudioRouting.OnRoutingChangedListener, so the cast is always valid.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }

    /**
     * Removes an {@link OnRoutingChangedListener} which has been previously added
     * to receive rerouting notifications.
     * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // Forward to the AudioRouting-based removal path.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }

    /**
     * Sends device list change notification to all listeners.
     */
    private void broadcastRoutingChange() {
        // Invalidate the cached audio port generation before notifying, so listeners that
        // query routing state observe fresh data.
        AudioManager.resetAudioPortGeneration();
        synchronized (mRoutingChangeListeners) {
            for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
                delegate.notifyClient();
            }
        }
    }

    //---------------------------------------------------------
    // Interface definitions
    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         */
        void onPeriodicNotification(AudioTrack track);
    }

    /**
     * Abstract class to receive event notifications about the stream playback in offloaded mode.
     * See {@link AudioTrack#registerStreamEventCallback(Executor, StreamEventCallback)} to register
     * the callback on the given {@link AudioTrack} instance.
     */
    public abstract static class StreamEventCallback {
        /**
         * Called when an offloaded track is no longer valid and has been discarded by the system.
         * An example of this happening is when an offloaded track has been paused too long, and
         * gets invalidated by the system to prevent any other offload.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onTearDown(@NonNull AudioTrack track) { }
        /**
         * Called when all the buffers of an offloaded track that were queued in the audio system
         * (e.g. the combination of the Android audio framework and the device's audio hardware)
         * have been played after {@link AudioTrack#stop()} has been called.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onPresentationEnded(@NonNull AudioTrack track) { }
        /**
         * Called when more audio data can be written without blocking on an offloaded track.
         * @param track the {@link AudioTrack} on which the event happened.
         * @param sizeInFrames the number of frames available to write without blocking.
         *   Note that the frame size of a compressed stream is 1 byte.
         */
        public void onDataRequest(@NonNull AudioTrack track, @IntRange(from = 0) int sizeInFrames) {
        }
    }

    /**
     * Registers a callback for the notification of stream events.
     * This callback can only be registered for instances operating in offloaded mode
     * (see {@link AudioTrack.Builder#setOffloadedPlayback(boolean)} and
     * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} for
     * more details).
     * @param executor {@link Executor} to handle the callbacks.
     * @param eventCallback the callback to receive the stream event notifications.
     */
    public void registerStreamEventCallback(@NonNull @CallbackExecutor Executor executor,
            @NonNull StreamEventCallback eventCallback) {
        if (eventCallback == null) {
            throw new IllegalArgumentException("Illegal null StreamEventCallback");
        }
        // Stream events are only generated for offloaded playback.
        if (!mOffloaded) {
            throw new IllegalStateException(
                    "Cannot register StreamEventCallback on non-offloaded AudioTrack");
        }
        if (executor == null) {
            throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback");
        }
        synchronized (mStreamEventCbLock) {
            // check if eventCallback already in list
            for (StreamEventCbInfo seci : mStreamEventCbInfoList) {
                if (seci.mStreamEventCb == eventCallback) {
                    throw new IllegalArgumentException(
                            "StreamEventCallback already registered");
                }
            }
            // Start the dedicated handler thread (lazily) before publishing the callback.
            beginStreamEventHandling();
            mStreamEventCbInfoList.add(new StreamEventCbInfo(executor, eventCallback));
        }
    }

    /**
     * Unregisters the callback for notification of stream events, previously registered
     * with {@link #registerStreamEventCallback(Executor, StreamEventCallback)}.
     * @param eventCallback the callback to unregister.
3313 */ unregisterStreamEventCallback(@onNull StreamEventCallback eventCallback)3314 public void unregisterStreamEventCallback(@NonNull StreamEventCallback eventCallback) { 3315 if (eventCallback == null) { 3316 throw new IllegalArgumentException("Illegal null StreamEventCallback"); 3317 } 3318 if (!mOffloaded) { 3319 throw new IllegalStateException("No StreamEventCallback on non-offloaded AudioTrack"); 3320 } 3321 synchronized (mStreamEventCbLock) { 3322 StreamEventCbInfo seciToRemove = null; 3323 for (StreamEventCbInfo seci : mStreamEventCbInfoList) { 3324 if (seci.mStreamEventCb == eventCallback) { 3325 // ok to remove while iterating over list as we exit iteration 3326 mStreamEventCbInfoList.remove(seci); 3327 if (mStreamEventCbInfoList.size() == 0) { 3328 endStreamEventHandling(); 3329 } 3330 return; 3331 } 3332 } 3333 throw new IllegalArgumentException("StreamEventCallback was not registered"); 3334 } 3335 } 3336 3337 //--------------------------------------------------------- 3338 // Offload 3339 //-------------------- 3340 private static class StreamEventCbInfo { 3341 final Executor mStreamEventExec; 3342 final StreamEventCallback mStreamEventCb; 3343 StreamEventCbInfo(Executor e, StreamEventCallback cb)3344 StreamEventCbInfo(Executor e, StreamEventCallback cb) { 3345 mStreamEventExec = e; 3346 mStreamEventCb = cb; 3347 } 3348 } 3349 3350 private final Object mStreamEventCbLock = new Object(); 3351 @GuardedBy("mStreamEventCbLock") 3352 @NonNull private LinkedList<StreamEventCbInfo> mStreamEventCbInfoList = 3353 new LinkedList<StreamEventCbInfo>(); 3354 /** 3355 * Dedicated thread for handling the StreamEvent callbacks 3356 */ 3357 private @Nullable HandlerThread mStreamEventHandlerThread; 3358 private @Nullable volatile StreamEventHandler mStreamEventHandler; 3359 3360 /** 3361 * Called from native AudioTrack callback thread, filter messages if necessary 3362 * and repost event on AudioTrack message loop to prevent blocking native thread. 
     * @param what event code received from native
     * @param arg optional argument for event
     */
    void handleStreamEventFromNative(int what, int arg) {
        // No handler means no registered callbacks; drop the event.
        if (mStreamEventHandler == null) {
            return;
        }
        switch (what) {
            case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                // replace previous CAN_WRITE_MORE_DATA messages with the latest value
                mStreamEventHandler.removeMessages(NATIVE_EVENT_CAN_WRITE_MORE_DATA);
                mStreamEventHandler.sendMessage(
                        mStreamEventHandler.obtainMessage(
                                NATIVE_EVENT_CAN_WRITE_MORE_DATA, arg, 0/*ignored*/));
                break;
            case NATIVE_EVENT_NEW_IAUDIOTRACK:
                mStreamEventHandler.sendMessage(
                        mStreamEventHandler.obtainMessage(NATIVE_EVENT_NEW_IAUDIOTRACK));
                break;
            case NATIVE_EVENT_STREAM_END:
                mStreamEventHandler.sendMessage(
                        mStreamEventHandler.obtainMessage(NATIVE_EVENT_STREAM_END));
                break;
        }
    }

    /** Handler running on the dedicated stream-event thread; dispatches native events. */
    private class StreamEventHandler extends Handler {

        StreamEventHandler(Looper looper) {
            super(looper);
        }

        @Override
        public void handleMessage(Message msg) {
            final LinkedList<StreamEventCbInfo> cbInfoList;
            synchronized (mStreamEventCbLock) {
                if (msg.what == NATIVE_EVENT_STREAM_END) {
                    synchronized (mPlayStateLock) {
                        if (mPlayState == PLAYSTATE_STOPPING) {
                            if (mOffloadEosPending) {
                                // A new play() was requested while stopping: restart.
                                native_start();
                                mPlayState = PLAYSTATE_PLAYING;
                            } else {
                                mAvSyncHeader = null;
                                mAvSyncBytesRemaining = 0;
                                mPlayState = PLAYSTATE_STOPPED;
                            }
                            mOffloadEosPending = false;
                            // Wake writers blocked in blockUntilOffloadDrain().
                            mPlayStateLock.notify();
                        }
                    }
                }
                if (mStreamEventCbInfoList.size() == 0) {
                    return;
                }
                // Snapshot under the lock so callbacks run without holding it.
                cbInfoList = new LinkedList<StreamEventCbInfo>(mStreamEventCbInfoList);
            }

            // Run app callbacks with the caller identity cleared.
            final long identity = Binder.clearCallingIdentity();
            try {
                for (StreamEventCbInfo cbi : cbInfoList) {
                    switch (msg.what) {
                        case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onDataRequest(AudioTrack.this, msg.arg1));
                            break;
                        case NATIVE_EVENT_NEW_IAUDIOTRACK:
                            // TODO also release track as it's not longer usable
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onTearDown(AudioTrack.this));
                            break;
                        case NATIVE_EVENT_STREAM_END:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onPresentationEnded(AudioTrack.this));
                            break;
                    }
                }
            } finally {
                Binder.restoreCallingIdentity(identity);
            }
        }
    }

    @GuardedBy("mStreamEventCbLock")
    private void beginStreamEventHandling() {
        // Lazily start the dedicated stream-event thread on first callback registration.
        if (mStreamEventHandlerThread == null) {
            mStreamEventHandlerThread = new HandlerThread(TAG + ".StreamEvent");
            mStreamEventHandlerThread.start();
            final Looper looper = mStreamEventHandlerThread.getLooper();
            if (looper != null) {
                mStreamEventHandler = new StreamEventHandler(looper);
            }
        }
    }

    @GuardedBy("mStreamEventCbLock")
    private void endStreamEventHandling() {
        // Quit the stream-event thread when the last callback is unregistered.
        if (mStreamEventHandlerThread != null) {
            mStreamEventHandlerThread.quit();
            mStreamEventHandlerThread = null;
        }
    }

    //---------------------------------------------------------
    // Inner classes
    //--------------------
    /**
     * Helper class to handle the forwarding of native events to the appropriate listener
     * (potentially) handled in a different thread
     */
    private class NativePositionEventHandlerDelegate {
        private final Handler mHandler;

        NativePositionEventHandlerDelegate(final AudioTrack track,
                                           final OnPlaybackPositionUpdateListener listener,
                                           Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                            case NATIVE_EVENT_MARKER:
                                if (listener != null) {
                                    listener.onMarkerReached(track);
                                }
                                break;
                            case NATIVE_EVENT_NEW_POS:
                                if (listener != null) {
                                    listener.onPeriodicNotification(track);
                                }
                                break;
                            default:
                                loge("Unknown native event type: " + msg.what);
                                break;
                        }
                    }
                };
            } else {
                // No looper available: events for this listener are silently dropped.
                mHandler = null;
            }
        }

        Handler getHandler() {
            return mHandler;
        }
    }

    //---------------------------------------------------------
    // Methods for IPlayer interface
    //--------------------
    @Override
    void playerStart() {
        play();
    }

    @Override
    void playerPause() {
        pause();
    }

    @Override
    void playerStop() {
        stop();
    }

    //---------------------------------------------------------
    // Java methods called from the native side
    //--------------------
    @SuppressWarnings("unused")
    @UnsupportedAppUsage
    private static void postEventFromNative(Object audiotrack_ref,
            int what, int arg1, int arg2, Object obj) {
        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
        // The native side holds only a weak reference; the track may already be collected.
        final AudioTrack track = (AudioTrack) ((WeakReference) audiotrack_ref).get();
        if (track == null) {
            return;
        }

        if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
            track.broadcastRoutingChange();
            return;
        }

        // Stream events are filtered and re-posted on the dedicated stream-event thread.
        if (what == NATIVE_EVENT_CAN_WRITE_MORE_DATA
                || what == NATIVE_EVENT_NEW_IAUDIOTRACK
                || what == NATIVE_EVENT_STREAM_END) {
            track.handleStreamEventFromNative(what, arg1);
            return;
        }

        // Position events (marker/period) go to the app-provided handler, if any.
        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
        if (delegate != null) {
            Handler handler = delegate.getHandler();
            if (handler != null) {
                Message m = handler.obtainMessage(what, arg1, arg2, obj);
                handler.sendMessage(m);
            }
        }
    }

    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    private static native boolean native_is_direct_output_supported(int encoding, int sampleRate,
            int channelMask, int channelIndexMask, int contentType, int usage, int flags);

    // post-condition: mStreamType is overwritten with a value
    // that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    // AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack,
            boolean offload);

    private native final void native_finalize();

    /**
     * @hide
     */
    @UnsupportedAppUsage
    public native final void native_release();

    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(ByteBuffer audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    private native int native_applyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation);

    private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
    private native final int native_setPresentation(int presentationId, int programId);

    private native int native_getPortId();
    // Delay/padding frames for compressed (offloaded) playback.
    private native void native_set_delay_padding(int delayInFrames, int paddingInFrames);

    //---------------------------------------------------------
    // Utility methods
    //------------------

    // Debug-level log helper using the class TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Error-level log helper using the class TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }

    /**
     * Key names for the {@link PersistableBundle} returned by
     * {@code AudioTrack#getMetrics}. Non-instantiable constants holder.
     */
    public final static class MetricsConstants
    {
        // Non-instantiable: constants holder only.
        private MetricsConstants() {}

        // MM_PREFIX is slightly different than TAG, used to avoid cut-n-paste errors.
        private static final String MM_PREFIX = "android.media.audiotrack.";

        /**
         * Key to extract the stream type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * This value may not exist in API level {@link android.os.Build.VERSION_CODES#P}.
         * The value is a {@code String}.
         */
        public static final String STREAMTYPE = MM_PREFIX + "streamtype";

        /**
         * Key to extract the attribute content type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String CONTENTTYPE = MM_PREFIX + "type";

        /**
         * Key to extract the attribute usage for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String USAGE = MM_PREFIX + "usage";

        /**
         * Key to extract the sample rate for this track in Hz
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is an {@code int}.
         * @deprecated This does not work. Use {@link AudioTrack#getSampleRate()} instead.
         */
        // NOTE: the "audiorecord" prefix below is intentional-as-shipped; the key is
        // broken (hence @Deprecated) but its value is public API and must not change.
        @Deprecated
        public static final String SAMPLERATE = "android.media.audiorecord.samplerate";

        /**
         * Key to extract the native channel mask information for this track
         * from the {@link AudioTrack#getMetrics} return value.
         *
         * The value is a {@code long}.
         * @deprecated This does not work. Use {@link AudioTrack#getFormat()} and read from
         * the returned format instead.
         */
        // NOTE: same as SAMPLERATE above — wrong "audiorecord" prefix, kept for
        // binary compatibility of the published constant value.
        @Deprecated
        public static final String CHANNELMASK = "android.media.audiorecord.channelmask";

        /**
         * Use for testing only. Do not expose.
         * The current sample rate.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String SAMPLE_RATE = MM_PREFIX + "sampleRate";

        /**
         * Use for testing only. Do not expose.
         * The native channel mask.
         * The value is a {@code long}.
         * @hide
         */
        @TestApi
        public static final String CHANNEL_MASK = MM_PREFIX + "channelMask";

        /**
         * Use for testing only. Do not expose.
         * The output audio data encoding.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ENCODING = MM_PREFIX + "encoding";

        /**
         * Use for testing only. Do not expose.
         * The port id of this track port in audioserver.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String PORT_ID = MM_PREFIX + "portId";

        /**
         * Use for testing only. Do not expose.
         * The buffer frameCount.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String FRAME_COUNT = MM_PREFIX + "frameCount";

        /**
         * Use for testing only. Do not expose.
         * The actual track attributes used.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ATTRIBUTES = MM_PREFIX + "attributes";
    }
}