/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.media.audio.cts;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.testng.Assert.assertThrows;

import android.app.ActivityManager;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioAttributes;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioMetadataReadMap;
import android.media.AudioPresentation;
import android.media.AudioSystem;
import android.media.AudioTimestamp;
import android.media.AudioTrack;
import android.media.PlaybackParams;
import android.media.cts.AudioHelper;
import android.media.metrics.LogSessionId;
import android.media.metrics.MediaMetricsManager;
import android.media.metrics.PlaybackSession;
import android.os.PersistableBundle;
import android.os.SystemClock;
import android.platform.test.annotations.Presubmit;
import android.util.Log;

import androidx.test.InstrumentationRegistry;
import androidx.test.filters.LargeTest;
import androidx.test.runner.AndroidJUnit4;

import com.android.compatibility.common.util.NonMainlineTest;

import org.junit.Test;
import org.junit.runner.RunWith;

import java.nio.ByteBuffer;
import java.nio.FloatBuffer;
import java.nio.ShortBuffer;
import java.util.concurrent.Executor;

@NonMainlineTest
@RunWith(AndroidJUnit4.class)
public class AudioTrackTest {
    private String TAG = "AudioTrackTest";
    private final long WAIT_MSEC = 200;
    private final int OFFSET_DEFAULT = 0;
    private final int OFFSET_NEGATIVE = -10;

    private void log(String testName, String message) {
        Log.v(TAG, "[" + testName + "] " + message);
    }

    private void loge(String testName, String message) {
        Log.e(TAG, "[" + testName + "] " + message);
    }

    // -----------------------------------------------------------------
    // private class to hold test results
    private static class TestResults {
        public boolean mResult = false;
        public String mResultLog = "";

        public TestResults(boolean b, String s) {
            mResult = b;
            mResultLog = s;
        }
    }

    // -----------------------------------------------------------------
    // generic test methods
    public TestResults constructorTestMultiSampleRate(
            // parameters tested by this method
            int _inTest_streamType, int _inTest_mode, int _inTest_config, int _inTest_format,
            // parameter-dependent expected results
            int _expected_stateForMode) {

        int[] testSampleRates = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 };
        String failedRates = "Failure for rate(s): ";
        boolean localRes, finalRes = true;

        for (int i = 0; i < testSampleRates.length; i++) {
            AudioTrack track = null;
            try {
                track = new AudioTrack(_inTest_streamType, testSampleRates[i], _inTest_config,
                        _inTest_format, AudioTrack.getMinBufferSize(testSampleRates[i],
                                _inTest_config, _inTest_format), _inTest_mode);
            } catch (IllegalArgumentException iae) {
                Log.e("MediaAudioTrackTest", "[ constructorTestMultiSampleRate ] exception at SR "
                        + testSampleRates[i] + ": \n" + iae);
                localRes = false;
            }
            if (track != null) {
                localRes = (track.getState() == _expected_stateForMode);
                track.release();
            } else {
                localRes = false;
            }

            if (!localRes) {
                // log the error for the test runner
                failedRates += Integer.toString(testSampleRates[i]) + "Hz ";
                // log the error for logcat
                log("constructorTestMultiSampleRate", "failed to construct "
                        + "AudioTrack(streamType=" + _inTest_streamType
                        + ", sampleRateInHz=" + testSampleRates[i]
                        + ", channelConfig=" + _inTest_config
                        + ", audioFormat=" + _inTest_format
                        + ", bufferSizeInBytes=" + AudioTrack.getMinBufferSize(testSampleRates[i],
                                _inTest_config, AudioFormat.ENCODING_PCM_16BIT)
                        + ", mode=" + _inTest_mode);
                // mark test as failed
                finalRes = false;
            }
        }
        return new TestResults(finalRes, failedRates);
    }

    // -----------------------------------------------------------------
    // AUDIOTRACK TESTS:
    // ----------------------------------

    // -----------------------------------------------------------------
    // AudioTrack constructor and AudioTrack.getMinBufferSize(...) for 16bit PCM
    // ----------------------------------

    // Test case 1: constructor for streaming AudioTrack, mono, 16bit at misc
    // valid sample rates
    @Test
    public void testConstructorMono16MusicStream() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_INITIALIZED);

        assertTrue("testConstructorMono16MusicStream: " + res.mResultLog, res.mResult);
    }

    // Test case 2: constructor for streaming AudioTrack, stereo, 16bit at misc
    // valid sample rates
    @Test
    public void testConstructorStereo16MusicStream() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
                AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_INITIALIZED);

        assertTrue("testConstructorStereo16MusicStream: " + res.mResultLog, res.mResult);
    }

    // Test case 3: constructor for static AudioTrack, mono, 16bit at misc valid
    // sample rates
    @Test
    public void testConstructorMono16MusicStatic() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_NO_STATIC_DATA);

        assertTrue("testConstructorMono16MusicStatic: " + res.mResultLog, res.mResult);
    }

    // Test case 4: constructor for static AudioTrack, stereo, 16bit at misc
    // valid sample rates
    @Test
    public void testConstructorStereo16MusicStatic() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
                AudioFormat.ENCODING_PCM_16BIT, AudioTrack.STATE_NO_STATIC_DATA);

        assertTrue("testConstructorStereo16MusicStatic: " + res.mResultLog, res.mResult);
    }

    // -----------------------------------------------------------------
    // AudioTrack constructor and AudioTrack.getMinBufferSize(...) for 8bit PCM
    // ----------------------------------

    // Test case 1: constructor for streaming AudioTrack, mono, 8bit at misc
    // valid sample rates
    @Test
    public void testConstructorMono8MusicStream() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_INITIALIZED);

        assertTrue("testConstructorMono8MusicStream: " + res.mResultLog, res.mResult);
    }

    // Test case 2: constructor for streaming AudioTrack, stereo, 8bit at misc
    // valid sample rates
    @Test
    public void testConstructorStereo8MusicStream() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STREAM, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
                AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_INITIALIZED);

        assertTrue("testConstructorStereo8MusicStream: " + res.mResultLog, res.mResult);
    }

    // Test case 3: constructor for static AudioTrack, mono, 8bit at misc valid
    // sample rates
    @Test
    public void testConstructorMono8MusicStatic() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_NO_STATIC_DATA);

        assertTrue("testConstructorMono8MusicStatic: " + res.mResultLog, res.mResult);
    }

    // Test case 4: constructor for static AudioTrack, stereo, 8bit at misc
    // valid sample rates
    @Test
    public void testConstructorStereo8MusicStatic() throws Exception {

        TestResults res = constructorTestMultiSampleRate(AudioManager.STREAM_MUSIC,
                AudioTrack.MODE_STATIC, AudioFormat.CHANNEL_CONFIGURATION_STEREO,
                AudioFormat.ENCODING_PCM_8BIT, AudioTrack.STATE_NO_STATIC_DATA);

        assertTrue("testConstructorStereo8MusicStatic: " + res.mResultLog, res.mResult);
    }

    // -----------------------------------------------------------------
    // AudioTrack constructor for all stream types
    // ----------------------------------

    // Test case 1: constructor for all stream types
    @Test
    public void testConstructorStreamType() throws Exception {
        // constants for test
        final int TYPE_TEST_SR = 22050;
        final int TYPE_TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TYPE_TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TYPE_TEST_MODE = AudioTrack.MODE_STREAM;
        final int[] STREAM_TYPES = { AudioManager.STREAM_ALARM, AudioManager.STREAM_MUSIC,
                AudioManager.STREAM_NOTIFICATION, AudioManager.STREAM_RING,
                AudioManager.STREAM_SYSTEM, AudioManager.STREAM_VOICE_CALL };
        final String[] STREAM_NAMES = { "STREAM_ALARM", "STREAM_MUSIC", "STREAM_NOTIFICATION",
                "STREAM_RING", "STREAM_SYSTEM", "STREAM_VOICE_CALL" };

        boolean localTestRes = true;
        AudioTrack track = null;
        // test: loop constructor on all stream types
        for (int i = 0; i < STREAM_TYPES.length; i++) {
            try {
                // -------- initialization --------------
                track = new AudioTrack(STREAM_TYPES[i], TYPE_TEST_SR, TYPE_TEST_CONF,
                        TYPE_TEST_FORMAT, AudioTrack.getMinBufferSize(TYPE_TEST_SR, TYPE_TEST_CONF,
                                TYPE_TEST_FORMAT), TYPE_TEST_MODE);
            } catch (IllegalArgumentException iae) {
                loge("testConstructorStreamType", "exception for stream type " + STREAM_NAMES[i]
                        + ": " + iae);
                localTestRes = false;
            }
            // -------- test --------------
            if (track != null) {
                if (track.getState() != AudioTrack.STATE_INITIALIZED) {
                    localTestRes = false;
                    Log.e("MediaAudioTrackTest",
                            "[ testConstructorStreamType ] failed for stream type "
                                    + STREAM_NAMES[i]);
                }
                // -------- tear down --------------
                track.release();
            } else {
                localTestRes = false;
            }
        }

        assertTrue("testConstructorStreamType", localTestRes);
    }

    // -----------------------------------------------------------------
    // AudioTrack construction with Builder
    // ----------------------------------

    // Test case 1: build AudioTrack with default parameters, test documented default params
    @Test
    public void testBuilderDefault() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderDefault";
        final int expectedDefaultEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedDefaultRate =
                AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC);
        final int expectedDefaultChannels = AudioFormat.CHANNEL_OUT_STEREO;
        // use Builder
        final int buffSizeInBytes = AudioTrack.getMinBufferSize(
                expectedDefaultRate, expectedDefaultChannels, expectedDefaultEncoding);
        final AudioTrack track = new AudioTrack.Builder()
                .setBufferSizeInBytes(buffSizeInBytes)
                .build();
        // save results
        final int observedState = track.getState();
        final int observedFormat = track.getAudioFormat();
        final int observedChannelConf = track.getChannelConfiguration();
        final int observedRate = track.getSampleRate();
        // release track before the test exits (either successfully or with an exception)
        track.release();
        // compare results
        assertEquals(TEST_NAME + ": Track initialized", AudioTrack.STATE_INITIALIZED,
                observedState);
        assertEquals(TEST_NAME + ": Default track encoding", expectedDefaultEncoding,
                observedFormat);
        assertEquals(TEST_NAME + ": Default track channels", expectedDefaultChannels,
                observedChannelConf);
    }

    // Test case 2: build AudioTrack with AudioFormat, test it's used
    @Test
    public void testBuilderFormat() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderFormat";
        final int TEST_RATE = 32000;
        final int TEST_CHANNELS = AudioFormat.CHANNEL_OUT_STEREO;
        // use Builder
        final int buffSizeInBytes = AudioTrack.getMinBufferSize(
                TEST_RATE, TEST_CHANNELS, AudioFormat.ENCODING_PCM_16BIT);
        final AudioTrack track = new AudioTrack.Builder()
                .setAudioAttributes(new AudioAttributes.Builder().build())
                .setBufferSizeInBytes(buffSizeInBytes)
                .setAudioFormat(new AudioFormat.Builder()
                        .setChannelMask(TEST_CHANNELS).setSampleRate(TEST_RATE).build())
                .build();
        // save results
        final int observedState = track.getState();
        final int observedChannelConf = track.getChannelConfiguration();
        final int observedRate = track.getSampleRate();
        // release track before the test exits (either successfully or with an exception)
        track.release();
        // compare results
        assertEquals(TEST_NAME + ": Track initialized", AudioTrack.STATE_INITIALIZED,
                observedState);
        assertEquals(TEST_NAME + ": Track channels", TEST_CHANNELS, observedChannelConf);
        assertEquals(TEST_NAME + ": Track sample rate", TEST_RATE, observedRate);
    }

    // Test case 3: build AudioTrack with session ID, test it's used
    @Test
    public void testBuilderSession() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderSession";
        // generate a session ID
        final int expectedSessionId = new AudioManager(getContext()).generateAudioSessionId();
        // use builder
        final AudioTrack track = new AudioTrack.Builder()
                .setSessionId(expectedSessionId)
                .build();
        // save results
        final int observedSessionId = track.getAudioSessionId();
        // release track before the test exits (either successfully or with an exception)
        track.release();
        // compare results
        assertEquals(TEST_NAME + ": Assigned track session ID", expectedSessionId,
                observedSessionId);
    }

    // Test case 4: build AudioTrack with AudioAttributes built from stream type, test it's used
    @Test
    public void testBuilderAttributesStream() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderAttributesStream";
        // use a stream type documented in AudioAttributes.Builder.setLegacyStreamType(int)
        final int expectedStreamType = AudioManager.STREAM_ALARM;
        final int expectedContentType = AudioAttributes.CONTENT_TYPE_SPEECH;
        final AudioAttributes attributes = new AudioAttributes.Builder()
                .setLegacyStreamType(expectedStreamType)
                .setContentType(expectedContentType)
                .build();
        // use builder
        final AudioTrack track = new AudioTrack.Builder()
                .setAudioAttributes(attributes)
                .build();
        // save results
        final int observedStreamType = track.getStreamType();
        final AudioAttributes observedAttributes = track.getAudioAttributes();

        // release track before the test exits (either successfully or with an exception)
        track.release();
        // compare results
        assertEquals(TEST_NAME + ": track stream type", expectedStreamType, observedStreamType);
        // attributes and observedAttributes should satisfy the overloaded equals.
        assertEquals(TEST_NAME + ": observed attributes must match",
                attributes, observedAttributes);
        // also test content type was preserved in the attributes even though they
        // were first configured with a legacy stream type
        assertEquals(TEST_NAME + ": attributes content type", expectedContentType,
                attributes.getContentType());
    }

    // Test case 5: build AudioTrack with attributes and performance mode
    @Test
    public void testBuilderAttributesPerformanceMode() throws Exception {
        // constants for test
        final String TEST_NAME = "testBuilderAttributesPerformanceMode";
        final int testPerformanceModes[] = new int[] {
            AudioTrack.PERFORMANCE_MODE_NONE,
            AudioTrack.PERFORMANCE_MODE_LOW_LATENCY,
            AudioTrack.PERFORMANCE_MODE_POWER_SAVING,
        };
        // construct various attributes with different preset performance modes.
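        // Note: the attribute flags used below (FLAG_LOW_LATENCY, FLAG_DEEP_BUFFER) can also
        // influence the effective performance mode, and device support for the requested mode
        // varies; this is why the assertion at the end of the loop accepts
        // PERFORMANCE_MODE_NONE as a fallback for either the requested or the reported mode.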
        final AudioAttributes testAttributes[] = new AudioAttributes[] {
            new AudioAttributes.Builder().build(),
            new AudioAttributes.Builder().setFlags(AudioAttributes.FLAG_LOW_LATENCY).build(),
            new AudioAttributes.Builder().setFlags(AudioAttributes.FLAG_DEEP_BUFFER).build(),
        };
        for (int performanceMode : testPerformanceModes) {
            for (AudioAttributes attributes : testAttributes) {
                final AudioTrack track = new AudioTrack.Builder()
                        .setPerformanceMode(performanceMode)
                        .setAudioAttributes(attributes)
                        .build();
                // save results
                final int actualPerformanceMode = track.getPerformanceMode();
                // release track before the test exits
                track.release();
                final String result = "Attribute flags: " + attributes.getAllFlags()
                        + " set performance mode: " + performanceMode
                        + " actual performance mode: " + actualPerformanceMode;
                Log.d(TEST_NAME, result);
                assertTrue(TEST_NAME + ": " + result,
                        actualPerformanceMode == performanceMode // either successful
                        || actualPerformanceMode == AudioTrack.PERFORMANCE_MODE_NONE // or none
                        || performanceMode == AudioTrack.PERFORMANCE_MODE_NONE);
            }
        }
    }

    // Test case 6: build AudioTrack with Context and otherwise default arguments, expect success.
    @Test
    public void testBuilderWithContext() {
        final int expectedDefaultEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedDefaultRate =
                AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC);
        final int expectedDefaultChannels = AudioFormat.CHANNEL_OUT_STEREO;

        final AudioTrack track = new AudioTrack.Builder()
                .setContext(getContext())
                .build();

        assertEquals(AudioTrack.STATE_INITIALIZED, track.getState());
        assertEquals(expectedDefaultEncoding, track.getAudioFormat());
        assertEquals(expectedDefaultRate, track.getSampleRate());
        assertEquals(expectedDefaultChannels, track.getChannelConfiguration());
    }

    // Test case 7: build AudioTrack with a null Context, expect NullPointerException.
    @Test
    public void testBuilderWithNullContext() {
        assertThrows(NullPointerException.class, () -> new AudioTrack.Builder()
                .setContext(/*context=*/null)
                .build());
    }

    // -----------------------------------------------------------------
    // Playback head position
    // ----------------------------------

    // Test case 1: getPlaybackHeadPosition() at 0 after initialization
    @Test
    public void testPlaybackHeadPositionAfterInit() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterInit";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT), TEST_MODE);
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.getPlaybackHeadPosition() == 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 2: getPlaybackHeadPosition() increases after play()
    @Test
    public void testPlaybackHeadPositionIncrease() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionIncrease";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        Thread.sleep(100);
        log(TEST_NAME, "position =" + track.getPlaybackHeadPosition());
        assertTrue(TEST_NAME, track.getPlaybackHeadPosition() > 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 3: getPlaybackHeadPosition() is 0 after flush();
    @Test
    public void testPlaybackHeadPositionAfterFlush() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterFlush";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        Thread.sleep(WAIT_MSEC);
        track.stop();
        track.flush();
        log(TEST_NAME, "position =" + track.getPlaybackHeadPosition());
        assertTrue(TEST_NAME, track.getPlaybackHeadPosition() == 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 4: getPlaybackHeadPosition() is 0 after stop();
    @Test
    public void testPlaybackHeadPositionAfterStop() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterStop";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        final int TEST_LOOP_CNT = 10;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        Thread.sleep(WAIT_MSEC);
        track.stop();
        int count = 0;
        int pos;
        do {
            Thread.sleep(WAIT_MSEC);
            pos = track.getPlaybackHeadPosition();
            count++;
        } while ((pos != 0) && (count < TEST_LOOP_CNT));
        log(TEST_NAME, "position =" + pos + ", read count =" + count);
        assertTrue(TEST_NAME, pos == 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 5: getPlaybackHeadPosition() is > 0 after play(); pause();
    @Test
    public void testPlaybackHeadPositionAfterPause() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterPause";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        Thread.sleep(100);
        track.pause();
        int pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position =" + pos);
        assertTrue(TEST_NAME, pos > 0);
        // -------- tear down --------------
        track.release();
    }

    // Test case 6: getPlaybackHeadPosition() remains 0 after pause(); flush(); play();
    @Test
    public void testPlaybackHeadPositionAfterFlushAndPlay() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterFlushAndPlay";
        final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        final int TEST_SR = AudioTrack.getNativeOutputSampleRate(TEST_STREAM_TYPE);

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        Thread.sleep(100);
        track.pause();

        int pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position after pause =" + pos);
        assertTrue(TEST_NAME, pos > 0);

        track.flush();
        pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position after flush =" + pos);
        assertTrue(TEST_NAME, pos == 0);

        track.play();
        pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position after play =" + pos);
        assertTrue(TEST_NAME, pos == 0);

        Thread.sleep(100);
        pos = track.getPlaybackHeadPosition();
        log(TEST_NAME, "position after 100 ms sleep =" + pos);
        assertTrue(TEST_NAME, pos == 0);
        // -------- tear down --------------
        track.release();
    }

    // -----------------------------------------------------------------
    // Playback properties
    // ----------------------------------

    // Common code for the testSetStereoVolume* and testSetVolume* tests
    private void testSetVolumeCommon(String testName, float vol, boolean isStereo)
            throws Exception {
        // constants for test
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        if (isStereo) {
            // TODO to really test this, do a pan instead of using same value for left and right
            assertTrue(testName, track.setStereoVolume(vol, vol) == AudioTrack.SUCCESS);
        } else {
            assertTrue(testName, track.setVolume(vol) == AudioTrack.SUCCESS);
        }
        // -------- tear down --------------
        track.release();
    }

    // Test case 1: setStereoVolume() with max volume returns SUCCESS
    @Test
    public void testSetStereoVolumeMax() throws Exception {
        final String TEST_NAME = "testSetStereoVolumeMax";
        float maxVol = AudioTrack.getMaxVolume();
        testSetVolumeCommon(TEST_NAME, maxVol, true /*isStereo*/);
    }

    // Test case 2: setStereoVolume() with min volume returns SUCCESS
    @Test
    public void testSetStereoVolumeMin() throws Exception {
        final String TEST_NAME = "testSetStereoVolumeMin";
        float minVol = AudioTrack.getMinVolume();
        testSetVolumeCommon(TEST_NAME, minVol, true /*isStereo*/);
    }

    // Test case 3: setStereoVolume() with mid volume returns SUCCESS
    @Test
    public void testSetStereoVolumeMid() throws Exception {
        final String TEST_NAME = "testSetStereoVolumeMid";
        float midVol = (AudioTrack.getMaxVolume() - AudioTrack.getMinVolume()) / 2;
        testSetVolumeCommon(TEST_NAME, midVol, true /*isStereo*/);
    }

    // Test case 4: setPlaybackRate() with half the content rate returns SUCCESS
    @Test
    public void testSetPlaybackRate() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackRate";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.play();
        assertTrue(TEST_NAME, track.setPlaybackRate((int) (TEST_SR / 2)) == AudioTrack.SUCCESS);
        // -------- tear down --------------
        track.release();
    }

    // Test case 5: setPlaybackRate(0) returns bad value error
    @Test
    public void testSetPlaybackRateZero() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackRateZero";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                minBuffSize, TEST_MODE);
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.setPlaybackRate(0) == AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // Test case 6: setPlaybackRate() accepts values twice the output sample
    // rate
    @Test
    public void testSetPlaybackRateTwiceOutputSR() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackRateTwiceOutputSR";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        int outputSR = AudioTrack.getNativeOutputSampleRate(TEST_STREAM_TYPE);
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.play();
        assertTrue(TEST_NAME, track.setPlaybackRate(2 * outputSR) == AudioTrack.SUCCESS);
        // -------- tear down --------------
        track.release();
    }

    // Test case 7: setPlaybackRate() and retrieve value, should be the same for
    // half the content SR
    @Test
    public void testSetGetPlaybackRate() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetGetPlaybackRate";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.play();
        track.setPlaybackRate((int) (TEST_SR / 2));
        assertTrue(TEST_NAME, track.getPlaybackRate() == (int) (TEST_SR / 2));
        // -------- tear down --------------
        track.release();
    }

    // Test case 8: setPlaybackRate() invalid operation if track not initialized
    @Test
    public void testSetPlaybackRateUninit() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackRateUninit";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                minBuffSize, TEST_MODE);
        // -------- test --------------
        assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
        assertEquals(TEST_NAME, AudioTrack.ERROR_INVALID_OPERATION,
                track.setPlaybackRate(TEST_SR / 2));
        // -------- tear down --------------
        track.release();
    }

    // Test case 9: setVolume() with max volume returns SUCCESS
    @Test
    public void testSetVolumeMax() throws Exception {
        final String TEST_NAME = "testSetVolumeMax";
        float maxVol = AudioTrack.getMaxVolume();
        testSetVolumeCommon(TEST_NAME, maxVol, false /*isStereo*/);
    }

    // Test case 10: setVolume() with min volume returns SUCCESS
    @Test
    public void testSetVolumeMin() throws Exception {
        final String TEST_NAME = "testSetVolumeMin";
        float minVol = AudioTrack.getMinVolume();
        testSetVolumeCommon(TEST_NAME, minVol, false /*isStereo*/);
    }

    // Test case 11: setVolume() with mid volume returns SUCCESS
    @Test
    public void testSetVolumeMid() throws Exception {
        final String TEST_NAME = "testSetVolumeMid";
        float midVol = (AudioTrack.getMaxVolume() - AudioTrack.getMinVolume()) / 2;
        testSetVolumeCommon(TEST_NAME, midVol, false /*isStereo*/);
    }

    // -----------------------------------------------------------------
    // Playback progress
    // ----------------------------------

    // Test case 1: setPlaybackHeadPosition() on playing track
    @Test
    public void testSetPlaybackHeadPositionPlaying() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackHeadPositionPlaying";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        assertTrue(TEST_NAME,
                track.setPlaybackHeadPosition(10) == AudioTrack.ERROR_INVALID_OPERATION);
        // -------- tear down --------------
        track.release();
    }

    // Test case 2: setPlaybackHeadPosition() on stopped track
    @Test
    public void testSetPlaybackHeadPositionStopped() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackHeadPositionStopped";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState());
        track.play();
        track.stop();
        assertEquals(TEST_NAME, AudioTrack.PLAYSTATE_STOPPED, track.getPlayState());
        assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackHeadPosition(10));
        // -------- tear down --------------
        track.release();
    }

    // Test case 3: setPlaybackHeadPosition() on paused track
    @Test
    public void testSetPlaybackHeadPositionPaused() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackHeadPositionPaused";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState());
        track.play();
        track.pause();
        assertEquals(TEST_NAME, AudioTrack.PLAYSTATE_PAUSED, track.getPlayState());
        assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackHeadPosition(10));
        // -------- tear down --------------
        track.release();
    }

    // Test case 4: setPlaybackHeadPosition() beyond what has been written
    @Test
    public void testSetPlaybackHeadPositionTooFar() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetPlaybackHeadPositionTooFar";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // make up a frame index that's beyond what has been written: go from buffer size to
        // frame count (given the audio track properties), and add 77.
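        // (The track buffer above holds 2 * minBuffSize bytes, and 16-bit mono PCM uses
        // 2 bytes per frame, so its capacity is (2 * minBuffSize) / 2 frames; adding 77
        // guarantees the target position lies past the end of the written data.)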
        int frameIndexTooFar = (2 * minBuffSize / 2) + 77;
        // -------- test --------------
        assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState());
        track.play();
        track.stop();
        assertEquals(TEST_NAME, AudioTrack.PLAYSTATE_STOPPED, track.getPlayState());
        assertEquals(TEST_NAME, AudioTrack.ERROR_BAD_VALUE,
                track.setPlaybackHeadPosition(frameIndexTooFar));
        // -------- tear down --------------
        track.release();
    }

    // Test case 5: setLoopPoints() fails for MODE_STREAM
    @Test
    public void testSetLoopPointsStream() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetLoopPointsStream";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.setLoopPoints(2, 50, 2) == AudioTrack.ERROR_INVALID_OPERATION);
        // -------- tear down --------------
        track.release();
    }

    // Test case 6: setLoopPoints() fails start > end
    @Test
    public void testSetLoopPointsStartAfterEnd() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetLoopPointsStartAfterEnd";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.setLoopPoints(50, 0, 2) == AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // Test case 7: setLoopPoints() success
    @Test
    public void testSetLoopPointsSuccess() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetLoopPointsSuccess";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.setLoopPoints(0, 50, 2) == AudioTrack.SUCCESS);
        // -------- tear down --------------
        track.release();
    }

    // Test case 8: setLoopPoints() fails with loop length bigger than content
    @Test
    public void testSetLoopPointsLoopTooLong() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetLoopPointsLoopTooLong";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        int dataSizeInFrames = minBuffSize / 2;
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_NO_STATIC_DATA);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.setLoopPoints(10, dataSizeInFrames + 20, 2) ==
                AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // Test case 9: setLoopPoints() fails with start beyond what can be written
    // for the track
    @Test
    public void testSetLoopPointsStartTooFar() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetLoopPointsStartTooFar";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        int dataSizeInFrames = minBuffSize / 2; // 16bit data
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_NO_STATIC_DATA);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME,
                track.setLoopPoints(dataSizeInFrames + 20, dataSizeInFrames + 50, 2) ==
                AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // Test case 10: setLoopPoints() fails with end beyond what can be written
    // for the track
    @Test
    public void testSetLoopPointsEndTooFar() throws Exception {
        // constants for test
        final String TEST_NAME = "testSetLoopPointsEndTooFar";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        int dataSizeInFrames = minBuffSize / 2; // 16bit data
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_NO_STATIC_DATA);
        track.write(data, OFFSET_DEFAULT, data.length);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        int loopCount = 2;
        assertTrue(TEST_NAME,
                track.setLoopPoints(dataSizeInFrames - 10, dataSizeInFrames + 50, loopCount) ==
                AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // -----------------------------------------------------------------
    // Audio data supply
    // ----------------------------------

    // Test case 1: write() fails when supplying less data (bytes) than declared
    @Test
    public void testWriteByteOffsetTooBig() throws Exception {
        // constants for test
        final String TEST_NAME = "testWriteByteOffsetTooBig";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        int offset = 10;
        assertTrue(TEST_NAME,
                track.write(data, offset, data.length) == AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // Test case 2: write() fails when supplying less data (shorts) than
    // declared
    @Test
    public void testWriteShortOffsetTooBig() throws Exception {
        // constants for test
        final String TEST_NAME = "testWriteShortOffsetTooBig";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        short data[] = new short[minBuffSize / 2];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        int offset = 10;
        assertTrue(TEST_NAME, track.write(data, offset, data.length)
                == AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // Test case 3: write() fails when supplying less data (bytes) than declared
    @Test
    public void testWriteByteSizeTooBig() throws Exception {
        // constants for test
        final String TEST_NAME = "testWriteByteSizeTooBig";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length + 10)
                == AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // Test case 4: write() fails when supplying less data (shorts) than
    // declared
    @Test
    public void testWriteShortSizeTooBig() throws Exception {
        // constants for test
        final String TEST_NAME = "testWriteShortSizeTooBig";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        short data[] = new short[minBuffSize / 2];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length + 10)
                == AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

    // Test case 5: write() fails with negative offset
    @Test
    public void testWriteByteNegativeOffset() throws Exception {
        // constants for test
        final String TEST_NAME = "testWriteByteNegativeOffset";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.write(data, OFFSET_NEGATIVE, data.length - 10) ==
                AudioTrack.ERROR_BAD_VALUE);
        // -------- tear down --------------
        track.release();
    }

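    // Note: the short[] write variants in these tests size their buffers as minBuffSize / 2
    // elements because each 16-bit sample occupies two bytes, so the byte and short arrays
    // describe the same amount of audio data.
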
// Test case 6: write() fails with negative offset 1293 @Test testWriteShortNegativeOffset()1294 public void testWriteShortNegativeOffset() throws Exception { 1295 // constants for test 1296 final String TEST_NAME = "testWriteShortNegativeOffset"; 1297 final int TEST_SR = 22050; 1298 final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; 1299 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; 1300 final int TEST_MODE = AudioTrack.MODE_STREAM; 1301 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 1302 1303 // -------- initialization -------------- 1304 int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); 1305 AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 1306 2 * minBuffSize, TEST_MODE); 1307 short data[] = new short[minBuffSize / 2]; 1308 // -------- test -------------- 1309 assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); 1310 assertTrue(TEST_NAME, 1311 track.write(data, OFFSET_NEGATIVE, data.length - 10) == AudioTrack.ERROR_BAD_VALUE); 1312 // -------- tear down -------------- 1313 track.release(); 1314 } 1315 1316 // Test case 7: write() fails with negative size 1317 @Test testWriteByteNegativeSize()1318 public void testWriteByteNegativeSize() throws Exception { 1319 // constants for test 1320 final String TEST_NAME = "testWriteByteNegativeSize"; 1321 final int TEST_SR = 22050; 1322 final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; 1323 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; 1324 final int TEST_MODE = AudioTrack.MODE_STREAM; 1325 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 1326 1327 // -------- initialization -------------- 1328 int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); 1329 AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 1330 2 * minBuffSize, TEST_MODE); 1331 byte data[] = new byte[minBuffSize]; 1332 // -------- test -------------- 1333 assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); 1334 int dataLength = -10; 1335 assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, dataLength) 1336 == AudioTrack.ERROR_BAD_VALUE); 1337 // -------- tear down -------------- 1338 track.release(); 1339 } 1340 1341 // Test case 8: write() fails with negative size 1342 @Test testWriteShortNegativeSize()1343 public void testWriteShortNegativeSize() throws Exception { 1344 // constants for test 1345 final String TEST_NAME = "testWriteShortNegativeSize"; 1346 final int TEST_SR = 22050; 1347 final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; 1348 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; 1349 final int TEST_MODE = AudioTrack.MODE_STREAM; 1350 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 1351 1352 // -------- initialization -------------- 1353 int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); 1354 AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 1355 2 * minBuffSize, TEST_MODE); 1356 short data[] = new short[minBuffSize / 2]; 1357 // -------- test -------------- 1358 assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); 1359 int dataLength = -10; 1360 assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, dataLength) 1361 == AudioTrack.ERROR_BAD_VALUE); 1362 // -------- tear down -------------- 1363 track.release(); 1364 } 1365 1366 // Test case 9: write() succeeds and returns the size that was written for 1367 // 16bit 1368 @Test 
testWriteByte()1369 public void testWriteByte() throws Exception { 1370 // constants for test 1371 final String TEST_NAME = "testWriteByte"; 1372 final int TEST_SR = 22050; 1373 final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; 1374 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; 1375 final int TEST_MODE = AudioTrack.MODE_STREAM; 1376 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 1377 1378 // -------- initialization -------------- 1379 int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); 1380 AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 1381 2 * minBuffSize, TEST_MODE); 1382 byte data[] = new byte[minBuffSize]; 1383 // -------- test -------------- 1384 assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); 1385 assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length) == data.length); 1386 // -------- tear down -------------- 1387 track.release(); 1388 } 1389 1390 // Test case 10: write() succeeds and returns the size that was written for 1391 // 16bit 1392 @Test testWriteShort()1393 public void testWriteShort() throws Exception { 1394 // constants for test 1395 final String TEST_NAME = "testWriteShort"; 1396 final int TEST_SR = 22050; 1397 final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; 1398 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; 1399 final int TEST_MODE = AudioTrack.MODE_STREAM; 1400 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 1401 1402 // -------- initialization -------------- 1403 int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); 1404 AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 1405 2 * minBuffSize, TEST_MODE); 1406 short data[] = new short[minBuffSize / 2]; 1407 // -------- test -------------- 1408 assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); 1409 assertTrue(TEST_NAME, track.write(data, OFFSET_DEFAULT, data.length) == data.length); 1410 track.flush(); 1411 // -------- tear down -------------- 1412 track.release(); 1413 } 1414 1415 // Test case 11: write() succeeds and returns the size that was written for 1416 // 8bit 1417 @Test testWriteByte8bit()1418 public void testWriteByte8bit() throws Exception { 1419 // constants for test 1420 final String TEST_NAME = "testWriteByte8bit"; 1421 final int TEST_SR = 22050; 1422 final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO; 1423 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; 1424 final int TEST_MODE = AudioTrack.MODE_STREAM; 1425 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 1426 1427 // -------- initialization -------------- 1428 int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); 1429 AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 1430 2 * minBuffSize, TEST_MODE); 1431 byte data[] = new byte[minBuffSize]; 1432 // -------- test -------------- 1433 assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED); 1434 assertEquals(TEST_NAME, data.length, track.write(data, OFFSET_DEFAULT, data.length)); 1435 // -------- tear down -------------- 1436 track.release(); 1437 } 1438 1439 // Test case 12: write() succeeds and returns the size that was written for 1440 // 8bit 1441 @Test testWriteShort8bit()1442 public void testWriteShort8bit() throws Exception { 1443 // constants for test 1444 final String TEST_NAME = "testWriteShort8bit"; 1445 final int TEST_SR = 22050; 1446 final int 
TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                2 * minBuffSize, TEST_MODE);
        short data[] = new short[minBuffSize / 2];
        // -------- test --------------
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertEquals(TEST_NAME, data.length, track.write(data, OFFSET_DEFAULT, data.length));
        // -------- tear down --------------
        track.release();
    }

    // -----------------------------------------------------------------
    // Getters
    // ----------------------------------

    // Test case 1: getMinBufferSize() returns ERROR_BAD_VALUE if SR < 4000
    @Test
    public void testGetMinBufferSizeTooLowSR() throws Exception {
        // constant for test
        final String TEST_NAME = "testGetMinBufferSizeTooLowSR";
        final int TEST_SR = 3999;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;

        // -------- initialization & test --------------
        assertTrue(TEST_NAME, AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT) ==
                AudioTrack.ERROR_BAD_VALUE);
    }

    // Test case 2: getMinBufferSize() returns ERROR_BAD_VALUE if the sample rate is too high
    @Test
    public void testGetMinBufferSizeTooHighSR() throws Exception {
        // constant for test
        final String TEST_NAME = "testGetMinBufferSizeTooHighSR";
        // FIXME need an API to retrieve AudioTrack.SAMPLE_RATE_HZ_MAX
        final int TEST_SR = AudioFormat.SAMPLE_RATE_HZ_MAX + 1;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;

        // -------- initialization & test --------------
        assertTrue(TEST_NAME, AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT) ==
                AudioTrack.ERROR_BAD_VALUE);
    }

    @Test
    public void testAudioTrackProperties() throws Exception {
        // constants for test
        final String TEST_NAME = "testAudioTrackProperties";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        MockAudioTrack track = new MockAudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF,
                TEST_FORMAT, 2 * minBuffSize, TEST_MODE);
        assertEquals(TEST_NAME, AudioTrack.STATE_INITIALIZED, track.getState());
        assertEquals(TEST_NAME, TEST_FORMAT, track.getAudioFormat());
        assertEquals(TEST_NAME, TEST_CONF, track.getChannelConfiguration());
        assertEquals(TEST_NAME, TEST_SR, track.getSampleRate());
        assertEquals(TEST_NAME, TEST_STREAM_TYPE, track.getStreamType());
        final int channelCount = 1; // mono configuration
        assertEquals(channelCount, track.getChannelCount());
        final int notificationMarkerPosition = 0;
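        // Marker position and update period both default to 0 (disabled); the setters below
        // are expected to return SUCCESS and the corresponding getters to echo the values back.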
        assertEquals(TEST_NAME, notificationMarkerPosition, track.getNotificationMarkerPosition());
        final int markerInFrames = 2;
        assertEquals(TEST_NAME, AudioTrack.SUCCESS,
                track.setNotificationMarkerPosition(markerInFrames));
        assertEquals(TEST_NAME, markerInFrames, track.getNotificationMarkerPosition());
        final int positionNotificationPeriod = 0;
        assertEquals(TEST_NAME, positionNotificationPeriod, track.getPositionNotificationPeriod());
        final int periodInFrames = 2;
        assertEquals(TEST_NAME, AudioTrack.SUCCESS,
                track.setPositionNotificationPeriod(periodInFrames));
        assertEquals(TEST_NAME, periodInFrames, track.getPositionNotificationPeriod());
        track.setState(AudioTrack.STATE_NO_STATIC_DATA);
        assertEquals(TEST_NAME, AudioTrack.STATE_NO_STATIC_DATA, track.getState());
        track.setState(AudioTrack.STATE_UNINITIALIZED);
        assertEquals(TEST_NAME, AudioTrack.STATE_UNINITIALIZED, track.getState());
        int frameCount = 2 * minBuffSize;
        if (TEST_CONF == AudioFormat.CHANNEL_CONFIGURATION_STEREO) {
            frameCount /= 2;
        }
        if (TEST_FORMAT == AudioFormat.ENCODING_PCM_16BIT) {
            frameCount /= 2;
        }
        assertTrue(TEST_NAME, track.getNativeFrameCount() >= frameCount);
        assertEquals(TEST_NAME, track.getNativeFrameCount(), track.getBufferSizeInFrames());
    }

    @Test
    public void testReloadStaticData() throws Exception {
        // constants for test
        final String TEST_NAME = "testReloadStaticData";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_MONO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------
        int bufferSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        byte data[] = AudioHelper.createSoundDataInByteArray(
                bufferSize, TEST_SR, 1024 /* frequency */, 0 /* sweep */);
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
                bufferSize, TEST_MODE);
        // -------- test --------------
        track.write(data, OFFSET_DEFAULT, bufferSize);
        assertTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        track.play();
        Thread.sleep(WAIT_MSEC);
        track.stop();
        Thread.sleep(WAIT_MSEC);
        assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.reloadStaticData());
        track.play();
        Thread.sleep(WAIT_MSEC);
        track.stop();
        // -------- tear down --------------
        track.release();
    }

    @Presubmit
    @Test
    public void testPlayStaticDataShort() throws Exception {
        if (!hasAudioOutput()) {
            Log.w(TAG, "AUDIO_OUTPUT feature not found. This system might not have a valid "
                    + "audio output HAL");
            return;
        }
        // constants for test
        final String TEST_NAME = "testPlayStaticDataShort";
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_FLOAT;
        final int TEST_SR = 48000;
        final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO;
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        final double TEST_SWEEP = 100;
        final int TEST_LOOPS = 1;
        final double TEST_FREQUENCY = 400;
        final long WAIT_TIME_MS = 150; // compensate for cold start when run in isolation.
        final double TEST_LOOP_DURATION = 0.25;
        final int TEST_ADDITIONAL_DRAIN_MS = 300;  // as a presubmit test, 1% of the time the
                                                   // startup is slow by 200ms.

        playOnceStaticData(TEST_NAME, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP,
                TEST_LOOPS, TEST_FORMAT, TEST_FREQUENCY, TEST_SR, TEST_CONF,
                WAIT_TIME_MS, TEST_LOOP_DURATION, TEST_ADDITIONAL_DRAIN_MS);

    }

    @Test
    public void testPlayStaticByteArray() throws Exception {
        doTestPlayStaticData("testPlayStaticByteArray", AudioFormat.ENCODING_PCM_8BIT);
    }

    @Test
    public void testPlayStaticShortArray() throws Exception {
        doTestPlayStaticData("testPlayStaticShortArray", AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testPlayStaticFloatArray() throws Exception {
        doTestPlayStaticData("testPlayStaticFloatArray", AudioFormat.ENCODING_PCM_FLOAT);
    }

    private void doTestPlayStaticData(String testName, int testFormat) throws Exception {
        if (!hasAudioOutput()) {
            Log.w(TAG, "AUDIO_OUTPUT feature not found. This system might not have a valid "
                    + "audio output HAL");
            return;
        }
        // constants for test
        final int TEST_SR_ARRAY[] = {
                12055, // Note multichannel tracks will sound very short at low sample rates
                48000,
        };
        final int TEST_CONF_ARRAY[] = {
                AudioFormat.CHANNEL_OUT_MONO,             // 1.0
                AudioFormat.CHANNEL_OUT_STEREO,           // 2.0
                AudioFormat.CHANNEL_OUT_7POINT1_SURROUND, // 7.1
        };
        final int TEST_MODE = AudioTrack.MODE_STATIC;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        final double TEST_SWEEP = 100;
        final int TEST_LOOPS = 1;
        final double TEST_LOOP_DURATION = 1.;
        final int TEST_ADDITIONAL_DRAIN_MS = 0;
        // Compensates for cold start when run in isolation.
        // The cold output latency is assumed to be under 500 ms,
        // or under 200 ms on low-latency devices.
        final long WAIT_TIME_MS = isLowLatencyDevice() ?
WAIT_MSEC : 500; 1645 1646 double frequency = 400; // frequency changes for each test 1647 for (int testSampleRate : TEST_SR_ARRAY) { 1648 for (int testChannelConfiguration : TEST_CONF_ARRAY) { 1649 playOnceStaticData(testName, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP, 1650 TEST_LOOPS, testFormat, frequency, testSampleRate, 1651 testChannelConfiguration, WAIT_TIME_MS, 1652 TEST_LOOP_DURATION, TEST_ADDITIONAL_DRAIN_MS); 1653 1654 frequency += 70; // increment test tone frequency 1655 } 1656 } 1657 } 1658 playOnceStaticData(String testName, int testMode, int testStreamType, double testSweep, int testLoops, int testFormat, double testFrequency, int testSr, int testConf, long waitMsec, double testLoopDuration, int additionalDrainMs)1659 private void playOnceStaticData(String testName, int testMode, int testStreamType, 1660 double testSweep, int testLoops, int testFormat, double testFrequency, int testSr, 1661 int testConf, long waitMsec, double testLoopDuration, int additionalDrainMs) 1662 throws InterruptedException { 1663 // -------- initialization -------------- 1664 final int channelCount = Integer.bitCount(testConf); 1665 final int bufferFrames = (int)(testLoopDuration * testSr); 1666 final int bufferSamples = bufferFrames * channelCount; 1667 final int bufferSize = bufferSamples 1668 * AudioFormat.getBytesPerSample(testFormat); 1669 final double frequency = testFrequency / channelCount; 1670 final long MILLISECONDS_PER_SECOND = 1000; 1671 AudioTrack track = new AudioTrack(testStreamType, testSr, 1672 testConf, testFormat, bufferSize, testMode); 1673 assertEquals(testName, AudioTrack.STATE_NO_STATIC_DATA, track.getState()); 1674 1675 // -------- test -------------- 1676 1677 // test setLoopPoints and setPosition can be called here. 1678 assertEquals(testName, 1679 android.media.AudioTrack.SUCCESS, 1680 track.setPlaybackHeadPosition(bufferFrames/2)); 1681 assertEquals(testName, 1682 android.media.AudioTrack.SUCCESS, 1683 track.setLoopPoints( 1684 0 /*startInFrames*/, bufferFrames, 10 /*loopCount*/)); 1685 // only need to write once to the static track 1686 switch (testFormat) { 1687 case AudioFormat.ENCODING_PCM_8BIT: { 1688 byte data[] = AudioHelper.createSoundDataInByteArray( 1689 bufferSamples, testSr, 1690 frequency, testSweep); 1691 assertEquals(testName, 1692 bufferSamples, 1693 track.write(data, 0 /*offsetInBytes*/, data.length)); 1694 } break; 1695 case AudioFormat.ENCODING_PCM_16BIT: { 1696 short data[] = AudioHelper.createSoundDataInShortArray( 1697 bufferSamples, testSr, 1698 frequency, testSweep); 1699 assertEquals(testName, 1700 bufferSamples, 1701 track.write(data, 0 /*offsetInBytes*/, data.length)); 1702 } break; 1703 case AudioFormat.ENCODING_PCM_FLOAT: { 1704 float data[] = AudioHelper.createSoundDataInFloatArray( 1705 bufferSamples, testSr, 1706 frequency, testSweep); 1707 assertEquals(testName, 1708 bufferSamples, 1709 track.write(data, 0 /*offsetInBytes*/, data.length, 1710 AudioTrack.WRITE_BLOCKING)); 1711 } break; 1712 } 1713 assertEquals(testName, AudioTrack.STATE_INITIALIZED, track.getState()); 1714 // test setLoopPoints and setPosition can be called here. 
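        // For a MODE_STATIC track these positioning calls are exercised twice: once while the
        // track is still in STATE_NO_STATIC_DATA (before the write above) and once now that
        // write() has completed and the track is STATE_INITIALIZED; both passes are expected
        // to return SUCCESS while the track is stopped.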
1715 assertEquals(testName, 1716 android.media.AudioTrack.SUCCESS, 1717 track.setPlaybackHeadPosition(0 /*positionInFrames*/)); 1718 assertEquals(testName, 1719 android.media.AudioTrack.SUCCESS, 1720 track.setLoopPoints(0 /*startInFrames*/, bufferFrames, testLoops)); 1721 1722 track.play(); 1723 Thread.sleep((int)(testLoopDuration * MILLISECONDS_PER_SECOND) * (testLoops + 1)); 1724 Thread.sleep(waitMsec + additionalDrainMs); 1725 1726 // Check position after looping. AudioTrack.getPlaybackHeadPosition() returns 1727 // the running count of frames played, not the actual static buffer position. 1728 int position = track.getPlaybackHeadPosition(); 1729 assertEquals(testName, bufferFrames * (testLoops + 1), position); 1730 1731 track.stop(); 1732 Thread.sleep(waitMsec); 1733 // -------- tear down -------------- 1734 track.release(); 1735 } 1736 1737 @Presubmit 1738 @Test testPlayStreamDataShort()1739 public void testPlayStreamDataShort() throws Exception { 1740 // constants for test 1741 final String TEST_NAME = "testPlayStreamDataShort"; 1742 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT; 1743 final int TEST_SR = 48000; 1744 final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO; 1745 final int TEST_MODE = AudioTrack.MODE_STREAM; 1746 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 1747 final float TEST_SWEEP = 0; // sine wave only 1748 final boolean TEST_IS_LOW_RAM_DEVICE = isLowRamDevice(); 1749 final double TEST_FREQUENCY = 1000; 1750 final long NO_WAIT = 0; 1751 1752 playOnceStreamData(TEST_NAME, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP, 1753 TEST_IS_LOW_RAM_DEVICE, TEST_FORMAT, TEST_FREQUENCY, TEST_SR, TEST_CONF, 1754 NO_WAIT, 0 /* mask */); 1755 } 1756 1757 @Test testPlayStreamByteArray()1758 public void testPlayStreamByteArray() throws Exception { 1759 doTestPlayStreamData("testPlayStreamByteArray", AudioFormat.ENCODING_PCM_8BIT); 1760 } 1761 1762 @Test testPlayStreamShortArray()1763 public void testPlayStreamShortArray() throws Exception { 1764 doTestPlayStreamData("testPlayStreamShortArray", AudioFormat.ENCODING_PCM_16BIT); 1765 } 1766 1767 @Test testPlayStreamFloatArray()1768 public void testPlayStreamFloatArray() throws Exception { 1769 doTestPlayStreamData("testPlayStreamFloatArray", AudioFormat.ENCODING_PCM_FLOAT); 1770 } 1771 doTestPlayStreamData(String testName, int testFormat)1772 private void doTestPlayStreamData(String testName, int testFormat) throws Exception { 1773 // constants for test 1774 // due to downmixer algorithmic latency, source channels greater than 2 may 1775 // sound shorter in duration at 4kHz sampling rate. 
1776 final int TEST_SR_ARRAY[] = { 1777 4000, 1778 44100, 1779 48000, 1780 96000, 1781 192000, 1782 }; 1783 final int TEST_CONF_ARRAY[] = { 1784 AudioFormat.CHANNEL_OUT_MONO, // 1.0 1785 AudioFormat.CHANNEL_OUT_STEREO, // 2.0 1786 AudioFormat.CHANNEL_OUT_STEREO | AudioFormat.CHANNEL_OUT_FRONT_CENTER, // 3.0 1787 AudioFormat.CHANNEL_OUT_QUAD, // 4.0 1788 AudioFormat.CHANNEL_OUT_QUAD | AudioFormat.CHANNEL_OUT_FRONT_CENTER, // 5.0 1789 AudioFormat.CHANNEL_OUT_5POINT1, // 5.1 1790 AudioFormat.CHANNEL_OUT_6POINT1, // 6.1 1791 AudioFormat.CHANNEL_OUT_7POINT1_SURROUND, // 7.1 1792 }; 1793 final int TEST_MODE = AudioTrack.MODE_STREAM; 1794 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 1795 final float TEST_SWEEP = 0; // sine wave only 1796 final boolean TEST_IS_LOW_RAM_DEVICE = isLowRamDevice(); 1797 1798 double frequency = 400; // frequency changes for each test 1799 for (int testSampleRate : TEST_SR_ARRAY) { 1800 for (int testChannelConfiguration : TEST_CONF_ARRAY) { 1801 playOnceStreamData(testName, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP, 1802 TEST_IS_LOW_RAM_DEVICE, testFormat, frequency, 1803 testSampleRate, testChannelConfiguration, 1804 WAIT_MSEC, 0 /* mask */); 1805 frequency += 50; // increment test tone frequency 1806 } 1807 } 1808 } 1809 playOnceStreamData(String testName, int testMode, int testStream, float testSweep, boolean isLowRamDevice, int testFormat, double testFrequency, int testSr, int testConf, long waitMsec, int mask)1810 private void playOnceStreamData(String testName, int testMode, int testStream, 1811 float testSweep, boolean isLowRamDevice, int testFormat, double testFrequency, 1812 int testSr, int testConf, long waitMsec, int mask) 1813 throws InterruptedException { 1814 final int channelCount = Integer.bitCount(testConf); 1815 if (isLowRamDevice 1816 && (testSr > 96000 || channelCount > 4)) { 1817 return; // ignore. FIXME: reenable when AF memory allocation is updated. 1818 } 1819 // -------- initialization -------------- 1820 final int minBufferSize = AudioTrack.getMinBufferSize(testSr, 1821 testConf, testFormat); // in bytes 1822 AudioTrack track = new AudioTrack(testStream, testSr, 1823 testConf, testFormat, minBufferSize, testMode); 1824 assertTrue(testName, track.getState() == AudioTrack.STATE_INITIALIZED); 1825 1826 // compute parameters for the source signal data. 1827 AudioFormat format = track.getFormat(); 1828 assertEquals(testName, testSr, format.getSampleRate()); 1829 assertEquals(testName, testConf, format.getChannelMask()); 1830 assertEquals(testName, channelCount, format.getChannelCount()); 1831 assertEquals(testName, testFormat, format.getEncoding()); 1832 // duration of test tones 1833 final int frames = AudioHelper.frameCountFromMsec(300 /* ms */, format); 1834 final int sourceSamples = channelCount * frames; 1835 final double frequency = testFrequency / channelCount; 1836 1837 int written = 0; 1838 // For streaming tracks, it's ok to issue the play() command 1839 // before any audio is written. 1840 track.play(); 1841 // -------- test -------------- 1842 1843 // samplesPerWrite can be any positive value. 1844 // We prefer this to be a multiple of channelCount so write() 1845 // does not return a short count. 1846 // If samplesPerWrite is very large, it is limited to the data length 1847 // and we simply write (blocking) the entire source data and not even loop. 1848 // We choose a value here which simulates double buffer writes. 
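        // Illustrative example (numbers are hypothetical, not taken from this test): with a
        // stereo track whose buffer holds 4800 frames and buffers = 2, the computation below
        // gives samplesPerWrite = (4800 / 2) * 2 = 4800 samples, so each write() call fills
        // half of the track buffer.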
1849 final int buffers = 2; // double buffering mode 1850 final int samplesPerWrite = 1851 (track.getBufferSizeInFrames() / buffers) * channelCount; 1852 switch (testFormat) { 1853 case AudioFormat.ENCODING_PCM_8BIT: { 1854 byte data[] = AudioHelper.createSoundDataInByteArray( 1855 sourceSamples, testSr, 1856 frequency, testSweep); 1857 if (mask != 0) { 1858 AudioHelper.maskArray(data, testConf, mask); 1859 } 1860 while (written < data.length) { 1861 int samples = Math.min(data.length - written, samplesPerWrite); 1862 int ret = track.write(data, written, samples); 1863 assertEquals(testName, samples, ret); 1864 written += ret; 1865 } 1866 } 1867 break; 1868 case AudioFormat.ENCODING_PCM_16BIT: { 1869 short data[] = AudioHelper.createSoundDataInShortArray( 1870 sourceSamples, testSr, 1871 frequency, testSweep); 1872 if (mask != 0) { 1873 AudioHelper.maskArray(data, testConf, mask); 1874 } 1875 while (written < data.length) { 1876 int samples = Math.min(data.length - written, samplesPerWrite); 1877 int ret = track.write(data, written, samples); 1878 assertEquals(testName, samples, ret); 1879 written += ret; 1880 } 1881 } 1882 break; 1883 case AudioFormat.ENCODING_PCM_FLOAT: { 1884 float data[] = AudioHelper.createSoundDataInFloatArray( 1885 sourceSamples, testSr, 1886 frequency, testSweep); 1887 if (mask != 0) { 1888 AudioHelper.maskArray(data, testConf, mask); 1889 } 1890 while (written < data.length) { 1891 int samples = Math.min(data.length - written, samplesPerWrite); 1892 int ret = track.write(data, written, samples, 1893 AudioTrack.WRITE_BLOCKING); 1894 assertEquals(testName, samples, ret); 1895 written += ret; 1896 } 1897 } 1898 break; 1899 } 1900 1901 // For streaming tracks, AudioTrack.stop() doesn't immediately stop playback. 1902 // Rather, it allows the remaining data in the internal buffer to drain. 1903 track.stop(); 1904 Thread.sleep(waitMsec); // wait for the data to drain. 
1905 // -------- tear down -------------- 1906 track.release(); 1907 Thread.sleep(waitMsec); // wait for release to complete 1908 } 1909 playOnceStreamByteBuffer( String testName, double testFrequency, double testSweep, int testStreamType, int testSampleRate, int testChannelMask, int testEncoding, int testTransferMode, int testWriteMode, boolean useChannelIndex, boolean useDirect)1910 private void playOnceStreamByteBuffer( 1911 String testName, double testFrequency, double testSweep, 1912 int testStreamType, int testSampleRate, int testChannelMask, int testEncoding, 1913 int testTransferMode, int testWriteMode, 1914 boolean useChannelIndex, boolean useDirect) throws Exception { 1915 AudioTrack track = null; 1916 try { 1917 AudioFormat.Builder afb = new AudioFormat.Builder() 1918 .setEncoding(testEncoding) 1919 .setSampleRate(testSampleRate); 1920 if (useChannelIndex) { 1921 afb.setChannelIndexMask(testChannelMask); 1922 } else { 1923 afb.setChannelMask(testChannelMask); 1924 } 1925 final AudioFormat format = afb.build(); 1926 final int frameSize = AudioHelper.frameSizeFromFormat(format); 1927 final int frameCount = 1928 AudioHelper.frameCountFromMsec(300 /* ms */, format); 1929 final int bufferSize = frameCount * frameSize; 1930 final int bufferSamples = frameCount * format.getChannelCount(); 1931 1932 track = new AudioTrack.Builder() 1933 .setAudioFormat(format) 1934 .setTransferMode(testTransferMode) 1935 .setBufferSizeInBytes(bufferSize) 1936 .build(); 1937 1938 assertEquals(testName + ": state", 1939 AudioTrack.STATE_INITIALIZED, track.getState()); 1940 assertEquals(testName + ": sample rate", 1941 testSampleRate, track.getSampleRate()); 1942 assertEquals(testName + ": encoding", 1943 testEncoding, track.getAudioFormat()); 1944 1945 ByteBuffer bb = useDirect 1946 ? ByteBuffer.allocateDirect(bufferSize) 1947 : ByteBuffer.allocate(bufferSize); 1948 bb.order(java.nio.ByteOrder.nativeOrder()); 1949 1950 final double sampleFrequency = testFrequency / format.getChannelCount(); 1951 switch (testEncoding) { 1952 case AudioFormat.ENCODING_PCM_8BIT: { 1953 byte data[] = AudioHelper.createSoundDataInByteArray( 1954 bufferSamples, testSampleRate, 1955 sampleFrequency, testSweep); 1956 bb.put(data); 1957 bb.flip(); 1958 } 1959 break; 1960 case AudioFormat.ENCODING_PCM_16BIT: { 1961 short data[] = AudioHelper.createSoundDataInShortArray( 1962 bufferSamples, testSampleRate, 1963 sampleFrequency, testSweep); 1964 ShortBuffer sb = bb.asShortBuffer(); 1965 sb.put(data); 1966 bb.limit(sb.limit() * 2); 1967 } 1968 break; 1969 case AudioFormat.ENCODING_PCM_FLOAT: { 1970 float data[] = AudioHelper.createSoundDataInFloatArray( 1971 bufferSamples, testSampleRate, 1972 sampleFrequency, testSweep); 1973 FloatBuffer fb = bb.asFloatBuffer(); 1974 fb.put(data); 1975 bb.limit(fb.limit() * 4); 1976 } 1977 break; 1978 } 1979 // start the AudioTrack 1980 // This can be done before or after the first write. 1981 // Current behavior for streaming tracks is that 1982 // actual playback does not begin before the internal 1983 // data buffer is completely full. 1984 track.play(); 1985 1986 // write data 1987 final long startTime = System.currentTimeMillis(); 1988 final long maxDuration = frameCount * 1000 / testSampleRate + 1000; 1989 for (int written = 0; written < bufferSize; ) { 1990 // ret may return a short count if write 1991 // is non blocking or even if write is blocking 1992 // when a stop/pause/flush is issued from another thread. 
1993 final int kBatchFrames = 1000; 1994 int ret = track.write(bb, 1995 Math.min(bufferSize - written, frameSize * kBatchFrames), 1996 testWriteMode); 1997 // for non-blocking mode, this loop may spin quickly 1998 assertTrue(testName + ": write error " + ret, ret >= 0); 1999 assertTrue(testName + ": write timeout", 2000 (System.currentTimeMillis() - startTime) <= maxDuration); 2001 written += ret; 2002 } 2003 2004 // for streaming tracks, stop will allow the rest of the data to 2005 // drain out, but we don't know how long to wait unless 2006 // we check the position before stop. if we check position 2007 // after we stop, we read 0. 2008 final int position = track.getPlaybackHeadPosition(); 2009 final int remainingTimeMs = (int)((double)(frameCount - position) 2010 * 1000 / testSampleRate); 2011 track.stop(); 2012 Thread.sleep(remainingTimeMs); 2013 Thread.sleep(WAIT_MSEC); 2014 } finally { 2015 if (track != null) { 2016 track.release(); 2017 } 2018 } 2019 } 2020 2021 @Test testPlayStreamByteBuffer()2022 public void testPlayStreamByteBuffer() throws Exception { 2023 // constants for test 2024 final String TEST_NAME = "testPlayStreamByteBuffer"; 2025 final int TEST_FORMAT_ARRAY[] = { // should hear 4 tones played 3 times 2026 AudioFormat.ENCODING_PCM_8BIT, 2027 AudioFormat.ENCODING_PCM_16BIT, 2028 AudioFormat.ENCODING_PCM_FLOAT, 2029 }; 2030 final int TEST_SR_ARRAY[] = { 2031 48000, 2032 }; 2033 final int TEST_CONF_ARRAY[] = { 2034 AudioFormat.CHANNEL_OUT_STEREO, 2035 }; 2036 final int TEST_WRITE_MODE_ARRAY[] = { 2037 AudioTrack.WRITE_BLOCKING, 2038 AudioTrack.WRITE_NON_BLOCKING, 2039 }; 2040 final int TEST_MODE = AudioTrack.MODE_STREAM; 2041 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 2042 final double TEST_SWEEP = 0; // sine wave only 2043 2044 for (int TEST_FORMAT : TEST_FORMAT_ARRAY) { 2045 double frequency = 800; // frequency changes for each test 2046 for (int TEST_SR : TEST_SR_ARRAY) { 2047 for (int TEST_CONF : TEST_CONF_ARRAY) { 2048 for (int TEST_WRITE_MODE : TEST_WRITE_MODE_ARRAY) { 2049 for (int useDirect = 0; useDirect < 2; ++useDirect) { 2050 playOnceStreamByteBuffer(TEST_NAME, frequency, TEST_SWEEP, 2051 TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 2052 TEST_MODE, TEST_WRITE_MODE, 2053 false /* useChannelIndex */, useDirect != 0); 2054 2055 // add a gap to make tones distinct 2056 Thread.sleep(100 /* millis */); 2057 frequency += 30; // increment test tone frequency 2058 } 2059 } 2060 } 2061 } 2062 } 2063 } 2064 2065 @Test testPlayChannelIndexStreamBuffer()2066 public void testPlayChannelIndexStreamBuffer() throws Exception { 2067 // should hear 4 tones played 3 or 4 times depending 2068 // on the device output capabilities (e.g. stereo or 5.1 or otherwise) 2069 final String TEST_NAME = "testPlayChannelIndexStreamBuffer"; 2070 final int TEST_FORMAT_ARRAY[] = { 2071 AudioFormat.ENCODING_PCM_8BIT, 2072 //AudioFormat.ENCODING_PCM_16BIT, 2073 //AudioFormat.ENCODING_PCM_FLOAT, 2074 }; 2075 final int TEST_SR_ARRAY[] = { 2076 48000, 2077 }; 2078 // The following channel index masks are iterated over and route 2079 // the AudioTrack channels to the output sink channels based on 2080 // the set bits in counting order (lsb to msb). 2081 // 2082 // For a stereo output sink, the sound may come from L and R, L only, none, or R only. 2083 // For a 5.1 output sink, the sound may come from a variety of outputs 2084 // as commented below. 
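        // For example, the index mask (1 << 0) | (1 << 2) routes the track's two channels to
        // output sink channels 0 and 2 in counting order, which a 5.1 sink maps to FL and FC.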
2085 final int TEST_CONF_ARRAY[] = { // matches output sink channels: 2086 (1 << 0) | (1 << 1), // Stereo(L, R) 5.1(FL, FR) 2087 (1 << 0) | (1 << 2), // Stereo(L) 5.1(FL, FC) 2088 (1 << 4) | (1 << 5), // Stereo(None) 5.1(BL, BR) 2089 (1 << 1) | (1 << 2), // Stereo(R) 5.1(FR, FC) 2090 }; 2091 final int TEST_WRITE_MODE_ARRAY[] = { 2092 AudioTrack.WRITE_BLOCKING, 2093 AudioTrack.WRITE_NON_BLOCKING, 2094 }; 2095 final double TEST_SWEEP = 0; 2096 final int TEST_MODE = AudioTrack.MODE_STREAM; 2097 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 2098 2099 for (int TEST_FORMAT : TEST_FORMAT_ARRAY) { 2100 for (int TEST_CONF : TEST_CONF_ARRAY) { 2101 double frequency = 800; // frequency changes for each test 2102 for (int TEST_SR : TEST_SR_ARRAY) { 2103 for (int TEST_WRITE_MODE : TEST_WRITE_MODE_ARRAY) { 2104 for (int useDirect = 0; useDirect < 2; ++useDirect) { 2105 playOnceStreamByteBuffer(TEST_NAME, frequency, TEST_SWEEP, 2106 TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 2107 TEST_MODE, TEST_WRITE_MODE, 2108 true /* useChannelIndex */, useDirect != 0); 2109 2110 // add a gap to make tones distinct 2111 Thread.sleep(100 /* millis */); 2112 frequency += 30; // increment test tone frequency 2113 } 2114 } 2115 } 2116 } 2117 } 2118 } 2119 hasAudioOutput()2120 private boolean hasAudioOutput() { 2121 return getContext().getPackageManager() 2122 .hasSystemFeature(PackageManager.FEATURE_AUDIO_OUTPUT); 2123 } 2124 isLowLatencyDevice()2125 private boolean isLowLatencyDevice() { 2126 return getContext().getPackageManager() 2127 .hasSystemFeature(PackageManager.FEATURE_AUDIO_LOW_LATENCY); 2128 } 2129 isLowRamDevice()2130 private boolean isLowRamDevice() { 2131 return ((ActivityManager) getContext().getSystemService(Context.ACTIVITY_SERVICE)) 2132 .isLowRamDevice(); 2133 } 2134 isProAudioDevice()2135 private boolean isProAudioDevice() { 2136 return getContext().getPackageManager().hasSystemFeature( 2137 PackageManager.FEATURE_AUDIO_PRO); 2138 } 2139 2140 @Test testGetTimestamp()2141 public void testGetTimestamp() throws Exception { 2142 if (!hasAudioOutput()) { 2143 Log.w(TAG, "AUDIO_OUTPUT feature not found. This system might not have a valid " 2144 + "audio output HAL"); 2145 return; 2146 } 2147 String streamName = "test_get_timestamp"; 2148 doTestTimestamp( 2149 22050 /* sampleRate */, 2150 AudioFormat.CHANNEL_OUT_MONO , 2151 AudioFormat.ENCODING_PCM_16BIT, 2152 AudioTrack.MODE_STREAM, 2153 streamName); 2154 } 2155 2156 @Test testFastTimestamp()2157 public void testFastTimestamp() throws Exception { 2158 if (!hasAudioOutput()) { 2159 Log.w(TAG, "AUDIO_OUTPUT feature not found. This system might not have a valid " 2160 + "audio output HAL"); 2161 return; 2162 } 2163 String streamName = "test_fast_timestamp"; 2164 doTestTimestamp( 2165 AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC), 2166 AudioFormat.CHANNEL_OUT_MONO, 2167 AudioFormat.ENCODING_PCM_16BIT, 2168 AudioTrack.MODE_STREAM, 2169 streamName); 2170 } 2171 2172 // Note: this test may fail if playing through a remote device such as Bluetooth. 
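    // doTestTimestamp() below writes a known number of frames and repeatedly polls
    // AudioTrack.getTimestamp(), checking the ordering invariant
    //     framesWritten >= getPlaybackHeadPosition() >= timestamp.framePosition
    // and, after a full drain, that the reported frame position lands within roughly one
    // transfer buffer (100 ms worth of frames) of the total frames written.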
doTestTimestamp(int sampleRate, int channelMask, int encoding, int transferMode, String streamName)2173 private void doTestTimestamp(int sampleRate, int channelMask, int encoding, int transferMode, 2174 String streamName) throws Exception { 2175 // constants for test 2176 final int TEST_LOOP_CNT = 10; 2177 final int TEST_BUFFER_MS = 100; 2178 final int TEST_USAGE = AudioAttributes.USAGE_MEDIA; 2179 2180 final int MILLIS_PER_SECOND = 1000; 2181 final int FRAME_TOLERANCE = sampleRate * TEST_BUFFER_MS / MILLIS_PER_SECOND; 2182 2183 // -------- initialization -------------- 2184 final int frameSize = 2185 AudioFormat.getBytesPerSample(encoding) 2186 * AudioFormat.channelCountFromOutChannelMask(channelMask); 2187 // see whether we can use fast mode 2188 final int nativeOutputSampleRate = 2189 AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC); 2190 Log.d(TAG, "Native output sample rate " + nativeOutputSampleRate); 2191 final boolean fast = (sampleRate == nativeOutputSampleRate); 2192 2193 AudioAttributes attributes = (fast ? new AudioAttributes.Builder() 2194 .setFlags(AudioAttributes.FLAG_LOW_LATENCY) : new AudioAttributes.Builder()) 2195 .setUsage(TEST_USAGE) 2196 .build(); 2197 AudioFormat format = new AudioFormat.Builder() 2198 //.setChannelIndexMask((1 << AudioFormat.channelCountFromOutChannelMask(channelMask)) - 1) 2199 .setChannelMask(channelMask) 2200 .setEncoding(encoding) 2201 .setSampleRate(sampleRate) 2202 .build(); 2203 // not specifying the buffer size in the builder should get us the minimum buffer size. 2204 AudioTrack track = new AudioTrack.Builder() 2205 .setAudioAttributes(attributes) 2206 .setAudioFormat(format) 2207 .setTransferMode(transferMode) 2208 .build(); 2209 assertEquals(AudioTrack.STATE_INITIALIZED, track.getState()); 2210 2211 try { 2212 // We generally use a transfer size of 100ms for testing, but in rare cases 2213 // (e.g. Bluetooth) this needs to be larger to exceed the internal track buffer. 2214 final int frameCount = 2215 Math.max(track.getBufferCapacityInFrames(), 2216 sampleRate * TEST_BUFFER_MS / MILLIS_PER_SECOND); 2217 track.play(); 2218 2219 // Android nanoTime implements MONOTONIC, same as our audio timestamps. 2220 2221 final ByteBuffer data = ByteBuffer.allocate(frameCount * frameSize); 2222 data.order(java.nio.ByteOrder.nativeOrder()).limit(frameCount * frameSize); 2223 final AudioTimestamp timestamp = new AudioTimestamp(); 2224 2225 long framesWritten = 0; 2226 2227 // We start data delivery twice, the second start simulates restarting 2228 // the track after a fully drained underrun (important case for Android TV). 2229 for (int start = 0; start < 2; ++start) { 2230 final long trackStartTimeNs = System.nanoTime(); 2231 final AudioHelper.TimestampVerifier tsVerifier = 2232 new AudioHelper.TimestampVerifier( 2233 TAG + "(start " + start + ")", 2234 sampleRate, framesWritten, isProAudioDevice()); 2235 for (int i = 0; i < TEST_LOOP_CNT; ++i) { 2236 final long trackWriteTimeNs = System.nanoTime(); 2237 2238 data.position(0); 2239 assertEquals("write did not complete", 2240 data.limit(), track.write(data, data.limit(), 2241 AudioTrack.WRITE_BLOCKING)); 2242 assertEquals("write did not fill buffer", 2243 data.position(), data.limit()); 2244 framesWritten += data.limit() / frameSize; 2245 2246 // track.getTimestamp may return false if there are no physical HAL outputs. 2247 // This may occur on TV devices without connecting an HDMI monitor. 
2248 // It may also be true immediately after start-up, as the mixing thread could 2249 // be idle, but since we've already pushed much more than the 2250 // minimum buffer size, that is unlikely. 2251 // Nevertheless, we don't want to have unnecessary failures, so we ignore the 2252 // first iteration if we don't get a timestamp. 2253 final boolean result = track.getTimestamp(timestamp); 2254 assertTrue("timestamp could not be read", result || i == 0); 2255 if (!result) { 2256 continue; 2257 } 2258 2259 tsVerifier.add(timestamp); 2260 2261 // Ensure that seen is greater than presented. 2262 // This is an "on-the-fly" read without pausing because pausing may cause the 2263 // timestamp to become stale and affect our jitter measurements. 2264 final long framesPresented = timestamp.framePosition; 2265 final int framesSeen = track.getPlaybackHeadPosition(); 2266 assertTrue("server frames ahead of client frames", 2267 framesWritten >= framesSeen); 2268 assertTrue("presented frames ahead of server frames", 2269 framesSeen >= framesPresented); 2270 } 2271 // Full drain. 2272 Thread.sleep(1000 /* millis */); 2273 // check that we are really at the end of playback. 2274 assertTrue("timestamp should be valid while draining", 2275 track.getTimestamp(timestamp)); 2276 // Fast tracks and sw emulated tracks may not fully drain. 2277 // We log the status here. 2278 if (framesWritten != timestamp.framePosition) { 2279 Log.d(TAG, "timestamp should fully drain. written: " 2280 + framesWritten + " position: " + timestamp.framePosition); 2281 } 2282 final long framesLowerLimit = framesWritten - FRAME_TOLERANCE; 2283 assertTrue("timestamp frame position needs to be close to written: " 2284 + timestamp.framePosition + " >= " + framesLowerLimit, 2285 timestamp.framePosition >= framesLowerLimit); 2286 2287 assertTrue("timestamp should not advance during underrun: " 2288 + timestamp.framePosition + " <= " + framesWritten, 2289 timestamp.framePosition <= framesWritten); 2290 2291 tsVerifier.verifyAndLog(trackStartTimeNs, streamName); 2292 } 2293 } finally { 2294 track.release(); 2295 } 2296 } 2297 2298 @Test testVariableRatePlayback()2299 public void testVariableRatePlayback() throws Exception { 2300 final String TEST_NAME = "testVariableRatePlayback"; 2301 final int TEST_SR = 24000; 2302 final int TEST_FINAL_SR = 96000; 2303 final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO; 2304 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; // required for test 2305 final int TEST_MODE = AudioTrack.MODE_STATIC; // required for test 2306 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 2307 2308 final int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT); 2309 final int bufferSizeInBytes = minBuffSize * 100; 2310 final int numChannels = AudioFormat.channelCountFromOutChannelMask(TEST_CONF); 2311 final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT); 2312 final int bytesPerFrame = numChannels * bytesPerSample; 2313 final int frameCount = bufferSizeInBytes / bytesPerFrame; 2314 2315 AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, 2316 TEST_FORMAT, bufferSizeInBytes, TEST_MODE); 2317 2318 // create byte array and write it 2319 byte[] vai = AudioHelper.createSoundDataInByteArray(bufferSizeInBytes, TEST_SR, 2320 600 /* frequency */, 0 /* sweep */); 2321 assertEquals(vai.length, track.write(vai, 0 /* offsetInBytes */, vai.length)); 2322 2323 // sweep up test and sweep down test 2324 int[] sampleRates = {TEST_SR, TEST_FINAL_SR}; 2325 int[] deltaMss = {10, 
10}; 2326 int[] deltaFreqs = {200, -200}; 2327 2328 for (int i = 0; i < 2; ++i) { 2329 int remainingTime; 2330 int sampleRate = sampleRates[i]; 2331 final int deltaMs = deltaMss[i]; 2332 final int deltaFreq = deltaFreqs[i]; 2333 final int lastCheckMs = 500; // check the last 500 ms 2334 2335 assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackRate(sampleRate)); 2336 track.play(); 2337 do { 2338 Thread.sleep(deltaMs); 2339 final int position = track.getPlaybackHeadPosition(); 2340 sampleRate += deltaFreq; 2341 sampleRate = Math.min(TEST_FINAL_SR, Math.max(TEST_SR, sampleRate)); 2342 assertEquals(TEST_NAME, AudioTrack.SUCCESS, track.setPlaybackRate(sampleRate)); 2343 remainingTime = (int)((double)(frameCount - position) * 1000 2344 / sampleRate / bytesPerFrame); 2345 } while (remainingTime >= lastCheckMs + deltaMs); 2346 2347 // ensure the final frequency set is constant and plays frames as expected 2348 final int position1 = track.getPlaybackHeadPosition(); 2349 Thread.sleep(lastCheckMs); 2350 final int position2 = track.getPlaybackHeadPosition(); 2351 2352 final int tolerance60MsInFrames = sampleRate * 60 / 1000; 2353 final int expected = lastCheckMs * sampleRate / 1000; 2354 final int actual = position2 - position1; 2355 2356 // Log.d(TAG, "Variable Playback: expected(" + expected + ") actual(" + actual 2357 // + ") diff(" + (expected - actual) + ")"); 2358 assertEquals(expected, actual, tolerance60MsInFrames); 2359 track.stop(); 2360 } 2361 track.release(); 2362 } 2363 2364 // Test that AudioTrack stop limits drain to only those frames written at the time of stop. 2365 // This ensures consistent stop behavior on Android P and beyond, where data written 2366 // immediately after a stop doesn't get caught in the drain. 2367 @LargeTest 2368 @Test testStopDrain()2369 public void testStopDrain() throws Exception { 2370 final String TEST_NAME = "testStopDrain"; 2371 final int TEST_SR = 8000; 2372 final int TEST_CONF = AudioFormat.CHANNEL_OUT_MONO; // required for test 2373 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_8BIT; // required for test 2374 final int TEST_MODE = AudioTrack.MODE_STREAM; // required for test 2375 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 2376 2377 final int channelCount = AudioFormat.channelCountFromOutChannelMask(TEST_CONF); 2378 final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT); 2379 final int bytesPerFrame = channelCount * bytesPerSample; 2380 final int frameCount = TEST_SR * 3; // 3 seconds of buffer. 2381 final int bufferSizeInBytes = frameCount * bytesPerFrame; 2382 2383 final AudioTrack track = new AudioTrack( 2384 TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, bufferSizeInBytes, TEST_MODE); 2385 2386 try { 2387 // Create 6 seconds of data, but send down only 3 seconds to fill buffer. 2388 final byte[] soundData = AudioHelper.createSoundDataInByteArray( 2389 bufferSizeInBytes * 2, TEST_SR, 600 /* frequency */, 0 /* sweep */); 2390 assertEquals("cannot fill AudioTrack buffer", 2391 bufferSizeInBytes, 2392 track.write(soundData, 0 /* offsetInBytes */, bufferSizeInBytes)); 2393 2394 // Set the track playing. 2395 track.play(); 2396 2397 // Note that the timings here are very generous for our test (really the 2398 // granularity we need is on the order of a second). If we don't get scheduled 2399 // to run within about a second or so - this should be extremely rare - 2400 // the result should be a false pass (rather than a false fail). 2401 2402 // After 1.5 seconds stop. 
2403 Thread.sleep(1500 /* millis */); // Assume device starts within 1.5 sec. 2404 track.stop(); 2405 2406 // We should drain 1.5 seconds and fill another 3 seconds of data. 2407 // We shouldn't be able to write 6 seconds of data - that indicates stop continues 2408 // to drain beyond the frames written at the time of stop. 2409 int length = 0; 2410 while (length < soundData.length) { 2411 Thread.sleep(800 /* millis */); // assume larger than AF thread loop period 2412 final int delta = track.write(soundData, length, soundData.length - length); 2413 assertTrue("track write error: " + delta, delta >= 0); 2414 if (delta == 0) break; 2415 length += delta; 2416 } 2417 2418 // Check to see we limit the data drained (should be able to exactly fill the buffer). 2419 assertEquals("stop drain must be limited " + bufferSizeInBytes + " != " + length, 2420 bufferSizeInBytes, length); 2421 } finally { 2422 track.release(); 2423 } 2424 } 2425 2426 @Test testVariableSpeedPlayback()2427 public void testVariableSpeedPlayback() throws Exception { 2428 if (!hasAudioOutput()) { 2429 Log.w(TAG,"AUDIO_OUTPUT feature not found. This system might not have a valid " 2430 + "audio output HAL"); 2431 return; 2432 } 2433 2434 final String TEST_NAME = "testVariableSpeedPlayback"; 2435 final int TEST_FORMAT = AudioFormat.ENCODING_PCM_FLOAT; // required for test 2436 final int TEST_MODE = AudioTrack.MODE_STATIC; // required for test 2437 final int TEST_SR = 48000; 2438 2439 AudioFormat format = new AudioFormat.Builder() 2440 //.setChannelIndexMask((1 << 0)) // output to first channel, FL 2441 .setChannelMask(AudioFormat.CHANNEL_OUT_MONO) 2442 .setEncoding(TEST_FORMAT) 2443 .setSampleRate(TEST_SR) 2444 .build(); 2445 2446 // create track 2447 final int frameCount = AudioHelper.frameCountFromMsec(100 /*ms*/, format); 2448 final int frameSize = AudioHelper.frameSizeFromFormat(format); 2449 AudioTrack track = new AudioTrack.Builder() 2450 .setAudioFormat(format) 2451 .setBufferSizeInBytes(frameCount * frameSize) 2452 .setTransferMode(TEST_MODE) 2453 .build(); 2454 2455 // create float array and write it 2456 final int sampleCount = frameCount * format.getChannelCount(); 2457 float[] vaf = AudioHelper.createSoundDataInFloatArray( 2458 sampleCount, TEST_SR, 600 /* frequency */, 0 /* sweep */); 2459 assertEquals(vaf.length, track.write(vaf, 0 /* offsetInFloats */, vaf.length, 2460 AudioTrack.WRITE_NON_BLOCKING)); 2461 2462 // sweep speed and pitch 2463 final float[][][] speedAndPitch = { 2464 // { {speedStart, pitchStart} {speedEnd, pitchEnd} } 2465 { {0.5f, 0.5f}, {2.0f, 2.0f} }, // speed by SR conversion (chirp) 2466 { {0.5f, 1.0f}, {2.0f, 1.0f} }, // speed by time stretch (constant pitch) 2467 { {1.0f, 0.5f}, {1.0f, 2.0f} }, // pitch by SR conversion (chirp) 2468 }; 2469 2470 // test that playback params works as expected 2471 PlaybackParams params = new PlaybackParams().allowDefaults(); 2472 assertEquals("default speed not correct", 1.0f, params.getSpeed(), 0.f /* delta */); 2473 assertEquals("default pitch not correct", 1.0f, params.getPitch(), 0.f /* delta */); 2474 assertEquals(TEST_NAME, 2475 params.AUDIO_FALLBACK_MODE_DEFAULT, 2476 params.getAudioFallbackMode()); 2477 track.setPlaybackParams(params); // OK 2478 params.setAudioFallbackMode(params.AUDIO_FALLBACK_MODE_FAIL); 2479 assertEquals(TEST_NAME, 2480 params.AUDIO_FALLBACK_MODE_FAIL, params.getAudioFallbackMode()); 2481 params.setPitch(0.0f); 2482 try { 2483 track.setPlaybackParams(params); 2484 fail("IllegalArgumentException should be thrown on out of range 
data"); 2485 } catch (IllegalArgumentException e) { 2486 ; // expect this is invalid 2487 } 2488 // on failure, the AudioTrack params should not change. 2489 PlaybackParams paramCheck = track.getPlaybackParams(); 2490 assertEquals(TEST_NAME, 2491 paramCheck.AUDIO_FALLBACK_MODE_DEFAULT, paramCheck.getAudioFallbackMode()); 2492 assertEquals("pitch should be unchanged on failure", 2493 1.0f, paramCheck.getPitch(), 0. /* delta */); 2494 2495 // now try to see if we can do extreme pitch correction that should probably be muted. 2496 params.setAudioFallbackMode(params.AUDIO_FALLBACK_MODE_MUTE); 2497 assertEquals(TEST_NAME, 2498 params.AUDIO_FALLBACK_MODE_MUTE, params.getAudioFallbackMode()); 2499 params.setPitch(0.1f); 2500 track.setPlaybackParams(params); // OK 2501 2502 // now do our actual playback 2503 final int TEST_TIME_MS = 2000; 2504 final int TEST_DELTA_MS = 100; 2505 final int testSteps = TEST_TIME_MS / TEST_DELTA_MS; 2506 2507 for (int i = 0; i < speedAndPitch.length; ++i) { 2508 final float speedStart = speedAndPitch[i][0][0]; 2509 final float pitchStart = speedAndPitch[i][0][1]; 2510 final float speedEnd = speedAndPitch[i][1][0]; 2511 final float pitchEnd = speedAndPitch[i][1][1]; 2512 final float speedInc = (speedEnd - speedStart) / testSteps; 2513 final float pitchInc = (pitchEnd - pitchStart) / testSteps; 2514 2515 PlaybackParams playbackParams = new PlaybackParams() 2516 .setPitch(pitchStart) 2517 .setSpeed(speedStart) 2518 .allowDefaults(); 2519 2520 // set track in infinite loop to be a sine generator 2521 track.setLoopPoints(0, frameCount, -1 /* loopCount */); // cleared by stop() 2522 track.play(); 2523 2524 Thread.sleep(300 /* millis */); // warm up track 2525 2526 int anticipatedPosition = track.getPlaybackHeadPosition(); 2527 long timeMs = SystemClock.elapsedRealtime(); 2528 final long startTimeMs = timeMs; 2529 for (int j = 0; j < testSteps; ++j) { 2530 // set playback settings 2531 final float pitch = playbackParams.getPitch(); 2532 final float speed = playbackParams.getSpeed(); 2533 2534 track.setPlaybackParams(playbackParams); 2535 2536 // verify that settings have changed 2537 PlaybackParams checkParams = track.getPlaybackParams(); 2538 assertEquals("pitch not changed correctly", 2539 pitch, checkParams.getPitch(), 0. /* delta */); 2540 assertEquals("speed not changed correctly", 2541 speed, checkParams.getSpeed(), 0. /* delta */); 2542 2543 // sleep for playback 2544 Thread.sleep(TEST_DELTA_MS); 2545 final long newTimeMs = SystemClock.elapsedRealtime(); 2546 // Log.d(TAG, "position[" + j + "] " + track.getPlaybackHeadPosition()); 2547 anticipatedPosition += 2548 playbackParams.getSpeed() * (newTimeMs - timeMs) * TEST_SR / 1000; 2549 timeMs = newTimeMs; 2550 playbackParams.setPitch(playbackParams.getPitch() + pitchInc); 2551 playbackParams.setSpeed(playbackParams.getSpeed() + speedInc); 2552 } 2553 final int endPosition = track.getPlaybackHeadPosition(); 2554 final int tolerance100MsInFrames = 100 * TEST_SR / 1000; 2555 Log.d(TAG, "Total playback time: " + (timeMs - startTimeMs)); 2556 assertEquals(TAG, anticipatedPosition, endPosition, tolerance100MsInFrames); 2557 track.stop(); 2558 2559 Thread.sleep(100 /* millis */); // distinct pause between each test 2560 } 2561 track.release(); 2562 } 2563 2564 // Test AudioTrack to ensure we can build after a failure. 
2565 @Test testAudioTrackBufferSize()2566 public void testAudioTrackBufferSize() throws Exception { 2567 // constants for test 2568 final String TEST_NAME = "testAudioTrackBufferSize"; 2569 2570 // use builder with parameters that should fail 2571 final int superBigBufferSize = 1 << 28; 2572 try { 2573 final AudioTrack track = new AudioTrack.Builder() 2574 .setBufferSizeInBytes(superBigBufferSize) 2575 .build(); 2576 track.release(); 2577 fail(TEST_NAME + ": should throw exception on failure"); 2578 } catch (UnsupportedOperationException e) { 2579 ; 2580 } 2581 2582 // we should be able to create again with minimum buffer size 2583 final int verySmallBufferSize = 2 * 3 * 4; // frame size multiples 2584 final AudioTrack track2 = new AudioTrack.Builder() 2585 .setBufferSizeInBytes(verySmallBufferSize) 2586 .build(); 2587 2588 final int observedState2 = track2.getState(); 2589 final int observedBufferSize2 = track2.getBufferSizeInFrames(); 2590 track2.release(); 2591 2592 // succeeds for minimum buffer size 2593 assertEquals(TEST_NAME + ": state", AudioTrack.STATE_INITIALIZED, observedState2); 2594 // should force the minimum size buffer which is > 0 2595 assertTrue(TEST_NAME + ": buffer frame count", observedBufferSize2 > 0); 2596 } 2597 2598 // Test AudioTrack to see if there are any problems with large frame counts. 2599 @Test testAudioTrackLargeFrameCount()2600 public void testAudioTrackLargeFrameCount() throws Exception { 2601 // constants for test 2602 final String TEST_NAME = "testAudioTrackLargeFrameCount"; 2603 final int[] BUFFER_SIZES = { 4294968, 42949680, 429496800, Integer.MAX_VALUE }; 2604 final int[] MODES = { AudioTrack.MODE_STATIC, AudioTrack.MODE_STREAM }; 2605 2606 for (int mode : MODES) { 2607 for (int bufferSizeInBytes : BUFFER_SIZES) { 2608 try { 2609 final AudioTrack track = new AudioTrack.Builder() 2610 .setAudioFormat(new AudioFormat.Builder() 2611 .setEncoding(AudioFormat.ENCODING_PCM_8BIT) 2612 .setSampleRate(44100) 2613 .setChannelMask(AudioFormat.CHANNEL_OUT_MONO) 2614 .build()) 2615 .setTransferMode(mode) 2616 .setBufferSizeInBytes(bufferSizeInBytes) // 1 byte == 1 frame 2617 .build(); 2618 track.release(); // OK to successfully complete 2619 } catch (UnsupportedOperationException e) { 2620 ; // OK to throw unsupported exception 2621 } 2622 } 2623 } 2624 } 2625 2626 @Test testSetNullPresentation()2627 public void testSetNullPresentation() throws Exception { 2628 final AudioTrack track = new AudioTrack.Builder().build(); 2629 assertThrows(IllegalArgumentException.class, () -> { 2630 track.setPresentation(null); 2631 }); 2632 } 2633 2634 @Test testAc3BuilderNoBufferSize()2635 public void testAc3BuilderNoBufferSize() throws Exception { 2636 AudioFormat format = new AudioFormat.Builder() 2637 .setEncoding(AudioFormat.ENCODING_AC3) 2638 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 2639 .setSampleRate(48000) 2640 .build(); 2641 try { 2642 AudioTrack audioTrack = new AudioTrack.Builder() 2643 .setAudioFormat(format) 2644 .setBufferSizeInBytes(100) 2645 .build(); 2646 audioTrack.release(); 2647 Thread.sleep(200); 2648 } catch (UnsupportedOperationException e) { 2649 // Do nothing. It's OK for a device to not support ac3 audio tracks. 2650 return; 2651 } 2652 // if ac3 audio tracks with set buffer size succeed, the builder should also succeed if the 2653 // buffer size isn't set, allowing the framework to report the recommended buffer size. 
2654 try { 2655 AudioTrack audioTrack = new AudioTrack.Builder() 2656 .setAudioFormat(format) 2657 .build(); 2658 audioTrack.release(); 2659 } catch (UnsupportedOperationException e) { 2660 // This builder should not fail as the first builder succeeded when setting buffer size 2661 fail("UnsupportedOperationException should not be thrown when setBufferSizeInBytes" 2662 + " is excluded from builder"); 2663 } 2664 } 2665 2666 @Test testSetPresentationDefaultTrack()2667 public void testSetPresentationDefaultTrack() throws Exception { 2668 final AudioTrack track = new AudioTrack.Builder().build(); 2669 assertEquals(AudioTrack.ERROR, track.setPresentation(createAudioPresentation())); 2670 } 2671 2672 @Test testIsDirectPlaybackSupported()2673 public void testIsDirectPlaybackSupported() throws Exception { 2674 // constants for test 2675 final String TEST_NAME = "testIsDirectPlaybackSupported"; 2676 // Default format leaves everything unspecified 2677 assertFalse(AudioTrack.isDirectPlaybackSupported( 2678 new AudioFormat.Builder().build(), 2679 new AudioAttributes.Builder().build())); 2680 // There is no requirement to support direct playback for this format, 2681 // so it's not possible to assert on the result, but at least the method 2682 // must execute with no exceptions. 2683 boolean isPcmStereo48kSupported = AudioTrack.isDirectPlaybackSupported( 2684 new AudioFormat.Builder() 2685 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 2686 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 2687 .setSampleRate(48000) 2688 .build(), 2689 new AudioAttributes.Builder().build()); 2690 log(TEST_NAME, "PCM Stereo 48 kHz: " + isPcmStereo48kSupported); 2691 } 2692 2693 @Test testMediaMetrics()2694 public void testMediaMetrics() throws Exception { 2695 if (!hasAudioOutput()) { 2696 return; 2697 } 2698 2699 AudioTrack track = null; 2700 try { 2701 final int TEST_SAMPLE_RATE = 44100; 2702 final int TEST_CHANNEL_MASK = AudioFormat.CHANNEL_OUT_STEREO; 2703 final int TEST_ENCODING = AudioFormat.ENCODING_PCM_16BIT; 2704 final AudioFormat format = new AudioFormat.Builder() 2705 .setSampleRate(TEST_SAMPLE_RATE) 2706 .setChannelMask(TEST_CHANNEL_MASK) 2707 .setEncoding(TEST_ENCODING) 2708 .build(); 2709 2710 final int TEST_USAGE = AudioAttributes.USAGE_MEDIA; 2711 final int TEST_CONTENT_TYPE = AudioAttributes.CONTENT_TYPE_MUSIC; 2712 final AudioAttributes attributes = new AudioAttributes.Builder() 2713 .setUsage(TEST_USAGE) 2714 .setContentType(TEST_CONTENT_TYPE) 2715 .build(); 2716 2717 // Setup a new audio track 2718 track = new AudioTrack.Builder() 2719 .setAudioFormat(format) 2720 .setAudioAttributes(attributes) 2721 .build(); 2722 2723 final PersistableBundle metrics = track.getMetrics(); 2724 assertNotNull("null metrics", metrics); 2725 2726 // The STREAMTYPE constant was generally not present in P, and if so 2727 // was incorrectly exposed as an integer. 
AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.STREAMTYPE, 2729 new String("AUDIO_STREAM_MUSIC")); 2730 AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.CONTENTTYPE, 2731 new String("AUDIO_CONTENT_TYPE_MUSIC")); 2732 AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.USAGE, 2733 new String("AUDIO_USAGE_MEDIA")); 2734 2735 // AudioTrack.MetricsConstants.SAMPLERATE, metrics doesn't exist 2736 // AudioTrack.MetricsConstants.CHANNELMASK, metrics doesn't exist 2737 2738 // TestApi: 2739 AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.SAMPLE_RATE, 2740 new Integer(track.getSampleRate())); 2741 AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.CHANNEL_MASK, 2742 new Long(TEST_CHANNEL_MASK >> 2)); 2743 AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.ENCODING, 2744 new String("AUDIO_FORMAT_PCM_16_BIT")); 2745 AudioHelper.assertMetricsKeyEquals(metrics, AudioTrack.MetricsConstants.FRAME_COUNT, 2746 new Integer(track.getBufferSizeInFrames())); 2747 2748 // TestApi: no particular value checking. 2749 AudioHelper.assertMetricsKey(metrics, AudioTrack.MetricsConstants.PORT_ID); 2750 AudioHelper.assertMetricsKey(metrics, AudioTrack.MetricsConstants.ATTRIBUTES); 2751 } finally { 2752 if (track != null) { 2753 track.release(); 2754 } 2755 } 2756 } 2757 2758 @Test testMaxAudioTracks()2759 public void testMaxAudioTracks() throws Exception { 2760 if (!hasAudioOutput()) { 2761 return; 2762 } 2763 2764 // The framework must not give more than MAX_TRACKS tracks per UID. 2765 final int MAX_TRACKS = 512; // an arbitrary large number > 40 2766 final int FRAMES = 1024; 2767 2768 final AudioTrack[] tracks = new AudioTrack[MAX_TRACKS]; 2769 final AudioTrack.Builder builder = new AudioTrack.Builder() 2770 .setAudioFormat(new AudioFormat.Builder() 2771 .setEncoding(AudioFormat.ENCODING_PCM_8BIT) 2772 .setSampleRate(8000) 2773 .setChannelMask(AudioFormat.CHANNEL_OUT_MONO) 2774 .build()) 2775 .setBufferSizeInBytes(FRAMES) 2776 .setTransferMode(AudioTrack.MODE_STATIC); 2777 2778 int n = 0; 2779 try { 2780 for (; n < MAX_TRACKS; ++n) { 2781 tracks[n] = builder.build(); 2782 } 2783 } catch (UnsupportedOperationException e) { 2784 ; // we expect this when we hit the uid track limit. 2785 } 2786 2787 // release all the tracks created.
2788 for (int i = 0; i < n; ++i) { 2789 tracks[i].release(); 2790 tracks[i] = null; 2791 } 2792 Log.d(TAG, "" + n + " tracks were created"); 2793 assertTrue("should be able to create at least one static track", n > 0); 2794 assertTrue("was able to create " + MAX_TRACKS + " tracks - that's too many!", 2795 n < MAX_TRACKS); 2796 } 2797 2798 @Test testTunerConfiguration()2799 public void testTunerConfiguration() throws Exception { 2800 if (!hasAudioOutput()) { 2801 return; 2802 } 2803 2804 assertThrows( 2805 IllegalArgumentException.class, 2806 () -> { 2807 final AudioTrack.TunerConfiguration badConfig = 2808 new AudioTrack.TunerConfiguration(-1 /* contentId */, 1 /* syncId */); 2809 }); 2810 2811 assertThrows( 2812 IllegalArgumentException.class, 2813 () -> { 2814 final AudioTrack.TunerConfiguration badConfig = 2815 new AudioTrack.TunerConfiguration(1 /* contentId*/, 0 /* syncId */); 2816 }); 2817 assertThrows( 2818 IllegalArgumentException.class, 2819 () -> { 2820 final AudioTrack track = new AudioTrack.Builder() 2821 .setEncapsulationMode(-1) 2822 .build(); 2823 track.release(); 2824 }); 2825 2826 assertThrows( 2827 IllegalArgumentException.class, 2828 () -> { 2829 final AudioTrack track = new AudioTrack.Builder() 2830 .setTunerConfiguration(null) 2831 .build(); 2832 track.release(); 2833 }); 2834 2835 // this should work. 2836 int[][] contentSyncPairs = { 2837 {1, 2}, 2838 {AudioTrack.TunerConfiguration.CONTENT_ID_NONE, 42}, 2839 }; 2840 for (int[] pair : contentSyncPairs) { 2841 final int contentId = pair[0]; 2842 final int syncId = pair[1]; 2843 final AudioTrack.TunerConfiguration tunerConfiguration = 2844 new AudioTrack.TunerConfiguration(contentId, syncId); 2845 2846 assertEquals("contentId must be set", contentId, tunerConfiguration.getContentId()); 2847 assertEquals("syncId must be set", syncId, tunerConfiguration.getSyncId()); 2848 2849 // this may fail on creation, not in any setters. 2850 AudioTrack track = null; 2851 try { 2852 track = new AudioTrack.Builder() 2853 .setEncapsulationMode(AudioTrack.ENCAPSULATION_MODE_NONE) 2854 .setTunerConfiguration(tunerConfiguration) 2855 .build(); 2856 } catch (UnsupportedOperationException e) { 2857 ; // creation failure is OK as TunerConfiguration requires HW support, 2858 // however other exception failures are not OK. 2859 } finally { 2860 if (track != null) { 2861 track.release(); 2862 } 2863 } 2864 } 2865 } 2866 2867 @Test testCodecFormatChangedListener()2868 public void testCodecFormatChangedListener() throws Exception { 2869 if (!hasAudioOutput()) { 2870 return; 2871 } 2872 2873 final AudioTrack audioTrack = new AudioTrack.Builder().build(); 2874 2875 assertThrows( 2876 NullPointerException.class, 2877 () -> { audioTrack.addOnCodecFormatChangedListener( 2878 null /* executor */, null /* listener */); }); 2879 2880 assertThrows( 2881 NullPointerException.class, 2882 () -> { audioTrack.removeOnCodecFormatChangedListener(null /* listener */); }); 2883 2884 2885 final AudioTrack.OnCodecFormatChangedListener listener = 2886 (AudioTrack track, AudioMetadataReadMap readMap) -> {}; 2887 2888 // add a synchronous executor. 
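        // (For reference, a method reference such as Runnable::run is an equivalent
        // same-thread Executor; the anonymous class below spells the behavior out.)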
2889 audioTrack.addOnCodecFormatChangedListener(new Executor() { 2890 @Override 2891 public void execute(Runnable r) { 2892 r.run(); 2893 } 2894 }, listener); 2895 audioTrack.removeOnCodecFormatChangedListener(listener); 2896 audioTrack.release(); 2897 } 2898 2899 @Test testDualMonoMode()2900 public void testDualMonoMode() throws Exception { 2901 if (!hasAudioOutput()) { 2902 return; 2903 } 2904 2905 final AudioTrack audioTrack = new AudioTrack.Builder().build(); 2906 2907 // Note that the output device may not support Dual Mono mode. 2908 // The following path should always succeed. 2909 audioTrack.setDualMonoMode(AudioTrack.DUAL_MONO_MODE_OFF); 2910 assertEquals(AudioTrack.DUAL_MONO_MODE_OFF, audioTrack.getDualMonoMode()); 2911 2912 // throws IAE on invalid argument. 2913 assertThrows( 2914 IllegalArgumentException.class, 2915 () -> { audioTrack.setDualMonoMode(-1); } 2916 ); 2917 2918 // check behavior after release. 2919 audioTrack.release(); 2920 assertThrows( 2921 IllegalStateException.class, 2922 () -> { audioTrack.setDualMonoMode(AudioTrack.DUAL_MONO_MODE_OFF); } 2923 ); 2924 assertEquals(AudioTrack.DUAL_MONO_MODE_OFF, audioTrack.getDualMonoMode()); 2925 } 2926 2927 @Test testAudioDescriptionMixLevel()2928 public void testAudioDescriptionMixLevel() throws Exception { 2929 if (!hasAudioOutput()) { 2930 return; 2931 } 2932 2933 final AudioTrack audioTrack = new AudioTrack.Builder().build(); 2934 2935 // Note that the output device may not support Audio Description Mix Level. 2936 // The following path should always succeed. 2937 audioTrack.setAudioDescriptionMixLeveldB(Float.NEGATIVE_INFINITY); 2938 assertEquals(Float.NEGATIVE_INFINITY, 2939 audioTrack.getAudioDescriptionMixLeveldB(), 0.f /*delta*/); 2940 2941 // throws IAE on invalid argument. 2942 assertThrows( 2943 IllegalArgumentException.class, 2944 () -> { audioTrack.setAudioDescriptionMixLeveldB(1e6f); } 2945 ); 2946 2947 // check behavior after release. 2948 audioTrack.release(); 2949 assertThrows( 2950 IllegalStateException.class, 2951 () -> { audioTrack.setAudioDescriptionMixLeveldB(0.f); } 2952 ); 2953 assertEquals(Float.NEGATIVE_INFINITY, 2954 audioTrack.getAudioDescriptionMixLeveldB(), 0.f /*delta*/); 2955 } 2956 2957 @Test testSetLogSessionId()2958 public void testSetLogSessionId() throws Exception { 2959 if (!hasAudioOutput()) { 2960 return; 2961 } 2962 AudioTrack audioTrack = null; 2963 try { 2964 audioTrack = new AudioTrack.Builder() 2965 .setAudioFormat(new AudioFormat.Builder() 2966 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 2967 .setChannelMask(AudioFormat.CHANNEL_OUT_MONO) 2968 .build()) 2969 .build(); 2970 audioTrack.setLogSessionId(LogSessionId.LOG_SESSION_ID_NONE); // should not throw. 2971 assertEquals(LogSessionId.LOG_SESSION_ID_NONE, audioTrack.getLogSessionId()); 2972 2973 final String ARBITRARY_MAGIC = "0123456789abcdef"; // 16 char Base64Url. 2974 audioTrack.setLogSessionId(new LogSessionId(ARBITRARY_MAGIC)); 2975 assertEquals(new LogSessionId(ARBITRARY_MAGIC), audioTrack.getLogSessionId()); 2976 2977 final MediaMetricsManager mediaMetricsManager = 2978 getContext().getSystemService(MediaMetricsManager.class); 2979 final PlaybackSession playbackSession = mediaMetricsManager.createPlaybackSession(); 2980 audioTrack.setLogSessionId(playbackSession.getSessionId()); 2981 assertEquals(playbackSession.getSessionId(), audioTrack.getLogSessionId()); 2982 2983 // write some data to generate a log entry. 
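        // The buffer holds getSampleRate() / 2 mono 16-bit samples, i.e. roughly half a
        // second of silence, which is enough to exercise the playback path.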
2984 short data[] = new short[audioTrack.getSampleRate() / 2]; 2985 audioTrack.play(); 2986 audioTrack.write(data, 0 /* offsetInShorts */, data.length); 2987 audioTrack.stop(); 2988 Thread.sleep(500 /* millis */); // drain 2989 2990 // Also can check the mediametrics dumpsys to validate logs generated. 2991 } finally { 2992 if (audioTrack != null) { 2993 audioTrack.release(); 2994 } 2995 } 2996 } 2997 2998 /* 2999 * The following helpers and tests are used to test setting 3000 * and getting the start threshold in frames. 3001 * 3002 * See Android CDD 5.6 [C-1-2] Cold output latency 3003 */ 3004 private static final int START_THRESHOLD_SLEEP_MILLIS = 500; 3005 3006 /** 3007 * Helper test that validates setting the start threshold. 3008 * 3009 * @param track 3010 * @param startThresholdInFrames 3011 * @throws Exception 3012 */ validateSetStartThresholdInFrames( AudioTrack track, int startThresholdInFrames)3013 private static void validateSetStartThresholdInFrames( 3014 AudioTrack track, int startThresholdInFrames) throws Exception { 3015 assertEquals(startThresholdInFrames, 3016 track.setStartThresholdInFrames(startThresholdInFrames)); 3017 assertEquals(startThresholdInFrames, 3018 track.getStartThresholdInFrames()); 3019 } 3020 3021 /** 3022 * Helper that tests that the head position eventually equals expectedFrames. 3023 * 3024 * Exponential backoff to ~ 2 x START_THRESHOLD_SLEEP_MILLIS 3025 * 3026 * @param track 3027 * @param expectedFrames 3028 * @param message 3029 * @throws Exception 3030 */ validatePlaybackHeadPosition( AudioTrack track, int expectedFrames, String message)3031 private static void validatePlaybackHeadPosition( 3032 AudioTrack track, int expectedFrames, String message) throws Exception { 3033 int cumulativeMillis = 0; 3034 int playbackHeadPosition = 0; 3035 for (double testMillis = START_THRESHOLD_SLEEP_MILLIS * 0.125; 3036 testMillis <= START_THRESHOLD_SLEEP_MILLIS; // this is exact for IEEE binary double 3037 testMillis *= 2.) { 3038 Thread.sleep((int)testMillis); 3039 playbackHeadPosition = track.getPlaybackHeadPosition(); 3040 if (playbackHeadPosition == expectedFrames) return; 3041 cumulativeMillis += (int)testMillis; 3042 } 3043 fail(message + ": expected track playbackHeadPosition: " + expectedFrames 3044 + " actual playbackHeadPosition: " + playbackHeadPosition 3045 + " wait time: " + cumulativeMillis + "ms"); 3046 } 3047 3048 /** 3049 * Helper test that sets the start threshold to frames, and validates 3050 * writing exactly frames amount of data is needed to start the 3051 * track streaming. 3052 * 3053 * @param track 3054 * @param frames 3055 * @throws Exception 3056 */ validateWriteStartsStreamWithSetStartThreshold( AudioTrack track, int frames)3057 private static void validateWriteStartsStreamWithSetStartThreshold( 3058 AudioTrack track, int frames) throws Exception { 3059 // Set our threshold to frames. 3060 validateSetStartThresholdInFrames(track, frames); 3061 3062 validateWriteStartsStream(track, frames); 3063 } 3064 3065 /** 3066 * Helper test that validates writing exactly frames amount of data is needed to start the 3067 * track streaming. 3068 * 3069 * @param track 3070 * @param frames 3071 * @throws Exception 3072 */ validateWriteStartsStream(AudioTrack track, int frames)3073 private static void validateWriteStartsStream(AudioTrack track, int frames) throws Exception { 3074 assertEquals(1, track.getChannelCount()); // must be MONO 3075 final short[] data = new short[frames]; 3076 3077 // The track must be idle/underrun or the test will fail. 
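        // Record the current head position; it must stay put until a start-threshold's
        // worth of frames has been written below.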
3078 int expectedFrames = track.getPlaybackHeadPosition(); 3079 3080 Thread.sleep(START_THRESHOLD_SLEEP_MILLIS); 3081 assertEquals("Streaming doesn't start if the start threshold is larger than buffered data", 3082 expectedFrames, track.getPlaybackHeadPosition()); 3083 3084 // Write a small amount of data, this isn't enough to start the track. 3085 final int PARTIAL_WRITE_IN_FRAMES = frames - 1; 3086 track.write(data, 0 /* offsetInShorts */, PARTIAL_WRITE_IN_FRAMES); 3087 3088 // Ensure the track hasn't started. 3089 Thread.sleep(START_THRESHOLD_SLEEP_MILLIS); 3090 assertEquals("Track needs enough frames to start", 3091 expectedFrames, track.getPlaybackHeadPosition()); 3092 3093 // Write exactly threshold frames out, this should kick the playback off. 3094 track.write(data, 0 /* offsetInShorts */, data.length - PARTIAL_WRITE_IN_FRAMES); 3095 3096 // Verify that we have processed the data now. 3097 expectedFrames += frames; 3098 Thread.sleep(frames * 1000L / track.getSampleRate()); // accommodate for #frames. 3099 validatePlaybackHeadPosition(track, expectedFrames, 3100 "Writing buffer data to start threshold should start streaming"); 3101 } 3102 3103 /** 3104 * Helper that tests reducing the start threshold to frames will start track 3105 * streaming when frames of data are written to it. (Presumes the 3106 * previous start threshold was greater than frames). 3107 * 3108 * @param track 3109 * @param frames 3110 * @throws Exception 3111 */ validateSetStartThresholdStartsStream( AudioTrack track, int frames)3112 private static void validateSetStartThresholdStartsStream( 3113 AudioTrack track, int frames) throws Exception { 3114 assertTrue(track.getStartThresholdInFrames() > frames); 3115 assertEquals(1, track.getChannelCount()); // must be MONO 3116 final short[] data = new short[frames]; 3117 3118 // The track must be idle/underrun or the test will fail. 3119 int expectedFrames = track.getPlaybackHeadPosition(); 3120 3121 // This write is too small for now. 3122 track.write(data, 0 /* offsetInShorts */, data.length); 3123 3124 Thread.sleep(START_THRESHOLD_SLEEP_MILLIS); 3125 assertEquals("Track needs enough frames to start", 3126 expectedFrames, track.getPlaybackHeadPosition()); 3127 3128 // Reduce our start threshold. This should start streaming. 3129 validateSetStartThresholdInFrames(track, frames); 3130 3131 // Verify that we have processed the data now. 3132 expectedFrames += frames; 3133 Thread.sleep(frames * 1000L / track.getSampleRate()); // accommodate for #frames. 3134 validatePlaybackHeadPosition(track, expectedFrames, 3135 "Changing start threshold to buffer data level should start streaming"); 3136 } 3137 3138 // Tests the default fill buffer value to start playing an AudioTrack 3139 @Test testDefaultStartThresholdInFrames()3140 public void testDefaultStartThresholdInFrames() throws Exception { 3141 if (!hasAudioOutput()) { 3142 return; 3143 } 3144 3145 AudioTrack audioTrack = null; 3146 try { 3147 // Build our audiotrack 3148 audioTrack = new AudioTrack.Builder() 3149 .setAudioFormat(new AudioFormat.Builder() 3150 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 3151 .setChannelMask(AudioFormat.CHANNEL_OUT_MONO) 3152 .build()) 3153 .build(); 3154 3155 // Start the AudioTrack. Now the track is waiting for data. 3156 audioTrack.play(); 3157 3158 validateWriteStartsStream(audioTrack, audioTrack.getStartThresholdInFrames()); 3159 } finally { 3160 if (audioTrack != null) { 3161 audioTrack.release(); 3162 } 3163 } 3164 } 3165 3166 // Start threshold levels that we check. 
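    // Illustrative sketch (not executed) of how the start-threshold API is driven by the
    // helpers above:
    //
    //     int granted = track.setStartThresholdInFrames(frames); // returns the applied value
    //     track.play();                      // stays idle until `frames` frames are queued
    //     track.write(data, 0, frames);      // reaching the threshold starts streaming
    //
    // LOW, MEDIUM and HIGH below map to 2 frames, half the buffer, and one frame short of
    // the full buffer respectively (see the switch in testStartThresholdInFrames).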
3167 private enum ThresholdLevel { LOW, MEDIUM, HIGH }; 3168 @Test testStartThresholdInFrames()3169 public void testStartThresholdInFrames() throws Exception { 3170 if (!hasAudioOutput()) { 3171 return; 3172 } 3173 3174 for (ThresholdLevel level : new ThresholdLevel[] { 3175 ThresholdLevel.LOW, ThresholdLevel.MEDIUM, ThresholdLevel.HIGH}) { 3176 AudioTrack audioTrack = null; 3177 try { 3178 // Build our audiotrack 3179 audioTrack = new AudioTrack.Builder() 3180 .setAudioFormat(new AudioFormat.Builder() 3181 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 3182 .setChannelMask(AudioFormat.CHANNEL_OUT_MONO) 3183 .build()) 3184 .build(); 3185 3186 // Initially the start threshold must be the same as the buffer size in frames. 3187 final int bufferSizeInFrames = audioTrack.getBufferSizeInFrames(); 3188 assertEquals("At start, getBufferSizeInFrames should equal getStartThresholdInFrames", 3189 bufferSizeInFrames, 3190 audioTrack.getStartThresholdInFrames()); 3191 3192 final int TARGET_THRESHOLD_IN_FRAMES; // threshold level to verify 3193 switch (level) { 3194 default: 3195 case LOW: 3196 TARGET_THRESHOLD_IN_FRAMES = 2; 3197 break; 3198 case MEDIUM: 3199 TARGET_THRESHOLD_IN_FRAMES = bufferSizeInFrames / 2; 3200 break; 3201 case HIGH: 3202 TARGET_THRESHOLD_IN_FRAMES = bufferSizeInFrames - 1; 3203 break; 3204 } 3205 3206 // Skip extreme cases that don't need testing. 3207 if (TARGET_THRESHOLD_IN_FRAMES < 2 3208 || TARGET_THRESHOLD_IN_FRAMES >= bufferSizeInFrames) continue; 3209 3210 // Start the AudioTrack. Now the track is waiting for data. 3211 audioTrack.play(); 3212 3213 validateWriteStartsStreamWithSetStartThreshold( 3214 audioTrack, TARGET_THRESHOLD_IN_FRAMES); 3215 3216 // Try a condition that requires buffers to be filled again. 3217 if (false) { 3218 // Only a deep underrun when the track becomes inactive requires a refill. 3219 // Disabled as this is dependent on underlying MixerThread timeouts. 3220 Thread.sleep(5000 /* millis */); 3221 } else { 3222 // Flushing will require a refill (this does not require timing). 3223 audioTrack.pause(); 3224 audioTrack.flush(); 3225 audioTrack.play(); 3226 } 3227 3228 // Check that reducing to a smaller threshold will start the track streaming. 3229 validateSetStartThresholdStartsStream(audioTrack, TARGET_THRESHOLD_IN_FRAMES - 1); 3230 } finally { 3231 if (audioTrack != null) { 3232 audioTrack.release(); 3233 } 3234 } 3235 } 3236 } 3237 3238 @Test testStartThresholdInFramesExceptions()3239 public void testStartThresholdInFramesExceptions() throws Exception { 3240 if (!hasAudioOutput()) { 3241 return; 3242 } 3243 AudioTrack audioTrack = null; 3244 try { 3245 // Build our audiotrack 3246 audioTrack = new AudioTrack.Builder() 3247 .setAudioFormat(new AudioFormat.Builder() 3248 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 3249 .setChannelMask(AudioFormat.CHANNEL_OUT_MONO) 3250 .build()) 3251 .build(); 3252 3253 // Test setting invalid start threshold. 3254 final AudioTrack track = audioTrack; // make final for lambda 3255 assertThrows(IllegalArgumentException.class, () -> { 3256 track.setStartThresholdInFrames(-1 /* startThresholdInFrames */); 3257 }); 3258 } finally { 3259 if (audioTrack != null) { 3260 audioTrack.release(); 3261 } 3262 } 3263 // If we're here audioTrack should be non-null but released, 3264 // so calls should return an IllegalStateException. 
3265 final AudioTrack track = audioTrack; // make final for lambda 3266 assertThrows(IllegalStateException.class, () -> { 3267 track.getStartThresholdInFrames(); 3268 }); 3269 assertThrows(IllegalStateException.class, () -> { 3270 track.setStartThresholdInFrames(1 /* setStartThresholdInFrames */); 3271 }); 3272 } 3273 3274 /** 3275 * Tests height channel masks and higher channel counts 3276 * used in immersive AudioTrack streaming. 3277 * 3278 * @throws Exception 3279 */ 3280 @Test testImmersiveStreaming()3281 public void testImmersiveStreaming() throws Exception { 3282 if (!hasAudioOutput()) { 3283 return; 3284 } 3285 3286 final String TEST_NAME = "testImmersiveStreaming"; 3287 final int TEST_FORMAT_ARRAY[] = { 3288 AudioFormat.ENCODING_PCM_16BIT, 3289 AudioFormat.ENCODING_PCM_FLOAT, 3290 }; 3291 final int TEST_SR_ARRAY[] = { 3292 48000, // do not set too high - costly in memory. 3293 }; 3294 final int TEST_CONF_ARRAY[] = { 3295 AudioFormat.CHANNEL_OUT_5POINT1POINT2, // 8 ch (includes height channels vs 7.1). 3296 AudioFormat.CHANNEL_OUT_7POINT1POINT2, // 10ch 3297 AudioFormat.CHANNEL_OUT_7POINT1POINT4, // 12 ch 3298 AudioFormat.CHANNEL_OUT_9POINT1POINT4, // 14 ch 3299 AudioFormat.CHANNEL_OUT_9POINT1POINT6, // 16 ch 3300 AudioFormat.CHANNEL_OUT_22POINT2, // 24 ch 3301 }; 3302 3303 final int TEST_MODE = AudioTrack.MODE_STREAM; 3304 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 3305 final float TEST_SWEEP = 0; // sine wave only 3306 final boolean TEST_IS_LOW_RAM_DEVICE = false; 3307 for (int TEST_FORMAT : TEST_FORMAT_ARRAY) { 3308 double frequency = 400; // Note: frequency changes for each test 3309 for (int TEST_SR : TEST_SR_ARRAY) { 3310 for (int TEST_CONF : TEST_CONF_ARRAY) { 3311 if (AudioFormat.channelCountFromOutChannelMask(TEST_CONF) 3312 > AudioSystem.OUT_CHANNEL_COUNT_MAX) { 3313 continue; // Skip if the channel count exceeds framework capabilities. 3314 } 3315 playOnceStreamData(TEST_NAME, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP, 3316 TEST_IS_LOW_RAM_DEVICE, TEST_FORMAT, frequency, TEST_SR, TEST_CONF, 3317 WAIT_MSEC, 0 /* mask */); 3318 frequency += 50; // increment test tone frequency 3319 } 3320 } 3321 } 3322 } 3323 3324 @Test testImmersiveChannelIndex()3325 public void testImmersiveChannelIndex() throws Exception { 3326 if (!hasAudioOutput()) { 3327 return; 3328 } 3329 3330 final String TEST_NAME = "testImmersiveChannelIndex"; 3331 final int TEST_FORMAT_ARRAY[] = { 3332 AudioFormat.ENCODING_PCM_FLOAT, 3333 }; 3334 final int TEST_SR_ARRAY[] = { 3335 48000, // do not set too high - costly in memory. 3336 }; 3337 final int MAX_CHANNEL_BIT = 1 << (AudioSystem.FCC_24 - 1); // highest allowed channel. 3338 final int TEST_CONF_ARRAY[] = { 3339 MAX_CHANNEL_BIT, // likely silent - no physical device on top channel. 3340 MAX_CHANNEL_BIT | 1, // first channel will likely have physical device. 
3341 (1 << AudioSystem.OUT_CHANNEL_COUNT_MAX) - 1, 3342 }; 3343 final int TEST_WRITE_MODE_ARRAY[] = { 3344 AudioTrack.WRITE_BLOCKING, 3345 AudioTrack.WRITE_NON_BLOCKING, 3346 }; 3347 final double TEST_SWEEP = 0; 3348 final int TEST_TRANSFER_MODE = AudioTrack.MODE_STREAM; 3349 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 3350 3351 double frequency = 200; // frequency changes for each test 3352 for (int TEST_FORMAT : TEST_FORMAT_ARRAY) { 3353 for (int TEST_SR : TEST_SR_ARRAY) { 3354 for (int TEST_WRITE_MODE : TEST_WRITE_MODE_ARRAY) { 3355 for (int useDirect = 0; useDirect < 2; ++useDirect) { 3356 for (int TEST_CONF : TEST_CONF_ARRAY) { 3357 // put TEST_CONF in the inner loop to avoid 3358 // back-to-back creation of large tracks. 3359 playOnceStreamByteBuffer( 3360 TEST_NAME, frequency, TEST_SWEEP, 3361 TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 3362 TEST_TRANSFER_MODE, TEST_WRITE_MODE, 3363 true /* useChannelIndex */, useDirect != 0); 3364 frequency += 30; // increment test tone frequency 3365 } 3366 } 3367 } 3368 } 3369 } 3370 } 3371 3372 /** 3373 * Verifies downmixer works with different AudioTrack surround channel masks. 3374 * 3375 * Also a listening test: on a stereo output device, you should hear sine wave tones 3376 * instead of silence if the downmixer is working. 3377 * 3378 * @throws Exception 3379 */ 3380 @Test testDownmix()3381 public void testDownmix() throws Exception { 3382 if (!hasAudioOutput()) { 3383 return; 3384 } 3385 3386 final String TEST_NAME = "testDownmix"; 3387 final int TEST_FORMAT_ARRAY[] = { 3388 // AudioFormat.ENCODING_PCM_8BIT, // sounds a bit tinny 3389 AudioFormat.ENCODING_PCM_16BIT, 3390 AudioFormat.ENCODING_PCM_FLOAT, 3391 }; 3392 final int TEST_SR_ARRAY[] = { 3393 48000, 3394 }; 3395 final int TEST_CONF_ARRAY[] = { 3396 // This test will play back FRONT_WIDE_LEFT, then FRONT_WIDE_RIGHT. 3397 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT | 3398 AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT | AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT, 3399 }; 3400 3401 final int TEST_MODE = AudioTrack.MODE_STREAM; 3402 final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC; 3403 final float TEST_SWEEP = 0; // sine wave only 3404 final boolean TEST_IS_LOW_RAM_DEVICE = false; 3405 for (int TEST_FORMAT : TEST_FORMAT_ARRAY) { 3406 double frequency = 400; // Note: frequency changes for each test 3407 for (int TEST_SR : TEST_SR_ARRAY) { 3408 for (int TEST_CONF : TEST_CONF_ARRAY) { 3409 // Remove the front left and front right channels. 3410 int signalMask = TEST_CONF & ~(AudioFormat.CHANNEL_OUT_FRONT_LEFT 3411 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT); 3412 // Play all the "surround channels" in the mask individually 3413 // at different frequencies. 3414 while (signalMask != 0) { 3415 final int lowbit = signalMask & -signalMask; 3416 playOnceStreamData(TEST_NAME, TEST_MODE, TEST_STREAM_TYPE, TEST_SWEEP, 3417 TEST_IS_LOW_RAM_DEVICE, TEST_FORMAT, frequency, TEST_SR, 3418 TEST_CONF, WAIT_MSEC, lowbit); 3419 signalMask -= lowbit; 3420 frequency += 50; // increment test tone frequency 3421 } 3422 } 3423 } 3424 } 3425 } 3426 3427 /** 3428 * Ensure AudioTrack.getMinBufferSize invalid arguments return BAD_VALUE instead 3429 * of throwing exception. 
3430 * 3431 * @throws Exception 3432 */ 3433 @Test testInvalidMinBufferSize()3434 public void testInvalidMinBufferSize() throws Exception { 3435 int TEST_SAMPLE_RATE = 24000; 3436 int TEST_CHANNEL_CONFIGURATION = AudioFormat.CHANNEL_OUT_STEREO; 3437 int TEST_ENCODING = AudioFormat.ENCODING_PCM_16BIT; 3438 3439 for (int i = 1; i < 8; ++i) { 3440 int minBuffSize = AudioTrack.getMinBufferSize( 3441 (i & 1) != 0 ? 0 : TEST_SAMPLE_RATE, 3442 (i & 2) != 0 ? AudioFormat.CHANNEL_INVALID : TEST_CHANNEL_CONFIGURATION, 3443 (i & 4) != 0 ? AudioFormat.ENCODING_INVALID :TEST_ENCODING); 3444 assertEquals("Invalid configuration " + i + " should return ERROR_BAD_VALUE", 3445 AudioTrack.ERROR_BAD_VALUE, minBuffSize); 3446 } 3447 } 3448 3449 /** 3450 * Test AudioTrack Builder error handling. 3451 * 3452 * @throws Exception 3453 */ 3454 @Test testAudioTrackBuilderError()3455 public void testAudioTrackBuilderError() throws Exception { 3456 if (!hasAudioOutput()) { 3457 return; 3458 } 3459 3460 final AudioTrack[] audioTrack = new AudioTrack[1]; // pointer to audio track. 3461 final int BIGNUM = Integer.MAX_VALUE; // large value that should be invalid. 3462 final int INVALID_SESSION_ID = 1024; // can never occur (wrong type in 3 lsbs) 3463 final int INVALID_CHANNEL_MASK = -1; 3464 3465 try { 3466 // NOTE: 3467 // Tuner Configuration builder error tested in testTunerConfiguration (same file). 3468 // AudioAttributes tested in AudioAttributesTest#testAudioAttributesBuilderError. 3469 // AudioFormat tested in AudioFormatTest#testAudioFormatBuilderError. 3470 3471 // We must be able to create the AudioTrack. 3472 audioTrack[0] = new AudioTrack.Builder().build(); 3473 audioTrack[0].release(); 3474 3475 // Out of bounds buffer size. A large size will fail in AudioTrack creation. 3476 assertThrows(UnsupportedOperationException.class, () -> { 3477 audioTrack[0] = new AudioTrack.Builder() 3478 .setBufferSizeInBytes(BIGNUM) 3479 .build(); 3480 }); 3481 3482 // 0 and negative buffer size throw IllegalArgumentException 3483 for (int bufferSize : new int[] {-BIGNUM, -1, 0}) { 3484 assertThrows(IllegalArgumentException.class, () -> { 3485 audioTrack[0] = new AudioTrack.Builder() 3486 .setBufferSizeInBytes(bufferSize) 3487 .build(); 3488 }); 3489 } 3490 3491 assertThrows(IllegalArgumentException.class, () -> { 3492 audioTrack[0] = new AudioTrack.Builder() 3493 .setEncapsulationMode(BIGNUM) 3494 .build(); 3495 }); 3496 3497 assertThrows(IllegalArgumentException.class, () -> { 3498 audioTrack[0] = new AudioTrack.Builder() 3499 .setPerformanceMode(BIGNUM) 3500 .build(); 3501 }); 3502 3503 // Invalid session id that is positive. 3504 // (logcat error message vague) 3505 assertThrows(UnsupportedOperationException.class, () -> { 3506 audioTrack[0] = new AudioTrack.Builder() 3507 .setSessionId(INVALID_SESSION_ID) 3508 .build(); 3509 }); 3510 3511 assertThrows(IllegalArgumentException.class, () -> { 3512 audioTrack[0] = new AudioTrack.Builder() 3513 .setTransferMode(BIGNUM) 3514 .build(); 3515 }); 3516 3517 // Specialty AudioTrack build errors. 3518 3519 // Bad audio encoding DRA expected unsupported. 3520 try { 3521 audioTrack[0] = new AudioTrack.Builder() 3522 .setAudioFormat(new AudioFormat.Builder() 3523 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 3524 .setEncoding(AudioFormat.ENCODING_DRA) 3525 .build()) 3526 .build(); 3527 // Don't throw an exception, maybe it is supported somehow, but warn. 3528 // Note: often specialty audio formats are offloaded (see setOffloadedPlayback). 
3529 // AudioTrackSurroundTest and AudioTrackOffloadedTest can be used as examples. 3530 Log.w(TAG, "ENCODING_DRA is expected to be unsupported"); 3531 audioTrack[0].release(); 3532 audioTrack[0] = null; 3533 } catch (UnsupportedOperationException e) { 3534 ; // OK expected 3535 } 3536 3537 // Sample rate out of bounds. 3538 // System levels caught on AudioFormat. 3539 assertThrows(IllegalArgumentException.class, () -> { 3540 audioTrack[0] = new AudioTrack.Builder() 3541 .setAudioFormat(new AudioFormat.Builder() 3542 .setSampleRate(BIGNUM) 3543 .build()) 3544 .build(); 3545 }); 3546 3547 // Invalid channel mask - caught here on use. 3548 assertThrows(IllegalArgumentException.class, () -> { 3549 audioTrack[0] = new AudioTrack.Builder() 3550 .setAudioFormat(new AudioFormat.Builder() 3551 .setChannelMask(INVALID_CHANNEL_MASK) 3552 .build()) 3553 .build(); 3554 }); 3555 } finally { 3556 // Did we successfully complete for some reason but did not 3557 // release? 3558 if (audioTrack[0] != null) { 3559 audioTrack[0].release(); 3560 audioTrack[0] = null; 3561 } 3562 } 3563 } 3564 3565 /* Do not run in JB-MR1. will be re-opened in the next platform release. 3566 public void testResourceLeakage() throws Exception { 3567 final int BUFFER_SIZE = 600 * 1024; 3568 ByteBuffer data = ByteBuffer.allocate(BUFFER_SIZE); 3569 for (int i = 0; i < 10; i++) { 3570 Log.i(TAG, "testResourceLeakage round " + i); 3571 data.rewind(); 3572 AudioTrack track = new AudioTrack(AudioManager.STREAM_VOICE_CALL, 3573 44100, 3574 AudioFormat.CHANNEL_OUT_STEREO, 3575 AudioFormat.ENCODING_PCM_16BIT, 3576 data.capacity(), 3577 AudioTrack.MODE_STREAM); 3578 assertTrue(track != null); 3579 track.write(data.array(), 0, data.capacity()); 3580 track.play(); 3581 Thread.sleep(100); 3582 track.stop(); 3583 track.release(); 3584 } 3585 } 3586 */ 3587 3588 /* MockAudioTrack allows testing of protected getNativeFrameCount() and setState(). */ 3589 private class MockAudioTrack extends AudioTrack { 3590 MockAudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode)3591 public MockAudioTrack(int streamType, int sampleRateInHz, int channelConfig, 3592 int audioFormat, int bufferSizeInBytes, int mode) throws IllegalArgumentException { 3593 super(streamType, sampleRateInHz, channelConfig, audioFormat, bufferSizeInBytes, mode); 3594 } 3595 setState(int state)3596 public void setState(int state) { 3597 super.setState(state); 3598 } 3599 getNativeFrameCount()3600 public int getNativeFrameCount() { 3601 return super.getNativeFrameCount(); 3602 } 3603 } 3604 createAudioPresentation()3605 private static AudioPresentation createAudioPresentation() { 3606 return (new AudioPresentation.Builder(42 /*presentationId*/)).build(); 3607 } 3608 getContext()3609 private static Context getContext() { 3610 return InstrumentationRegistry.getInstrumentation().getTargetContext(); 3611 } 3612 } 3613