/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.media.audio.cts;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import android.Manifest;
import android.app.ActivityManager;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioDeviceInfo;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioRecord.OnRecordPositionUpdateListener;
import android.media.AudioRecordingConfiguration;
import android.media.AudioSystem;
import android.media.AudioTimestamp;
import android.media.MediaFormat;
import android.media.MediaRecorder;
import android.media.MicrophoneDirection;
import android.media.MicrophoneInfo;
import android.media.cts.AudioHelper;
import android.media.cts.StreamUtils;
import android.media.metrics.LogSessionId;
import android.media.metrics.MediaMetricsManager;
import android.media.metrics.RecordingSession;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.os.PersistableBundle;
import android.os.Process;
import android.os.SystemClock;
import android.platform.test.annotations.Presubmit;
import android.util.Log;

import androidx.test.InstrumentationRegistry;
import androidx.test.runner.AndroidJUnit4;

import com.android.compatibility.common.util.CddTest;
import com.android.compatibility.common.util.DeviceReportLog;
import com.android.compatibility.common.util.NonMainlineTest;
import com.android.compatibility.common.util.ResultType;
import com.android.compatibility.common.util.ResultUnit;
import com.android.compatibility.common.util.SystemUtil;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ShortBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.function.BiFunction;

@NonMainlineTest
@RunWith(AndroidJUnit4.class)
public class AudioRecordTest {
    private final static String TAG = "AudioRecordTest";
    private static final String REPORT_LOG_NAME = "CtsMediaAudioTestCases";
    private AudioRecord mAudioRecord;
    private AudioManager mAudioManager;
    private static final int SAMPLING_RATE_HZ = 44100;
    private boolean mIsOnMarkerReachedCalled;
    private boolean mIsOnPeriodicNotificationCalled;
    private boolean mIsHandleMessageCalled;
    private Looper mLooper;
    // For doTest
    private int mMarkerPeriodInFrames;
    private int mMarkerPosition;
    private Handler mHandler = new Handler(Looper.getMainLooper()) {
        @Override
        public void handleMessage(Message msg) {
            mIsHandleMessageCalled = true;
            super.handleMessage(msg);
        }
    };
    private static final int RECORD_DURATION_MS = 500;
    private static final int TEST_TIMING_TOLERANCE_MS = 70;

    @Before
    public void setUp() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioManager = InstrumentationRegistry.getInstrumentation()
                .getContext().getSystemService(AudioManager.class);
        /*
         * InstrumentationTestRunner.onStart() calls Looper.prepare(), which creates a looper
         * for the current thread. However, since we don't actually call loop() in the test,
         * any messages queued with that looper will never be consumed. Therefore, we must
         * create the instance in another thread, either without a looper, so the main looper is
         * used, or with an active looper.
         */
        Thread t = new Thread() {
            @Override
            public void run() {
                Looper.prepare();
                mLooper = Looper.myLooper();
                synchronized(this) {
                    mAudioRecord = new AudioRecord.Builder()
                            .setAudioFormat(new AudioFormat.Builder()
                                    .setSampleRate(SAMPLING_RATE_HZ)
                                    .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                                    .setChannelMask(AudioFormat.CHANNEL_IN_MONO).build())
                            .setAudioSource(MediaRecorder.AudioSource.DEFAULT)
                            .setBufferSizeInBytes(
                                    AudioRecord.getMinBufferSize(SAMPLING_RATE_HZ,
                                            AudioFormat.CHANNEL_IN_MONO,
                                            AudioFormat.ENCODING_PCM_16BIT) * 10)
                            .build();
                    this.notify();
                }
                Looper.loop();
            }
        };
        synchronized(t) {
            t.start(); // will block until we wait
            t.wait();
        }
        assertNotNull(mAudioRecord);
    }

    @After
    public void tearDown() throws Exception {
        if (hasMicrophone()) {
            mAudioRecord.release();
            mLooper.quit();
        }
    }

    private void reset() {
        mIsOnMarkerReachedCalled = false;
        mIsOnPeriodicNotificationCalled = false;
        mIsHandleMessageCalled = false;
    }

    @Test
    public void testAudioRecordProperties() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        assertEquals(AudioFormat.ENCODING_PCM_16BIT, mAudioRecord.getAudioFormat());
        assertEquals(MediaRecorder.AudioSource.DEFAULT, mAudioRecord.getAudioSource());
        assertEquals(1, mAudioRecord.getChannelCount());
        assertEquals(AudioFormat.CHANNEL_IN_MONO,
                mAudioRecord.getChannelConfiguration());
        assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());
        assertEquals(SAMPLING_RATE_HZ, mAudioRecord.getSampleRate());
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());

        int bufferSize = AudioRecord.getMinBufferSize(SAMPLING_RATE_HZ,
                AudioFormat.CHANNEL_CONFIGURATION_DEFAULT, AudioFormat.ENCODING_PCM_16BIT);
        assertTrue(bufferSize > 0);
    }

    @Test
    public void testAudioRecordOP() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final int SLEEP_TIME = 10;
        final int RECORD_TIME = 5000;
        assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());

        int markerInFrames = mAudioRecord.getSampleRate() / 2;
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setNotificationMarkerPosition(markerInFrames));
        assertEquals(markerInFrames, mAudioRecord.getNotificationMarkerPosition());
        int periodInFrames = mAudioRecord.getSampleRate();
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setPositionNotificationPeriod(periodInFrames));
        assertEquals(periodInFrames, mAudioRecord.getPositionNotificationPeriod());
        OnRecordPositionUpdateListener listener = new OnRecordPositionUpdateListener() {

            public void onMarkerReached(AudioRecord recorder) {
                mIsOnMarkerReachedCalled = true;
            }

            public void onPeriodicNotification(AudioRecord recorder) {
                mIsOnPeriodicNotificationCalled = true;
            }
        };
        mAudioRecord.setRecordPositionUpdateListener(listener);

        // use byte array as buffer
        final int BUFFER_SIZE = 102400;
        byte[] byteData = new byte[BUFFER_SIZE];
        long time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use short array as buffer
        short[] shortData = new short[BUFFER_SIZE];
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(shortData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use ByteBuffer as buffer
        ByteBuffer byteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteBuffer, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use handler
        final Handler handler = new Handler(Looper.getMainLooper()) {
            @Override
            public void handleMessage(Message msg) {
                mIsHandleMessageCalled = true;
                super.handleMessage(msg);
            }
        };

        mAudioRecord.setRecordPositionUpdateListener(listener, handler);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        // The handler argument is only ever used for getting the associated Looper
        assertFalse(mIsHandleMessageCalled);

        mAudioRecord.release();
        assertEquals(AudioRecord.STATE_UNINITIALIZED, mAudioRecord.getState());
    }

    @Test
    public void testAudioRecordResamplerMono8Bit() throws Exception {
        doTest("resampler_mono_8bit", true /*localRecord*/, false /*customHandler*/,
                1 /*periodsPerSecond*/, 1 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 88200 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_8BIT);
    }

    @Test
    public void testAudioRecordResamplerStereo8Bit() throws Exception {
        doTest("resampler_stereo_8bit", true /*localRecord*/, false /*customHandler*/,
                0 /*periodsPerSecond*/, 3 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 45000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_8BIT);
    }

    @Presubmit
    @Test
    public void testAudioRecordLocalMono16BitShort() throws Exception {
        doTest("local_mono_16bit_short", true /*localRecord*/, false /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 8000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, 500 /*TEST_TIME_MS*/);
    }

    @Test
    public void testAudioRecordLocalMono16Bit() throws Exception {
        doTest("local_mono_16bit", true /*localRecord*/, false /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 8000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testAudioRecordStereo16Bit() throws Exception {
        doTest("stereo_16bit", false /*localRecord*/, false /*customHandler*/,
                2 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 17000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testAudioRecordMonoFloat() throws Exception {
        doTest("mono_float", false /*localRecord*/, true /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 32000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    @Test
    public void testAudioRecordLocalNonblockingStereoFloat() throws Exception {
        doTest("local_nonblocking_stereo_float", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 48000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Audit modes work best with non-blocking mode
    @Test
    public void testAudioRecordAuditByteBufferResamplerStereoFloat() throws Exception {
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        doTest("audit_byte_buffer_resampler_stereo_float",
                false /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, false /*isChannelIndex*/, 96000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    @Test
    public void testAudioRecordAuditChannelIndexMonoFloat() throws Exception {
        doTest("audit_channel_index_mono_float", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 47000 /*TEST_SR*/,
                (1 << 0) /* 1 channel */, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Audit buffers can run out of space with high sample rate,
    // so keep the channels and pcm encoding low
    @Test
    public void testAudioRecordAuditChannelIndex2() throws Exception {
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        doTest("audit_channel_index_2", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 192000 /*TEST_SR*/,
                (1 << 0) | (1 << 2) /* 2 channels, gap in middle */,
                AudioFormat.ENCODING_PCM_8BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex5() throws Exception {
        doTest("audit_channel_index_5", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4) /* 5 channels */,
                AudioFormat.ENCODING_PCM_16BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    // This tests the maximum reported Mixed PCM channel capability
    // for AudioRecord and AudioTrack.
    @Test
    public void testAudioRecordAuditChannelIndexMax() throws Exception {
        // We skip this test for isLowRamDevice(s).
        // Otherwise if the build reports a high PCM channel count capability,
        // we expect this CTS test to work at 16kHz.
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        final int maxChannels = AudioSystem.OUT_CHANNEL_COUNT_MAX; // FCC_LIMIT
        doTest("audit_channel_index_max", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << maxChannels) - 1,
                AudioFormat.ENCODING_PCM_16BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex3() throws Exception {
        doTest("audit_channel_index_3", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << 0) | (1 << 1) | (1 << 2) /* 3 channels */,
                AudioFormat.ENCODING_PCM_24BIT_PACKED);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex1() throws Exception {
        doTest("audit_channel_index_1", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 24000 /*TEST_SR*/,
                (1 << 0) /* 1 channel */,
                AudioFormat.ENCODING_PCM_32BIT);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
    // an empty Builder matches the documentation / expected values
    @Test
    public void testAudioRecordBuilderDefault() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderDefault";
        // expected values below match the AudioRecord.Builder documentation
        final int expectedCapturePreset = MediaRecorder.AudioSource.DEFAULT;
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // use builder with default values
        final AudioRecord rec = new AudioRecord.Builder().build();
        // save results
        final int observedSource = rec.getAudioSource();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedEncoding = rec.getAudioFormat();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": default capture preset", expectedCapturePreset, observedSource);
        assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": default encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
    // an incomplete AudioFormat matches the documentation / expected values
    @Test
    public void testAudioRecordBuilderPartialFormat() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderPartialFormat";
        final int expectedRate = 16000;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // expected values below match the AudioRecord.Builder documentation
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        // use builder with a partial audio format
        final AudioRecord rec = new AudioRecord.Builder()
                .setAudioFormat(new AudioFormat.Builder().setSampleRate(expectedRate).build())
                .build();
        // save results
        final int observedRate = rec.getSampleRate();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedEncoding = rec.getAudioFormat();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
        assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": default encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord matches
    // the parameters used in the builder
    @Test
    public void testAudioRecordBuilderParams() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderParams";
        final int expectedRate = 8000;
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedChannelCount = 1;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedSource = MediaRecorder.AudioSource.VOICE_COMMUNICATION;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // use builder with expected parameters
        final AudioRecord rec = new AudioRecord.Builder()
                .setAudioFormat(new AudioFormat.Builder()
                        .setSampleRate(expectedRate)
                        .setChannelMask(expectedChannel)
                        .setEncoding(expectedEncoding)
                        .build())
                .setAudioSource(expectedSource)
                .build();
        // save results
        final int observedRate = rec.getSampleRate();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedChannelCount = rec.getChannelCount();
        final int observedEncoding = rec.getAudioFormat();
        final int observedSource = rec.getAudioSource();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
        assertEquals(TEST_NAME + ": configured channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": configured encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": implicit channel count", expectedChannelCount,
                observedChannelCount);
        assertEquals(TEST_NAME + ": configured source", expectedSource, observedSource);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder.setRequestHotwordStream, and hotword capture
    @Test
    public void testAudioRecordBuilderHotword() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // Verify typical behavior continues to work, and clearing works
        AudioRecord regularRecord = new AudioRecord.Builder()
                .setRequestHotwordStream(true)
                .setRequestHotwordStream(false)
                .build();

        assertEquals(regularRecord.getState(), AudioRecord.STATE_INITIALIZED);
        assertFalse(regularRecord.isHotwordStream());
        assertFalse(regularRecord.isHotwordLookbackStream());
        regularRecord.startRecording();
        regularRecord.read(ByteBuffer.allocateDirect(4096), 4096);
        regularRecord.stop();
        regularRecord.release();

        regularRecord = new AudioRecord.Builder()
                .setRequestHotwordLookbackStream(true)
                .setRequestHotwordLookbackStream(false)
                .build();

        assertEquals(regularRecord.getState(), AudioRecord.STATE_INITIALIZED);
        assertFalse(regularRecord.isHotwordStream());
        assertFalse(regularRecord.isHotwordLookbackStream());
        regularRecord.startRecording();
        regularRecord.read(ByteBuffer.allocateDirect(4096), 4096);
        regularRecord.stop();
        regularRecord.release();

        // Should fail due to incompatible arguments
        assertThrows(UnsupportedOperationException.class,
                () -> new AudioRecord.Builder()
                        .setRequestHotwordStream(true)
                        .setRequestHotwordLookbackStream(true)
                        .build());

        // Should fail due to permission issues
        assertThrows(UnsupportedOperationException.class,
                () -> new AudioRecord.Builder()
                        .setRequestHotwordStream(true)
                        .build());
        assertThrows(UnsupportedOperationException.class,
                () -> new AudioRecord.Builder()
                        .setRequestHotwordLookbackStream(true)
                        .build());

        // Adopt permissions to access query APIs and test functionality
        InstrumentationRegistry.getInstrumentation()
                .getUiAutomation()
                .adoptShellPermissionIdentity(
                        Manifest.permission.CAPTURE_AUDIO_HOTWORD);

        for (final boolean lookbackOn : new boolean[] { false, true }) {
            AudioRecord audioRecord = null;
            if (!mAudioManager.isHotwordStreamSupported(lookbackOn)) {
                // Hardware does not support capturing hotword content
                continue;
            }
            try {
                AudioRecord.Builder builder = new AudioRecord.Builder();
                if (lookbackOn) {
                    builder.setRequestHotwordLookbackStream(true);
                } else {
                    builder.setRequestHotwordStream(true);
                }
                audioRecord = builder.build();
                if (lookbackOn) {
                    assertTrue(audioRecord.isHotwordLookbackStream());
                } else {
                    assertTrue(audioRecord.isHotwordStream());
                }
                audioRecord.startRecording();
                audioRecord.read(ByteBuffer.allocateDirect(4096), 4096);
                audioRecord.stop();
            } finally {
                if (audioRecord != null) {
                    audioRecord.release();
                }
            }
        }
        InstrumentationRegistry.getInstrumentation()
                .getUiAutomation()
                .dropShellPermissionIdentity();
    }

    // Test AudioRecord to ensure we can build after a failure.
    @Test
    public void testAudioRecordBufferSize() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBufferSize";

        // use builder with parameters that should fail
        final int superBigBufferSize = 1 << 28;
        try {
            final AudioRecord record = new AudioRecord.Builder()
                    .setBufferSizeInBytes(superBigBufferSize)
                    .build();
            record.release();
            fail(TEST_NAME + ": should throw exception on failure");
        } catch (UnsupportedOperationException e) {
            ;
        }

        // we should be able to create again with minimum buffer size
        final int verySmallBufferSize = 2 * 3 * 4; // frame size multiples
        final AudioRecord record2 = new AudioRecord.Builder()
                .setBufferSizeInBytes(verySmallBufferSize)
                .build();

        final int observedState2 = record2.getState();
        final int observedBufferSize2 = record2.getBufferSizeInFrames();
        record2.release();

        // succeeds for minimum buffer size
        assertEquals(TEST_NAME + ": state", AudioRecord.STATE_INITIALIZED, observedState2);
        // should force the minimum size buffer which is > 0
        assertTrue(TEST_NAME + ": buffer frame count", observedBufferSize2 > 0);
    }

    @Test
    public void testTimestamp() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final String TEST_NAME = "testTimestamp";
        AudioRecord record = null;

        try {
            final int NANOS_PER_MILLISECOND = 1000000;
            final long RECORD_TIME_MS = 2000;
            final long RECORD_TIME_NS = RECORD_TIME_MS * NANOS_PER_MILLISECOND;
            final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT; // fixed at this time.
            final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_STEREO;
            final int RECORD_SAMPLE_RATE = 23456; // requires resampling
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(RECORD_SAMPLE_RATE)
                            .setChannelMask(RECORD_CHANNEL_MASK)
                            .setEncoding(RECORD_ENCODING)
                            .build())
                    .build();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels =
                    AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK);
            final int bytesPerSample = AudioFormat.getBytesPerSample(RECORD_ENCODING);
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames =
                    (int)((long)RECORD_TIME_MS * RECORD_SAMPLE_RATE / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;

            final int tries = 2;
            for (int i = 0; i < tries; ++i) {
                final long trackStartTimeNs = System.nanoTime();
                final long trackStartTimeBootNs = android.os.SystemClock.elapsedRealtimeNanos();

                record.startRecording();

                final AudioTimestamp ts = new AudioTimestamp();
                int samplesRead = 0;
                // For 16 bit data, use shorts
                final short[] shortData = new short[BUFFER_SAMPLES];
                final AudioHelper.TimestampVerifier tsVerifier =
                        new AudioHelper.TimestampVerifier(TAG, RECORD_SAMPLE_RATE,
                                0 /* startFrames */, isProAudioDevice());

                while (samplesRead < targetSamples) {
                    final int amount = samplesRead == 0 ? numChannels :
                            Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                    final int ret = record.read(shortData, 0, amount);
                    assertEquals("read incorrect amount", amount, ret);
                    // timestamps follow a different path than data, so it is conceivable
                    // that first data arrives before the first timestamp is ready.

                    if (record.getTimestamp(ts, AudioTimestamp.TIMEBASE_MONOTONIC)
                            == AudioRecord.SUCCESS) {
                        tsVerifier.add(ts);
                    }
                    samplesRead += ret;
                }
                record.stop();

                // stop is synchronous, but need not be in the future.
                final long SLEEP_AFTER_STOP_FOR_INACTIVITY_MS = 1000;
                Thread.sleep(SLEEP_AFTER_STOP_FOR_INACTIVITY_MS);

                AudioTimestamp stopTs = new AudioTimestamp();
                AudioTimestamp stopTsBoot = new AudioTimestamp();

                assertEquals(AudioRecord.SUCCESS,
                        record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC));
                assertEquals(AudioRecord.SUCCESS,
                        record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME));

                // printTimestamp("timestamp Monotonic", ts);
                // printTimestamp("timestamp Boottime", tsBoot);
                // Log.d(TEST_NAME, "startTime Monotonic " + startTime);
                // Log.d(TEST_NAME, "startTime Boottime " + startTimeBoot);

                assertEquals(stopTs.framePosition, stopTsBoot.framePosition);
                assertTrue(stopTs.framePosition >= targetFrames);
                assertTrue(stopTs.nanoTime - trackStartTimeNs > RECORD_TIME_NS);
                assertTrue(stopTsBoot.nanoTime - trackStartTimeBootNs > RECORD_TIME_NS);

                tsVerifier.verifyAndLog(trackStartTimeNs, "test_timestamp" /* logName */);
            }
        } finally {
            if (record != null) {
                record.release();
                record = null;
            }
        }
    }

    @Test
    public void testRecordNoDataForIdleUids() throws Exception {
        if (!hasMicrophone()) {
            return;
        }

        AudioRecord recorder = null;
        String packageName = InstrumentationRegistry.getTargetContext().getPackageName();
        int currentUserId = Process.myUserHandle().getIdentifier();

        // We will record audio for 20 sec from the active and the idle state, expecting
        // the recording made while active to contain data and the one made while idle
        // to be silent.
        try {
            // Ensure no race and UID active
            makeMyUidStateActive(packageName, currentUserId);

            // Setup a recorder
            final AudioRecord candidateRecorder = new AudioRecord.Builder()
                    .setAudioSource(MediaRecorder.AudioSource.MIC)
                    .setBufferSizeInBytes(1024)
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(8000)
                            .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
                            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                            .build())
                    .build();

            // Unleash it :P
            candidateRecorder.startRecording();
            recorder = candidateRecorder;

            final int sampleCount = AudioHelper.frameCountFromMsec(6000,
                    candidateRecorder.getFormat()) * candidateRecorder.getFormat()
                    .getChannelCount();
            final ShortBuffer buffer = ShortBuffer.allocate(sampleCount);

            // Read five seconds of data
            readDataTimed(recorder, 5000, buffer);
            // Ensure we read non-empty bytes. Some systems only
            // emulate audio devices and do not provide any actual audio data.
            if (isAudioSilent(buffer)) {
                Log.w(TAG, "Recording does not produce audio data");
                return;
            }

            // Start clean
            buffer.clear();
            // Force idle the package
            makeMyUidStateIdle(packageName, currentUserId);
            // Read five seconds of data
            readDataTimed(recorder, 5000, buffer);
            // Ensure we read empty bytes
            assertTrue("Recording was not silenced while UID idle", isAudioSilent(buffer));

            // Start clean
            buffer.clear();
            // Reset to active
            makeMyUidStateActive(packageName, currentUserId);
            // Read five seconds of data
            readDataTimed(recorder, 5000, buffer);
            // Ensure we read non-empty bytes
            assertFalse("Recording was silenced while UID active", isAudioSilent(buffer));
        } finally {
            if (recorder != null) {
                recorder.stop();
                recorder.release();
            }
            resetMyUidState(packageName, currentUserId);
        }
    }

    @Test
    public void testRestrictedAudioSourcePermissions() throws Exception {
        // Make sure that the following audio sources cannot be used by apps that
        // don't have the CAPTURE_AUDIO_OUTPUT permissions:
        // - VOICE_CALL
        // - VOICE_DOWNLINK
        // - VOICE_UPLINK
        // - REMOTE_SUBMIX
        // - ECHO_REFERENCE - 1997
        // - RADIO_TUNER - 1998
        // - HOTWORD - 1999
        // The attempt to build an AudioRecord with those sources should throw either
        // UnsupportedOperationException or IllegalArgumentException exception.
        final int[] restrictedAudioSources = new int[] {
            MediaRecorder.AudioSource.VOICE_CALL,
            MediaRecorder.AudioSource.VOICE_DOWNLINK,
            MediaRecorder.AudioSource.VOICE_UPLINK,
            MediaRecorder.AudioSource.REMOTE_SUBMIX,
            1997,
            1998,
            1999
        };

        for (int source : restrictedAudioSources) {
            // AudioRecord.Builder should fail when trying to use
            // one of the voice call audio sources.
            try {
                AudioRecord ar = new AudioRecord.Builder()
                        .setAudioSource(source)
                        .setAudioFormat(new AudioFormat.Builder()
                                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                                .setSampleRate(8000)
                                .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
                                .build())
                        .build();
                fail("testRestrictedAudioSourcePermissions: no exception thrown for source: "
                        + source);
            } catch (Exception e) {
                Log.i(TAG, "Exception: " + e);
                if (!UnsupportedOperationException.class.isInstance(e)
                        && !IllegalArgumentException.class.isInstance(e)) {
                    fail("testRestrictedAudioSourcePermissions: unexpected exception for source: "
                            + source + " Exception:" + e);
                }
            }
        }
    }

    @Test
    public void testMediaMetrics() throws Exception {
        if (!hasMicrophone()) {
            return;
        }

        AudioRecord record = null;
        try {
            final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
            final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_MONO;
            final int RECORD_SAMPLE_RATE = 8000;
            final AudioFormat format = new AudioFormat.Builder()
                    .setSampleRate(RECORD_SAMPLE_RATE)
                    .setChannelMask(RECORD_CHANNEL_MASK)
                    .setEncoding(RECORD_ENCODING)
                    .build();

            // Setup a recorder
            record = new AudioRecord.Builder()
                    .setAudioSource(MediaRecorder.AudioSource.MIC)
                    .setAudioFormat(format)
                    .build();

            final PersistableBundle metrics = record.getMetrics();

            assertNotNull("null metrics", metrics);
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.ENCODING,
                    new String("AUDIO_FORMAT_PCM_16_BIT"));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.SOURCE,
                    new String("AUDIO_SOURCE_MIC"));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.SAMPLERATE,
                    new Integer(RECORD_SAMPLE_RATE));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.CHANNELS,
                    new Integer(AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK)));

            // deprecated, value ignored.
            AudioHelper.assertMetricsKey(metrics, AudioRecord.MetricsConstants.LATENCY);

            // TestApi:
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.CHANNEL_MASK,
                    new Long(RECORD_CHANNEL_MASK));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.FRAME_COUNT,
                    new Integer(record.getBufferSizeInFrames()));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.DURATION_MS,
                    new Double(0.));
            AudioHelper.assertMetricsKeyEquals(metrics, AudioRecord.MetricsConstants.START_COUNT,
                    new Long(0));

            // TestApi: no particular value checking.
            AudioHelper.assertMetricsKey(metrics, AudioRecord.MetricsConstants.PORT_ID);
            AudioHelper.assertMetricsKey(metrics, AudioRecord.MetricsConstants.ATTRIBUTES);
        } finally {
            if (record != null) {
                record.release();
            }
        }
    }

    private void printMicrophoneInfo(MicrophoneInfo microphone) {
        Log.i(TAG, "deviceId:" + microphone.getDescription());
        Log.i(TAG, "portId:" + microphone.getId());
        Log.i(TAG, "type:" + microphone.getType());
        Log.i(TAG, "address:" + microphone.getAddress());
        Log.i(TAG, "deviceLocation:" + microphone.getLocation());
        Log.i(TAG, "deviceGroup:" + microphone.getGroup()
                + " index:" + microphone.getIndexInTheGroup());
        MicrophoneInfo.Coordinate3F position = microphone.getPosition();
        Log.i(TAG, "position:" + position.x + "," + position.y + "," + position.z);
        MicrophoneInfo.Coordinate3F orientation = microphone.getOrientation();
        Log.i(TAG, "orientation:" + orientation.x + "," + orientation.y + "," + orientation.z);
        Log.i(TAG, "frequencyResponse:" + microphone.getFrequencyResponse());
        Log.i(TAG, "channelMapping:" + microphone.getChannelMapping());
        Log.i(TAG, "sensitivity:" + microphone.getSensitivity());
        Log.i(TAG, "max spl:" + microphone.getMaxSpl());
        Log.i(TAG, "min spl:" + microphone.getMinSpl());
        Log.i(TAG, "directionality:" + microphone.getDirectionality());
        Log.i(TAG, "******");
    }

    @CddTest(requirement="5.4.1/C-1-4")
    @Test
    public void testGetActiveMicrophones() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioRecord.startRecording();
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        }
        List<MicrophoneInfo> activeMicrophones = mAudioRecord.getActiveMicrophones();
        assertTrue(activeMicrophones.size() > 0);
        for (MicrophoneInfo activeMicrophone : activeMicrophones) {
            printMicrophoneInfo(activeMicrophone);
        }
    }

    private Executor mExec = new Executor() {
        @Override
        public void execute(Runnable command) {
            command.run();
        }
    };

    @Test
    public void testAudioRecordInfoCallback() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        AudioRecordingConfigurationTest.MyAudioRecordingCallback callback =
                new AudioRecordingConfigurationTest.MyAudioRecordingCallback(
                        mAudioRecord.getAudioSessionId(), MediaRecorder.AudioSource.DEFAULT);
        mAudioRecord.registerAudioRecordingCallback(mExec, callback);
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());

        callback.await(TEST_TIMING_TOLERANCE_MS);
        assertTrue(callback.mCalled);
        assertTrue(callback.mConfigs.size() <= 1);
        if (callback.mConfigs.size() == 1) {
            checkRecordingConfig(callback.mConfigs.get(0));
        }

        Thread.sleep(RECORD_DURATION_MS);
        mAudioRecord.unregisterAudioRecordingCallback(callback);
    }

    @Test
    public void testGetActiveRecordingConfiguration() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());

        try {
            Thread.sleep(RECORD_DURATION_MS);
        } catch (InterruptedException e) {
        }

        AudioRecordingConfiguration config = mAudioRecord.getActiveRecordingConfiguration();
        checkRecordingConfig(config);

        mAudioRecord.release();
        // test no exception is thrown when querying immediately after release()
        // which is not a synchronous operation
        config = mAudioRecord.getActiveRecordingConfiguration();
        try {
            Thread.sleep(TEST_TIMING_TOLERANCE_MS);
        } catch (InterruptedException e) {
        }
        assertNull("Recording configuration not null after release",
                mAudioRecord.getActiveRecordingConfiguration());
    }

    private static void checkRecordingConfig(AudioRecordingConfiguration config) {
        assertNotNull(config);
        AudioFormat format = config.getClientFormat();
        assertEquals(AudioFormat.CHANNEL_IN_MONO, format.getChannelMask());
        assertEquals(AudioFormat.ENCODING_PCM_16BIT, format.getEncoding());
        assertEquals(SAMPLING_RATE_HZ, format.getSampleRate());
        assertEquals(MediaRecorder.AudioSource.MIC, config.getAudioSource());
        assertNotNull(config.getAudioDevice());
        assertNotNull(config.getClientEffects());
        assertNotNull(config.getEffects());
        // no requirement here, just testing the API
        config.isClientSilenced();
    }
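    /**
     * Builds the AudioRecord used by the doTest() helpers in one of three ways:
     * an AudioHelper.AudioRecordAudit wrapper when auditRecording is set (audit recording
     * plays back the recorded audio), an AudioRecord.Builder configured with a channel
     * index mask when isChannelIndex is set, or the legacy AudioRecord constructor
     * otherwise. The resulting format is asserted to match the requested sample rate,
     * encoding and channel mask / channel index mask before the record is returned.
     */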
    private AudioRecord createAudioRecord(
            int audioSource, int sampleRateInHz,
            int channelConfig, int audioFormat, int bufferSizeInBytes,
            boolean auditRecording, boolean isChannelIndex) {
        final AudioRecord record;
        if (auditRecording) {
            record = new AudioHelper.AudioRecordAudit(
                    audioSource, sampleRateInHz, channelConfig,
                    audioFormat, bufferSizeInBytes, isChannelIndex);
        } else if (isChannelIndex) {
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setChannelIndexMask(channelConfig)
                            .setEncoding(audioFormat)
                            .setSampleRate(sampleRateInHz)
                            .build())
                    .setBufferSizeInBytes(bufferSizeInBytes)
                    .build();
        } else {
            record = new AudioRecord(audioSource, sampleRateInHz, channelConfig,
                    audioFormat, bufferSizeInBytes);
        }

        // did we get the AudioRecord we expected?
        final AudioFormat format = record.getFormat();
        assertEquals(isChannelIndex ? channelConfig : AudioFormat.CHANNEL_INVALID,
                format.getChannelIndexMask());
        assertEquals(isChannelIndex ? AudioFormat.CHANNEL_INVALID : channelConfig,
                format.getChannelMask());
        assertEquals(audioFormat, format.getEncoding());
        assertEquals(sampleRateInHz, format.getSampleRate());
        final int frameSize =
                format.getChannelCount() * AudioFormat.getBytesPerSample(audioFormat);
        // our native frame count cannot be smaller than our minimum buffer size request.
        assertTrue(record.getBufferSizeInFrames() * frameSize >= bufferSizeInBytes);
        return record;
    }
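    /**
     * Core recording test driven by the parameterized tests above.
     *
     * @param reportName             name used for the DeviceReportLog entry
     * @param localRecord            create the AudioRecord on this thread if true, otherwise on
     *                               a separate looper thread via MakeSomethingAsynchronouslyAndLoop
     * @param customHandler          deliver position update callbacks through mHandler if true
     * @param periodsPerSecond       periodic notification rate (0 disables periodic notifications)
     * @param markerPeriodsPerSecond marker notification rate (0 disables markers)
     * @param useByteBuffer          read through a direct ByteBuffer instead of an array
     * @param blocking               use blocking reads instead of READ_NON_BLOCKING
     * @param auditRecording         wrap the record in AudioHelper.AudioRecordAudit, which plays
     *                               back the recorded audio and uses a longer test time
     * @param isChannelIndex         interpret TEST_CONF as a channel index mask
     * @param TEST_SR                sample rate in Hz
     * @param TEST_CONF              channel mask or channel index mask
     * @param TEST_FORMAT            PCM encoding
     */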
    private void doTest(String reportName, boolean localRecord, boolean customHandler,
            int periodsPerSecond, int markerPeriodsPerSecond,
            boolean useByteBuffer, boolean blocking,
            final boolean auditRecording, final boolean isChannelIndex,
            final int TEST_SR, final int TEST_CONF, final int TEST_FORMAT) throws Exception {
        final int TEST_TIME_MS = auditRecording ? 10000 : 2000;
        doTest(reportName, localRecord, customHandler, periodsPerSecond, markerPeriodsPerSecond,
                useByteBuffer, blocking, auditRecording, isChannelIndex,
                TEST_SR, TEST_CONF, TEST_FORMAT, TEST_TIME_MS);
    }

    private void doTest(String reportName, boolean localRecord, boolean customHandler,
            int periodsPerSecond, int markerPeriodsPerSecond,
            boolean useByteBuffer, boolean blocking,
            final boolean auditRecording, final boolean isChannelIndex,
            final int TEST_SR, final int TEST_CONF, final int TEST_FORMAT, final int TEST_TIME_MS)
            throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // audit recording plays back recorded audio, so use longer test timing
        final int TEST_SOURCE = MediaRecorder.AudioSource.DEFAULT;
        mIsHandleMessageCalled = false;

        // For channelIndex use one frame in bytes for buffer size.
        // This is adjusted to the minimum buffer size by native code.
        final int bufferSizeInBytes = isChannelIndex ?
                (AudioFormat.getBytesPerSample(TEST_FORMAT)
                        * AudioFormat.channelCountFromInChannelMask(TEST_CONF)) :
                AudioRecord.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        assertTrue(bufferSizeInBytes > 0);

        final AudioRecord record;
        final AudioHelper
                .MakeSomethingAsynchronouslyAndLoop<AudioRecord> makeSomething;

        if (localRecord) {
            makeSomething = null;
            record = createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                    TEST_FORMAT, bufferSizeInBytes, auditRecording, isChannelIndex);
        } else {
            makeSomething =
                    new AudioHelper.MakeSomethingAsynchronouslyAndLoop<AudioRecord>(
                            new AudioHelper.MakesSomething<AudioRecord>() {
                                @Override
                                public AudioRecord makeSomething() {
                                    return createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                                            TEST_FORMAT, bufferSizeInBytes, auditRecording,
                                            isChannelIndex);
                                }
                            }
                    );
            // create AudioRecord on different thread's looper.
            record = makeSomething.make();
        }

        // AudioRecord creation may have silently failed, check state now
        assertEquals(AudioRecord.STATE_INITIALIZED, record.getState());

        final MockOnRecordPositionUpdateListener listener;
        if (customHandler) {
            listener = new MockOnRecordPositionUpdateListener(record, mHandler);
        } else {
            listener = new MockOnRecordPositionUpdateListener(record);
        }

        final int updatePeriodInFrames = (periodsPerSecond == 0)
                ? 0 : TEST_SR / periodsPerSecond;
        // After starting, there is no guarantee when the first frame of data is read.
        long firstSampleTime = 0;

        // blank final variables: all successful paths will initialize the times.
        // this must be declared here for visibility as they are set within the try block.
        final long endTime;
        final long startTime;
        final long stopRequestTime;
        final long stopTime;
        final long coldInputStartTime;

        try {
            if (markerPeriodsPerSecond != 0) {
                mMarkerPeriodInFrames = TEST_SR / markerPeriodsPerSecond;
                mMarkerPosition = mMarkerPeriodInFrames;
                assertEquals(AudioRecord.SUCCESS,
                        record.setNotificationMarkerPosition(mMarkerPosition));
            } else {
                mMarkerPeriodInFrames = 0;
            }

            assertEquals(AudioRecord.SUCCESS,
                    record.setPositionNotificationPeriod(updatePeriodInFrames));

            // at the start, there is no timestamp.
            AudioTimestamp startTs = new AudioTimestamp();
            assertEquals(AudioRecord.ERROR_INVALID_OPERATION,
                    record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC));
            assertEquals("invalid getTimestamp doesn't affect nanoTime", 0, startTs.nanoTime);

            listener.start(TEST_SR);
            record.startRecording();
            assertEquals(AudioRecord.RECORDSTATE_RECORDING, record.getRecordingState());
            startTime = System.currentTimeMillis();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels = AudioFormat.channelCountFromInChannelMask(TEST_CONF);
            final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT);
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;
            // TODO: verify behavior when buffer size is not a multiple of frame size.

            int samplesRead = 0;
            // abstract out the buffer type used with lambda.
            final byte[] byteData = new byte[BUFFER_SAMPLES];
            final short[] shortData = new short[BUFFER_SAMPLES];
            final float[] floatData = new float[BUFFER_SAMPLES];
            final ByteBuffer byteBuffer =
                    ByteBuffer.allocateDirect(BUFFER_SAMPLES * bytesPerSample);
            BiFunction<Integer, Boolean, Integer> reader = null;

            // depending on the options, create a lambda to read data.
            if (useByteBuffer) {
                reader = (samples, blockForData) -> {
                    final int amount = samples * bytesPerSample; // in bytes
                    // read always places data at the start of the byte buffer;
                    // position and limit are ignored. Test this by setting
                    // position and limit to arbitrary values here.
                    final int lastPosition = 7;
                    final int lastLimit = 13;
                    byteBuffer.position(lastPosition);
                    byteBuffer.limit(lastLimit);
                    final int ret = blockForData ? record.read(byteBuffer, amount) :
                            record.read(byteBuffer, amount, AudioRecord.READ_NON_BLOCKING);
                    return ret / bytesPerSample;
                };
            } else {
                switch (TEST_FORMAT) {
                    case AudioFormat.ENCODING_PCM_8BIT:
                        reader = (samples, blockForData) -> {
                            return blockForData ? record.read(byteData, 0, samples) :
                                    record.read(byteData, 0, samples,
                                            AudioRecord.READ_NON_BLOCKING);
                        };
                        break;
                    case AudioFormat.ENCODING_PCM_16BIT:
                        reader = (samples, blockForData) -> {
                            return blockForData ? record.read(shortData, 0, samples) :
                                    record.read(shortData, 0, samples,
                                            AudioRecord.READ_NON_BLOCKING);
                        };
                        break;
                    case AudioFormat.ENCODING_PCM_FLOAT:
                        reader = (samples, blockForData) -> {
                            return record.read(floatData, 0, samples,
                                    blockForData ? AudioRecord.READ_BLOCKING
                                            : AudioRecord.READ_NON_BLOCKING);
                        };
                        break;
                }
            }

            while (samplesRead < targetSamples) {
                // the first time through, we read a single frame.
                // this sets the recording anchor position.
                final int amount = samplesRead == 0 ? numChannels :
                        Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                final int ret = reader.apply(amount, blocking);
                if (blocking) {
                    assertEquals("blocking reads should return amount requested", amount, ret);
                } else {
                    assertTrue("non-blocking reads should return amount in range: " +
                            "0 <= " + ret + " <= " + amount,
                            0 <= ret && ret <= amount);
                }
                if (samplesRead == 0 && ret > 0) {
                    firstSampleTime = System.currentTimeMillis();
                }
                samplesRead += ret;
                if (startTs.nanoTime == 0 && ret > 0 &&
                        record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC)
                        == AudioRecord.SUCCESS) {
                    assertTrue("expecting valid timestamp with nonzero nanoTime",
                            startTs.nanoTime > 0);
                }
            }

            // We've read all the frames, now check the record timing.
            endTime = System.currentTimeMillis();

            coldInputStartTime = firstSampleTime - startTime;
            //Log.d(TAG, "first sample time " + coldInputStartTime
            //        + " test time " + (endTime - firstSampleTime));

            if (coldInputStartTime > 200) {
                Log.w(TAG, "cold input start time way too long "
                        + coldInputStartTime + " > 200ms");
            } else if (coldInputStartTime > 100) {
                Log.w(TAG, "cold input start time too long "
                        + coldInputStartTime + " > 100ms");
            }

            final int COLD_INPUT_START_TIME_LIMIT_MS = 5000;
            assertTrue("track must start within " + COLD_INPUT_START_TIME_LIMIT_MS + " millis",
                    coldInputStartTime < COLD_INPUT_START_TIME_LIMIT_MS);

            // Verify recording completes within 50 ms of expected test time (typical 20ms)
            final int RECORDING_TIME_TOLERANCE_MS = auditRecording ?
                    (isLowLatencyDevice() ? 1000 : 2000) : (isLowLatencyDevice() ? 50 : 400);
            assertEquals("recording must complete within " + RECORDING_TIME_TOLERANCE_MS
                    + " of expected test time",
                    TEST_TIME_MS, endTime - firstSampleTime, RECORDING_TIME_TOLERANCE_MS);

            // Even though we've read all the frames we want, the events may not be sent to
            // the listeners (events are handled through a separate internal callback thread).
            // One must sleep to make sure the last event(s) come in.
            Thread.sleep(30);

            stopRequestTime = System.currentTimeMillis();
            record.stop();
            assertEquals("state should be RECORDSTATE_STOPPED after stop()",
                    AudioRecord.RECORDSTATE_STOPPED, record.getRecordingState());

            stopTime = System.currentTimeMillis();

            // stop listening - we should be done.
            // Caution: M behavior and likely much earlier:
            // we assume no events can happen after stop(), but this may not
            // always be true as stop can take 100ms to complete (as it may disable
            // input recording on the hal); thus the event handler may still be holding
            // valid events, which it issues right after stop completes. Except for those
            // events, no other events should show up after stop.
            // This behavior may change in the future but we account for it here in testing.
            final long SLEEP_AFTER_STOP_FOR_EVENTS_MS = 30;
            Thread.sleep(SLEEP_AFTER_STOP_FOR_EVENTS_MS);
            listener.stop();

            // get stop timestamp
            AudioTimestamp stopTs = new AudioTimestamp();
            assertEquals("should successfully get timestamp after stop",
                    AudioRecord.SUCCESS,
                    record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC));
            AudioTimestamp stopTsBoot = new AudioTimestamp();
            assertEquals("should successfully get boottime timestamp after stop",
                    AudioRecord.SUCCESS,
                    record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME));

            // printTimestamp("startTs", startTs);
            // printTimestamp("stopTs", stopTs);
            // printTimestamp("stopTsBoot", stopTsBoot);
            // Log.d(TAG, "time Monotonic " + System.nanoTime());
            // Log.d(TAG, "time Boottime " + SystemClock.elapsedRealtimeNanos());

            // stop should not reset timestamps
            assertTrue("stop timestamp position should be no less than frames read",
                    stopTs.framePosition >= targetFrames);
            assertEquals("stop timestamp position should be same "
                    + "between monotonic and boot timestamps",
                    stopTs.framePosition, stopTsBoot.framePosition);
            assertTrue("stop timestamp nanoTime must be set", stopTs.nanoTime > 0);

            // timestamps follow a different path than data, so it is conceivable
            // that first data arrives before the first timestamp is ready.
            assertTrue("no start timestamp read", startTs.nanoTime > 0);

            verifyContinuousTimestamps(startTs, stopTs, TEST_SR);

            // clean up
            if (makeSomething != null) {
                makeSomething.join();
            }

        } finally {
            listener.release();
            // we must release the record immediately as it is a system-wide
            // resource needed for other tests.
            record.release();
        }

        final int markerPeriods = markerPeriodsPerSecond * TEST_TIME_MS / 1000;
        final int updatePeriods = periodsPerSecond * TEST_TIME_MS / 1000;
        final int markerPeriodsMax =
                markerPeriodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;
        final int updatePeriodsMax =
                periodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;

        // collect statistics
        final ArrayList<Integer> markerList = listener.getMarkerList();
        final ArrayList<Integer> periodicList = listener.getPeriodicList();
        // verify count of markers and periodic notifications.
        // there could be an extra notification since we don't stop() immediately
        // rather wait for potential events to come in.
1395 //Log.d(TAG, "markerPeriods " + markerPeriods + 1396 // " markerPeriodsReceived " + markerList.size()); 1397 //Log.d(TAG, "updatePeriods " + updatePeriods + 1398 // " updatePeriodsReceived " + periodicList.size()); 1399 if (isLowLatencyDevice()) { 1400 assertTrue(TAG + ": markerPeriods " + markerPeriods + 1401 " <= markerPeriodsReceived " + markerList.size() + 1402 " <= markerPeriodsMax " + markerPeriodsMax, 1403 markerPeriods <= markerList.size() 1404 && markerList.size() <= markerPeriodsMax); 1405 assertTrue(TAG + ": updatePeriods " + updatePeriods + 1406 " <= updatePeriodsReceived " + periodicList.size() + 1407 " <= updatePeriodsMax " + updatePeriodsMax, 1408 updatePeriods <= periodicList.size() 1409 && periodicList.size() <= updatePeriodsMax); 1410 } 1411 1412 // Since we don't have accurate positioning of the start time of the recorder, 1413 // and there is no record.getPosition(), we consider only differential timing 1414 // from the first marker or periodic event. 1415 final int toleranceInFrames = TEST_SR * 80 / 1000; // 80 ms 1416 final int testTimeInFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000); 1417 1418 AudioHelper.Statistics markerStat = new AudioHelper.Statistics(); 1419 for (int i = 1; i < markerList.size(); ++i) { 1420 final int expected = mMarkerPeriodInFrames * i; 1421 if (markerList.get(i) > testTimeInFrames) { 1422 break; // don't consider any notifications when we might be stopping. 1423 } 1424 final int actual = markerList.get(i) - markerList.get(0); 1425 //Log.d(TAG, "Marker: " + i + " expected(" + expected + ") actual(" + actual 1426 // + ") diff(" + (actual - expected) + ")" 1427 // + " tolerance " + toleranceInFrames); 1428 if (isLowLatencyDevice()) { 1429 assertEquals(expected, actual, toleranceInFrames); 1430 } 1431 markerStat.add((double)(actual - expected) * 1000 / TEST_SR); 1432 } 1433 1434 AudioHelper.Statistics periodicStat = new AudioHelper.Statistics(); 1435 for (int i = 1; i < periodicList.size(); ++i) { 1436 final int expected = updatePeriodInFrames * i; 1437 if (periodicList.get(i) > testTimeInFrames) { 1438 break; // don't consider any notifications when we might be stopping. 
1439 } 1440 final int actual = periodicList.get(i) - periodicList.get(0); 1441 //Log.d(TAG, "Update: " + i + " expected(" + expected + ") actual(" + actual 1442 // + ") diff(" + (actual - expected) + ")" 1443 // + " tolerance " + toleranceInFrames); 1444 if (isLowLatencyDevice()) { 1445 assertEquals(expected, actual, toleranceInFrames); 1446 } 1447 periodicStat.add((double)(actual - expected) * 1000 / TEST_SR); 1448 } 1449 1450 // report this 1451 DeviceReportLog log = new DeviceReportLog(REPORT_LOG_NAME, reportName); 1452 log.addValue("start_recording_lag", coldInputStartTime, ResultType.LOWER_BETTER, 1453 ResultUnit.MS); 1454 log.addValue("stop_execution_time", stopTime - stopRequestTime, ResultType.LOWER_BETTER, 1455 ResultUnit.MS); 1456 log.addValue("total_record_time_expected", TEST_TIME_MS, ResultType.NEUTRAL, ResultUnit.MS); 1457 log.addValue("total_record_time_actual", endTime - firstSampleTime, ResultType.NEUTRAL, 1458 ResultUnit.MS); 1459 log.addValue("total_markers_expected", markerPeriods, ResultType.NEUTRAL, ResultUnit.COUNT); 1460 log.addValue("total_markers_actual", markerList.size(), ResultType.NEUTRAL, 1461 ResultUnit.COUNT); 1462 log.addValue("total_periods_expected", updatePeriods, ResultType.NEUTRAL, ResultUnit.COUNT); 1463 log.addValue("total_periods_actual", periodicList.size(), ResultType.NEUTRAL, 1464 ResultUnit.COUNT); 1465 log.addValue("average_marker_diff", markerStat.getAvg(), ResultType.LOWER_BETTER, 1466 ResultUnit.MS); 1467 log.addValue("maximum_marker_abs_diff", markerStat.getMaxAbs(), ResultType.LOWER_BETTER, 1468 ResultUnit.MS); 1469 log.addValue("average_marker_abs_diff", markerStat.getAvgAbs(), ResultType.LOWER_BETTER, 1470 ResultUnit.MS); 1471 log.addValue("average_periodic_diff", periodicStat.getAvg(), ResultType.LOWER_BETTER, 1472 ResultUnit.MS); 1473 log.addValue("maximum_periodic_abs_diff", periodicStat.getMaxAbs(), ResultType.LOWER_BETTER, 1474 ResultUnit.MS); 1475 log.addValue("average_periodic_abs_diff", periodicStat.getAvgAbs(), ResultType.LOWER_BETTER, 1476 ResultUnit.MS); 1477 log.setSummary("unified_abs_diff", (periodicStat.getAvgAbs() + markerStat.getAvgAbs()) / 2, 1478 ResultType.LOWER_BETTER, ResultUnit.MS); 1479 log.submit(InstrumentationRegistry.getInstrumentation()); 1480 } 1481 1482 private class MockOnRecordPositionUpdateListener 1483 implements OnRecordPositionUpdateListener { MockOnRecordPositionUpdateListener(AudioRecord record)1484 public MockOnRecordPositionUpdateListener(AudioRecord record) { 1485 mAudioRecord = record; 1486 record.setRecordPositionUpdateListener(this); 1487 } 1488 MockOnRecordPositionUpdateListener(AudioRecord record, Handler handler)1489 public MockOnRecordPositionUpdateListener(AudioRecord record, Handler handler) { 1490 mAudioRecord = record; 1491 record.setRecordPositionUpdateListener(this, handler); 1492 } 1493 onMarkerReached(AudioRecord record)1494 public synchronized void onMarkerReached(AudioRecord record) { 1495 if (mIsTestActive) { 1496 int position = getPosition(); 1497 mOnMarkerReachedCalled.add(position); 1498 mMarkerPosition += mMarkerPeriodInFrames; 1499 assertEquals(AudioRecord.SUCCESS, 1500 mAudioRecord.setNotificationMarkerPosition(mMarkerPosition)); 1501 } else { 1502 // see comment on stop() 1503 final long delta = System.currentTimeMillis() - mStopTime; 1504 Log.d(TAG, "onMarkerReached called " + delta + " ms after stop"); 1505 fail("onMarkerReached called when not active"); 1506 } 1507 } 1508 onPeriodicNotification(AudioRecord record)1509 public synchronized void 
onPeriodicNotification(AudioRecord record) { 1510 if (mIsTestActive) { 1511 int position = getPosition(); 1512 mOnPeriodicNotificationCalled.add(position); 1513 } else { 1514 // see comment on stop() 1515 final long delta = System.currentTimeMillis() - mStopTime; 1516 Log.d(TAG, "onPeriodicNotification called " + delta + " ms after stop"); 1517 fail("onPeriodicNotification called when not active"); 1518 } 1519 } 1520 start(int sampleRate)1521 public synchronized void start(int sampleRate) { 1522 mIsTestActive = true; 1523 mSampleRate = sampleRate; 1524 mStartTime = System.currentTimeMillis(); 1525 } 1526 stop()1527 public synchronized void stop() { 1528 // the listener should be stopped some time after AudioRecord is stopped 1529 // as some messages may not yet be posted. 1530 mIsTestActive = false; 1531 mStopTime = System.currentTimeMillis(); 1532 } 1533 getMarkerList()1534 public ArrayList<Integer> getMarkerList() { 1535 return mOnMarkerReachedCalled; 1536 } 1537 getPeriodicList()1538 public ArrayList<Integer> getPeriodicList() { 1539 return mOnPeriodicNotificationCalled; 1540 } 1541 release()1542 public synchronized void release() { 1543 stop(); 1544 mAudioRecord.setRecordPositionUpdateListener(null); 1545 mAudioRecord = null; 1546 } 1547 getPosition()1548 private int getPosition() { 1549 // we don't have mAudioRecord.getRecordPosition(); 1550 // so we fake this by timing. 1551 long delta = System.currentTimeMillis() - mStartTime; 1552 return (int)(delta * mSampleRate / 1000); 1553 } 1554 1555 private long mStartTime; 1556 private long mStopTime; 1557 private int mSampleRate; 1558 private boolean mIsTestActive = true; 1559 private AudioRecord mAudioRecord; 1560 private ArrayList<Integer> mOnMarkerReachedCalled = new ArrayList<Integer>(); 1561 private ArrayList<Integer> mOnPeriodicNotificationCalled = new ArrayList<Integer>(); 1562 } 1563 hasMicrophone()1564 private boolean hasMicrophone() { 1565 return getContext().getPackageManager().hasSystemFeature( 1566 PackageManager.FEATURE_MICROPHONE); 1567 } 1568 isLowRamDevice()1569 private boolean isLowRamDevice() { 1570 return ((ActivityManager) getContext().getSystemService(Context.ACTIVITY_SERVICE)) 1571 .isLowRamDevice(); 1572 } 1573 isLowLatencyDevice()1574 private boolean isLowLatencyDevice() { 1575 return getContext().getPackageManager().hasSystemFeature( 1576 PackageManager.FEATURE_AUDIO_LOW_LATENCY); 1577 } 1578 isProAudioDevice()1579 private boolean isProAudioDevice() { 1580 return getContext().getPackageManager().hasSystemFeature( 1581 PackageManager.FEATURE_AUDIO_PRO); 1582 } 1583 verifyContinuousTimestamps( AudioTimestamp startTs, AudioTimestamp stopTs, int sampleRate)1584 private void verifyContinuousTimestamps( 1585 AudioTimestamp startTs, AudioTimestamp stopTs, int sampleRate) 1586 throws Exception { 1587 final long timeDiff = stopTs.nanoTime - startTs.nanoTime; 1588 final long frameDiff = stopTs.framePosition - startTs.framePosition; 1589 final long NANOS_PER_SECOND = 1000000000; 1590 final long timeByFrames = frameDiff * NANOS_PER_SECOND / sampleRate; 1591 final double ratio = (double)timeDiff / timeByFrames; 1592 1593 // Usually the ratio is accurate to one part per thousand or better. 1594 // Log.d(TAG, "ratio=" + ratio + ", timeDiff=" + timeDiff + ", frameDiff=" + frameDiff + 1595 // ", timeByFrames=" + timeByFrames + ", sampleRate=" + sampleRate); 1596 assertEquals(1.0 /* expected */, ratio, isLowLatencyDevice() ? 0.01 : 0.5 /* delta */); 1597 } 1598 1599 // remove if AudioTimestamp has a better toString(). 
printTimestamp(String s, AudioTimestamp ats)1600 private void printTimestamp(String s, AudioTimestamp ats) { 1601 Log.d(TAG, s + ": pos: " + ats.framePosition + " time: " + ats.nanoTime); 1602 } 1603 readDataTimed(AudioRecord recorder, long durationMillis, ShortBuffer out)1604 private static void readDataTimed(AudioRecord recorder, long durationMillis, 1605 ShortBuffer out) throws IOException { 1606 final short[] buffer = new short[1024]; 1607 final long startTimeMillis = SystemClock.uptimeMillis(); 1608 final long stopTimeMillis = startTimeMillis + durationMillis; 1609 while (SystemClock.uptimeMillis() < stopTimeMillis) { 1610 final int readCount = recorder.read(buffer, 0, buffer.length); 1611 if (readCount <= 0) { 1612 return; 1613 } 1614 out.put(buffer, 0, readCount); 1615 } 1616 } 1617 isAudioSilent(ShortBuffer buffer)1618 private static boolean isAudioSilent(ShortBuffer buffer) { 1619 // Always need some bytes read 1620 assertTrue("Buffer should have some data", buffer.position() > 0); 1621 1622 // It is possible that the transition from empty to non empty bytes 1623 // happened in the middle of the read data due to the async nature of 1624 // the system. Therefore, we look for the transitions from non-empty 1625 // to empty and from empty to non-empty values for robustness. 1626 int totalSilenceCount = 0; 1627 final int valueCount = buffer.position(); 1628 for (int i = valueCount - 1; i >= 0; i--) { 1629 final short value = buffer.get(i); 1630 if (value == 0) { 1631 totalSilenceCount++; 1632 } 1633 } 1634 return totalSilenceCount > valueCount / 2; 1635 } 1636 makeMyUidStateActive(String packageName, int userId)1637 private static void makeMyUidStateActive(String packageName, int userId) throws IOException { 1638 final String command = String.format( 1639 "cmd media.audio_policy set-uid-state %s active --user %d", packageName, userId); 1640 SystemUtil.runShellCommand(InstrumentationRegistry.getInstrumentation(), command); 1641 } 1642 makeMyUidStateIdle(String packageName, int userId)1643 private static void makeMyUidStateIdle(String packageName, int userId) throws IOException { 1644 final String command = String.format( 1645 "cmd media.audio_policy set-uid-state %s idle --user %d", packageName, userId); 1646 SystemUtil.runShellCommand(InstrumentationRegistry.getInstrumentation(), command); 1647 } 1648 resetMyUidState(String packageName, int userId)1649 private static void resetMyUidState(String packageName, int userId) throws IOException { 1650 final String command = String.format( 1651 "cmd media.audio_policy reset-uid-state %s --user %d", packageName, userId); 1652 SystemUtil.runShellCommand(InstrumentationRegistry.getInstrumentation(), command); 1653 } 1654 getContext()1655 private static Context getContext() { 1656 return InstrumentationRegistry.getInstrumentation().getTargetContext(); 1657 } 1658 1659 /* 1660 * Microphone Direction API tests 1661 */ 1662 @Test testSetPreferredMicrophoneDirection()1663 public void testSetPreferredMicrophoneDirection() { 1664 if (!hasMicrophone()) { 1665 return; 1666 } 1667 1668 try { 1669 boolean success = 1670 mAudioRecord.setPreferredMicrophoneDirection( 1671 MicrophoneDirection.MIC_DIRECTION_TOWARDS_USER); 1672 1673 // Can't actually test this as HAL may not have implemented it 1674 // Just verify that it doesn't crash or throw an exception 1675 // assertTrue(success); 1676 } catch (Exception ex) { 1677 Log.e(TAG, "testSetPreferredMicrophoneDirection() exception:" + ex); 1678 assertTrue(false); 1679 } 1680 return; 1681 } 1682 1683 @Test 
testSetPreferredMicrophoneFieldDimension()1684 public void testSetPreferredMicrophoneFieldDimension() { 1685 if (!hasMicrophone()) { 1686 return; 1687 } 1688 1689 try { 1690 boolean success = mAudioRecord.setPreferredMicrophoneFieldDimension(1.0f); 1691 1692 // Can't actually test this as HAL may not have implemented it 1693 // Just verify that it doesn't crash or throw an exception 1694 // assertTrue(success); 1695 } catch (Exception ex) { 1696 Log.e(TAG, "testSetPreferredMicrophoneFieldDimension() exception:" + ex); 1697 assertTrue(false); 1698 } 1699 return; 1700 } 1701 1702 /** 1703 * Test AudioRecord Builder error handling. 1704 * 1705 * @throws Exception 1706 */ 1707 @Test testAudioRecordBuilderError()1708 public void testAudioRecordBuilderError() throws Exception { 1709 if (!hasMicrophone()) { 1710 return; 1711 } 1712 1713 final AudioRecord[] audioRecord = new AudioRecord[1]; // pointer to AudioRecord. 1714 final int BIGNUM = Integer.MAX_VALUE; // large value that should be invalid. 1715 final int INVALID_SESSION_ID = 1024; // can never occur (wrong type in 3 lsbs) 1716 final int INVALID_CHANNEL_MASK = -1; 1717 1718 try { 1719 // NOTE: 1720 // AudioFormat tested in AudioFormatTest#testAudioFormatBuilderError. 1721 1722 // We must be able to create the AudioRecord. 1723 audioRecord[0] = new AudioRecord.Builder().build(); 1724 audioRecord[0].release(); 1725 1726 // Out of bounds buffer size. A large size will fail in AudioRecord creation. 1727 assertThrows(UnsupportedOperationException.class, () -> { 1728 audioRecord[0] = new AudioRecord.Builder() 1729 .setBufferSizeInBytes(BIGNUM) 1730 .build(); 1731 }); 1732 1733 // 0 and negative buffer size throw IllegalArgumentException 1734 for (int bufferSize : new int[] {-BIGNUM, -1, 0}) { 1735 assertThrows(IllegalArgumentException.class, () -> { 1736 audioRecord[0] = new AudioRecord.Builder() 1737 .setBufferSizeInBytes(bufferSize) 1738 .build(); 1739 }); 1740 } 1741 1742 assertThrows(IllegalArgumentException.class, () -> { 1743 audioRecord[0] = new AudioRecord.Builder() 1744 .setAudioSource(BIGNUM) 1745 .build(); 1746 }); 1747 1748 assertThrows(IllegalArgumentException.class, () -> { 1749 audioRecord[0] = new AudioRecord.Builder() 1750 .setAudioSource(-2) 1751 .build(); 1752 }); 1753 1754 // Invalid session id that is positive. 1755 // (logcat error message vague) 1756 assertThrows(UnsupportedOperationException.class, () -> { 1757 audioRecord[0] = new AudioRecord.Builder() 1758 .setSessionId(INVALID_SESSION_ID) 1759 .build(); 1760 }); 1761 1762 // Specialty AudioRecord tests 1763 assertThrows(NullPointerException.class, () -> { 1764 audioRecord[0] = new AudioRecord.Builder() 1765 .setAudioPlaybackCaptureConfig(null) 1766 .build(); 1767 }); 1768 1769 assertThrows(NullPointerException.class, () -> { 1770 audioRecord[0] = new AudioRecord.Builder() 1771 .setContext(null) 1772 .build(); 1773 }); 1774 1775 // Bad audio encoding DRA expected unsupported. 1776 try { 1777 audioRecord[0] = new AudioRecord.Builder() 1778 .setAudioFormat(new AudioFormat.Builder() 1779 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 1780 .setEncoding(AudioFormat.ENCODING_DRA) 1781 .build()) 1782 .build(); 1783 // Don't throw an exception, maybe it is supported somehow, but warn. 1784 Log.w(TAG, "ENCODING_DRA is expected to be unsupported"); 1785 audioRecord[0].release(); 1786 audioRecord[0] = null; 1787 } catch (UnsupportedOperationException e) { 1788 ; // OK expected 1789 } 1790 1791 // Sample rate out of bounds. 1792 // System levels caught on AudioFormat. 
1793 assertThrows(IllegalArgumentException.class, () -> { 1794 audioRecord[0] = new AudioRecord.Builder() 1795 .setAudioFormat(new AudioFormat.Builder() 1796 .setSampleRate(BIGNUM) 1797 .build()) 1798 .build(); 1799 }); 1800 1801 // Invalid channel mask 1802 // This is a UOE for AudioRecord vs IAE for AudioTrack. 1803 assertThrows(UnsupportedOperationException.class, () -> { 1804 audioRecord[0] = new AudioRecord.Builder() 1805 .setAudioFormat(new AudioFormat.Builder() 1806 .setChannelMask(INVALID_CHANNEL_MASK) 1807 .build()) 1808 .build(); 1809 }); 1810 } finally { 1811 // Did we successfully complete for some reason but did not 1812 // release? 1813 if (audioRecord[0] != null) { 1814 audioRecord[0].release(); 1815 audioRecord[0] = null; 1816 } 1817 } 1818 } 1819 1820 @Test testPrivacySensitiveBuilder()1821 public void testPrivacySensitiveBuilder() throws Exception { 1822 if (!hasMicrophone()) { 1823 return; 1824 } 1825 1826 for (final boolean privacyOn : new boolean[] { false, true} ) { 1827 AudioRecord record = new AudioRecord.Builder() 1828 .setAudioFormat(new AudioFormat.Builder() 1829 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1830 .setSampleRate(8000) 1831 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1832 .build()) 1833 .setPrivacySensitive(privacyOn) 1834 .build(); 1835 assertEquals(privacyOn, record.isPrivacySensitive()); 1836 record.release(); 1837 } 1838 } 1839 1840 @Test testPrivacySensitiveDefaults()1841 public void testPrivacySensitiveDefaults() throws Exception { 1842 if (!hasMicrophone()) { 1843 return; 1844 } 1845 1846 AudioRecord record = new AudioRecord.Builder() 1847 .setAudioSource(MediaRecorder.AudioSource.MIC) 1848 .setAudioFormat(new AudioFormat.Builder() 1849 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1850 .setSampleRate(8000) 1851 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1852 .build()) 1853 .build(); 1854 assertFalse(record.isPrivacySensitive()); 1855 record.release(); 1856 1857 record = new AudioRecord.Builder() 1858 .setAudioSource(MediaRecorder.AudioSource.VOICE_COMMUNICATION) 1859 .setAudioFormat(new AudioFormat.Builder() 1860 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1861 .setSampleRate(8000) 1862 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1863 .build()) 1864 .build(); 1865 assertTrue(record.isPrivacySensitive()); 1866 record.release(); 1867 } 1868 1869 @Test testSetLogSessionId()1870 public void testSetLogSessionId() throws Exception { 1871 if (!hasMicrophone()) { 1872 return; 1873 } 1874 AudioRecord audioRecord = null; 1875 try { 1876 audioRecord = new AudioRecord.Builder() 1877 .setAudioFormat(new AudioFormat.Builder() 1878 .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1879 .setChannelMask(AudioFormat.CHANNEL_IN_MONO) 1880 .build()) 1881 .build(); 1882 audioRecord.setLogSessionId(LogSessionId.LOG_SESSION_ID_NONE); // should not throw. 1883 assertEquals(LogSessionId.LOG_SESSION_ID_NONE, audioRecord.getLogSessionId()); 1884 1885 final MediaMetricsManager mediaMetricsManager = 1886 getContext().getSystemService(MediaMetricsManager.class); 1887 final RecordingSession recordingSession = 1888 mediaMetricsManager.createRecordingSession(); 1889 audioRecord.setLogSessionId(recordingSession.getSessionId()); 1890 assertEquals(recordingSession.getSessionId(), audioRecord.getLogSessionId()); 1891 1892 // record some data to generate a log entry. 
1893 short[] data = new short[audioRecord.getSampleRate() / 2];
1894 audioRecord.startRecording();
1895 audioRecord.read(data, 0 /* offsetInShorts */, data.length);
1896 audioRecord.stop();
1897 
1898 // The mediametrics dumpsys output can also be checked to validate the generated logs.
1899 } finally {
1900 if (audioRecord != null) {
1901 audioRecord.release();
1902 }
1903 }
1904 }
1905 
1906 @Test
1907 public void testCompressedCaptureAAC() throws Exception {
1908 final int ENCODING = AudioFormat.ENCODING_AAC_LC;
1909 final String MIMETYPE = MediaFormat.MIMETYPE_AUDIO_AAC;
1910 final int BUFFER_SIZE = 16000;
1911 if (!hasMicrophone()) {
1912 return;
1913 }
1914 AudioDeviceInfo[] devices = mAudioManager.getDevices(AudioManager.GET_DEVICES_INPUTS);
1915 // TODO test multiple supporting devices if available
1916 AudioDeviceInfo supportingDevice = null;
1917 for (AudioDeviceInfo device : devices) {
1918 for (int encoding : device.getEncodings()) {
1919 if (encoding == ENCODING) {
1920 supportingDevice = device;
1921 break;
1922 }
1923 }
1924 if (supportingDevice != null) break;
1925 }
1926 if (supportingDevice == null) {
1927 Log.i(TAG, "Compressed audio (AAC) not supported");
1928 return; // Compressed Audio is not supported
1929 }
1930 Log.i(TAG, "Compressed audio (AAC) supported");
1931 AudioRecord audioRecord = null;
1932 try {
1933 audioRecord = new AudioRecord.Builder()
1934 .setAudioFormat(new AudioFormat.Builder()
1935 .setEncoding(ENCODING)
1936 .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
1937 .build())
1938 .build();
1939 audioRecord.setPreferredDevice(supportingDevice);
1940 class ByteBufferImpl extends StreamUtils.ByteBufferStream {
1941 @Override
1942 public ByteBuffer read() throws IOException {
1943 if (mCount < 1 /* only one buffer */) {
1944 ++mCount;
1945 return mByteBuffer;
1946 }
1947 return null;
1948 }
1949 public ByteBuffer mByteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE);
1950 private int mCount = 0;
1951 }
1952 
1953 ByteBufferImpl byteBufferImpl = new ByteBufferImpl();
1954 audioRecord.startRecording();
1955 audioRecord.read(byteBufferImpl.mByteBuffer, BUFFER_SIZE);
1956 audioRecord.stop();
1957 // Attempt to decode compressed data
1958 // sample rate / channel count are not needed here
1959 final MediaFormat format = MediaFormat.createAudioFormat(MIMETYPE, 0, 0);
1960 final StreamUtils.MediaCodecStream decodingStream
1961 = new StreamUtils.MediaCodecStream(byteBufferImpl, format, false);
1962 ByteBuffer decoded = decodingStream.read();
1963 int totalDecoded = 0;
1964 while (decoded != null) {
1965 // TODO validate actual data
1966 totalDecoded += decoded.remaining();
1967 decoded = decodingStream.read();
1968 }
1969 Log.i(TAG, "Decoded size: " + totalDecoded);
1970 // TODO rethrow following exceptions on verification
1971 } catch (UnsupportedOperationException e) {
1972 Log.w(TAG, "Compressed AudioRecord could not be built");
1973 } catch (IllegalStateException e) {
1974 Log.w(TAG, "Compressed AudioRecord could not be started");
1975 } finally {
1976 if (audioRecord != null) {
1977 audioRecord.release();
1978 }
1979 }
1980 }
1981 }
1982 
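/*
 * Illustrative sketch (not part of the CTS test above): this package-private helper isolates
 * the timestamp-continuity arithmetic that verifyContinuousTimestamps() applies, so the
 * expected near-1.0 ratio between wall-clock time and frame-derived time is easier to see.
 * The class and method names below are hypothetical and introduced only for illustration;
 * only AudioTimestamp and its public framePosition / nanoTime fields come from the
 * android.media API.
 */
class AudioTimestampContinuitySketch {
    private static final long NANOS_PER_SECOND = 1_000_000_000L;

    /**
     * Returns the ratio of the elapsed wall-clock time between two timestamps to the elapsed
     * time implied by their frame-position delta at the given sample rate. For a glitch-free
     * recording this should be close to 1.0; the test above allows a delta of 0.01 on
     * low-latency devices and 0.5 otherwise.
     *
     * Example (hypothetical) usage mirroring the assertion in the test:
     *   assertEquals(1.0, AudioTimestampContinuitySketch.continuityRatio(startTs, stopTs, 44100), 0.01);
     */
    static double continuityRatio(AudioTimestamp start, AudioTimestamp stop, int sampleRate) {
        // elapsed time as reported by the timestamps themselves
        final long timeDiffNanos = stop.nanoTime - start.nanoTime;
        // elapsed time implied by how many frames were captured at the nominal sample rate
        final long frameDiff = stop.framePosition - start.framePosition;
        final long timeByFramesNanos = frameDiff * NANOS_PER_SECOND / sampleRate;
        return (double) timeDiffNanos / timeByFramesNanos;
    }
}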