/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

package org.webrtc.audio;

import android.annotation.TargetApi;
import android.content.Context;
import android.media.AudioDeviceInfo;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioRecordingConfiguration;
import android.media.MediaRecorder.AudioSource;
import android.os.Build;
import android.os.Process;
import android.support.annotation.Nullable;
import android.support.annotation.RequiresApi;
import java.lang.System;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.webrtc.CalledByNative;
import org.webrtc.Logging;
import org.webrtc.ThreadUtils;
import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordErrorCallback;
import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordStartErrorCode;
import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordStateCallback;
import org.webrtc.audio.JavaAudioDeviceModule.SamplesReadyCallback;

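// Typical call order, as suggested by the @CalledByNative annotations in this class:
// the native audio device module calls setNativeAudioRecord() once to attach the
// native peer, then drives initRecording() -> startRecording() -> stopRecording()
// for each capture session.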
class WebRtcAudioRecord {
  private static final String TAG = "WebRtcAudioRecordExternal";

  // Requested size of each recorded buffer provided to the client.
  private static final int CALLBACK_BUFFER_SIZE_MS = 10;

  // Average number of callbacks per second.
  private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;

  // We ask for a native buffer size of BUFFER_SIZE_FACTOR * (minimum required
  // buffer size). The extra space is allocated to guard against glitches under
  // high load.
  private static final int BUFFER_SIZE_FACTOR = 2;
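
  // Worked example of the sizing above: for a 48000 Hz mono PCM-16 stream, one
  // 10 ms callback buffer holds 48000 / BUFFERS_PER_SECOND = 480 frames, i.e.
  // 480 * 2 = 960 bytes, while the AudioRecord itself is created with at least
  // BUFFER_SIZE_FACTOR * AudioRecord.getMinBufferSize(...) bytes.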

  // The AudioRecordJavaThread is allowed to wait for a successful call to join()
  // but the wait times out after this amount of time.
  private static final long AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS = 2000;

  public static final int DEFAULT_AUDIO_SOURCE = AudioSource.VOICE_COMMUNICATION;

  // Default audio data format is PCM 16 bits per sample.
  // Guaranteed to be supported by all devices.
  public static final int DEFAULT_AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT;

  // Indicates AudioRecord has started recording audio.
  private static final int AUDIO_RECORD_START = 0;

  // Indicates AudioRecord has stopped recording audio.
  private static final int AUDIO_RECORD_STOP = 1;

  // Time to wait before checking recording status after start has been called. Tests have
  // shown that the result can sometimes be invalid (our own status might be missing) if we check
  // directly after start.
  private static final int CHECK_REC_STATUS_DELAY_MS = 100;

  private final Context context;
  private final AudioManager audioManager;
  private final int audioSource;
  private final int audioFormat;

  private long nativeAudioRecord;

  private final WebRtcAudioEffects effects = new WebRtcAudioEffects();

  private @Nullable ByteBuffer byteBuffer;

  private @Nullable AudioRecord audioRecord;
  private @Nullable AudioRecordThread audioThread;
  private @Nullable AudioDeviceInfo preferredDevice;

  private @Nullable ScheduledExecutorService executor;
  private @Nullable ScheduledFuture<String> future;

  private volatile boolean microphoneMute;
  private boolean audioSourceMatchesRecordingSession;
  private boolean isAudioConfigVerified;
  private byte[] emptyBytes;

  private final @Nullable AudioRecordErrorCallback errorCallback;
  private final @Nullable AudioRecordStateCallback stateCallback;
  private final @Nullable SamplesReadyCallback audioSamplesReadyCallback;
  private final boolean isAcousticEchoCancelerSupported;
  private final boolean isNoiseSuppressorSupported;

  /**
   * Audio thread which keeps calling AudioRecord.read() waiting for audio
   * to be recorded. Feeds recorded data to the native counterpart as a
   * periodic sequence of callbacks using DataIsRecorded().
   * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
   */
  private class AudioRecordThread extends Thread {
    private volatile boolean keepAlive = true;

    public AudioRecordThread(String name) {
      super(name);
    }

    @Override
    public void run() {
      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
      Logging.d(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
      assertTrue(audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING);

      // Audio recording has started and the client is informed about it.
      doAudioRecordStateCallback(AUDIO_RECORD_START);

      long lastTime = System.nanoTime();
      while (keepAlive) {
        int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
        if (bytesRead == byteBuffer.capacity()) {
          if (microphoneMute) {
            byteBuffer.clear();
            byteBuffer.put(emptyBytes);
          }
          // It's possible we've been shut down during the read, and stopRecording() tried and
          // failed to join this thread. To be a bit safer, try to avoid calling any native methods
          // in case they've been unregistered after stopRecording() returned.
          if (keepAlive) {
            nativeDataIsRecorded(nativeAudioRecord, bytesRead);
          }
          if (audioSamplesReadyCallback != null) {
            // Copy the entire byte buffer array. The start of the byteBuffer is not necessarily
            // at index 0.
            byte[] data = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.arrayOffset(),
                byteBuffer.capacity() + byteBuffer.arrayOffset());
            audioSamplesReadyCallback.onWebRtcAudioRecordSamplesReady(
                new JavaAudioDeviceModule.AudioSamples(audioRecord.getAudioFormat(),
                    audioRecord.getChannelCount(), audioRecord.getSampleRate(), data));
          }
        } else {
          String errorMessage = "AudioRecord.read failed: " + bytesRead;
          Logging.e(TAG, errorMessage);
          if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
            keepAlive = false;
            reportWebRtcAudioRecordError(errorMessage);
          }
        }
      }

      try {
        if (audioRecord != null) {
          audioRecord.stop();
          doAudioRecordStateCallback(AUDIO_RECORD_STOP);
        }
      } catch (IllegalStateException e) {
        Logging.e(TAG, "AudioRecord.stop failed: " + e.getMessage());
      }
    }

    // Stops the inner thread loop and also calls AudioRecord.stop().
    // Does not block the calling thread.
    public void stopThread() {
      Logging.d(TAG, "stopThread");
      keepAlive = false;
    }
  }
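
  // Shutdown sequence: stopThread() only clears |keepAlive|, so the loop above exits
  // after at most one more blocking read (one 10 ms buffer), and AudioRecord.stop()
  // then runs on the audio thread itself rather than on the caller's thread.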

  @CalledByNative
  WebRtcAudioRecord(Context context, AudioManager audioManager) {
    this(context, audioManager, DEFAULT_AUDIO_SOURCE, DEFAULT_AUDIO_FORMAT,
        null /* errorCallback */, null /* stateCallback */, null /* audioSamplesReadyCallback */,
        WebRtcAudioEffects.isAcousticEchoCancelerSupported(),
        WebRtcAudioEffects.isNoiseSuppressorSupported());
  }

  public WebRtcAudioRecord(Context context, AudioManager audioManager, int audioSource,
      int audioFormat, @Nullable AudioRecordErrorCallback errorCallback,
      @Nullable AudioRecordStateCallback stateCallback,
      @Nullable SamplesReadyCallback audioSamplesReadyCallback,
      boolean isAcousticEchoCancelerSupported, boolean isNoiseSuppressorSupported) {
    if (isAcousticEchoCancelerSupported && !WebRtcAudioEffects.isAcousticEchoCancelerSupported()) {
      throw new IllegalArgumentException("HW AEC not supported");
    }
    if (isNoiseSuppressorSupported && !WebRtcAudioEffects.isNoiseSuppressorSupported()) {
      throw new IllegalArgumentException("HW NS not supported");
    }
    this.context = context;
    this.audioManager = audioManager;
    this.audioSource = audioSource;
    this.audioFormat = audioFormat;
    this.errorCallback = errorCallback;
    this.stateCallback = stateCallback;
    this.audioSamplesReadyCallback = audioSamplesReadyCallback;
    this.isAcousticEchoCancelerSupported = isAcousticEchoCancelerSupported;
    this.isNoiseSuppressorSupported = isNoiseSuppressorSupported;
    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  }
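
  // Application code does not normally construct this class directly; the package-private
  // @CalledByNative constructor above suggests it is created on behalf of
  // JavaAudioDeviceModule, which supplies the callbacks and effect-support flags.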

  @CalledByNative
  public void setNativeAudioRecord(long nativeAudioRecord) {
    this.nativeAudioRecord = nativeAudioRecord;
  }

  @CalledByNative
  boolean isAcousticEchoCancelerSupported() {
    return isAcousticEchoCancelerSupported;
  }

  @CalledByNative
  boolean isNoiseSuppressorSupported() {
    return isNoiseSuppressorSupported;
  }

  // Returns true if a valid call to verifyAudioConfig() has been done. Should always be
  // checked before using the returned value of isAudioSourceMatchingRecordingSession().
  @CalledByNative
  boolean isAudioConfigVerified() {
    return isAudioConfigVerified;
  }

  // Returns true if verifyAudioConfig() succeeds. This value is set after a specific delay when
  // startRecording() has been called. Hence, should preferably be called in combination with
  // stopRecording() to ensure that it has been set properly. |isAudioConfigVerified| is
  // enabled in WebRtcAudioRecord to ensure that the returned value is valid.
  @CalledByNative
  boolean isAudioSourceMatchingRecordingSession() {
    if (!isAudioConfigVerified) {
      Logging.w(TAG, "Audio configuration has not yet been verified");
      return false;
    }
    return audioSourceMatchesRecordingSession;
  }

  @CalledByNative
  private boolean enableBuiltInAEC(boolean enable) {
    Logging.d(TAG, "enableBuiltInAEC(" + enable + ")");
    return effects.setAEC(enable);
  }

  @CalledByNative
  private boolean enableBuiltInNS(boolean enable) {
    Logging.d(TAG, "enableBuiltInNS(" + enable + ")");
    return effects.setNS(enable);
  }
  @CalledByNative
  private int initRecording(int sampleRate, int channels) {
    Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" + channels + ")");
    if (audioRecord != null) {
      reportWebRtcAudioRecordInitError("InitRecording called twice without StopRecording.");
      return -1;
    }
    final int bytesPerFrame = channels * getBytesPerSample(audioFormat);
    final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
    byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
    if (!(byteBuffer.hasArray())) {
      reportWebRtcAudioRecordInitError("ByteBuffer does not have backing array.");
      return -1;
    }
    Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
    emptyBytes = new byte[byteBuffer.capacity()];
    // Rather than passing the ByteBuffer with every callback (requiring
    // the potentially expensive GetDirectBufferAddress) we simply have the
    // native class cache the address to the memory once.
    nativeCacheDirectBufferAddress(nativeAudioRecord, byteBuffer);

    // Get the minimum buffer size required for the successful creation of
    // an AudioRecord object, in byte units.
    // Note that this size doesn't guarantee a smooth recording under load.
    final int channelConfig = channelCountToConfiguration(channels);
    int minBufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
    if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
      reportWebRtcAudioRecordInitError("AudioRecord.getMinBufferSize failed: " + minBufferSize);
      return -1;
    }
    Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);

    // Use a larger buffer size than the minimum required when creating the
    // AudioRecord instance to ensure smooth recording under load. It has been
    // verified that it does not increase the actual recording latency.
    int bufferSizeInBytes = Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
    Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
    try {
      if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
        // Use the AudioRecord.Builder class on Android M (23) and above.
        // Throws IllegalArgumentException.
        audioRecord = createAudioRecordOnMOrHigher(
            audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes);
        if (preferredDevice != null) {
          setPreferredDevice(preferredDevice);
        }
      } else {
        // Use the old AudioRecord constructor for API levels below 23.
        // Throws UnsupportedOperationException.
        audioRecord = createAudioRecordOnLowerThanM(
            audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes);
      }
    } catch (IllegalArgumentException | UnsupportedOperationException e) {
      // Reporting the exception message is sufficient. Example: "Cannot create AudioRecord".
      reportWebRtcAudioRecordInitError(e.getMessage());
      releaseAudioResources();
      return -1;
    }
    if (audioRecord == null || audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
      reportWebRtcAudioRecordInitError("Creation or initialization of audio recorder failed.");
      releaseAudioResources();
      return -1;
    }
    effects.enable(audioRecord.getAudioSessionId());
    logMainParameters();
    logMainParametersExtended();
    // Check the number of active recording sessions. It should be zero, but we have seen conflict
    // cases, and adding a log for it can help us figure out details about conflicting sessions.
    final int numActiveRecordingSessions =
        logRecordingConfigurations(false /* verifyAudioConfig */);
    if (numActiveRecordingSessions != 0) {
      // Log the conflict as a warning since initialization did in fact succeed. Most likely, the
      // upcoming call to startRecording() will fail under these conditions.
      Logging.w(
          TAG, "Potential microphone conflict. Active sessions: " + numActiveRecordingSessions);
    }
    return framesPerBuffer;
  }
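
  // The returned |framesPerBuffer| equals sampleRate / BUFFERS_PER_SECOND, so the native
  // side receives, e.g., 480 for a 48000 Hz stream: the frame count of one 10 ms buffer.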

  /**
   * Prefer a specific {@link AudioDeviceInfo} device for recording. Calling after recording starts
   * is valid but may cause a temporary interruption if the audio routing changes.
   */
  @RequiresApi(Build.VERSION_CODES.M)
  @TargetApi(Build.VERSION_CODES.M)
  void setPreferredDevice(@Nullable AudioDeviceInfo preferredDevice) {
    Logging.d(
        TAG, "setPreferredDevice " + (preferredDevice != null ? preferredDevice.getId() : null));
    this.preferredDevice = preferredDevice;
    if (audioRecord != null) {
      if (!audioRecord.setPreferredDevice(preferredDevice)) {
        Logging.e(TAG, "setPreferredDevice failed");
      }
    }
  }

  @CalledByNative
  private boolean startRecording() {
    Logging.d(TAG, "startRecording");
    assertTrue(audioRecord != null);
    assertTrue(audioThread == null);
    try {
      audioRecord.startRecording();
    } catch (IllegalStateException e) {
      reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_EXCEPTION,
          "AudioRecord.startRecording failed: " + e.getMessage());
      return false;
    }
    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
      reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_STATE_MISMATCH,
          "AudioRecord.startRecording failed - incorrect state: "
              + audioRecord.getRecordingState());
      return false;
    }
    audioThread = new AudioRecordThread("AudioRecordJavaThread");
    audioThread.start();
    scheduleLogRecordingConfigurationsTask();
    return true;
  }

  @CalledByNative
  private boolean stopRecording() {
    Logging.d(TAG, "stopRecording");
    assertTrue(audioThread != null);
    if (future != null) {
      if (!future.isDone()) {
        // Might be needed if the client calls startRecording(), stopRecording() back-to-back.
        future.cancel(true /* mayInterruptIfRunning */);
      }
      future = null;
    }
    if (executor != null) {
      executor.shutdownNow();
      executor = null;
    }
    audioThread.stopThread();
    if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
      Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
      WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    }
    audioThread = null;
    effects.release();
    releaseAudioResources();
    return true;
  }

  @TargetApi(Build.VERSION_CODES.M)
  private static AudioRecord createAudioRecordOnMOrHigher(
      int audioSource, int sampleRate, int channelConfig, int audioFormat, int bufferSizeInBytes) {
    Logging.d(TAG, "createAudioRecordOnMOrHigher");
    return new AudioRecord.Builder()
        .setAudioSource(audioSource)
        .setAudioFormat(new AudioFormat.Builder()
                            .setEncoding(audioFormat)
                            .setSampleRate(sampleRate)
                            .setChannelMask(channelConfig)
                            .build())
        .setBufferSizeInBytes(bufferSizeInBytes)
        .build();
  }

  private static AudioRecord createAudioRecordOnLowerThanM(
      int audioSource, int sampleRate, int channelConfig, int audioFormat, int bufferSizeInBytes) {
    Logging.d(TAG, "createAudioRecordOnLowerThanM");
    return new AudioRecord(audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes);
  }

  private void logMainParameters() {
    Logging.d(TAG,
        "AudioRecord: "
            + "session ID: " + audioRecord.getAudioSessionId() + ", "
            + "channels: " + audioRecord.getChannelCount() + ", "
            + "sample rate: " + audioRecord.getSampleRate());
  }

  @TargetApi(Build.VERSION_CODES.M)
  private void logMainParametersExtended() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
      Logging.d(TAG,
          "AudioRecord: "
              // The frame count of the native AudioRecord buffer.
              + "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
    }
  }

  @TargetApi(Build.VERSION_CODES.N)
  // Checks the number of active recording sessions and logs the states of all active sessions.
  // Returns the number of active sessions.
  private int logRecordingConfigurations(boolean verifyAudioConfig) {
    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
      Logging.w(TAG, "AudioManager#getActiveRecordingConfigurations() requires N or higher");
      return 0;
    }
    if (audioRecord == null) {
      return 0;
    }
    // Get a list of the currently active audio recording configurations of the device (can be more
    // than one). An empty list indicates there is no recording active when queried.
    List<AudioRecordingConfiguration> configs = audioManager.getActiveRecordingConfigurations();
    final int numActiveRecordingSessions = configs.size();
    Logging.d(TAG, "Number of active recording sessions: " + numActiveRecordingSessions);
    if (numActiveRecordingSessions > 0) {
      logActiveRecordingConfigs(audioRecord.getAudioSessionId(), configs);
      if (verifyAudioConfig) {
        // Run an extra check to verify that the existing audio source doing the recording (tied
        // to the AudioRecord instance) matches what the audio recording configuration lists
        // as its client parameters. If these do not match, recording might work but under invalid
        // conditions.
        audioSourceMatchesRecordingSession =
            verifyAudioConfig(audioRecord.getAudioSource(), audioRecord.getAudioSessionId(),
                audioRecord.getFormat(), audioRecord.getRoutedDevice(), configs);
        isAudioConfigVerified = true;
      }
    }
    return numActiveRecordingSessions;
  }

  // Helper method which throws an exception when an assertion has failed.
  private static void assertTrue(boolean condition) {
    if (!condition) {
      throw new AssertionError("Expected condition to be true");
    }
  }

  private int channelCountToConfiguration(int channels) {
    return (channels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
  }

  private native void nativeCacheDirectBufferAddress(
      long nativeAudioRecordJni, ByteBuffer byteBuffer);
  private native void nativeDataIsRecorded(long nativeAudioRecordJni, int bytes);

  // Sets all recorded samples to zero if |mute| is true, i.e., ensures that
  // the microphone is muted.
  public void setMicrophoneMute(boolean mute) {
    Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
    microphoneMute = mute;
  }
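
  // Muting here does not stop the capture pipeline: AudioRecordThread keeps reading
  // from the device at the normal rate and simply overwrites each buffer with
  // |emptyBytes| (all zeros) before it is delivered, so callback timing is preserved.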

  // Releases the native AudioRecord resources.
  private void releaseAudioResources() {
    Logging.d(TAG, "releaseAudioResources");
    if (audioRecord != null) {
      audioRecord.release();
      audioRecord = null;
    }
  }

  private void reportWebRtcAudioRecordInitError(String errorMessage) {
    Logging.e(TAG, "Init recording error: " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    logRecordingConfigurations(false /* verifyAudioConfig */);
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioRecordInitError(errorMessage);
    }
  }

  private void reportWebRtcAudioRecordStartError(
      AudioRecordStartErrorCode errorCode, String errorMessage) {
    Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    logRecordingConfigurations(false /* verifyAudioConfig */);
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
    }
  }

  private void reportWebRtcAudioRecordError(String errorMessage) {
    Logging.e(TAG, "Run-time recording error: " + errorMessage);
    WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
    if (errorCallback != null) {
      errorCallback.onWebRtcAudioRecordError(errorMessage);
    }
  }

  private void doAudioRecordStateCallback(int audioState) {
    Logging.d(TAG, "doAudioRecordStateCallback: " + audioStateToString(audioState));
    if (stateCallback != null) {
      if (audioState == WebRtcAudioRecord.AUDIO_RECORD_START) {
        stateCallback.onWebRtcAudioRecordStart();
      } else if (audioState == WebRtcAudioRecord.AUDIO_RECORD_STOP) {
        stateCallback.onWebRtcAudioRecordStop();
      } else {
        Logging.e(TAG, "Invalid audio state");
      }
    }
  }

  // Reference from Android code, AudioFormat.getBytesPerSample: bits per sample / 8.
  // The default audio data format is PCM 16 bits per sample, which is
  // guaranteed to be supported by all devices.
  private static int getBytesPerSample(int audioFormat) {
    switch (audioFormat) {
      case AudioFormat.ENCODING_PCM_8BIT:
        return 1;
      case AudioFormat.ENCODING_PCM_16BIT:
      case AudioFormat.ENCODING_IEC61937:
      case AudioFormat.ENCODING_DEFAULT:
        return 2;
      case AudioFormat.ENCODING_PCM_FLOAT:
        return 4;
      case AudioFormat.ENCODING_INVALID:
      default:
        throw new IllegalArgumentException("Bad audio format " + audioFormat);
    }
  }
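
  // Example: with ENCODING_PCM_16BIT and stereo input, one audio frame occupies
  // 2 channels * 2 bytes = 4 bytes; this is the per-frame size used in initRecording().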

  // Use an ExecutorService to schedule a task after a given delay where the task consists of
  // checking (by logging) the current status of active recording sessions.
  private void scheduleLogRecordingConfigurationsTask() {
    Logging.d(TAG, "scheduleLogRecordingConfigurationsTask");
    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
      return;
    }
    if (executor != null) {
      executor.shutdownNow();
    }
    executor = Executors.newSingleThreadScheduledExecutor();

    Callable<String> callable = () -> {
      logRecordingConfigurations(true /* verifyAudioConfig */);
      return "Scheduled task is done";
    };

    if (future != null && !future.isDone()) {
      future.cancel(true /* mayInterruptIfRunning */);
    }
    // Schedule a call to logRecordingConfigurations() on the executor thread after a fixed delay.
    future = executor.schedule(callable, CHECK_REC_STATUS_DELAY_MS, TimeUnit.MILLISECONDS);
  }
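
  // The CHECK_REC_STATUS_DELAY_MS (100 ms) delay gives the platform time to publish the
  // new AudioRecordingConfiguration; verifying immediately after startRecording() has been
  // observed to return incomplete results (see the comment on the constant above).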

  @TargetApi(Build.VERSION_CODES.N)
  private static boolean logActiveRecordingConfigs(
      int session, List<AudioRecordingConfiguration> configs) {
    assertTrue(!configs.isEmpty());
    final Iterator<AudioRecordingConfiguration> it = configs.iterator();
    Logging.d(TAG, "AudioRecordingConfigurations: ");
    while (it.hasNext()) {
      final AudioRecordingConfiguration config = it.next();
      StringBuilder conf = new StringBuilder();
      // The audio source selected by the client.
      final int audioSource = config.getClientAudioSource();
      conf.append("  client audio source=")
          .append(WebRtcAudioUtils.audioSourceToString(audioSource))
          .append(", client session id=")
          .append(config.getClientAudioSessionId())
          // Compare with our own id (based on AudioRecord#getAudioSessionId()).
          .append(" (")
          .append(session)
          .append(")")
          .append("\n");
      // Audio format at which audio is recorded on this Android device. Note that it may differ
      // from the client application recording format (see getClientFormat()).
      AudioFormat format = config.getFormat();
      conf.append("  Device AudioFormat: ")
          .append("channel count=")
          .append(format.getChannelCount())
          .append(", channel index mask=")
          .append(format.getChannelIndexMask())
          // Only AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all devices.
          .append(", channel mask=")
          .append(WebRtcAudioUtils.channelMaskToString(format.getChannelMask()))
          .append(", encoding=")
          .append(WebRtcAudioUtils.audioEncodingToString(format.getEncoding()))
          .append(", sample rate=")
          .append(format.getSampleRate())
          .append("\n");
      // Audio format at which the client application is recording audio.
      format = config.getClientFormat();
      conf.append("  Client AudioFormat: ")
          .append("channel count=")
          .append(format.getChannelCount())
          .append(", channel index mask=")
          .append(format.getChannelIndexMask())
          // Only AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all devices.
          .append(", channel mask=")
          .append(WebRtcAudioUtils.channelMaskToString(format.getChannelMask()))
          .append(", encoding=")
          .append(WebRtcAudioUtils.audioEncodingToString(format.getEncoding()))
          .append(", sample rate=")
          .append(format.getSampleRate())
          .append("\n");
      // Audio input device used for this recording session.
      final AudioDeviceInfo device = config.getAudioDevice();
      if (device != null) {
        assertTrue(device.isSource());
        conf.append("  AudioDevice: ")
            .append("type=")
            .append(WebRtcAudioUtils.deviceTypeToString(device.getType()))
            .append(", id=")
            .append(device.getId());
      }
      Logging.d(TAG, conf.toString());
    }
    return true;
  }

  // Verify that the client audio configuration (device and format) matches the requested
  // configuration (same as AudioRecord's).
  @TargetApi(Build.VERSION_CODES.N)
  private static boolean verifyAudioConfig(int source, int session, AudioFormat format,
      AudioDeviceInfo device, List<AudioRecordingConfiguration> configs) {
    assertTrue(!configs.isEmpty());
    final Iterator<AudioRecordingConfiguration> it = configs.iterator();
    while (it.hasNext()) {
      final AudioRecordingConfiguration config = it.next();
      final AudioDeviceInfo configDevice = config.getAudioDevice();
      if (configDevice == null) {
        continue;
      }
      if ((config.getClientAudioSource() == source)
          && (config.getClientAudioSessionId() == session)
          // Check the client format (should match the format of the AudioRecord instance).
          && (config.getClientFormat().getEncoding() == format.getEncoding())
          && (config.getClientFormat().getSampleRate() == format.getSampleRate())
          && (config.getClientFormat().getChannelMask() == format.getChannelMask())
          && (config.getClientFormat().getChannelIndexMask() == format.getChannelIndexMask())
          // Ensure that the device format is properly configured.
          && (config.getFormat().getEncoding() != AudioFormat.ENCODING_INVALID)
          && (config.getFormat().getSampleRate() > 0)
          // For the channel mask, either the position or index-based value must be valid.
          && ((config.getFormat().getChannelMask() != AudioFormat.CHANNEL_INVALID)
              || (config.getFormat().getChannelIndexMask() != AudioFormat.CHANNEL_INVALID))
          && checkDeviceMatch(configDevice, device)) {
        Logging.d(TAG, "verifyAudioConfig: PASS");
        return true;
      }
    }
    Logging.e(TAG, "verifyAudioConfig: FAILED");
    return false;
  }

  @TargetApi(Build.VERSION_CODES.N)
  // Returns true if the parameters of device A match those of device B.
  // TODO(henrika): can be improved by adding AudioDeviceInfo#getAddress() but it requires API 29.
  private static boolean checkDeviceMatch(AudioDeviceInfo devA, AudioDeviceInfo devB) {
    return devA.getId() == devB.getId() && devA.getType() == devB.getType();
  }

  private static String audioStateToString(int state) {
    switch (state) {
      case WebRtcAudioRecord.AUDIO_RECORD_START:
        return "START";
      case WebRtcAudioRecord.AUDIO_RECORD_STOP:
        return "STOP";
      default:
        return "INVALID";
    }
  }
}