• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
12 #define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
13 
14 // MSVC++ requires this to be set before any other includes to get M_PI.
15 #ifndef _USE_MATH_DEFINES
16 #define _USE_MATH_DEFINES
17 #endif
18 
#include <math.h>
#include <stddef.h>  // size_t
#include <stdint.h>  // int64_t
#include <stdio.h>   // FILE
#include <string.h>

#include <array>
#include <memory>
#include <string>
#include <vector>

#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_control.h"
#include "api/scoped_refptr.h"
#include "modules/audio_processing/include/audio_processing_statistics.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/ref_count.h"
#include "rtc_base/system/file_wrapper.h"
#include "rtc_base/system/rtc_export.h"
37 
38 namespace rtc {
39 class TaskQueue;
40 }  // namespace rtc
41 
42 namespace webrtc {
43 
44 class AecDump;
45 class AudioBuffer;
46 
47 class StreamConfig;
48 class ProcessingConfig;
49 
50 class EchoDetector;
51 class CustomAudioAnalyzer;
52 class CustomProcessing;
53 
54 // The Audio Processing Module (APM) provides a collection of voice processing
55 // components designed for real-time communications software.
56 //
57 // APM operates on two audio streams on a frame-by-frame basis. Frames of the
58 // primary stream, on which all processing is applied, are passed to
59 // `ProcessStream()`. Frames of the reverse direction stream are passed to
60 // `ProcessReverseStream()`. On the client-side, this will typically be the
61 // near-end (capture) and far-end (render) streams, respectively. APM should be
62 // placed in the signal chain as close to the audio hardware abstraction layer
63 // (HAL) as possible.
64 //
65 // On the server-side, the reverse stream will normally not be used, with
66 // processing occurring on each incoming stream.
67 //
68 // Component interfaces follow a similar pattern and are accessed through
69 // corresponding getters in APM. All components are disabled at create-time,
70 // with default settings that are recommended for most situations. New settings
71 // can be applied without enabling a component. Enabling a component triggers
72 // memory allocation and initialization to allow it to start processing the
73 // streams.
74 //
75 // Thread safety is provided with the following assumptions to reduce locking
76 // overhead:
77 //   1. The stream getters and setters are called from the same thread as
78 //      ProcessStream(). More precisely, stream functions are never called
79 //      concurrently with ProcessStream().
80 //   2. Parameter getters are never called concurrently with the corresponding
81 //      setter.
82 //
83 // APM accepts only linear PCM audio data in chunks of ~10 ms (see
84 // AudioProcessing::GetFrameSize() for details) and sample rates ranging from
85 // 8000 Hz to 384000 Hz. The int16 interfaces use interleaved data, while the
86 // float interfaces use deinterleaved data.
87 //
88 // Usage example, omitting error checking:
89 // rtc::scoped_refptr<AudioProcessing> apm = AudioProcessingBuilder().Create();
90 //
91 // AudioProcessing::Config config;
92 // config.echo_canceller.enabled = true;
93 // config.echo_canceller.mobile_mode = false;
94 //
95 // config.gain_controller1.enabled = true;
96 // config.gain_controller1.mode =
97 // AudioProcessing::Config::GainController1::kAdaptiveAnalog;
98 // config.gain_controller1.analog_level_minimum = 0;
99 // config.gain_controller1.analog_level_maximum = 255;
100 //
101 // config.gain_controller2.enabled = true;
102 //
103 // config.high_pass_filter.enabled = true;
104 //
105 // apm->ApplyConfig(config);
106 //
107 // // Start a voice call...
108 //
109 // // ... Render frame arrives bound for the audio HAL ...
110 // apm->ProcessReverseStream(render_frame);
111 //
112 // // ... Capture frame arrives from the audio HAL ...
113 // // Call required set_stream_ functions.
114 // apm->set_stream_delay_ms(delay_ms);
115 // apm->set_stream_analog_level(analog_level);
116 //
117 // apm->ProcessStream(capture_frame);
118 //
119 // // Call required stream_ functions.
120 // analog_level = apm->recommended_stream_analog_level();
121 // has_voice = apm->stream_has_voice();
122 //
123 // // Repeat render and capture processing for the duration of the call...
124 // // Start a new call...
125 // apm->Initialize();
126 //
127 // // Close the application...
128 // apm.reset();
129 //
130 class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface {
131  public:
132   // The struct below constitutes the new parameter scheme for the audio
133   // processing. It is being introduced gradually and until it is fully
134   // introduced, it is prone to change.
135   // TODO(peah): Remove this comment once the new config scheme is fully rolled
136   // out.
137   //
138   // The parameters and behavior of the audio processing module are controlled
139   // by changing the default values in the AudioProcessing::Config struct.
140   // The config is applied by passing the struct to the ApplyConfig method.
141   //
142   // This config is intended to be used during setup, and to enable/disable
143   // top-level processing effects. Use during processing may cause undesired
144   // submodule resets, affecting the audio quality. Use the RuntimeSetting
145   // construct for runtime configuration.
146   struct RTC_EXPORT Config {
147     // Sets the properties of the audio processing pipeline.
148     struct RTC_EXPORT Pipeline {
149       // Ways to downmix a multi-channel track to mono.
150       enum class DownmixMethod {
151         kAverageChannels,  // Average across channels.
152         kUseFirstChannel   // Use the first channel.
153       };
154 
155       // Maximum allowed processing rate used internally. May only be set to
156       // 32000 or 48000 and any differing values will be treated as 48000.
157       int maximum_internal_processing_rate = 48000;
158       // Allow multi-channel processing of render audio.
159       bool multi_channel_render = false;
160       // Allow multi-channel processing of capture audio when AEC3 is active
161       // or a custom AEC is injected..
162       bool multi_channel_capture = false;
163       // Indicates how to downmix multi-channel capture audio to mono (when
164       // needed).
165       DownmixMethod capture_downmix_method = DownmixMethod::kAverageChannels;
166     } pipeline;
167 
168     // Enabled the pre-amplifier. It amplifies the capture signal
169     // before any other processing is done.
170     // TODO(webrtc:5298): Deprecate and use the pre-gain functionality in
171     // capture_level_adjustment instead.
172     struct PreAmplifier {
173       bool enabled = false;
174       float fixed_gain_factor = 1.0f;
175     } pre_amplifier;
176 
177     // Functionality for general level adjustment in the capture pipeline. This
178     // should not be used together with the legacy PreAmplifier functionality.
179     struct CaptureLevelAdjustment {
180       bool operator==(const CaptureLevelAdjustment& rhs) const;
181       bool operator!=(const CaptureLevelAdjustment& rhs) const {
182         return !(*this == rhs);
183       }
184       bool enabled = false;
185       // The `pre_gain_factor` scales the signal before any processing is done.
186       float pre_gain_factor = 1.0f;
187       // The `post_gain_factor` scales the signal after all processing is done.
188       float post_gain_factor = 1.0f;
189       struct AnalogMicGainEmulation {
190         bool operator==(const AnalogMicGainEmulation& rhs) const;
191         bool operator!=(const AnalogMicGainEmulation& rhs) const {
192           return !(*this == rhs);
193         }
194         bool enabled = false;
195         // Initial analog gain level to use for the emulated analog gain. Must
196         // be in the range [0...255].
197         int initial_level = 255;
198       } analog_mic_gain_emulation;
199     } capture_level_adjustment;
200 
201     struct HighPassFilter {
202       bool enabled = false;
203       bool apply_in_full_band = true;
204     } high_pass_filter;
205 
206     struct EchoCanceller {
207       bool enabled = false;
208       bool mobile_mode = false;
209       bool export_linear_aec_output = false;
210       // Enforce the highpass filter to be on (has no effect for the mobile
211       // mode).
212       bool enforce_high_pass_filtering = true;
213     } echo_canceller;
214 
215     // Enables background noise suppression.
216     struct NoiseSuppression {
217       bool enabled = false;
218       enum Level { kLow, kModerate, kHigh, kVeryHigh };
219       Level level = kModerate;
220       bool analyze_linear_aec_output_when_available = false;
221     } noise_suppression;
222 
223     // Enables transient suppression.
224     struct TransientSuppression {
225       bool enabled = false;
226     } transient_suppression;
227 
228     // Enables automatic gain control (AGC) functionality.
229     // The automatic gain control (AGC) component brings the signal to an
230     // appropriate range. This is done by applying a digital gain directly and,
231     // in the analog mode, prescribing an analog gain to be applied at the audio
232     // HAL.
233     // Recommended to be enabled on the client-side.
234     struct RTC_EXPORT GainController1 {
235       bool operator==(const GainController1& rhs) const;
236       bool operator!=(const GainController1& rhs) const {
237         return !(*this == rhs);
238       }
239 
240       bool enabled = false;
241       enum Mode {
242         // Adaptive mode intended for use if an analog volume control is
243         // available on the capture device. It will require the user to provide
244         // coupling between the OS mixer controls and AGC through the
245         // stream_analog_level() functions.
246         // It consists of an analog gain prescription for the audio device and a
247         // digital compression stage.
248         kAdaptiveAnalog,
249         // Adaptive mode intended for situations in which an analog volume
250         // control is unavailable. It operates in a similar fashion to the
251         // adaptive analog mode, but with scaling instead applied in the digital
252         // domain. As with the analog mode, it additionally uses a digital
253         // compression stage.
254         kAdaptiveDigital,
255         // Fixed mode which enables only the digital compression stage also used
256         // by the two adaptive modes.
257         // It is distinguished from the adaptive modes by considering only a
258         // short time-window of the input signal. It applies a fixed gain
259         // through most of the input level range, and compresses (gradually
260         // reduces gain with increasing level) the input signal at higher
261         // levels. This mode is preferred on embedded devices where the capture
262         // signal level is predictable, so that a known gain can be applied.
263         kFixedDigital
264       };
265       Mode mode = kAdaptiveAnalog;
266       // Sets the target peak level (or envelope) of the AGC in dBFs (decibels
267       // from digital full-scale). The convention is to use positive values. For
268       // instance, passing in a value of 3 corresponds to -3 dBFs, or a target
269       // level 3 dB below full-scale. Limited to [0, 31].
270       int target_level_dbfs = 3;
271       // Sets the maximum gain the digital compression stage may apply, in dB. A
272       // higher number corresponds to greater compression, while a value of 0
273       // will leave the signal uncompressed. Limited to [0, 90].
274       // For updates after APM setup, use a RuntimeSetting instead.
275       int compression_gain_db = 9;
276       // When enabled, the compression stage will hard limit the signal to the
277       // target level. Otherwise, the signal will be compressed but not limited
278       // above the target level.
279       bool enable_limiter = true;
280 
281       // Enables the analog gain controller functionality.
282       struct AnalogGainController {
283         bool enabled = true;
284         // TODO(bugs.webrtc.org/7494): Deprecated. Stop using and remove.
285         int startup_min_volume = 0;
286         // Lowest analog microphone level that will be applied in response to
287         // clipping.
288         int clipped_level_min = 70;
289         // If true, an adaptive digital gain is applied.
290         bool enable_digital_adaptive = true;
291         // Amount the microphone level is lowered with every clipping event.
292         // Limited to (0, 255].
293         int clipped_level_step = 15;
294         // Proportion of clipped samples required to declare a clipping event.
295         // Limited to (0.f, 1.f).
296         float clipped_ratio_threshold = 0.1f;
297         // Time in frames to wait after a clipping event before checking again.
298         // Limited to values higher than 0.
299         int clipped_wait_frames = 300;
300 
301         // Enables clipping prediction functionality.
302         struct ClippingPredictor {
303           bool enabled = false;
304           enum Mode {
305             // Clipping event prediction mode with fixed step estimation.
306             kClippingEventPrediction,
307             // Clipped peak estimation mode with adaptive step estimation.
308             kAdaptiveStepClippingPeakPrediction,
309             // Clipped peak estimation mode with fixed step estimation.
310             kFixedStepClippingPeakPrediction,
311           };
312           Mode mode = kClippingEventPrediction;
313           // Number of frames in the sliding analysis window.
314           int window_length = 5;
315           // Number of frames in the sliding reference window.
316           int reference_window_length = 5;
317           // Reference window delay (unit: number of frames).
318           int reference_window_delay = 5;
319           // Clipping prediction threshold (dBFS).
320           float clipping_threshold = -1.0f;
321           // Crest factor drop threshold (dB).
322           float crest_factor_margin = 3.0f;
323           // If true, the recommended clipped level step is used to modify the
324           // analog gain. Otherwise, the predictor runs without affecting the
325           // analog gain.
326           bool use_predicted_step = true;
327         } clipping_predictor;
328       } analog_gain_controller;
329     } gain_controller1;
330 
331     // Parameters for AGC2, an Automatic Gain Control (AGC) sub-module which
332     // replaces the AGC sub-module parametrized by `gain_controller1`.
333     // AGC2 brings the captured audio signal to the desired level by combining
334     // three different controllers (namely, input volume controller, adapative
335     // digital controller and fixed digital controller) and a limiter.
336     // TODO(bugs.webrtc.org:7494): Name `GainController` when AGC1 removed.
337     struct RTC_EXPORT GainController2 {
338       bool operator==(const GainController2& rhs) const;
339       bool operator!=(const GainController2& rhs) const {
340         return !(*this == rhs);
341       }
342 
343       // AGC2 must be created if and only if `enabled` is true.
344       bool enabled = false;
345 
346       // Parameters for the input volume controller, which adjusts the input
347       // volume applied when the audio is captured (e.g., microphone volume on
348       // a soundcard, input volume on HAL).
349       struct InputVolumeController {
350         bool operator==(const InputVolumeController& rhs) const;
351         bool operator!=(const InputVolumeController& rhs) const {
352           return !(*this == rhs);
353         }
354         bool enabled = false;
355       } input_volume_controller;
356 
357       // Parameters for the adaptive digital controller, which adjusts and
358       // applies a digital gain after echo cancellation and after noise
359       // suppression.
360       struct RTC_EXPORT AdaptiveDigital {
361         bool operator==(const AdaptiveDigital& rhs) const;
362         bool operator!=(const AdaptiveDigital& rhs) const {
363           return !(*this == rhs);
364         }
365 
366         bool enabled = false;
367         // TODO(bugs.webrtc.org/7494): Remove `dry_run`.
368         // When true, the adaptive digital controller runs but the signal is not
369         // modified.
370         bool dry_run = false;
371         float headroom_db = 6.0f;
372         // TODO(bugs.webrtc.org/7494): Consider removing and inferring from
373         // `max_output_noise_level_dbfs`.
374         float max_gain_db = 30.0f;
375         float initial_gain_db = 8.0f;
376         // TODO(bugs.webrtc.org/7494): Hard-code and remove parameter below.
377         int vad_reset_period_ms = 1500;
378         // TODO(bugs.webrtc.org/7494): Hard-code and remove parameter below.
379         int adjacent_speech_frames_threshold = 12;
380         float max_gain_change_db_per_second = 3.0f;
381         float max_output_noise_level_dbfs = -50.0f;
382       } adaptive_digital;
383 
384       // Parameters for the fixed digital controller, which applies a fixed
385       // digital gain after the adaptive digital controller and before the
386       // limiter.
387       struct FixedDigital {
388         // By setting `gain_db` to a value greater than zero, the limiter can be
389         // turned into a compressor that first applies a fixed gain.
390         float gain_db = 0.0f;
391       } fixed_digital;
392     } gain_controller2;
393 
394     std::string ToString() const;
395   };
396 
397   // Specifies the properties of a setting to be passed to AudioProcessing at
398   // runtime.
399   class RuntimeSetting {
400    public:
401     enum class Type {
402       kNotSpecified,
403       kCapturePreGain,
404       kCaptureCompressionGain,
405       kCaptureFixedPostGain,
406       kPlayoutVolumeChange,
407       kCustomRenderProcessingRuntimeSetting,
408       kPlayoutAudioDeviceChange,
409       kCapturePostGain,
410       kCaptureOutputUsed
411     };
412 
413     // Play-out audio device properties.
414     struct PlayoutAudioDeviceInfo {
415       int id;          // Identifies the audio device.
416       int max_volume;  // Maximum play-out volume.
417     };
418 
RuntimeSetting()419     RuntimeSetting() : type_(Type::kNotSpecified), value_(0.0f) {}
420     ~RuntimeSetting() = default;
421 
CreateCapturePreGain(float gain)422     static RuntimeSetting CreateCapturePreGain(float gain) {
423       return {Type::kCapturePreGain, gain};
424     }
425 
CreateCapturePostGain(float gain)426     static RuntimeSetting CreateCapturePostGain(float gain) {
427       return {Type::kCapturePostGain, gain};
428     }
429 
430     // Corresponds to Config::GainController1::compression_gain_db, but for
431     // runtime configuration.
CreateCompressionGainDb(int gain_db)432     static RuntimeSetting CreateCompressionGainDb(int gain_db) {
433       RTC_DCHECK_GE(gain_db, 0);
434       RTC_DCHECK_LE(gain_db, 90);
435       return {Type::kCaptureCompressionGain, static_cast<float>(gain_db)};
436     }
437 
438     // Corresponds to Config::GainController2::fixed_digital::gain_db, but for
439     // runtime configuration.
CreateCaptureFixedPostGain(float gain_db)440     static RuntimeSetting CreateCaptureFixedPostGain(float gain_db) {
441       RTC_DCHECK_GE(gain_db, 0.0f);
442       RTC_DCHECK_LE(gain_db, 90.0f);
443       return {Type::kCaptureFixedPostGain, gain_db};
444     }
445 
446     // Creates a runtime setting to notify play-out (aka render) audio device
447     // changes.
CreatePlayoutAudioDeviceChange(PlayoutAudioDeviceInfo audio_device)448     static RuntimeSetting CreatePlayoutAudioDeviceChange(
449         PlayoutAudioDeviceInfo audio_device) {
450       return {Type::kPlayoutAudioDeviceChange, audio_device};
451     }
452 
453     // Creates a runtime setting to notify play-out (aka render) volume changes.
454     // `volume` is the unnormalized volume, the maximum of which
CreatePlayoutVolumeChange(int volume)455     static RuntimeSetting CreatePlayoutVolumeChange(int volume) {
456       return {Type::kPlayoutVolumeChange, volume};
457     }
458 
CreateCustomRenderSetting(float payload)459     static RuntimeSetting CreateCustomRenderSetting(float payload) {
460       return {Type::kCustomRenderProcessingRuntimeSetting, payload};
461     }
462 
CreateCaptureOutputUsedSetting(bool capture_output_used)463     static RuntimeSetting CreateCaptureOutputUsedSetting(
464         bool capture_output_used) {
465       return {Type::kCaptureOutputUsed, capture_output_used};
466     }
467 
type()468     Type type() const { return type_; }
469     // Getters do not return a value but instead modify the argument to protect
470     // from implicit casting.
GetFloat(float * value)471     void GetFloat(float* value) const {
472       RTC_DCHECK(value);
473       *value = value_.float_value;
474     }
GetInt(int * value)475     void GetInt(int* value) const {
476       RTC_DCHECK(value);
477       *value = value_.int_value;
478     }
GetBool(bool * value)479     void GetBool(bool* value) const {
480       RTC_DCHECK(value);
481       *value = value_.bool_value;
482     }
GetPlayoutAudioDeviceInfo(PlayoutAudioDeviceInfo * value)483     void GetPlayoutAudioDeviceInfo(PlayoutAudioDeviceInfo* value) const {
484       RTC_DCHECK(value);
485       *value = value_.playout_audio_device_info;
486     }
487 
488    private:
RuntimeSetting(Type id,float value)489     RuntimeSetting(Type id, float value) : type_(id), value_(value) {}
RuntimeSetting(Type id,int value)490     RuntimeSetting(Type id, int value) : type_(id), value_(value) {}
RuntimeSetting(Type id,PlayoutAudioDeviceInfo value)491     RuntimeSetting(Type id, PlayoutAudioDeviceInfo value)
492         : type_(id), value_(value) {}
493     Type type_;
494     union U {
U()495       U() {}
U(int value)496       U(int value) : int_value(value) {}
U(float value)497       U(float value) : float_value(value) {}
U(PlayoutAudioDeviceInfo value)498       U(PlayoutAudioDeviceInfo value) : playout_audio_device_info(value) {}
499       float float_value;
500       int int_value;
501       bool bool_value;
502       PlayoutAudioDeviceInfo playout_audio_device_info;
503     } value_;
504   };
505 
~AudioProcessing()506   ~AudioProcessing() override {}
507 
508   // Initializes internal states, while retaining all user settings. This
509   // should be called before beginning to process a new audio stream. However,
510   // it is not necessary to call before processing the first stream after
511   // creation.
512   //
513   // It is also not necessary to call if the audio parameters (sample
514   // rate and number of channels) have changed. Passing updated parameters
515   // directly to `ProcessStream()` and `ProcessReverseStream()` is permissible.
516   // If the parameters are known at init-time though, they may be provided.
517   // TODO(webrtc:5298): Change to return void.
518   virtual int Initialize() = 0;
519 
520   // The int16 interfaces require:
521   //   - only `NativeRate`s be used
522   //   - that the input, output and reverse rates must match
523   //   - that `processing_config.output_stream()` matches
524   //     `processing_config.input_stream()`.
525   //
526   // The float interfaces accept arbitrary rates and support differing input and
527   // output layouts, but the output must have either one channel or the same
528   // number of channels as the input.
529   virtual int Initialize(const ProcessingConfig& processing_config) = 0;
530 
531   // TODO(peah): This method is a temporary solution used to take control
532   // over the parameters in the audio processing module and is likely to change.
533   virtual void ApplyConfig(const Config& config) = 0;
534 
535   // TODO(ajm): Only intended for internal use. Make private and friend the
536   // necessary classes?
537   virtual int proc_sample_rate_hz() const = 0;
538   virtual int proc_split_sample_rate_hz() const = 0;
539   virtual size_t num_input_channels() const = 0;
540   virtual size_t num_proc_channels() const = 0;
541   virtual size_t num_output_channels() const = 0;
542   virtual size_t num_reverse_channels() const = 0;
543 
544   // Set to true when the output of AudioProcessing will be muted or in some
545   // other way not used. Ideally, the captured audio would still be processed,
546   // but some components may change behavior based on this information.
547   // Default false. This method takes a lock. To achieve this in a lock-less
548   // manner the PostRuntimeSetting can instead be used.
549   virtual void set_output_will_be_muted(bool muted) = 0;
550 
551   // Enqueues a runtime setting.
552   virtual void SetRuntimeSetting(RuntimeSetting setting) = 0;
553 
554   // Enqueues a runtime setting. Returns a bool indicating whether the
555   // enqueueing was successfull.
556   virtual bool PostRuntimeSetting(RuntimeSetting setting) = 0;
557 
558   // Accepts and produces a ~10 ms frame of interleaved 16 bit integer audio as
559   // specified in `input_config` and `output_config`. `src` and `dest` may use
560   // the same memory, if desired.
561   virtual int ProcessStream(const int16_t* const src,
562                             const StreamConfig& input_config,
563                             const StreamConfig& output_config,
564                             int16_t* const dest) = 0;
565 
566   // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
567   // `src` points to a channel buffer, arranged according to `input_stream`. At
568   // output, the channels will be arranged according to `output_stream` in
569   // `dest`.
570   //
571   // The output must have one channel or as many channels as the input. `src`
572   // and `dest` may use the same memory, if desired.
573   virtual int ProcessStream(const float* const* src,
574                             const StreamConfig& input_config,
575                             const StreamConfig& output_config,
576                             float* const* dest) = 0;
577 
578   // Accepts and produces a ~10 ms frame of interleaved 16 bit integer audio for
579   // the reverse direction audio stream as specified in `input_config` and
580   // `output_config`. `src` and `dest` may use the same memory, if desired.
581   virtual int ProcessReverseStream(const int16_t* const src,
582                                    const StreamConfig& input_config,
583                                    const StreamConfig& output_config,
584                                    int16_t* const dest) = 0;
585 
586   // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
587   // `data` points to a channel buffer, arranged according to `reverse_config`.
588   virtual int ProcessReverseStream(const float* const* src,
589                                    const StreamConfig& input_config,
590                                    const StreamConfig& output_config,
591                                    float* const* dest) = 0;
592 
593   // Accepts deinterleaved float audio with the range [-1, 1]. Each element
594   // of `data` points to a channel buffer, arranged according to
595   // `reverse_config`.
596   virtual int AnalyzeReverseStream(const float* const* data,
597                                    const StreamConfig& reverse_config) = 0;
598 
599   // Returns the most recently produced ~10 ms of the linear AEC output at a
600   // rate of 16 kHz. If there is more than one capture channel, a mono
601   // representation of the input is returned. Returns true/false to indicate
602   // whether an output returned.
603   virtual bool GetLinearAecOutput(
604       rtc::ArrayView<std::array<float, 160>> linear_output) const = 0;
605 
606   // This must be called prior to ProcessStream() if and only if adaptive analog
607   // gain control is enabled, to pass the current analog level from the audio
608   // HAL. Must be within the range [0, 255].
609   virtual void set_stream_analog_level(int level) = 0;
610 
611   // When an analog mode is set, this should be called after
612   // `set_stream_analog_level()` and `ProcessStream()` to obtain the recommended
613   // new analog level for the audio HAL. It is the user's responsibility to
614   // apply this level.
615   virtual int recommended_stream_analog_level() const = 0;
616 
617   // This must be called if and only if echo processing is enabled.
618   //
619   // Sets the `delay` in ms between ProcessReverseStream() receiving a far-end
620   // frame and ProcessStream() receiving a near-end frame containing the
621   // corresponding echo. On the client-side this can be expressed as
622   //   delay = (t_render - t_analyze) + (t_process - t_capture)
623   // where,
624   //   - t_analyze is the time a frame is passed to ProcessReverseStream() and
625   //     t_render is the time the first sample of the same frame is rendered by
626   //     the audio hardware.
627   //   - t_capture is the time the first sample of a frame is captured by the
628   //     audio hardware and t_process is the time the same frame is passed to
629   //     ProcessStream().
630   virtual int set_stream_delay_ms(int delay) = 0;
631   virtual int stream_delay_ms() const = 0;
632 
633   // Call to signal that a key press occurred (true) or did not occur (false)
634   // with this chunk of audio.
635   virtual void set_stream_key_pressed(bool key_pressed) = 0;
636 
637   // Creates and attaches an webrtc::AecDump for recording debugging
638   // information.
639   // The `worker_queue` may not be null and must outlive the created
640   // AecDump instance. |max_log_size_bytes == -1| means the log size
641   // will be unlimited. `handle` may not be null. The AecDump takes
642   // responsibility for `handle` and closes it in the destructor. A
643   // return value of true indicates that the file has been
644   // sucessfully opened, while a value of false indicates that
645   // opening the file failed.
646   virtual bool CreateAndAttachAecDump(absl::string_view file_name,
647                                       int64_t max_log_size_bytes,
648                                       rtc::TaskQueue* worker_queue) = 0;
649   virtual bool CreateAndAttachAecDump(FILE* handle,
650                                       int64_t max_log_size_bytes,
651                                       rtc::TaskQueue* worker_queue) = 0;
652 
653   // TODO(webrtc:5298) Deprecated variant.
654   // Attaches provided webrtc::AecDump for recording debugging
655   // information. Log file and maximum file size logic is supposed to
656   // be handled by implementing instance of AecDump. Calling this
657   // method when another AecDump is attached resets the active AecDump
658   // with a new one. This causes the d-tor of the earlier AecDump to
659   // be called. The d-tor call may block until all pending logging
660   // tasks are completed.
661   virtual void AttachAecDump(std::unique_ptr<AecDump> aec_dump) = 0;
662 
663   // If no AecDump is attached, this has no effect. If an AecDump is
664   // attached, it's destructor is called. The d-tor may block until
665   // all pending logging tasks are completed.
666   virtual void DetachAecDump() = 0;
667 
668   // Get audio processing statistics.
669   virtual AudioProcessingStats GetStatistics() = 0;
670   // TODO(webrtc:5298) Deprecated variant. The `has_remote_tracks` argument
671   // should be set if there are active remote tracks (this would usually be true
672   // during a call). If there are no remote tracks some of the stats will not be
673   // set by AudioProcessing, because they only make sense if there is at least
674   // one remote track.
675   virtual AudioProcessingStats GetStatistics(bool has_remote_tracks) = 0;
676 
677   // Returns the last applied configuration.
678   virtual AudioProcessing::Config GetConfig() const = 0;
679 
680   enum Error {
681     // Fatal errors.
682     kNoError = 0,
683     kUnspecifiedError = -1,
684     kCreationFailedError = -2,
685     kUnsupportedComponentError = -3,
686     kUnsupportedFunctionError = -4,
687     kNullPointerError = -5,
688     kBadParameterError = -6,
689     kBadSampleRateError = -7,
690     kBadDataLengthError = -8,
691     kBadNumberChannelsError = -9,
692     kFileError = -10,
693     kStreamParameterNotSetError = -11,
694     kNotEnabledError = -12,
695 
696     // Warnings are non-fatal.
697     // This results when a set_stream_ parameter is out of range. Processing
698     // will continue, but the parameter may have been truncated.
699     kBadStreamParameterWarning = -13
700   };
701 
702   // Native rates supported by the integer interfaces.
703   enum NativeRate {
704     kSampleRate8kHz = 8000,
705     kSampleRate16kHz = 16000,
706     kSampleRate32kHz = 32000,
707     kSampleRate48kHz = 48000
708   };
709 
710   // TODO(kwiberg): We currently need to support a compiler (Visual C++) that
711   // complains if we don't explicitly state the size of the array here. Remove
712   // the size when that's no longer the case.
713   static constexpr int kNativeSampleRatesHz[4] = {
714       kSampleRate8kHz, kSampleRate16kHz, kSampleRate32kHz, kSampleRate48kHz};
715   static constexpr size_t kNumNativeSampleRates =
716       arraysize(kNativeSampleRatesHz);
717   static constexpr int kMaxNativeSampleRateHz =
718       kNativeSampleRatesHz[kNumNativeSampleRates - 1];
719 
720   // APM processes audio in chunks of about 10 ms. See GetFrameSize() for
721   // details.
722   static constexpr int kChunkSizeMs = 10;
723 
724   // Returns floor(sample_rate_hz/100): the number of samples per channel used
725   // as input and output to the audio processing module in calls to
726   // ProcessStream, ProcessReverseStream, AnalyzeReverseStream, and
727   // GetLinearAecOutput.
728   //
729   // This is exactly 10 ms for sample rates divisible by 100. For example:
730   //  - 48000 Hz (480 samples per channel),
731   //  - 44100 Hz (441 samples per channel),
732   //  - 16000 Hz (160 samples per channel).
733   //
734   // Sample rates not divisible by 100 are received/produced in frames of
735   // approximately 10 ms. For example:
736   //  - 22050 Hz (220 samples per channel, or ~9.98 ms per frame),
737   //  - 11025 Hz (110 samples per channel, or ~9.98 ms per frame).
738   // These nondivisible sample rates yield lower audio quality compared to
739   // multiples of 100. Internal resampling to 10 ms frames causes a simulated
740   // clock drift effect which impacts the performance of (for example) echo
741   // cancellation.
GetFrameSize(int sample_rate_hz)742   static int GetFrameSize(int sample_rate_hz) { return sample_rate_hz / 100; }
743 };
744 
745 class RTC_EXPORT AudioProcessingBuilder {
746  public:
747   AudioProcessingBuilder();
748   AudioProcessingBuilder(const AudioProcessingBuilder&) = delete;
749   AudioProcessingBuilder& operator=(const AudioProcessingBuilder&) = delete;
750   ~AudioProcessingBuilder();
751 
752   // Sets the APM configuration.
SetConfig(const AudioProcessing::Config & config)753   AudioProcessingBuilder& SetConfig(const AudioProcessing::Config& config) {
754     config_ = config;
755     return *this;
756   }
757 
758   // Sets the echo controller factory to inject when APM is created.
SetEchoControlFactory(std::unique_ptr<EchoControlFactory> echo_control_factory)759   AudioProcessingBuilder& SetEchoControlFactory(
760       std::unique_ptr<EchoControlFactory> echo_control_factory) {
761     echo_control_factory_ = std::move(echo_control_factory);
762     return *this;
763   }
764 
765   // Sets the capture post-processing sub-module to inject when APM is created.
SetCapturePostProcessing(std::unique_ptr<CustomProcessing> capture_post_processing)766   AudioProcessingBuilder& SetCapturePostProcessing(
767       std::unique_ptr<CustomProcessing> capture_post_processing) {
768     capture_post_processing_ = std::move(capture_post_processing);
769     return *this;
770   }
771 
772   // Sets the render pre-processing sub-module to inject when APM is created.
SetRenderPreProcessing(std::unique_ptr<CustomProcessing> render_pre_processing)773   AudioProcessingBuilder& SetRenderPreProcessing(
774       std::unique_ptr<CustomProcessing> render_pre_processing) {
775     render_pre_processing_ = std::move(render_pre_processing);
776     return *this;
777   }
778 
779   // Sets the echo detector to inject when APM is created.
SetEchoDetector(rtc::scoped_refptr<EchoDetector> echo_detector)780   AudioProcessingBuilder& SetEchoDetector(
781       rtc::scoped_refptr<EchoDetector> echo_detector) {
782     echo_detector_ = std::move(echo_detector);
783     return *this;
784   }
785 
786   // Sets the capture analyzer sub-module to inject when APM is created.
SetCaptureAnalyzer(std::unique_ptr<CustomAudioAnalyzer> capture_analyzer)787   AudioProcessingBuilder& SetCaptureAnalyzer(
788       std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
789     capture_analyzer_ = std::move(capture_analyzer);
790     return *this;
791   }
792 
793   // Creates an APM instance with the specified config or the default one if
794   // unspecified. Injects the specified components transferring the ownership
795   // to the newly created APM instance - i.e., except for the config, the
796   // builder is reset to its initial state.
797   rtc::scoped_refptr<AudioProcessing> Create();
798 
799  private:
800   AudioProcessing::Config config_;
801   std::unique_ptr<EchoControlFactory> echo_control_factory_;
802   std::unique_ptr<CustomProcessing> capture_post_processing_;
803   std::unique_ptr<CustomProcessing> render_pre_processing_;
804   rtc::scoped_refptr<EchoDetector> echo_detector_;
805   std::unique_ptr<CustomAudioAnalyzer> capture_analyzer_;
806 };
807 
808 class StreamConfig {
809  public:
810   // sample_rate_hz: The sampling rate of the stream.
811   // num_channels: The number of audio channels in the stream.
812   StreamConfig(int sample_rate_hz = 0, size_t num_channels = 0)
sample_rate_hz_(sample_rate_hz)813       : sample_rate_hz_(sample_rate_hz),
814         num_channels_(num_channels),
815         num_frames_(calculate_frames(sample_rate_hz)) {}
816 
set_sample_rate_hz(int value)817   void set_sample_rate_hz(int value) {
818     sample_rate_hz_ = value;
819     num_frames_ = calculate_frames(value);
820   }
set_num_channels(size_t value)821   void set_num_channels(size_t value) { num_channels_ = value; }
822 
sample_rate_hz()823   int sample_rate_hz() const { return sample_rate_hz_; }
824 
825   // The number of channels in the stream.
num_channels()826   size_t num_channels() const { return num_channels_; }
827 
num_frames()828   size_t num_frames() const { return num_frames_; }
num_samples()829   size_t num_samples() const { return num_channels_ * num_frames_; }
830 
831   bool operator==(const StreamConfig& other) const {
832     return sample_rate_hz_ == other.sample_rate_hz_ &&
833            num_channels_ == other.num_channels_;
834   }
835 
836   bool operator!=(const StreamConfig& other) const { return !(*this == other); }
837 
838  private:
calculate_frames(int sample_rate_hz)839   static size_t calculate_frames(int sample_rate_hz) {
840     return static_cast<size_t>(AudioProcessing::GetFrameSize(sample_rate_hz));
841   }
842 
843   int sample_rate_hz_;
844   size_t num_channels_;
845   size_t num_frames_;
846 };
847 
848 class ProcessingConfig {
849  public:
850   enum StreamName {
851     kInputStream,
852     kOutputStream,
853     kReverseInputStream,
854     kReverseOutputStream,
855     kNumStreamNames,
856   };
857 
input_stream()858   const StreamConfig& input_stream() const {
859     return streams[StreamName::kInputStream];
860   }
output_stream()861   const StreamConfig& output_stream() const {
862     return streams[StreamName::kOutputStream];
863   }
reverse_input_stream()864   const StreamConfig& reverse_input_stream() const {
865     return streams[StreamName::kReverseInputStream];
866   }
reverse_output_stream()867   const StreamConfig& reverse_output_stream() const {
868     return streams[StreamName::kReverseOutputStream];
869   }
870 
input_stream()871   StreamConfig& input_stream() { return streams[StreamName::kInputStream]; }
output_stream()872   StreamConfig& output_stream() { return streams[StreamName::kOutputStream]; }
reverse_input_stream()873   StreamConfig& reverse_input_stream() {
874     return streams[StreamName::kReverseInputStream];
875   }
reverse_output_stream()876   StreamConfig& reverse_output_stream() {
877     return streams[StreamName::kReverseOutputStream];
878   }
879 
880   bool operator==(const ProcessingConfig& other) const {
881     for (int i = 0; i < StreamName::kNumStreamNames; ++i) {
882       if (this->streams[i] != other.streams[i]) {
883         return false;
884       }
885     }
886     return true;
887   }
888 
889   bool operator!=(const ProcessingConfig& other) const {
890     return !(*this == other);
891   }
892 
893   StreamConfig streams[StreamName::kNumStreamNames];
894 };
895 
896 // Experimental interface for a custom analysis submodule.
897 class CustomAudioAnalyzer {
898  public:
899   // (Re-) Initializes the submodule.
900   virtual void Initialize(int sample_rate_hz, int num_channels) = 0;
901   // Analyzes the given capture or render signal.
902   virtual void Analyze(const AudioBuffer* audio) = 0;
903   // Returns a string representation of the module state.
904   virtual std::string ToString() const = 0;
905 
~CustomAudioAnalyzer()906   virtual ~CustomAudioAnalyzer() {}
907 };
908 
909 // Interface for a custom processing submodule.
910 class CustomProcessing {
911  public:
912   // (Re-)Initializes the submodule.
913   virtual void Initialize(int sample_rate_hz, int num_channels) = 0;
914   // Processes the given capture or render signal.
915   virtual void Process(AudioBuffer* audio) = 0;
916   // Returns a string representation of the module state.
917   virtual std::string ToString() const = 0;
918   // Handles RuntimeSettings. TODO(webrtc:9262): make pure virtual
919   // after updating dependencies.
920   virtual void SetRuntimeSetting(AudioProcessing::RuntimeSetting setting);
921 
~CustomProcessing()922   virtual ~CustomProcessing() {}
923 };
924 
925 // Interface for an echo detector submodule.
926 class EchoDetector : public rtc::RefCountInterface {
927  public:
928   // (Re-)Initializes the submodule.
929   virtual void Initialize(int capture_sample_rate_hz,
930                           int num_capture_channels,
931                           int render_sample_rate_hz,
932                           int num_render_channels) = 0;
933 
934   // Analysis (not changing) of the first channel of the render signal.
935   virtual void AnalyzeRenderAudio(rtc::ArrayView<const float> render_audio) = 0;
936 
937   // Analysis (not changing) of the capture signal.
938   virtual void AnalyzeCaptureAudio(
939       rtc::ArrayView<const float> capture_audio) = 0;
940 
941   struct Metrics {
942     absl::optional<double> echo_likelihood;
943     absl::optional<double> echo_likelihood_recent_max;
944   };
945 
946   // Collect current metrics from the echo detector.
947   virtual Metrics GetMetrics() const = 0;
948 };
949 
950 }  // namespace webrtc
951 
952 #endif  // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
953