/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_
#define MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_

// MSVC++ requires this to be set before any other includes to get M_PI.
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif

#include <math.h>
#include <stddef.h>  // size_t
#include <stdio.h>   // FILE
#include <string.h>

#include <array>
#include <memory>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_control.h"
#include "api/scoped_refptr.h"
#include "modules/audio_processing/include/audio_processing_statistics.h"
#include "modules/audio_processing/include/config.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/deprecation.h"
#include "rtc_base/ref_count.h"
#include "rtc_base/system/file_wrapper.h"
#include "rtc_base/system/rtc_export.h"

namespace rtc {
class TaskQueue;
}  // namespace rtc

namespace webrtc {

class AecDump;
class AudioBuffer;

class StreamConfig;
class ProcessingConfig;

class EchoDetector;
class CustomAudioAnalyzer;
class CustomProcessing;

// Use to enable experimental gain control (AGC). At startup the experimental
// AGC moves the microphone volume up to |startup_min_volume| if the current
// microphone volume is set too low. The value is clamped to its operating
// range [12, 255]. Here, 255 maps to 100%.
//
// Must be provided through AudioProcessingBuilder().Create(config).
#if defined(WEBRTC_CHROMIUM_BUILD)
static const int kAgcStartupMinVolume = 85;
#else
static const int kAgcStartupMinVolume = 0;
#endif  // defined(WEBRTC_CHROMIUM_BUILD)
static constexpr int kClippedLevelMin = 70;

// To be deprecated: Please instead use the flag in the
// AudioProcessing::Config::AnalogGainController.
// TODO(webrtc:5298): Remove.
struct ExperimentalAgc {
  ExperimentalAgc() = default;
  explicit ExperimentalAgc(bool enabled) : enabled(enabled) {}
  ExperimentalAgc(bool enabled,
                  bool enabled_agc2_level_estimator,
                  bool digital_adaptive_disabled)
      : enabled(enabled),
        enabled_agc2_level_estimator(enabled_agc2_level_estimator),
        digital_adaptive_disabled(digital_adaptive_disabled) {}
  // Deprecated constructor: will be removed.
  ExperimentalAgc(bool enabled,
                  bool enabled_agc2_level_estimator,
                  bool digital_adaptive_disabled,
                  bool analyze_before_aec)
      : enabled(enabled),
        enabled_agc2_level_estimator(enabled_agc2_level_estimator),
        digital_adaptive_disabled(digital_adaptive_disabled) {}
  ExperimentalAgc(bool enabled, int startup_min_volume)
      : enabled(enabled), startup_min_volume(startup_min_volume) {}
  ExperimentalAgc(bool enabled, int startup_min_volume, int clipped_level_min)
      : enabled(enabled),
        startup_min_volume(startup_min_volume),
        clipped_level_min(clipped_level_min) {}
  static const ConfigOptionID identifier = ConfigOptionID::kExperimentalAgc;
  bool enabled = true;
  int startup_min_volume = kAgcStartupMinVolume;
  // Lowest microphone level that will be applied in response to clipping.
  int clipped_level_min = kClippedLevelMin;
  bool enabled_agc2_level_estimator = false;
  bool digital_adaptive_disabled = false;
};
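
// Example (a sketch of the deprecated path described above): ExperimentalAgc
// can be provided at creation time via the webrtc::Config::Set() helper
// declared in config.h, included above. Error handling is omitted.
//   webrtc::Config agc_options;
//   agc_options.Set<ExperimentalAgc>(new ExperimentalAgc(true));
//   AudioProcessing* apm = AudioProcessingBuilder().Create(agc_options);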

// To be deprecated: Please instead use the flag in the
// AudioProcessing::Config::TransientSuppression.
//
// Use to enable experimental noise suppression. It can be set in the
// constructor or using AudioProcessing::SetExtraOptions().
// TODO(webrtc:5298): Remove.
struct ExperimentalNs {
  ExperimentalNs() : enabled(false) {}
  explicit ExperimentalNs(bool enabled) : enabled(enabled) {}
  static const ConfigOptionID identifier = ConfigOptionID::kExperimentalNs;
  bool enabled;
};
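
// Example (a sketch of the deprecated path described above): ExperimentalNs
// can be passed either at creation time, as shown for ExperimentalAgc, or at
// runtime through SetExtraOptions(). |apm| is an existing AudioProcessing
// instance.
//   webrtc::Config ns_options;
//   ns_options.Set<ExperimentalNs>(new ExperimentalNs(true));
//   apm->SetExtraOptions(ns_options);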

// The Audio Processing Module (APM) provides a collection of voice processing
// components designed for real-time communications software.
//
// APM operates on two audio streams on a frame-by-frame basis. Frames of the
// primary stream, on which all processing is applied, are passed to
// |ProcessStream()|. Frames of the reverse direction stream are passed to
// |ProcessReverseStream()|. On the client-side, this will typically be the
// near-end (capture) and far-end (render) streams, respectively. APM should be
// placed in the signal chain as close to the audio hardware abstraction layer
// (HAL) as possible.
//
// On the server-side, the reverse stream will normally not be used, with
// processing occurring on each incoming stream.
//
// Component interfaces follow a similar pattern and are accessed through
// corresponding getters in APM. All components are disabled at create-time,
// with default settings that are recommended for most situations. New settings
// can be applied without enabling a component. Enabling a component triggers
// memory allocation and initialization to allow it to start processing the
// streams.
//
// Thread safety is provided with the following assumptions to reduce locking
// overhead:
//   1. The stream getters and setters are called from the same thread as
//      ProcessStream(). More precisely, stream functions are never called
//      concurrently with ProcessStream().
//   2. Parameter getters are never called concurrently with the corresponding
//      setter.
//
// APM accepts only linear PCM audio data in chunks of 10 ms. The int16
// interfaces use interleaved data, while the float interfaces use deinterleaved
// data.
//
// Usage example, omitting error checking:
// AudioProcessing* apm = AudioProcessingBuilder().Create();
//
// AudioProcessing::Config config;
// config.echo_canceller.enabled = true;
// config.echo_canceller.mobile_mode = false;
//
// config.gain_controller1.enabled = true;
// config.gain_controller1.mode =
//     AudioProcessing::Config::GainController1::kAdaptiveAnalog;
// config.gain_controller1.analog_level_minimum = 0;
// config.gain_controller1.analog_level_maximum = 255;
//
// config.gain_controller2.enabled = true;
//
// config.high_pass_filter.enabled = true;
//
// config.voice_detection.enabled = true;
//
// config.noise_suppression.enabled = true;
// config.noise_suppression.level =
//     AudioProcessing::Config::NoiseSuppression::kHigh;
//
// apm->ApplyConfig(config);
//
// // Start a voice call...
//
// // ... Render frame arrives bound for the audio HAL ...
// apm->ProcessReverseStream(render_frame);
//
// // ... Capture frame arrives from the audio HAL ...
// // Call required set_stream_ functions.
// apm->set_stream_delay_ms(delay_ms);
// apm->set_stream_analog_level(analog_level);
//
// apm->ProcessStream(capture_frame);
//
// // Call required stream_ functions.
// analog_level = apm->recommended_stream_analog_level();
// has_voice = apm->GetStatistics().voice_detected;
//
// // Repeat render and capture processing for the duration of the call...
// // Start a new call...
// apm->Initialize();
//
// // Close the application...
// delete apm;
//
class RTC_EXPORT AudioProcessing : public rtc::RefCountInterface {
 public:
  // The struct below constitutes the new parameter scheme for the audio
  // processing. It is being introduced gradually and until it is fully
  // introduced, it is prone to change.
  // TODO(peah): Remove this comment once the new config scheme is fully rolled
  // out.
  //
  // The parameters and behavior of the audio processing module are controlled
  // by changing the default values in the AudioProcessing::Config struct.
  // The config is applied by passing the struct to the ApplyConfig method.
  //
  // This config is intended to be used during setup, and to enable/disable
  // top-level processing effects. Use during processing may cause undesired
  // submodule resets, affecting the audio quality. Use the RuntimeSetting
  // construct for runtime configuration.
  struct RTC_EXPORT Config {

    // Sets the properties of the audio processing pipeline.
    struct RTC_EXPORT Pipeline {
      Pipeline();

      // Maximum allowed processing rate used internally. May only be set to
      // 32000 or 48000 and any differing values will be treated as 48000. The
      // default rate is currently selected based on the CPU architecture, but
      // that logic may change.
      int maximum_internal_processing_rate;
      // Allow multi-channel processing of render audio.
      bool multi_channel_render = false;
      // Allow multi-channel processing of capture audio when AEC3 is active
      // or a custom AEC is injected.
      bool multi_channel_capture = false;
    } pipeline;

    // Enables the pre-amplifier. It amplifies the capture signal
    // before any other processing is done.
    struct PreAmplifier {
      bool enabled = false;
      float fixed_gain_factor = 1.f;
    } pre_amplifier;

    struct HighPassFilter {
      bool enabled = false;
      bool apply_in_full_band = true;
    } high_pass_filter;

    struct EchoCanceller {
      bool enabled = false;
      bool mobile_mode = false;
      bool export_linear_aec_output = false;
      // Enforces the high-pass filter to be on (has no effect in mobile
      // mode).
      bool enforce_high_pass_filtering = true;
    } echo_canceller;

    // Enables background noise suppression.
    struct NoiseSuppression {
      bool enabled = false;
      enum Level { kLow, kModerate, kHigh, kVeryHigh };
      Level level = kModerate;
      bool analyze_linear_aec_output_when_available = false;
    } noise_suppression;

    // Enables transient suppression.
    struct TransientSuppression {
      bool enabled = false;
    } transient_suppression;

    // Enables reporting of |voice_detected| in webrtc::AudioProcessingStats.
    struct VoiceDetection {
      bool enabled = false;
    } voice_detection;

    // Enables automatic gain control (AGC) functionality.
    // The automatic gain control (AGC) component brings the signal to an
    // appropriate range. This is done by applying a digital gain directly and,
    // in the analog mode, prescribing an analog gain to be applied at the audio
    // HAL.
    // Recommended to be enabled on the client-side.
    struct GainController1 {
      bool enabled = false;
      enum Mode {
        // Adaptive mode intended for use if an analog volume control is
        // available on the capture device. It will require the user to provide
        // coupling between the OS mixer controls and AGC through the
        // stream_analog_level() functions.
        // It consists of an analog gain prescription for the audio device and a
        // digital compression stage.
        kAdaptiveAnalog,
        // Adaptive mode intended for situations in which an analog volume
        // control is unavailable. It operates in a similar fashion to the
        // adaptive analog mode, but with scaling instead applied in the digital
        // domain. As with the analog mode, it additionally uses a digital
        // compression stage.
        kAdaptiveDigital,
        // Fixed mode which enables only the digital compression stage also used
        // by the two adaptive modes.
        // It is distinguished from the adaptive modes by considering only a
        // short time-window of the input signal. It applies a fixed gain
        // through most of the input level range, and compresses (gradually
        // reduces gain with increasing level) the input signal at higher
        // levels. This mode is preferred on embedded devices where the capture
        // signal level is predictable, so that a known gain can be applied.
        kFixedDigital
      };
      Mode mode = kAdaptiveAnalog;
      // Sets the target peak level (or envelope) of the AGC in dBFs (decibels
      // from digital full-scale). The convention is to use positive values. For
      // instance, passing in a value of 3 corresponds to -3 dBFs, or a target
      // level 3 dB below full-scale. Limited to [0, 31].
      int target_level_dbfs = 3;
      // Sets the maximum gain the digital compression stage may apply, in dB. A
      // higher number corresponds to greater compression, while a value of 0
      // will leave the signal uncompressed. Limited to [0, 90].
      // For updates after APM setup, use a RuntimeSetting instead.
      int compression_gain_db = 9;
      // When enabled, the compression stage will hard limit the signal to the
      // target level. Otherwise, the signal will be compressed but not limited
      // above the target level.
      bool enable_limiter = true;
      // Sets the minimum and maximum analog levels of the audio capture device.
      // Must be set if an analog mode is used. Limited to [0, 65535].
      int analog_level_minimum = 0;
      int analog_level_maximum = 255;

      // Enables the analog gain controller functionality.
      struct AnalogGainController {
        bool enabled = true;
        int startup_min_volume = kAgcStartupMinVolume;
        // Lowest analog microphone level that will be applied in response to
        // clipping.
        int clipped_level_min = kClippedLevelMin;
        bool enable_agc2_level_estimator = false;
        bool enable_digital_adaptive = true;
      } analog_gain_controller;
    } gain_controller1;

    // Enables the next generation AGC functionality. This feature replaces the
    // standard methods of gain control in the previous AGC. Enabling this
    // submodule enables an adaptive digital AGC followed by a limiter. By
    // setting |fixed_digital.gain_db|, the limiter can be turned into a
    // compressor that first applies a fixed gain. The adaptive digital AGC can
    // be turned off by setting |adaptive_digital.enabled| to false.
    struct GainController2 {
      enum LevelEstimator { kRms, kPeak };
      bool enabled = false;
      struct {
        float gain_db = 0.f;
      } fixed_digital;
      struct {
        bool enabled = false;
        LevelEstimator level_estimator = kRms;
        bool use_saturation_protector = true;
        float extra_saturation_margin_db = 2.f;
      } adaptive_digital;
    } gain_controller2;

    struct ResidualEchoDetector {
      bool enabled = true;
    } residual_echo_detector;

    // Enables reporting of |output_rms_dbfs| in webrtc::AudioProcessingStats.
    struct LevelEstimation {
      bool enabled = false;
    } level_estimation;

    std::string ToString() const;
  };

  // TODO(mgraczyk): Remove once all methods that use ChannelLayout are gone.
  enum ChannelLayout {
    kMono,
    // Left, right.
    kStereo,
    // Mono, keyboard, and mic.
    kMonoAndKeyboard,
    // Left, right, keyboard, and mic.
    kStereoAndKeyboard
  };

  // Specifies the properties of a setting to be passed to AudioProcessing at
  // runtime.
  class RuntimeSetting {
   public:
    enum class Type {
      kNotSpecified,
      kCapturePreGain,
      kCaptureCompressionGain,
      kCaptureFixedPostGain,
      kPlayoutVolumeChange,
      kCustomRenderProcessingRuntimeSetting,
      kPlayoutAudioDeviceChange
    };

    // Play-out audio device properties.
    struct PlayoutAudioDeviceInfo {
      int id;          // Identifies the audio device.
      int max_volume;  // Maximum play-out volume.
    };

    RuntimeSetting() : type_(Type::kNotSpecified), value_(0.f) {}
    ~RuntimeSetting() = default;

    static RuntimeSetting CreateCapturePreGain(float gain) {
      RTC_DCHECK_GE(gain, 1.f) << "Attenuation is not allowed.";
      return {Type::kCapturePreGain, gain};
    }

    // Corresponds to Config::GainController1::compression_gain_db, but for
    // runtime configuration.
    static RuntimeSetting CreateCompressionGainDb(int gain_db) {
      RTC_DCHECK_GE(gain_db, 0);
      RTC_DCHECK_LE(gain_db, 90);
      return {Type::kCaptureCompressionGain, static_cast<float>(gain_db)};
    }

    // Corresponds to Config::GainController2::fixed_digital::gain_db, but for
    // runtime configuration.
    static RuntimeSetting CreateCaptureFixedPostGain(float gain_db) {
      RTC_DCHECK_GE(gain_db, 0.f);
      RTC_DCHECK_LE(gain_db, 90.f);
      return {Type::kCaptureFixedPostGain, gain_db};
    }

    // Creates a runtime setting to notify play-out (aka render) audio device
    // changes.
    static RuntimeSetting CreatePlayoutAudioDeviceChange(
        PlayoutAudioDeviceInfo audio_device) {
      return {Type::kPlayoutAudioDeviceChange, audio_device};
    }

    // Creates a runtime setting to notify play-out (aka render) volume changes.
    // |volume| is the unnormalized volume, the maximum of which is indicated
    // by the |max_volume| reported via CreatePlayoutAudioDeviceChange().
    static RuntimeSetting CreatePlayoutVolumeChange(int volume) {
      return {Type::kPlayoutVolumeChange, volume};
    }

    static RuntimeSetting CreateCustomRenderSetting(float payload) {
      return {Type::kCustomRenderProcessingRuntimeSetting, payload};
    }

    Type type() const { return type_; }
    // Getters do not return a value but instead modify the argument to protect
    // from implicit casting.
    void GetFloat(float* value) const {
      RTC_DCHECK(value);
      *value = value_.float_value;
    }
    void GetInt(int* value) const {
      RTC_DCHECK(value);
      *value = value_.int_value;
    }
    void GetPlayoutAudioDeviceInfo(PlayoutAudioDeviceInfo* value) const {
      RTC_DCHECK(value);
      *value = value_.playout_audio_device_info;
    }

   private:
    RuntimeSetting(Type id, float value) : type_(id), value_(value) {}
    RuntimeSetting(Type id, int value) : type_(id), value_(value) {}
    RuntimeSetting(Type id, PlayoutAudioDeviceInfo value)
        : type_(id), value_(value) {}
    Type type_;
    union U {
      U() {}
      U(int value) : int_value(value) {}
      U(float value) : float_value(value) {}
      U(PlayoutAudioDeviceInfo value) : playout_audio_device_info(value) {}
      float float_value;
      int int_value;
      PlayoutAudioDeviceInfo playout_audio_device_info;
    } value_;
  };

  ~AudioProcessing() override {}

  // Initializes internal states, while retaining all user settings. This
  // should be called before beginning to process a new audio stream. However,
  // it is not necessary to call before processing the first stream after
  // creation.
  //
  // It is also not necessary to call if the audio parameters (sample
  // rate and number of channels) have changed. Passing updated parameters
  // directly to |ProcessStream()| and |ProcessReverseStream()| is permissible.
  // If the parameters are known at init-time though, they may be provided.
  virtual int Initialize() = 0;

  // The int16 interfaces require:
  //   - only |NativeRate|s be used
  //   - that the input, output and reverse rates must match
  //   - that |processing_config.output_stream()| matches
  //     |processing_config.input_stream()|.
  //
  // The float interfaces accept arbitrary rates and support differing input and
  // output layouts, but the output must have either one channel or the same
  // number of channels as the input.
  virtual int Initialize(const ProcessingConfig& processing_config) = 0;

  // Initialize with unpacked parameters. See Initialize() above for details.
  //
  // TODO(mgraczyk): Remove once clients are updated to use the new interface.
  virtual int Initialize(int capture_input_sample_rate_hz,
                         int capture_output_sample_rate_hz,
                         int render_sample_rate_hz,
                         ChannelLayout capture_input_layout,
                         ChannelLayout capture_output_layout,
                         ChannelLayout render_input_layout) = 0;

  // TODO(peah): This method is a temporary solution used to take control
  // over the parameters in the audio processing module and is likely to change.
  virtual void ApplyConfig(const Config& config) = 0;

  // Pass down additional options which don't have explicit setters. This
  // ensures the options are applied immediately.
  virtual void SetExtraOptions(const webrtc::Config& config) = 0;

  // TODO(ajm): Only intended for internal use. Make private and friend the
  // necessary classes?
  virtual int proc_sample_rate_hz() const = 0;
  virtual int proc_split_sample_rate_hz() const = 0;
  virtual size_t num_input_channels() const = 0;
  virtual size_t num_proc_channels() const = 0;
  virtual size_t num_output_channels() const = 0;
  virtual size_t num_reverse_channels() const = 0;

  // Set to true when the output of AudioProcessing will be muted or in some
  // other way not used. Ideally, the captured audio would still be processed,
  // but some components may change behavior based on this information.
  // Default false.
  virtual void set_output_will_be_muted(bool muted) = 0;

  // Enqueue a runtime setting.
  virtual void SetRuntimeSetting(RuntimeSetting setting) = 0;
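
  // Example (a sketch; error handling omitted): applying a runtime change to
  // the pre-amplifier gain without triggering a full reconfiguration. |apm|
  // is an existing AudioProcessing instance with the pre-amplifier enabled.
  //   apm->SetRuntimeSetting(
  //       AudioProcessing::RuntimeSetting::CreateCapturePreGain(2.f));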

  // Accepts and produces a 10 ms frame of interleaved 16 bit integer audio as
  // specified in |input_config| and |output_config|. |src| and |dest| may use
  // the same memory, if desired.
  virtual int ProcessStream(const int16_t* const src,
                            const StreamConfig& input_config,
                            const StreamConfig& output_config,
                            int16_t* const dest) = 0;

  // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
  // |src| points to a channel buffer, arranged according to |input_config|. At
  // output, the channels will be arranged according to |output_config| in
  // |dest|.
  //
  // The output must have one channel or as many channels as the input. |src|
  // and |dest| may use the same memory, if desired.
  virtual int ProcessStream(const float* const* src,
                            const StreamConfig& input_config,
                            const StreamConfig& output_config,
                            float* const* dest) = 0;
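
  // Example call of the float interface (a sketch; error handling omitted):
  // |capture| is assumed to be a deinterleaved buffer of |num_channels|
  // pointers, each holding 480 samples (10 ms at 48 kHz), processed in place.
  //   StreamConfig stream_config(/*sample_rate_hz=*/48000, num_channels);
  //   apm->ProcessStream(capture, stream_config, stream_config, capture);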

  // Accepts and produces a 10 ms frame of interleaved 16 bit integer audio for
  // the reverse direction audio stream as specified in |input_config| and
  // |output_config|. |src| and |dest| may use the same memory, if desired.
  virtual int ProcessReverseStream(const int16_t* const src,
                                   const StreamConfig& input_config,
                                   const StreamConfig& output_config,
                                   int16_t* const dest) = 0;

  // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
  // |src| points to a channel buffer, arranged according to |input_config|.
  virtual int ProcessReverseStream(const float* const* src,
                                   const StreamConfig& input_config,
                                   const StreamConfig& output_config,
                                   float* const* dest) = 0;

  // Accepts deinterleaved float audio with the range [-1, 1]. Each element
  // of |data| points to a channel buffer, arranged according to
  // |reverse_config|.
  virtual int AnalyzeReverseStream(const float* const* data,
                                   const StreamConfig& reverse_config) = 0;

  // Returns the most recently produced 10 ms of the linear AEC output at a rate
  // of 16 kHz. If there is more than one capture channel, a mono representation
  // of the input is returned. Returns true/false to indicate whether an output
  // was returned.
  virtual bool GetLinearAecOutput(
      rtc::ArrayView<std::array<float, 160>> linear_output) const = 0;
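
  // Example (a sketch, assuming a single linear output channel; check the
  // return value in real code): requires
  // config.echo_canceller.export_linear_aec_output to be set to true.
  //   std::array<float, 160> linear_aec_frame;
  //   if (apm->GetLinearAecOutput(
  //           rtc::ArrayView<std::array<float, 160>>(&linear_aec_frame, 1))) {
  //     // |linear_aec_frame| now holds 10 ms of 16 kHz linear AEC output.
  //   }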

  // This must be called prior to ProcessStream() if and only if adaptive analog
  // gain control is enabled, to pass the current analog level from the audio
  // HAL. Must be within the range provided in Config::GainController1.
  virtual void set_stream_analog_level(int level) = 0;

  // When an analog mode is set, this should be called after ProcessStream()
  // to obtain the recommended new analog level for the audio HAL. It is the
  // user's responsibility to apply this level.
  virtual int recommended_stream_analog_level() const = 0;

  // This must be called if and only if echo processing is enabled.
  //
  // Sets the |delay| in ms between ProcessReverseStream() receiving a far-end
  // frame and ProcessStream() receiving a near-end frame containing the
  // corresponding echo. On the client-side this can be expressed as
  //   delay = (t_render - t_analyze) + (t_process - t_capture)
  // where,
  //   - t_analyze is the time a frame is passed to ProcessReverseStream() and
  //     t_render is the time the first sample of the same frame is rendered by
  //     the audio hardware.
  //   - t_capture is the time the first sample of a frame is captured by the
  //     audio hardware and t_process is the time the same frame is passed to
  //     ProcessStream().
  virtual int set_stream_delay_ms(int delay) = 0;
  virtual int stream_delay_ms() const = 0;
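
  // Worked example (a sketch): if a render frame is passed to
  // ProcessReverseStream() 40 ms before its first sample is played out
  // (t_render - t_analyze = 40) and a capture frame reaches ProcessStream()
  // 20 ms after its first sample was recorded (t_process - t_capture = 20),
  // then
  //   apm->set_stream_delay_ms(40 + 20);  // 60 ms.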

  // Call to signal that a key press occurred (true) or did not occur (false)
  // with this chunk of audio.
  virtual void set_stream_key_pressed(bool key_pressed) = 0;

  // Creates and attaches a webrtc::AecDump for recording debugging
  // information.
  // The |worker_queue| may not be null and must outlive the created
  // AecDump instance. |max_log_size_bytes == -1| means the log size
  // will be unlimited. |handle| may not be null. The AecDump takes
  // responsibility for |handle| and closes it in the destructor. A
  // return value of true indicates that the file has been
  // successfully opened, while a value of false indicates that
  // opening the file failed.
  virtual bool CreateAndAttachAecDump(const std::string& file_name,
                                      int64_t max_log_size_bytes,
                                      rtc::TaskQueue* worker_queue) = 0;
  virtual bool CreateAndAttachAecDump(FILE* handle,
                                      int64_t max_log_size_bytes,
                                      rtc::TaskQueue* worker_queue) = 0;

  // TODO(webrtc:5298) Deprecated variant.
  // Attaches the provided webrtc::AecDump for recording debugging
  // information. Log file and maximum file size logic is supposed to
  // be handled by the implementing instance of AecDump. Calling this
  // method when another AecDump is attached replaces the active AecDump
  // with the new one. This causes the d-tor of the earlier AecDump to
  // be called. The d-tor call may block until all pending logging
  // tasks are completed.
  virtual void AttachAecDump(std::unique_ptr<AecDump> aec_dump) = 0;

  // If no AecDump is attached, this has no effect. If an AecDump is
  // attached, its destructor is called. The d-tor may block until
  // all pending logging tasks are completed.
  virtual void DetachAecDump() = 0;
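
  // Example (a sketch; the path and queue are placeholders): starting an
  // unbounded AEC dump on a caller-owned rtc::TaskQueue that outlives the
  // dump, and stopping it later.
  //   apm->CreateAndAttachAecDump("/tmp/audio.aecdump",
  //                               /*max_log_size_bytes=*/-1, &worker_queue);
  //   ...
  //   apm->DetachAecDump();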

  // Get audio processing statistics.
  virtual AudioProcessingStats GetStatistics() = 0;
  // TODO(webrtc:5298) Deprecated variant. The |has_remote_tracks| argument
  // should be set if there are active remote tracks (this would usually be true
  // during a call). If there are no remote tracks some of the stats will not be
  // set by AudioProcessing, because they only make sense if there is at least
  // one remote track.
  virtual AudioProcessingStats GetStatistics(bool has_remote_tracks) = 0;

  // Returns the last applied configuration.
  virtual AudioProcessing::Config GetConfig() const = 0;
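
  // Example (a sketch): inspecting the currently applied configuration, e.g.
  // for logging. |apm| is an existing AudioProcessing instance.
  //   AudioProcessing::Config active_config = apm->GetConfig();
  //   std::string config_description = active_config.ToString();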

  enum Error {
    // Fatal errors.
    kNoError = 0,
    kUnspecifiedError = -1,
    kCreationFailedError = -2,
    kUnsupportedComponentError = -3,
    kUnsupportedFunctionError = -4,
    kNullPointerError = -5,
    kBadParameterError = -6,
    kBadSampleRateError = -7,
    kBadDataLengthError = -8,
    kBadNumberChannelsError = -9,
    kFileError = -10,
    kStreamParameterNotSetError = -11,
    kNotEnabledError = -12,

    // Warnings are non-fatal.
    // This results when a set_stream_ parameter is out of range. Processing
    // will continue, but the parameter may have been truncated.
    kBadStreamParameterWarning = -13
  };

  // Native rates supported by the integer interfaces.
  enum NativeRate {
    kSampleRate8kHz = 8000,
    kSampleRate16kHz = 16000,
    kSampleRate32kHz = 32000,
    kSampleRate48kHz = 48000
  };

  // TODO(kwiberg): We currently need to support a compiler (Visual C++) that
  // complains if we don't explicitly state the size of the array here. Remove
  // the size when that's no longer the case.
  static constexpr int kNativeSampleRatesHz[4] = {
      kSampleRate8kHz, kSampleRate16kHz, kSampleRate32kHz, kSampleRate48kHz};
  static constexpr size_t kNumNativeSampleRates =
      arraysize(kNativeSampleRatesHz);
  static constexpr int kMaxNativeSampleRateHz =
      kNativeSampleRatesHz[kNumNativeSampleRates - 1];

  static const int kChunkSizeMs = 10;
};

class RTC_EXPORT AudioProcessingBuilder {
 public:
  AudioProcessingBuilder();
  ~AudioProcessingBuilder();
  // The AudioProcessingBuilder takes ownership of the echo_control_factory.
  AudioProcessingBuilder& SetEchoControlFactory(
      std::unique_ptr<EchoControlFactory> echo_control_factory) {
    echo_control_factory_ = std::move(echo_control_factory);
    return *this;
  }
  // The AudioProcessingBuilder takes ownership of the capture_post_processing.
  AudioProcessingBuilder& SetCapturePostProcessing(
      std::unique_ptr<CustomProcessing> capture_post_processing) {
    capture_post_processing_ = std::move(capture_post_processing);
    return *this;
  }
  // The AudioProcessingBuilder takes ownership of the render_pre_processing.
  AudioProcessingBuilder& SetRenderPreProcessing(
      std::unique_ptr<CustomProcessing> render_pre_processing) {
    render_pre_processing_ = std::move(render_pre_processing);
    return *this;
  }
  // The AudioProcessingBuilder takes ownership of the echo_detector.
  AudioProcessingBuilder& SetEchoDetector(
      rtc::scoped_refptr<EchoDetector> echo_detector) {
    echo_detector_ = std::move(echo_detector);
    return *this;
  }
  // The AudioProcessingBuilder takes ownership of the capture_analyzer.
  AudioProcessingBuilder& SetCaptureAnalyzer(
      std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
    capture_analyzer_ = std::move(capture_analyzer);
    return *this;
  }
  // This creates an APM instance using the previously set components. Calling
  // the Create function resets the AudioProcessingBuilder to its initial state.
  AudioProcessing* Create();
  AudioProcessing* Create(const webrtc::Config& config);

 private:
  std::unique_ptr<EchoControlFactory> echo_control_factory_;
  std::unique_ptr<CustomProcessing> capture_post_processing_;
  std::unique_ptr<CustomProcessing> render_pre_processing_;
  rtc::scoped_refptr<EchoDetector> echo_detector_;
  std::unique_ptr<CustomAudioAnalyzer> capture_analyzer_;
  RTC_DISALLOW_COPY_AND_ASSIGN(AudioProcessingBuilder);
};
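
// Example (a sketch; EchoCanceller3Factory lives in
// api/audio/echo_canceller3_factory.h, which is not included by this header):
// building an APM with an injected AEC3 echo control factory.
//   AudioProcessing* apm =
//       AudioProcessingBuilder()
//           .SetEchoControlFactory(std::make_unique<EchoCanceller3Factory>())
//           .Create();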

class StreamConfig {
 public:
  // sample_rate_hz: The sampling rate of the stream.
  //
  // num_channels: The number of audio channels in the stream, excluding the
  //               keyboard channel if it is present. When passing a
  //               StreamConfig with an array of arrays T*[N],
  //
  //                N == {num_channels + 1  if  has_keyboard
  //                     {num_channels      if  !has_keyboard
  //
  // has_keyboard: True if the stream has a keyboard channel. When has_keyboard
  //               is true, the last channel in any corresponding list of
  //               channels is the keyboard channel.
  StreamConfig(int sample_rate_hz = 0,
               size_t num_channels = 0,
               bool has_keyboard = false)
      : sample_rate_hz_(sample_rate_hz),
        num_channels_(num_channels),
        has_keyboard_(has_keyboard),
        num_frames_(calculate_frames(sample_rate_hz)) {}

  void set_sample_rate_hz(int value) {
    sample_rate_hz_ = value;
    num_frames_ = calculate_frames(value);
  }
  void set_num_channels(size_t value) { num_channels_ = value; }
  void set_has_keyboard(bool value) { has_keyboard_ = value; }

  int sample_rate_hz() const { return sample_rate_hz_; }

  // The number of channels in the stream, not including the keyboard channel
  // if present.
  size_t num_channels() const { return num_channels_; }

  bool has_keyboard() const { return has_keyboard_; }
  size_t num_frames() const { return num_frames_; }
  size_t num_samples() const { return num_channels_ * num_frames_; }

  bool operator==(const StreamConfig& other) const {
    return sample_rate_hz_ == other.sample_rate_hz_ &&
           num_channels_ == other.num_channels_ &&
           has_keyboard_ == other.has_keyboard_;
  }

  bool operator!=(const StreamConfig& other) const { return !(*this == other); }

 private:
  static size_t calculate_frames(int sample_rate_hz) {
    return static_cast<size_t>(AudioProcessing::kChunkSizeMs * sample_rate_hz /
                               1000);
  }

  int sample_rate_hz_;
  size_t num_channels_;
  bool has_keyboard_;
  size_t num_frames_;
};
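
// Example (a sketch): a 48 kHz stereo capture stream with an additional
// keyboard channel. The matching float buffers are passed as T*[3]: left,
// right, keyboard.
//   StreamConfig stereo_with_keyboard(48000, 2, /*has_keyboard=*/true);
//   // stereo_with_keyboard.num_frames() == 480 (10 ms at 48 kHz) and
//   // stereo_with_keyboard.num_samples() == 960 (keyboard not counted).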

class ProcessingConfig {
 public:
  enum StreamName {
    kInputStream,
    kOutputStream,
    kReverseInputStream,
    kReverseOutputStream,
    kNumStreamNames,
  };

  const StreamConfig& input_stream() const {
    return streams[StreamName::kInputStream];
  }
  const StreamConfig& output_stream() const {
    return streams[StreamName::kOutputStream];
  }
  const StreamConfig& reverse_input_stream() const {
    return streams[StreamName::kReverseInputStream];
  }
  const StreamConfig& reverse_output_stream() const {
    return streams[StreamName::kReverseOutputStream];
  }

  StreamConfig& input_stream() { return streams[StreamName::kInputStream]; }
  StreamConfig& output_stream() { return streams[StreamName::kOutputStream]; }
  StreamConfig& reverse_input_stream() {
    return streams[StreamName::kReverseInputStream];
  }
  StreamConfig& reverse_output_stream() {
    return streams[StreamName::kReverseOutputStream];
  }

  bool operator==(const ProcessingConfig& other) const {
    for (int i = 0; i < StreamName::kNumStreamNames; ++i) {
      if (this->streams[i] != other.streams[i]) {
        return false;
      }
    }
    return true;
  }

  bool operator!=(const ProcessingConfig& other) const {
    return !(*this == other);
  }

  StreamConfig streams[StreamName::kNumStreamNames];
};
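
// Example (a sketch; error handling omitted): initializing APM for 48 kHz
// mono capture and render ahead of processing. |apm| is an existing
// AudioProcessing instance.
//   ProcessingConfig processing_config;
//   processing_config.input_stream() = StreamConfig(48000, 1);
//   processing_config.output_stream() = StreamConfig(48000, 1);
//   processing_config.reverse_input_stream() = StreamConfig(48000, 1);
//   processing_config.reverse_output_stream() = StreamConfig(48000, 1);
//   apm->Initialize(processing_config);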

// Experimental interface for a custom analysis submodule.
class CustomAudioAnalyzer {
 public:
  // (Re-) Initializes the submodule.
  virtual void Initialize(int sample_rate_hz, int num_channels) = 0;
  // Analyzes the given capture or render signal.
  virtual void Analyze(const AudioBuffer* audio) = 0;
  // Returns a string representation of the module state.
  virtual std::string ToString() const = 0;

  virtual ~CustomAudioAnalyzer() {}
};

// Interface for a custom processing submodule.
class CustomProcessing {
 public:
  // (Re-)Initializes the submodule.
  virtual void Initialize(int sample_rate_hz, int num_channels) = 0;
  // Processes the given capture or render signal.
  virtual void Process(AudioBuffer* audio) = 0;
  // Returns a string representation of the module state.
  virtual std::string ToString() const = 0;
  // Handles RuntimeSettings. TODO(webrtc:9262): make pure virtual
  // after updating dependencies.
  virtual void SetRuntimeSetting(AudioProcessing::RuntimeSetting setting);

  virtual ~CustomProcessing() {}
};
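
// Example (a sketch): a minimal pass-through CustomProcessing implementation
// that could be injected as a render pre-processor via
// AudioProcessingBuilder::SetRenderPreProcessing().
//   class PassThroughProcessing : public CustomProcessing {
//    public:
//     void Initialize(int sample_rate_hz, int num_channels) override {}
//     void Process(AudioBuffer* audio) override {}
//     std::string ToString() const override { return "PassThroughProcessing"; }
//   };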

// Interface for an echo detector submodule.
class EchoDetector : public rtc::RefCountInterface {
 public:
  // (Re-)Initializes the submodule.
  virtual void Initialize(int capture_sample_rate_hz,
                          int num_capture_channels,
                          int render_sample_rate_hz,
                          int num_render_channels) = 0;

  // Analysis (not changing) of the render signal.
  virtual void AnalyzeRenderAudio(rtc::ArrayView<const float> render_audio) = 0;

  // Analysis (not changing) of the capture signal.
  virtual void AnalyzeCaptureAudio(
      rtc::ArrayView<const float> capture_audio) = 0;

  // Pack an AudioBuffer into a vector<float>.
  static void PackRenderAudioBuffer(AudioBuffer* audio,
                                    std::vector<float>* packed_buffer);

  struct Metrics {
    absl::optional<double> echo_likelihood;
    absl::optional<double> echo_likelihood_recent_max;
  };

  // Collect current metrics from the echo detector.
  virtual Metrics GetMetrics() const = 0;
};
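
// Example (a sketch): reading metrics from an injected echo detector.
// |echo_detector| is the rtc::scoped_refptr<EchoDetector> that was passed to
// AudioProcessingBuilder::SetEchoDetector().
//   EchoDetector::Metrics metrics = echo_detector->GetMetrics();
//   if (metrics.echo_likelihood && *metrics.echo_likelihood > 0.9) {
//     // Echo is likely present in the capture signal.
//   }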

}  // namespace webrtc

#endif  // MODULES_AUDIO_PROCESSING_INCLUDE_AUDIO_PROCESSING_H_