1 /*
2 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "modules/audio_mixer/frame_combiner.h"
12
13 #include <algorithm>
14 #include <array>
15 #include <cstdint>
16 #include <iterator>
17 #include <memory>
18 #include <string>
19
20 #include "api/array_view.h"
21 #include "common_audio/include/audio_util.h"
22 #include "modules/audio_mixer/audio_frame_manipulator.h"
23 #include "modules/audio_mixer/audio_mixer_impl.h"
24 #include "modules/audio_processing/include/audio_frame_view.h"
25 #include "modules/audio_processing/include/audio_processing.h"
26 #include "modules/audio_processing/logging/apm_data_dumper.h"
27 #include "rtc_base/arraysize.h"
28 #include "rtc_base/checks.h"
29 #include "system_wrappers/include/metrics.h"
30
31 namespace webrtc {
32 namespace {
33
// Deinterleaved float mixing buffer, indexed as [channel][sample]
// (see MixToFloatFrame). Fixed maximum size; actual usage is clamped to
// the current channel count and samples-per-channel.
using MixingBuffer =
    std::array<std::array<float, FrameCombiner::kMaximumChannelSize>,
               FrameCombiner::kMaximumNumberOfChannels>;
37
SetAudioFrameFields(const std::vector<AudioFrame * > & mix_list,size_t number_of_channels,int sample_rate,size_t number_of_streams,AudioFrame * audio_frame_for_mixing)38 void SetAudioFrameFields(const std::vector<AudioFrame*>& mix_list,
39 size_t number_of_channels,
40 int sample_rate,
41 size_t number_of_streams,
42 AudioFrame* audio_frame_for_mixing) {
43 const size_t samples_per_channel = static_cast<size_t>(
44 (sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
45
46 // TODO(minyue): Issue bugs.webrtc.org/3390.
47 // Audio frame timestamp. The 'timestamp_' field is set to dummy
48 // value '0', because it is only supported in the one channel case and
49 // is then updated in the helper functions.
50 audio_frame_for_mixing->UpdateFrame(
51 0, nullptr, samples_per_channel, sample_rate, AudioFrame::kUndefined,
52 AudioFrame::kVadUnknown, number_of_channels);
53
54 if (mix_list.empty()) {
55 audio_frame_for_mixing->elapsed_time_ms_ = -1;
56 } else if (mix_list.size() == 1) {
57 audio_frame_for_mixing->timestamp_ = mix_list[0]->timestamp_;
58 audio_frame_for_mixing->elapsed_time_ms_ = mix_list[0]->elapsed_time_ms_;
59 audio_frame_for_mixing->ntp_time_ms_ = mix_list[0]->ntp_time_ms_;
60 audio_frame_for_mixing->packet_infos_ = mix_list[0]->packet_infos_;
61 }
62 }
63
MixFewFramesWithNoLimiter(const std::vector<AudioFrame * > & mix_list,AudioFrame * audio_frame_for_mixing)64 void MixFewFramesWithNoLimiter(const std::vector<AudioFrame*>& mix_list,
65 AudioFrame* audio_frame_for_mixing) {
66 if (mix_list.empty()) {
67 audio_frame_for_mixing->Mute();
68 return;
69 }
70 RTC_DCHECK_LE(mix_list.size(), 1);
71 std::copy(mix_list[0]->data(),
72 mix_list[0]->data() +
73 mix_list[0]->num_channels_ * mix_list[0]->samples_per_channel_,
74 audio_frame_for_mixing->mutable_data());
75 }
76
MixToFloatFrame(const std::vector<AudioFrame * > & mix_list,size_t samples_per_channel,size_t number_of_channels,MixingBuffer * mixing_buffer)77 void MixToFloatFrame(const std::vector<AudioFrame*>& mix_list,
78 size_t samples_per_channel,
79 size_t number_of_channels,
80 MixingBuffer* mixing_buffer) {
81 RTC_DCHECK_LE(samples_per_channel, FrameCombiner::kMaximumChannelSize);
82 RTC_DCHECK_LE(number_of_channels, FrameCombiner::kMaximumNumberOfChannels);
83 // Clear the mixing buffer.
84 for (auto& one_channel_buffer : *mixing_buffer) {
85 std::fill(one_channel_buffer.begin(), one_channel_buffer.end(), 0.f);
86 }
87
88 // Convert to FloatS16 and mix.
89 for (size_t i = 0; i < mix_list.size(); ++i) {
90 const AudioFrame* const frame = mix_list[i];
91 for (size_t j = 0; j < std::min(number_of_channels,
92 FrameCombiner::kMaximumNumberOfChannels);
93 ++j) {
94 for (size_t k = 0; k < std::min(samples_per_channel,
95 FrameCombiner::kMaximumChannelSize);
96 ++k) {
97 (*mixing_buffer)[j][k] += frame->data()[number_of_channels * k + j];
98 }
99 }
100 }
101 }
102
RunLimiter(AudioFrameView<float> mixing_buffer_view,Limiter * limiter)103 void RunLimiter(AudioFrameView<float> mixing_buffer_view, Limiter* limiter) {
104 const size_t sample_rate = mixing_buffer_view.samples_per_channel() * 1000 /
105 AudioMixerImpl::kFrameDurationInMs;
106 // TODO(alessiob): Avoid calling SetSampleRate every time.
107 limiter->SetSampleRate(sample_rate);
108 limiter->Process(mixing_buffer_view);
109 }
110
111 // Both interleaves and rounds.
InterleaveToAudioFrame(AudioFrameView<const float> mixing_buffer_view,AudioFrame * audio_frame_for_mixing)112 void InterleaveToAudioFrame(AudioFrameView<const float> mixing_buffer_view,
113 AudioFrame* audio_frame_for_mixing) {
114 const size_t number_of_channels = mixing_buffer_view.num_channels();
115 const size_t samples_per_channel = mixing_buffer_view.samples_per_channel();
116 // Put data in the result frame.
117 for (size_t i = 0; i < number_of_channels; ++i) {
118 for (size_t j = 0; j < samples_per_channel; ++j) {
119 audio_frame_for_mixing->mutable_data()[number_of_channels * j + i] =
120 FloatS16ToS16(mixing_buffer_view.channel(i)[j]);
121 }
122 }
123 }
124 } // namespace
125
// Out-of-line definitions of the static constexpr members, needed for
// ODR-use before C++17 made such members implicitly inline.
constexpr size_t FrameCombiner::kMaximumNumberOfChannels;
constexpr size_t FrameCombiner::kMaximumChannelSize;
128
FrameCombiner(bool use_limiter)129 FrameCombiner::FrameCombiner(bool use_limiter)
130 : data_dumper_(new ApmDataDumper(0)),
131 mixing_buffer_(
132 std::make_unique<std::array<std::array<float, kMaximumChannelSize>,
133 kMaximumNumberOfChannels>>()),
134 limiter_(static_cast<size_t>(48000), data_dumper_.get(), "AudioMixer"),
135 use_limiter_(use_limiter) {
136 static_assert(kMaximumChannelSize * kMaximumNumberOfChannels <=
137 AudioFrame::kMaxDataSizeSamples,
138 "");
139 }
140
// Defaulted out-of-line — presumably so smart-pointer members only need
// complete pointee types in this translation unit; confirm against header.
FrameCombiner::~FrameCombiner() = default;
142
Combine(const std::vector<AudioFrame * > & mix_list,size_t number_of_channels,int sample_rate,size_t number_of_streams,AudioFrame * audio_frame_for_mixing)143 void FrameCombiner::Combine(const std::vector<AudioFrame*>& mix_list,
144 size_t number_of_channels,
145 int sample_rate,
146 size_t number_of_streams,
147 AudioFrame* audio_frame_for_mixing) {
148 RTC_DCHECK(audio_frame_for_mixing);
149
150 LogMixingStats(mix_list, sample_rate, number_of_streams);
151
152 SetAudioFrameFields(mix_list, number_of_channels, sample_rate,
153 number_of_streams, audio_frame_for_mixing);
154
155 const size_t samples_per_channel = static_cast<size_t>(
156 (sample_rate * webrtc::AudioMixerImpl::kFrameDurationInMs) / 1000);
157
158 for (const auto* frame : mix_list) {
159 RTC_DCHECK_EQ(samples_per_channel, frame->samples_per_channel_);
160 RTC_DCHECK_EQ(sample_rate, frame->sample_rate_hz_);
161 }
162
163 // The 'num_channels_' field of frames in 'mix_list' could be
164 // different from 'number_of_channels'.
165 for (auto* frame : mix_list) {
166 RemixFrame(number_of_channels, frame);
167 }
168
169 if (number_of_streams <= 1) {
170 MixFewFramesWithNoLimiter(mix_list, audio_frame_for_mixing);
171 return;
172 }
173
174 MixToFloatFrame(mix_list, samples_per_channel, number_of_channels,
175 mixing_buffer_.get());
176
177 const size_t output_number_of_channels =
178 std::min(number_of_channels, kMaximumNumberOfChannels);
179 const size_t output_samples_per_channel =
180 std::min(samples_per_channel, kMaximumChannelSize);
181
182 // Put float data in an AudioFrameView.
183 std::array<float*, kMaximumNumberOfChannels> channel_pointers{};
184 for (size_t i = 0; i < output_number_of_channels; ++i) {
185 channel_pointers[i] = &(*mixing_buffer_.get())[i][0];
186 }
187 AudioFrameView<float> mixing_buffer_view(&channel_pointers[0],
188 output_number_of_channels,
189 output_samples_per_channel);
190
191 if (use_limiter_) {
192 RunLimiter(mixing_buffer_view, &limiter_);
193 }
194
195 InterleaveToAudioFrame(mixing_buffer_view, audio_frame_for_mixing);
196 }
197
LogMixingStats(const std::vector<AudioFrame * > & mix_list,int sample_rate,size_t number_of_streams) const198 void FrameCombiner::LogMixingStats(const std::vector<AudioFrame*>& mix_list,
199 int sample_rate,
200 size_t number_of_streams) const {
201 // Log every second.
202 uma_logging_counter_++;
203 if (uma_logging_counter_ > 1000 / AudioMixerImpl::kFrameDurationInMs) {
204 uma_logging_counter_ = 0;
205 RTC_HISTOGRAM_COUNTS_100("WebRTC.Audio.AudioMixer.NumIncomingStreams",
206 static_cast<int>(number_of_streams));
207 RTC_HISTOGRAM_ENUMERATION(
208 "WebRTC.Audio.AudioMixer.NumIncomingActiveStreams",
209 static_cast<int>(mix_list.size()),
210 AudioMixerImpl::kMaximumAmountOfMixedAudioSources);
211
212 using NativeRate = AudioProcessing::NativeRate;
213 static constexpr NativeRate native_rates[] = {
214 NativeRate::kSampleRate8kHz, NativeRate::kSampleRate16kHz,
215 NativeRate::kSampleRate32kHz, NativeRate::kSampleRate48kHz};
216 const auto* rate_position = std::lower_bound(
217 std::begin(native_rates), std::end(native_rates), sample_rate);
218 RTC_HISTOGRAM_ENUMERATION(
219 "WebRTC.Audio.AudioMixer.MixingRate",
220 std::distance(std::begin(native_rates), rate_position),
221 arraysize(native_rates));
222 }
223 }
224
225 } // namespace webrtc
226