/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_coding/neteq/merge.h"

#include <assert.h>
#include <string.h>  // memmove, memcpy, memset, size_t

#include <algorithm>  // min, max

#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h"
#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
#include "webrtc/modules/audio_coding/neteq/expand.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"

namespace webrtc {

Merge::Merge(int fs_hz,
             size_t num_channels,
             Expand* expand,
             SyncBuffer* sync_buffer)
    : fs_hz_(fs_hz),
      num_channels_(num_channels),
      fs_mult_(fs_hz_ / 8000),
      timestamps_per_call_(static_cast<size_t>(fs_hz_ / 100)),  // 10 ms.
      expand_(expand),
      sync_buffer_(sync_buffer),
      expanded_(num_channels_) {
  assert(num_channels_ > 0);
}

size_t Merge::Process(int16_t* input, size_t input_length,
                      int16_t* external_mute_factor_array,
                      AudioMultiVector* output) {
  // TODO(hlundin): Change to an enumerator and skip assert.
  assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
         fs_hz_ == 48000);
  assert(fs_hz_ <= kMaxSampleRate);  // Should not be possible.

  size_t old_length;
  size_t expand_period;
  // Get expansion data to overlap and mix with.
  size_t expanded_length = GetExpandedSignal(&old_length, &expand_period);

  // Transfer input signal to an AudioMultiVector.
  AudioMultiVector input_vector(num_channels_);
  input_vector.PushBackInterleaved(input, input_length);
  size_t input_length_per_channel = input_vector.Size();
  assert(input_length_per_channel == input_length / num_channels_);

  size_t best_correlation_index = 0;
  size_t output_length = 0;

  for (size_t channel = 0; channel < num_channels_; ++channel) {
    int16_t* input_channel = &input_vector[channel][0];
    int16_t* expanded_channel = &expanded_[channel][0];
    int16_t expanded_max, input_max;
    int16_t new_mute_factor = SignalScaling(
        input_channel, input_length_per_channel, expanded_channel,
        &expanded_max, &input_max);

    // Adjust muting factor (product of "main" muting factor and expand muting
    // factor).
    int16_t* external_mute_factor = &external_mute_factor_array[channel];
    *external_mute_factor =
        (*external_mute_factor * expand_->MuteFactor(channel)) >> 14;

    // Update |external_mute_factor| if it is lower than |new_mute_factor|.
    if (new_mute_factor > *external_mute_factor) {
      *external_mute_factor = std::min(new_mute_factor,
                                       static_cast<int16_t>(16384));
    }

    if (channel == 0) {
      // Downsample, correlate, and find strongest correlation period for the
      // master (i.e., first) channel only.
      // Downsample to 4kHz sample rate.
      Downsample(input_channel, input_length_per_channel, expanded_channel,
                 expanded_length);

      // Calculate the lag of the strongest correlation period.
      best_correlation_index = CorrelateAndPeakSearch(
          expanded_max, input_max, old_length,
          input_length_per_channel, expand_period);
    }

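    // Assemble the merged output for this channel in |temp_data|: the first
    // |best_correlation_index| samples are taken from the expanded signal,
    // and the remainder is the (ramped and cross-faded) input signal.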
    static const int kTempDataSize = 3600;
    int16_t temp_data[kTempDataSize];  // TODO(hlundin) Remove this.
    int16_t* decoded_output = temp_data + best_correlation_index;

    // Mute the new decoded data if needed (and unmute it linearly).
    // This is the overlapping part of expanded_signal.
    size_t interpolation_length = std::min(
        kMaxCorrelationLength * fs_mult_,
        expanded_length - best_correlation_index);
    interpolation_length = std::min(interpolation_length,
                                    input_length_per_channel);
    if (*external_mute_factor < 16384) {
      // Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
      // and so on.
      int increment = 4194 / fs_mult_;
      *external_mute_factor =
          static_cast<int16_t>(DspHelper::RampSignal(input_channel,
                                                     interpolation_length,
                                                     *external_mute_factor,
                                                     increment));
      DspHelper::UnmuteSignal(&input_channel[interpolation_length],
                              input_length_per_channel - interpolation_length,
                              external_mute_factor, increment,
                              &decoded_output[interpolation_length]);
    } else {
      // No muting needed.
      memmove(
          &decoded_output[interpolation_length],
          &input_channel[interpolation_length],
          sizeof(int16_t) * (input_length_per_channel - interpolation_length));
    }

    // Do overlap and mix linearly.
    int16_t increment =
        static_cast<int16_t>(16384 / (interpolation_length + 1));  // In Q14.
    int16_t mute_factor = 16384 - increment;
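    // Copy the part of the expanded signal that precedes the overlap region
    // unchanged; the overlap region itself is then cross-faded with the input.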
    memmove(temp_data, expanded_channel,
            sizeof(int16_t) * best_correlation_index);
    DspHelper::CrossFade(&expanded_channel[best_correlation_index],
                         input_channel, interpolation_length,
                         &mute_factor, increment, decoded_output);

    output_length = best_correlation_index + input_length_per_channel;
    if (channel == 0) {
      assert(output->Empty());  // Output should be empty at this point.
      output->AssertSize(output_length);
    } else {
      assert(output->Size() == output_length);
    }
    memcpy(&(*output)[channel][0], temp_data,
           sizeof(temp_data[0]) * output_length);
  }

  // Copy back the first part of the data to |sync_buffer_| and remove it from
  // |output|.
  sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index());
  output->PopFront(old_length);

  // Return the newly added length. |old_length| samples were borrowed from
  // |sync_buffer_|.
  return output_length - old_length;
}

size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) {
  // Check how much data is left from earlier.
  *old_length = sync_buffer_->FutureLength();
  // Should never be less than overlap_length.
  assert(*old_length >= expand_->overlap_length());
  // Generate data to merge the overlap with using expand.
  expand_->SetParametersForMergeAfterExpand();

  if (*old_length >= 210 * kMaxSampleRate / 8000) {
    // TODO(hlundin): Write test case for this.
    // The number of samples available in the sync buffer is more than what
    // fits in expanded_signal. Keep the first 210 * kMaxSampleRate / 8000
    // samples, but shift them towards the end of the buffer. This is ok, since
    // all of the buffer will be expand data anyway, so as long as the
    // beginning is left untouched, we're fine.
    size_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
    sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
    *old_length = 210 * kMaxSampleRate / 8000;
    // This is the truncated length.
  }
  // This assert should always be true thanks to the if statement above.
  assert(210 * kMaxSampleRate / 8000 >= *old_length);

  AudioMultiVector expanded_temp(num_channels_);
  expand_->Process(&expanded_temp);
  *expand_period = expanded_temp.Size();  // Samples per channel.

  expanded_.Clear();
  // Copy what is left from earlier into the expanded vector.
  expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
  assert(expanded_.Size() == *old_length);
  assert(expanded_temp.Size() > 0);
  // Do "ugly" copy and paste from the expanded in order to generate more data
  // to correlate (but not interpolate) with.
  const size_t required_length = static_cast<size_t>((120 + 80 + 2) * fs_mult_);
  if (expanded_.Size() < required_length) {
    while (expanded_.Size() < required_length) {
      // Append one more pitch period each time.
      expanded_.PushBack(expanded_temp);
    }
    // Trim the length to exactly |required_length|.
    expanded_.PopBack(expanded_.Size() - required_length);
  }
  assert(expanded_.Size() >= required_length);
  return required_length;
}

int16_t Merge::SignalScaling(const int16_t* input, size_t input_length,
                             const int16_t* expanded_signal,
                             int16_t* expanded_max, int16_t* input_max) const {
  // Adjust the muting factor if the new vector has more or less energy than
  // the BGN (background noise).
  const size_t mod_input_length =
      std::min(static_cast<size_t>(64 * fs_mult_), input_length);
  *expanded_max = WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
  *input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);

  // Calculate energy of expanded signal.
  // |log_fs_mult| is log2(fs_mult_), but is not exact for 48000 Hz.
  int log_fs_mult = 30 - WebRtcSpl_NormW32(fs_mult_);
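  // The energy sums below add up to |mod_input_length| <= 64 * fs_mult_ =
  // 2^(6 + log_fs_mult) terms, which is why the shifts below include
  // 6 + log_fs_mult bits of headroom against 32-bit overflow.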
  int expanded_shift = 6 + log_fs_mult
      - WebRtcSpl_NormW32(*expanded_max * *expanded_max);
  expanded_shift = std::max(expanded_shift, 0);
  int32_t energy_expanded = WebRtcSpl_DotProductWithScale(expanded_signal,
                                                          expanded_signal,
                                                          mod_input_length,
                                                          expanded_shift);

  // Calculate energy of input signal.
  int input_shift = 6 + log_fs_mult -
      WebRtcSpl_NormW32(*input_max * *input_max);
  input_shift = std::max(input_shift, 0);
  int32_t energy_input = WebRtcSpl_DotProductWithScale(input, input,
                                                       mod_input_length,
                                                       input_shift);

  // Align to the same Q-domain.
  if (input_shift > expanded_shift) {
    energy_expanded = energy_expanded >> (input_shift - expanded_shift);
  } else {
    energy_input = energy_input >> (expanded_shift - input_shift);
  }

  // Calculate muting factor to use for new frame.
  int16_t mute_factor;
  if (energy_input > energy_expanded) {
    // Normalize |energy_input| to 14 bits.
    int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17;
    energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift);
    // Put |energy_expanded| in a domain 14 higher, so that
    // energy_expanded / energy_input is in Q14.
    energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
    // Calculate sqrt(energy_expanded / energy_input) in Q14.
    mute_factor = static_cast<int16_t>(
        WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14));
  } else {
    // Set to 1 (in Q14) when |expanded| has higher energy than |input|.
    mute_factor = 16384;
  }

  return mute_factor;
}

// TODO(hlundin): There are some parameter values in this method that seem
// strange. Compare with Expand::Correlation.
void Merge::Downsample(const int16_t* input, size_t input_length,
                       const int16_t* expanded_signal, size_t expanded_length) {
  const int16_t* filter_coefficients;
  size_t num_coefficients;
  int decimation_factor = fs_hz_ / 4000;
  static const size_t kCompensateDelay = 0;
  size_t length_limit = static_cast<size_t>(fs_hz_ / 100);  // 10 ms in samples.
  if (fs_hz_ == 8000) {
    filter_coefficients = DspHelper::kDownsample8kHzTbl;
    num_coefficients = 3;
  } else if (fs_hz_ == 16000) {
    filter_coefficients = DspHelper::kDownsample16kHzTbl;
    num_coefficients = 5;
  } else if (fs_hz_ == 32000) {
    filter_coefficients = DspHelper::kDownsample32kHzTbl;
    num_coefficients = 7;
  } else {  // fs_hz_ == 48000
    filter_coefficients = DspHelper::kDownsample48kHzTbl;
    num_coefficients = 7;
  }
  size_t signal_offset = num_coefficients - 1;
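  // Start reading both signals |signal_offset| samples in, so that the
  // downsampling filter has |num_coefficients| - 1 preceding samples available
  // as history.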
  WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset],
                           expanded_length - signal_offset,
                           expanded_downsampled_, kExpandDownsampLength,
                           filter_coefficients, num_coefficients,
                           decimation_factor, kCompensateDelay);
  if (input_length <= length_limit) {
    // Not quite long enough, so we have to cheat a bit.
    size_t temp_len = input_length - signal_offset;
    // TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
    // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
    size_t downsamp_temp_len = temp_len / decimation_factor;
    WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len,
                             input_downsampled_, downsamp_temp_len,
                             filter_coefficients, num_coefficients,
                             decimation_factor, kCompensateDelay);
    memset(&input_downsampled_[downsamp_temp_len], 0,
           sizeof(int16_t) * (kInputDownsampLength - downsamp_temp_len));
  } else {
    WebRtcSpl_DownsampleFast(&input[signal_offset],
                             input_length - signal_offset, input_downsampled_,
                             kInputDownsampLength, filter_coefficients,
                             num_coefficients, decimation_factor,
                             kCompensateDelay);
  }
}

size_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
                                     size_t start_position, size_t input_length,
                                     size_t expand_period) const {
  // Calculate correlation without any normalization.
  const size_t max_corr_length = kMaxCorrelationLength;
  size_t stop_position_downsamp =
      std::min(max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
  int correlation_shift = 0;
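  // If the product of the signal maxima is large, pre-shift the correlation
  // terms so that the accumulated 32-bit cross-correlation does not overflow.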
  if (expanded_max * input_max > 26843546) {
    correlation_shift = 3;
  }

  int32_t correlation[kMaxCorrelationLength];
  WebRtcSpl_CrossCorrelation(correlation, input_downsampled_,
                             expanded_downsampled_, kInputDownsampLength,
                             stop_position_downsamp, correlation_shift, 1);

  // Normalize correlation to 14 bits and copy to a 16-bit array.
  const size_t pad_length = expand_->overlap_length() - 1;
  const size_t correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
  rtc::scoped_ptr<int16_t[]> correlation16(
      new int16_t[correlation_buffer_size]);
  memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
  int16_t* correlation_ptr = &correlation16[pad_length];
  int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
                                                     stop_position_downsamp);
  int norm_shift = std::max(0, 17 - WebRtcSpl_NormW32(max_correlation));
  WebRtcSpl_VectorBitShiftW32ToW16(correlation_ptr, stop_position_downsamp,
                                   correlation, norm_shift);

  // Calculate allowed starting point for peak finding.
  // The peak location best_correlation_index must fulfill two criteria:
  // (1) best_correlation_index + input_length >=
  //     timestamps_per_call_ + expand_->overlap_length();
  // (2) best_correlation_index + input_length >= start_position.
  size_t start_index = timestamps_per_call_ + expand_->overlap_length();
  start_index = std::max(start_position, start_index);
  start_index = (input_length > start_index) ? 0 : (start_index - input_length);
  // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
  size_t start_index_downsamp = start_index / (fs_mult_ * 2);

  // Calculate a modified |stop_position_downsamp| to account for the increased
  // start index |start_index_downsamp| and the effective array length.
  size_t modified_stop_pos =
      std::min(stop_position_downsamp,
               kMaxCorrelationLength + pad_length - start_index_downsamp);
  size_t best_correlation_index;
  int16_t best_correlation;
  static const size_t kNumCorrelationCandidates = 1;
  DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
                           modified_stop_pos, kNumCorrelationCandidates,
                           fs_mult_, &best_correlation_index,
                           &best_correlation);
  // Compensate for modified start index.
  best_correlation_index += start_index;

  // Ensure that underrun does not occur for the 10 ms case, i.e., we have to
  // get at least 10 ms + overlap. (This should never happen thanks to the
  // above modification of the peak-finding starting point.)
  while (((best_correlation_index + input_length) <
          (timestamps_per_call_ + expand_->overlap_length())) ||
         ((best_correlation_index + input_length) < start_position)) {
    assert(false);  // Should never happen.
    best_correlation_index += expand_period;  // Jump one lag ahead.
  }
  return best_correlation_index;
}

size_t Merge::RequiredFutureSamples() {
  return fs_hz_ / 100 * num_channels_;  // 10 ms.
}

}  // namespace webrtc