/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/output_mixer.h"

#include "webrtc/base/format_macros.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
#include "webrtc/voice_engine/statistics.h"
#include "webrtc/voice_engine/utility.h"

namespace webrtc {
namespace voe {

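// Callback from the AudioConferenceMixer module (registered in the
// constructor via RegisterMixedStreamCallback()). The mixed frame is cached
// in _audioFrame and consumed later by DoOperationsOnCombinedSignal() and
// GetMixedAudio().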
void
OutputMixer::NewMixedAudio(int32_t id,
                           const AudioFrame& generalAudioFrame,
                           const AudioFrame** uniqueAudioFrames,
                           uint32_t size)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);

    _audioFrame.CopyFrom(generalAudioFrame);
    _audioFrame.id_ = id;
}

void OutputMixer::PlayNotification(int32_t id, uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::PlayNotification(id=%d, durationMs=%u)",
                 id, durationMs);
    // Not implemented yet.
}

void OutputMixer::RecordNotification(int32_t id,
                                     uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RecordNotification(id=%d, durationMs=%u)",
                 id, durationMs);

    // Not implemented yet.
}

void OutputMixer::PlayFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::PlayFileEnded(id=%d)", id);

    // Not needed.
}

void OutputMixer::RecordFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RecordFileEnded(id=%d)", id);
    assert(id == _instanceId);

    CriticalSectionScoped cs(&_fileCritSect);
    _outputFileRecording = false;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RecordFileEnded() => "
                 "output file recorder module is shut down");
}

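// Factory and lifetime management. A minimal usage sketch (illustrative
// only; the actual call order is dictated by the owning VoiceEngine code,
// and the names instance_id, stats and apm below are hypothetical):
//
//   OutputMixer* mixer = NULL;
//   if (OutputMixer::Create(mixer, instance_id) == 0) {
//     mixer->SetEngineInformation(stats);
//     mixer->SetAudioProcessingModule(apm);
//     ...
//     OutputMixer::Destroy(mixer);
//   }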
int32_t
OutputMixer::Create(OutputMixer*& mixer, uint32_t instanceId)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
                 "OutputMixer::Create(instanceId=%u)", instanceId);
    mixer = new OutputMixer(instanceId);
    if (mixer == NULL)
    {
        WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
                     "OutputMixer::Create() unable to allocate memory for "
                     "mixer");
        return -1;
    }
    return 0;
}

OutputMixer::OutputMixer(uint32_t instanceId) :
    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _mixerModule(*AudioConferenceMixer::Create(instanceId)),
    _audioLevel(),
    _dtmfGenerator(instanceId),
    _instanceId(instanceId),
    _externalMediaCallbackPtr(NULL),
    _externalMedia(false),
    _panLeft(1.0f),
    _panRight(1.0f),
    _mixingFrequencyHz(8000),
    _outputFileRecorderPtr(NULL),
    _outputFileRecording(false)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::OutputMixer() - ctor");

    if (_mixerModule.RegisterMixedStreamCallback(this) == -1)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
                     "OutputMixer::OutputMixer() failed to register mixer "
                     "callbacks");
    }

    _dtmfGenerator.Init();
}

void
OutputMixer::Destroy(OutputMixer*& mixer)
{
    if (mixer)
    {
        delete mixer;
        mixer = NULL;
    }
}

OutputMixer::~OutputMixer()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::~OutputMixer() - dtor");
    if (_externalMedia)
    {
        DeRegisterExternalMediaProcessing();
    }
    {
        CriticalSectionScoped cs(&_fileCritSect);
        if (_outputFileRecorderPtr)
        {
            _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
            _outputFileRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
            _outputFileRecorderPtr = NULL;
        }
    }
    _mixerModule.UnRegisterMixedStreamCallback();
    delete &_mixerModule;
    delete &_callbackCritSect;
    delete &_fileCritSect;
}

int32_t
OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::SetEngineInformation()");
    _engineStatisticsPtr = &engineStatistics;
    return 0;
}

int32_t
OutputMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::SetAudioProcessingModule("
                 "audioProcessingModule=0x%x)", audioProcessingModule);
    _audioProcessingModulePtr = audioProcessingModule;
    return 0;
}

int OutputMixer::RegisterExternalMediaProcessing(
    VoEMediaProcess& process_object)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RegisterExternalMediaProcessing()");

    CriticalSectionScoped cs(&_callbackCritSect);
    _externalMediaCallbackPtr = &process_object;
    _externalMedia = true;

    return 0;
}

int OutputMixer::DeRegisterExternalMediaProcessing()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::DeRegisterExternalMediaProcessing()");

    CriticalSectionScoped cs(&_callbackCritSect);
    _externalMedia = false;
    _externalMediaCallbackPtr = NULL;

    return 0;
}

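// Queues an in-band DTMF tone on the DTMF generator. The tone is mixed into
// the playout signal by InsertInbandDtmfTone(), called from
// DoOperationsOnCombinedSignal() while the generator reports an active tone.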
int OutputMixer::PlayDtmfTone(uint8_t eventCode, int lengthMs,
                              int attenuationDb)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "OutputMixer::PlayDtmfTone()");
    if (_dtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_STILL_PLAYING_PREV_DTMF,
                                           kTraceError,
                                           "OutputMixer::PlayDtmfTone()");
        return -1;
    }
    return 0;
}

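// Mixability control and the periodic mix itself simply forward to the owned
// AudioConferenceMixer module (_mixerModule); MixActiveChannels() triggers
// one Process() pass, which delivers its result through the NewMixedAudio()
// callback above.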
int32_t
OutputMixer::SetMixabilityStatus(MixerParticipant& participant,
                                 bool mixable)
{
    return _mixerModule.SetMixabilityStatus(&participant, mixable);
}

int32_t
OutputMixer::SetAnonymousMixabilityStatus(MixerParticipant& participant,
                                          bool mixable)
{
    return _mixerModule.SetAnonymousMixabilityStatus(&participant, mixable);
}

int32_t
OutputMixer::MixActiveChannels()
{
    return _mixerModule.Process();
}

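// Speech level queries read from the AudioLevel helper, which is updated at
// the end of DoOperationsOnCombinedSignal(): Level() reports a coarse 0-9
// scale, while LevelFullRange() reports the full linear 16-bit range.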
int
OutputMixer::GetSpeechOutputLevel(uint32_t& level)
{
    int8_t currentLevel = _audioLevel.Level();
    level = static_cast<uint32_t>(currentLevel);
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "GetSpeechOutputLevel() => level=%u", level);
    return 0;
}

int
OutputMixer::GetSpeechOutputLevelFullRange(uint32_t& level)
{
    int16_t currentLevel = _audioLevel.LevelFullRange();
    level = static_cast<uint32_t>(currentLevel);
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "GetSpeechOutputLevelFullRange() => level=%u", level);
    return 0;
}

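// Output balance. The pan factors are plain per-channel gains applied in
// DoOperationsOnCombinedSignal(); (1.0, 1.0) disables panning and the
// scaling step is skipped entirely.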
int
OutputMixer::SetOutputVolumePan(float left, float right)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::SetOutputVolumePan()");
    _panLeft = left;
    _panRight = right;
    return 0;
}

int
OutputMixer::GetOutputVolumePan(float& left, float& right)
{
    left = _panLeft;
    right = _panRight;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "GetOutputVolumePan() => left=%2.1f, right=%2.1f",
                 left, right);
    return 0;
}

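// Playout file recording. The target format is derived from the codec:
// no codec given => 16 kHz PCM file, L16/PCMU/PCMA => WAV container,
// anything else => compressed file format.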
int OutputMixer::StartRecordingPlayout(const char* fileName,
                                       const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::StartRecordingPlayout(fileName=%s)", fileName);

    if (_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                     "StartRecordingPlayout() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0);
    CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};

    if ((codecInst != NULL) &&
        ((codecInst->channels < 1) || (codecInst->channels > 2)))
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingPlayout() invalid compression");
        return -1;
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    }
    else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    }
    else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_fileCritSect);

    // Destroy the old instance.
    if (_outputFileRecorderPtr)
    {
        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
    }

    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
        _instanceId,
        (const FileFormats)format);
    if (_outputFileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingPlayout() fileRecorder format is not correct");
        return -1;
    }

    if (_outputFileRecorderPtr->StartRecordingAudioFile(
            fileName,
            (const CodecInst&)*codecInst,
            notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _outputFileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
        return -1;
    }
    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
    _outputFileRecording = true;

    return 0;
}

int OutputMixer::StartRecordingPlayout(OutStream* stream,
                                       const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::StartRecordingPlayout()");

    if (_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                     "StartRecordingPlayout() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0);
    CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingPlayout() invalid compression");
        return -1;
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    }
    else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    }
    else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_fileCritSect);

    // Destroy the old instance.
    if (_outputFileRecorderPtr)
    {
        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
    }

    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
        _instanceId,
        (const FileFormats)format);
    if (_outputFileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingPlayout() fileRecorder format is not correct");
        return -1;
    }

    if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream,
                                                        *codecInst,
                                                        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _outputFileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
        return -1;
    }

    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
    _outputFileRecording = true;

    return 0;
}

int OutputMixer::StopRecordingPlayout()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::StopRecordingPlayout()");

    if (!_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
                     "StopRecordingPlayout() file is not recording");
        return -1;
    }

    CriticalSectionScoped cs(&_fileCritSect);

    if (_outputFileRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }
    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
    _outputFileRecorderPtr = NULL;
    _outputFileRecording = false;

    return 0;
}

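// Returns the most recently mixed frame, remixed and resampled to the
// requested sample rate and channel count. If playout recording is active,
// the unmodified mixed frame is also written to the file recorder.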
int OutputMixer::GetMixedAudio(int sample_rate_hz,
                               size_t num_channels,
                               AudioFrame* frame) {
  WEBRTC_TRACE(
      kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
      "OutputMixer::GetMixedAudio(sample_rate_hz=%d, num_channels=%" PRIuS ")",
      sample_rate_hz, num_channels);

  // --- Record playout if enabled
  {
    CriticalSectionScoped cs(&_fileCritSect);
    if (_outputFileRecording && _outputFileRecorderPtr)
      _outputFileRecorderPtr->RecordAudioToFile(_audioFrame);
  }

  frame->num_channels_ = num_channels;
  frame->sample_rate_hz_ = sample_rate_hz;
  // TODO(andrew): Ideally the downmixing would occur much earlier, in
  // AudioCodingModule.
  RemixAndResample(_audioFrame, &resampler_, frame);
  return 0;
}

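// Post-mix processing of the combined signal, in order: insert any pending
// in-band DTMF tone, apply left/right pan scaling, feed the frame to the
// AudioProcessing reverse stream (far-end analysis, e.g. for AEC) when
// requested, hand the frame to a registered external media callback, and
// finally update the output audio level measurement.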
int32_t
OutputMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm)
{
    if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz)
    {
        WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                     "OutputMixer::DoOperationsOnCombinedSignal() => "
                     "mixing frequency = %d", _audioFrame.sample_rate_hz_);
        _mixingFrequencyHz = _audioFrame.sample_rate_hz_;
    }

    // --- Insert inband Dtmf tone
    if (_dtmfGenerator.IsAddingTone())
    {
        InsertInbandDtmfTone();
    }

    // Scale left and/or right channel(s) if balance is active
    if (_panLeft != 1.0 || _panRight != 1.0)
    {
        if (_audioFrame.num_channels_ == 1)
        {
            AudioFrameOperations::MonoToStereo(&_audioFrame);
        }
        else
        {
            // Pure stereo mode (we are receiving a stereo signal).
        }

        assert(_audioFrame.num_channels_ == 2);
        AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
    }

    // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
    if (feed_data_to_apm) {
        // Convert from mixing to AudioProcessing sample rate, similarly to
        // how it is done on the send side. Downmix to mono.
        AudioFrame frame;
        frame.num_channels_ = 1;
        frame.sample_rate_hz_ =
            _audioProcessingModulePtr->input_sample_rate_hz();
        RemixAndResample(_audioFrame, &audioproc_resampler_, &frame);

        if (_audioProcessingModulePtr->AnalyzeReverseStream(&frame) != 0) {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "AudioProcessingModule::AnalyzeReverseStream() => error");
            RTC_DCHECK(false);
        }
    }

    // --- External media processing
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_externalMedia)
        {
            const bool is_stereo = (_audioFrame.num_channels_ == 2);
            if (_externalMediaCallbackPtr)
            {
                _externalMediaCallbackPtr->Process(
                    -1,
                    kPlaybackAllChannelsMixed,
                    (int16_t*)_audioFrame.data_,
                    _audioFrame.samples_per_channel_,
                    _audioFrame.sample_rate_hz_,
                    is_stereo);
            }
        }
    }

    // --- Measure audio level (0-9) for the combined signal
    _audioLevel.ComputeLevel(_audioFrame);

    return 0;
}

// ----------------------------------------------------------------------------
// Private methods
// ----------------------------------------------------------------------------

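// Overwrites the mixed audio with the next 10 ms block of the DTMF tone
// currently being generated. For mono output the tone replaces the frame
// data directly; for stereo output the tone is written to the left channel
// and the right channel is zeroed.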
int
OutputMixer::InsertInbandDtmfTone()
{
    uint16_t sampleRate(0);
    _dtmfGenerator.GetSampleRate(sampleRate);
    if (sampleRate != _audioFrame.sample_rate_hz_)
    {
        // Update sample rate of Dtmf tone since the mixing frequency changed.
        _dtmfGenerator.SetSampleRate(
            (uint16_t)(_audioFrame.sample_rate_hz_));
        // Reset the tone to be added taking the new sample rate into account.
        _dtmfGenerator.ResetTone();
    }

    int16_t toneBuffer[320];
    uint16_t toneSamples(0);
    if (_dtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "OutputMixer::InsertInbandDtmfTone() inserting Dtmf "
                     "tone failed");
        return -1;
    }

    // Replace mixed audio with the Dtmf tone.
    if (_audioFrame.num_channels_ == 1)
    {
        // mono
        memcpy(_audioFrame.data_, toneBuffer,
               sizeof(int16_t) * toneSamples);
    }
    else
    {
        // stereo
        for (size_t i = 0; i < _audioFrame.samples_per_channel_; i++)
        {
            _audioFrame.data_[2 * i] = toneBuffer[i];
            _audioFrame.data_[2 * i + 1] = 0;
        }
    }
    assert(_audioFrame.samples_per_channel_ == toneSamples);

    return 0;
}

}  // namespace voe
}  // namespace webrtc