/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/transmit_mixer.h"

#include "webrtc/base/format_macros.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/channel_manager.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
#include "webrtc/voice_engine/statistics.h"
#include "webrtc/voice_engine/utility.h"
#include "webrtc/voice_engine/voe_base_impl.h"

namespace webrtc {
namespace voe {

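// Monitor-module callback (see SetEngineInformation()): forwards any pending
// typing-noise and saturation warnings to the registered VoiceEngineObserver,
// copying the flags under |_critSect| so the observer callback itself runs
// without the lock held.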
// TODO(ajm): The thread safety of this is dubious...
void
TransmitMixer::OnPeriodicProcess()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::OnPeriodicProcess()");

#if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
    bool send_typing_noise_warning = false;
    bool typing_noise_detected = false;
    {
        CriticalSectionScoped cs(&_critSect);
        if (_typingNoiseWarningPending) {
            send_typing_noise_warning = true;
            typing_noise_detected = _typingNoiseDetected;
            _typingNoiseWarningPending = false;
        }
    }
    if (send_typing_noise_warning) {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_voiceEngineObserverPtr) {
            if (typing_noise_detected) {
                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                             "TransmitMixer::OnPeriodicProcess() => "
                             "CallbackOnError(VE_TYPING_NOISE_WARNING)");
                _voiceEngineObserverPtr->CallbackOnError(
                    -1,
                    VE_TYPING_NOISE_WARNING);
            } else {
                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                             "TransmitMixer::OnPeriodicProcess() => "
                             "CallbackOnError(VE_TYPING_NOISE_OFF_WARNING)");
                _voiceEngineObserverPtr->CallbackOnError(
                    -1,
                    VE_TYPING_NOISE_OFF_WARNING);
            }
        }
    }
#endif

    bool saturationWarning = false;
    {
        // Modify |_saturationWarning| under lock to avoid conflict with write op
        // in ProcessAudio and also ensure that we don't hold the lock during the
        // callback.
        CriticalSectionScoped cs(&_critSect);
        saturationWarning = _saturationWarning;
        if (_saturationWarning)
            _saturationWarning = false;
    }

    if (saturationWarning)
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_voiceEngineObserverPtr)
        {
            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::OnPeriodicProcess() =>"
                         " CallbackOnError(VE_SATURATION_WARNING)");
            _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
        }
    }
}


void TransmitMixer::PlayNotification(int32_t id,
                                     uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayNotification(id=%d, durationMs=%d)",
                 id, durationMs);

    // Not implemented yet.
}

void TransmitMixer::RecordNotification(int32_t id,
                                       uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "TransmitMixer::RecordNotification(id=%d, durationMs=%d)",
                 id, durationMs);

    // Not implemented yet.
}

void TransmitMixer::PlayFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayFileEnded(id=%d)", id);

    assert(id == _filePlayerId);

    CriticalSectionScoped cs(&_critSect);

    _filePlaying = false;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayFileEnded() => "
                 "file player module is shut down");
}

void
TransmitMixer::RecordFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RecordFileEnded(id=%d)", id);

    if (id == _fileRecorderId)
    {
        CriticalSectionScoped cs(&_critSect);
        _fileRecording = false;
        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordFileEnded() => fileRecorder module "
                     "is shut down");
    } else if (id == _fileCallRecorderId)
    {
        CriticalSectionScoped cs(&_critSect);
        _fileCallRecording = false;
        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordFileEnded() => fileCallRecorder "
                     "module is shut down");
    }
}

int32_t
TransmitMixer::Create(TransmitMixer*& mixer, uint32_t instanceId)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
                 "TransmitMixer::Create(instanceId=%d)", instanceId);
    mixer = new TransmitMixer(instanceId);
    if (mixer == NULL)
    {
        WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
                     "TransmitMixer::Create() unable to allocate memory "
                     "for mixer");
        return -1;
    }
    return 0;
}

void
TransmitMixer::Destroy(TransmitMixer*& mixer)
{
    if (mixer)
    {
        delete mixer;
        mixer = NULL;
    }
}

TransmitMixer::TransmitMixer(uint32_t instanceId) :
    _engineStatisticsPtr(NULL),
    _channelManagerPtr(NULL),
    audioproc_(NULL),
    _voiceEngineObserverPtr(NULL),
    _processThreadPtr(NULL),
    _filePlayerPtr(NULL),
    _fileRecorderPtr(NULL),
    _fileCallRecorderPtr(NULL),
    // Use IDs in the range instanceId + 1024..1026 to avoid colliding with
    // channel IDs; we will never get anywhere near 1024 channels.
    _filePlayerId(instanceId + 1024),
    _fileRecorderId(instanceId + 1025),
    _fileCallRecorderId(instanceId + 1026),
    _filePlaying(false),
    _fileRecording(false),
    _fileCallRecording(false),
    _audioLevel(),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    _typingNoiseWarningPending(false),
    _typingNoiseDetected(false),
#endif
    _saturationWarning(false),
    _instanceId(instanceId),
    _mixFileWithMicrophone(false),
    _captureLevel(0),
    external_postproc_ptr_(NULL),
    external_preproc_ptr_(NULL),
    _mute(false),
    _remainingMuteMicTimeMs(0),
    stereo_codec_(false),
    swap_stereo_channels_(false)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::TransmitMixer() - ctor");
}

TransmitMixer::~TransmitMixer()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::~TransmitMixer() - dtor");
    _monitorModule.DeRegisterObserver();
    if (_processThreadPtr)
    {
        _processThreadPtr->DeRegisterModule(&_monitorModule);
    }
    DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed);
    DeRegisterExternalMediaProcessing(kRecordingPreprocessing);
    {
        CriticalSectionScoped cs(&_critSect);
        if (_fileRecorderPtr)
        {
            _fileRecorderPtr->RegisterModuleFileCallback(NULL);
            _fileRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
            _fileRecorderPtr = NULL;
        }
        if (_fileCallRecorderPtr)
        {
            _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
            _fileCallRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
            _fileCallRecorderPtr = NULL;
        }
        if (_filePlayerPtr)
        {
            _filePlayerPtr->RegisterModuleFileCallback(NULL);
            _filePlayerPtr->StopPlayingFile();
            FilePlayer::DestroyFilePlayer(_filePlayerPtr);
            _filePlayerPtr = NULL;
        }
    }
    delete &_critSect;
    delete &_callbackCritSect;
}

int32_t
TransmitMixer::SetEngineInformation(ProcessThread& processThread,
                                    Statistics& engineStatistics,
                                    ChannelManager& channelManager)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetEngineInformation()");

    _processThreadPtr = &processThread;
    _engineStatisticsPtr = &engineStatistics;
    _channelManagerPtr = &channelManager;

    _processThreadPtr->RegisterModule(&_monitorModule);
    _monitorModule.RegisterObserver(*this);

    return 0;
}

int32_t
TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RegisterVoiceEngineObserver()");
    CriticalSectionScoped cs(&_callbackCritSect);

    if (_voiceEngineObserverPtr)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceError,
            "RegisterVoiceEngineObserver() observer already enabled");
        return -1;
    }
    _voiceEngineObserverPtr = &observer;
    return 0;
}

int32_t
TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetAudioProcessingModule("
                 "audioProcessingModule=0x%x)",
                 audioProcessingModule);
    audioproc_ = audioProcessingModule;
    return 0;
}

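// Returns, via the out-parameters, the highest sample rate and channel count
// among the send codecs of all currently sending channels (defaults to 8 kHz
// mono when no channel is sending).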
void TransmitMixer::GetSendCodecInfo(int* max_sample_rate,
                                     size_t* max_channels) {
  *max_sample_rate = 8000;
  *max_channels = 1;
  for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
       it.Increment()) {
    Channel* channel = it.GetChannel();
    if (channel->Sending()) {
      CodecInst codec;
      channel->GetSendCodec(codec);
      *max_sample_rate = std::max(*max_sample_rate, codec.plfreq);
      *max_channels = std::max(*max_channels, codec.channels);
    }
  }
}

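// Runs the capture-side pipeline on one 10 ms block of microphone audio:
// resample/remix into |_audioFrame|, external preprocessing callback, APM
// processing, optional stereo swap, typing detection, muting, file mixing,
// file recording, external postprocessing callback, and level measurement.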
int32_t
TransmitMixer::PrepareDemux(const void* audioSamples,
                            size_t nSamples,
                            size_t nChannels,
                            uint32_t samplesPerSec,
                            uint16_t totalDelayMS,
                            int32_t clockDrift,
                            uint16_t currentMicLevel,
                            bool keyPressed)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PrepareDemux(nSamples=%" PRIuS ", "
                 "nChannels=%" PRIuS ", samplesPerSec=%u, totalDelayMS=%u, "
                 "clockDrift=%d, currentMicLevel=%u)",
                 nSamples, nChannels, samplesPerSec, totalDelayMS, clockDrift,
                 currentMicLevel);

    // --- Resample input audio and create/store the initial audio frame
    GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
                       nSamples,
                       nChannels,
                       samplesPerSec);

    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (external_preproc_ptr_) {
            external_preproc_ptr_->Process(-1, kRecordingPreprocessing,
                                           _audioFrame.data_,
                                           _audioFrame.samples_per_channel_,
                                           _audioFrame.sample_rate_hz_,
                                           _audioFrame.num_channels_ == 2);
        }
    }

    // --- Near-end audio processing.
    ProcessAudio(totalDelayMS, clockDrift, currentMicLevel, keyPressed);

    if (swap_stereo_channels_ && stereo_codec_)
        // Only bother swapping if we're using a stereo codec.
        AudioFrameOperations::SwapStereoChannels(&_audioFrame);

    // --- Annoying typing detection (utilizes the APM/VAD decision)
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    TypingDetection(keyPressed);
#endif

    // --- Mute during DTMF tone if direct feedback is enabled
    if (_remainingMuteMicTimeMs > 0)
    {
        AudioFrameOperations::Mute(_audioFrame);
        _remainingMuteMicTimeMs -= 10;
        if (_remainingMuteMicTimeMs < 0)
        {
            _remainingMuteMicTimeMs = 0;
        }
    }

    // --- Mute signal
    if (_mute)
    {
        AudioFrameOperations::Mute(_audioFrame);
    }

    // --- Mix with file (does not affect the mixing frequency)
    if (_filePlaying)
    {
        MixOrReplaceAudioWithFile(_audioFrame.sample_rate_hz_);
    }

    // --- Record to file
    bool file_recording = false;
    {
        CriticalSectionScoped cs(&_critSect);
        file_recording = _fileRecording;
    }
    if (file_recording)
    {
        RecordAudioToFile(_audioFrame.sample_rate_hz_);
    }

    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (external_postproc_ptr_) {
            external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed,
                                            _audioFrame.data_,
                                            _audioFrame.samples_per_channel_,
                                            _audioFrame.sample_rate_hz_,
                                            _audioFrame.num_channels_ == 2);
        }
    }

    // --- Measure audio level of speech after all processing.
    _audioLevel.ComputeLevel(_audioFrame);
    return 0;
}

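// Passes the processed |_audioFrame| to every sending channel; Demultiplex()
// makes a per-channel copy which PrepareEncodeAndSend() then readies for
// encoding. The overloads taking |voe_channels| operate on that subset only.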
int32_t
TransmitMixer::DemuxAndMix()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::DemuxAndMix()");

    for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
         it.Increment())
    {
        Channel* channelPtr = it.GetChannel();
        if (channelPtr->Sending())
        {
            // Demultiplex makes a copy of its input.
            channelPtr->Demultiplex(_audioFrame);
            channelPtr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
        }
    }
    return 0;
}

void TransmitMixer::DemuxAndMix(const int voe_channels[],
                                size_t number_of_voe_channels) {
  for (size_t i = 0; i < number_of_voe_channels; ++i) {
    voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
    voe::Channel* channel_ptr = ch.channel();
    if (channel_ptr) {
      if (channel_ptr->Sending()) {
        // Demultiplex makes a copy of its input.
        channel_ptr->Demultiplex(_audioFrame);
        channel_ptr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
      }
    }
  }
}

int32_t
TransmitMixer::EncodeAndSend()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::EncodeAndSend()");

    for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
         it.Increment())
    {
        Channel* channelPtr = it.GetChannel();
        if (channelPtr->Sending())
        {
            channelPtr->EncodeAndSend();
        }
    }
    return 0;
}

void TransmitMixer::EncodeAndSend(const int voe_channels[],
                                  size_t number_of_voe_channels) {
  for (size_t i = 0; i < number_of_voe_channels; ++i) {
    voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
    voe::Channel* channel_ptr = ch.channel();
    if (channel_ptr && channel_ptr->Sending())
      channel_ptr->EncodeAndSend();
  }
}

uint32_t TransmitMixer::CaptureLevel() const
{
    return _captureLevel;
}

void
TransmitMixer::UpdateMuteMicrophoneTime(uint32_t lengthMs)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::UpdateMuteMicrophoneTime(lengthMs=%d)",
                 lengthMs);
    _remainingMuteMicTimeMs = lengthMs;
}

int32_t
TransmitMixer::StopSend()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopSend()");
    _audioLevel.Clear();
    return 0;
}

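// Starts playing a file (or an InStream, in the overload below) into the
// capture path; MixOrReplaceAudioWithFile() later mixes it with, or
// substitutes it for, the microphone signal depending on SetMixWithMicStatus().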
int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName,
                                                bool loop,
                                                FileFormats format,
                                                int startPosition,
                                                float volumeScaling,
                                                int stopPosition,
                                                const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartPlayingFileAsMicrophone("
                 "fileNameUTF8[]=%s,loop=%d, format=%d, volumeScaling=%5.3f,"
                 " startPosition=%d, stopPosition=%d)", fileName, loop,
                 format, volumeScaling, startPosition, stopPosition);

    if (_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() is already playing");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_filePlayerPtr)
    {
        _filePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
    }

    // Dynamically create the instance
    _filePlayerPtr
        = FilePlayer::CreateFilePlayer(_filePlayerId,
                                       (const FileFormats) format);

    if (_filePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartPlayingFileAsMicrophone() filePlayer format is not correct");
        return -1;
    }

    const uint32_t notificationTime(0);

    if (_filePlayerPtr->StartPlayingFile(
        fileName,
        loop,
        startPosition,
        volumeScaling,
        notificationTime,
        stopPosition,
        (const CodecInst*) codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFile() failed to start file playout");
        _filePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
        return -1;
    }

    _filePlayerPtr->RegisterModuleFileCallback(this);
    _filePlaying = true;

    return 0;
}

int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream,
                                                FileFormats format,
                                                int startPosition,
                                                float volumeScaling,
                                                int stopPosition,
                                                const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "TransmitMixer::StartPlayingFileAsMicrophone(format=%d,"
                 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
                 format, volumeScaling, startPosition, stopPosition);

    if (stream == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFileAsMicrophone() NULL as input stream");
        return -1;
    }

    if (_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() is already playing");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_filePlayerPtr)
    {
        _filePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
    }

    // Dynamically create the instance
    _filePlayerPtr
        = FilePlayer::CreateFilePlayer(_filePlayerId,
                                       (const FileFormats) format);

    if (_filePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceWarning,
            "StartPlayingFileAsMicrophone() filePlayer format is not correct");
        return -1;
    }

    const uint32_t notificationTime(0);

    if (_filePlayerPtr->StartPlayingFile(
        (InStream&) *stream,
        startPosition,
        volumeScaling,
        notificationTime,
        stopPosition,
        (const CodecInst*) codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFile() failed to start file playout");
        _filePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
        return -1;
    }
    _filePlayerPtr->RegisterModuleFileCallback(this);
    _filePlaying = true;

    return 0;
}

int TransmitMixer::StopPlayingFileAsMicrophone()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "TransmitMixer::StopPlayingFileAsMicrophone()");

    if (!_filePlaying)
    {
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    if (_filePlayerPtr->StopPlayingFile() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_CANNOT_STOP_PLAYOUT, kTraceError,
            "StopPlayingFile() could not stop playing file");
        return -1;
    }

    _filePlayerPtr->RegisterModuleFileCallback(NULL);
    FilePlayer::DestroyFilePlayer(_filePlayerPtr);
    _filePlayerPtr = NULL;
    _filePlaying = false;

    return 0;
}

int TransmitMixer::IsPlayingFileAsMicrophone() const
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::IsPlayingFileAsMicrophone()");
    return _filePlaying;
}

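// File/stream recording of the microphone signal. The output format is
// derived from |codecInst|: L16/PCMU/PCMA are written as WAV, a NULL codec
// selects 16 kHz PCM, and anything else is written as a compressed file.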
int TransmitMixer::StartRecordingMicrophone(const char* fileName,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingMicrophone(fileName=%s)",
                 fileName);

    CriticalSectionScoped cs(&_critSect);

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels > 2)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    // Destroy the old instance
    if (_fileRecorderPtr)
    {
        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
    }

    _fileRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileRecorderId,
                                         (const FileFormats) format);
    if (_fileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format is not correct");
        return -1;
    }

    if (_fileRecorderPtr->StartRecordingAudioFile(
        fileName,
        (const CodecInst&) *codecInst,
        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
        return -1;
    }
    _fileRecorderPtr->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}

int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingMicrophone()");

    CriticalSectionScoped cs(&_critSect);

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    // Destroy the old instance
    if (_fileRecorderPtr)
    {
        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
    }

    _fileRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileRecorderId,
                                         (const FileFormats) format);
    if (_fileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format is not correct");
        return -1;
    }

    if (_fileRecorderPtr->StartRecordingAudioFile(*stream,
                                                  *codecInst,
                                                  notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
        return -1;
    }

    _fileRecorderPtr->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}


int TransmitMixer::StopRecordingMicrophone()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopRecordingMicrophone()");

    CriticalSectionScoped cs(&_critSect);

    if (!_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StopRecordingMicrophone() is not recording");
        return 0;
    }

    if (_fileRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }
    _fileRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
    _fileRecorderPtr = NULL;
    _fileRecording = false;

    return 0;
}

int TransmitMixer::StartRecordingCall(const char* fileName,
                                      const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingCall(fileName=%s)", fileName);

    if (_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingCall() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingCall() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileCallRecorderPtr)
    {
        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
    }

    _fileCallRecorderPtr
        = FileRecorder::CreateFileRecorder(_fileCallRecorderId,
                                           (const FileFormats) format);
    if (_fileCallRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingCall() fileRecorder format is not correct");
        return -1;
    }

    if (_fileCallRecorderPtr->StartRecordingAudioFile(
        fileName,
        (const CodecInst&) *codecInst,
        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileCallRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
        return -1;
    }
    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
    _fileCallRecording = true;

    return 0;
}

int TransmitMixer::StartRecordingCall(OutStream* stream,
                                      const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingCall()");

    if (_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingCall() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingCall() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileCallRecorderPtr)
    {
        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
    }

    _fileCallRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileCallRecorderId,
                                         (const FileFormats) format);
    if (_fileCallRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingCall() fileRecorder format is not correct");
        return -1;
    }

    if (_fileCallRecorderPtr->StartRecordingAudioFile(*stream,
                                                      *codecInst,
                                                      notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileCallRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
        return -1;
    }

    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
    _fileCallRecording = true;

    return 0;
}

int TransmitMixer::StopRecordingCall()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopRecordingCall()");

    if (!_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                     "StopRecordingCall() file is not recording");
        return -1;
    }

    CriticalSectionScoped cs(&_critSect);

    if (_fileCallRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }

    _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
    _fileCallRecorderPtr = NULL;
    _fileCallRecording = false;

    return 0;
}

void
TransmitMixer::SetMixWithMicStatus(bool mix)
{
    _mixFileWithMicrophone = mix;
}

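// External media processing hooks: kRecordingPreprocessing is invoked before
// APM processing in PrepareDemux(), kRecordingAllChannelsMixed after all other
// capture-side processing.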
int TransmitMixer::RegisterExternalMediaProcessing(
    VoEMediaProcess* object,
    ProcessingTypes type) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "TransmitMixer::RegisterExternalMediaProcessing()");

  CriticalSectionScoped cs(&_callbackCritSect);
  if (!object) {
    return -1;
  }

  // Store the callback object according to the processing type.
  if (type == kRecordingAllChannelsMixed) {
    external_postproc_ptr_ = object;
  } else if (type == kRecordingPreprocessing) {
    external_preproc_ptr_ = object;
  } else {
    return -1;
  }
  return 0;
}

int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "TransmitMixer::DeRegisterExternalMediaProcessing()");

  CriticalSectionScoped cs(&_callbackCritSect);
  if (type == kRecordingAllChannelsMixed) {
    external_postproc_ptr_ = NULL;
  } else if (type == kRecordingPreprocessing) {
    external_preproc_ptr_ = NULL;
  } else {
    return -1;
  }
  return 0;
}

int
TransmitMixer::SetMute(bool enable)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetMute(enable=%d)", enable);
    _mute = enable;
    return 0;
}

bool
TransmitMixer::Mute() const
{
    return _mute;
}

int8_t TransmitMixer::AudioLevel() const
{
    // Speech + file level [0,9]
    return _audioLevel.Level();
}

int16_t TransmitMixer::AudioLevelFullRange() const
{
    // Speech + file level [0,32767]
    return _audioLevel.LevelFullRange();
}

bool TransmitMixer::IsRecordingCall()
{
    return _fileCallRecording;
}

bool TransmitMixer::IsRecordingMic()
{
    CriticalSectionScoped cs(&_critSect);
    return _fileRecording;
}

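// Converts the raw capture buffer into |_audioFrame|, choosing the lowest
// native APM rate that is at least the minimum of the input rate and the
// send-codec rate (capped for AECM), and remixing to at most the codec's
// channel count.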
void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
                                       size_t samples_per_channel,
                                       size_t num_channels,
                                       int sample_rate_hz) {
  int codec_rate;
  size_t num_codec_channels;
  GetSendCodecInfo(&codec_rate, &num_codec_channels);
  stereo_codec_ = num_codec_channels == 2;

  // We want to process at the lowest rate possible without losing information.
  // Choose the lowest native rate at least equal to the input and codec rates.
  const int min_processing_rate = std::min(sample_rate_hz, codec_rate);
  for (size_t i = 0; i < AudioProcessing::kNumNativeSampleRates; ++i) {
    _audioFrame.sample_rate_hz_ = AudioProcessing::kNativeSampleRatesHz[i];
    if (_audioFrame.sample_rate_hz_ >= min_processing_rate) {
      break;
    }
  }
  if (audioproc_->echo_control_mobile()->is_enabled()) {
    // AECM only supports 8 and 16 kHz.
    _audioFrame.sample_rate_hz_ = std::min(
        _audioFrame.sample_rate_hz_, AudioProcessing::kMaxAECMSampleRateHz);
  }
  _audioFrame.num_channels_ = std::min(num_channels, num_codec_channels);
  RemixAndResample(audio, samples_per_channel, num_channels, sample_rate_hz,
                   &resampler_, &_audioFrame);
}

int32_t TransmitMixer::RecordAudioToFile(
    uint32_t mixingFrequency)
{
    CriticalSectionScoped cs(&_critSect);
    if (_fileRecorderPtr == NULL)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordAudioToFile() file recorder does "
                     "not exist");
        return -1;
    }

    if (_fileRecorderPtr->RecordAudioToFile(_audioFrame) != 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordAudioToFile() file recording "
                     "failed");
        return -1;
    }

    return 0;
}

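// Pulls 10 ms of audio from the file player and, depending on
// |_mixFileWithMicrophone|, either mixes it into |_audioFrame| with
// saturation protection or replaces the microphone signal entirely.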
int32_t TransmitMixer::MixOrReplaceAudioWithFile(
    int mixingFrequency)
{
    rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);

    size_t fileSamples(0);
    {
        CriticalSectionScoped cs(&_critSect);
        if (_filePlayerPtr == NULL)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, -1),
                         "TransmitMixer::MixOrReplaceAudioWithFile() "
                         "file player does not exist");
            return -1;
        }

        if (_filePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
                                                 fileSamples,
                                                 mixingFrequency) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::MixOrReplaceAudioWithFile() file"
                         " mixing failed");
            return -1;
        }
    }

    assert(_audioFrame.samples_per_channel_ == fileSamples);

    if (_mixFileWithMicrophone)
    {
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        MixWithSat(_audioFrame.data_,
                   _audioFrame.num_channels_,
                   fileBuffer.get(),
                   1,
                   fileSamples);
    } else
    {
        // Replace ACM audio with file.
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        _audioFrame.UpdateFrame(-1,
                                0xFFFFFFFF,
                                fileBuffer.get(),
                                fileSamples,
                                mixingFrequency,
                                AudioFrame::kNormalSpeech,
                                AudioFrame::kVadUnknown,
                                1);
    }
    return 0;
}

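// Runs the AudioProcessing module on |_audioFrame|: reports stream delay,
// analog mic level, drift and key-press state, calls ProcessStream(), then
// stores the AGC's recommended capture level and latches any saturation
// warning for delivery in OnPeriodicProcess().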
void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
                                 int current_mic_level, bool key_pressed) {
  if (audioproc_->set_stream_delay_ms(delay_ms) != 0) {
    // Silently ignore this failure to avoid flooding the logs.
  }

  GainControl* agc = audioproc_->gain_control();
  if (agc->set_stream_analog_level(current_mic_level) != 0) {
    LOG(LS_ERROR) << "set_stream_analog_level failed: current_mic_level = "
                  << current_mic_level;
    assert(false);
  }

  EchoCancellation* aec = audioproc_->echo_cancellation();
  if (aec->is_drift_compensation_enabled()) {
    aec->set_stream_drift_samples(clock_drift);
  }

  audioproc_->set_stream_key_pressed(key_pressed);

  int err = audioproc_->ProcessStream(&_audioFrame);
  if (err != 0) {
    LOG(LS_ERROR) << "ProcessStream() error: " << err;
    assert(false);
  }

  // Store new capture level. Only updated when analog AGC is enabled.
  _captureLevel = agc->stream_analog_level();

  CriticalSectionScoped cs(&_critSect);
  // Triggers a callback in OnPeriodicProcess().
  _saturationWarning |= agc->stream_is_saturated();
}

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
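// Feeds the key-press flag and the APM VAD decision to the typing detector
// and queues VE_TYPING_NOISE_WARNING / VE_TYPING_NOISE_OFF_WARNING callbacks,
// which OnPeriodicProcess() delivers asynchronously.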
void TransmitMixer::TypingDetection(bool keyPressed)
{
  // We let the VAD determine if we're using this feature or not.
  if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown) {
    return;
  }

  bool vadActive = _audioFrame.vad_activity_ == AudioFrame::kVadActive;
  if (_typingDetection.Process(keyPressed, vadActive)) {
    CriticalSectionScoped cs(&_critSect);
    _typingNoiseWarningPending = true;
    _typingNoiseDetected = true;
  } else {
    CriticalSectionScoped cs(&_critSect);
    // If there is already a warning pending, do not change the state.
    // Otherwise set a warning pending if last callback was for noise detected.
    if (!_typingNoiseWarningPending && _typingNoiseDetected) {
      _typingNoiseWarningPending = true;
      _typingNoiseDetected = false;
    }
  }
}
#endif

int TransmitMixer::GetMixingFrequency()
{
    assert(_audioFrame.sample_rate_hz_ != 0);
    return _audioFrame.sample_rate_hz_;
}

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TransmitMixer::TimeSinceLastTyping(int &seconds)
{
    // We check in VoEAudioProcessingImpl that this is only called when
    // typing detection is active.
    seconds = _typingDetection.TimeSinceLastDetectionInSeconds();
    return 0;
}
#endif

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TransmitMixer::SetTypingDetectionParameters(int timeWindow,
                                                int costPerTyping,
                                                int reportingThreshold,
                                                int penaltyDecay,
                                                int typeEventDelay)
{
    _typingDetection.SetParameters(timeWindow,
                                   costPerTyping,
                                   reportingThreshold,
                                   penaltyDecay,
                                   typeEventDelay,
                                   0);
    return 0;
}
#endif

void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
  swap_stereo_channels_ = enable;
}

bool TransmitMixer::IsStereoChannelSwappingEnabled() {
  return swap_stereo_channels_;
}

}  // namespace voe
}  // namespace webrtc