/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/voe_base_impl.h"

#include "webrtc/common.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"
#include "webrtc/voice_engine/output_mixer.h"
#include "webrtc/voice_engine/transmit_mixer.h"
#include "webrtc/voice_engine/utility.h"
#include "webrtc/voice_engine/voice_engine_impl.h"

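// VoEBaseImpl implements the AudioTransport and AudioDeviceObserver callbacks
// that Init() registers with the audio device module (ADM). Captured audio is
// forwarded to the TransmitMixer (optionally through AudioProcessing) and then
// encoded and sent by the individual channels, while playout audio is pulled
// from the OutputMixer and handed back to the ADM.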
namespace webrtc
{

VoEBase* VoEBase::GetInterface(VoiceEngine* voiceEngine)
{
    if (NULL == voiceEngine)
    {
        return NULL;
    }
    VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
    s->AddRef();
    return s;
}

VoEBaseImpl::VoEBaseImpl(voe::SharedData* shared) :
    _voiceEngineObserverPtr(NULL),
    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _voiceEngineObserver(false), _shared(shared)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl() - ctor");
}

VoEBaseImpl::~VoEBaseImpl()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "~VoEBaseImpl() - dtor");

    TerminateInternal();

    delete &_callbackCritSect;
}

void VoEBaseImpl::OnErrorIsReported(ErrorCode error)
{
    CriticalSectionScoped cs(&_callbackCritSect);
    if (_voiceEngineObserver)
    {
        if (_voiceEngineObserverPtr)
        {
            int errCode(0);
            if (error == AudioDeviceObserver::kRecordingError)
            {
                errCode = VE_RUNTIME_REC_ERROR;
                WEBRTC_TRACE(kTraceInfo, kTraceVoice,
                    VoEId(_shared->instance_id(), -1),
                    "VoEBaseImpl::OnErrorIsReported() => VE_RUNTIME_REC_ERROR");
            }
            else if (error == AudioDeviceObserver::kPlayoutError)
            {
                errCode = VE_RUNTIME_PLAY_ERROR;
                WEBRTC_TRACE(kTraceInfo, kTraceVoice,
                    VoEId(_shared->instance_id(), -1),
                    "VoEBaseImpl::OnErrorIsReported() => "
                    "VE_RUNTIME_PLAY_ERROR");
            }
            // Deliver callback (-1 <=> no channel dependency)
            _voiceEngineObserverPtr->CallbackOnError(-1, errCode);
        }
    }
}
void VoEBaseImpl::OnWarningIsReported(WarningCode warning)
{
    CriticalSectionScoped cs(&_callbackCritSect);
    if (_voiceEngineObserver)
    {
        if (_voiceEngineObserverPtr)
        {
            int warningCode(0);
            if (warning == AudioDeviceObserver::kRecordingWarning)
            {
                warningCode = VE_RUNTIME_REC_WARNING;
                WEBRTC_TRACE(kTraceInfo, kTraceVoice,
                    VoEId(_shared->instance_id(), -1),
                    "VoEBaseImpl::OnWarningIsReported() => "
                    "VE_RUNTIME_REC_WARNING");
            }
            else if (warning == AudioDeviceObserver::kPlayoutWarning)
            {
                warningCode = VE_RUNTIME_PLAY_WARNING;
                WEBRTC_TRACE(kTraceInfo, kTraceVoice,
                    VoEId(_shared->instance_id(), -1),
                    "VoEBaseImpl::OnWarningIsReported() => "
                    "VE_RUNTIME_PLAY_WARNING");
            }
            // Deliver callback (-1 <=> no channel dependency)
            _voiceEngineObserverPtr->CallbackOnError(-1, warningCode);
        }
    }
}

int32_t VoEBaseImpl::RecordedDataIsAvailable(
        const void* audioSamples,
        uint32_t nSamples,
        uint8_t nBytesPerSample,
        uint8_t nChannels,
        uint32_t samplesPerSec,
        uint32_t totalDelayMS,
        int32_t clockDrift,
        uint32_t micLevel,
        bool keyPressed,
        uint32_t& newMicLevel)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl::RecordedDataIsAvailable(nSamples=%u, "
                 "nBytesPerSample=%u, nChannels=%u, samplesPerSec=%u, "
                 "totalDelayMS=%u, clockDrift=%d, micLevel=%u)",
                 nSamples, nBytesPerSample, nChannels, samplesPerSec,
                 totalDelayMS, clockDrift, micLevel);
    newMicLevel = static_cast<uint32_t>(ProcessRecordedDataWithAPM(
        NULL, 0, audioSamples, samplesPerSec, nChannels, nSamples,
        totalDelayMS, clockDrift, micLevel, keyPressed));

    return 0;
}

int32_t VoEBaseImpl::NeedMorePlayData(
        uint32_t nSamples,
        uint8_t nBytesPerSample,
        uint8_t nChannels,
        uint32_t samplesPerSec,
        void* audioSamples,
        uint32_t& nSamplesOut,
        int64_t* elapsed_time_ms,
        int64_t* ntp_time_ms)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl::NeedMorePlayData(nSamples=%u, "
                 "nBytesPerSample=%d, nChannels=%d, samplesPerSec=%u)",
                 nSamples, nBytesPerSample, nChannels, samplesPerSec);

    GetPlayoutData(static_cast<int>(samplesPerSec),
                   static_cast<int>(nChannels),
                   static_cast<int>(nSamples), true, audioSamples,
                   elapsed_time_ms, ntp_time_ms);

    nSamplesOut = _audioFrame.samples_per_channel_;

    return 0;
}

int VoEBaseImpl::OnDataAvailable(const int voe_channels[],
                                 int number_of_voe_channels,
                                 const int16_t* audio_data,
                                 int sample_rate,
                                 int number_of_channels,
                                 int number_of_frames,
                                 int audio_delay_milliseconds,
                                 int volume,
                                 bool key_pressed,
                                 bool need_audio_processing) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoEBaseImpl::OnDataAvailable(number_of_voe_channels=%d, "
               "sample_rate=%d, number_of_channels=%d, number_of_frames=%d, "
               "audio_delay_milliseconds=%d, volume=%d, "
               "key_pressed=%d, need_audio_processing=%d)",
               number_of_voe_channels, sample_rate, number_of_channels,
               number_of_frames, audio_delay_milliseconds, volume,
               key_pressed, need_audio_processing);
  if (number_of_voe_channels == 0)
    return 0;

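  // When audio processing is requested, the capture data is routed through
  // ProcessRecordedDataWithAPM(), whose return value is a suggested new
  // microphone volume (0 means the volume should stay unchanged). Otherwise
  // the raw data is pushed directly to the requested channels below.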
  if (need_audio_processing) {
    return ProcessRecordedDataWithAPM(
        voe_channels, number_of_voe_channels, audio_data, sample_rate,
        number_of_channels, number_of_frames, audio_delay_milliseconds,
        0, volume, key_pressed);
  }

  // No need to go through the APM, demultiplex the data to each VoE channel,
  // encode and send to the network.
  for (int i = 0; i < number_of_voe_channels; ++i) {
    // TODO(ajm): In the case where multiple channels are using the same codec
    // rate, this path needlessly does extra conversions. We should convert
    // once and share between channels.
    PushCaptureData(voe_channels[i], audio_data, 16, sample_rate,
                    number_of_channels, number_of_frames);
  }

  // Return 0 to indicate no need to change the volume.
  return 0;
}

void VoEBaseImpl::OnData(int voe_channel, const void* audio_data,
                         int bits_per_sample, int sample_rate,
                         int number_of_channels,
                         int number_of_frames) {
  PushCaptureData(voe_channel, audio_data, bits_per_sample, sample_rate,
                  number_of_channels, number_of_frames);
}

void VoEBaseImpl::PushCaptureData(int voe_channel, const void* audio_data,
                                  int bits_per_sample, int sample_rate,
                                  int number_of_channels,
                                  int number_of_frames) {
  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(voe_channel);
  voe::Channel* channel_ptr = ch.channel();
  if (!channel_ptr)
    return;

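  // Only channels that are actively sending consume capture data; for those,
  // the samples are demultiplexed into the channel's audio frame and then
  // encoded and sent.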
  if (channel_ptr->Sending()) {
    channel_ptr->Demultiplex(static_cast<const int16_t*>(audio_data),
                             sample_rate, number_of_frames,
                             number_of_channels);
    channel_ptr->PrepareEncodeAndSend(sample_rate);
    channel_ptr->EncodeAndSend();
  }
}

void VoEBaseImpl::PullRenderData(int bits_per_sample, int sample_rate,
                                 int number_of_channels, int number_of_frames,
                                 void* audio_data,
                                 int64_t* elapsed_time_ms,
                                 int64_t* ntp_time_ms) {
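  // Render audio is pulled in 10 ms chunks of 16-bit PCM, so number_of_frames
  // must equal sample_rate / 100 (for example, 480 frames per channel at
  // 48 kHz); the asserts below enforce this.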
  assert(bits_per_sample == 16);
  assert(number_of_frames == static_cast<int>(sample_rate / 100));

  GetPlayoutData(sample_rate, number_of_channels, number_of_frames, false,
                 audio_data, elapsed_time_ms, ntp_time_ms);
}

int VoEBaseImpl::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "RegisterVoiceEngineObserver(observer=0x%p)", &observer);
    CriticalSectionScoped cs(&_callbackCritSect);
    if (_voiceEngineObserverPtr)
    {
        _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
            "RegisterVoiceEngineObserver() observer already enabled");
        return -1;
    }

    // Register the observer in all active channels
    for (voe::ChannelManager::Iterator it(&_shared->channel_manager());
         it.IsValid();
         it.Increment()) {
      it.GetChannel()->RegisterVoiceEngineObserver(observer);
    }

    _shared->transmit_mixer()->RegisterVoiceEngineObserver(observer);

    _voiceEngineObserverPtr = &observer;
    _voiceEngineObserver = true;

    return 0;
}

int VoEBaseImpl::DeRegisterVoiceEngineObserver()
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "DeRegisterVoiceEngineObserver()");
    CriticalSectionScoped cs(&_callbackCritSect);
    if (!_voiceEngineObserverPtr)
    {
        _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
            "DeRegisterVoiceEngineObserver() observer already disabled");
        return 0;
    }

    _voiceEngineObserver = false;
    _voiceEngineObserverPtr = NULL;

    // Deregister the observer in all active channels
    for (voe::ChannelManager::Iterator it(&_shared->channel_manager());
         it.IsValid();
         it.Increment()) {
      it.GetChannel()->DeRegisterVoiceEngineObserver();
    }

    return 0;
}

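// Init() brings the engine into an initialized state: it starts the module
// process thread, creates an internal ADM unless an external one is supplied,
// registers this object as the ADM's event observer and audio callback,
// initializes the default playout and recording devices, and configures the
// AudioProcessing components (high-pass filter, drift compensation, noise
// suppression and AGC) with the engine defaults.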
int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
                      AudioProcessing* audioproc)
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "Init(external_adm=0x%p)", external_adm);
    CriticalSectionScoped cs(_shared->crit_sec());

    WebRtcSpl_Init();

    if (_shared->statistics().Initialized())
    {
        return 0;
    }

    if (_shared->process_thread())
    {
        if (_shared->process_thread()->Start() != 0)
        {
            _shared->SetLastError(VE_THREAD_ERROR, kTraceError,
                "Init() failed to start module process thread");
            return -1;
        }
    }

    // Create an internal ADM if the user has not added an external
    // ADM implementation as input to Init().
    if (external_adm == NULL)
    {
        // Create the internal ADM implementation.
        _shared->set_audio_device(AudioDeviceModuleImpl::Create(
            VoEId(_shared->instance_id(), -1), _shared->audio_device_layer()));

        if (_shared->audio_device() == NULL)
        {
            _shared->SetLastError(VE_NO_MEMORY, kTraceCritical,
                "Init() failed to create the ADM");
            return -1;
        }
    }
    else
    {
        // Use the already existing external ADM implementation.
        _shared->set_audio_device(external_adm);
        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
            "An external ADM implementation will be used in VoiceEngine");
    }

    // Register the ADM to the process thread, which will drive the error
    // callback mechanism
    if (_shared->process_thread() &&
        _shared->process_thread()->RegisterModule(_shared->audio_device()) != 0)
    {
        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
            "Init() failed to register the ADM");
        return -1;
    }

    bool available(false);

    // --------------------
    // Reinitialize the ADM

    // Register the AudioObserver implementation
    if (_shared->audio_device()->RegisterEventObserver(this) != 0) {
      _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
          "Init() failed to register event observer for the ADM");
    }

    // Register the AudioTransport implementation
    if (_shared->audio_device()->RegisterAudioCallback(this) != 0) {
      _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
          "Init() failed to register audio callback for the ADM");
    }

    // ADM initialization
    if (_shared->audio_device()->Init() != 0)
    {
        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
            "Init() failed to initialize the ADM");
        return -1;
    }

    // Initialize the default speaker
    if (_shared->audio_device()->SetPlayoutDevice(
            WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE) != 0)
    {
        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceInfo,
            "Init() failed to set the default output device");
    }
    if (_shared->audio_device()->InitSpeaker() != 0)
    {
        _shared->SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceInfo,
            "Init() failed to initialize the speaker");
    }

    // Initialize the default microphone
    if (_shared->audio_device()->SetRecordingDevice(
            WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE) != 0)
    {
        _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceInfo,
            "Init() failed to set the default input device");
    }
    if (_shared->audio_device()->InitMicrophone() != 0)
    {
        _shared->SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceInfo,
            "Init() failed to initialize the microphone");
    }

    // Set number of channels
    if (_shared->audio_device()->StereoPlayoutIsAvailable(&available) != 0) {
      _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
          "Init() failed to query stereo playout mode");
    }
    if (_shared->audio_device()->SetStereoPlayout(available) != 0)
    {
        _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
            "Init() failed to set mono/stereo playout mode");
    }

    // TODO(andrew): These functions don't tell us whether stereo recording
    // is truly available. We simply set the AudioProcessing input to stereo
    // here, because we have to wait until receiving the first frame to
    // determine the actual number of channels anyway.
    //
    // These functions may be changed; tracked here:
    // http://code.google.com/p/webrtc/issues/detail?id=204
    _shared->audio_device()->StereoRecordingIsAvailable(&available);
    if (_shared->audio_device()->SetStereoRecording(available) != 0)
    {
        _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
            "Init() failed to set mono/stereo recording mode");
    }

    if (!audioproc) {
      audioproc = AudioProcessing::Create(VoEId(_shared->instance_id(), -1));
      if (!audioproc) {
        LOG(LS_ERROR) << "Failed to create AudioProcessing.";
        _shared->SetLastError(VE_NO_MEMORY);
        return -1;
      }
    }
    _shared->set_audio_processing(audioproc);

    // Set the error state for any failures in this block.
    _shared->SetLastError(VE_APM_ERROR);
    // Configure AudioProcessing components.
    if (audioproc->high_pass_filter()->Enable(true) != 0) {
      LOG_FERR1(LS_ERROR, high_pass_filter()->Enable, true);
      return -1;
    }
    if (audioproc->echo_cancellation()->enable_drift_compensation(false) != 0) {
      LOG_FERR1(LS_ERROR, enable_drift_compensation, false);
      return -1;
    }
    if (audioproc->noise_suppression()->set_level(kDefaultNsMode) != 0) {
      LOG_FERR1(LS_ERROR, noise_suppression()->set_level, kDefaultNsMode);
      return -1;
    }
    GainControl* agc = audioproc->gain_control();
    if (agc->set_analog_level_limits(kMinVolumeLevel, kMaxVolumeLevel) != 0) {
      LOG_FERR2(LS_ERROR, agc->set_analog_level_limits, kMinVolumeLevel,
                kMaxVolumeLevel);
      return -1;
    }
    if (agc->set_mode(kDefaultAgcMode) != 0) {
      LOG_FERR1(LS_ERROR, agc->set_mode, kDefaultAgcMode);
      return -1;
    }
    if (agc->Enable(kDefaultAgcState) != 0) {
      LOG_FERR1(LS_ERROR, agc->Enable, kDefaultAgcState);
      return -1;
    }
    _shared->SetLastError(0);  // Clear error state.

#ifdef WEBRTC_VOICE_ENGINE_AGC
    bool agc_enabled = agc->mode() == GainControl::kAdaptiveAnalog &&
                       agc->is_enabled();
    if (_shared->audio_device()->SetAGC(agc_enabled) != 0) {
      LOG_FERR1(LS_ERROR, audio_device()->SetAGC, agc_enabled);
      _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR);
      // TODO(ajm): No error return here due to
      // https://code.google.com/p/webrtc/issues/detail?id=1464
    }
#endif

    return _shared->statistics().SetInitialized();
}

int VoEBaseImpl::Terminate()
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "Terminate()");
    CriticalSectionScoped cs(_shared->crit_sec());
    return TerminateInternal();
}

int VoEBaseImpl::CreateChannel() {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "CreateChannel()");
  CriticalSectionScoped cs(_shared->crit_sec());
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  voe::ChannelOwner channel_owner = _shared->channel_manager().CreateChannel();

  return InitializeChannel(&channel_owner);
}

int VoEBaseImpl::CreateChannel(const Config& config) {
  CriticalSectionScoped cs(_shared->crit_sec());
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  voe::ChannelOwner channel_owner = _shared->channel_manager().CreateChannel(
      config);
  return InitializeChannel(&channel_owner);
}

int VoEBaseImpl::InitializeChannel(voe::ChannelOwner* channel_owner)
{
    if (channel_owner->channel()->SetEngineInformation(
            _shared->statistics(),
            *_shared->output_mixer(),
            *_shared->transmit_mixer(),
            *_shared->process_thread(),
            *_shared->audio_device(),
            _voiceEngineObserverPtr,
            &_callbackCritSect) != 0) {
      _shared->SetLastError(
          VE_CHANNEL_NOT_CREATED,
          kTraceError,
          "CreateChannel() failed to associate engine and channel."
          " Destroying channel.");
      _shared->channel_manager()
          .DestroyChannel(channel_owner->channel()->ChannelId());
      return -1;
    } else if (channel_owner->channel()->Init() != 0) {
      _shared->SetLastError(
          VE_CHANNEL_NOT_CREATED,
          kTraceError,
          "CreateChannel() failed to initialize channel. Destroying"
          " channel.");
      _shared->channel_manager()
          .DestroyChannel(channel_owner->channel()->ChannelId());
      return -1;
    }

    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_shared->instance_id(), -1),
                 "CreateChannel() => %d",
                 channel_owner->channel()->ChannelId());
    return channel_owner->channel()->ChannelId();
}

int VoEBaseImpl::DeleteChannel(int channel)
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "DeleteChannel(channel=%d)", channel);
    CriticalSectionScoped cs(_shared->crit_sec());

    if (!_shared->statistics().Initialized())
    {
        _shared->SetLastError(VE_NOT_INITED, kTraceError);
        return -1;
    }

    {
        voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
        voe::Channel* channelPtr = ch.channel();
        if (channelPtr == NULL)
        {
            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
                "DeleteChannel() failed to locate channel");
            return -1;
        }
    }

    _shared->channel_manager().DestroyChannel(channel);

    if (StopSend() != 0)
    {
        return -1;
    }

    if (StopPlayout() != 0)
    {
        return -1;
    }

    return 0;
}

int VoEBaseImpl::StartReceive(int channel)
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "StartReceive(channel=%d)", channel);
    CriticalSectionScoped cs(_shared->crit_sec());
    if (!_shared->statistics().Initialized())
    {
        _shared->SetLastError(VE_NOT_INITED, kTraceError);
        return -1;
    }
    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
    voe::Channel* channelPtr = ch.channel();
    if (channelPtr == NULL)
    {
        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
            "StartReceive() failed to locate channel");
        return -1;
    }
    return channelPtr->StartReceiving();
}

int VoEBaseImpl::StopReceive(int channel)
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "StopReceive(channel=%d)", channel);
    CriticalSectionScoped cs(_shared->crit_sec());
    if (!_shared->statistics().Initialized())
    {
        _shared->SetLastError(VE_NOT_INITED, kTraceError);
        return -1;
    }
    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
    voe::Channel* channelPtr = ch.channel();
    if (channelPtr == NULL)
    {
        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
            "StopReceive() failed to locate channel");
        return -1;
    }
    return channelPtr->StopReceiving();
}

int VoEBaseImpl::StartPlayout(int channel)
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "StartPlayout(channel=%d)", channel);
    CriticalSectionScoped cs(_shared->crit_sec());
    if (!_shared->statistics().Initialized())
    {
        _shared->SetLastError(VE_NOT_INITED, kTraceError);
        return -1;
    }
    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
    voe::Channel* channelPtr = ch.channel();
    if (channelPtr == NULL)
    {
        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
            "StartPlayout() failed to locate channel");
        return -1;
    }
    if (channelPtr->Playing())
    {
        return 0;
    }
    if (StartPlayout() != 0)
    {
        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
            "StartPlayout() failed to start playout");
        return -1;
    }
    return channelPtr->StartPlayout();
}

int VoEBaseImpl::StopPlayout(int channel)
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "StopPlayout(channel=%d)", channel);
    CriticalSectionScoped cs(_shared->crit_sec());
    if (!_shared->statistics().Initialized())
    {
        _shared->SetLastError(VE_NOT_INITED, kTraceError);
        return -1;
    }
    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
    voe::Channel* channelPtr = ch.channel();
    if (channelPtr == NULL)
    {
        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
            "StopPlayout() failed to locate channel");
        return -1;
    }
    if (channelPtr->StopPlayout() != 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                     VoEId(_shared->instance_id(), -1),
                     "StopPlayout() failed to stop playout for channel %d",
                     channel);
    }
    return StopPlayout();
}

int VoEBaseImpl::StartSend(int channel)
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "StartSend(channel=%d)", channel);
    CriticalSectionScoped cs(_shared->crit_sec());
    if (!_shared->statistics().Initialized())
    {
        _shared->SetLastError(VE_NOT_INITED, kTraceError);
        return -1;
    }
    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
    voe::Channel* channelPtr = ch.channel();
    if (channelPtr == NULL)
    {
        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
            "StartSend() failed to locate channel");
        return -1;
    }
    if (channelPtr->Sending())
    {
        return 0;
    }
    if (StartSend() != 0)
    {
        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
            "StartSend() failed to start recording");
        return -1;
    }
    return channelPtr->StartSend();
}

int VoEBaseImpl::StopSend(int channel)
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "StopSend(channel=%d)", channel);
    CriticalSectionScoped cs(_shared->crit_sec());
    if (!_shared->statistics().Initialized())
    {
        _shared->SetLastError(VE_NOT_INITED, kTraceError);
        return -1;
    }
    voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
    voe::Channel* channelPtr = ch.channel();
    if (channelPtr == NULL)
    {
        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
            "StopSend() failed to locate channel");
        return -1;
    }
    if (channelPtr->StopSend() != 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                     VoEId(_shared->instance_id(), -1),
                     "StopSend() failed to stop sending for channel %d",
                     channel);
    }
    return StopSend();
}

int VoEBaseImpl::GetVersion(char version[1024])
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "GetVersion(version=?)");
    assert(kVoiceEngineVersionMaxMessageSize == 1024);

    if (version == NULL)
    {
        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError);
        return (-1);
    }

    char versionBuf[kVoiceEngineVersionMaxMessageSize];
    char* versionPtr = versionBuf;

    int32_t len = 0;
    int32_t accLen = 0;

    len = AddVoEVersion(versionPtr);
    if (len == -1)
    {
        return -1;
    }
    versionPtr += len;
    accLen += len;
    assert(accLen < kVoiceEngineVersionMaxMessageSize);

    len = AddBuildInfo(versionPtr);
    if (len == -1)
    {
        return -1;
    }
    versionPtr += len;
    accLen += len;
    assert(accLen < kVoiceEngineVersionMaxMessageSize);

#ifdef WEBRTC_EXTERNAL_TRANSPORT
    len = AddExternalTransportBuild(versionPtr);
    if (len == -1)
    {
        return -1;
    }
    versionPtr += len;
    accLen += len;
    assert(accLen < kVoiceEngineVersionMaxMessageSize);
#endif

    memcpy(version, versionBuf, accLen);
    version[accLen] = '\0';

    // to avoid the truncation in the trace, split the string into parts
    char partOfVersion[256];
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_shared->instance_id(), -1), "GetVersion() =>");
    for (int partStart = 0; partStart < accLen;)
    {
        memset(partOfVersion, 0, sizeof(partOfVersion));
        int partEnd = partStart + 180;
        while (version[partEnd] != '\n' && version[partEnd] != '\0')
        {
            partEnd--;
        }
        if (partEnd < accLen)
        {
            memcpy(partOfVersion, &version[partStart], partEnd - partStart);
        }
        else
        {
            memcpy(partOfVersion, &version[partStart], accLen - partStart);
        }
        partStart = partEnd;
        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                     VoEId(_shared->instance_id(), -1), "%s", partOfVersion);
    }

    return 0;
}

int32_t VoEBaseImpl::AddBuildInfo(char* str) const
{
    return sprintf(str, "Build: %s\n", BUILDINFO);
}

int32_t VoEBaseImpl::AddVoEVersion(char* str) const
{
    return sprintf(str, "VoiceEngine 4.1.0\n");
}

#ifdef WEBRTC_EXTERNAL_TRANSPORT
int32_t VoEBaseImpl::AddExternalTransportBuild(char* str) const
{
    return sprintf(str, "External transport build\n");
}
#endif

int VoEBaseImpl::LastError()
{
    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "LastError()");
    return (_shared->statistics().LastError());
}

int32_t VoEBaseImpl::StartPlayout()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl::StartPlayout()");
    if (_shared->audio_device()->Playing())
    {
        return 0;
    }
    if (!_shared->ext_playout())
    {
        if (_shared->audio_device()->InitPlayout() != 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceVoice,
                         VoEId(_shared->instance_id(), -1),
                         "StartPlayout() failed to initialize playout");
            return -1;
        }
        if (_shared->audio_device()->StartPlayout() != 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceVoice,
                         VoEId(_shared->instance_id(), -1),
                         "StartPlayout() failed to start playout");
            return -1;
        }
    }
    return 0;
}

int32_t VoEBaseImpl::StopPlayout() {
  WEBRTC_TRACE(kTraceInfo,
               kTraceVoice,
               VoEId(_shared->instance_id(), -1),
               "VoEBaseImpl::StopPlayout()");
  // Stop audio-device playing if no channel is playing out
  if (_shared->NumOfPlayingChannels() == 0) {
    if (_shared->audio_device()->StopPlayout() != 0) {
      _shared->SetLastError(VE_CANNOT_STOP_PLAYOUT,
                            kTraceError,
                            "StopPlayout() failed to stop playout");
      return -1;
    }
  }
  return 0;
}

int32_t VoEBaseImpl::StartSend()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl::StartSend()");
    if (_shared->audio_device()->Recording())
    {
        return 0;
    }
    if (!_shared->ext_recording())
    {
        if (_shared->audio_device()->InitRecording() != 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceVoice,
                         VoEId(_shared->instance_id(), -1),
                         "StartSend() failed to initialize recording");
            return -1;
        }
        if (_shared->audio_device()->StartRecording() != 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceVoice,
                         VoEId(_shared->instance_id(), -1),
                         "StartSend() failed to start recording");
            return -1;
        }
    }

    return 0;
}

int32_t VoEBaseImpl::StopSend()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl::StopSend()");

    if (_shared->NumOfSendingChannels() == 0 &&
        !_shared->transmit_mixer()->IsRecordingMic())
    {
        // Stop audio-device recording if no channel is recording
        if (_shared->audio_device()->StopRecording() != 0)
        {
            _shared->SetLastError(VE_CANNOT_STOP_RECORDING, kTraceError,
                "StopSend() failed to stop recording");
            return -1;
        }
        _shared->transmit_mixer()->StopSend();
    }

    return 0;
}

int32_t VoEBaseImpl::TerminateInternal()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl::TerminateInternal()");

    // Delete any remaining channel objects
    _shared->channel_manager().DestroyAllChannels();

    if (_shared->process_thread())
    {
        if (_shared->audio_device())
        {
            if (_shared->process_thread()->
                    DeRegisterModule(_shared->audio_device()) != 0)
            {
                _shared->SetLastError(VE_THREAD_ERROR, kTraceError,
                    "TerminateInternal() failed to deregister ADM");
            }
        }
        if (_shared->process_thread()->Stop() != 0)
        {
            _shared->SetLastError(VE_THREAD_ERROR, kTraceError,
                "TerminateInternal() failed to stop module process thread");
        }
    }

    if (_shared->audio_device())
    {
        if (_shared->audio_device()->StopPlayout() != 0)
        {
            _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
                "TerminateInternal() failed to stop playout");
        }
        if (_shared->audio_device()->StopRecording() != 0)
        {
            _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
                "TerminateInternal() failed to stop recording");
        }
        if (_shared->audio_device()->RegisterEventObserver(NULL) != 0) {
          _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
              "TerminateInternal() failed to de-register event observer "
              "for the ADM");
        }
        if (_shared->audio_device()->RegisterAudioCallback(NULL) != 0) {
          _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
              "TerminateInternal() failed to de-register audio callback "
              "for the ADM");
        }
        if (_shared->audio_device()->Terminate() != 0)
        {
            _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
                "TerminateInternal() failed to terminate the ADM");
        }
        _shared->set_audio_device(NULL);
    }

    if (_shared->audio_processing()) {
      _shared->set_audio_processing(NULL);
    }

    return _shared->statistics().SetUnInitialized();
}

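// Capture-side pipeline for one block of recorded audio: the ADM microphone
// level is scaled into the VoE level range, the frame is handed to the
// TransmitMixer (which performs the channel-independent processing, including
// the APM, in PrepareDemux), the result is demultiplexed, encoded and sent on
// the selected channels, and a new ADM-range volume is returned if the AGC
// changed the capture level (0 means no change).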
int VoEBaseImpl::ProcessRecordedDataWithAPM(
    const int voe_channels[],
    int number_of_voe_channels,
    const void* audio_data,
    uint32_t sample_rate,
    uint8_t number_of_channels,
    uint32_t number_of_frames,
    uint32_t audio_delay_milliseconds,
    int32_t clock_drift,
    uint32_t volume,
    bool key_pressed) {
  assert(_shared->transmit_mixer() != NULL);
  assert(_shared->audio_device() != NULL);

  uint32_t max_volume = 0;
  uint16_t voe_mic_level = 0;
  // Check for zero to skip this calculation; the consumer may use this to
  // indicate no volume is available.
  if (volume != 0) {
    // Scale from ADM to VoE level range
    if (_shared->audio_device()->MaxMicrophoneVolume(&max_volume) == 0) {
      if (max_volume) {
        voe_mic_level = static_cast<uint16_t>(
            (volume * kMaxVolumeLevel +
                static_cast<int>(max_volume / 2)) / max_volume);
      }
    }
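    // Worked example of the scaling above (hypothetical values, and assuming
    // kMaxVolumeLevel is 255): an ADM that reports max_volume = 65535 with
    // volume = 32768 gives voe_mic_level = (32768 * 255 + 32767) / 65535 = 128.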
    // On some systems (e.g., Linux) the computed voe_mic_level can exceed
    // kMaxVolumeLevel. In that case, cap voe_mic_level at kMaxVolumeLevel and
    // use the reported volume as the maximum when scaling back below.
    if (voe_mic_level > kMaxVolumeLevel) {
      voe_mic_level = kMaxVolumeLevel;
      max_volume = volume;
    }
  }

  // Perform channel-independent operations
  // (APM, mix with file, record to file, mute, etc.)
  _shared->transmit_mixer()->PrepareDemux(
      audio_data, number_of_frames, number_of_channels, sample_rate,
      static_cast<uint16_t>(audio_delay_milliseconds), clock_drift,
      voe_mic_level, key_pressed);

  // Copy the audio frame to each sending channel and perform
  // channel-dependent operations (file mixing, mute, etc.), encode and
  // packetize+transmit the RTP packet. When |number_of_voe_channels| == 0,
  // do the operations on all the existing VoE channels; otherwise the
  // operations will be done on specific channels.
  if (number_of_voe_channels == 0) {
    _shared->transmit_mixer()->DemuxAndMix();
    _shared->transmit_mixer()->EncodeAndSend();
  } else {
    _shared->transmit_mixer()->DemuxAndMix(voe_channels,
                                           number_of_voe_channels);
    _shared->transmit_mixer()->EncodeAndSend(voe_channels,
                                             number_of_voe_channels);
  }

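  // The transmit mixer reports the (possibly AGC-adjusted) capture level in
  // the VoE range; it is mapped back to the ADM range below. With the same
  // hypothetical values as above (kMaxVolumeLevel = 255, max_volume = 65535),
  // a new VoE level of 200 would be returned as (200 * 65535 + 127) / 255,
  // i.e. 51400.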
  // Scale from VoE to ADM level range.
  uint32_t new_voe_mic_level = _shared->transmit_mixer()->CaptureLevel();

  if (new_voe_mic_level != voe_mic_level) {
    // Return the new volume if AGC has changed the volume.
    return static_cast<int>(
        (new_voe_mic_level * max_volume +
            static_cast<int>(kMaxVolumeLevel / 2)) / kMaxVolumeLevel);
  }

  // Return 0 to indicate no change on the volume.
  return 0;
}

void VoEBaseImpl::GetPlayoutData(int sample_rate, int number_of_channels,
                                 int number_of_frames, bool feed_data_to_apm,
                                 void* audio_data,
                                 int64_t* elapsed_time_ms,
                                 int64_t* ntp_time_ms) {
  assert(_shared->output_mixer() != NULL);

  // TODO(andrew): if the device is running in mono, we should tell the mixer
  // here so that it will only request mono from AudioCodingModule.
  // Perform mixing of all active participants (channel-based mixing)
  _shared->output_mixer()->MixActiveChannels();

  // Additional operations on the combined signal
  _shared->output_mixer()->DoOperationsOnCombinedSignal(feed_data_to_apm);

  // Retrieve the final output mix (resampled to match the ADM)
  _shared->output_mixer()->GetMixedAudio(sample_rate, number_of_channels,
                                         &_audioFrame);

  assert(number_of_frames == _audioFrame.samples_per_channel_);
  assert(sample_rate == _audioFrame.sample_rate_hz_);

  // Deliver audio (PCM) samples to the ADM
  memcpy(audio_data, _audioFrame.data_,
         sizeof(int16_t) * number_of_frames * number_of_channels);

  *elapsed_time_ms = _audioFrame.elapsed_time_ms_;
  *ntp_time_ms = _audioFrame.ntp_time_ms_;
}

}  // namespace webrtc