1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <assert.h>
12
13 #include "webrtc/modules/audio_device/audio_device_config.h"
14 #include "webrtc/modules/audio_device/audio_device_utility.h"
15 #include "webrtc/modules/audio_device/linux/audio_device_alsa_linux.h"
16
17 #include "webrtc/system_wrappers/interface/event_wrapper.h"
18 #include "webrtc/system_wrappers/interface/sleep.h"
19 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
20 #include "webrtc/system_wrappers/interface/trace.h"
21
22 webrtc_adm_linux_alsa::AlsaSymbolTable AlsaSymbolTable;
23
24 // Accesses ALSA functions through our late-binding symbol table instead of
25 // directly. This way we don't have to link to libasound, which means our binary
26 // will work on systems that don't have it.
27 #define LATE(sym) \
28 LATESYM_GET(webrtc_adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym)
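//
// Illustrative usage of the macro (a sketch; it simply mirrors the direct ALSA
// call that would otherwise be made, e.g. the snd_pcm_open calls further down
// in this file):
//
//   snd_pcm_t* handle = NULL;
//   int err = LATE(snd_pcm_open)(&handle, "default",
//                                SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK);
//
// If AlsaSymbolTable.Load() fails in Init(), libasound is missing and none of
// the LATE() calls are ever reached.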
29
30 // Redefine these here to be able to do late-binding
31 #undef snd_ctl_card_info_alloca
32 #define snd_ctl_card_info_alloca(ptr) \
33 do { *ptr = (snd_ctl_card_info_t *) \
34 __builtin_alloca (LATE(snd_ctl_card_info_sizeof)()); \
35 memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); } while (0)
36
37 #undef snd_pcm_info_alloca
38 #define snd_pcm_info_alloca(pInfo) \
39 do { *pInfo = (snd_pcm_info_t *) \
40 __builtin_alloca (LATE(snd_pcm_info_sizeof)()); \
41 memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)()); } while (0)
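//
// Sketch of how these redefined alloca helpers are used (the variable name is
// illustrative only); the allocation lives on the caller's stack, exactly like
// the stock ALSA macros:
//
//   snd_pcm_info_t* pcmInfo = NULL;
//   snd_pcm_info_alloca(&pcmInfo);  // alloca + memset via the LATE() symbols
//   // ... query device info; storage is released when the caller returns.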
42
43 // snd_lib_error_handler_t
44 void WebrtcAlsaErrorHandler(const char *file,
45 int line,
46 const char *function,
47 int err,
48                             const char *fmt, ...) {}
49
50 namespace webrtc
51 {
52 static const unsigned int ALSA_PLAYOUT_FREQ = 48000;
53 static const unsigned int ALSA_PLAYOUT_CH = 2;
54 static const unsigned int ALSA_PLAYOUT_LATENCY = 40*1000; // in us
55 static const unsigned int ALSA_CAPTURE_FREQ = 48000;
56 static const unsigned int ALSA_CAPTURE_CH = 2;
57 static const unsigned int ALSA_CAPTURE_LATENCY = 40*1000; // in us
58 static const unsigned int ALSA_CAPTURE_WAIT_TIMEOUT = 5; // in ms
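// With the defaults above, one 10 ms chunk of audio works out to
// (illustrative arithmetic, matching what InitPlayout/InitRecording compute):
//   48000 Hz / 100                     = 480 frames per 10 ms
//   480 frames * 2 channels * 2 bytes  = 1920 bytes per 10 ms buffer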
59
60 #define FUNC_GET_NUM_OF_DEVICE 0
61 #define FUNC_GET_DEVICE_NAME 1
62 #define FUNC_GET_DEVICE_NAME_FOR_AN_ENUM 2
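// These selectors pick what GetDevicesInfo() does; the call sites below pass
// the literal values 0/1/2. Roughly (see GetDevicesInfo for the details):
//   GetDevicesInfo(0, true)                  -> number of playout devices
//   GetDevicesInfo(1, true, idx, name, len)  -> human-readable device name
//   GetDevicesInfo(2, true, idx, name, len)  -> ALSA device string to open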
63
64 AudioDeviceLinuxALSA::AudioDeviceLinuxALSA(const int32_t id) :
65 _ptrAudioBuffer(NULL),
66 _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
67 _ptrThreadRec(NULL),
68 _ptrThreadPlay(NULL),
69 _recThreadID(0),
70 _playThreadID(0),
71 _id(id),
72 _mixerManager(id),
73 _inputDeviceIndex(0),
74 _outputDeviceIndex(0),
75 _inputDeviceIsSpecified(false),
76 _outputDeviceIsSpecified(false),
77 _handleRecord(NULL),
78 _handlePlayout(NULL),
79 _recordingBuffersizeInFrame(0),
80 _recordingPeriodSizeInFrame(0),
81 _playoutBufferSizeInFrame(0),
82 _playoutPeriodSizeInFrame(0),
83 _recordingBufferSizeIn10MS(0),
84 _playoutBufferSizeIn10MS(0),
85 _recordingFramesIn10MS(0),
86 _playoutFramesIn10MS(0),
87 _recordingFreq(ALSA_CAPTURE_FREQ),
88 _playoutFreq(ALSA_PLAYOUT_FREQ),
89 _recChannels(ALSA_CAPTURE_CH),
90 _playChannels(ALSA_PLAYOUT_CH),
91 _recordingBuffer(NULL),
92 _playoutBuffer(NULL),
93 _recordingFramesLeft(0),
94 _playoutFramesLeft(0),
95 _playBufType(AudioDeviceModule::kFixedBufferSize),
96 _initialized(false),
97 _recording(false),
98 _playing(false),
99 _recIsInitialized(false),
100 _playIsInitialized(false),
101 _AGC(false),
102 _recordingDelay(0),
103 _playoutDelay(0),
104 _playWarning(0),
105 _playError(0),
106 _recWarning(0),
107 _recError(0),
108 _playBufDelay(80),
109 _playBufDelayFixed(80)
110 {
111 memset(_oldKeyState, 0, sizeof(_oldKeyState));
112 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
113 "%s created", __FUNCTION__);
114 }
115
116 // ----------------------------------------------------------------------------
117 // AudioDeviceLinuxALSA - dtor
118 // ----------------------------------------------------------------------------
119
120 AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA()
121 {
122 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
123 "%s destroyed", __FUNCTION__);
124
125 Terminate();
126
127 // Clean up the recording buffer and playout buffer.
128 if (_recordingBuffer)
129 {
130 delete [] _recordingBuffer;
131 _recordingBuffer = NULL;
132 }
133 if (_playoutBuffer)
134 {
135 delete [] _playoutBuffer;
136 _playoutBuffer = NULL;
137 }
138 delete &_critSect;
139 }
140
141 void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
142 {
143
144 CriticalSectionScoped lock(&_critSect);
145
146 _ptrAudioBuffer = audioBuffer;
147
148 // Inform the AudioBuffer about default settings for this implementation.
149 // Set all values to zero here since the actual settings will be done by
150 // InitPlayout and InitRecording later.
151 _ptrAudioBuffer->SetRecordingSampleRate(0);
152 _ptrAudioBuffer->SetPlayoutSampleRate(0);
153 _ptrAudioBuffer->SetRecordingChannels(0);
154 _ptrAudioBuffer->SetPlayoutChannels(0);
155 }
156
157 int32_t AudioDeviceLinuxALSA::ActiveAudioLayer(
158 AudioDeviceModule::AudioLayer& audioLayer) const
159 {
160 audioLayer = AudioDeviceModule::kLinuxAlsaAudio;
161 return 0;
162 }
163
164 int32_t AudioDeviceLinuxALSA::Init()
165 {
166
167 CriticalSectionScoped lock(&_critSect);
168
169 // Load libasound
170 if (!AlsaSymbolTable.Load())
171 {
172 // Alsa is not installed on
173 // this system
174 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
175 " failed to load symbol table");
176 return -1;
177 }
178
179 if (_initialized)
180 {
181 return 0;
182 }
183 #if defined(USE_X11)
184 //Get X display handle for typing detection
185 _XDisplay = XOpenDisplay(NULL);
186 if (!_XDisplay)
187 {
188 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
189 " failed to open X display, typing detection will not work");
190 }
191 #endif
192 _playWarning = 0;
193 _playError = 0;
194 _recWarning = 0;
195 _recError = 0;
196
197 _initialized = true;
198
199 return 0;
200 }
201
202 int32_t AudioDeviceLinuxALSA::Terminate()
203 {
204
205 if (!_initialized)
206 {
207 return 0;
208 }
209
210 CriticalSectionScoped lock(&_critSect);
211
212 _mixerManager.Close();
213
214 // RECORDING
215 if (_ptrThreadRec)
216 {
217 ThreadWrapper* tmpThread = _ptrThreadRec;
218 _ptrThreadRec = NULL;
219 _critSect.Leave();
220
221 tmpThread->SetNotAlive();
222
223 if (tmpThread->Stop())
224 {
225 delete tmpThread;
226 }
227 else
228 {
229 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
230 " failed to close down the rec audio thread");
231 }
232
233 _critSect.Enter();
234 }
235
236 // PLAYOUT
237 if (_ptrThreadPlay)
238 {
239 ThreadWrapper* tmpThread = _ptrThreadPlay;
240 _ptrThreadPlay = NULL;
241 _critSect.Leave();
242
243 tmpThread->SetNotAlive();
244
245 if (tmpThread->Stop())
246 {
247 delete tmpThread;
248 }
249 else
250 {
251 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
252 " failed to close down the play audio thread");
253 }
254
255 _critSect.Enter();
256 }
257 #if defined(USE_X11)
258 if (_XDisplay)
259 {
260 XCloseDisplay(_XDisplay);
261 _XDisplay = NULL;
262 }
263 #endif
264 _initialized = false;
265 _outputDeviceIsSpecified = false;
266 _inputDeviceIsSpecified = false;
267
268 return 0;
269 }
270
271 bool AudioDeviceLinuxALSA::Initialized() const
272 {
273 return (_initialized);
274 }
275
276 int32_t AudioDeviceLinuxALSA::InitSpeaker()
277 {
278
279 CriticalSectionScoped lock(&_critSect);
280
281 if (_playing)
282 {
283 return -1;
284 }
285
286 char devName[kAdmMaxDeviceNameSize] = {0};
287 GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize);
288 return _mixerManager.OpenSpeaker(devName);
289 }
290
291 int32_t AudioDeviceLinuxALSA::InitMicrophone()
292 {
293
294 CriticalSectionScoped lock(&_critSect);
295
296 if (_recording)
297 {
298 return -1;
299 }
300
301 char devName[kAdmMaxDeviceNameSize] = {0};
302 GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize);
303 return _mixerManager.OpenMicrophone(devName);
304 }
305
306 bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const
307 {
308 return (_mixerManager.SpeakerIsInitialized());
309 }
310
311 bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const
312 {
313 return (_mixerManager.MicrophoneIsInitialized());
314 }
315
316 int32_t AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available)
317 {
318
319 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
320
321 // Make an attempt to open up the
322 // output mixer corresponding to the currently selected output device.
323 if (!wasInitialized && InitSpeaker() == -1)
324 {
325 // If we end up here it means that the selected speaker has no volume
326 // control.
327 available = false;
328 return 0;
329 }
330
331 // Given that InitSpeaker was successful, we know that a volume control
332 // exists
333 available = true;
334
335 // Close the initialized output mixer
336 if (!wasInitialized)
337 {
338 _mixerManager.CloseSpeaker();
339 }
340
341 return 0;
342 }
343
344 int32_t AudioDeviceLinuxALSA::SetSpeakerVolume(uint32_t volume)
345 {
346
347 return (_mixerManager.SetSpeakerVolume(volume));
348 }
349
350 int32_t AudioDeviceLinuxALSA::SpeakerVolume(uint32_t& volume) const
351 {
352
353 uint32_t level(0);
354
355 if (_mixerManager.SpeakerVolume(level) == -1)
356 {
357 return -1;
358 }
359
360 volume = level;
361
362 return 0;
363 }
364
365
366 int32_t AudioDeviceLinuxALSA::SetWaveOutVolume(uint16_t volumeLeft,
367 uint16_t volumeRight)
368 {
369
370 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
371 " API call not supported on this platform");
372 return -1;
373 }
374
375 int32_t AudioDeviceLinuxALSA::WaveOutVolume(
376 uint16_t& /*volumeLeft*/,
377 uint16_t& /*volumeRight*/) const
378 {
379
380 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
381 " API call not supported on this platform");
382 return -1;
383 }
384
385 int32_t AudioDeviceLinuxALSA::MaxSpeakerVolume(
386 uint32_t& maxVolume) const
387 {
388
389 uint32_t maxVol(0);
390
391 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
392 {
393 return -1;
394 }
395
396 maxVolume = maxVol;
397
398 return 0;
399 }
400
401 int32_t AudioDeviceLinuxALSA::MinSpeakerVolume(
402 uint32_t& minVolume) const
403 {
404
405 uint32_t minVol(0);
406
407 if (_mixerManager.MinSpeakerVolume(minVol) == -1)
408 {
409 return -1;
410 }
411
412 minVolume = minVol;
413
414 return 0;
415 }
416
417 int32_t AudioDeviceLinuxALSA::SpeakerVolumeStepSize(
418 uint16_t& stepSize) const
419 {
420
421 uint16_t delta(0);
422
423 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
424 {
425 return -1;
426 }
427
428 stepSize = delta;
429
430 return 0;
431 }
432
433 int32_t AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available)
434 {
435
436 bool isAvailable(false);
437 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
438
439 // Make an attempt to open up the
440 // output mixer corresponding to the currently selected output device.
441 //
442 if (!wasInitialized && InitSpeaker() == -1)
443 {
444 // If we end up here it means that the selected speaker has no volume
445 // control, hence it is safe to state that there is no mute control
446 // already at this stage.
447 available = false;
448 return 0;
449 }
450
451 // Check if the selected speaker has a mute control
452 _mixerManager.SpeakerMuteIsAvailable(isAvailable);
453
454 available = isAvailable;
455
456 // Close the initialized output mixer
457 if (!wasInitialized)
458 {
459 _mixerManager.CloseSpeaker();
460 }
461
462 return 0;
463 }
464
465 int32_t AudioDeviceLinuxALSA::SetSpeakerMute(bool enable)
466 {
467 return (_mixerManager.SetSpeakerMute(enable));
468 }
469
470 int32_t AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const
471 {
472
473 bool muted(0);
474
475 if (_mixerManager.SpeakerMute(muted) == -1)
476 {
477 return -1;
478 }
479
480 enabled = muted;
481
482 return 0;
483 }
484
485 int32_t AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available)
486 {
487
488 bool isAvailable(false);
489 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
490
491 // Make an attempt to open up the
492 // input mixer corresponding to the currently selected input device.
493 //
494 if (!wasInitialized && InitMicrophone() == -1)
495 {
496 // If we end up here it means that the selected microphone has no volume
497 // control, hence it is safe to state that there is no mute control
498 // already at this stage.
499 available = false;
500 return 0;
501 }
502
503 // Check if the selected microphone has a mute control
504 //
505 _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
506 available = isAvailable;
507
508 // Close the initialized input mixer
509 //
510 if (!wasInitialized)
511 {
512 _mixerManager.CloseMicrophone();
513 }
514
515 return 0;
516 }
517
518 int32_t AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable)
519 {
520 return (_mixerManager.SetMicrophoneMute(enable));
521 }
522
523 // ----------------------------------------------------------------------------
524 // MicrophoneMute
525 // ----------------------------------------------------------------------------
526
527 int32_t AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const
528 {
529
530 bool muted(0);
531
532 if (_mixerManager.MicrophoneMute(muted) == -1)
533 {
534 return -1;
535 }
536
537 enabled = muted;
538 return 0;
539 }
540
541 int32_t AudioDeviceLinuxALSA::MicrophoneBoostIsAvailable(bool& available)
542 {
543
544 bool isAvailable(false);
545 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
546
547     // Enumerate all available microphones and make an attempt to open up the
548 // input mixer corresponding to the currently selected input device.
549 //
550 if (!wasInitialized && InitMicrophone() == -1)
551 {
552 // If we end up here it means that the selected microphone has no volume
553 // control, hence it is safe to state that there is no boost control
554 // already at this stage.
555 available = false;
556 return 0;
557 }
558
559 // Check if the selected microphone has a boost control
560 _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
561 available = isAvailable;
562
563 // Close the initialized input mixer
564 if (!wasInitialized)
565 {
566 _mixerManager.CloseMicrophone();
567 }
568
569 return 0;
570 }
571
572 int32_t AudioDeviceLinuxALSA::SetMicrophoneBoost(bool enable)
573 {
574
575 return (_mixerManager.SetMicrophoneBoost(enable));
576 }
577
578 int32_t AudioDeviceLinuxALSA::MicrophoneBoost(bool& enabled) const
579 {
580
581 bool onOff(0);
582
583 if (_mixerManager.MicrophoneBoost(onOff) == -1)
584 {
585 return -1;
586 }
587
588 enabled = onOff;
589
590 return 0;
591 }
592
593 int32_t AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available)
594 {
595
596 CriticalSectionScoped lock(&_critSect);
597
598 // If we already have initialized in stereo it's obviously available
599 if (_recIsInitialized && (2 == _recChannels))
600 {
601 available = true;
602 return 0;
603 }
604
605 // Save rec states and the number of rec channels
606 bool recIsInitialized = _recIsInitialized;
607 bool recording = _recording;
608 int recChannels = _recChannels;
609
610 available = false;
611
612 // Stop/uninitialize recording if initialized (and possibly started)
613 if (_recIsInitialized)
614 {
615 StopRecording();
616 }
617
618 // Try init in stereo;
619 _recChannels = 2;
620 if (InitRecording() == 0)
621 {
622 available = true;
623 }
624
625 // Stop/uninitialize recording
626 StopRecording();
627
628 // Recover previous states
629 _recChannels = recChannels;
630 if (recIsInitialized)
631 {
632 InitRecording();
633 }
634 if (recording)
635 {
636 StartRecording();
637 }
638
639 return 0;
640 }
641
642 int32_t AudioDeviceLinuxALSA::SetStereoRecording(bool enable)
643 {
644
645 if (enable)
646 _recChannels = 2;
647 else
648 _recChannels = 1;
649
650 return 0;
651 }
652
653 int32_t AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const
654 {
655
656 if (_recChannels == 2)
657 enabled = true;
658 else
659 enabled = false;
660
661 return 0;
662 }
663
664 int32_t AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available)
665 {
666
667 CriticalSectionScoped lock(&_critSect);
668
669 // If we already have initialized in stereo it's obviously available
670 if (_playIsInitialized && (2 == _playChannels))
671 {
672 available = true;
673 return 0;
674 }
675
676     // Save playout states and the number of playout channels
677 bool playIsInitialized = _playIsInitialized;
678 bool playing = _playing;
679 int playChannels = _playChannels;
680
681 available = false;
682
683     // Stop/uninitialize playout if initialized (and possibly started)
684 if (_playIsInitialized)
685 {
686 StopPlayout();
687 }
688
689 // Try init in stereo;
690 _playChannels = 2;
691 if (InitPlayout() == 0)
692 {
693 available = true;
694 }
695
696     // Stop/uninitialize playout
697 StopPlayout();
698
699 // Recover previous states
700 _playChannels = playChannels;
701 if (playIsInitialized)
702 {
703 InitPlayout();
704 }
705 if (playing)
706 {
707 StartPlayout();
708 }
709
710 return 0;
711 }
712
713 int32_t AudioDeviceLinuxALSA::SetStereoPlayout(bool enable)
714 {
715
716 if (enable)
717 _playChannels = 2;
718 else
719 _playChannels = 1;
720
721 return 0;
722 }
723
724 int32_t AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const
725 {
726
727 if (_playChannels == 2)
728 enabled = true;
729 else
730 enabled = false;
731
732 return 0;
733 }
734
735 int32_t AudioDeviceLinuxALSA::SetAGC(bool enable)
736 {
737
738 _AGC = enable;
739
740 return 0;
741 }
742
743 bool AudioDeviceLinuxALSA::AGC() const
744 {
745
746 return _AGC;
747 }
748
749 int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available)
750 {
751
752 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
753
754 // Make an attempt to open up the
755     // input mixer corresponding to the currently selected input device.
756 if (!wasInitialized && InitMicrophone() == -1)
757 {
758 // If we end up here it means that the selected microphone has no volume
759 // control.
760 available = false;
761 return 0;
762 }
763
764 // Given that InitMicrophone was successful, we know that a volume control
765 // exists
766 available = true;
767
768 // Close the initialized input mixer
769 if (!wasInitialized)
770 {
771 _mixerManager.CloseMicrophone();
772 }
773
774 return 0;
775 }
776
777 int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume)
778 {
779
780 return (_mixerManager.SetMicrophoneVolume(volume));
783 }
784
785 int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const
786 {
787
788 uint32_t level(0);
789
790 if (_mixerManager.MicrophoneVolume(level) == -1)
791 {
792 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
793                  " failed to retrieve current microphone level");
794 return -1;
795 }
796
797 volume = level;
798
799 return 0;
800 }
801
802 int32_t AudioDeviceLinuxALSA::MaxMicrophoneVolume(
803 uint32_t& maxVolume) const
804 {
805
806 uint32_t maxVol(0);
807
808 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
809 {
810 return -1;
811 }
812
813 maxVolume = maxVol;
814
815 return 0;
816 }
817
818 int32_t AudioDeviceLinuxALSA::MinMicrophoneVolume(
819 uint32_t& minVolume) const
820 {
821
822 uint32_t minVol(0);
823
824 if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
825 {
826 return -1;
827 }
828
829 minVolume = minVol;
830
831 return 0;
832 }
833
834 int32_t AudioDeviceLinuxALSA::MicrophoneVolumeStepSize(
835 uint16_t& stepSize) const
836 {
837
838 uint16_t delta(0);
839
840 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
841 {
842 return -1;
843 }
844
845 stepSize = delta;
846
847 return 0;
848 }
849
850 int16_t AudioDeviceLinuxALSA::PlayoutDevices()
851 {
852
853 return (int16_t)GetDevicesInfo(0, true);
854 }
855
856 int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(uint16_t index)
857 {
858
859 if (_playIsInitialized)
860 {
861 return -1;
862 }
863
864 uint32_t nDevices = GetDevicesInfo(0, true);
865 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
866                  " number of available audio output devices is %u", nDevices);
867
868 if (index > (nDevices-1))
869 {
870 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
871 " device index is out of range [0,%u]", (nDevices-1));
872 return -1;
873 }
874
875 _outputDeviceIndex = index;
876 _outputDeviceIsSpecified = true;
877
878 return 0;
879 }
880
881 int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(
882 AudioDeviceModule::WindowsDeviceType /*device*/)
883 {
884 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
885 "WindowsDeviceType not supported");
886 return -1;
887 }
888
889 int32_t AudioDeviceLinuxALSA::PlayoutDeviceName(
890 uint16_t index,
891 char name[kAdmMaxDeviceNameSize],
892 char guid[kAdmMaxGuidSize])
893 {
894
895 const uint16_t nDevices(PlayoutDevices());
896
897 if ((index > (nDevices-1)) || (name == NULL))
898 {
899 return -1;
900 }
901
902 memset(name, 0, kAdmMaxDeviceNameSize);
903
904 if (guid != NULL)
905 {
906 memset(guid, 0, kAdmMaxGuidSize);
907 }
908
909 return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
910 }
911
912 int32_t AudioDeviceLinuxALSA::RecordingDeviceName(
913 uint16_t index,
914 char name[kAdmMaxDeviceNameSize],
915 char guid[kAdmMaxGuidSize])
916 {
917
918 const uint16_t nDevices(RecordingDevices());
919
920 if ((index > (nDevices-1)) || (name == NULL))
921 {
922 return -1;
923 }
924
925 memset(name, 0, kAdmMaxDeviceNameSize);
926
927 if (guid != NULL)
928 {
929 memset(guid, 0, kAdmMaxGuidSize);
930 }
931
932 return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
933 }
934
935 int16_t AudioDeviceLinuxALSA::RecordingDevices()
936 {
937
938 return (int16_t)GetDevicesInfo(0, false);
939 }
940
941 int32_t AudioDeviceLinuxALSA::SetRecordingDevice(uint16_t index)
942 {
943
944 if (_recIsInitialized)
945 {
946 return -1;
947 }
948
949 uint32_t nDevices = GetDevicesInfo(0, false);
950 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
951                  " number of available audio input devices is %u", nDevices);
952
953 if (index > (nDevices-1))
954 {
955 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
956 " device index is out of range [0,%u]", (nDevices-1));
957 return -1;
958 }
959
960 _inputDeviceIndex = index;
961 _inputDeviceIsSpecified = true;
962
963 return 0;
964 }
965
966 // ----------------------------------------------------------------------------
967 // SetRecordingDevice II (II)
968 // ----------------------------------------------------------------------------
969
970 int32_t AudioDeviceLinuxALSA::SetRecordingDevice(
971 AudioDeviceModule::WindowsDeviceType /*device*/)
972 {
973 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
974 "WindowsDeviceType not supported");
975 return -1;
976 }
977
978 int32_t AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available)
979 {
980
981 available = false;
982
983 // Try to initialize the playout side with mono
984     // Assumes that the user will set the number of channels after calling this function
985 _playChannels = 1;
986 int32_t res = InitPlayout();
987
988 // Cancel effect of initialization
989 StopPlayout();
990
991 if (res != -1)
992 {
993 available = true;
994 }
995 else
996 {
997 // It may be possible to play out in stereo
998 res = StereoPlayoutIsAvailable(available);
999 if (available)
1000 {
1001 // Then set channels to 2 so InitPlayout doesn't fail
1002 _playChannels = 2;
1003 }
1004 }
1005
1006 return res;
1007 }
1008
1009 int32_t AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available)
1010 {
1011
1012 available = false;
1013
1014 // Try to initialize the recording side with mono
1015     // Assumes that the user will set the number of channels after calling this function
1016 _recChannels = 1;
1017 int32_t res = InitRecording();
1018
1019 // Cancel effect of initialization
1020 StopRecording();
1021
1022 if (res != -1)
1023 {
1024 available = true;
1025 }
1026 else
1027 {
1028 // It may be possible to record in stereo
1029 res = StereoRecordingIsAvailable(available);
1030 if (available)
1031 {
1032             // Then set channels to 2 so InitRecording doesn't fail
1033 _recChannels = 2;
1034 }
1035 }
1036
1037 return res;
1038 }
1039
1040 int32_t AudioDeviceLinuxALSA::InitPlayout()
1041 {
1042
1043 int errVal = 0;
1044
1045 CriticalSectionScoped lock(&_critSect);
1046 if (_playing)
1047 {
1048 return -1;
1049 }
1050
1051 if (!_outputDeviceIsSpecified)
1052 {
1053 return -1;
1054 }
1055
1056 if (_playIsInitialized)
1057 {
1058 return 0;
1059 }
1060 // Initialize the speaker (devices might have been added or removed)
1061 if (InitSpeaker() == -1)
1062 {
1063 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1064 " InitSpeaker() failed");
1065 }
1066
1067     // Start by closing any existing pcm playout device
1068 //
1069 if (_handlePlayout != NULL)
1070 {
1071         errVal = LATE(snd_pcm_close)(_handlePlayout);
1072 _handlePlayout = NULL;
1073 _playIsInitialized = false;
1074 if (errVal < 0)
1075 {
1076 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1077 " Error closing current playout sound device, error:"
1078 " %s", LATE(snd_strerror)(errVal));
1079 }
1080 }
1081
1082 // Open PCM device for playout
1083 char deviceName[kAdmMaxDeviceNameSize] = {0};
1084 GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
1085 kAdmMaxDeviceNameSize);
1086
1087 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1088 " InitPlayout open (%s)", deviceName);
1089
1090 errVal = LATE(snd_pcm_open)
1091 (&_handlePlayout,
1092 deviceName,
1093 SND_PCM_STREAM_PLAYBACK,
1094 SND_PCM_NONBLOCK);
1095
1096 if (errVal == -EBUSY) // Device busy - try some more!
1097 {
1098 for (int i=0; i < 5; i++)
1099 {
1100 SleepMs(1000);
1101 errVal = LATE(snd_pcm_open)
1102 (&_handlePlayout,
1103 deviceName,
1104 SND_PCM_STREAM_PLAYBACK,
1105 SND_PCM_NONBLOCK);
1106 if (errVal == 0)
1107 {
1108 break;
1109 }
1110 }
1111 }
1112 if (errVal < 0)
1113 {
1114 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1115 " unable to open playback device: %s (%d)",
1116 LATE(snd_strerror)(errVal),
1117 errVal);
1118 _handlePlayout = NULL;
1119 return -1;
1120 }
1121
1122 _playoutFramesIn10MS = _playoutFreq/100;
1123 if ((errVal = LATE(snd_pcm_set_params)( _handlePlayout,
1124 #if defined(WEBRTC_ARCH_BIG_ENDIAN)
1125 SND_PCM_FORMAT_S16_BE,
1126 #else
1127 SND_PCM_FORMAT_S16_LE, //format
1128 #endif
1129 SND_PCM_ACCESS_RW_INTERLEAVED, //access
1130 _playChannels, //channels
1131 _playoutFreq, //rate
1132 1, //soft_resample
1133                                    ALSA_PLAYOUT_LATENCY     //required overall latency in us (40 ms)
1134 )) < 0)
1135     {
1136 _playoutFramesIn10MS = 0;
1137 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1138 " unable to set playback device: %s (%d)",
1139 LATE(snd_strerror)(errVal),
1140 errVal);
1141 ErrorRecovery(errVal, _handlePlayout);
1142 errVal = LATE(snd_pcm_close)(_handlePlayout);
1143 _handlePlayout = NULL;
1144 return -1;
1145 }
1146
1147 errVal = LATE(snd_pcm_get_params)(_handlePlayout,
1148 &_playoutBufferSizeInFrame, &_playoutPeriodSizeInFrame);
1149 if (errVal < 0)
1150 {
1151 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1152                      " snd_pcm_get_params: %s (%d)",
1153 LATE(snd_strerror)(errVal),
1154 errVal);
1155 _playoutBufferSizeInFrame = 0;
1156 _playoutPeriodSizeInFrame = 0;
1157 }
1158 else {
1159 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1160 " playout snd_pcm_get_params "
1161 "buffer_size:%d period_size :%d",
1162 _playoutBufferSizeInFrame, _playoutPeriodSizeInFrame);
1163 }
1164
1165 if (_ptrAudioBuffer)
1166 {
1167 // Update webrtc audio buffer with the selected parameters
1168 _ptrAudioBuffer->SetPlayoutSampleRate(_playoutFreq);
1169 _ptrAudioBuffer->SetPlayoutChannels(_playChannels);
1170 }
1171
1172 // Set play buffer size
1173 _playoutBufferSizeIn10MS = LATE(snd_pcm_frames_to_bytes)(
1174 _handlePlayout, _playoutFramesIn10MS);
1175
1176     // Init variables used for play
1177 _playWarning = 0;
1178 _playError = 0;
1179
1180 if (_handlePlayout != NULL)
1181 {
1182 _playIsInitialized = true;
1183 return 0;
1184 }
1185 else
1186 {
1187 return -1;
1188 }
1191 }
1192
1193 int32_t AudioDeviceLinuxALSA::InitRecording()
1194 {
1195
1196 int errVal = 0;
1197
1198 CriticalSectionScoped lock(&_critSect);
1199
1200 if (_recording)
1201 {
1202 return -1;
1203 }
1204
1205 if (!_inputDeviceIsSpecified)
1206 {
1207 return -1;
1208 }
1209
1210 if (_recIsInitialized)
1211 {
1212 return 0;
1213 }
1214
1215 // Initialize the microphone (devices might have been added or removed)
1216 if (InitMicrophone() == -1)
1217 {
1218 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1219 " InitMicrophone() failed");
1220 }
1221
1222 // Start by closing any existing pcm-input devices
1223 //
1224 if (_handleRecord != NULL)
1225 {
1226 int errVal = LATE(snd_pcm_close)(_handleRecord);
1227 _handleRecord = NULL;
1228 _recIsInitialized = false;
1229 if (errVal < 0)
1230 {
1231 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1232 " Error closing current recording sound device,"
1233 " error: %s",
1234 LATE(snd_strerror)(errVal));
1235 }
1236 }
1237
1238 // Open PCM device for recording
1239 // The corresponding settings for playout are made after the record settings
1240 char deviceName[kAdmMaxDeviceNameSize] = {0};
1241 GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
1242 kAdmMaxDeviceNameSize);
1243
1244 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1245 "InitRecording open (%s)", deviceName);
1246 errVal = LATE(snd_pcm_open)
1247 (&_handleRecord,
1248 deviceName,
1249 SND_PCM_STREAM_CAPTURE,
1250 SND_PCM_NONBLOCK);
1251
1252 // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
1253 if (errVal == -EBUSY) // Device busy - try some more!
1254 {
1255 for (int i=0; i < 5; i++)
1256 {
1257 SleepMs(1000);
1258 errVal = LATE(snd_pcm_open)
1259 (&_handleRecord,
1260 deviceName,
1261 SND_PCM_STREAM_CAPTURE,
1262 SND_PCM_NONBLOCK);
1263 if (errVal == 0)
1264 {
1265 break;
1266 }
1267 }
1268 }
1269 if (errVal < 0)
1270 {
1271 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1272 " unable to open record device: %s",
1273 LATE(snd_strerror)(errVal));
1274 _handleRecord = NULL;
1275 return -1;
1276 }
1277
1278 _recordingFramesIn10MS = _recordingFreq/100;
1279 if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
1280 #if defined(WEBRTC_ARCH_BIG_ENDIAN)
1281 SND_PCM_FORMAT_S16_BE, //format
1282 #else
1283 SND_PCM_FORMAT_S16_LE, //format
1284 #endif
1285 SND_PCM_ACCESS_RW_INTERLEAVED, //access
1286 _recChannels, //channels
1287 _recordingFreq, //rate
1288 1, //soft_resample
1289 ALSA_CAPTURE_LATENCY //latency in us
1290 )) < 0)
1291 {
1292         // Fall back to the other channel configuration (mono/stereo) and retry.
1293 if (_recChannels == 1)
1294 _recChannels = 2;
1295 else
1296 _recChannels = 1;
1297
1298 if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
1299 #if defined(WEBRTC_ARCH_BIG_ENDIAN)
1300 SND_PCM_FORMAT_S16_BE, //format
1301 #else
1302 SND_PCM_FORMAT_S16_LE, //format
1303 #endif
1304 SND_PCM_ACCESS_RW_INTERLEAVED, //access
1305 _recChannels, //channels
1306 _recordingFreq, //rate
1307 1, //soft_resample
1308 ALSA_CAPTURE_LATENCY //latency in us
1309 )) < 0)
1310 {
1311 _recordingFramesIn10MS = 0;
1312 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1313 " unable to set record settings: %s (%d)",
1314 LATE(snd_strerror)(errVal), errVal);
1315 ErrorRecovery(errVal, _handleRecord);
1316 errVal = LATE(snd_pcm_close)(_handleRecord);
1317 _handleRecord = NULL;
1318 return -1;
1319 }
1320 }
1321
1322 errVal = LATE(snd_pcm_get_params)(_handleRecord,
1323 &_recordingBuffersizeInFrame, &_recordingPeriodSizeInFrame);
1324 if (errVal < 0)
1325 {
1326 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1327                      " snd_pcm_get_params: %s (%d)",
1328 LATE(snd_strerror)(errVal), errVal);
1329 _recordingBuffersizeInFrame = 0;
1330 _recordingPeriodSizeInFrame = 0;
1331 }
1332 else {
1333 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1334 " capture snd_pcm_get_params "
1335 "buffer_size:%d period_size:%d",
1336 _recordingBuffersizeInFrame, _recordingPeriodSizeInFrame);
1337 }
1338
1339 if (_ptrAudioBuffer)
1340 {
1341 // Update webrtc audio buffer with the selected parameters
1342 _ptrAudioBuffer->SetRecordingSampleRate(_recordingFreq);
1343 _ptrAudioBuffer->SetRecordingChannels(_recChannels);
1344 }
1345
1346 // Set rec buffer size and create buffer
1347 _recordingBufferSizeIn10MS = LATE(snd_pcm_frames_to_bytes)(
1348 _handleRecord, _recordingFramesIn10MS);
1349
1350 if (_handleRecord != NULL)
1351 {
1352 // Mark recording side as initialized
1353 _recIsInitialized = true;
1354 return 0;
1355 }
1356 else
1357 {
1358 return -1;
1359 }
1362 }
1363
1364 int32_t AudioDeviceLinuxALSA::StartRecording()
1365 {
1366
1367 if (!_recIsInitialized)
1368 {
1369 return -1;
1370 }
1371
1372 if (_recording)
1373 {
1374 return 0;
1375 }
1376
1377 _recording = true;
1378
1379 int errVal = 0;
1380 _recordingFramesLeft = _recordingFramesIn10MS;
1381
1382 // Make sure we only create the buffer once.
1383 if (!_recordingBuffer)
1384 _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
1385 if (!_recordingBuffer)
1386 {
1387 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
1388 " failed to alloc recording buffer");
1389 _recording = false;
1390 return -1;
1391 }
1392 // RECORDING
1393 const char* threadName = "webrtc_audio_module_capture_thread";
1394 _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc,
1395 this,
1396 kRealtimePriority,
1397 threadName);
1398 if (_ptrThreadRec == NULL)
1399 {
1400 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
1401 " failed to create the rec audio thread");
1402 _recording = false;
1403 delete [] _recordingBuffer;
1404 _recordingBuffer = NULL;
1405 return -1;
1406 }
1407
1408 unsigned int threadID(0);
1409 if (!_ptrThreadRec->Start(threadID))
1410 {
1411 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
1412 " failed to start the rec audio thread");
1413 _recording = false;
1414 delete _ptrThreadRec;
1415 _ptrThreadRec = NULL;
1416 delete [] _recordingBuffer;
1417 _recordingBuffer = NULL;
1418 return -1;
1419 }
1420 _recThreadID = threadID;
1421
1422 errVal = LATE(snd_pcm_prepare)(_handleRecord);
1423 if (errVal < 0)
1424 {
1425 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1426 " capture snd_pcm_prepare failed (%s)\n",
1427 LATE(snd_strerror)(errVal));
1428         // Just log the error: if the device is truly unusable, the
1429         // snd_pcm_start() call below will fail and we return -1.
1430 }
1431
1432 errVal = LATE(snd_pcm_start)(_handleRecord);
1433 if (errVal < 0)
1434 {
1435 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1436 " capture snd_pcm_start err: %s",
1437 LATE(snd_strerror)(errVal));
1438 errVal = LATE(snd_pcm_start)(_handleRecord);
1439 if (errVal < 0)
1440 {
1441 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1442 " capture snd_pcm_start 2nd try err: %s",
1443 LATE(snd_strerror)(errVal));
1444 StopRecording();
1445 return -1;
1446 }
1447 }
1448
1449 return 0;
1450 }
1451
1452 int32_t AudioDeviceLinuxALSA::StopRecording()
1453 {
1454
1455 {
1456 CriticalSectionScoped lock(&_critSect);
1457
1458 if (!_recIsInitialized)
1459 {
1460 return 0;
1461 }
1462
1463 if (_handleRecord == NULL)
1464 {
1465 return -1;
1466 }
1467
1468 // Make sure we don't start recording (it's asynchronous).
1469 _recIsInitialized = false;
1470 _recording = false;
1471 }
1472
1473 if (_ptrThreadRec && !_ptrThreadRec->Stop())
1474 {
1475 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1476 " failed to stop the rec audio thread");
1477 return -1;
1478 }
1479 else {
1480 delete _ptrThreadRec;
1481 _ptrThreadRec = NULL;
1482 }
1483
1484 CriticalSectionScoped lock(&_critSect);
1485 _recordingFramesLeft = 0;
1486 if (_recordingBuffer)
1487 {
1488 delete [] _recordingBuffer;
1489 _recordingBuffer = NULL;
1490 }
1491
1492 // Stop and close pcm recording device.
1493 int errVal = LATE(snd_pcm_drop)(_handleRecord);
1494 if (errVal < 0)
1495 {
1496 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1497 " Error stop recording: %s",
1498 LATE(snd_strerror)(errVal));
1499 return -1;
1500 }
1501
1502 errVal = LATE(snd_pcm_close)(_handleRecord);
1503 if (errVal < 0)
1504 {
1505 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1506 " Error closing record sound device, error: %s",
1507 LATE(snd_strerror)(errVal));
1508 return -1;
1509 }
1510
1511 // Check if we have muted and unmute if so.
1512 bool muteEnabled = false;
1513 MicrophoneMute(muteEnabled);
1514 if (muteEnabled)
1515 {
1516 SetMicrophoneMute(false);
1517 }
1518
1519 // set the pcm input handle to NULL
1520 _handleRecord = NULL;
1521 return 0;
1522 }
1523
1524 bool AudioDeviceLinuxALSA::RecordingIsInitialized() const
1525 {
1526 return (_recIsInitialized);
1527 }
1528
1529 bool AudioDeviceLinuxALSA::Recording() const
1530 {
1531 return (_recording);
1532 }
1533
1534 bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const
1535 {
1536 return (_playIsInitialized);
1537 }
1538
1539 int32_t AudioDeviceLinuxALSA::StartPlayout()
1540 {
1541 if (!_playIsInitialized)
1542 {
1543 return -1;
1544 }
1545
1546 if (_playing)
1547 {
1548 return 0;
1549 }
1550
1551 _playing = true;
1552
1553 _playoutFramesLeft = 0;
1554 if (!_playoutBuffer)
1555 _playoutBuffer = new int8_t[_playoutBufferSizeIn10MS];
1556 if (!_playoutBuffer)
1557 {
1558 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1559 " failed to alloc playout buf");
1560 _playing = false;
1561 return -1;
1562 }
1563
1564 // PLAYOUT
1565 const char* threadName = "webrtc_audio_module_play_thread";
1566 _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc,
1567 this,
1568 kRealtimePriority,
1569 threadName);
1570 if (_ptrThreadPlay == NULL)
1571 {
1572 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
1573 " failed to create the play audio thread");
1574 _playing = false;
1575 delete [] _playoutBuffer;
1576 _playoutBuffer = NULL;
1577 return -1;
1578 }
1579
1580 unsigned int threadID(0);
1581 if (!_ptrThreadPlay->Start(threadID))
1582 {
1583 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
1584 " failed to start the play audio thread");
1585 _playing = false;
1586 delete _ptrThreadPlay;
1587 _ptrThreadPlay = NULL;
1588 delete [] _playoutBuffer;
1589 _playoutBuffer = NULL;
1590 return -1;
1591 }
1592 _playThreadID = threadID;
1593
1594 int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
1595 if (errVal < 0)
1596 {
1597 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
1598 " playout snd_pcm_prepare failed (%s)\n",
1599 LATE(snd_strerror)(errVal));
1600         // Just log the error: the playout thread will attempt recovery if
1601         // subsequent writes fail.
1602 }
1603
1604 return 0;
1605 }
1606
1607 int32_t AudioDeviceLinuxALSA::StopPlayout()
1608 {
1609
1610 {
1611 CriticalSectionScoped lock(&_critSect);
1612
1613 if (!_playIsInitialized)
1614 {
1615 return 0;
1616 }
1617
1618 if (_handlePlayout == NULL)
1619 {
1620 return -1;
1621 }
1622
1623 _playing = false;
1624 }
1625
1626 // stop playout thread first
1627 if (_ptrThreadPlay && !_ptrThreadPlay->Stop())
1628 {
1629 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1630 " failed to stop the play audio thread");
1631 return -1;
1632 }
1633 else {
1634 delete _ptrThreadPlay;
1635 _ptrThreadPlay = NULL;
1636 }
1637
1638 CriticalSectionScoped lock(&_critSect);
1639
1640 _playoutFramesLeft = 0;
1641 delete [] _playoutBuffer;
1642 _playoutBuffer = NULL;
1643
1644 // stop and close pcm playout device
1645 int errVal = LATE(snd_pcm_drop)(_handlePlayout);
1646 if (errVal < 0)
1647 {
1648 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1649 " Error stop playing: %s",
1650 LATE(snd_strerror)(errVal));
1651 }
1652
1653 errVal = LATE(snd_pcm_close)(_handlePlayout);
1654 if (errVal < 0)
1655 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1656 " Error closing playout sound device, error: %s",
1657 LATE(snd_strerror)(errVal));
1658
1659 // set the pcm input handle to NULL
1660 _playIsInitialized = false;
1661 _handlePlayout = NULL;
1662 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1663 " handle_playout is now set to NULL");
1664
1665 return 0;
1666 }
1667
1668 int32_t AudioDeviceLinuxALSA::PlayoutDelay(uint16_t& delayMS) const
1669 {
1670     delayMS = (uint16_t)(_playoutDelay * 1000 / _playoutFreq);
1671 return 0;
1672 }
1673
1674 int32_t AudioDeviceLinuxALSA::RecordingDelay(uint16_t& delayMS) const
1675 {
1676 // Adding 10ms adjusted value to the record delay due to 10ms buffering.
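    // Illustrative example: with _recordingFreq = 48000 and snd_pcm_delay()
    // having reported 960 frames, this yields 10 + 960 * 1000 / 48000 = 30 ms.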
1677 delayMS = (uint16_t)(10 + _recordingDelay * 1000 / _recordingFreq);
1678 return 0;
1679 }
1680
1681 bool AudioDeviceLinuxALSA::Playing() const
1682 {
1683 return (_playing);
1684 }
1685 // ----------------------------------------------------------------------------
1686 // SetPlayoutBuffer
1687 // ----------------------------------------------------------------------------
1688
1689 int32_t AudioDeviceLinuxALSA::SetPlayoutBuffer(
1690 const AudioDeviceModule::BufferType type,
1691 uint16_t sizeMS)
1692 {
1693 _playBufType = type;
1694 if (type == AudioDeviceModule::kFixedBufferSize)
1695 {
1696 _playBufDelayFixed = sizeMS;
1697 }
1698 return 0;
1699 }
1700
1701 int32_t AudioDeviceLinuxALSA::PlayoutBuffer(
1702 AudioDeviceModule::BufferType& type,
1703 uint16_t& sizeMS) const
1704 {
1705 type = _playBufType;
1706 if (type == AudioDeviceModule::kFixedBufferSize)
1707 {
1708 sizeMS = _playBufDelayFixed;
1709 }
1710 else
1711 {
1712 sizeMS = _playBufDelay;
1713 }
1714
1715 return 0;
1716 }
1717
1718 int32_t AudioDeviceLinuxALSA::CPULoad(uint16_t& load) const
1719 {
1720
1721 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1722 " API call not supported on this platform");
1723 return -1;
1724 }
1725
1726 bool AudioDeviceLinuxALSA::PlayoutWarning() const
1727 {
1728 CriticalSectionScoped lock(&_critSect);
1729 return (_playWarning > 0);
1730 }
1731
1732 bool AudioDeviceLinuxALSA::PlayoutError() const
1733 {
1734 CriticalSectionScoped lock(&_critSect);
1735 return (_playError > 0);
1736 }
1737
1738 bool AudioDeviceLinuxALSA::RecordingWarning() const
1739 {
1740 CriticalSectionScoped lock(&_critSect);
1741 return (_recWarning > 0);
1742 }
1743
1744 bool AudioDeviceLinuxALSA::RecordingError() const
1745 {
1746 CriticalSectionScoped lock(&_critSect);
1747 return (_recError > 0);
1748 }
1749
1750 void AudioDeviceLinuxALSA::ClearPlayoutWarning()
1751 {
1752 CriticalSectionScoped lock(&_critSect);
1753 _playWarning = 0;
1754 }
1755
1756 void AudioDeviceLinuxALSA::ClearPlayoutError()
1757 {
1758 CriticalSectionScoped lock(&_critSect);
1759 _playError = 0;
1760 }
1761
1762 void AudioDeviceLinuxALSA::ClearRecordingWarning()
1763 {
1764 CriticalSectionScoped lock(&_critSect);
1765 _recWarning = 0;
1766 }
1767
1768 void AudioDeviceLinuxALSA::ClearRecordingError()
1769 {
1770 CriticalSectionScoped lock(&_critSect);
1771 _recError = 0;
1772 }
1773
1774 // ============================================================================
1775 // Private Methods
1776 // ============================================================================
1777
1778 int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
1779 const int32_t function,
1780 const bool playback,
1781 const int32_t enumDeviceNo,
1782 char* enumDeviceName,
1783 const int32_t ednLen) const
1784 {
1785
1786 // Device enumeration based on libjingle implementation
1787 // by Tristan Schmelcher at Google Inc.
1788
1789 const char *type = playback ? "Output" : "Input";
1790 // dmix and dsnoop are only for playback and capture, respectively, but ALSA
1791 // stupidly includes them in both lists.
1792 const char *ignorePrefix = playback ? "dsnoop:" : "dmix:" ;
1793 // (ALSA lists many more "devices" of questionable interest, but we show them
1794 // just in case the weird devices may actually be desirable for some
1795 // users/systems.)
1796
1797 int err;
1798 int enumCount(0);
1799 bool keepSearching(true);
1800
1801 // From Chromium issue 95797
1802 // Loop through the sound cards to get Alsa device hints.
1803     // Don't use snd_device_name_hint(-1,..) since there is an access violation
1804 // inside this ALSA API with libasound.so.2.0.0.
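    // For reference, typical hint values on a common desktop setup (purely
    // illustrative): NAME may be "default", "sysdefault:CARD=PCH",
    // "dmix:CARD=PCH,DEV=0" or "dsnoop:CARD=PCH,DEV=0"; IOID is "Output",
    // "Input", or NULL when the device handles both directions.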
1805 int card = -1;
1806 while (!(LATE(snd_card_next)(&card)) && (card >= 0) && keepSearching) {
1807 void **hints;
1808 err = LATE(snd_device_name_hint)(card, "pcm", &hints);
1809 if (err != 0)
1810 {
1811 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1812 "GetDevicesInfo - device name hint error: %s",
1813 LATE(snd_strerror)(err));
1814 return -1;
1815 }
1816
1817 enumCount++; // default is 0
1818 if ((function == FUNC_GET_DEVICE_NAME ||
1819 function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) && enumDeviceNo == 0)
1820 {
1821 strcpy(enumDeviceName, "default");
1822
1823 err = LATE(snd_device_name_free_hint)(hints);
1824 if (err != 0)
1825 {
1826 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1827 "GetDevicesInfo - device name free hint error: %s",
1828 LATE(snd_strerror)(err));
1829 }
1830
1831 return 0;
1832 }
1833
1834 for (void **list = hints; *list != NULL; ++list)
1835 {
1836 char *actualType = LATE(snd_device_name_get_hint)(*list, "IOID");
1837 if (actualType)
1838 { // NULL means it's both.
1839 bool wrongType = (strcmp(actualType, type) != 0);
1840 free(actualType);
1841 if (wrongType)
1842 {
1843 // Wrong type of device (i.e., input vs. output).
1844 continue;
1845 }
1846 }
1847
1848 char *name = LATE(snd_device_name_get_hint)(*list, "NAME");
1849 if (!name)
1850 {
1851 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1852 "Device has no name");
1853 // Skip it.
1854 continue;
1855 }
1856
1857 // Now check if we actually want to show this device.
1858 if (strcmp(name, "default") != 0 &&
1859 strcmp(name, "null") != 0 &&
1860 strcmp(name, "pulse") != 0 &&
1861 strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0)
1862 {
1863 // Yes, we do.
1864 char *desc = LATE(snd_device_name_get_hint)(*list, "DESC");
1865 if (!desc)
1866 {
1867 // Virtual devices don't necessarily have descriptions.
1868 // Use their names instead.
1869 desc = name;
1870 }
1871
1872 if (FUNC_GET_NUM_OF_DEVICE == function)
1873 {
1874 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1875 " Enum device %d - %s", enumCount, name);
1876
1877 }
1878 if ((FUNC_GET_DEVICE_NAME == function) &&
1879 (enumDeviceNo == enumCount))
1880 {
1881 // We have found the enum device, copy the name to buffer.
1882 strncpy(enumDeviceName, desc, ednLen);
1883 enumDeviceName[ednLen-1] = '\0';
1884 keepSearching = false;
1885 // Replace '\n' with '-'.
1886 char * pret = strchr(enumDeviceName, '\n'/*0xa*/); //LF
1887 if (pret)
1888 *pret = '-';
1889 }
1890 if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
1891 (enumDeviceNo == enumCount))
1892 {
1893 // We have found the enum device, copy the name to buffer.
1894 strncpy(enumDeviceName, name, ednLen);
1895 enumDeviceName[ednLen-1] = '\0';
1896 keepSearching = false;
1897 }
1898
1899 if (keepSearching)
1900 ++enumCount;
1901
1902 if (desc != name)
1903 free(desc);
1904 }
1905
1906 free(name);
1907
1908 if (!keepSearching)
1909 break;
1910 }
1911
1912 err = LATE(snd_device_name_free_hint)(hints);
1913 if (err != 0)
1914 {
1915 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1916 "GetDevicesInfo - device name free hint error: %s",
1917 LATE(snd_strerror)(err));
1918 // Continue and return true anyway, since we did get the whole list.
1919 }
1920 }
1921
1922 if (FUNC_GET_NUM_OF_DEVICE == function)
1923 {
1924 if (enumCount == 1) // only default?
1925 enumCount = 0;
1926 return enumCount; // Normal return point for function 0
1927 }
1928
1929 if (keepSearching)
1930 {
1931 // If we get here for function 1 and 2, we didn't find the specified
1932 // enum device.
1933 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1934 "GetDevicesInfo - Could not find device name or numbers");
1935 return -1;
1936 }
1937
1938 return 0;
1939 }
1940
1941 int32_t AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const
1942 {
1943 if (_handleRecord == NULL)
1944 {
1945 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1946 " input state has been modified during unlocked period");
1947 return -1;
1948 }
1949 return 0;
1950 }
1951
1952 int32_t AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const
1953 {
1954 if (_handlePlayout == NULL)
1955 {
1956 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1957 " output state has been modified during unlocked period");
1958 return -1;
1959 }
1960 return 0;
1961 }
1962
1963 int32_t AudioDeviceLinuxALSA::ErrorRecovery(int32_t error,
1964 snd_pcm_t* deviceHandle)
1965 {
1966 int st = LATE(snd_pcm_state)(deviceHandle);
1967 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1968                  "Trying to recover from %s error: %s (%d) (state %d)",
1969 (LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE) ?
1970 "capture" : "playout", LATE(snd_strerror)(error), error, st);
1971
1972 // It is recommended to use snd_pcm_recover for all errors. If that function
1973 // cannot handle the error, the input error code will be returned, otherwise
1974 // 0 is returned. From snd_pcm_recover API doc: "This functions handles
1975 // -EINTR (4) (interrupted system call), -EPIPE (32) (playout overrun or
1976 // capture underrun) and -ESTRPIPE (86) (stream is suspended) error codes
1977 // trying to prepare given stream for next I/O."
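    // Typical flow (illustrative): snd_pcm_writei() in PlayThreadProcess()
    // returns -EPIPE after an underrun, the thread calls ErrorRecovery(),
    // snd_pcm_recover() re-prepares the stream, and the next 10 ms write
    // continues as if nothing had happened.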
1978
1979 /** Open */
1980 // SND_PCM_STATE_OPEN = 0,
1981 /** Setup installed */
1982 // SND_PCM_STATE_SETUP,
1983 /** Ready to start */
1984 // SND_PCM_STATE_PREPARED,
1985 /** Running */
1986 // SND_PCM_STATE_RUNNING,
1987 /** Stopped: underrun (playback) or overrun (capture) detected */
1988 // SND_PCM_STATE_XRUN,= 4
1989 /** Draining: running (playback) or stopped (capture) */
1990 // SND_PCM_STATE_DRAINING,
1991 /** Paused */
1992 // SND_PCM_STATE_PAUSED,
1993 /** Hardware is suspended */
1994 // SND_PCM_STATE_SUSPENDED,
1995     /** Hardware is disconnected */
1996 // SND_PCM_STATE_DISCONNECTED,
1997 // SND_PCM_STATE_LAST = SND_PCM_STATE_DISCONNECTED
1998
1999 // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
2000 // in Sthlm lab.
2001
2002 int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
2003 if (0 == res)
2004 {
2005 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2006 " Recovery - snd_pcm_recover OK");
2007
2008 if ((error == -EPIPE || error == -ESTRPIPE) && // Buf underrun/overrun.
2009 _recording &&
2010 LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE)
2011 {
2012 // For capture streams we also have to repeat the explicit start()
2013 // to get data flowing again.
2014 int err = LATE(snd_pcm_start)(deviceHandle);
2015 if (err != 0)
2016 {
2017 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2018                          " Recovery - snd_pcm_start error: %d", err);
2019 return -1;
2020 }
2021 }
2022
2023 if ((error == -EPIPE || error == -ESTRPIPE) && // Buf underrun/overrun.
2024 _playing &&
2025 LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_PLAYBACK)
2026 {
2027             // For playback streams we also have to repeat the explicit start()
2028             // to get data flowing again.
2029 int err = LATE(snd_pcm_start)(deviceHandle);
2030 if (err != 0)
2031 {
2032 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2033 " Recovery - snd_pcm_start error: %s",
2034 LATE(snd_strerror)(err));
2035 return -1;
2036 }
2037 }
2038
2039 return -EPIPE == error ? 1 : 0;
2040 }
2041 else {
2042 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2043 " Unrecoverable alsa stream error: %d", res);
2044 }
2045
2046 return res;
2047 }
2048
2049 // ============================================================================
2050 // Thread Methods
2051 // ============================================================================
2052
2053 bool AudioDeviceLinuxALSA::PlayThreadFunc(void* pThis)
2054 {
2055 return (static_cast<AudioDeviceLinuxALSA*>(pThis)->PlayThreadProcess());
2056 }
2057
2058 bool AudioDeviceLinuxALSA::RecThreadFunc(void* pThis)
2059 {
2060 return (static_cast<AudioDeviceLinuxALSA*>(pThis)->RecThreadProcess());
2061 }
2062
2063 bool AudioDeviceLinuxALSA::PlayThreadProcess()
2064 {
2065 if(!_playing)
2066 return false;
2067
2068 int err;
2069 snd_pcm_sframes_t frames;
2070 snd_pcm_sframes_t avail_frames;
2071
2072 Lock();
2073     // Returns the number of frames ready to be written, or a negative error code.
2074 avail_frames = LATE(snd_pcm_avail_update)(_handlePlayout);
2075 if (avail_frames < 0)
2076 {
2077 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2078 "playout snd_pcm_avail_update error: %s",
2079 LATE(snd_strerror)(avail_frames));
2080 ErrorRecovery(avail_frames, _handlePlayout);
2081 UnLock();
2082 return true;
2083 }
2084 else if (avail_frames == 0)
2085 {
2086 UnLock();
2087
2088         // maximum time in milliseconds to wait, a negative value means infinity
2089 err = LATE(snd_pcm_wait)(_handlePlayout, 2);
2090 if (err == 0)
2091         { // timeout occurred
2092 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
2093 "playout snd_pcm_wait timeout");
2094 }
2095
2096 return true;
2097 }
2098
2099 if (_playoutFramesLeft <= 0)
2100 {
2101 UnLock();
2102 _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
2103 Lock();
2104
2105 _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
2106 assert(_playoutFramesLeft == _playoutFramesIn10MS);
2107 }
2108
2109 if (static_cast<uint32_t>(avail_frames) > _playoutFramesLeft)
2110 avail_frames = _playoutFramesLeft;
2111
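    // _playoutFramesLeft counts down as we write; the offset below points at
    // the first byte of the 10 ms buffer that has not been written yet.
    // Illustrative numbers at 48 kHz stereo S16: buffer = 1920 bytes; with
    // 240 frames (960 bytes) left, writing starts at byte 1920 - 960 = 960.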
2112 int size = LATE(snd_pcm_frames_to_bytes)(_handlePlayout,
2113 _playoutFramesLeft);
2114 frames = LATE(snd_pcm_writei)(
2115 _handlePlayout,
2116 &_playoutBuffer[_playoutBufferSizeIn10MS - size],
2117 avail_frames);
2118
2119 if (frames < 0)
2120 {
2121 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
2122 "playout snd_pcm_writei error: %s",
2123 LATE(snd_strerror)(frames));
2124 _playoutFramesLeft = 0;
2125 ErrorRecovery(frames, _handlePlayout);
2126 UnLock();
2127 return true;
2128 }
2129 else {
2130 assert(frames==avail_frames);
2131 _playoutFramesLeft -= frames;
2132 }
2133
2134 UnLock();
2135 return true;
2136 }
2137
2138 bool AudioDeviceLinuxALSA::RecThreadProcess()
2139 {
2140 if (!_recording)
2141 return false;
2142
2143 int err;
2144 snd_pcm_sframes_t frames;
2145 snd_pcm_sframes_t avail_frames;
2146 int8_t buffer[_recordingBufferSizeIn10MS];
2147
2148 Lock();
2149
2150     // Returns the number of frames ready to be read, or a negative error code.
2151 avail_frames = LATE(snd_pcm_avail_update)(_handleRecord);
2152 if (avail_frames < 0)
2153 {
2154 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2155 "capture snd_pcm_avail_update error: %s",
2156 LATE(snd_strerror)(avail_frames));
2157 ErrorRecovery(avail_frames, _handleRecord);
2158 UnLock();
2159 return true;
2160 }
2161 else if (avail_frames == 0)
2162 { // no frame is available now
2163 UnLock();
2164
2165 //maximum time in milliseconds to wait, a negative value means infinity
2166 err = LATE(snd_pcm_wait)(_handleRecord,
2167 ALSA_CAPTURE_WAIT_TIMEOUT);
2168         if (err == 0) // timeout occurred
2169 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
2170 "capture snd_pcm_wait timeout");
2171
2172 return true;
2173 }
2174
2175 if (static_cast<uint32_t>(avail_frames) > _recordingFramesLeft)
2176 avail_frames = _recordingFramesLeft;
2177
2178 frames = LATE(snd_pcm_readi)(_handleRecord,
2179                                  buffer, avail_frames); // frames to be read
2180 if (frames < 0)
2181 {
2182 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2183 "capture snd_pcm_readi error: %s",
2184 LATE(snd_strerror)(frames));
2185 ErrorRecovery(frames, _handleRecord);
2186 UnLock();
2187 return true;
2188 }
2189 else if (frames > 0)
2190 {
2191 assert(frames == avail_frames);
2192
2193 int left_size = LATE(snd_pcm_frames_to_bytes)(_handleRecord,
2194 _recordingFramesLeft);
2195 int size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, frames);
2196
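        // Append the newly read frames at the current fill position of the
        // 10 ms buffer (illustrative: at 48 kHz stereo S16 with 240 frames
        // still missing, left_size = 960 bytes, so the copy lands at offset
        // 1920 - 960 = 960).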
2197 memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size],
2198 buffer, size);
2199 _recordingFramesLeft -= frames;
2200
2201 if (!_recordingFramesLeft)
2202 { // buf is full
2203 _recordingFramesLeft = _recordingFramesIn10MS;
2204
2205 // store the recorded buffer (no action will be taken if the
2206 // #recorded samples is not a full buffer)
2207 _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
2208 _recordingFramesIn10MS);
2209
2210 uint32_t currentMicLevel = 0;
2211 uint32_t newMicLevel = 0;
2212
2213 if (AGC())
2214 {
2215 // store current mic level in the audio buffer if AGC is enabled
2216 if (MicrophoneVolume(currentMicLevel) == 0)
2217 {
2218 if (currentMicLevel == 0xffffffff)
2219 currentMicLevel = 100;
2220 // this call does not affect the actual microphone volume
2221 _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
2222 }
2223 }
2224
2225 // calculate delay
2226 _playoutDelay = 0;
2227 _recordingDelay = 0;
2228 if (_handlePlayout)
2229 {
2230 err = LATE(snd_pcm_delay)(_handlePlayout,
2231 &_playoutDelay); // returned delay in frames
2232 if (err < 0)
2233 {
2234 // TODO(xians): Shall we call ErrorRecovery() here?
2235 _playoutDelay = 0;
2236 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2237 "playout snd_pcm_delay: %s",
2238 LATE(snd_strerror)(err));
2239 }
2240 }
2241
2242 err = LATE(snd_pcm_delay)(_handleRecord,
2243 &_recordingDelay); // returned delay in frames
2244 if (err < 0)
2245 {
2246 // TODO(xians): Shall we call ErrorRecovery() here?
2247 _recordingDelay = 0;
2248 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2249 "capture snd_pcm_delay: %s",
2250 LATE(snd_strerror)(err));
2251 }
2252
2253 // TODO(xians): Shall we add 10ms buffer delay to the record delay?
2254 _ptrAudioBuffer->SetVQEData(
2255 _playoutDelay * 1000 / _playoutFreq,
2256 _recordingDelay * 1000 / _recordingFreq, 0);
2257
2258 _ptrAudioBuffer->SetTypingStatus(KeyPressed());
2259
2260 // Deliver recorded samples at specified sample rate, mic level etc.
2261 // to the observer using callback.
2262 UnLock();
2263 _ptrAudioBuffer->DeliverRecordedData();
2264 Lock();
2265
2266 if (AGC())
2267 {
2268 newMicLevel = _ptrAudioBuffer->NewMicLevel();
2269 if (newMicLevel != 0)
2270 {
2271 // The VQE will only deliver non-zero microphone levels when a
2272 // change is needed. Set this new mic level (received from the
2273 // observer as return value in the callback).
2274 if (SetMicrophoneVolume(newMicLevel) == -1)
2275 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2276 " the required modification of the "
2277 "microphone volume failed");
2278 }
2279 }
2280 }
2281 }
2282
2283 UnLock();
2284 return true;
2285 }
2286
2287
2288 bool AudioDeviceLinuxALSA::KeyPressed() const {
2289 #if defined(USE_X11)
2290 char szKey[32];
2291 unsigned int i = 0;
2292 char state = 0;
2293
2294 if (!_XDisplay)
2295 return false;
2296
2297 // Check key map status
2298 XQueryKeymap(_XDisplay, szKey);
2299
2300     // A bit that flips to 1 in the keymap means a key was newly pressed
2301 for (i = 0; i < sizeof(szKey); i++)
2302 state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
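    // Illustrative example: old byte 0b00000100, new byte 0b00000110 ->
    // (new ^ old) & new == 0b00000010, so only the bit of the newly pressed
    // key survives; keys that stayed down or were just released are ignored.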
2303
2304 // Save old state
2305 memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
2306 return (state != 0);
2307 #else
2308 return false;
2309 #endif
2310 }
2311 } // namespace webrtc
2312