1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include <assert.h>
12 
13 #include "webrtc/modules/audio_device/audio_device_config.h"
14 #include "webrtc/modules/audio_device/linux/audio_device_alsa_linux.h"
15 
16 #include "webrtc/system_wrappers/include/event_wrapper.h"
17 #include "webrtc/system_wrappers/include/sleep.h"
18 #include "webrtc/system_wrappers/include/trace.h"
19 
20 webrtc_adm_linux_alsa::AlsaSymbolTable AlsaSymbolTable;
21 
22 // Accesses ALSA functions through our late-binding symbol table instead of
23 // directly. This way we don't have to link to libasound, which means our binary
24 // will work on systems that don't have it.
25 #define LATE(sym) \
26   LATESYM_GET(webrtc_adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym)
27 
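// Illustrative example (the variable names here are only for this comment,
// not code from this file): a direct ALSA call such as
//
//   snd_pcm_t* handle = NULL;
//   int err = snd_pcm_open(&handle, "default", SND_PCM_STREAM_PLAYBACK, 0);
//
// is written throughout this file as
//
//   int err = LATE(snd_pcm_open)(&handle, "default", SND_PCM_STREAM_PLAYBACK, 0);
//
// so the symbol is resolved through AlsaSymbolTable at run time instead of
// by the dynamic linker at load time.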
28 // Redefine these here to be able to do late-binding
29 #undef snd_ctl_card_info_alloca
30 #define snd_ctl_card_info_alloca(ptr) \
31         do { *ptr = (snd_ctl_card_info_t *) \
32             __builtin_alloca (LATE(snd_ctl_card_info_sizeof)()); \
33             memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); } while (0)
34 
35 #undef snd_pcm_info_alloca
36 #define snd_pcm_info_alloca(pInfo) \
37        do { *pInfo = (snd_pcm_info_t *) \
38        __builtin_alloca (LATE(snd_pcm_info_sizeof)()); \
39        memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)()); } while (0)
40 
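// Example usage of the redefined alloca macros (illustrative only; the
// variable name is arbitrary): the caller passes the address of a pointer
// and the macro stack-allocates and zeroes the struct via the late-bound
// *_sizeof() helpers.
//
//   snd_pcm_info_t* pcmInfo = NULL;
//   snd_pcm_info_alloca(&pcmInfo);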
41 // snd_lib_error_handler_t
42 void WebrtcAlsaErrorHandler(const char *file,
43                           int line,
44                           const char *function,
45                           int err,
46                           const char *fmt,...){};
47 
48 namespace webrtc
49 {
50 static const unsigned int ALSA_PLAYOUT_FREQ = 48000;
51 static const unsigned int ALSA_PLAYOUT_CH = 2;
52 static const unsigned int ALSA_PLAYOUT_LATENCY = 40*1000; // in us
53 static const unsigned int ALSA_CAPTURE_FREQ = 48000;
54 static const unsigned int ALSA_CAPTURE_CH = 2;
55 static const unsigned int ALSA_CAPTURE_LATENCY = 40*1000; // in us
56 static const unsigned int ALSA_CAPTURE_WAIT_TIMEOUT = 5; // in ms
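// Taken together, these defaults request 48 kHz stereo streams in both
// directions with a 40 ms overall latency target (the value later passed to
// snd_pcm_set_params() in InitPlayout()/InitRecording()). Audio is then
// moved in 10 ms chunks, i.e. 48000 / 100 = 480 frames per chunk.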
57 
58 #define FUNC_GET_NUM_OF_DEVICE 0
59 #define FUNC_GET_DEVICE_NAME 1
60 #define FUNC_GET_DEVICE_NAME_FOR_AN_ENUM 2
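// These selectors are the first argument to GetDevicesInfo() below:
// FUNC_GET_NUM_OF_DEVICE counts the usable devices,
// FUNC_GET_DEVICE_NAME copies the human-readable description for an index,
// and FUNC_GET_DEVICE_NAME_FOR_AN_ENUM copies the raw ALSA device string
// that is later handed to snd_pcm_open() and the mixer manager.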
61 
62 AudioDeviceLinuxALSA::AudioDeviceLinuxALSA(const int32_t id) :
63     _ptrAudioBuffer(NULL),
64     _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
65     _id(id),
66     _mixerManager(id),
67     _inputDeviceIndex(0),
68     _outputDeviceIndex(0),
69     _inputDeviceIsSpecified(false),
70     _outputDeviceIsSpecified(false),
71     _handleRecord(NULL),
72     _handlePlayout(NULL),
73     _recordingBuffersizeInFrame(0),
74     _recordingPeriodSizeInFrame(0),
75     _playoutBufferSizeInFrame(0),
76     _playoutPeriodSizeInFrame(0),
77     _recordingBufferSizeIn10MS(0),
78     _playoutBufferSizeIn10MS(0),
79     _recordingFramesIn10MS(0),
80     _playoutFramesIn10MS(0),
81     _recordingFreq(ALSA_CAPTURE_FREQ),
82     _playoutFreq(ALSA_PLAYOUT_FREQ),
83     _recChannels(ALSA_CAPTURE_CH),
84     _playChannels(ALSA_PLAYOUT_CH),
85     _recordingBuffer(NULL),
86     _playoutBuffer(NULL),
87     _recordingFramesLeft(0),
88     _playoutFramesLeft(0),
89     _playBufType(AudioDeviceModule::kFixedBufferSize),
90     _initialized(false),
91     _recording(false),
92     _playing(false),
93     _recIsInitialized(false),
94     _playIsInitialized(false),
95     _AGC(false),
96     _recordingDelay(0),
97     _playoutDelay(0),
98     _playWarning(0),
99     _playError(0),
100     _recWarning(0),
101     _recError(0),
102     _playBufDelay(80),
103     _playBufDelayFixed(80)
104 {
105     memset(_oldKeyState, 0, sizeof(_oldKeyState));
106     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
107                  "%s created", __FUNCTION__);
108 }
109 
110 // ----------------------------------------------------------------------------
111 //  AudioDeviceLinuxALSA - dtor
112 // ----------------------------------------------------------------------------
113 
114 AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA()
115 {
116     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
117                  "%s destroyed", __FUNCTION__);
118 
119     Terminate();
120 
121     // Clean up the recording buffer and playout buffer.
122     if (_recordingBuffer)
123     {
124         delete [] _recordingBuffer;
125         _recordingBuffer = NULL;
126     }
127     if (_playoutBuffer)
128     {
129         delete [] _playoutBuffer;
130         _playoutBuffer = NULL;
131     }
132     delete &_critSect;
133 }
134 
135 void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
136 {
137 
138     CriticalSectionScoped lock(&_critSect);
139 
140     _ptrAudioBuffer = audioBuffer;
141 
142     // Inform the AudioBuffer about default settings for this implementation.
143     // Set all values to zero here since the actual settings will be done by
144     // InitPlayout and InitRecording later.
145     _ptrAudioBuffer->SetRecordingSampleRate(0);
146     _ptrAudioBuffer->SetPlayoutSampleRate(0);
147     _ptrAudioBuffer->SetRecordingChannels(0);
148     _ptrAudioBuffer->SetPlayoutChannels(0);
149 }
150 
151 int32_t AudioDeviceLinuxALSA::ActiveAudioLayer(
152     AudioDeviceModule::AudioLayer& audioLayer) const
153 {
154     audioLayer = AudioDeviceModule::kLinuxAlsaAudio;
155     return 0;
156 }
157 
158 int32_t AudioDeviceLinuxALSA::Init()
159 {
160 
161     CriticalSectionScoped lock(&_critSect);
162 
163     // Load libasound
164     if (!AlsaSymbolTable.Load())
165     {
166         // Alsa is not installed on
167         // this system
168         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
169                    "  failed to load symbol table");
170         return -1;
171     }
172 
173     if (_initialized)
174     {
175         return 0;
176     }
177 #if defined(USE_X11)
178     //Get X display handle for typing detection
179     _XDisplay = XOpenDisplay(NULL);
180     if (!_XDisplay)
181     {
182         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
183           "  failed to open X display, typing detection will not work");
184     }
185 #endif
186     _playWarning = 0;
187     _playError = 0;
188     _recWarning = 0;
189     _recError = 0;
190 
191     _initialized = true;
192 
193     return 0;
194 }
195 
196 int32_t AudioDeviceLinuxALSA::Terminate()
197 {
198     if (!_initialized)
199     {
200         return 0;
201     }
202 
203     CriticalSectionScoped lock(&_critSect);
204 
205     _mixerManager.Close();
206 
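    // Note: the lock is released around Stop() below; the capture and
    // playout thread functions are assumed to take _critSect themselves, so
    // joining a thread while holding the lock could deadlock.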
207     // RECORDING
208     if (_ptrThreadRec)
209     {
210         rtc::PlatformThread* tmpThread = _ptrThreadRec.release();
211         _critSect.Leave();
212 
213         tmpThread->Stop();
214         delete tmpThread;
215 
216         _critSect.Enter();
217     }
218 
219     // PLAYOUT
220     if (_ptrThreadPlay)
221     {
222         rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();
223         _critSect.Leave();
224 
225         tmpThread->Stop();
226         delete tmpThread;
227 
228         _critSect.Enter();
229     }
230 #if defined(USE_X11)
231     if (_XDisplay)
232     {
233       XCloseDisplay(_XDisplay);
234       _XDisplay = NULL;
235     }
236 #endif
237     _initialized = false;
238     _outputDeviceIsSpecified = false;
239     _inputDeviceIsSpecified = false;
240 
241     return 0;
242 }
243 
244 bool AudioDeviceLinuxALSA::Initialized() const
245 {
246     return (_initialized);
247 }
248 
249 int32_t AudioDeviceLinuxALSA::InitSpeaker()
250 {
251 
252     CriticalSectionScoped lock(&_critSect);
253 
254     if (_playing)
255     {
256         return -1;
257     }
258 
259     char devName[kAdmMaxDeviceNameSize] = {0};
260     GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize);
261     return _mixerManager.OpenSpeaker(devName);
262 }
263 
264 int32_t AudioDeviceLinuxALSA::InitMicrophone()
265 {
266 
267     CriticalSectionScoped lock(&_critSect);
268 
269     if (_recording)
270     {
271         return -1;
272     }
273 
274     char devName[kAdmMaxDeviceNameSize] = {0};
275     GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize);
276     return _mixerManager.OpenMicrophone(devName);
277 }
278 
279 bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const
280 {
281     return (_mixerManager.SpeakerIsInitialized());
282 }
283 
284 bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const
285 {
286     return (_mixerManager.MicrophoneIsInitialized());
287 }
288 
289 int32_t AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available)
290 {
291 
292     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
293 
294     // Make an attempt to open up the
295     // output mixer corresponding to the currently selected output device.
296     if (!wasInitialized && InitSpeaker() == -1)
297     {
298         // If we end up here it means that the selected speaker has no volume
299         // control.
300         available = false;
301         return 0;
302     }
303 
304     // Given that InitSpeaker was successful, we know that a volume control
305     // exists
306     available = true;
307 
308     // Close the initialized output mixer
309     if (!wasInitialized)
310     {
311         _mixerManager.CloseSpeaker();
312     }
313 
314     return 0;
315 }
316 
317 int32_t AudioDeviceLinuxALSA::SetSpeakerVolume(uint32_t volume)
318 {
319 
320     return (_mixerManager.SetSpeakerVolume(volume));
321 }
322 
323 int32_t AudioDeviceLinuxALSA::SpeakerVolume(uint32_t& volume) const
324 {
325 
326     uint32_t level(0);
327 
328     if (_mixerManager.SpeakerVolume(level) == -1)
329     {
330         return -1;
331     }
332 
333     volume = level;
334 
335     return 0;
336 }
337 
338 
339 int32_t AudioDeviceLinuxALSA::SetWaveOutVolume(uint16_t volumeLeft,
340                                                uint16_t volumeRight)
341 {
342 
343     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
344                  "  API call not supported on this platform");
345     return -1;
346 }
347 
348 int32_t AudioDeviceLinuxALSA::WaveOutVolume(
349     uint16_t& /*volumeLeft*/,
350     uint16_t& /*volumeRight*/) const
351 {
352 
353     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
354                  "  API call not supported on this platform");
355     return -1;
356 }
357 
358 int32_t AudioDeviceLinuxALSA::MaxSpeakerVolume(
359     uint32_t& maxVolume) const
360 {
361 
362     uint32_t maxVol(0);
363 
364     if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
365     {
366         return -1;
367     }
368 
369     maxVolume = maxVol;
370 
371     return 0;
372 }
373 
374 int32_t AudioDeviceLinuxALSA::MinSpeakerVolume(
375     uint32_t& minVolume) const
376 {
377 
378     uint32_t minVol(0);
379 
380     if (_mixerManager.MinSpeakerVolume(minVol) == -1)
381     {
382         return -1;
383     }
384 
385     minVolume = minVol;
386 
387     return 0;
388 }
389 
390 int32_t AudioDeviceLinuxALSA::SpeakerVolumeStepSize(
391     uint16_t& stepSize) const
392 {
393 
394     uint16_t delta(0);
395 
396     if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
397     {
398         return -1;
399     }
400 
401     stepSize = delta;
402 
403     return 0;
404 }
405 
406 int32_t AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available)
407 {
408 
409     bool isAvailable(false);
410     bool wasInitialized = _mixerManager.SpeakerIsInitialized();
411 
412     // Make an attempt to open up the
413     // output mixer corresponding to the currently selected output device.
414     //
415     if (!wasInitialized && InitSpeaker() == -1)
416     {
417         // If we end up here it means that the selected speaker has no volume
418         // control, hence it is safe to state that there is no mute control
419         // already at this stage.
420         available = false;
421         return 0;
422     }
423 
424     // Check if the selected speaker has a mute control
425     _mixerManager.SpeakerMuteIsAvailable(isAvailable);
426 
427     available = isAvailable;
428 
429     // Close the initialized output mixer
430     if (!wasInitialized)
431     {
432         _mixerManager.CloseSpeaker();
433     }
434 
435     return 0;
436 }
437 
438 int32_t AudioDeviceLinuxALSA::SetSpeakerMute(bool enable)
439 {
440     return (_mixerManager.SetSpeakerMute(enable));
441 }
442 
443 int32_t AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const
444 {
445 
446     bool muted(0);
447 
448     if (_mixerManager.SpeakerMute(muted) == -1)
449     {
450         return -1;
451     }
452 
453     enabled = muted;
454 
455     return 0;
456 }
457 
458 int32_t AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available)
459 {
460 
461     bool isAvailable(false);
462     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
463 
464     // Make an attempt to open up the
465     // input mixer corresponding to the currently selected input device.
466     //
467     if (!wasInitialized && InitMicrophone() == -1)
468     {
469         // If we end up here it means that the selected microphone has no volume
470         // control, hence it is safe to state that there is no mute control
471         // already at this stage.
472         available = false;
473         return 0;
474     }
475 
476     // Check if the selected microphone has a mute control
477     //
478     _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
479     available = isAvailable;
480 
481     // Close the initialized input mixer
482     //
483     if (!wasInitialized)
484     {
485         _mixerManager.CloseMicrophone();
486     }
487 
488     return 0;
489 }
490 
491 int32_t AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable)
492 {
493     return (_mixerManager.SetMicrophoneMute(enable));
494 }
495 
496 // ----------------------------------------------------------------------------
497 //  MicrophoneMute
498 // ----------------------------------------------------------------------------
499 
500 int32_t AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const
501 {
502 
503     bool muted(0);
504 
505     if (_mixerManager.MicrophoneMute(muted) == -1)
506     {
507         return -1;
508     }
509 
510     enabled = muted;
511     return 0;
512 }
513 
514 int32_t AudioDeviceLinuxALSA::MicrophoneBoostIsAvailable(bool& available)
515 {
516 
517     bool isAvailable(false);
518     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
519 
520     // Enumerate all available microphones and make an attempt to open up the
521     // input mixer corresponding to the currently selected input device.
522     //
523     if (!wasInitialized && InitMicrophone() == -1)
524     {
525         // If we end up here it means that the selected microphone has no volume
526         // control, hence it is safe to state that there is no boost control
527         // already at this stage.
528         available = false;
529         return 0;
530     }
531 
532     // Check if the selected microphone has a boost control
533     _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
534     available = isAvailable;
535 
536     // Close the initialized input mixer
537     if (!wasInitialized)
538     {
539         _mixerManager.CloseMicrophone();
540     }
541 
542     return 0;
543 }
544 
545 int32_t AudioDeviceLinuxALSA::SetMicrophoneBoost(bool enable)
546 {
547 
548     return (_mixerManager.SetMicrophoneBoost(enable));
549 }
550 
551 int32_t AudioDeviceLinuxALSA::MicrophoneBoost(bool& enabled) const
552 {
553 
554     bool onOff(0);
555 
556     if (_mixerManager.MicrophoneBoost(onOff) == -1)
557     {
558         return -1;
559     }
560 
561     enabled = onOff;
562 
563     return 0;
564 }
565 
566 int32_t AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available)
567 {
568 
569     CriticalSectionScoped lock(&_critSect);
570 
571     // If we already have initialized in stereo it's obviously available
572     if (_recIsInitialized && (2 == _recChannels))
573     {
574         available = true;
575         return 0;
576     }
577 
578     // Save rec states and the number of rec channels
579     bool recIsInitialized = _recIsInitialized;
580     bool recording = _recording;
581     int recChannels = _recChannels;
582 
583     available = false;
584 
585     // Stop/uninitialize recording if initialized (and possibly started)
586     if (_recIsInitialized)
587     {
588         StopRecording();
589     }
590 
591     // Try init in stereo;
592     _recChannels = 2;
593     if (InitRecording() == 0)
594     {
595         available = true;
596     }
597 
598     // Stop/uninitialize recording
599     StopRecording();
600 
601     // Recover previous states
602     _recChannels = recChannels;
603     if (recIsInitialized)
604     {
605         InitRecording();
606     }
607     if (recording)
608     {
609         StartRecording();
610     }
611 
612     return 0;
613 }
614 
615 int32_t AudioDeviceLinuxALSA::SetStereoRecording(bool enable)
616 {
617 
618     if (enable)
619         _recChannels = 2;
620     else
621         _recChannels = 1;
622 
623     return 0;
624 }
625 
626 int32_t AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const
627 {
628 
629     if (_recChannels == 2)
630         enabled = true;
631     else
632         enabled = false;
633 
634     return 0;
635 }
636 
637 int32_t AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available)
638 {
639 
640     CriticalSectionScoped lock(&_critSect);
641 
642     // If we already have initialized in stereo it's obviously available
643     if (_playIsInitialized && (2 == _playChannels))
644     {
645         available = true;
646         return 0;
647     }
648 
649     // Save playout states and the number of play channels
650     bool playIsInitialized = _playIsInitialized;
651     bool playing = _playing;
652     int playChannels = _playChannels;
653 
654     available = false;
655 
656     // Stop/uninitialize playout if initialized (and possibly started)
657     if (_playIsInitialized)
658     {
659         StopPlayout();
660     }
661 
662     // Try init in stereo;
663     _playChannels = 2;
664     if (InitPlayout() == 0)
665     {
666         available = true;
667     }
668 
669     // Stop/uninitialize playout
670     StopPlayout();
671 
672     // Recover previous states
673     _playChannels = playChannels;
674     if (playIsInitialized)
675     {
676         InitPlayout();
677     }
678     if (playing)
679     {
680         StartPlayout();
681     }
682 
683     return 0;
684 }
685 
686 int32_t AudioDeviceLinuxALSA::SetStereoPlayout(bool enable)
687 {
688 
689     if (enable)
690         _playChannels = 2;
691     else
692         _playChannels = 1;
693 
694     return 0;
695 }
696 
697 int32_t AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const
698 {
699 
700     if (_playChannels == 2)
701         enabled = true;
702     else
703         enabled = false;
704 
705     return 0;
706 }
707 
708 int32_t AudioDeviceLinuxALSA::SetAGC(bool enable)
709 {
710 
711     _AGC = enable;
712 
713     return 0;
714 }
715 
716 bool AudioDeviceLinuxALSA::AGC() const
717 {
718 
719     return _AGC;
720 }
721 
722 int32_t AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available)
723 {
724 
725     bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
726 
727     // Make an attempt to open up the
728     // input mixer corresponding to the currently selected input device.
729     if (!wasInitialized && InitMicrophone() == -1)
730     {
731         // If we end up here it means that the selected microphone has no volume
732         // control.
733         available = false;
734         return 0;
735     }
736 
737     // Given that InitMicrophone was successful, we know that a volume control
738     // exists
739     available = true;
740 
741     // Close the initialized input mixer
742     if (!wasInitialized)
743     {
744         _mixerManager.CloseMicrophone();
745     }
746 
747     return 0;
748 }
749 
750 int32_t AudioDeviceLinuxALSA::SetMicrophoneVolume(uint32_t volume)
751 {
752 
753     return (_mixerManager.SetMicrophoneVolume(volume));
754 
755     return 0;
756 }
757 
758 int32_t AudioDeviceLinuxALSA::MicrophoneVolume(uint32_t& volume) const
759 {
760 
761     uint32_t level(0);
762 
763     if (_mixerManager.MicrophoneVolume(level) == -1)
764     {
765         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
766                      "  failed to retrieve current microphone level");
767         return -1;
768     }
769 
770     volume = level;
771 
772     return 0;
773 }
774 
775 int32_t AudioDeviceLinuxALSA::MaxMicrophoneVolume(
776     uint32_t& maxVolume) const
777 {
778 
779     uint32_t maxVol(0);
780 
781     if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
782     {
783         return -1;
784     }
785 
786     maxVolume = maxVol;
787 
788     return 0;
789 }
790 
791 int32_t AudioDeviceLinuxALSA::MinMicrophoneVolume(
792     uint32_t& minVolume) const
793 {
794 
795     uint32_t minVol(0);
796 
797     if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
798     {
799         return -1;
800     }
801 
802     minVolume = minVol;
803 
804     return 0;
805 }
806 
807 int32_t AudioDeviceLinuxALSA::MicrophoneVolumeStepSize(
808     uint16_t& stepSize) const
809 {
810 
811     uint16_t delta(0);
812 
813     if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
814     {
815         return -1;
816     }
817 
818     stepSize = delta;
819 
820     return 0;
821 }
822 
823 int16_t AudioDeviceLinuxALSA::PlayoutDevices()
824 {
825 
826     return (int16_t)GetDevicesInfo(0, true);
827 }
828 
829 int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(uint16_t index)
830 {
831 
832     if (_playIsInitialized)
833     {
834         return -1;
835     }
836 
837     uint32_t nDevices = GetDevicesInfo(0, true);
838     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
839                  "  number of available audio output devices is %u", nDevices);
840 
841     if (index > (nDevices-1))
842     {
843         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
844                      "  device index is out of range [0,%u]", (nDevices-1));
845         return -1;
846     }
847 
848     _outputDeviceIndex = index;
849     _outputDeviceIsSpecified = true;
850 
851     return 0;
852 }
853 
854 int32_t AudioDeviceLinuxALSA::SetPlayoutDevice(
855     AudioDeviceModule::WindowsDeviceType /*device*/)
856 {
857     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
858                  "WindowsDeviceType not supported");
859     return -1;
860 }
861 
862 int32_t AudioDeviceLinuxALSA::PlayoutDeviceName(
863     uint16_t index,
864     char name[kAdmMaxDeviceNameSize],
865     char guid[kAdmMaxGuidSize])
866 {
867 
868     const uint16_t nDevices(PlayoutDevices());
869 
870     if ((index > (nDevices-1)) || (name == NULL))
871     {
872         return -1;
873     }
874 
875     memset(name, 0, kAdmMaxDeviceNameSize);
876 
877     if (guid != NULL)
878     {
879         memset(guid, 0, kAdmMaxGuidSize);
880     }
881 
882     return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
883 }
884 
885 int32_t AudioDeviceLinuxALSA::RecordingDeviceName(
886     uint16_t index,
887     char name[kAdmMaxDeviceNameSize],
888     char guid[kAdmMaxGuidSize])
889 {
890 
891     const uint16_t nDevices(RecordingDevices());
892 
893     if ((index > (nDevices-1)) || (name == NULL))
894     {
895         return -1;
896     }
897 
898     memset(name, 0, kAdmMaxDeviceNameSize);
899 
900     if (guid != NULL)
901     {
902         memset(guid, 0, kAdmMaxGuidSize);
903     }
904 
905     return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
906 }
907 
908 int16_t AudioDeviceLinuxALSA::RecordingDevices()
909 {
910 
911     return (int16_t)GetDevicesInfo(0, false);
912 }
913 
914 int32_t AudioDeviceLinuxALSA::SetRecordingDevice(uint16_t index)
915 {
916 
917     if (_recIsInitialized)
918     {
919         return -1;
920     }
921 
922     uint32_t nDevices = GetDevicesInfo(0, false);
923     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
924                  "  number of available audio input devices is %u", nDevices);
925 
926     if (index > (nDevices-1))
927     {
928         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
929                      "  device index is out of range [0,%u]", (nDevices-1));
930         return -1;
931     }
932 
933     _inputDeviceIndex = index;
934     _inputDeviceIsSpecified = true;
935 
936     return 0;
937 }
938 
939 // ----------------------------------------------------------------------------
940 //  SetRecordingDevice II (II)
941 // ----------------------------------------------------------------------------
942 
943 int32_t AudioDeviceLinuxALSA::SetRecordingDevice(
944     AudioDeviceModule::WindowsDeviceType /*device*/)
945 {
946     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
947                  "WindowsDeviceType not supported");
948     return -1;
949 }
950 
951 int32_t AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available)
952 {
953 
954     available = false;
955 
956     // Try to initialize the playout side with mono
957     // The user is assumed to set the number of channels after calling this function
958     _playChannels = 1;
959     int32_t res = InitPlayout();
960 
961     // Cancel effect of initialization
962     StopPlayout();
963 
964     if (res != -1)
965     {
966         available = true;
967     }
968     else
969     {
970         // It may be possible to play out in stereo
971         res = StereoPlayoutIsAvailable(available);
972         if (available)
973         {
974             // Then set channels to 2 so InitPlayout doesn't fail
975             _playChannels = 2;
976         }
977     }
978 
979     return res;
980 }
981 
982 int32_t AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available)
983 {
984 
985     available = false;
986 
987     // Try to initialize the recording side with mono
988     // The user is assumed to set the number of channels after calling this function
989     _recChannels = 1;
990     int32_t res = InitRecording();
991 
992     // Cancel effect of initialization
993     StopRecording();
994 
995     if (res != -1)
996     {
997         available = true;
998     }
999     else
1000     {
1001         // It may be possible to record in stereo
1002         res = StereoRecordingIsAvailable(available);
1003         if (available)
1004         {
1005             // Then set channels to 2 so InitRecording doesn't fail
1006             _recChannels = 2;
1007         }
1008     }
1009 
1010     return res;
1011 }
1012 
1013 int32_t AudioDeviceLinuxALSA::InitPlayout()
1014 {
1015 
1016     int errVal = 0;
1017 
1018     CriticalSectionScoped lock(&_critSect);
1019     if (_playing)
1020     {
1021         return -1;
1022     }
1023 
1024     if (!_outputDeviceIsSpecified)
1025     {
1026         return -1;
1027     }
1028 
1029     if (_playIsInitialized)
1030     {
1031         return 0;
1032     }
1033     // Initialize the speaker (devices might have been added or removed)
1034     if (InitSpeaker() == -1)
1035     {
1036         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1037                      "  InitSpeaker() failed");
1038     }
1039 
1040     // Start by closing any existing wave-output devices
1041     //
1042     if (_handlePlayout != NULL)
1043     {
1044         errVal = LATE(snd_pcm_close)(_handlePlayout);
1045         _handlePlayout = NULL;
1046         _playIsInitialized = false;
1047         if (errVal < 0)
1048         {
1049             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1050                          "  Error closing current playout sound device, error:"
1051                          " %s", LATE(snd_strerror)(errVal));
1052         }
1053     }
1054 
1055     // Open PCM device for playout
1056     char deviceName[kAdmMaxDeviceNameSize] = {0};
1057     GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
1058                    kAdmMaxDeviceNameSize);
1059 
1060     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1061                  "  InitPlayout open (%s)", deviceName);
1062 
1063     errVal = LATE(snd_pcm_open)
1064                  (&_handlePlayout,
1065                   deviceName,
1066                   SND_PCM_STREAM_PLAYBACK,
1067                   SND_PCM_NONBLOCK);
1068 
1069     if (errVal == -EBUSY) // Device busy - try some more!
1070     {
1071         for (int i=0; i < 5; i++)
1072         {
1073             SleepMs(1000);
1074             errVal = LATE(snd_pcm_open)
1075                          (&_handlePlayout,
1076                           deviceName,
1077                           SND_PCM_STREAM_PLAYBACK,
1078                           SND_PCM_NONBLOCK);
1079             if (errVal == 0)
1080             {
1081                 break;
1082             }
1083         }
1084     }
1085     if (errVal < 0)
1086     {
1087         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1088                      "     unable to open playback device: %s (%d)",
1089                      LATE(snd_strerror)(errVal),
1090                      errVal);
1091         _handlePlayout = NULL;
1092         return -1;
1093     }
1094 
1095     _playoutFramesIn10MS = _playoutFreq/100;
1096     if ((errVal = LATE(snd_pcm_set_params)( _handlePlayout,
1097 #if defined(WEBRTC_ARCH_BIG_ENDIAN)
1098         SND_PCM_FORMAT_S16_BE,
1099 #else
1100         SND_PCM_FORMAT_S16_LE, //format
1101 #endif
1102         SND_PCM_ACCESS_RW_INTERLEAVED, //access
1103         _playChannels, //channels
1104         _playoutFreq, //rate
1105         1, //soft_resample
1106         ALSA_PLAYOUT_LATENCY //required overall latency in us (40 ms)
1107     )) < 0)
1108     {   /* 0.5sec */
1109         _playoutFramesIn10MS = 0;
1110         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1111                      "     unable to set playback device: %s (%d)",
1112                      LATE(snd_strerror)(errVal),
1113                      errVal);
1114         ErrorRecovery(errVal, _handlePlayout);
1115         errVal = LATE(snd_pcm_close)(_handlePlayout);
1116         _handlePlayout = NULL;
1117         return -1;
1118     }
1119 
1120     errVal = LATE(snd_pcm_get_params)(_handlePlayout,
1121         &_playoutBufferSizeInFrame, &_playoutPeriodSizeInFrame);
1122     if (errVal < 0)
1123     {
1124         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1125                      "    snd_pcm_get_params %s",
1126                      LATE(snd_strerror)(errVal),
1127                      errVal);
1128         _playoutBufferSizeInFrame = 0;
1129         _playoutPeriodSizeInFrame = 0;
1130     }
1131     else {
1132         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1133                      "    playout snd_pcm_get_params "
1134                      "buffer_size:%d period_size :%d",
1135                      _playoutBufferSizeInFrame, _playoutPeriodSizeInFrame);
1136     }
1137 
1138     if (_ptrAudioBuffer)
1139     {
1140         // Update webrtc audio buffer with the selected parameters
1141         _ptrAudioBuffer->SetPlayoutSampleRate(_playoutFreq);
1142         _ptrAudioBuffer->SetPlayoutChannels(_playChannels);
1143     }
1144 
1145     // Set play buffer size
1146     _playoutBufferSizeIn10MS = LATE(snd_pcm_frames_to_bytes)(
1147         _handlePlayout, _playoutFramesIn10MS);
1148 
1149     // Init variables used for play
1150     _playWarning = 0;
1151     _playError = 0;
1152 
1153     if (_handlePlayout != NULL)
1154     {
1155         _playIsInitialized = true;
1156         return 0;
1157     }
1158     else
1159     {
1160         return -1;
1161     }
1162 
1163     return 0;
1164 }
1165 
1166 int32_t AudioDeviceLinuxALSA::InitRecording()
1167 {
1168 
1169     int errVal = 0;
1170 
1171     CriticalSectionScoped lock(&_critSect);
1172 
1173     if (_recording)
1174     {
1175         return -1;
1176     }
1177 
1178     if (!_inputDeviceIsSpecified)
1179     {
1180         return -1;
1181     }
1182 
1183     if (_recIsInitialized)
1184     {
1185         return 0;
1186     }
1187 
1188     // Initialize the microphone (devices might have been added or removed)
1189     if (InitMicrophone() == -1)
1190     {
1191         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1192                    "  InitMicrophone() failed");
1193     }
1194 
1195     // Start by closing any existing pcm-input devices
1196     //
1197     if (_handleRecord != NULL)
1198     {
1199         int errVal = LATE(snd_pcm_close)(_handleRecord);
1200         _handleRecord = NULL;
1201         _recIsInitialized = false;
1202         if (errVal < 0)
1203         {
1204             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1205                          "     Error closing current recording sound device,"
1206                          " error: %s",
1207                          LATE(snd_strerror)(errVal));
1208         }
1209     }
1210 
1211     // Open PCM device for recording
1212     // The corresponding settings for playout are made after the record settings
1213     char deviceName[kAdmMaxDeviceNameSize] = {0};
1214     GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
1215                    kAdmMaxDeviceNameSize);
1216 
1217     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1218                  "InitRecording open (%s)", deviceName);
1219     errVal = LATE(snd_pcm_open)
1220                  (&_handleRecord,
1221                   deviceName,
1222                   SND_PCM_STREAM_CAPTURE,
1223                   SND_PCM_NONBLOCK);
1224 
1225     // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
1226     if (errVal == -EBUSY) // Device busy - try some more!
1227     {
1228         for (int i=0; i < 5; i++)
1229         {
1230             SleepMs(1000);
1231             errVal = LATE(snd_pcm_open)
1232                          (&_handleRecord,
1233                           deviceName,
1234                           SND_PCM_STREAM_CAPTURE,
1235                           SND_PCM_NONBLOCK);
1236             if (errVal == 0)
1237             {
1238                 break;
1239             }
1240         }
1241     }
1242     if (errVal < 0)
1243     {
1244         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1245                      "    unable to open record device: %s",
1246                      LATE(snd_strerror)(errVal));
1247         _handleRecord = NULL;
1248         return -1;
1249     }
1250 
1251     _recordingFramesIn10MS = _recordingFreq/100;
1252     if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
1253 #if defined(WEBRTC_ARCH_BIG_ENDIAN)
1254         SND_PCM_FORMAT_S16_BE, //format
1255 #else
1256         SND_PCM_FORMAT_S16_LE, //format
1257 #endif
1258         SND_PCM_ACCESS_RW_INTERLEAVED, //access
1259         _recChannels, //channels
1260         _recordingFreq, //rate
1261         1, //soft_resample
1262         ALSA_CAPTURE_LATENCY //latency in us
1263     )) < 0)
1264     {
1265          // Fall back to another mode then.
1266          if (_recChannels == 1)
1267            _recChannels = 2;
1268          else
1269            _recChannels = 1;
1270 
1271          if ((errVal = LATE(snd_pcm_set_params)(_handleRecord,
1272 #if defined(WEBRTC_ARCH_BIG_ENDIAN)
1273              SND_PCM_FORMAT_S16_BE, //format
1274 #else
1275              SND_PCM_FORMAT_S16_LE, //format
1276 #endif
1277              SND_PCM_ACCESS_RW_INTERLEAVED, //access
1278              _recChannels, //channels
1279              _recordingFreq, //rate
1280              1, //soft_resample
1281              ALSA_CAPTURE_LATENCY //latency in us
1282          )) < 0)
1283          {
1284              _recordingFramesIn10MS = 0;
1285              WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1286                           "    unable to set record settings: %s (%d)",
1287                           LATE(snd_strerror)(errVal), errVal);
1288              ErrorRecovery(errVal, _handleRecord);
1289              errVal = LATE(snd_pcm_close)(_handleRecord);
1290              _handleRecord = NULL;
1291              return -1;
1292          }
1293     }
1294 
1295     errVal = LATE(snd_pcm_get_params)(_handleRecord,
1296         &_recordingBuffersizeInFrame, &_recordingPeriodSizeInFrame);
1297     if (errVal < 0)
1298     {
1299         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1300                      "    snd_pcm_get_params %s",
1301                      LATE(snd_strerror)(errVal), errVal);
1302         _recordingBuffersizeInFrame = 0;
1303         _recordingPeriodSizeInFrame = 0;
1304     }
1305     else {
1306         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1307                      "    capture snd_pcm_get_params "
1308                      "buffer_size:%d period_size:%d",
1309                      _recordingBuffersizeInFrame, _recordingPeriodSizeInFrame);
1310     }
1311 
1312     if (_ptrAudioBuffer)
1313     {
1314         // Update webrtc audio buffer with the selected parameters
1315         _ptrAudioBuffer->SetRecordingSampleRate(_recordingFreq);
1316         _ptrAudioBuffer->SetRecordingChannels(_recChannels);
1317     }
1318 
1319     // Set rec buffer size and create buffer
1320     _recordingBufferSizeIn10MS = LATE(snd_pcm_frames_to_bytes)(
1321         _handleRecord, _recordingFramesIn10MS);
1322 
1323     if (_handleRecord != NULL)
1324     {
1325         // Mark recording side as initialized
1326         _recIsInitialized = true;
1327         return 0;
1328     }
1329     else
1330     {
1331         return -1;
1332     }
1333 
1334     return 0;
1335 }
1336 
1337 int32_t AudioDeviceLinuxALSA::StartRecording()
1338 {
1339 
1340     if (!_recIsInitialized)
1341     {
1342         return -1;
1343     }
1344 
1345     if (_recording)
1346     {
1347         return 0;
1348     }
1349 
1350     _recording = true;
1351 
1352     int errVal = 0;
1353     _recordingFramesLeft = _recordingFramesIn10MS;
1354 
1355     // Make sure we only create the buffer once.
1356     if (!_recordingBuffer)
1357         _recordingBuffer = new int8_t[_recordingBufferSizeIn10MS];
1358     if (!_recordingBuffer)
1359     {
1360         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
1361                      "   failed to alloc recording buffer");
1362         _recording = false;
1363         return -1;
1364     }
1365     // RECORDING
1366     _ptrThreadRec.reset(new rtc::PlatformThread(
1367         RecThreadFunc, this, "webrtc_audio_module_capture_thread"));
1368 
1369     _ptrThreadRec->Start();
1370     _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
1371 
1372     errVal = LATE(snd_pcm_prepare)(_handleRecord);
1373     if (errVal < 0)
1374     {
1375         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1376                      "     capture snd_pcm_prepare failed (%s)\n",
1377                      LATE(snd_strerror)(errVal));
1378         // just log error
1379         // if snd_pcm_open fails will return -1
1380     }
1381 
1382     errVal = LATE(snd_pcm_start)(_handleRecord);
1383     if (errVal < 0)
1384     {
1385         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1386                      "     capture snd_pcm_start err: %s",
1387                      LATE(snd_strerror)(errVal));
1388         errVal = LATE(snd_pcm_start)(_handleRecord);
1389         if (errVal < 0)
1390         {
1391             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1392                          "     capture snd_pcm_start 2nd try err: %s",
1393                          LATE(snd_strerror)(errVal));
1394             StopRecording();
1395             return -1;
1396         }
1397     }
1398 
1399     return 0;
1400 }
1401 
1402 int32_t AudioDeviceLinuxALSA::StopRecording()
1403 {
1404 
1405     {
1406       CriticalSectionScoped lock(&_critSect);
1407 
1408       if (!_recIsInitialized)
1409       {
1410           return 0;
1411       }
1412 
1413       if (_handleRecord == NULL)
1414       {
1415           return -1;
1416       }
1417 
1418       // Make sure we don't start recording (it's asynchronous).
1419       _recIsInitialized = false;
1420       _recording = false;
1421     }
1422 
1423     if (_ptrThreadRec)
1424     {
1425         _ptrThreadRec->Stop();
1426         _ptrThreadRec.reset();
1427     }
1428 
1429     CriticalSectionScoped lock(&_critSect);
1430     _recordingFramesLeft = 0;
1431     if (_recordingBuffer)
1432     {
1433         delete [] _recordingBuffer;
1434         _recordingBuffer = NULL;
1435     }
1436 
1437     // Stop and close pcm recording device.
1438     int errVal = LATE(snd_pcm_drop)(_handleRecord);
1439     if (errVal < 0)
1440     {
1441         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1442                      "     Error stop recording: %s",
1443                      LATE(snd_strerror)(errVal));
1444         return -1;
1445     }
1446 
1447     errVal = LATE(snd_pcm_close)(_handleRecord);
1448     if (errVal < 0)
1449     {
1450         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1451                      "     Error closing record sound device, error: %s",
1452                      LATE(snd_strerror)(errVal));
1453         return -1;
1454     }
1455 
1456     // Check if we have muted and unmute if so.
1457     bool muteEnabled = false;
1458     MicrophoneMute(muteEnabled);
1459     if (muteEnabled)
1460     {
1461         SetMicrophoneMute(false);
1462     }
1463 
1464     // set the pcm input handle to NULL
1465     _handleRecord = NULL;
1466     return 0;
1467 }
1468 
1469 bool AudioDeviceLinuxALSA::RecordingIsInitialized() const
1470 {
1471     return (_recIsInitialized);
1472 }
1473 
1474 bool AudioDeviceLinuxALSA::Recording() const
1475 {
1476     return (_recording);
1477 }
1478 
1479 bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const
1480 {
1481     return (_playIsInitialized);
1482 }
1483 
1484 int32_t AudioDeviceLinuxALSA::StartPlayout()
1485 {
1486     if (!_playIsInitialized)
1487     {
1488         return -1;
1489     }
1490 
1491     if (_playing)
1492     {
1493         return 0;
1494     }
1495 
1496     _playing = true;
1497 
1498     _playoutFramesLeft = 0;
1499     if (!_playoutBuffer)
1500         _playoutBuffer = new int8_t[_playoutBufferSizeIn10MS];
1501     if (!_playoutBuffer)
1502     {
1503       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1504                    "    failed to alloc playout buf");
1505       _playing = false;
1506       return -1;
1507     }
1508 
1509     // PLAYOUT
1510     _ptrThreadPlay.reset(new rtc::PlatformThread(
1511         PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
1512     _ptrThreadPlay->Start();
1513     _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
1514 
1515     int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
1516     if (errVal < 0)
1517     {
1518         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
1519                      "     playout snd_pcm_prepare failed (%s)\n",
1520                      LATE(snd_strerror)(errVal));
1521         // just log error
1522         // if snd_pcm_open fails will return -1
1523     }
1524 
1525     return 0;
1526 }
1527 
1528 int32_t AudioDeviceLinuxALSA::StopPlayout()
1529 {
1530 
1531     {
1532         CriticalSectionScoped lock(&_critSect);
1533 
1534         if (!_playIsInitialized)
1535         {
1536             return 0;
1537         }
1538 
1539         if (_handlePlayout == NULL)
1540         {
1541             return -1;
1542         }
1543 
1544         _playing = false;
1545     }
1546 
1547     // stop playout thread first
1548     if (_ptrThreadPlay)
1549     {
1550         _ptrThreadPlay->Stop();
1551         _ptrThreadPlay.reset();
1552     }
1553 
1554     CriticalSectionScoped lock(&_critSect);
1555 
1556     _playoutFramesLeft = 0;
1557     delete [] _playoutBuffer;
1558     _playoutBuffer = NULL;
1559 
1560     // stop and close pcm playout device
1561     int errVal = LATE(snd_pcm_drop)(_handlePlayout);
1562     if (errVal < 0)
1563     {
1564         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1565                      "    Error stop playing: %s",
1566                      LATE(snd_strerror)(errVal));
1567     }
1568 
1569     errVal = LATE(snd_pcm_close)(_handlePlayout);
1570      if (errVal < 0)
1571          WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1572                       "    Error closing playout sound device, error: %s",
1573                       LATE(snd_strerror)(errVal));
1574 
1575      // set the pcm playout handle to NULL
1576      _playIsInitialized = false;
1577      _handlePlayout = NULL;
1578      WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1579                   "  handle_playout is now set to NULL");
1580 
1581      return 0;
1582 }
1583 
1584 int32_t AudioDeviceLinuxALSA::PlayoutDelay(uint16_t& delayMS) const
1585 {
1586     delayMS = (uint16_t)_playoutDelay * 1000 / _playoutFreq;
1587     return 0;
1588 }
1589 
1590 int32_t AudioDeviceLinuxALSA::RecordingDelay(uint16_t& delayMS) const
1591 {
1592     // Adding 10ms adjusted value to the record delay due to 10ms buffering.
1593     delayMS = (uint16_t)(10 + _recordingDelay * 1000 / _recordingFreq);
1594     return 0;
1595 }
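// Worked example (at the default 48 kHz capture rate): a _recordingDelay of
// 480 frames is 480 * 1000 / 48000 = 10 ms, so RecordingDelay() reports
// 10 + 10 = 20 ms once the fixed 10 ms buffering adjustment is added.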
1596 
1597 bool AudioDeviceLinuxALSA::Playing() const
1598 {
1599     return (_playing);
1600 }
1601 // ----------------------------------------------------------------------------
1602 //  SetPlayoutBuffer
1603 // ----------------------------------------------------------------------------
1604 
1605 int32_t AudioDeviceLinuxALSA::SetPlayoutBuffer(
1606     const AudioDeviceModule::BufferType type,
1607     uint16_t sizeMS)
1608 {
1609     _playBufType = type;
1610     if (type == AudioDeviceModule::kFixedBufferSize)
1611     {
1612         _playBufDelayFixed = sizeMS;
1613     }
1614     return 0;
1615 }
1616 
1617 int32_t AudioDeviceLinuxALSA::PlayoutBuffer(
1618     AudioDeviceModule::BufferType& type,
1619     uint16_t& sizeMS) const
1620 {
1621     type = _playBufType;
1622     if (type == AudioDeviceModule::kFixedBufferSize)
1623     {
1624         sizeMS = _playBufDelayFixed;
1625     }
1626     else
1627     {
1628         sizeMS = _playBufDelay;
1629     }
1630 
1631     return 0;
1632 }
1633 
1634 int32_t AudioDeviceLinuxALSA::CPULoad(uint16_t& load) const
1635 {
1636 
1637     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1638                "  API call not supported on this platform");
1639     return -1;
1640 }
1641 
1642 bool AudioDeviceLinuxALSA::PlayoutWarning() const
1643 {
1644     CriticalSectionScoped lock(&_critSect);
1645     return (_playWarning > 0);
1646 }
1647 
1648 bool AudioDeviceLinuxALSA::PlayoutError() const
1649 {
1650     CriticalSectionScoped lock(&_critSect);
1651     return (_playError > 0);
1652 }
1653 
1654 bool AudioDeviceLinuxALSA::RecordingWarning() const
1655 {
1656     CriticalSectionScoped lock(&_critSect);
1657     return (_recWarning > 0);
1658 }
1659 
1660 bool AudioDeviceLinuxALSA::RecordingError() const
1661 {
1662     CriticalSectionScoped lock(&_critSect);
1663     return (_recError > 0);
1664 }
1665 
1666 void AudioDeviceLinuxALSA::ClearPlayoutWarning()
1667 {
1668     CriticalSectionScoped lock(&_critSect);
1669     _playWarning = 0;
1670 }
1671 
1672 void AudioDeviceLinuxALSA::ClearPlayoutError()
1673 {
1674     CriticalSectionScoped lock(&_critSect);
1675     _playError = 0;
1676 }
1677 
1678 void AudioDeviceLinuxALSA::ClearRecordingWarning()
1679 {
1680     CriticalSectionScoped lock(&_critSect);
1681     _recWarning = 0;
1682 }
1683 
1684 void AudioDeviceLinuxALSA::ClearRecordingError()
1685 {
1686     CriticalSectionScoped lock(&_critSect);
1687     _recError = 0;
1688 }
1689 
1690 // ============================================================================
1691 //                                 Private Methods
1692 // ============================================================================
1693 
1694 int32_t AudioDeviceLinuxALSA::GetDevicesInfo(
1695     const int32_t function,
1696     const bool playback,
1697     const int32_t enumDeviceNo,
1698     char* enumDeviceName,
1699     const int32_t ednLen) const
1700 {
1701 
1702     // Device enumeration based on libjingle implementation
1703     // by Tristan Schmelcher at Google Inc.
1704 
1705     const char *type = playback ? "Output" : "Input";
1706     // dmix and dsnoop are only for playback and capture, respectively, but ALSA
1707     // stupidly includes them in both lists.
1708     const char *ignorePrefix = playback ? "dsnoop:" : "dmix:" ;
1709     // (ALSA lists many more "devices" of questionable interest, but we show them
1710     // just in case the weird devices may actually be desirable for some
1711     // users/systems.)
1712 
1713     int err;
1714     int enumCount(0);
1715     bool keepSearching(true);
1716 
1717     // From Chromium issue 95797
1718     // Loop through the sound cards to get Alsa device hints.
1719     // Don't use snd_device_name_hint(-1,..) since there is an access violation
1720     // inside this ALSA API with libasound.so.2.0.0.
1721     int card = -1;
1722     while (!(LATE(snd_card_next)(&card)) && (card >= 0) && keepSearching) {
1723         void **hints;
1724         err = LATE(snd_device_name_hint)(card, "pcm", &hints);
1725         if (err != 0)
1726         {
1727             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1728                          "GetDevicesInfo - device name hint error: %s",
1729                          LATE(snd_strerror)(err));
1730             return -1;
1731         }
1732 
1733         enumCount++; // default is 0
1734         if ((function == FUNC_GET_DEVICE_NAME ||
1735             function == FUNC_GET_DEVICE_NAME_FOR_AN_ENUM) && enumDeviceNo == 0)
1736         {
1737             strcpy(enumDeviceName, "default");
1738 
1739             err = LATE(snd_device_name_free_hint)(hints);
1740             if (err != 0)
1741             {
1742                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1743                              "GetDevicesInfo - device name free hint error: %s",
1744                              LATE(snd_strerror)(err));
1745             }
1746 
1747             return 0;
1748         }
1749 
        for (void **list = hints; *list != NULL; ++list)
        {
            char *actualType = LATE(snd_device_name_get_hint)(*list, "IOID");
            if (actualType)
            {   // NULL means it's both.
                bool wrongType = (strcmp(actualType, type) != 0);
                free(actualType);
                if (wrongType)
                {
                    // Wrong type of device (i.e., input vs. output).
                    continue;
                }
            }

            char *name = LATE(snd_device_name_get_hint)(*list, "NAME");
            if (!name)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                             "Device has no name");
                // Skip it.
                continue;
            }

            // Now check if we actually want to show this device.
            if (strcmp(name, "default") != 0 &&
                strcmp(name, "null") != 0 &&
                strcmp(name, "pulse") != 0 &&
                strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0)
            {
                // Yes, we do.
                char *desc = LATE(snd_device_name_get_hint)(*list, "DESC");
                if (!desc)
                {
                    // Virtual devices don't necessarily have descriptions.
                    // Use their names instead.
                    desc = name;
                }

                if (FUNC_GET_NUM_OF_DEVICE == function)
                {
                    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                                 "    Enum device %d - %s", enumCount, name);

                }
                if ((FUNC_GET_DEVICE_NAME == function) &&
                    (enumDeviceNo == enumCount))
                {
                    // We have found the enum device; copy its description to
                    // the buffer.
                    strncpy(enumDeviceName, desc, ednLen);
                    enumDeviceName[ednLen-1] = '\0';
                    keepSearching = false;
                    // Replace '\n' with '-'.
                    char * pret = strchr(enumDeviceName, '\n'/*0xa*/); // LF
                    if (pret)
                        *pret = '-';
                }
                if ((FUNC_GET_DEVICE_NAME_FOR_AN_ENUM == function) &&
                    (enumDeviceNo == enumCount))
                {
                    // We have found the enum device; copy its name to the
                    // buffer.
                    strncpy(enumDeviceName, name, ednLen);
                    enumDeviceName[ednLen-1] = '\0';
                    keepSearching = false;
                }

                if (keepSearching)
                    ++enumCount;

                if (desc != name)
                    free(desc);
            }

            free(name);

            if (!keepSearching)
                break;
        }

        err = LATE(snd_device_name_free_hint)(hints);
        if (err != 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "GetDevicesInfo - device name free hint error: %s",
                         LATE(snd_strerror)(err));
            // Continue anyway, since we did get the whole list.
        }
    }

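    // "default" was already counted before the hint loop (it is skipped in the
    // loop above), so a count of one means no other devices were found.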
    if (FUNC_GET_NUM_OF_DEVICE == function)
    {
        if (enumCount == 1) // only default?
            enumCount = 0;
        return enumCount; // Normal return point for function 0.
    }

    if (keepSearching)
    {
        // If we get here for function 1 or 2, we didn't find the specified
        // enum device.
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "GetDevicesInfo - Could not find device name or numbers");
        return -1;
    }

    return 0;
}

int32_t AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const
{
    if (_handleRecord == NULL)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  input state has been modified during unlocked period");
        return -1;
    }
    return 0;
}

int32_t AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const
{
    if (_handlePlayout == NULL)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  output state has been modified during unlocked period");
        return -1;
    }
    return 0;
}

int32_t AudioDeviceLinuxALSA::ErrorRecovery(int32_t error,
                                            snd_pcm_t* deviceHandle)
{
    int st = LATE(snd_pcm_state)(deviceHandle);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
               "Trying to recover from %s error: %s (%d) (state %d)",
               (LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE) ?
                   "capture" : "playout", LATE(snd_strerror)(error), error, st);

    // It is recommended to use snd_pcm_recover for all errors. If that function
    // cannot handle the error, the input error code will be returned, otherwise
    // 0 is returned. From snd_pcm_recover API doc: "This functions handles
    // -EINTR (4) (interrupted system call), -EPIPE (32) (playout underrun or
    // capture overrun) and -ESTRPIPE (86) (stream is suspended) error codes
    // trying to prepare given stream for next I/O."

    /** Open */
    //    SND_PCM_STATE_OPEN = 0,
    /** Setup installed */
    //    SND_PCM_STATE_SETUP,
    /** Ready to start */
    //    SND_PCM_STATE_PREPARED,
    /** Running */
    //    SND_PCM_STATE_RUNNING,
    /** Stopped: underrun (playback) or overrun (capture) detected */
    //    SND_PCM_STATE_XRUN,  // = 4
    /** Draining: running (playback) or stopped (capture) */
    //    SND_PCM_STATE_DRAINING,
    /** Paused */
    //    SND_PCM_STATE_PAUSED,
    /** Hardware is suspended */
    //    SND_PCM_STATE_SUSPENDED,
    /** Hardware is disconnected */
    //    SND_PCM_STATE_DISCONNECTED,
    //    SND_PCM_STATE_LAST = SND_PCM_STATE_DISCONNECTED

    // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
    // in the Stockholm lab.

    int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
    if (0 == res)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                   "    Recovery - snd_pcm_recover OK");

        if ((error == -EPIPE || error == -ESTRPIPE) && // Buf underrun/overrun.
            _recording &&
            LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE)
        {
            // For capture streams we also have to repeat the explicit start()
            // to get data flowing again.
            int err = LATE(snd_pcm_start)(deviceHandle);
            if (err != 0)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                             "  Recovery - snd_pcm_start error: %d", err);
                return -1;
            }
        }

        if ((error == -EPIPE || error == -ESTRPIPE) &&  // Buf underrun/overrun.
            _playing &&
            LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_PLAYBACK)
        {
            // For playback streams we also have to repeat the explicit start()
            // to get data flowing again after recovery.
            int err = LATE(snd_pcm_start)(deviceHandle);
            if (err != 0)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                             "    Recovery - snd_pcm_start error: %s",
                             LATE(snd_strerror)(err));
                return -1;
            }
        }

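        // A recovered xrun (-EPIPE) is reported as 1, any other recovered
        // error as 0, so callers can tell the two apart.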
        return -EPIPE == error ? 1 : 0;
    }
    else {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  Unrecoverable alsa stream error: %d", res);
    }

    return res;
}

// ============================================================================
//                                  Thread Methods
// ============================================================================

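// Static trampolines used by the platform thread wrapper; they forward to the
// member processing functions below.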
bool AudioDeviceLinuxALSA::PlayThreadFunc(void* pThis)
{
    return (static_cast<AudioDeviceLinuxALSA*>(pThis)->PlayThreadProcess());
}

bool AudioDeviceLinuxALSA::RecThreadFunc(void* pThis)
{
    return (static_cast<AudioDeviceLinuxALSA*>(pThis)->RecThreadProcess());
}

bool AudioDeviceLinuxALSA::PlayThreadProcess()
{
    if (!_playing)
        return false;

    int err;
    snd_pcm_sframes_t frames;
    snd_pcm_sframes_t avail_frames;

    Lock();
    // Returns the number of frames that are ready, or a negative error code.
    avail_frames = LATE(snd_pcm_avail_update)(_handlePlayout);
    if (avail_frames < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   "playout snd_pcm_avail_update error: %s",
                   LATE(snd_strerror)(avail_frames));
        ErrorRecovery(avail_frames, _handlePlayout);
        UnLock();
        return true;
    }
    else if (avail_frames == 0)
    {
        UnLock();

        // Maximum time in milliseconds to wait; a negative value means infinity.
        err = LATE(snd_pcm_wait)(_handlePlayout, 2);
        if (err == 0)
        { // Timeout occurred.
            WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                         "playout snd_pcm_wait timeout");
        }

        return true;
    }

    if (_playoutFramesLeft <= 0)
    {
        UnLock();
        _ptrAudioBuffer->RequestPlayoutData(_playoutFramesIn10MS);
        Lock();

        _playoutFramesLeft = _ptrAudioBuffer->GetPlayoutData(_playoutBuffer);
        assert(_playoutFramesLeft == _playoutFramesIn10MS);
    }

    if (static_cast<uint32_t>(avail_frames) > _playoutFramesLeft)
        avail_frames = _playoutFramesLeft;

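    // _playoutBuffer holds one full 10 ms chunk; the write offset is the number
    // of bytes already consumed, i.e. the buffer size minus the bytes still left.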
    int size = LATE(snd_pcm_frames_to_bytes)(_handlePlayout,
        _playoutFramesLeft);
    frames = LATE(snd_pcm_writei)(
        _handlePlayout,
        &_playoutBuffer[_playoutBufferSizeIn10MS - size],
        avail_frames);

    if (frames < 0)
    {
        WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                     "playout snd_pcm_writei error: %s",
                     LATE(snd_strerror)(frames));
        _playoutFramesLeft = 0;
        ErrorRecovery(frames, _handlePlayout);
        UnLock();
        return true;
    }
    else {
        assert(frames == avail_frames);
        _playoutFramesLeft -= frames;
    }

    UnLock();
    return true;
}

bool AudioDeviceLinuxALSA::RecThreadProcess()
{
    if (!_recording)
        return false;

    int err;
    snd_pcm_sframes_t frames;
    snd_pcm_sframes_t avail_frames;
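    // Temporary stack buffer (a variable-length array, a GCC/Clang extension)
    // large enough to hold a full 10 ms of capture data.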
    int8_t buffer[_recordingBufferSizeIn10MS];

    Lock();

    // Returns the number of frames that are ready, or a negative error code.
    avail_frames = LATE(snd_pcm_avail_update)(_handleRecord);
    if (avail_frames < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "capture snd_pcm_avail_update error: %s",
                     LATE(snd_strerror)(avail_frames));
        ErrorRecovery(avail_frames, _handleRecord);
        UnLock();
        return true;
    }
    else if (avail_frames == 0)
    { // No frames are available right now.
        UnLock();

        // Maximum time in milliseconds to wait; a negative value means infinity.
        err = LATE(snd_pcm_wait)(_handleRecord,
            ALSA_CAPTURE_WAIT_TIMEOUT);
        if (err == 0) // Timeout occurred.
            WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                         "capture snd_pcm_wait timeout");

        return true;
    }

    if (static_cast<uint32_t>(avail_frames) > _recordingFramesLeft)
        avail_frames = _recordingFramesLeft;

    frames = LATE(snd_pcm_readi)(_handleRecord,
        buffer, avail_frames); // Frames actually read.
    if (frames < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "capture snd_pcm_readi error: %s",
                     LATE(snd_strerror)(frames));
        ErrorRecovery(frames, _handleRecord);
        UnLock();
        return true;
    }
    else if (frames > 0)
    {
        assert(frames == avail_frames);

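        // Append the captured frames at the current fill position of the 10 ms
        // recording buffer; left_size is the space (in bytes) still to fill.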
        int left_size = LATE(snd_pcm_frames_to_bytes)(_handleRecord,
            _recordingFramesLeft);
        int size = LATE(snd_pcm_frames_to_bytes)(_handleRecord, frames);

        memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size],
               buffer, size);
        _recordingFramesLeft -= frames;

        if (!_recordingFramesLeft)
        { // buf is full
            _recordingFramesLeft = _recordingFramesIn10MS;

            // store the recorded buffer (no action will be taken if the
            // #recorded samples is not a full buffer)
            _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
                                               _recordingFramesIn10MS);

            uint32_t currentMicLevel = 0;
            uint32_t newMicLevel = 0;

            if (AGC())
            {
                // store current mic level in the audio buffer if AGC is enabled
                if (MicrophoneVolume(currentMicLevel) == 0)
                {
                    if (currentMicLevel == 0xffffffff)
                        currentMicLevel = 100;
                    // this call does not affect the actual microphone volume
                    _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
                }
            }

            // calculate delay
            _playoutDelay = 0;
            _recordingDelay = 0;
            if (_handlePlayout)
            {
                err = LATE(snd_pcm_delay)(_handlePlayout,
                    &_playoutDelay); // returned delay in frames
                if (err < 0)
                {
                    // TODO(xians): Shall we call ErrorRecovery() here?
                    _playoutDelay = 0;
                    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                                 "playout snd_pcm_delay: %s",
                                 LATE(snd_strerror)(err));
                }
            }

            err = LATE(snd_pcm_delay)(_handleRecord,
                &_recordingDelay); // returned delay in frames
            if (err < 0)
            {
                // TODO(xians): Shall we call ErrorRecovery() here?
                _recordingDelay = 0;
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                             "capture snd_pcm_delay: %s",
                             LATE(snd_strerror)(err));
            }

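            // snd_pcm_delay() reports the delays in frames; convert to
            // milliseconds before passing them on.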
            // TODO(xians): Shall we add 10ms buffer delay to the record delay?
            _ptrAudioBuffer->SetVQEData(
                _playoutDelay * 1000 / _playoutFreq,
                _recordingDelay * 1000 / _recordingFreq, 0);

            _ptrAudioBuffer->SetTypingStatus(KeyPressed());

            // Deliver recorded samples at specified sample rate, mic level etc.
            // to the observer using callback.
            UnLock();
            _ptrAudioBuffer->DeliverRecordedData();
            Lock();

            if (AGC())
            {
                newMicLevel = _ptrAudioBuffer->NewMicLevel();
                if (newMicLevel != 0)
                {
                    // The VQE will only deliver non-zero microphone levels when a
                    // change is needed. Set this new mic level (received from the
                    // observer as return value in the callback).
                    if (SetMicrophoneVolume(newMicLevel) == -1)
                        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                     "  the required modification of the "
                                     "microphone volume failed");
                }
            }
        }
    }

    UnLock();
    return true;
}


bool AudioDeviceLinuxALSA::KeyPressed() const {
#if defined(USE_X11)
  char szKey[32];
  unsigned int i = 0;
  char state = 0;

  if (!_XDisplay)
    return false;

  // Check key map status
  XQueryKeymap(_XDisplay, szKey);

  // A newly set bit in the keymap means a key went down since the last check;
  // key releases are masked out.
  for (i = 0; i < sizeof(szKey); i++)
    state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];

  // Save old state
  memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
  return (state != 0);
#else
  return false;
#endif
}
}  // namespace webrtc