/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "webrtc/modules/audio_device/audio_device_config.h"
#include "webrtc/modules/audio_device/audio_device_utility.h"
#include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h"

#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/thread_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"

webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;

// Accesses Pulse functions through our late-binding symbol table instead of
// directly. This way we don't have to link to libpulse, which means our binary
// will work on systems that don't have it.
#define LATE(sym) \
LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym)
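
// Note: the symbol table presumably dlopen()s libpulse at runtime and
// resolves each referenced symbol with dlsym() on first use, so that e.g.
// LATE(pa_stream_new)(...) calls through a cached function pointer rather
// than a link-time reference. See the PulseAudioSymbolTable implementation
// for the actual mechanics.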

namespace webrtc
{

// ============================================================================
// Static Methods
// ============================================================================

AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) :
    _ptrAudioBuffer(NULL),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _timeEventRec(*EventWrapper::Create()),
    _timeEventPlay(*EventWrapper::Create()),
    _recStartEvent(*EventWrapper::Create()),
    _playStartEvent(*EventWrapper::Create()),
    _ptrThreadPlay(NULL),
    _ptrThreadRec(NULL),
    _recThreadID(0),
    _playThreadID(0),
    _id(id),
    _mixerManager(id),
    _inputDeviceIndex(0),
    _outputDeviceIndex(0),
    _inputDeviceIsSpecified(false),
    _outputDeviceIsSpecified(false),
    sample_rate_hz_(0),
    _recChannels(1),
    _playChannels(1),
    _playBufType(AudioDeviceModule::kFixedBufferSize),
    _initialized(false),
    _recording(false),
    _playing(false),
    _recIsInitialized(false),
    _playIsInitialized(false),
    _startRec(false),
    _stopRec(false),
    _startPlay(false),
    _stopPlay(false),
    _AGC(false),
    update_speaker_volume_at_startup_(false),
    _playBufDelayFixed(20),
    _sndCardPlayDelay(0),
    _sndCardRecDelay(0),
    _writeErrors(0),
    _playWarning(0),
    _playError(0),
    _recWarning(0),
    _recError(0),
    _deviceIndex(-1),
    _numPlayDevices(0),
    _numRecDevices(0),
    _playDeviceName(NULL),
    _recDeviceName(NULL),
    _playDisplayDeviceName(NULL),
    _recDisplayDeviceName(NULL),
    _playBuffer(NULL),
    _playbackBufferSize(0),
    _playbackBufferUnused(0),
    _tempBufferSpace(0),
    _recBuffer(NULL),
    _recordBufferSize(0),
    _recordBufferUsed(0),
    _tempSampleData(NULL),
    _tempSampleDataSize(0),
    _configuredLatencyPlay(0),
    _configuredLatencyRec(0),
    _paDeviceIndex(-1),
    _paStateChanged(false),
    _paMainloop(NULL),
    _paMainloopApi(NULL),
    _paContext(NULL),
    _recStream(NULL),
    _playStream(NULL),
    _recStreamFlags(0),
    _playStreamFlags(0)
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
                 "%s created", __FUNCTION__);

    memset(_paServerVersion, 0, sizeof(_paServerVersion));
    memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
    memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
    memset(_oldKeyState, 0, sizeof(_oldKeyState));
}

AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse()
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
                 "%s destroyed", __FUNCTION__);

    Terminate();

    if (_recBuffer)
    {
        delete [] _recBuffer;
        _recBuffer = NULL;
    }
    if (_playBuffer)
    {
        delete [] _playBuffer;
        _playBuffer = NULL;
    }
    if (_playDeviceName)
    {
        delete [] _playDeviceName;
        _playDeviceName = NULL;
    }
    if (_recDeviceName)
    {
        delete [] _recDeviceName;
        _recDeviceName = NULL;
    }

    delete &_recStartEvent;
    delete &_playStartEvent;
    delete &_timeEventRec;
    delete &_timeEventPlay;
    delete &_critSect;
}

void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
{

    CriticalSectionScoped lock(&_critSect);

    _ptrAudioBuffer = audioBuffer;

    // Inform the AudioBuffer about default settings for this implementation.
    // Set all values to zero here since the actual settings will be done by
    // InitPlayout and InitRecording later.
    _ptrAudioBuffer->SetRecordingSampleRate(0);
    _ptrAudioBuffer->SetPlayoutSampleRate(0);
    _ptrAudioBuffer->SetRecordingChannels(0);
    _ptrAudioBuffer->SetPlayoutChannels(0);
}

// ----------------------------------------------------------------------------
// ActiveAudioLayer
// ----------------------------------------------------------------------------

int32_t AudioDeviceLinuxPulse::ActiveAudioLayer(
    AudioDeviceModule::AudioLayer& audioLayer) const
{
    audioLayer = AudioDeviceModule::kLinuxPulseAudio;
    return 0;
}

int32_t AudioDeviceLinuxPulse::Init()
{

    CriticalSectionScoped lock(&_critSect);

    if (_initialized)
    {
        return 0;
    }

    // Initialize PulseAudio
    if (InitPulseAudio() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to initialize PulseAudio");

        if (TerminatePulseAudio() < 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to terminate PulseAudio");
        }

        return -1;
    }

    _playWarning = 0;
    _playError = 0;
    _recWarning = 0;
    _recError = 0;

    // Get X display handle for typing detection
    _XDisplay = XOpenDisplay(NULL);
    if (!_XDisplay)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " failed to open X display, typing detection will not work");
    }

    // RECORDING
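    // Note: the rec and play threads created below spend most of their time
    // blocked on _timeEventRec/_timeEventPlay; the Start*/Terminate APIs and
    // the PulseAudio read/write callbacks (see EnableWriteCallback below)
    // wake them when there is work to do.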
    const char* threadName = "webrtc_audio_module_rec_thread";
    _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this,
                                                kRealtimePriority, threadName);
    if (_ptrThreadRec == NULL)
    {
        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                     " failed to create the rec audio thread");
        return -1;
    }

    unsigned int threadID(0);
    if (!_ptrThreadRec->Start(threadID))
    {
        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                     " failed to start the rec audio thread");

        delete _ptrThreadRec;
        _ptrThreadRec = NULL;
        return -1;
    }
    _recThreadID = threadID;

    // PLAYOUT
    threadName = "webrtc_audio_module_play_thread";
    _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, this,
                                                 kRealtimePriority, threadName);
    if (_ptrThreadPlay == NULL)
    {
        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                     " failed to create the play audio thread");
        return -1;
    }

    threadID = 0;
    if (!_ptrThreadPlay->Start(threadID))
    {
        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                     " failed to start the play audio thread");

        delete _ptrThreadPlay;
        _ptrThreadPlay = NULL;
        return -1;
    }
    _playThreadID = threadID;

    _initialized = true;

    return 0;
}

int32_t AudioDeviceLinuxPulse::Terminate()
{

    if (!_initialized)
    {
        return 0;
    }

    Lock();

    _mixerManager.Close();

    // RECORDING
    if (_ptrThreadRec)
    {
        ThreadWrapper* tmpThread = _ptrThreadRec;
        _ptrThreadRec = NULL;
        UnLock();

        tmpThread->SetNotAlive();
        _timeEventRec.Set();
        if (tmpThread->Stop())
        {
            delete tmpThread;
        } else
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         " failed to close down the rec audio thread");
        }
        // Lock again since we need to protect _ptrThreadPlay.
        Lock();
    }

    // PLAYOUT
    if (_ptrThreadPlay)
    {
        ThreadWrapper* tmpThread = _ptrThreadPlay;
        _ptrThreadPlay = NULL;
        _critSect.Leave();

        tmpThread->SetNotAlive();
        _timeEventPlay.Set();
        if (tmpThread->Stop())
        {
            delete tmpThread;
        } else
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         " failed to close down the play audio thread");
        }
    } else {
        UnLock();
    }

    // Terminate PulseAudio
    if (TerminatePulseAudio() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to terminate PulseAudio");
        return -1;
    }

    if (_XDisplay)
    {
        XCloseDisplay(_XDisplay);
        _XDisplay = NULL;
    }

    _initialized = false;
    _outputDeviceIsSpecified = false;
    _inputDeviceIsSpecified = false;

    return 0;
}

bool AudioDeviceLinuxPulse::Initialized() const
{
    return (_initialized);
}

int32_t AudioDeviceLinuxPulse::InitSpeaker()
{

    CriticalSectionScoped lock(&_critSect);

    if (_playing)
    {
        return -1;
    }

    if (!_outputDeviceIsSpecified)
    {
        return -1;
    }

    // check if default device
    if (_outputDeviceIndex == 0)
    {
        uint16_t deviceIndex = 0;
        GetDefaultDeviceInfo(false, NULL, deviceIndex);
        _paDeviceIndex = deviceIndex;
    } else
    {
        // get the PA device index from
        // the callback
        _deviceIndex = _outputDeviceIndex;

        // get playout devices
        PlayoutDevices();
    }

    // the callback has now set the _paDeviceIndex to
    // the PulseAudio index of the device
    if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1)
    {
        return -1;
    }

    // clear _deviceIndex
    _deviceIndex = -1;
    _paDeviceIndex = -1;

    return 0;
}

int32_t AudioDeviceLinuxPulse::InitMicrophone()
{

    CriticalSectionScoped lock(&_critSect);

    if (_recording)
    {
        return -1;
    }

    if (!_inputDeviceIsSpecified)
    {
        return -1;
    }

    // Check if default device
    if (_inputDeviceIndex == 0)
    {
        uint16_t deviceIndex = 0;
        GetDefaultDeviceInfo(true, NULL, deviceIndex);
        _paDeviceIndex = deviceIndex;
    } else
    {
        // Get the PA device index from
        // the callback
        _deviceIndex = _inputDeviceIndex;

        // get recording devices
        RecordingDevices();
    }

    // The callback has now set the _paDeviceIndex to
    // the PulseAudio index of the device
    if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1)
    {
        return -1;
    }

    // Clear _deviceIndex
    _deviceIndex = -1;
    _paDeviceIndex = -1;

    return 0;
}

bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const
{
    return (_mixerManager.SpeakerIsInitialized());
}

bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const
{
    return (_mixerManager.MicrophoneIsInitialized());
}

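// Note: the *IsAvailable() methods below share a probe pattern: if the mixer
// for the selected device is not already open, open it temporarily, query the
// capability, and close it again so the probe leaves no side effects.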
int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
{

    bool wasInitialized = _mixerManager.SpeakerIsInitialized();

    // Make an attempt to open up the
    // output mixer corresponding to the currently selected output device.
    if (!wasInitialized && InitSpeaker() == -1)
    {
        // If we end up here it means that the selected speaker has no volume
        // control.
        available = false;
        return 0;
    }

    // Given that InitSpeaker was successful, we know that a volume control
    // exists.
    available = true;

    // Close the initialized output mixer
    if (!wasInitialized)
    {
        _mixerManager.CloseSpeaker();
    }

    return 0;
}

int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume)
{
    if (!_playing) {
        // Only update the volume if it's been set while we weren't playing.
        update_speaker_volume_at_startup_ = true;
    }
    return (_mixerManager.SetSpeakerVolume(volume));
}

int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const
{

    uint32_t level(0);

    if (_mixerManager.SpeakerVolume(level) == -1)
    {
        return -1;
    }

    volume = level;

    return 0;
}

int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(
    uint16_t volumeLeft,
    uint16_t volumeRight)
{

    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " API call not supported on this platform");
    return -1;
}

int32_t AudioDeviceLinuxPulse::WaveOutVolume(
    uint16_t& /*volumeLeft*/,
    uint16_t& /*volumeRight*/) const
{

    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " API call not supported on this platform");
    return -1;
}

int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(
    uint32_t& maxVolume) const
{

    uint32_t maxVol(0);

    if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
    {
        return -1;
    }

    maxVolume = maxVol;

    return 0;
}

int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(
    uint32_t& minVolume) const
{

    uint32_t minVol(0);

    if (_mixerManager.MinSpeakerVolume(minVol) == -1)
    {
        return -1;
    }

    minVolume = minVol;

    return 0;
}

int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(
    uint16_t& stepSize) const
{

    uint16_t delta(0);

    if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
    {
        return -1;
    }

    stepSize = delta;

    return 0;
}

int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available)
{

    bool isAvailable(false);
    bool wasInitialized = _mixerManager.SpeakerIsInitialized();

    // Make an attempt to open up the
    // output mixer corresponding to the currently selected output device.
    //
    if (!wasInitialized && InitSpeaker() == -1)
    {
        // If we end up here it means that the selected speaker has no volume
        // control, hence it is safe to state that there is no mute control
        // already at this stage.
        available = false;
        return 0;
    }

    // Check if the selected speaker has a mute control
    _mixerManager.SpeakerMuteIsAvailable(isAvailable);

    available = isAvailable;

    // Close the initialized output mixer
    if (!wasInitialized)
    {
        _mixerManager.CloseSpeaker();
    }

    return 0;
}

int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable)
{

    return (_mixerManager.SetSpeakerMute(enable));
}

int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
{

    bool muted(false);
    if (_mixerManager.SpeakerMute(muted) == -1)
    {
        return -1;
    }

    enabled = muted;
    return 0;
}

int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
{

    bool isAvailable(false);
    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();

    // Make an attempt to open up the
    // input mixer corresponding to the currently selected input device.
    //
    if (!wasInitialized && InitMicrophone() == -1)
    {
        // If we end up here it means that the selected microphone has no
        // volume control, hence it is safe to state that there is no mute
        // control already at this stage.
        available = false;
        return 0;
    }

    // Check if the selected microphone has a mute control
    //
    _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
    available = isAvailable;

    // Close the initialized input mixer
    //
    if (!wasInitialized)
    {
        _mixerManager.CloseMicrophone();
    }

    return 0;
}

int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable)
{

    return (_mixerManager.SetMicrophoneMute(enable));
}

int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
{

    bool muted(false);
    if (_mixerManager.MicrophoneMute(muted) == -1)
    {
        return -1;
    }

    enabled = muted;
    return 0;
}

int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
{

    bool isAvailable(false);
    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();

    // Enumerate all available microphones and make an attempt to open up the
    // input mixer corresponding to the currently selected input device.
    //
    if (!wasInitialized && InitMicrophone() == -1)
    {
        // If we end up here it means that the selected microphone has no
        // volume control, hence it is safe to state that there is no boost
        // control already at this stage.
        available = false;
        return 0;
    }

    // Check if the selected microphone has a boost control
    _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
    available = isAvailable;

    // Close the initialized input mixer
    if (!wasInitialized)
    {
        _mixerManager.CloseMicrophone();
    }

    return 0;
}

int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable)
{

    return (_mixerManager.SetMicrophoneBoost(enable));
}

int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
{

    bool onOff(false);

    if (_mixerManager.MicrophoneBoost(onOff) == -1)
    {
        return -1;
    }

    enabled = onOff;

    return 0;
}

int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
{

    if (_recChannels == 2 && _recording) {
        available = true;
        return 0;
    }

    available = false;
    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
    int error = 0;

    if (!wasInitialized && InitMicrophone() == -1)
    {
        // Cannot open the specified device
        available = false;
        return 0;
    }

    // Check if the selected microphone can record stereo.
    bool isAvailable(false);
    error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
    if (!error)
        available = isAvailable;

    // Close the initialized input mixer
    if (!wasInitialized)
    {
        _mixerManager.CloseMicrophone();
    }

    return error;
}

int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
{

    if (enable)
        _recChannels = 2;
    else
        _recChannels = 1;

    return 0;
}

int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
{

    if (_recChannels == 2)
        enabled = true;
    else
        enabled = false;

    return 0;
}

int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
{

    if (_playChannels == 2 && _playing) {
        available = true;
        return 0;
    }

    available = false;
    bool wasInitialized = _mixerManager.SpeakerIsInitialized();
    int error = 0;

    if (!wasInitialized && InitSpeaker() == -1)
    {
        // Cannot open the specified device.
        return -1;
    }

    // Check if the selected speaker can play stereo.
    bool isAvailable(false);
    error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
    if (!error)
        available = isAvailable;

    // Close the initialized output mixer
    if (!wasInitialized)
    {
        _mixerManager.CloseSpeaker();
    }

    return error;
}

int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
{

    if (enable)
        _playChannels = 2;
    else
        _playChannels = 1;

    return 0;
}

int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const
{

    if (_playChannels == 2)
        enabled = true;
    else
        enabled = false;

    return 0;
}

int32_t AudioDeviceLinuxPulse::SetAGC(bool enable)
{

    _AGC = enable;

    return 0;
}

bool AudioDeviceLinuxPulse::AGC() const
{

    return _AGC;
}

int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(
    bool& available)
{

    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();

    // Make an attempt to open up the
    // input mixer corresponding to the currently selected input device.
    if (!wasInitialized && InitMicrophone() == -1)
    {
        // If we end up here it means that the selected microphone has no
        // volume control.
        available = false;
        return 0;
    }

    // Given that InitMicrophone was successful, we know that a volume control
    // exists
    available = true;

    // Close the initialized input mixer
    if (!wasInitialized)
    {
        _mixerManager.CloseMicrophone();
    }

    return 0;
}

int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume)
{

    return (_mixerManager.SetMicrophoneVolume(volume));
}

int32_t AudioDeviceLinuxPulse::MicrophoneVolume(
    uint32_t& volume) const
{

    uint32_t level(0);

    if (_mixerManager.MicrophoneVolume(level) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " failed to retrieve current microphone level");
        return -1;
    }

    volume = level;

    return 0;
}

int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(
    uint32_t& maxVolume) const
{

    uint32_t maxVol(0);

    if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
    {
        return -1;
    }

    maxVolume = maxVol;

    return 0;
}

int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(
    uint32_t& minVolume) const
{

    uint32_t minVol(0);

    if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
    {
        return -1;
    }

    minVolume = minVol;

    return 0;
}

int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
    uint16_t& stepSize) const
{

    uint16_t delta(0);

    if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
    {
        return -1;
    }

    stepSize = delta;

    return 0;
}

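// Note on the enumeration protocol used by PlayoutDevices()/RecordingDevices()
// and their callbacks: index 0 is reserved for "default", so the counters
// start at 1; each sink/source reported by the server bumps the counter, and
// when the counter matches _deviceIndex the callback records that device's
// PulseAudio index (and, if requested, its name) in _paDeviceIndex et al.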
int16_t AudioDeviceLinuxPulse::PlayoutDevices()
{

    PaLock();

    pa_operation* paOperation = NULL;
    _numPlayDevices = 1; // init to 1 to account for "default"

    // get the whole list of devices and update _numPlayDevices
    paOperation = LATE(pa_context_get_sink_info_list)(_paContext,
                                                      PaSinkInfoCallback,
                                                      this);

    WaitForOperationCompletion(paOperation);

    PaUnLock();

    return _numPlayDevices;
}

int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index)
{

    if (_playIsInitialized)
    {
        return -1;
    }

    const uint16_t nDevices = PlayoutDevices();

    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " number of available output devices is %u", nDevices);

    if (index > (nDevices - 1))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " device index is out of range [0,%u]", (nDevices - 1));
        return -1;
    }

    _outputDeviceIndex = index;
    _outputDeviceIsSpecified = true;

    return 0;
}

int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
    AudioDeviceModule::WindowsDeviceType /*device*/)
{
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 "WindowsDeviceType not supported");
    return -1;
}

int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{

    const uint16_t nDevices = PlayoutDevices();

    if ((index > (nDevices - 1)) || (name == NULL))
    {
        return -1;
    }

    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    // Check if default device
    if (index == 0)
    {
        uint16_t deviceIndex = 0;
        return GetDefaultDeviceInfo(false, name, deviceIndex);
    }

    // Tell the callback that we want
    // the name for this device
    _playDisplayDeviceName = name;
    _deviceIndex = index;

    // get playout devices
    PlayoutDevices();

    // clear device name and index
    _playDisplayDeviceName = NULL;
    _deviceIndex = -1;

    return 0;
}

int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{

    const uint16_t nDevices(RecordingDevices());

    if ((index > (nDevices - 1)) || (name == NULL))
    {
        return -1;
    }

    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    // Check if default device
    if (index == 0)
    {
        uint16_t deviceIndex = 0;
        return GetDefaultDeviceInfo(true, name, deviceIndex);
    }

    // Tell the callback that we want
    // the name for this device
    _recDisplayDeviceName = name;
    _deviceIndex = index;

    // Get recording devices
    RecordingDevices();

    // Clear device name and index
    _recDisplayDeviceName = NULL;
    _deviceIndex = -1;

    return 0;
}

int16_t AudioDeviceLinuxPulse::RecordingDevices()
{

    PaLock();

    pa_operation* paOperation = NULL;
    _numRecDevices = 1; // Init to 1 to account for "default"

    // Get the whole list of devices and update _numRecDevices
    paOperation = LATE(pa_context_get_source_info_list)(_paContext,
                                                        PaSourceInfoCallback,
                                                        this);

    WaitForOperationCompletion(paOperation);

    PaUnLock();

    return _numRecDevices;
}

int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index)
{

    if (_recIsInitialized)
    {
        return -1;
    }

    const uint16_t nDevices(RecordingDevices());

    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 " number of available input devices is %u", nDevices);

    if (index > (nDevices - 1))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " device index is out of range [0,%u]", (nDevices - 1));
        return -1;
    }

    _inputDeviceIndex = index;
    _inputDeviceIsSpecified = true;

    return 0;
}

int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
    AudioDeviceModule::WindowsDeviceType /*device*/)
{
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 "WindowsDeviceType not supported");
    return -1;
}

int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
{

    available = false;

    // Try to initialize the playout side
    int32_t res = InitPlayout();

    // Cancel effect of initialization
    StopPlayout();

    if (res != -1)
    {
        available = true;
    }

    return res;
}

int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
{

    available = false;

    // Try to initialize the recording side
    int32_t res = InitRecording();

    // Cancel effect of initialization
    StopRecording();

    if (res != -1)
    {
        available = true;
    }

    return res;
}

int32_t AudioDeviceLinuxPulse::InitPlayout()
{

    CriticalSectionScoped lock(&_critSect);

    if (_playing)
    {
        return -1;
    }

    if (!_outputDeviceIsSpecified)
    {
        return -1;
    }

    if (_playIsInitialized)
    {
        return 0;
    }

    // Initialize the speaker (devices might have been added or removed)
    if (InitSpeaker() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitSpeaker() failed");
    }

    // Set the play sample specification
    pa_sample_spec playSampleSpec;
    playSampleSpec.channels = _playChannels;
    playSampleSpec.format = PA_SAMPLE_S16LE;
    playSampleSpec.rate = sample_rate_hz_;

    // Create a new play stream
    _playStream = LATE(pa_stream_new)(_paContext, "playStream",
                                      &playSampleSpec, NULL);

    if (!_playStream)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to create play stream, err=%d",
                     LATE(pa_context_errno)(_paContext));
        return -1;
    }

    // Provide the playStream to the mixer
    _mixerManager.SetPlayStream(_playStream);

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
        _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
    }

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stream state %d\n", LATE(pa_stream_get_state)(_playStream));

    // Set stream flags
    _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
        | PA_STREAM_INTERPOLATE_TIMING);

    if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // If configuring a specific latency then we want to specify
        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
        // automatically to reach that target latency. However, that flag
        // doesn't exist in Ubuntu 8.04 and many people still use that, so we
        // have to check the protocol version of libpulse.
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
        }

        const pa_sample_spec *spec =
            LATE(pa_stream_get_sample_spec)(_playStream);
        if (!spec)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " pa_stream_get_sample_spec()");
            return -1;
        }

        size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
        uint32_t latency = bytesPerSec
            * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / WEBRTC_PA_MSECS_PER_SEC;
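
        // Worked example (assuming WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS
        // is a 20 ms target): for 16-bit stereo at 48 kHz,
        // pa_bytes_per_second() is 48000 * 2 * 2 = 192000, so
        // latency = 192000 * 20 / 1000 = 3840 bytes.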

        // Set the play buffer attributes
        _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
        _playBufferAttr.tlength = latency; // target fill level of play buffer
        // minimum free num bytes before server request more data
        _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
        _playBufferAttr.prebuf = _playBufferAttr.tlength
            - _playBufferAttr.minreq; // prebuffer tlength before starting playout

        _configuredLatencyPlay = latency;
    }

    // 10 ms of samples: (rate / 100) frames * 2 bytes per sample * channels
    _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels;
    _playbackBufferUnused = _playbackBufferSize;
    _playBuffer = new int8_t[_playbackBufferSize];

    // Enable underflow callback
    LATE(pa_stream_set_underflow_callback)(_playStream,
                                           PaStreamUnderflowCallback, this);

    // Set the state callback function for the stream
    LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback,
                                       this);

    // Mark playout side as initialized
    _playIsInitialized = true;
    _sndCardPlayDelay = 0;
    _sndCardRecDelay = 0;

    return 0;
}

int32_t AudioDeviceLinuxPulse::InitRecording()
{

    CriticalSectionScoped lock(&_critSect);

    if (_recording)
    {
        return -1;
    }

    if (!_inputDeviceIsSpecified)
    {
        return -1;
    }

    if (_recIsInitialized)
    {
        return 0;
    }

    // Initialize the microphone (devices might have been added or removed)
    if (InitMicrophone() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitMicrophone() failed");
    }

    // Set the rec sample specification
    pa_sample_spec recSampleSpec;
    recSampleSpec.channels = _recChannels;
    recSampleSpec.format = PA_SAMPLE_S16LE;
    recSampleSpec.rate = sample_rate_hz_;

    // Create a new rec stream
    _recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec,
                                     NULL);
    if (!_recStream)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to create rec stream, err=%d",
                     LATE(pa_context_errno)(_paContext));
        return -1;
    }

    // Provide the recStream to the mixer
    _mixerManager.SetRecStream(_recStream);

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
        _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
    }

    if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        _recStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
            | PA_STREAM_INTERPOLATE_TIMING);

        // If configuring a specific latency then we want to specify
        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
        // automatically to reach that target latency. However, that flag
        // doesn't exist in Ubuntu 8.04 and many people still use that, so we
        // have to check the protocol version of libpulse.
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
        }

        const pa_sample_spec *spec =
            LATE(pa_stream_get_sample_spec)(_recStream);
        if (!spec)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " pa_stream_get_sample_spec(rec)");
            return -1;
        }

        size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
        uint32_t latency = bytesPerSec
            * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / WEBRTC_PA_MSECS_PER_SEC;
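
        // As on the playout side, this is bytes-per-second scaled down to the
        // configured capture target (WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS);
        // e.g. 16-bit mono at 16 kHz with an assumed 10 ms target gives
        // 32000 * 10 / 1000 = 320 bytes.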

        // Set the rec buffer attributes
        // Note: fragsize specifies a maximum transfer size, not a minimum, so
        // it is not possible to force a high latency setting, only a low one.
        _recBufferAttr.fragsize = latency; // size of fragment
        _recBufferAttr.maxlength = latency + bytesPerSec
            * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        _configuredLatencyRec = latency;
    }

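    // 10 ms of samples: (rate / 100) frames * 2 bytes per sample * channels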
    _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels;
    _recordBufferUsed = 0;
    _recBuffer = new int8_t[_recordBufferSize];

    // Enable overflow callback
    LATE(pa_stream_set_overflow_callback)(_recStream, PaStreamOverflowCallback,
                                          this);

    // Set the state callback function for the stream
    LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback,
                                       this);

    // Mark recording side as initialized
    _recIsInitialized = true;

    return 0;
}
1402
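// Note: starting is a handshake with the rec thread: we set _startRec, wake
// the thread via _timeEventRec, and then block on _recStartEvent until the
// thread reports that capture is actually running (or a 10 s timeout fires).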
int32_t AudioDeviceLinuxPulse::StartRecording()
{

    if (!_recIsInitialized)
    {
        return -1;
    }

    if (_recording)
    {
        return 0;
    }

    // set state to ensure that the recording starts from the audio thread
    _startRec = true;

    // the audio thread will signal when recording has started
    _timeEventRec.Set();
    if (kEventTimeout == _recStartEvent.Wait(10000))
    {
        {
            CriticalSectionScoped lock(&_critSect);
            _startRec = false;
        }
        StopRecording();
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to activate recording");
        return -1;
    }

    {
        CriticalSectionScoped lock(&_critSect);
        if (_recording)
        {
            // the recording state is set by the audio thread after recording
            // has started
        } else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to activate recording");
            return -1;
        }
    }

    return 0;
}

int32_t AudioDeviceLinuxPulse::StopRecording()
{

    CriticalSectionScoped lock(&_critSect);

    if (!_recIsInitialized)
    {
        return 0;
    }

    if (_recStream == NULL)
    {
        return -1;
    }

    _recIsInitialized = false;
    _recording = false;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stopping recording");

    // Stop Recording
    PaLock();

    DisableReadCallback();
    LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_recStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to disconnect rec stream, err=%d\n",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " disconnected recording");
    }

    LATE(pa_stream_unref)(_recStream);
    _recStream = NULL;

    PaUnLock();

    // Provide the (now NULL) recStream to the mixer
    _mixerManager.SetRecStream(_recStream);

    if (_recBuffer)
    {
        delete [] _recBuffer;
        _recBuffer = NULL;
    }

    return 0;
}

bool AudioDeviceLinuxPulse::RecordingIsInitialized() const
{
    return (_recIsInitialized);
}

bool AudioDeviceLinuxPulse::Recording() const
{
    CriticalSectionScoped lock(&_critSect);
    return (_recording);
}

bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const
{
    return (_playIsInitialized);
}

int32_t AudioDeviceLinuxPulse::StartPlayout()
{
    if (!_playIsInitialized)
    {
        return -1;
    }

    if (_playing)
    {
        return 0;
    }

    // set state to ensure that playout starts from the audio thread
    _startPlay = true;

    // Both |_startPlay| and |_playing| need protection since they are also
    // accessed on the playout thread.

    // the audio thread will signal when playout has started
    _timeEventPlay.Set();
    if (kEventTimeout == _playStartEvent.Wait(10000))
    {
        {
            CriticalSectionScoped lock(&_critSect);
            _startPlay = false;
        }
        StopPlayout();
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to activate playout");
        return -1;
    }

    {
        CriticalSectionScoped lock(&_critSect);
        if (_playing)
        {
            // the playing state is set by the audio thread after playout
            // has started
        } else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to activate playing");
            return -1;
        }
    }

    return 0;
}

int32_t AudioDeviceLinuxPulse::StopPlayout()
{

    CriticalSectionScoped lock(&_critSect);

    if (!_playIsInitialized)
    {
        return 0;
    }

    if (_playStream == NULL)
    {
        return -1;
    }

    _playIsInitialized = false;
    _playing = false;
    _sndCardPlayDelay = 0;
    _sndCardRecDelay = 0;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stopping playback");

    // Stop Playout
    PaLock();

    DisableWriteCallback();
    LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_playStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to disconnect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " disconnected playback");
    }

    LATE(pa_stream_unref)(_playStream);
    _playStream = NULL;

    PaUnLock();

    // Provide the (now NULL) playStream to the mixer
    _mixerManager.SetPlayStream(_playStream);

    if (_playBuffer)
    {
        delete [] _playBuffer;
        _playBuffer = NULL;
    }

    return 0;
}

int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const
{
    CriticalSectionScoped lock(&_critSect);
    delayMS = (uint16_t) _sndCardPlayDelay;
    return 0;
}

int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const
{
    CriticalSectionScoped lock(&_critSect);
    delayMS = (uint16_t) _sndCardRecDelay;
    return 0;
}

bool AudioDeviceLinuxPulse::Playing() const
{
    CriticalSectionScoped lock(&_critSect);
    return (_playing);
}

int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer(
    const AudioDeviceModule::BufferType type,
    uint16_t sizeMS)
{

    if (type != AudioDeviceModule::kFixedBufferSize)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " Adaptive buffer size not supported on this platform");
        return -1;
    }

    _playBufType = type;
    _playBufDelayFixed = sizeMS;

    return 0;
}

int32_t AudioDeviceLinuxPulse::PlayoutBuffer(
    AudioDeviceModule::BufferType& type,
    uint16_t& sizeMS) const
{

    type = _playBufType;
    sizeMS = _playBufDelayFixed;

    return 0;
}

int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const
{

    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " API call not supported on this platform");
    return -1;
}

bool AudioDeviceLinuxPulse::PlayoutWarning() const
{
    CriticalSectionScoped lock(&_critSect);
    return (_playWarning > 0);
}

bool AudioDeviceLinuxPulse::PlayoutError() const
{
    CriticalSectionScoped lock(&_critSect);
    return (_playError > 0);
}

bool AudioDeviceLinuxPulse::RecordingWarning() const
{
    CriticalSectionScoped lock(&_critSect);
    return (_recWarning > 0);
}

bool AudioDeviceLinuxPulse::RecordingError() const
{
    CriticalSectionScoped lock(&_critSect);
    return (_recError > 0);
}

void AudioDeviceLinuxPulse::ClearPlayoutWarning()
{
    CriticalSectionScoped lock(&_critSect);
    _playWarning = 0;
}

void AudioDeviceLinuxPulse::ClearPlayoutError()
{
    CriticalSectionScoped lock(&_critSect);
    _playError = 0;
}

void AudioDeviceLinuxPulse::ClearRecordingWarning()
{
    CriticalSectionScoped lock(&_critSect);
    _recWarning = 0;
}

void AudioDeviceLinuxPulse::ClearRecordingError()
{
    CriticalSectionScoped lock(&_critSect);
    _recError = 0;
}

// ============================================================================
// Private Methods
// ============================================================================

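// The PulseAudio C API reports events through plain function pointers plus a
// void* user argument. Each static *Callback below is such a trampoline: it
// casts the user argument back to the AudioDeviceLinuxPulse instance and
// forwards to the corresponding *CallbackHandler member function.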
void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaContextStateCallbackHandler(
        c);
}

// ----------------------------------------------------------------------------
// PaSinkInfoCallback
// ----------------------------------------------------------------------------

void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
                                               const pa_sink_info *i, int eol,
                                               void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSinkInfoCallbackHandler(
        i, eol);
}

void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
                                                 const pa_source_info *i,
                                                 int eol, void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSourceInfoCallbackHandler(
        i, eol);
}

void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/,
                                                 const pa_server_info *i,
                                                 void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaServerInfoCallbackHandler(i);
}

void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamStateCallbackHandler(p);
}

void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context *c)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " context state cb");

    pa_context_state_t state = LATE(pa_context_get_state)(c);
    switch (state)
    {
        case PA_CONTEXT_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " unconnected");
            break;
        case PA_CONTEXT_CONNECTING:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " no state");
            break;
        case PA_CONTEXT_FAILED:
        case PA_CONTEXT_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " failed");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
        case PA_CONTEXT_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " ready");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
    }
}

void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info *i,
                                                      int eol)
{
    if (eol)
    {
        // Signal that we are done
        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
        return;
    }

    if (_numPlayDevices == _deviceIndex)
    {
        // Convert the device index to the one of the sink
        _paDeviceIndex = i->index;

        if (_playDeviceName)
        {
            // Copy the sink name
            strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize);
            _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
        }
        if (_playDisplayDeviceName)
        {
            // Copy the sink display name
            strncpy(_playDisplayDeviceName, i->description,
                    kAdmMaxDeviceNameSize);
            _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
        }
    }

    _numPlayDevices++;
}

void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(
    const pa_source_info *i,
    int eol)
{
    if (eol)
    {
        // Signal that we are done
        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
        return;
    }

    // We don't want to list output devices
    if (i->monitor_of_sink == PA_INVALID_INDEX)
    {
        if (_numRecDevices == _deviceIndex)
        {
            // Convert the device index to the one of the source
            _paDeviceIndex = i->index;

            if (_recDeviceName)
            {
                // copy the source name
                strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize);
                _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
            }
            if (_recDisplayDeviceName)
            {
                // Copy the source display name
                strncpy(_recDisplayDeviceName, i->description,
                        kAdmMaxDeviceNameSize);
                _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
            }
        }

        _numRecDevices++;
    }
}

void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(const pa_server_info *i)
{
    // Use PA native sampling rate
    sample_rate_hz_ = i->sample_spec.rate;

    // Copy the PA server version
    strncpy(_paServerVersion, i->server_version, 31);
    _paServerVersion[31] = '\0';

    if (_recDisplayDeviceName)
    {
        // Copy the source name
        strncpy(_recDisplayDeviceName, i->default_source_name,
                kAdmMaxDeviceNameSize);
        _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }

    if (_playDisplayDeviceName)
    {
        // Copy the sink name
        strncpy(_playDisplayDeviceName, i->default_sink_name,
                kAdmMaxDeviceNameSize);
        _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }

    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}

void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream *p)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stream state cb");

    pa_stream_state_t state = LATE(pa_stream_get_state)(p);
    switch (state)
    {
        case PA_STREAM_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " unconnected");
            break;
        case PA_STREAM_CREATING:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " creating");
            break;
        case PA_STREAM_FAILED:
        case PA_STREAM_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " failed");
            break;
        case PA_STREAM_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " ready");
            break;
    }

    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}

int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion()
{
    PaLock();

    pa_operation* paOperation = NULL;

    // get the server info and update deviceName
    paOperation = LATE(pa_context_get_server_info)(_paContext,
                                                   PaServerInfoCallback, this);

    WaitForOperationCompletion(paOperation);

    PaUnLock();

    WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
                 " checking PulseAudio version: %s", _paServerVersion);

    return 0;
}

int32_t AudioDeviceLinuxPulse::InitSamplingFrequency()
{
    PaLock();

    pa_operation* paOperation = NULL;

    // Get the server info and update sample_rate_hz_
    paOperation = LATE(pa_context_get_server_info)(_paContext,
                                                   PaServerInfoCallback, this);

    WaitForOperationCompletion(paOperation);

    PaUnLock();

    return 0;
}

int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice,
                                                    char* name,
                                                    uint16_t& index)
{
    char tmpName[kAdmMaxDeviceNameSize] = {0};
    // subtract length of "default: "
    uint16_t nameLen = kAdmMaxDeviceNameSize - 9;
    char* pName = NULL;

    if (name)
    {
        // Add "default: "
        strcpy(name, "default: ");
        pName = &name[9];
    }

    // Tell the callback that we want
    // the name for this device
    if (recDevice)
    {
        _recDisplayDeviceName = tmpName;
    } else
    {
        _playDisplayDeviceName = tmpName;
    }

    // Set members
    _paDeviceIndex = -1;
    _deviceIndex = 0;
    _numPlayDevices = 0;
    _numRecDevices = 0;

    PaLock();

    pa_operation* paOperation = NULL;

    // Get the server info and update deviceName
    paOperation = LATE(pa_context_get_server_info)(_paContext,
                                                   PaServerInfoCallback, this);

    WaitForOperationCompletion(paOperation);

    // Get the device index
    if (recDevice)
    {
        paOperation
            = LATE(pa_context_get_source_info_by_name)(_paContext,
                                                       (char *) tmpName,
                                                       PaSourceInfoCallback,
                                                       this);
    } else
    {
        paOperation
            = LATE(pa_context_get_sink_info_by_name)(_paContext,
                                                     (char *) tmpName,
                                                     PaSinkInfoCallback, this);
    }

    WaitForOperationCompletion(paOperation);

    PaUnLock();

    // Set the index
    index = _paDeviceIndex;

    if (name)
    {
        // Copy to name string and make sure it stays null-terminated even if
        // the device name fills the remaining space.
        strncpy(pName, tmpName, nameLen);
        name[kAdmMaxDeviceNameSize - 1] = '\0';
    }

    // Clear members
    _playDisplayDeviceName = NULL;
    _recDisplayDeviceName = NULL;
    _paDeviceIndex = -1;
    _deviceIndex = -1;
    _numPlayDevices = 0;
    _numRecDevices = 0;

    return 0;
}

int32_t AudioDeviceLinuxPulse::InitPulseAudio()
{
    int retVal = 0;

    // Load libpulse
    if (!PaSymbolTable.Load())
    {
        // Most likely the Pulse library and sound server are not installed on
        // this system
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to load symbol table");
        return -1;
    }

    // Create a mainloop API and connection to the default server
    // the mainloop is the internal asynchronous API event loop
    if (_paMainloop) {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " PA mainloop already exists");
        return -1;
    }
    _paMainloop = LATE(pa_threaded_mainloop_new)();
    if (!_paMainloop)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " could not create mainloop");
        return -1;
    }

    // Start the threaded main loop
    retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to start main loop, error=%d", retVal);
        return -1;
    }

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " mainloop running!");

    PaLock();

    _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
    if (!_paMainloopApi)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " could not create mainloop API");
        PaUnLock();
        return -1;
    }

    // Create a new PulseAudio context
    if (_paContext) {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " PA context already exists");
        PaUnLock();
        return -1;
    }
    _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");

    if (!_paContext)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " could not create context");
        PaUnLock();
        return -1;
    }

    // Set state callback function
    LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback,
                                        this);

    // Connect the context to a server (default)
    _paStateChanged = false;
    retVal = LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN,
                                      NULL);

    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to connect context, error=%d", retVal);
        PaUnLock();
        return -1;
    }

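    // pa_threaded_mainloop_wait() atomically releases the mainloop lock and
    // blocks until pa_threaded_mainloop_signal() is called from the context
    // state callback; looping on _paStateChanged guards against spurious
    // wakeups.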
    // Wait for state change
    while (!_paStateChanged)
    {
        LATE(pa_threaded_mainloop_wait)(_paMainloop);
    }

    // Now check to see what final state we reached.
    pa_context_state_t state = LATE(pa_context_get_state)(_paContext);

    if (state != PA_CONTEXT_READY)
    {
        if (state == PA_CONTEXT_FAILED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to connect to PulseAudio sound server");
        } else if (state == PA_CONTEXT_TERMINATED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " PulseAudio connection terminated early");
        } else
        {
            // Shouldn't happen, because we only signal on one of those three
            // states
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " unknown problem connecting to PulseAudio");
        }
        PaUnLock();
        return -1;
    }

    PaUnLock();

    // Give the objects to the mixer manager
    _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);

    // Check the version
    if (CheckPulseAudioVersion() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " PulseAudio version %s not supported", _paServerVersion);
        return -1;
    }

    // Initialize sampling frequency
    if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to initialize sampling frequency, set to %d Hz",
                     sample_rate_hz_);
        return -1;
    }

    return 0;
}
2210
TerminatePulseAudio()2211 int32_t AudioDeviceLinuxPulse::TerminatePulseAudio()
2212 {
2213 // Do nothing if the instance doesn't exist
2214 // likely PaSymbolTable.Load() fails
2215 if (!_paMainloop) {
2216 return 0;
2217 }
2218
2219 PaLock();
2220
2221 // Disconnect the context
2222 if (_paContext)
2223 {
2224 LATE(pa_context_disconnect)(_paContext);
2225 }
2226
2227 // Unreference the context
2228 if (_paContext)
2229 {
2230 LATE(pa_context_unref)(_paContext);
2231 }
2232
2233 PaUnLock();
2234 _paContext = NULL;
2235
2236 // Stop the threaded main loop
2237 if (_paMainloop)
2238 {
2239 LATE(pa_threaded_mainloop_stop)(_paMainloop);
2240 }
2241
2242 // Free the mainloop
2243 if (_paMainloop)
2244 {
2245 LATE(pa_threaded_mainloop_free)(_paMainloop);
2246 }
2247
2248 _paMainloop = NULL;
2249
2250 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2251 " PulseAudio terminated");
2252
2253 return 0;
2254 }
2255
PaLock()2256 void AudioDeviceLinuxPulse::PaLock()
2257 {
2258 LATE(pa_threaded_mainloop_lock)(_paMainloop);
2259 }
2260
PaUnLock()2261 void AudioDeviceLinuxPulse::PaUnLock()
2262 {
2263 LATE(pa_threaded_mainloop_unlock)(_paMainloop);
2264 }
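
// Note: pa_threaded_mainloop_lock() must not be called from the mainloop
// thread itself. PulseAudio runs the stream/context callbacks on that
// thread with the lock effectively held, so callbacks such as
// PaStreamWriteCallback below must never call PaLock() or they would
// deadlock.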

void AudioDeviceLinuxPulse::WaitForOperationCompletion(
    pa_operation* paOperation) const
{
    if (!paOperation)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "paOperation NULL in WaitForOperationCompletion");
        return;
    }

    while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING)
    {
        LATE(pa_threaded_mainloop_wait)(_paMainloop);
    }

    LATE(pa_operation_unref)(paOperation);
}
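
// Typical usage, as in the device-enumeration code above (an illustrative
// sketch; any asynchronous call that returns a pa_operation follows the
// same shape, and deviceName here is a hypothetical local):
//
//   PaLock();
//   pa_operation* op =
//       LATE(pa_context_get_sink_info_by_name)(_paContext, deviceName,
//                                              PaSinkInfoCallback, this);
//   WaitForOperationCompletion(op);  // Blocks until done, then unrefs op.
//   PaUnLock();
//
// The registered callback must end by calling pa_threaded_mainloop_signal(),
// otherwise the wait loop above never wakes up to re-check the operation
// state.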

// ============================================================================
// Thread Methods
// ============================================================================

void AudioDeviceLinuxPulse::EnableWriteCallback()
{
    if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY)
    {
        // May already have available space. Must check.
        _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream);
        if (_tempBufferSpace > 0)
        {
            // There is already space available, so if we register a write
            // callback now it will not receive any event. Dispatch one
            // ourselves instead.
            _timeEventPlay.Set();
            return;
        }
    }

    LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback,
                                       this);
}

void AudioDeviceLinuxPulse::DisableWriteCallback()
{
    LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
}

void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream */*unused*/,
                                                  size_t buffer_space,
                                                  void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamWriteCallbackHandler(
        buffer_space);
}

void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace)
{
    _tempBufferSpace = bufferSpace;

    // Since we write the data asynchronously on a different thread, we have
    // to temporarily disable the write callback or else Pulse will call it
    // continuously until we write the data. We re-enable it in
    // PlayThreadProcess().
    DisableWriteCallback();
    _timeEventPlay.Set();
}

void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/,
                                                      void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->
        PaStreamUnderflowCallbackHandler();
}

void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler()
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " Playout underflow");

    if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // We didn't configure a pa_buffer_attr before, so switching to one
        // now would be questionable.
        return;
    }

    // Otherwise reconfigure the stream with a higher target latency.

    const pa_sample_spec *spec = LATE(pa_stream_get_sample_spec)(_playStream);
    if (!spec)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " pa_stream_get_sample_spec()");
        return;
    }

    size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
    uint32_t newLatency = _configuredLatencyPlay + bytesPerSec
        * WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / WEBRTC_PA_MSECS_PER_SEC;
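
    // Worked example: for 16-bit stereo audio at 48 kHz,
    // pa_bytes_per_second() returns 48000 * 2 * 2 = 192000 bytes/s, so a
    // latency increment of, say, 20 ms grows the target by
    // 192000 * 20 / 1000 = 3840 bytes on every underflow.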

    // Set the play buffer attributes
    _playBufferAttr.maxlength = newLatency;
    _playBufferAttr.tlength = newLatency;
    _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
    _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
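
    // In pa_buffer_attr terms: maxlength caps the server-side buffer,
    // tlength is the target fill level, minreq is the smallest amount the
    // server will request in one write callback, and prebuf is how much
    // must be buffered before playback (re)starts. All values are in bytes.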

    pa_operation *op = LATE(pa_stream_set_buffer_attr)(_playStream,
                                                       &_playBufferAttr, NULL,
                                                       NULL);
    if (!op)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " pa_stream_set_buffer_attr()");
        return;
    }

    // Don't need to wait for this to complete.
    LATE(pa_operation_unref)(op);

    // Save the new latency in case we underflow again.
    _configuredLatencyPlay = newLatency;
}

void AudioDeviceLinuxPulse::EnableReadCallback()
{
    LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this);
}

void AudioDeviceLinuxPulse::DisableReadCallback()
{
    LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
}

void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/,
                                                 size_t /*unused2*/,
                                                 void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamReadCallbackHandler();
}

void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler()
{
    // We get the data pointer and size now in order to save one Lock/Unlock
    // on the worker thread.
    if (LATE(pa_stream_peek)(_recStream, &_tempSampleData,
                             &_tempSampleDataSize) != 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " Can't read data!");
        return;
    }

    // Since we consume the data asynchronously on a different thread, we
    // have to temporarily disable the read callback or else Pulse will call
    // it continuously until we consume the data. We re-enable it in
    // RecThreadProcess().
    DisableReadCallback();
    _timeEventRec.Set();
}

void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/,
                                                     void *pThis)
{
    static_cast<AudioDeviceLinuxPulse*> (pThis)->
        PaStreamOverflowCallbackHandler();
}

void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler()
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 " Recording overflow");
}

int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream)
{
    if (!WEBRTC_PA_REPORT_LATENCY)
    {
        return 0;
    }

    if (!stream)
    {
        return 0;
    }

    pa_usec_t latency;
    int negative;
    if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " Can't query latency");
        // We'd rather continue playout/capture with an incorrect delay than
        // stop it altogether, so return a valid value.
        return 0;
    }

    if (negative)
    {
        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " warning: pa_stream_get_latency reported negative delay");

        // The delay can be negative for monitoring streams if the captured
        // samples haven't been played yet. In such a case, "latency"
        // contains the magnitude, so we must negate it to get the real
        // value.
        int32_t tmpLatency = (int32_t) -latency;
        if (tmpLatency < 0)
        {
            // Make sure that we don't use a negative delay
            tmpLatency = 0;
        }

        return tmpLatency;
    } else
    {
        return (int32_t) latency;
    }
}

int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData,
                                                size_t bufferSize)
{
    size_t size = bufferSize;
    uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels);

    // Account for the peeked data and the used data
    uint32_t recDelay = (uint32_t) ((LatencyUsecs(_recStream)
        / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize));

    _sndCardRecDelay = recDelay;
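
    // _recordBufferSize holds exactly 10 ms of audio (numRecSamples 16-bit
    // samples per channel, hence the factor of 2 above), so every complete
    // buffer's worth of data that has been peeked but not yet processed
    // adds 10 ms on top of the device latency. E.g. with 15 ms of device
    // latency and one and a half buffers pending, recDelay = 15 + 10 * 1
    // = 25 ms.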

    if (_playStream)
    {
        // Get the playout delay
        _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) / 1000);
    }

    if (_recordBufferUsed > 0)
    {
        // Have to copy to the buffer until it is full
        size_t copy = _recordBufferSize - _recordBufferUsed;
        if (size < copy)
        {
            copy = size;
        }

        memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
        _recordBufferUsed += copy;
        bufferData = static_cast<const char *> (bufferData) + copy;
        size -= copy;

        if (_recordBufferUsed != _recordBufferSize)
        {
            // Not enough data yet to pass to VoE
            return 0;
        }

        // Provide data to VoiceEngine
        if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1)
        {
            // We have stopped recording
            return -1;
        }

        _recordBufferUsed = 0;
    }

    // Now process full 10ms sample sets directly from the input
    while (size >= _recordBufferSize)
    {
        // Provide data to VoiceEngine
        if (ProcessRecordedData(
            static_cast<int8_t *> (const_cast<void *> (bufferData)),
            numRecSamples, recDelay) == -1)
        {
            // We have stopped recording
            return -1;
        }

        bufferData = static_cast<const char *> (bufferData) +
            _recordBufferSize;
        size -= _recordBufferSize;

        // We have consumed 10ms of data
        recDelay -= 10;
    }

    // Now save any leftovers for later.
    if (size > 0)
    {
        memcpy(_recBuffer, bufferData, size);
        _recordBufferUsed = size;
    }

    return 0;
}

int32_t AudioDeviceLinuxPulse::ProcessRecordedData(
    int8_t *bufferData,
    uint32_t bufferSizeInSamples,
    uint32_t recDelay) EXCLUSIVE_LOCKS_REQUIRED(_critSect)
{
    uint32_t currentMicLevel(0);
    uint32_t newMicLevel(0);

    _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);

    if (AGC())
    {
        // Store the current mic level in the audio buffer if AGC is enabled
        if (MicrophoneVolume(currentMicLevel) == 0)
        {
            // This call does not affect the actual microphone volume
            _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
        }
    }

    const uint32_t clockDrift(0);
    // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
    // near-end signals at the AEC for PulseAudio. I think the system delay
    // is being correctly calculated here, but for legacy reasons we add
    // +10 ms to the value in the AEC. The real fix will be part of a larger
    // investigation into managing system delay in the AEC.
    if (recDelay > 10)
        recDelay -= 10;
    else
        recDelay = 0;
    _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift);
    _ptrAudioBuffer->SetTypingStatus(KeyPressed());

    // Deliver the recorded samples, the current mic level, etc. to the
    // observer using the registered callback.
    UnLock();
    _ptrAudioBuffer->DeliverRecordedData();
    Lock();

    // We have been unlocked - check the flag again
    if (!_recording)
    {
        return -1;
    }

    if (AGC())
    {
        newMicLevel = _ptrAudioBuffer->NewMicLevel();
        if (newMicLevel != 0)
        {
            // The VQE will only deliver non-zero microphone levels when a
            // change is needed.
            // Set this new mic level (received from the observer as return
            // value in the callback).
            WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                         " AGC change of volume: old=%u => new=%u",
                         currentMicLevel, newMicLevel);
            if (SetMicrophoneVolume(newMicLevel) == -1)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                             " the required modification of the microphone "
                             "volume failed");
            }
        }
    }

    return 0;
}

bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis)
{
    return (static_cast<AudioDeviceLinuxPulse*> (pThis)->PlayThreadProcess());
}

bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis)
{
    return (static_cast<AudioDeviceLinuxPulse*> (pThis)->RecThreadProcess());
}

bool AudioDeviceLinuxPulse::PlayThreadProcess()
{
    switch (_timeEventPlay.Wait(1000))
    {
        case kEventSignaled:
            _timeEventPlay.Reset();
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    Lock();

    if (_startPlay)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startPlay true, performing initial actions");

        _startPlay = false;
        _playDeviceName = NULL;

        // Set if not default device
        if (_outputDeviceIndex > 0)
        {
            // Get the playout device name
            _playDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _outputDeviceIndex;
            PlayoutDevices();
        }

        // Start muted only supported on 0.9.11 and up
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            // Get the currently saved speaker mute status
            // and set the initial mute status accordingly
            bool enabled(false);
            _mixerManager.SpeakerMute(enabled);
            if (enabled)
            {
                _playStreamFlags |= PA_STREAM_START_MUTED;
            }
        }
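        // Reusing the "adjust latency" protocol-version constant here is
        // deliberate: PA_STREAM_START_MUTED and PA_STREAM_ADJUST_LATENCY
        // were, as far as we know, introduced in the same PulseAudio
        // release (0.9.11), so one version check covers both features.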

        // Get the currently saved speaker volume
        uint32_t volume = 0;
        if (update_speaker_volume_at_startup_)
            _mixerManager.SpeakerVolume(volume);

        PaLock();

        // NULL gives PA the choice of startup volume.
        pa_cvolume* ptr_cvolume = NULL;
        if (update_speaker_volume_at_startup_)
        {
            pa_cvolume cVolumes;
            ptr_cvolume = &cVolumes;

            // Set the same volume for all channels
            const pa_sample_spec *spec =
                LATE(pa_stream_get_sample_spec)(_playStream);
            LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
            update_speaker_volume_at_startup_ = false;
        }

        // Connect the stream to a sink
        if (LATE(pa_stream_connect_playback)(
            _playStream,
            _playDeviceName,
            &_playBufferAttr,
            (pa_stream_flags_t) _playStreamFlags,
            ptr_cvolume, NULL) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to connect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " play stream connected");

        // Wait for state change
        while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " play stream ready");

        // We can now handle write callbacks
        EnableWriteCallback();

        PaUnLock();

        // Clear device name
        if (_playDeviceName)
        {
            delete [] _playDeviceName;
            _playDeviceName = NULL;
        }

        _playing = true;
        _playStartEvent.Set();

        UnLock();
        return true;
    }

    if (_playing)
    {
        if (!_recording)
        {
            // Update the playout delay
            _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream)
                / 1000);
        }

        if (_playbackBufferUnused < _playbackBufferSize)
        {
            size_t write = _playbackBufferSize - _playbackBufferUnused;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            PaLock();
            if (LATE(pa_stream_write)(
                _playStream,
                (void *) &_playBuffer[_playbackBufferUnused],
                write, NULL, (int64_t) 0,
                PA_SEEK_RELATIVE) != PA_OK)
            {
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id,
                                     " pending playout error exists");
                    }
                    // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(kTraceError, kTraceUtility, _id,
                                 " kPlayoutError message posted: "
                                 "_writeErrors=%u, error=%d",
                                 _writeErrors,
                                 LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            _playbackBufferUnused += write;
            _tempBufferSpace -= write;
        }
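
        // At this point any remainder of the previous 10 ms chunk has been
        // flushed to the server. If Pulse still has room, ask VoiceEngine
        // for a fresh chunk below; otherwise just re-arm the write callback.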

        uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
        if (_tempBufferSpace > 0) // Might have been reduced to zero above
        {
            // Ask for new PCM data to be played out using the
            // AudioDeviceBuffer; ensure that this callback is executed
            // without holding the audio-thread lock.
            UnLock();
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " requesting data");
            uint32_t nSamples =
                _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
            Lock();

            // We have been unlocked - check the flag again
            if (!_playing)
            {
                UnLock();
                return true;
            }

            nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
            if (nSamples != numPlaySamples)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                             " invalid number of output samples(%d)",
                             nSamples);
            }

            size_t write = _playbackBufferSize;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " will write");
            PaLock();
            if (LATE(pa_stream_write)(_playStream, (void *) &_playBuffer[0],
                                      write, NULL, (int64_t) 0,
                                      PA_SEEK_RELATIVE) != PA_OK)
            {
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id,
                                     " pending playout error exists");
                    }
                    // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(kTraceError, kTraceUtility, _id,
                                 " kPlayoutError message posted: "
                                 "_writeErrors=%u, error=%d",
                                 _writeErrors,
                                 LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            _playbackBufferUnused = write;
        }

        _tempBufferSpace = 0;
        PaLock();
        EnableWriteCallback();
        PaUnLock();
    } // _playing

    UnLock();
    return true;
}

bool AudioDeviceLinuxPulse::RecThreadProcess()
{
    switch (_timeEventRec.Wait(1000))
    {
        case kEventSignaled:
            _timeEventRec.Reset();
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    Lock();

    if (_startRec)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startRec true, performing initial actions");

        _recDeviceName = NULL;

        // Set if not default device
        if (_inputDeviceIndex > 0)
        {
            // Get the recording device name
            _recDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _inputDeviceIndex;
            RecordingDevices();
        }

        PaLock();

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " connecting stream");

        // Connect the stream to a source
        if (LATE(pa_stream_connect_record)(_recStream, _recDeviceName,
                                           &_recBufferAttr,
                                           (pa_stream_flags_t) _recStreamFlags)
            != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to connect rec stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " connected");

        // Wait for state change
        while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " done");

        // We can now handle read callbacks
        EnableReadCallback();

        PaUnLock();

        // Clear device name
        if (_recDeviceName)
        {
            delete [] _recDeviceName;
            _recDeviceName = NULL;
        }

        _startRec = false;
        _recording = true;
        _recStartEvent.Set();

        UnLock();
        return true;
    }

    if (_recording)
    {
        // Read data and provide it to VoiceEngine
        if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1)
        {
            UnLock();
            return true;
        }

        _tempSampleData = NULL;
        _tempSampleDataSize = 0;

        PaLock();
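        // Drain whatever has queued up on the server since the read
        // callback fired. pa_stream_peek() and pa_stream_drop() must
        // alternate: each drop() acknowledges (and frees) the fragment
        // returned by the preceding peek().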
        while (true)
        {
            // Ack the last thing we read
            if (LATE(pa_stream_drop)(_recStream) != 0)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                             " failed to drop, err=%d\n",
                             LATE(pa_context_errno)(_paContext));
            }

            if (LATE(pa_stream_readable_size)(_recStream) <= 0)
            {
                // Then that was all the data
                break;
            }

            // Else more data.
            const void *sampleData;
            size_t sampleDataSize;

            if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize)
                != 0)
            {
                _recError = 1; // triggers callback from module process thread
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                             " RECORD_ERROR message posted, error = %d",
                             LATE(pa_context_errno)(_paContext));
                break;
            }

            _sndCardRecDelay = (uint32_t) (LatencyUsecs(_recStream)
                / 1000);

            // Drop lock for sigslot dispatch, which could take a while.
            PaUnLock();
            // Read data and provide it to VoiceEngine
            if (ReadRecordedData(sampleData, sampleDataSize) == -1)
            {
                UnLock();
                return true;
            }
            PaLock();

            // Return to the top of the loop for the ack and the check for
            // more data.
        }

        EnableReadCallback();
        PaUnLock();
    } // _recording

    UnLock();
    return true;
}

bool AudioDeviceLinuxPulse::KeyPressed() const
{
    char szKey[32];
    unsigned int i = 0;
    char state = 0;

    if (!_XDisplay)
        return false;

    // Check key map status
    XQueryKeymap(_XDisplay, szKey);

    // A bit change in the keymap means a key is pressed
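    // The expression (new ^ old) & new keeps exactly the bits that changed
    // from 0 to 1, i.e. newly pressed keys, while ignoring releases. For
    // example, old = 0b0100 and new = 0b0110 gives
    // (0b0110 ^ 0b0100) & 0b0110 = 0b0010: only the fresh press survives.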
    for (i = 0; i < sizeof(szKey); i++)
        state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];

    // Save the old state
    memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
    return (state != 0);
}

}  // namespace webrtc