1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "webrtc/modules/audio_device/audio_device_config.h"
12 #include "webrtc/modules/audio_device/win/audio_device_wave_win.h"
13
14 #include "webrtc/system_wrappers/include/event_wrapper.h"
15 #include "webrtc/system_wrappers/include/tick_util.h"
16 #include "webrtc/system_wrappers/include/trace.h"
17
18 #include <windows.h>
19 #include <objbase.h> // CoTaskMemAlloc, CoTaskMemFree
20 #include <strsafe.h> // StringCchCopy(), StringCchCat(), StringCchPrintf()
21 #include <assert.h>
22
23 // Avoids the need for the Windows 7 SDK.
24 #ifndef WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE
25 #define WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE 0x0010
26 #endif
27
28 // Supported in Windows Vista and Windows 7.
29 // http://msdn.microsoft.com/en-us/library/dd370819(v=VS.85).aspx
30 // Taken from Mmddk.h.
31 #define DRV_RESERVED 0x0800
32 #define DRV_QUERYFUNCTIONINSTANCEID (DRV_RESERVED + 17)
33 #define DRV_QUERYFUNCTIONINSTANCEIDSIZE (DRV_RESERVED + 18)
34
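// POW2(A) expands to (2 << ((A) - 1)), i.e. 2^A for A >= 1 (the shift is undefined for A == 0).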
35 #define POW2(A) (2 << ((A) - 1))
36
37 namespace webrtc {
38
39 // ============================================================================
40 // Construction & Destruction
41 // ============================================================================
42
43 // ----------------------------------------------------------------------------
44 // AudioDeviceWindowsWave - ctor
45 // ----------------------------------------------------------------------------
46
47 AudioDeviceWindowsWave::AudioDeviceWindowsWave(const int32_t id) :
48 _ptrAudioBuffer(NULL),
49 _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
50 _timeEvent(*EventTimerWrapper::Create()),
51 _recStartEvent(*EventWrapper::Create()),
52 _playStartEvent(*EventWrapper::Create()),
53 _hGetCaptureVolumeThread(NULL),
54 _hShutdownGetVolumeEvent(NULL),
55 _hSetCaptureVolumeThread(NULL),
56 _hShutdownSetVolumeEvent(NULL),
57 _hSetCaptureVolumeEvent(NULL),
58 _critSectCb(*CriticalSectionWrapper::CreateCriticalSection()),
59 _id(id),
60 _mixerManager(id),
61 _usingInputDeviceIndex(false),
62 _usingOutputDeviceIndex(false),
63 _inputDevice(AudioDeviceModule::kDefaultDevice),
64 _outputDevice(AudioDeviceModule::kDefaultDevice),
65 _inputDeviceIndex(0),
66 _outputDeviceIndex(0),
67 _inputDeviceIsSpecified(false),
68 _outputDeviceIsSpecified(false),
69 _initialized(false),
70 _recIsInitialized(false),
71 _playIsInitialized(false),
72 _recording(false),
73 _playing(false),
74 _startRec(false),
75 _stopRec(false),
76 _startPlay(false),
77 _stopPlay(false),
78 _AGC(false),
79 _hWaveIn(NULL),
80 _hWaveOut(NULL),
81 _recChannels(N_REC_CHANNELS),
82 _playChannels(N_PLAY_CHANNELS),
83 _recBufCount(0),
84 _recPutBackDelay(0),
85 _recDelayCount(0),
86 _playBufCount(0),
87 _prevPlayTime(0),
88 _prevRecTime(0),
89 _prevTimerCheckTime(0),
90 _timesdwBytes(0),
91 _timerFaults(0),
92 _timerRestartAttempts(0),
93 _no_of_msecleft_warnings(0),
94 _MAX_minBuffer(65),
95 _useHeader(0),
96 _dTcheckPlayBufDelay(10),
97 _playBufDelay(80),
98 _playBufDelayFixed(80),
99 _minPlayBufDelay(20),
100 _avgCPULoad(0),
101 _sndCardPlayDelay(0),
102 _sndCardRecDelay(0),
103 _plSampOld(0),
104 _rcSampOld(0),
105 _playBufType(AudioDeviceModule::kAdaptiveBufferSize),
106 _recordedBytes(0),
107 _playWarning(0),
108 _playError(0),
109 _recWarning(0),
110 _recError(0),
111 _newMicLevel(0),
112 _minMicVolume(0),
113 _maxMicVolume(0)
114 {
115 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__);
116
117     // Initialize the performance-counter frequency; set to 0 if the query fails
118 if (!QueryPerformanceFrequency(&_perfFreq))
119 {
120 _perfFreq.QuadPart = 0;
121 }
122
123 _hShutdownGetVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
124 _hShutdownSetVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
125 _hSetCaptureVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
126 }
127
128 // ----------------------------------------------------------------------------
129 // AudioDeviceWindowsWave - dtor
130 // ----------------------------------------------------------------------------
131
132 AudioDeviceWindowsWave::~AudioDeviceWindowsWave()
133 {
134 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", __FUNCTION__);
135
136 Terminate();
137
138 delete &_recStartEvent;
139 delete &_playStartEvent;
140 delete &_timeEvent;
141 delete &_critSect;
142 delete &_critSectCb;
143
144 if (NULL != _hShutdownGetVolumeEvent)
145 {
146 CloseHandle(_hShutdownGetVolumeEvent);
147 _hShutdownGetVolumeEvent = NULL;
148 }
149
150 if (NULL != _hShutdownSetVolumeEvent)
151 {
152 CloseHandle(_hShutdownSetVolumeEvent);
153 _hShutdownSetVolumeEvent = NULL;
154 }
155
156 if (NULL != _hSetCaptureVolumeEvent)
157 {
158 CloseHandle(_hSetCaptureVolumeEvent);
159 _hSetCaptureVolumeEvent = NULL;
160 }
161 }
162
163 // ============================================================================
164 // API
165 // ============================================================================
166
167 // ----------------------------------------------------------------------------
168 // AttachAudioBuffer
169 // ----------------------------------------------------------------------------
170
171 void AudioDeviceWindowsWave::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
172 {
173
174 CriticalSectionScoped lock(&_critSect);
175
176 _ptrAudioBuffer = audioBuffer;
177
178 // inform the AudioBuffer about default settings for this implementation
179 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
180 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
181 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
182 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
183 }
184
185 // ----------------------------------------------------------------------------
186 // ActiveAudioLayer
187 // ----------------------------------------------------------------------------
188
189 int32_t AudioDeviceWindowsWave::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const
190 {
191 audioLayer = AudioDeviceModule::kWindowsWaveAudio;
192 return 0;
193 }
194
195 // ----------------------------------------------------------------------------
196 // Init
197 // ----------------------------------------------------------------------------
198
199 int32_t AudioDeviceWindowsWave::Init()
200 {
201
202 CriticalSectionScoped lock(&_critSect);
203
204 if (_initialized)
205 {
206 return 0;
207 }
208
209 const uint32_t nowTime(TickTime::MillisecondTimestamp());
210
211 _recordedBytes = 0;
212 _prevRecByteCheckTime = nowTime;
213 _prevRecTime = nowTime;
214 _prevPlayTime = nowTime;
215 _prevTimerCheckTime = nowTime;
216
217 _playWarning = 0;
218 _playError = 0;
219 _recWarning = 0;
220 _recError = 0;
221
222 _mixerManager.EnumerateAll();
223
224 if (_ptrThread)
225 {
226 // thread is already created and active
227 return 0;
228 }
229
230 const char* threadName = "webrtc_audio_module_thread";
231 _ptrThread.reset(new rtc::PlatformThread(ThreadFunc, this, threadName));
232 _ptrThread->Start();
233 _ptrThread->SetPriority(rtc::kRealtimePriority);
234
235 const bool periodic(true);
236 if (!_timeEvent.StartTimer(periodic, TIMER_PERIOD_MS))
237 {
238 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
239 "failed to start the timer event");
240 _ptrThread->Stop();
241 _ptrThread.reset();
242 return -1;
243 }
244 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
245 "periodic timer (dT=%d) is now active", TIMER_PERIOD_MS);
246
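    // Create two native Win32 worker threads that poll and apply the capture (microphone)
    // volume outside the real-time audio processing path.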
247 _hGetCaptureVolumeThread =
248 CreateThread(NULL, 0, GetCaptureVolumeThread, this, 0, NULL);
249 if (_hGetCaptureVolumeThread == NULL)
250 {
251 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
252 " failed to create the volume getter thread");
253 return -1;
254 }
255
256 SetThreadPriority(_hGetCaptureVolumeThread, THREAD_PRIORITY_NORMAL);
257
258 _hSetCaptureVolumeThread =
259 CreateThread(NULL, 0, SetCaptureVolumeThread, this, 0, NULL);
260 if (_hSetCaptureVolumeThread == NULL)
261 {
262 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
263 " failed to create the volume setter thread");
264 return -1;
265 }
266
267 SetThreadPriority(_hSetCaptureVolumeThread, THREAD_PRIORITY_NORMAL);
268
269 _initialized = true;
270
271 return 0;
272 }
273
274 // ----------------------------------------------------------------------------
275 // Terminate
276 // ----------------------------------------------------------------------------
277
278 int32_t AudioDeviceWindowsWave::Terminate()
279 {
280
281 if (!_initialized)
282 {
283 return 0;
284 }
285
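    // Close the mixer and stop the main audio thread first, then signal the shutdown events
    // and wait (2 s timeout each) for the volume getter/setter threads to exit.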
286 _critSect.Enter();
287
288 _mixerManager.Close();
289
290 if (_ptrThread)
291 {
292 rtc::PlatformThread* tmpThread = _ptrThread.release();
293 _critSect.Leave();
294
295 _timeEvent.Set();
296
297 tmpThread->Stop();
298 delete tmpThread;
299 }
300 else
301 {
302 _critSect.Leave();
303 }
304
305 _critSect.Enter();
306 SetEvent(_hShutdownGetVolumeEvent);
307 _critSect.Leave();
308 int32_t ret = WaitForSingleObject(_hGetCaptureVolumeThread, 2000);
309 if (ret != WAIT_OBJECT_0)
310 {
311 // the thread did not stop as it should
312 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
313 " failed to close down volume getter thread");
314 CloseHandle(_hGetCaptureVolumeThread);
315 _hGetCaptureVolumeThread = NULL;
316 return -1;
317 }
318 _critSect.Enter();
319 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
320 " volume getter thread is now closed");
321
322 SetEvent(_hShutdownSetVolumeEvent);
323 _critSect.Leave();
324 ret = WaitForSingleObject(_hSetCaptureVolumeThread, 2000);
325 if (ret != WAIT_OBJECT_0)
326 {
327 // the thread did not stop as it should
328 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
329 " failed to close down volume setter thread");
330 CloseHandle(_hSetCaptureVolumeThread);
331 _hSetCaptureVolumeThread = NULL;
332 return -1;
333 }
334 _critSect.Enter();
335 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
336 " volume setter thread is now closed");
337
338 CloseHandle(_hGetCaptureVolumeThread);
339 _hGetCaptureVolumeThread = NULL;
340
341 CloseHandle(_hSetCaptureVolumeThread);
342 _hSetCaptureVolumeThread = NULL;
343
344 _critSect.Leave();
345
346 _timeEvent.StopTimer();
347
348 _initialized = false;
349 _outputDeviceIsSpecified = false;
350 _inputDeviceIsSpecified = false;
351
352 return 0;
353 }
354
355
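// Static thread entry points; 'context' carries the AudioDeviceWindowsWave instance.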
356 DWORD WINAPI AudioDeviceWindowsWave::GetCaptureVolumeThread(LPVOID context)
357 {
358 return(((AudioDeviceWindowsWave*)context)->DoGetCaptureVolumeThread());
359 }
360
361 DWORD WINAPI AudioDeviceWindowsWave::SetCaptureVolumeThread(LPVOID context)
362 {
363 return(((AudioDeviceWindowsWave*)context)->DoSetCaptureVolumeThread());
364 }
365
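// Polls the current microphone volume every GET_MIC_VOLUME_INTERVAL_MS while AGC is enabled
// and forwards it to the attached AudioDeviceBuffer; exits when _hShutdownGetVolumeEvent is set.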
366 DWORD AudioDeviceWindowsWave::DoGetCaptureVolumeThread()
367 {
368 HANDLE waitObject = _hShutdownGetVolumeEvent;
369
370 while (1)
371 {
372 DWORD waitResult = WaitForSingleObject(waitObject,
373 GET_MIC_VOLUME_INTERVAL_MS);
374 switch (waitResult)
375 {
376 case WAIT_OBJECT_0: // _hShutdownGetVolumeEvent
377 return 0;
378 case WAIT_TIMEOUT: // timeout notification
379 break;
380 default: // unexpected error
381 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
382 " unknown wait termination on get volume thread");
383 return 1;
384 }
385
386 if (AGC())
387 {
388 uint32_t currentMicLevel = 0;
389 if (MicrophoneVolume(currentMicLevel) == 0)
390 {
391 // This doesn't set the system volume, just stores it.
392 _critSect.Enter();
393 if (_ptrAudioBuffer)
394 {
395 _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
396 }
397 _critSect.Leave();
398 }
399 }
400 }
401 }
402
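// Waits for _hSetCaptureVolumeEvent and applies the most recently requested microphone level
// (_newMicLevel); exits when _hShutdownSetVolumeEvent is set.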
403 DWORD AudioDeviceWindowsWave::DoSetCaptureVolumeThread()
404 {
405 HANDLE waitArray[2] = {_hShutdownSetVolumeEvent, _hSetCaptureVolumeEvent};
406
407 while (1)
408 {
409 DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
410 switch (waitResult)
411 {
412 case WAIT_OBJECT_0: // _hShutdownSetVolumeEvent
413 return 0;
414 case WAIT_OBJECT_0 + 1: // _hSetCaptureVolumeEvent
415 break;
416 default: // unexpected error
417 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
418 " unknown wait termination on set volume thread");
419 return 1;
420 }
421
422 _critSect.Enter();
423 uint32_t newMicLevel = _newMicLevel;
424 _critSect.Leave();
425
426 if (SetMicrophoneVolume(newMicLevel) == -1)
427 {
428 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
429 " the required modification of the microphone volume failed");
430 }
431 }
432 return 0;
433 }
434
435 // ----------------------------------------------------------------------------
436 // Initialized
437 // ----------------------------------------------------------------------------
438
439 bool AudioDeviceWindowsWave::Initialized() const
440 {
441 return (_initialized);
442 }
443
444 // ----------------------------------------------------------------------------
445 // InitSpeaker
446 // ----------------------------------------------------------------------------
447
448 int32_t AudioDeviceWindowsWave::InitSpeaker()
449 {
450
451 CriticalSectionScoped lock(&_critSect);
452
453 if (_playing)
454 {
455 return -1;
456 }
457
458 if (_mixerManager.EnumerateSpeakers() == -1)
459 {
460 // failed to locate any valid/controllable speaker
461 return -1;
462 }
463
464 if (IsUsingOutputDeviceIndex())
465 {
466 if (_mixerManager.OpenSpeaker(OutputDeviceIndex()) == -1)
467 {
468 return -1;
469 }
470 }
471 else
472 {
473 if (_mixerManager.OpenSpeaker(OutputDevice()) == -1)
474 {
475 return -1;
476 }
477 }
478
479 return 0;
480 }
481
482 // ----------------------------------------------------------------------------
483 // InitMicrophone
484 // ----------------------------------------------------------------------------
485
486 int32_t AudioDeviceWindowsWave::InitMicrophone()
487 {
488
489 CriticalSectionScoped lock(&_critSect);
490
491 if (_recording)
492 {
493 return -1;
494 }
495
496 if (_mixerManager.EnumerateMicrophones() == -1)
497 {
498 // failed to locate any valid/controllable microphone
499 return -1;
500 }
501
502 if (IsUsingInputDeviceIndex())
503 {
504 if (_mixerManager.OpenMicrophone(InputDeviceIndex()) == -1)
505 {
506 return -1;
507 }
508 }
509 else
510 {
511 if (_mixerManager.OpenMicrophone(InputDevice()) == -1)
512 {
513 return -1;
514 }
515 }
516
517 uint32_t maxVol = 0;
518 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
519 {
520 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
521 " unable to retrieve max microphone volume");
522 }
523 _maxMicVolume = maxVol;
524
525 uint32_t minVol = 0;
526 if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
527 {
528 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
529 " unable to retrieve min microphone volume");
530 }
531 _minMicVolume = minVol;
532
533 return 0;
534 }
535
536 // ----------------------------------------------------------------------------
537 // SpeakerIsInitialized
538 // ----------------------------------------------------------------------------
539
540 bool AudioDeviceWindowsWave::SpeakerIsInitialized() const
541 {
542 return (_mixerManager.SpeakerIsInitialized());
543 }
544
545 // ----------------------------------------------------------------------------
546 // MicrophoneIsInitialized
547 // ----------------------------------------------------------------------------
548
549 bool AudioDeviceWindowsWave::MicrophoneIsInitialized() const
550 {
551 return (_mixerManager.MicrophoneIsInitialized());
552 }
553
554 // ----------------------------------------------------------------------------
555 // SpeakerVolumeIsAvailable
556 // ----------------------------------------------------------------------------
557
558 int32_t AudioDeviceWindowsWave::SpeakerVolumeIsAvailable(bool& available)
559 {
560
561 bool isAvailable(false);
562
563     // Enumerate all available speakers and make an attempt to open up the
564 // output mixer corresponding to the currently selected output device.
565 //
566 if (InitSpeaker() == -1)
567 {
568 // failed to find a valid speaker
569 available = false;
570 return 0;
571 }
572
573 // Check if the selected speaker has a volume control
574 //
575 _mixerManager.SpeakerVolumeIsAvailable(isAvailable);
576 available = isAvailable;
577
578 // Close the initialized output mixer
579 //
580 _mixerManager.CloseSpeaker();
581
582 return 0;
583 }
584
585 // ----------------------------------------------------------------------------
586 // SetSpeakerVolume
587 // ----------------------------------------------------------------------------
588
589 int32_t AudioDeviceWindowsWave::SetSpeakerVolume(uint32_t volume)
590 {
591
592 return (_mixerManager.SetSpeakerVolume(volume));
593 }
594
595 // ----------------------------------------------------------------------------
596 // SpeakerVolume
597 // ----------------------------------------------------------------------------
598
599 int32_t AudioDeviceWindowsWave::SpeakerVolume(uint32_t& volume) const
600 {
601
602 uint32_t level(0);
603
604 if (_mixerManager.SpeakerVolume(level) == -1)
605 {
606 return -1;
607 }
608
609 volume = level;
610 return 0;
611 }
612
613 // ----------------------------------------------------------------------------
614 // SetWaveOutVolume
615 //
616 // The low-order word contains the left-channel volume setting, and the
617 // high-order word contains the right-channel setting.
618 // A value of 0xFFFF represents full volume, and a value of 0x0000 is silence.
619 //
620 // If a device does not support both left and right volume control,
621 // the low-order word of dwVolume specifies the volume level,
622 // and the high-order word is ignored.
623 //
624 // Most devices do not support the full 16 bits of volume-level control
625 // and will not use the least-significant bits of the requested volume setting.
626 // For example, if a device supports 4 bits of volume control, the values
627 // 0x4000, 0x4FFF, and 0x43BE will all be truncated to 0x4000.
628 // ----------------------------------------------------------------------------
629
630 int32_t AudioDeviceWindowsWave::SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight)
631 {
632
633 MMRESULT res(0);
634 WAVEOUTCAPS caps;
635
636 CriticalSectionScoped lock(&_critSect);
637
638 if (_hWaveOut == NULL)
639 {
640 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "no open playout device exists => using default");
641 }
642
643 // To determine whether the device supports volume control on both
644 // the left and right channels, use the WAVECAPS_LRVOLUME flag.
645 //
646 res = waveOutGetDevCaps((UINT_PTR)_hWaveOut, &caps, sizeof(WAVEOUTCAPS));
647 if (MMSYSERR_NOERROR != res)
648 {
649 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCaps() failed (err=%d)", res);
650 TraceWaveOutError(res);
651 }
652 if (!(caps.dwSupport & WAVECAPS_VOLUME))
653 {
654 // this device does not support volume control using the waveOutSetVolume API
655 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device does not support volume control using the Wave API");
656 return -1;
657 }
658 if (!(caps.dwSupport & WAVECAPS_LRVOLUME))
659 {
660 // high-order word (right channel) is ignored
661 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "device does not support volume control on both channels");
662 }
663
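    // Pack the left (low word) and right (high word) volumes into one DWORD.
    // Example: volumeLeft = 0xFFFF, volumeRight = 0x8000 => dwVolume = 0x8000FFFF.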
664 DWORD dwVolume(0x00000000);
665 dwVolume = (DWORD)(((volumeRight & 0xFFFF) << 16) | (volumeLeft & 0xFFFF));
666
667 res = waveOutSetVolume(_hWaveOut, dwVolume);
668 if (MMSYSERR_NOERROR != res)
669 {
670 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveOutSetVolume() failed (err=%d)", res);
671 TraceWaveOutError(res);
672 return -1;
673 }
674
675 return 0;
676 }
677
678 // ----------------------------------------------------------------------------
679 // WaveOutVolume
680 //
681 // The low-order word of this location contains the left-channel volume setting,
682 // and the high-order word contains the right-channel setting.
683 // A value of 0xFFFF (65535) represents full volume, and a value of 0x0000
684 // is silence.
685 //
686 // If a device does not support both left and right volume control,
687 // the low-order word of the specified location contains the mono volume level.
688 //
689 // The full 16-bit setting(s) set with the waveOutSetVolume function is returned,
690 // regardless of whether the device supports the full 16 bits of volume-level
691 // control.
692 // ----------------------------------------------------------------------------
693
694 int32_t AudioDeviceWindowsWave::WaveOutVolume(uint16_t& volumeLeft, uint16_t& volumeRight) const
695 {
696
697 MMRESULT res(0);
698 WAVEOUTCAPS caps;
699
700 CriticalSectionScoped lock(&_critSect);
701
702 if (_hWaveOut == NULL)
703 {
704 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "no open playout device exists => using default");
705 }
706
707 // To determine whether the device supports volume control on both
708 // the left and right channels, use the WAVECAPS_LRVOLUME flag.
709 //
710 res = waveOutGetDevCaps((UINT_PTR)_hWaveOut, &caps, sizeof(WAVEOUTCAPS));
711 if (MMSYSERR_NOERROR != res)
712 {
713 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCaps() failed (err=%d)", res);
714 TraceWaveOutError(res);
715 }
716 if (!(caps.dwSupport & WAVECAPS_VOLUME))
717 {
718 // this device does not support volume control using the waveOutSetVolume API
719 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device does not support volume control using the Wave API");
720 return -1;
721 }
722 if (!(caps.dwSupport & WAVECAPS_LRVOLUME))
723 {
724 // high-order word (right channel) is ignored
725 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "device does not support volume control on both channels");
726 }
727
728 DWORD dwVolume(0x00000000);
729
730 res = waveOutGetVolume(_hWaveOut, &dwVolume);
731 if (MMSYSERR_NOERROR != res)
732 {
733 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveOutGetVolume() failed (err=%d)", res);
734 TraceWaveOutError(res);
735 return -1;
736 }
737
738 WORD wVolumeLeft = LOWORD(dwVolume);
739 WORD wVolumeRight = HIWORD(dwVolume);
740
741 volumeLeft = static_cast<uint16_t> (wVolumeLeft);
742 volumeRight = static_cast<uint16_t> (wVolumeRight);
743
744 return 0;
745 }
746
747 // ----------------------------------------------------------------------------
748 // MaxSpeakerVolume
749 // ----------------------------------------------------------------------------
750
751 int32_t AudioDeviceWindowsWave::MaxSpeakerVolume(uint32_t& maxVolume) const
752 {
753
754 uint32_t maxVol(0);
755
756 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
757 {
758 return -1;
759 }
760
761 maxVolume = maxVol;
762 return 0;
763 }
764
765 // ----------------------------------------------------------------------------
766 // MinSpeakerVolume
767 // ----------------------------------------------------------------------------
768
769 int32_t AudioDeviceWindowsWave::MinSpeakerVolume(uint32_t& minVolume) const
770 {
771
772 uint32_t minVol(0);
773
774 if (_mixerManager.MinSpeakerVolume(minVol) == -1)
775 {
776 return -1;
777 }
778
779 minVolume = minVol;
780 return 0;
781 }
782
783 // ----------------------------------------------------------------------------
784 // SpeakerVolumeStepSize
785 // ----------------------------------------------------------------------------
786
787 int32_t AudioDeviceWindowsWave::SpeakerVolumeStepSize(uint16_t& stepSize) const
788 {
789
790 uint16_t delta(0);
791
792 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
793 {
794 return -1;
795 }
796
797 stepSize = delta;
798 return 0;
799 }
800
801 // ----------------------------------------------------------------------------
802 // SpeakerMuteIsAvailable
803 // ----------------------------------------------------------------------------
804
805 int32_t AudioDeviceWindowsWave::SpeakerMuteIsAvailable(bool& available)
806 {
807
808 bool isAvailable(false);
809
810     // Enumerate all available speakers and make an attempt to open up the
811 // output mixer corresponding to the currently selected output device.
812 //
813 if (InitSpeaker() == -1)
814 {
815 // If we end up here it means that the selected speaker has no volume
816 // control, hence it is safe to state that there is no mute control
817 // already at this stage.
818 available = false;
819 return 0;
820 }
821
822 // Check if the selected speaker has a mute control
823 //
824 _mixerManager.SpeakerMuteIsAvailable(isAvailable);
825 available = isAvailable;
826
827 // Close the initialized output mixer
828 //
829 _mixerManager.CloseSpeaker();
830
831 return 0;
832 }
833
834 // ----------------------------------------------------------------------------
835 // SetSpeakerMute
836 // ----------------------------------------------------------------------------
837
838 int32_t AudioDeviceWindowsWave::SetSpeakerMute(bool enable)
839 {
840 return (_mixerManager.SetSpeakerMute(enable));
841 }
842
843 // ----------------------------------------------------------------------------
844 // SpeakerMute
845 // ----------------------------------------------------------------------------
846
847 int32_t AudioDeviceWindowsWave::SpeakerMute(bool& enabled) const
848 {
849
850 bool muted(0);
851
852 if (_mixerManager.SpeakerMute(muted) == -1)
853 {
854 return -1;
855 }
856
857 enabled = muted;
858 return 0;
859 }
860
861 // ----------------------------------------------------------------------------
862 // MicrophoneMuteIsAvailable
863 // ----------------------------------------------------------------------------
864
865 int32_t AudioDeviceWindowsWave::MicrophoneMuteIsAvailable(bool& available)
866 {
867
868 bool isAvailable(false);
869
870     // Enumerate all available microphones and make an attempt to open up the
871 // input mixer corresponding to the currently selected input device.
872 //
873 if (InitMicrophone() == -1)
874 {
875 // If we end up here it means that the selected microphone has no volume
876         // control, hence it is safe to state that there is no mute control
877 // already at this stage.
878 available = false;
879 return 0;
880 }
881
882 // Check if the selected microphone has a mute control
883 //
884 _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
885 available = isAvailable;
886
887 // Close the initialized input mixer
888 //
889 _mixerManager.CloseMicrophone();
890
891 return 0;
892 }
893
894 // ----------------------------------------------------------------------------
895 // SetMicrophoneMute
896 // ----------------------------------------------------------------------------
897
898 int32_t AudioDeviceWindowsWave::SetMicrophoneMute(bool enable)
899 {
900 return (_mixerManager.SetMicrophoneMute(enable));
901 }
902
903 // ----------------------------------------------------------------------------
904 // MicrophoneMute
905 // ----------------------------------------------------------------------------
906
907 int32_t AudioDeviceWindowsWave::MicrophoneMute(bool& enabled) const
908 {
909
910 bool muted(0);
911
912 if (_mixerManager.MicrophoneMute(muted) == -1)
913 {
914 return -1;
915 }
916
917 enabled = muted;
918 return 0;
919 }
920
921 // ----------------------------------------------------------------------------
922 // MicrophoneBoostIsAvailable
923 // ----------------------------------------------------------------------------
924
925 int32_t AudioDeviceWindowsWave::MicrophoneBoostIsAvailable(bool& available)
926 {
927
928 bool isAvailable(false);
929
930     // Enumerate all available microphones and make an attempt to open up the
931 // input mixer corresponding to the currently selected input device.
932 //
933 if (InitMicrophone() == -1)
934 {
935 // If we end up here it means that the selected microphone has no volume
936 // control, hence it is safe to state that there is no boost control
937 // already at this stage.
938 available = false;
939 return 0;
940 }
941
942 // Check if the selected microphone has a boost control
943 //
944 _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
945 available = isAvailable;
946
947 // Close the initialized input mixer
948 //
949 _mixerManager.CloseMicrophone();
950
951 return 0;
952 }
953
954 // ----------------------------------------------------------------------------
955 // SetMicrophoneBoost
956 // ----------------------------------------------------------------------------
957
958 int32_t AudioDeviceWindowsWave::SetMicrophoneBoost(bool enable)
959 {
960
961 return (_mixerManager.SetMicrophoneBoost(enable));
962 }
963
964 // ----------------------------------------------------------------------------
965 // MicrophoneBoost
966 // ----------------------------------------------------------------------------
967
968 int32_t AudioDeviceWindowsWave::MicrophoneBoost(bool& enabled) const
969 {
970
971 bool onOff(0);
972
973 if (_mixerManager.MicrophoneBoost(onOff) == -1)
974 {
975 return -1;
976 }
977
978 enabled = onOff;
979 return 0;
980 }
981
982 // ----------------------------------------------------------------------------
983 // StereoRecordingIsAvailable
984 // ----------------------------------------------------------------------------
985
986 int32_t AudioDeviceWindowsWave::StereoRecordingIsAvailable(bool& available)
987 {
988 available = true;
989 return 0;
990 }
991
992 // ----------------------------------------------------------------------------
993 // SetStereoRecording
994 // ----------------------------------------------------------------------------
995
996 int32_t AudioDeviceWindowsWave::SetStereoRecording(bool enable)
997 {
998
999 if (enable)
1000 _recChannels = 2;
1001 else
1002 _recChannels = 1;
1003
1004 return 0;
1005 }
1006
1007 // ----------------------------------------------------------------------------
1008 // StereoRecording
1009 // ----------------------------------------------------------------------------
1010
1011 int32_t AudioDeviceWindowsWave::StereoRecording(bool& enabled) const
1012 {
1013
1014 if (_recChannels == 2)
1015 enabled = true;
1016 else
1017 enabled = false;
1018
1019 return 0;
1020 }
1021
1022 // ----------------------------------------------------------------------------
1023 // StereoPlayoutIsAvailable
1024 // ----------------------------------------------------------------------------
1025
1026 int32_t AudioDeviceWindowsWave::StereoPlayoutIsAvailable(bool& available)
1027 {
1028 available = true;
1029 return 0;
1030 }
1031
1032 // ----------------------------------------------------------------------------
1033 // SetStereoPlayout
1034 //
1035 // Specifies the number of output channels.
1036 //
1037 // NOTE - the setting will only have an effect after InitPlayout has
1038 // been called.
1039 //
1040 // 16-bit mono:
1041 //
1042 // Each sample is 2 bytes. Sample 1 is followed by samples 2, 3, 4, and so on.
1043 // For each sample, the first byte is the low-order byte of channel 0 and the
1044 // second byte is the high-order byte of channel 0.
1045 //
1046 // 16-bit stereo:
1047 //
1048 // Each sample is 4 bytes. Sample 1 is followed by samples 2, 3, 4, and so on.
1049 // For each sample, the first byte is the low-order byte of channel 0 (left channel);
1050 // the second byte is the high-order byte of channel 0; the third byte is the
1051 // low-order byte of channel 1 (right channel); and the fourth byte is the
1052 // high-order byte of channel 1.
1053 // ----------------------------------------------------------------------------
1054
1055 int32_t AudioDeviceWindowsWave::SetStereoPlayout(bool enable)
1056 {
1057
1058 if (enable)
1059 _playChannels = 2;
1060 else
1061 _playChannels = 1;
1062
1063 return 0;
1064 }
1065
1066 // ----------------------------------------------------------------------------
1067 // StereoPlayout
1068 // ----------------------------------------------------------------------------
1069
1070 int32_t AudioDeviceWindowsWave::StereoPlayout(bool& enabled) const
1071 {
1072
1073 if (_playChannels == 2)
1074 enabled = true;
1075 else
1076 enabled = false;
1077
1078 return 0;
1079 }
1080
1081 // ----------------------------------------------------------------------------
1082 // SetAGC
1083 // ----------------------------------------------------------------------------
1084
1085 int32_t AudioDeviceWindowsWave::SetAGC(bool enable)
1086 {
1087
1088 _AGC = enable;
1089
1090 return 0;
1091 }
1092
1093 // ----------------------------------------------------------------------------
1094 // AGC
1095 // ----------------------------------------------------------------------------
1096
1097 bool AudioDeviceWindowsWave::AGC() const
1098 {
1099 return _AGC;
1100 }
1101
1102 // ----------------------------------------------------------------------------
1103 // MicrophoneVolumeIsAvailable
1104 // ----------------------------------------------------------------------------
1105
1106 int32_t AudioDeviceWindowsWave::MicrophoneVolumeIsAvailable(bool& available)
1107 {
1108
1109 bool isAvailable(false);
1110
1111     // Enumerate all available microphones and make an attempt to open up the
1112     // input mixer corresponding to the currently selected input device.
1113 //
1114 if (InitMicrophone() == -1)
1115 {
1116 // Failed to find valid microphone
1117 available = false;
1118 return 0;
1119 }
1120
1121 // Check if the selected microphone has a volume control
1122 //
1123 _mixerManager.MicrophoneVolumeIsAvailable(isAvailable);
1124 available = isAvailable;
1125
1126 // Close the initialized input mixer
1127 //
1128 _mixerManager.CloseMicrophone();
1129
1130 return 0;
1131 }
1132
1133 // ----------------------------------------------------------------------------
1134 // SetMicrophoneVolume
1135 // ----------------------------------------------------------------------------
1136
1137 int32_t AudioDeviceWindowsWave::SetMicrophoneVolume(uint32_t volume)
1138 {
1139 return (_mixerManager.SetMicrophoneVolume(volume));
1140 }
1141
1142 // ----------------------------------------------------------------------------
1143 // MicrophoneVolume
1144 // ----------------------------------------------------------------------------
1145
1146 int32_t AudioDeviceWindowsWave::MicrophoneVolume(uint32_t& volume) const
1147 {
1148 uint32_t level(0);
1149
1150 if (_mixerManager.MicrophoneVolume(level) == -1)
1151 {
1152         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to retrieve current microphone level");
1153 return -1;
1154 }
1155
1156 volume = level;
1157 return 0;
1158 }
1159
1160 // ----------------------------------------------------------------------------
1161 // MaxMicrophoneVolume
1162 // ----------------------------------------------------------------------------
1163
1164 int32_t AudioDeviceWindowsWave::MaxMicrophoneVolume(uint32_t& maxVolume) const
1165 {
1166 // _maxMicVolume can be zero in AudioMixerManager::MaxMicrophoneVolume():
1167 // (1) API GetLineControl() returns failure at querying the max Mic level.
1168 // (2) API GetLineControl() returns maxVolume as zero in rare cases.
1169 // Both cases show we don't have access to the mixer controls.
1170 // We return -1 here to indicate that.
1171 if (_maxMicVolume == 0)
1172 {
1173 return -1;
1174 }
1175
1176     maxVolume = _maxMicVolume;
1177 return 0;
1178 }
1179
1180 // ----------------------------------------------------------------------------
1181 // MinMicrophoneVolume
1182 // ----------------------------------------------------------------------------
1183
1184 int32_t AudioDeviceWindowsWave::MinMicrophoneVolume(uint32_t& minVolume) const
1185 {
1186 minVolume = _minMicVolume;
1187 return 0;
1188 }
1189
1190 // ----------------------------------------------------------------------------
1191 // MicrophoneVolumeStepSize
1192 // ----------------------------------------------------------------------------
1193
1194 int32_t AudioDeviceWindowsWave::MicrophoneVolumeStepSize(uint16_t& stepSize) const
1195 {
1196
1197 uint16_t delta(0);
1198
1199 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
1200 {
1201 return -1;
1202 }
1203
1204 stepSize = delta;
1205 return 0;
1206 }
1207
1208 // ----------------------------------------------------------------------------
1209 // PlayoutDevices
1210 // ----------------------------------------------------------------------------
1211
1212 int16_t AudioDeviceWindowsWave::PlayoutDevices()
1213 {
1214
1215 return (waveOutGetNumDevs());
1216 }
1217
1218 // ----------------------------------------------------------------------------
1219 // SetPlayoutDevice I (II)
1220 // ----------------------------------------------------------------------------
1221
1222 int32_t AudioDeviceWindowsWave::SetPlayoutDevice(uint16_t index)
1223 {
1224
1225 if (_playIsInitialized)
1226 {
1227 return -1;
1228 }
1229
1230 UINT nDevices = waveOutGetNumDevs();
1231     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "number of available waveform-audio output devices is %u", nDevices);
1232
1233 if (index < 0 || index > (nDevices-1))
1234 {
1235 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1));
1236 return -1;
1237 }
1238
1239 _usingOutputDeviceIndex = true;
1240 _outputDeviceIndex = index;
1241 _outputDeviceIsSpecified = true;
1242
1243 return 0;
1244 }
1245
1246 // ----------------------------------------------------------------------------
1247 // SetPlayoutDevice II (II)
1248 // ----------------------------------------------------------------------------
1249
1250 int32_t AudioDeviceWindowsWave::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device)
1251 {
1252 if (_playIsInitialized)
1253 {
1254 return -1;
1255 }
1256
1257 if (device == AudioDeviceModule::kDefaultDevice)
1258 {
1259 }
1260 else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
1261 {
1262 }
1263
1264 _usingOutputDeviceIndex = false;
1265 _outputDevice = device;
1266 _outputDeviceIsSpecified = true;
1267
1268 return 0;
1269 }
1270
1271 // ----------------------------------------------------------------------------
1272 // PlayoutDeviceName
1273 // ----------------------------------------------------------------------------
1274
1275 int32_t AudioDeviceWindowsWave::PlayoutDeviceName(
1276 uint16_t index,
1277 char name[kAdmMaxDeviceNameSize],
1278 char guid[kAdmMaxGuidSize])
1279 {
1280
1281 uint16_t nDevices(PlayoutDevices());
1282
1283 // Special fix for the case when the user asks for the name of the default device.
1284 //
1285 if (index == (uint16_t)(-1))
1286 {
1287 index = 0;
1288 }
1289
1290 if ((index > (nDevices-1)) || (name == NULL))
1291 {
1292 return -1;
1293 }
1294
1295 memset(name, 0, kAdmMaxDeviceNameSize);
1296
1297 if (guid != NULL)
1298 {
1299 memset(guid, 0, kAdmMaxGuidSize);
1300 }
1301
1302     WAVEOUTCAPSW caps;  // the szPname member (product name, NULL-terminated) is a WCHAR string
1303 MMRESULT res;
1304
1305 res = waveOutGetDevCapsW(index, &caps, sizeof(WAVEOUTCAPSW));
1306 if (res != MMSYSERR_NOERROR)
1307 {
1308 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCapsW() failed (err=%d)", res);
1309 return -1;
1310 }
1311 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
1312 {
1313 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 1", GetLastError());
1314 }
1315
1316 if (guid == NULL)
1317 {
1318 return 0;
1319 }
1320
1321 // It is possible to get the unique endpoint ID string using the Wave API.
1322 // However, it is only supported on Windows Vista and Windows 7.
1323
1324 size_t cbEndpointId(0);
1325
1326 // Get the size (including the terminating null) of the endpoint ID string of the waveOut device.
1327 // Windows Vista supports the DRV_QUERYFUNCTIONINSTANCEIDSIZE and DRV_QUERYFUNCTIONINSTANCEID messages.
1328 res = waveOutMessage((HWAVEOUT)IntToPtr(index),
1329 DRV_QUERYFUNCTIONINSTANCEIDSIZE,
1330 (DWORD_PTR)&cbEndpointId, NULL);
1331 if (res != MMSYSERR_NOERROR)
1332 {
1333 // DRV_QUERYFUNCTIONINSTANCEIDSIZE is not supported <=> earlier version of Windows than Vista
1334 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveOutMessage(DRV_QUERYFUNCTIONINSTANCEIDSIZE) failed (err=%d)", res);
1335 TraceWaveOutError(res);
1336 // Best we can do is to copy the friendly name and use it as guid
1337 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
1338 {
1339 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 2", GetLastError());
1340 }
1341 return 0;
1342 }
1343
1344 // waveOutMessage(DRV_QUERYFUNCTIONINSTANCEIDSIZE) worked => we are on a Vista or Windows 7 device
1345
1346 WCHAR *pstrEndpointId = NULL;
1347 pstrEndpointId = (WCHAR*)CoTaskMemAlloc(cbEndpointId);
1348
1349 // Get the endpoint ID string for this waveOut device.
1350 res = waveOutMessage((HWAVEOUT)IntToPtr(index),
1351 DRV_QUERYFUNCTIONINSTANCEID,
1352 (DWORD_PTR)pstrEndpointId,
1353 cbEndpointId);
1354 if (res != MMSYSERR_NOERROR)
1355 {
1356 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveOutMessage(DRV_QUERYFUNCTIONINSTANCEID) failed (err=%d)", res);
1357 TraceWaveOutError(res);
1358 // Best we can do is to copy the friendly name and use it as guid
1359 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
1360 {
1361 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 3", GetLastError());
1362 }
1363 CoTaskMemFree(pstrEndpointId);
1364 return 0;
1365 }
1366
1367 if (WideCharToMultiByte(CP_UTF8, 0, pstrEndpointId, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
1368 {
1369 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 4", GetLastError());
1370 }
1371 CoTaskMemFree(pstrEndpointId);
1372
1373 return 0;
1374 }
1375
1376 // ----------------------------------------------------------------------------
1377 // RecordingDeviceName
1378 // ----------------------------------------------------------------------------
1379
1380 int32_t AudioDeviceWindowsWave::RecordingDeviceName(
1381 uint16_t index,
1382 char name[kAdmMaxDeviceNameSize],
1383 char guid[kAdmMaxGuidSize])
1384 {
1385
1386 uint16_t nDevices(RecordingDevices());
1387
1388 // Special fix for the case when the user asks for the name of the default device.
1389 //
1390 if (index == (uint16_t)(-1))
1391 {
1392 index = 0;
1393 }
1394
1395 if ((index > (nDevices-1)) || (name == NULL))
1396 {
1397 return -1;
1398 }
1399
1400 memset(name, 0, kAdmMaxDeviceNameSize);
1401
1402 if (guid != NULL)
1403 {
1404 memset(guid, 0, kAdmMaxGuidSize);
1405 }
1406
1407     WAVEINCAPSW caps;  // the szPname member (product name, NULL-terminated) is a WCHAR string
1408 MMRESULT res;
1409
1410 res = waveInGetDevCapsW(index, &caps, sizeof(WAVEINCAPSW));
1411 if (res != MMSYSERR_NOERROR)
1412 {
1413 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetDevCapsW() failed (err=%d)", res);
1414 return -1;
1415 }
1416 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
1417 {
1418 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 1", GetLastError());
1419 }
1420
1421 if (guid == NULL)
1422 {
1423 return 0;
1424 }
1425
1426 // It is possible to get the unique endpoint ID string using the Wave API.
1427 // However, it is only supported on Windows Vista and Windows 7.
1428
1429 size_t cbEndpointId(0);
1430
1431     // Get the size (including the terminating null) of the endpoint ID string of the waveIn device.
1432     // Windows Vista supports the DRV_QUERYFUNCTIONINSTANCEIDSIZE and DRV_QUERYFUNCTIONINSTANCEID messages.
1433 res = waveInMessage((HWAVEIN)IntToPtr(index),
1434 DRV_QUERYFUNCTIONINSTANCEIDSIZE,
1435 (DWORD_PTR)&cbEndpointId, NULL);
1436 if (res != MMSYSERR_NOERROR)
1437 {
1438 // DRV_QUERYFUNCTIONINSTANCEIDSIZE is not supported <=> earlier version of Windows than Vista
1439 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveInMessage(DRV_QUERYFUNCTIONINSTANCEIDSIZE) failed (err=%d)", res);
1440 TraceWaveInError(res);
1441 // Best we can do is to copy the friendly name and use it as guid
1442 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
1443 {
1444 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 2", GetLastError());
1445 }
1446 return 0;
1447 }
1448
1449     // waveInMessage(DRV_QUERYFUNCTIONINSTANCEIDSIZE) worked => we are on a Vista or Windows 7 device
1450
1451 WCHAR *pstrEndpointId = NULL;
1452 pstrEndpointId = (WCHAR*)CoTaskMemAlloc(cbEndpointId);
1453
1454     // Get the endpoint ID string for this waveIn device.
1455 res = waveInMessage((HWAVEIN)IntToPtr(index),
1456 DRV_QUERYFUNCTIONINSTANCEID,
1457 (DWORD_PTR)pstrEndpointId,
1458 cbEndpointId);
1459 if (res != MMSYSERR_NOERROR)
1460 {
1461 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveInMessage(DRV_QUERYFUNCTIONINSTANCEID) failed (err=%d)", res);
1462 TraceWaveInError(res);
1463 // Best we can do is to copy the friendly name and use it as guid
1464 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
1465 {
1466 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 3", GetLastError());
1467 }
1468 CoTaskMemFree(pstrEndpointId);
1469 return 0;
1470 }
1471
1472 if (WideCharToMultiByte(CP_UTF8, 0, pstrEndpointId, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
1473 {
1474 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 4", GetLastError());
1475 }
1476 CoTaskMemFree(pstrEndpointId);
1477
1478 return 0;
1479 }
1480
1481 // ----------------------------------------------------------------------------
1482 // RecordingDevices
1483 // ----------------------------------------------------------------------------
1484
1485 int16_t AudioDeviceWindowsWave::RecordingDevices()
1486 {
1487
1488 return (waveInGetNumDevs());
1489 }
1490
1491 // ----------------------------------------------------------------------------
1492 // SetRecordingDevice I (II)
1493 // ----------------------------------------------------------------------------
1494
1495 int32_t AudioDeviceWindowsWave::SetRecordingDevice(uint16_t index)
1496 {
1497
1498 if (_recIsInitialized)
1499 {
1500 return -1;
1501 }
1502
1503 UINT nDevices = waveInGetNumDevs();
1504     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "number of available waveform-audio input devices is %u", nDevices);
1505
1506 if (index < 0 || index > (nDevices-1))
1507 {
1508 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1));
1509 return -1;
1510 }
1511
1512 _usingInputDeviceIndex = true;
1513 _inputDeviceIndex = index;
1514 _inputDeviceIsSpecified = true;
1515
1516 return 0;
1517 }
1518
1519 // ----------------------------------------------------------------------------
1520 // SetRecordingDevice II (II)
1521 // ----------------------------------------------------------------------------
1522
1523 int32_t AudioDeviceWindowsWave::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device)
1524 {
1525 if (device == AudioDeviceModule::kDefaultDevice)
1526 {
1527 }
1528 else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
1529 {
1530 }
1531
1532 if (_recIsInitialized)
1533 {
1534 return -1;
1535 }
1536
1537 _usingInputDeviceIndex = false;
1538 _inputDevice = device;
1539 _inputDeviceIsSpecified = true;
1540
1541 return 0;
1542 }
1543
1544 // ----------------------------------------------------------------------------
1545 // PlayoutIsAvailable
1546 // ----------------------------------------------------------------------------
1547
1548 int32_t AudioDeviceWindowsWave::PlayoutIsAvailable(bool& available)
1549 {
1550
1551 available = false;
1552
1553 // Try to initialize the playout side
1554 int32_t res = InitPlayout();
1555
1556 // Cancel effect of initialization
1557 StopPlayout();
1558
1559 if (res != -1)
1560 {
1561 available = true;
1562 }
1563
1564 return 0;
1565 }
1566
1567 // ----------------------------------------------------------------------------
1568 // RecordingIsAvailable
1569 // ----------------------------------------------------------------------------
1570
1571 int32_t AudioDeviceWindowsWave::RecordingIsAvailable(bool& available)
1572 {
1573
1574 available = false;
1575
1576 // Try to initialize the recording side
1577 int32_t res = InitRecording();
1578
1579 // Cancel effect of initialization
1580 StopRecording();
1581
1582 if (res != -1)
1583 {
1584 available = true;
1585 }
1586
1587 return 0;
1588 }
1589
1590 // ----------------------------------------------------------------------------
1591 // InitPlayout
1592 // ----------------------------------------------------------------------------
1593
1594 int32_t AudioDeviceWindowsWave::InitPlayout()
1595 {
1596
1597 CriticalSectionScoped lock(&_critSect);
1598
1599 if (_playing)
1600 {
1601 return -1;
1602 }
1603
1604 if (!_outputDeviceIsSpecified)
1605 {
1606 return -1;
1607 }
1608
1609 if (_playIsInitialized)
1610 {
1611 return 0;
1612 }
1613
1614 // Initialize the speaker (devices might have been added or removed)
1615 if (InitSpeaker() == -1)
1616 {
1617 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitSpeaker() failed");
1618 }
1619
1620     // Enumerate all available output devices
1621 EnumeratePlayoutDevices();
1622
1623 // Start by closing any existing wave-output devices
1624 //
1625 MMRESULT res(MMSYSERR_ERROR);
1626
1627 if (_hWaveOut != NULL)
1628 {
1629 res = waveOutClose(_hWaveOut);
1630 if (MMSYSERR_NOERROR != res)
1631 {
1632 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutClose() failed (err=%d)", res);
1633 TraceWaveOutError(res);
1634 }
1635 }
1636
1637 // Set the output wave format
1638 //
1639 WAVEFORMATEX waveFormat;
1640
1641 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
1642 waveFormat.nChannels = _playChannels; // mono <=> 1, stereo <=> 2
1643 waveFormat.nSamplesPerSec = N_PLAY_SAMPLES_PER_SEC;
1644 waveFormat.wBitsPerSample = 16;
1645 waveFormat.nBlockAlign = waveFormat.nChannels * (waveFormat.wBitsPerSample/8);
1646 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
1647 waveFormat.cbSize = 0;
1648
1649 // Open the given waveform-audio output device for playout
1650 //
1651 HWAVEOUT hWaveOut(NULL);
1652
1653 if (IsUsingOutputDeviceIndex())
1654 {
1655 // verify settings first
1656 res = waveOutOpen(NULL, _outputDeviceIndex, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_FORMAT_QUERY);
1657 if (MMSYSERR_NOERROR == res)
1658 {
1659             // open the given waveform-audio output device for playout
1660 res = waveOutOpen(&hWaveOut, _outputDeviceIndex, &waveFormat, 0, 0, CALLBACK_NULL);
1661 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening output device corresponding to device ID %u", _outputDeviceIndex);
1662 }
1663 }
1664 else
1665 {
1666 if (_outputDevice == AudioDeviceModule::kDefaultCommunicationDevice)
1667 {
1668 // check if it is possible to open the default communication device (supported on Windows 7)
1669 res = waveOutOpen(NULL, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE | WAVE_FORMAT_QUERY);
1670 if (MMSYSERR_NOERROR == res)
1671 {
1672 // if so, open the default communication device for real
1673 res = waveOutOpen(&hWaveOut, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE);
1674 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening default communication device");
1675 }
1676 else
1677 {
1678                 // use the default device since the default communication device was not available
1679 res = waveOutOpen(&hWaveOut, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL);
1680 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "unable to open default communication device => using default instead");
1681 }
1682 }
1683 else if (_outputDevice == AudioDeviceModule::kDefaultDevice)
1684 {
1685 // open default device since it has been requested
1686 res = waveOutOpen(NULL, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_FORMAT_QUERY);
1687 if (MMSYSERR_NOERROR == res)
1688 {
1689 res = waveOutOpen(&hWaveOut, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL);
1690 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening default output device");
1691 }
1692 }
1693 }
1694
1695 if (MMSYSERR_NOERROR != res)
1696 {
1697 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveOutOpen() failed (err=%d)", res);
1698 TraceWaveOutError(res);
1699 return -1;
1700 }
1701
1702     // Log information about the acquired output device
1703 //
1704 WAVEOUTCAPS caps;
1705
1706 res = waveOutGetDevCaps((UINT_PTR)hWaveOut, &caps, sizeof(WAVEOUTCAPS));
1707 if (res != MMSYSERR_NOERROR)
1708 {
1709 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCaps() failed (err=%d)", res);
1710 TraceWaveOutError(res);
1711 }
1712
1713 UINT deviceID(0);
1714 res = waveOutGetID(hWaveOut, &deviceID);
1715 if (res != MMSYSERR_NOERROR)
1716 {
1717 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetID() failed (err=%d)", res);
1718 TraceWaveOutError(res);
1719 }
1720 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "utilized device ID : %u", deviceID);
1721 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product name : %s", caps.szPname);
1722
1723 // Store valid handle for the open waveform-audio output device
1724 _hWaveOut = hWaveOut;
1725
1726 // Store the input wave header as well
1727 _waveFormatOut = waveFormat;
1728
1729 // Prepare wave-out headers
1730 //
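    // Each sample frame occupies 2 bytes (16-bit PCM) per channel.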
1731 const uint8_t bytesPerSample = 2*_playChannels;
1732
1733 for (int n = 0; n < N_BUFFERS_OUT; n++)
1734 {
1735 // set up the output wave header
1736 _waveHeaderOut[n].lpData = reinterpret_cast<LPSTR>(&_playBuffer[n]);
1737 _waveHeaderOut[n].dwBufferLength = bytesPerSample*PLAY_BUF_SIZE_IN_SAMPLES;
1738 _waveHeaderOut[n].dwFlags = 0;
1739 _waveHeaderOut[n].dwLoops = 0;
1740
1741 memset(_playBuffer[n], 0, bytesPerSample*PLAY_BUF_SIZE_IN_SAMPLES);
1742
1743 // The waveOutPrepareHeader function prepares a waveform-audio data block for playback.
1744 // The lpData, dwBufferLength, and dwFlags members of the WAVEHDR structure must be set
1745 // before calling this function.
1746 //
1747 res = waveOutPrepareHeader(_hWaveOut, &_waveHeaderOut[n], sizeof(WAVEHDR));
1748 if (MMSYSERR_NOERROR != res)
1749 {
1750 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutPrepareHeader(%d) failed (err=%d)", n, res);
1751 TraceWaveOutError(res);
1752 }
1753
1754 // perform extra check to ensure that the header is prepared
1755 if (_waveHeaderOut[n].dwFlags != WHDR_PREPARED)
1756 {
1757 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutPrepareHeader(%d) failed (dwFlags != WHDR_PREPARED)", n);
1758 }
1759 }
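// At this point N_BUFFERS_OUT zero-filled output buffers are prepared; each one holds
// PLAY_BUF_SIZE_IN_SAMPLES samples (nominally 10 ms of audio, which is what the delay
// bookkeeping in GetPlayoutBufferDelay() assumes).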
1760
1761 // Mark playout side as initialized
1762 _playIsInitialized = true;
1763
1764 _dTcheckPlayBufDelay = 10; // check playback buffer delay every 10 ms
1765 _playBufCount = 0; // index of active output wave header (<=> output buffer index)
1766 _playBufDelay = 80; // buffer delay/size is initialized to 80 ms and slowly decreased until er < 25
1767 _minPlayBufDelay = 25; // minimum playout buffer delay
1768 _MAX_minBuffer = 65; // adaptive minimum playout buffer delay cannot be larger than this value
1769 _intro = 1; // Used to make sure that adaptation starts after (2000-1700)/100 seconds
1770 _waitCounter = 1700; // Counter for start of adaptation of playback buffer
1771 _erZeroCounter = 0; // Log how many times er = 0 in consecutive calls to RecTimeProc
1772 _useHeader = 0; // Counts number of "useHeader" detections. Stops at 2.
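//
// In short: PlayProc() tries to keep roughly _playBufDelay ms of audio queued on the
// sound card and adapts that target (never below _minPlayBufDelay, whose adaptive
// growth is capped by _MAX_minBuffer) based on how much unplayed data
// GetPlayoutBufferDelay() reports. _useHeader > 0 switches to the alternative
// WHDR_INQUEUE-based delay estimate when the position reports look unreliable.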
1773
1774 _writtenSamples = 0;
1775 _writtenSamplesOld = 0;
1776 _playedSamplesOld = 0;
1777 _sndCardPlayDelay = 0;
1778 _sndCardRecDelay = 0;
1779
1780 WEBRTC_TRACE(kTraceInfo, kTraceUtility, _id,"initial playout status: _playBufDelay=%d, _minPlayBufDelay=%d",
1781 _playBufDelay, _minPlayBufDelay);
1782
1783 return 0;
1784 }
1785
1786 // ----------------------------------------------------------------------------
1787 // InitRecording
1788 // ----------------------------------------------------------------------------
1789
1790 int32_t AudioDeviceWindowsWave::InitRecording()
1791 {
1792
1793 CriticalSectionScoped lock(&_critSect);
1794
1795 if (_recording)
1796 {
1797 return -1;
1798 }
1799
1800 if (!_inputDeviceIsSpecified)
1801 {
1802 return -1;
1803 }
1804
1805 if (_recIsInitialized)
1806 {
1807 return 0;
1808 }
1809
1810 _avgCPULoad = 0;
1811 _playAcc = 0;
1812
1813 // Initialize the microphone (devices might have been added or removed)
1814 if (InitMicrophone() == -1)
1815 {
1816 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitMicrophone() failed");
1817 }
1818
1819 // Enumerate all available input devices
1820 EnumerateRecordingDevices();
1821
1822 // Start by closing any existing wave-input devices
1823 //
1824 MMRESULT res(MMSYSERR_ERROR);
1825
1826 if (_hWaveIn != NULL)
1827 {
1828 res = waveInClose(_hWaveIn);
1829 if (MMSYSERR_NOERROR != res)
1830 {
1831 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInClose() failed (err=%d)", res);
1832 TraceWaveInError(res);
1833 }
1834 }
1835
1836 // Set the input wave format
1837 //
1838 WAVEFORMATEX waveFormat;
1839
1840 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
1841 waveFormat.nChannels = _recChannels; // mono <=> 1, stereo <=> 2
1842 waveFormat.nSamplesPerSec = N_REC_SAMPLES_PER_SEC;
1843 waveFormat.wBitsPerSample = 16;
1844 waveFormat.nBlockAlign = waveFormat.nChannels * (waveFormat.wBitsPerSample/8);
1845 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
1846 waveFormat.cbSize = 0;
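// Example (assuming mono input and the default N_REC_SAMPLES_PER_SEC of 48000):
// nBlockAlign = 1 * (16/8) = 2 bytes and nAvgBytesPerSec = 48000 * 2 = 96000 bytes/s.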
1847
1848 // Open the given waveform-audio input device for recording
1849 //
1850 HWAVEIN hWaveIn(NULL);
1851
1852 if (IsUsingInputDeviceIndex())
1853 {
1854 // verify settings first
1855 res = waveInOpen(NULL, _inputDeviceIndex, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_FORMAT_QUERY);
1856 if (MMSYSERR_NOERROR == res)
1857 {
1858 // open the given waveform-audio input device for recording
1859 res = waveInOpen(&hWaveIn, _inputDeviceIndex, &waveFormat, 0, 0, CALLBACK_NULL);
1860 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening input device corresponding to device ID %u", _inputDeviceIndex);
1861 }
1862 }
1863 else
1864 {
1865 if (_inputDevice == AudioDeviceModule::kDefaultCommunicationDevice)
1866 {
1867 // check if it is possible to open the default communication device (supported on Windows 7)
1868 res = waveInOpen(NULL, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE | WAVE_FORMAT_QUERY);
1869 if (MMSYSERR_NOERROR == res)
1870 {
1871 // if so, open the default communication device for real
1872 res = waveInOpen(&hWaveIn, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE);
1873 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening default communication device");
1874 }
1875 else
1876 {
1877 // use default device since default communication device was not available
1878 res = waveInOpen(&hWaveIn, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL);
1879 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "unable to open default communication device => using default instead");
1880 }
1881 }
1882 else if (_inputDevice == AudioDeviceModule::kDefaultDevice)
1883 {
1884 // open default device since it has been requested
1885 res = waveInOpen(NULL, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_FORMAT_QUERY);
1886 if (MMSYSERR_NOERROR == res)
1887 {
1888 res = waveInOpen(&hWaveIn, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL);
1889 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening default input device");
1890 }
1891 }
1892 }
1893
1894 if (MMSYSERR_NOERROR != res)
1895 {
1896 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveInOpen() failed (err=%d)", res);
1897 TraceWaveInError(res);
1898 return -1;
1899 }
1900
1901 // Log information about the acquired input device
1902 //
1903 WAVEINCAPS caps;
1904
1905 res = waveInGetDevCaps((UINT_PTR)hWaveIn, &caps, sizeof(WAVEINCAPS));
1906 if (res != MMSYSERR_NOERROR)
1907 {
1908 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetDevCaps() failed (err=%d)", res);
1909 TraceWaveInError(res);
1910 }
1911
1912 UINT deviceID(0);
1913 res = waveInGetID(hWaveIn, &deviceID);
1914 if (res != MMSYSERR_NOERROR)
1915 {
1916 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetID() failed (err=%d)", res);
1917 TraceWaveInError(res);
1918 }
1919 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "utilized device ID : %u", deviceID);
1920 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product name : %s", caps.szPname);
1921
1922 // Store valid handle for the open waveform-audio input device
1923 _hWaveIn = hWaveIn;
1924
1925 // Store the input wave format as well
1926 _waveFormatIn = waveFormat;
1927
1928 // Mark recording side as initialized
1929 _recIsInitialized = true;
1930
1931 _recBufCount = 0; // index of active input wave header (<=> input buffer index)
1932 _recDelayCount = 0; // ensures that input buffers are returned with certain delay
1933
1934 return 0;
1935 }
1936
1937 // ----------------------------------------------------------------------------
1938 // StartRecording
1939 // ----------------------------------------------------------------------------
1940
1941 int32_t AudioDeviceWindowsWave::StartRecording()
1942 {
1943
1944 if (!_recIsInitialized)
1945 {
1946 return -1;
1947 }
1948
1949 if (_recording)
1950 {
1951 return 0;
1952 }
1953
1954 // set state to ensure that the recording starts from the audio thread
1955 _startRec = true;
1956
1957 // the audio thread will signal when recording has started
1958 if (kEventTimeout == _recStartEvent.Wait(10000))
1959 {
1960 _startRec = false;
1961 StopRecording();
1962 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to activate recording");
1963 return -1;
1964 }
1965
1966 if (_recording)
1967 {
1968 // the recording state is set by the audio thread after recording has started
1969 }
1970 else
1971 {
1972 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to activate recording");
1973 return -1;
1974 }
1975
1976 return 0;
1977 }
1978
1979 // ----------------------------------------------------------------------------
1980 // StopRecording
1981 // ----------------------------------------------------------------------------
1982
1983 int32_t AudioDeviceWindowsWave::StopRecording()
1984 {
1985
1986 CriticalSectionScoped lock(&_critSect);
1987
1988 if (!_recIsInitialized)
1989 {
1990 return 0;
1991 }
1992
1993 if (_hWaveIn == NULL)
1994 {
1995 return -1;
1996 }
1997
1998 bool wasRecording = _recording;
1999 _recIsInitialized = false;
2000 _recording = false;
2001
2002 MMRESULT res;
2003
2004 // Stop waveform-audio input. If there are any buffers in the queue, the
2005 // current buffer will be marked as done (the dwBytesRecorded member in
2006 // the header will contain the length of data), but any empty buffers in
2007 // the queue will remain there.
2008 //
2009 res = waveInStop(_hWaveIn);
2010 if (MMSYSERR_NOERROR != res)
2011 {
2012 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInStop() failed (err=%d)", res);
2013 TraceWaveInError(res);
2014 }
2015
2016 // Stops input on the given waveform-audio input device and resets the current
2017 // position to zero. All pending buffers are marked as done and returned to
2018 // the application.
2019 //
2020 res = waveInReset(_hWaveIn);
2021 if (MMSYSERR_NOERROR != res)
2022 {
2023 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInReset() failed (err=%d)", res);
2024 TraceWaveInError(res);
2025 }
2026
2027 // Clean up the preparation performed by the waveInPrepareHeader function.
2028 // Only unprepare header if recording was ever started (and headers are prepared).
2029 //
2030 if (wasRecording)
2031 {
2032 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveInUnprepareHeader() will be performed");
2033 for (int n = 0; n < N_BUFFERS_IN; n++)
2034 {
2035 res = waveInUnprepareHeader(_hWaveIn, &_waveHeaderIn[n], sizeof(WAVEHDR));
2036 if (MMSYSERR_NOERROR != res)
2037 {
2038 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInUnprepareHeader() failed (err=%d)", res);
2039 TraceWaveInError(res);
2040 }
2041 }
2042 }
2043
2044 // Close the given waveform-audio input device.
2045 //
2046 res = waveInClose(_hWaveIn);
2047 if (MMSYSERR_NOERROR != res)
2048 {
2049 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInClose() failed (err=%d)", res);
2050 TraceWaveInError(res);
2051 }
2052
2053 // Set the wave input handle to NULL
2054 //
2055 _hWaveIn = NULL;
2056 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_hWaveIn is now set to NULL");
2057
2058 return 0;
2059 }
2060
2061 // ----------------------------------------------------------------------------
2062 // RecordingIsInitialized
2063 // ----------------------------------------------------------------------------
2064
2065 bool AudioDeviceWindowsWave::RecordingIsInitialized() const
2066 {
2067 return (_recIsInitialized);
2068 }
2069
2070 // ----------------------------------------------------------------------------
2071 // Recording
2072 // ----------------------------------------------------------------------------
2073
2074 bool AudioDeviceWindowsWave::Recording() const
2075 {
2076 return (_recording);
2077 }
2078
2079 // ----------------------------------------------------------------------------
2080 // PlayoutIsInitialized
2081 // ----------------------------------------------------------------------------
2082
2083 bool AudioDeviceWindowsWave::PlayoutIsInitialized() const
2084 {
2085 return (_playIsInitialized);
2086 }
2087
2088 // ----------------------------------------------------------------------------
2089 // StartPlayout
2090 // ----------------------------------------------------------------------------
2091
2092 int32_t AudioDeviceWindowsWave::StartPlayout()
2093 {
2094
2095 if (!_playIsInitialized)
2096 {
2097 return -1;
2098 }
2099
2100 if (_playing)
2101 {
2102 return 0;
2103 }
2104
2105 // set state to ensure that playout starts from the audio thread
2106 _startPlay = true;
2107
2108 // the audio thread will signal when playout has started
2109 if (kEventTimeout == _playStartEvent.Wait(10000))
2110 {
2111 _startPlay = false;
2112 StopPlayout();
2113 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to activate playout");
2114 return -1;
2115 }
2116
2117 if (_playing)
2118 {
2119 // the playing state is set by the audio thread after playout has started
2120 }
2121 else
2122 {
2123 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to activate playing");
2124 return -1;
2125 }
2126
2127 return 0;
2128 }
2129
2130 // ----------------------------------------------------------------------------
2131 // StopPlayout
2132 // ----------------------------------------------------------------------------
2133
2134 int32_t AudioDeviceWindowsWave::StopPlayout()
2135 {
2136
2137 CriticalSectionScoped lock(&_critSect);
2138
2139 if (!_playIsInitialized)
2140 {
2141 return 0;
2142 }
2143
2144 if (_hWaveOut == NULL)
2145 {
2146 return -1;
2147 }
2148
2149 _playIsInitialized = false;
2150 _playing = false;
2151 _sndCardPlayDelay = 0;
2152 _sndCardRecDelay = 0;
2153
2154 MMRESULT res;
2155
2156 // The waveOutReset function stops playback on the given waveform-audio
2157 // output device and resets the current position to zero. All pending
2158 // playback buffers are marked as done (WHDR_DONE) and returned to the application.
2159 // After this function returns, the application can send new playback buffers
2160 // to the device by calling waveOutWrite, or close the device by calling waveOutClose.
2161 //
2162 res = waveOutReset(_hWaveOut);
2163 if (MMSYSERR_NOERROR != res)
2164 {
2165 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutReset() failed (err=%d)", res);
2166 TraceWaveOutError(res);
2167 }
2168
2169 // The waveOutUnprepareHeader function cleans up the preparation performed
2170 // by the waveOutPrepareHeader function. This function must be called after
2171 // the device driver is finished with a data block.
2172 // You must call this function before freeing the buffer.
2173 //
2174 for (int n = 0; n < N_BUFFERS_OUT; n++)
2175 {
2176 res = waveOutUnprepareHeader(_hWaveOut, &_waveHeaderOut[n], sizeof(WAVEHDR));
2177 if (MMSYSERR_NOERROR != res)
2178 {
2179 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutUnprepareHeader() failed (err=%d)", res);
2180 TraceWaveOutError(res);
2181 }
2182 }
2183
2184 // The waveOutClose function closes the given waveform-audio output device.
2185 // The close operation fails if the device is still playing a waveform-audio
2186 // buffer that was previously sent by calling waveOutWrite. Before calling
2187 // waveOutClose, the application must wait for all buffers to finish playing
2188 // or call the waveOutReset function to terminate playback.
2189 //
2190 res = waveOutClose(_hWaveOut);
2191 if (MMSYSERR_NOERROR != res)
2192 {
2193 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutClose() failed (err=%d)", res);
2194 TraceWaveOutError(res);
2195 }
2196
2197 _hWaveOut = NULL;
2198 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_hWaveOut is now set to NULL");
2199
2200 return 0;
2201 }
2202
2203 // ----------------------------------------------------------------------------
2204 // PlayoutDelay
2205 // ----------------------------------------------------------------------------
2206
2207 int32_t AudioDeviceWindowsWave::PlayoutDelay(uint16_t& delayMS) const
2208 {
2209 CriticalSectionScoped lock(&_critSect);
2210 delayMS = (uint16_t)_sndCardPlayDelay;
2211 return 0;
2212 }
2213
2214 // ----------------------------------------------------------------------------
2215 // RecordingDelay
2216 // ----------------------------------------------------------------------------
2217
2218 int32_t AudioDeviceWindowsWave::RecordingDelay(uint16_t& delayMS) const
2219 {
2220 CriticalSectionScoped lock(&_critSect);
2221 delayMS = (uint16_t)_sndCardRecDelay;
2222 return 0;
2223 }
2224
2225 // ----------------------------------------------------------------------------
2226 // Playing
2227 // ----------------------------------------------------------------------------
2228
2229 bool AudioDeviceWindowsWave::Playing() const
2230 {
2231 return (_playing);
2232 }
2233 // ----------------------------------------------------------------------------
2234 // SetPlayoutBuffer
2235 // ----------------------------------------------------------------------------
2236
2237 int32_t AudioDeviceWindowsWave::SetPlayoutBuffer(const AudioDeviceModule::BufferType type, uint16_t sizeMS)
2238 {
2239 CriticalSectionScoped lock(&_critSect);
2240 _playBufType = type;
2241 if (type == AudioDeviceModule::kFixedBufferSize)
2242 {
2243 _playBufDelayFixed = sizeMS;
2244 }
2245 return 0;
2246 }
2247
2248 // ----------------------------------------------------------------------------
2249 // PlayoutBuffer
2250 // ----------------------------------------------------------------------------
2251
2252 int32_t AudioDeviceWindowsWave::PlayoutBuffer(AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const
2253 {
2254 CriticalSectionScoped lock(&_critSect);
2255 type = _playBufType;
2256 if (type == AudioDeviceModule::kFixedBufferSize)
2257 {
2258 sizeMS = _playBufDelayFixed;
2259 }
2260 else
2261 {
2262 sizeMS = _playBufDelay;
2263 }
2264
2265 return 0;
2266 }
2267
2268 // ----------------------------------------------------------------------------
2269 // CPULoad
2270 // ----------------------------------------------------------------------------
2271
2272 int32_t AudioDeviceWindowsWave::CPULoad(uint16_t& load) const
2273 {
2274
2275 load = static_cast<uint16_t>(100*_avgCPULoad);
2276
2277 return 0;
2278 }
2279
2280 // ----------------------------------------------------------------------------
2281 // PlayoutWarning
2282 // ----------------------------------------------------------------------------
2283
2284 bool AudioDeviceWindowsWave::PlayoutWarning() const
2285 {
2286 return ( _playWarning > 0);
2287 }
2288
2289 // ----------------------------------------------------------------------------
2290 // PlayoutError
2291 // ----------------------------------------------------------------------------
2292
2293 bool AudioDeviceWindowsWave::PlayoutError() const
2294 {
2295 return ( _playError > 0);
2296 }
2297
2298 // ----------------------------------------------------------------------------
2299 // RecordingWarning
2300 // ----------------------------------------------------------------------------
2301
2302 bool AudioDeviceWindowsWave::RecordingWarning() const
2303 {
2304 return ( _recWarning > 0);
2305 }
2306
2307 // ----------------------------------------------------------------------------
2308 // RecordingError
2309 // ----------------------------------------------------------------------------
2310
2311 bool AudioDeviceWindowsWave::RecordingError() const
2312 {
2313 return ( _recError > 0);
2314 }
2315
2316 // ----------------------------------------------------------------------------
2317 // ClearPlayoutWarning
2318 // ----------------------------------------------------------------------------
2319
2320 void AudioDeviceWindowsWave::ClearPlayoutWarning()
2321 {
2322 _playWarning = 0;
2323 }
2324
2325 // ----------------------------------------------------------------------------
2326 // ClearPlayoutError
2327 // ----------------------------------------------------------------------------
2328
2329 void AudioDeviceWindowsWave::ClearPlayoutError()
2330 {
2331 _playError = 0;
2332 }
2333
2334 // ----------------------------------------------------------------------------
2335 // ClearRecordingWarning
2336 // ----------------------------------------------------------------------------
2337
2338 void AudioDeviceWindowsWave::ClearRecordingWarning()
2339 {
2340 _recWarning = 0;
2341 }
2342
2343 // ----------------------------------------------------------------------------
2344 // ClearRecordingError
2345 // ----------------------------------------------------------------------------
2346
2347 void AudioDeviceWindowsWave::ClearRecordingError()
2348 {
2349 _recError = 0;
2350 }
2351
2352 // ============================================================================
2353 // Private Methods
2354 // ============================================================================
2355
2356 // ----------------------------------------------------------------------------
2357 // InputSanityCheckAfterUnlockedPeriod
2358 // ----------------------------------------------------------------------------
2359
2360 int32_t AudioDeviceWindowsWave::InputSanityCheckAfterUnlockedPeriod() const
2361 {
2362 if (_hWaveIn == NULL)
2363 {
2364 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "input state has been modified during unlocked period");
2365 return -1;
2366 }
2367 return 0;
2368 }
2369
2370 // ----------------------------------------------------------------------------
2371 // OutputSanityCheckAfterUnlockedPeriod
2372 // ----------------------------------------------------------------------------
2373
2374 int32_t AudioDeviceWindowsWave::OutputSanityCheckAfterUnlockedPeriod() const
2375 {
2376 if (_hWaveOut == NULL)
2377 {
2378 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "output state has been modified during unlocked period");
2379 return -1;
2380 }
2381 return 0;
2382 }
2383
2384 // ----------------------------------------------------------------------------
2385 // EnumeratePlayoutDevices
2386 // ----------------------------------------------------------------------------
2387
2388 int32_t AudioDeviceWindowsWave::EnumeratePlayoutDevices()
2389 {
2390
2391 uint16_t nDevices(PlayoutDevices());
2392 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "===============================================================");
2393 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#output devices: %u", nDevices);
2394
2395 WAVEOUTCAPS caps;
2396 MMRESULT res;
2397
2398 for (UINT deviceID = 0; deviceID < nDevices; deviceID++)
2399 {
2400 res = waveOutGetDevCaps(deviceID, &caps, sizeof(WAVEOUTCAPS));
2401 if (res != MMSYSERR_NOERROR)
2402 {
2403 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCaps() failed (err=%d)", res);
2404 }
2405
2406 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "===============================================================");
2407 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Device ID %u:", deviceID);
2408 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "manufacturer ID : %u", caps.wMid);
2409 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product ID : %u",caps.wPid);
2410 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "version of driver : %u.%u", HIBYTE(caps.vDriverVersion), LOBYTE(caps.vDriverVersion));
2411 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product name : %s", caps.szPname);
2412 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "dwFormats : 0x%x", caps.dwFormats);
2413 if (caps.dwFormats & WAVE_FORMAT_48S16)
2414 {
2415 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " 48kHz,stereo,16bit : SUPPORTED");
2416 }
2417 else
2418 {
2419 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " 48kHz,stereo,16bit : *NOT* SUPPORTED");
2420 }
2421 if (caps.dwFormats & WAVE_FORMAT_48M16)
2422 {
2423 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " 48kHz,mono,16bit : SUPPORTED");
2424 }
2425 else
2426 {
2427 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " 48kHz,mono,16bit : *NOT* SUPPORTED");
2428 }
2429 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wChannels : %u", caps.wChannels);
2430 TraceSupportFlags(caps.dwSupport);
2431 }
2432
2433 return 0;
2434 }
2435
2436 // ----------------------------------------------------------------------------
2437 // EnumerateRecordingDevices
2438 // ----------------------------------------------------------------------------
2439
2440 int32_t AudioDeviceWindowsWave::EnumerateRecordingDevices()
2441 {
2442
2443 uint16_t nDevices(RecordingDevices());
2444 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "===============================================================");
2445 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#input devices: %u", nDevices);
2446
2447 WAVEINCAPS caps;
2448 MMRESULT res;
2449
2450 for (UINT deviceID = 0; deviceID < nDevices; deviceID++)
2451 {
2452 res = waveInGetDevCaps(deviceID, &caps, sizeof(WAVEINCAPS));
2453 if (res != MMSYSERR_NOERROR)
2454 {
2455 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetDevCaps() failed (err=%d)", res);
2456 }
2457
2458 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "===============================================================");
2459 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Device ID %u:", deviceID);
2460 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "manufacturer ID : %u", caps.wMid);
2461 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product ID : %u",caps.wPid);
2462 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "version of driver : %u.%u", HIBYTE(caps.vDriverVersion), LOBYTE(caps.vDriverVersion));
2463 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product name : %s", caps.szPname);
2464 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "dwFormats : 0x%x", caps.dwFormats);
2465 if (caps.dwFormats & WAVE_FORMAT_48S16)
2466 {
2467 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " 48kHz,stereo,16bit : SUPPORTED");
2468 }
2469 else
2470 {
2471 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " 48kHz,stereo,16bit : *NOT* SUPPORTED");
2472 }
2473 if (caps.dwFormats & WAVE_FORMAT_48M16)
2474 {
2475 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " 48kHz,mono,16bit : SUPPORTED");
2476 }
2477 else
2478 {
2479 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " 48kHz,mono,16bit : *NOT* SUPPORTED");
2480 }
2481 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wChannels : %u", caps.wChannels);
2482 }
2483
2484 return 0;
2485 }
2486
2487 // ----------------------------------------------------------------------------
2488 // TraceSupportFlags
2489 // ----------------------------------------------------------------------------
2490
2491 void AudioDeviceWindowsWave::TraceSupportFlags(DWORD dwSupport) const
2492 {
2493 TCHAR buf[256];
2494
2495 StringCchPrintf(buf, 256, TEXT("support flags : 0x%x "), dwSupport);
2496
2497 if (dwSupport & WAVECAPS_PITCH)
2498 {
2499 // supports pitch control
2500 StringCchCat(buf, 256, TEXT("(PITCH)"));
2501 }
2502 if (dwSupport & WAVECAPS_PLAYBACKRATE)
2503 {
2504 // supports playback rate control
2505 StringCchCat(buf, 256, TEXT("(PLAYBACKRATE)"));
2506 }
2507 if (dwSupport & WAVECAPS_VOLUME)
2508 {
2509 // supports volume control
2510 StringCchCat(buf, 256, TEXT("(VOLUME)"));
2511 }
2512 if (dwSupport & WAVECAPS_LRVOLUME)
2513 {
2514 // supports separate left and right volume control
2515 StringCchCat(buf, 256, TEXT("(LRVOLUME)"));
2516 }
2517 if (dwSupport & WAVECAPS_SYNC)
2518 {
2519 // the driver is synchronous and will block while playing a buffer
2520 StringCchCat(buf, 256, TEXT("(SYNC)"));
2521 }
2522 if (dwSupport & WAVECAPS_SAMPLEACCURATE)
2523 {
2524 // returns sample-accurate position information
2525 StringCchCat(buf, 256, TEXT("(SAMPLEACCURATE)"));
2526 }
2527
2528 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%S", buf);
2529 }
2530
2531 // ----------------------------------------------------------------------------
2532 // TraceWaveInError
2533 // ----------------------------------------------------------------------------
2534
2535 void AudioDeviceWindowsWave::TraceWaveInError(MMRESULT error) const
2536 {
2537 TCHAR buf[MAXERRORLENGTH];
2538 TCHAR msg[MAXERRORLENGTH];
2539
2540 StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
2541 waveInGetErrorText(error, msg, MAXERRORLENGTH);
2542 StringCchCat(buf, MAXERRORLENGTH, msg);
2543 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%S", buf);
2544 }
2545
2546 // ----------------------------------------------------------------------------
2547 // TraceWaveOutError
2548 // ----------------------------------------------------------------------------
2549
2550 void AudioDeviceWindowsWave::TraceWaveOutError(MMRESULT error) const
2551 {
2552 TCHAR buf[MAXERRORLENGTH];
2553 TCHAR msg[MAXERRORLENGTH];
2554
2555 StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
2556 waveOutGetErrorText(error, msg, MAXERRORLENGTH);
2557 StringCchCat(buf, MAXERRORLENGTH, msg);
2558 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%S", buf);
2559 }
2560
2561 // ----------------------------------------------------------------------------
2562 // PrepareStartPlayout
2563 // ----------------------------------------------------------------------------
2564
2565 int32_t AudioDeviceWindowsWave::PrepareStartPlayout()
2566 {
2567
2568 CriticalSectionScoped lock(&_critSect);
2569
2570 if (_hWaveOut == NULL)
2571 {
2572 return -1;
2573 }
2574
2575 // A total of 30ms of data is immediately placed in the SC buffer
2576 //
2577 int8_t zeroVec[4*PLAY_BUF_SIZE_IN_SAMPLES]; // max allocation
2578 memset(zeroVec, 0, 4*PLAY_BUF_SIZE_IN_SAMPLES);
2579
2580 {
2581 Write(zeroVec, PLAY_BUF_SIZE_IN_SAMPLES);
2582 Write(zeroVec, PLAY_BUF_SIZE_IN_SAMPLES);
2583 Write(zeroVec, PLAY_BUF_SIZE_IN_SAMPLES);
2584 }
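// Each Write() call above queues PLAY_BUF_SIZE_IN_SAMPLES zero-valued samples
// (nominally 10 ms), which is where the 30 ms figure mentioned above comes from.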
2585
2586 _playAcc = 0;
2587 _playWarning = 0;
2588 _playError = 0;
2589 _dc_diff_mean = 0;
2590 _dc_y_prev = 0;
2591 _dc_penalty_counter = 20;
2592 _dc_prevtime = 0;
2593 _dc_prevplay = 0;
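// The _dc_* members reset above feed the clock-drift monitor in GetPlayoutBufferDelay();
// starting the penalty counter at 20 keeps that filter from reacting during the first
// updates after playout starts.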
2594
2595 return 0;
2596 }
2597
2598 // ----------------------------------------------------------------------------
2599 // PrepareStartRecording
2600 // ----------------------------------------------------------------------------
2601
2602 int32_t AudioDeviceWindowsWave::PrepareStartRecording()
2603 {
2604
2605 CriticalSectionScoped lock(&_critSect);
2606
2607 if (_hWaveIn == NULL)
2608 {
2609 return -1;
2610 }
2611
2612 _playAcc = 0;
2613 _recordedBytes = 0;
2614 _recPutBackDelay = REC_PUT_BACK_DELAY;
2615
2616 MMRESULT res;
2617 MMTIME mmtime;
2618 mmtime.wType = TIME_SAMPLES;
2619
2620 res = waveInGetPosition(_hWaveIn, &mmtime, sizeof(mmtime));
2621 if (MMSYSERR_NOERROR != res)
2622 {
2623 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetPosition(TIME_SAMPLES) failed (err=%d)", res);
2624 TraceWaveInError(res);
2625 }
2626
2627 _read_samples = mmtime.u.sample;
2628 _read_samples_old = _read_samples;
2629 _rec_samples_old = mmtime.u.sample;
2630 _wrapCounter = 0;
2631
2632 for (int n = 0; n < N_BUFFERS_IN; n++)
2633 {
2634 const uint8_t nBytesPerSample = 2*_recChannels;
2635
2636 // set up the input wave header
2637 _waveHeaderIn[n].lpData = reinterpret_cast<LPSTR>(&_recBuffer[n]);
2638 _waveHeaderIn[n].dwBufferLength = nBytesPerSample * REC_BUF_SIZE_IN_SAMPLES;
2639 _waveHeaderIn[n].dwFlags = 0;
2640 _waveHeaderIn[n].dwBytesRecorded = 0;
2641 _waveHeaderIn[n].dwUser = 0;
2642
2643 memset(_recBuffer[n], 0, nBytesPerSample * REC_BUF_SIZE_IN_SAMPLES);
2644
2645 // prepare a buffer for waveform-audio input
2646 res = waveInPrepareHeader(_hWaveIn, &_waveHeaderIn[n], sizeof(WAVEHDR));
2647 if (MMSYSERR_NOERROR != res)
2648 {
2649 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInPrepareHeader(%d) failed (err=%d)", n, res);
2650 TraceWaveInError(res);
2651 }
2652
2653 // send an input buffer to the given waveform-audio input device
2654 res = waveInAddBuffer(_hWaveIn, &_waveHeaderIn[n], sizeof(WAVEHDR));
2655 if (MMSYSERR_NOERROR != res)
2656 {
2657 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInAddBuffer(%d) failed (err=%d)", n, res);
2658 TraceWaveInError(res);
2659 }
2660 }
2661
2662 // start input on the given waveform-audio input device
2663 res = waveInStart(_hWaveIn);
2664 if (MMSYSERR_NOERROR != res)
2665 {
2666 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInStart() failed (err=%d)", res);
2667 TraceWaveInError(res);
2668 }
2669
2670 return 0;
2671 }
2672
2673 // ----------------------------------------------------------------------------
2674 // GetPlayoutBufferDelay
2675 // ----------------------------------------------------------------------------
2676
2677 int32_t AudioDeviceWindowsWave::GetPlayoutBufferDelay(uint32_t& writtenSamples, uint32_t& playedSamples)
2678 {
2679 int i;
2680 int ms_Header;
2681 long playedDifference;
2682 int msecInPlayoutBuffer(0); // #milliseconds of audio in the playout buffer
2683
2684 const uint16_t nSamplesPerMs = (uint16_t)(N_PLAY_SAMPLES_PER_SEC/1000); // default is 48000/1000 = 48
2685
2686 MMRESULT res;
2687 MMTIME mmtime;
2688
2689 if (!_playing)
2690 {
2691 playedSamples = 0;
2692 return (0);
2693 }
2694
2695 // Retrieve the current playback position.
2696 //
2697 mmtime.wType = TIME_SAMPLES; // number of waveform-audio samples
2698 res = waveOutGetPosition(_hWaveOut, &mmtime, sizeof(mmtime));
2699 if (MMSYSERR_NOERROR != res)
2700 {
2701 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetPosition() failed (err=%d)", res);
2702 TraceWaveOutError(res);
2703 }
2704
2705 writtenSamples = _writtenSamples; // #samples written to the playout buffer
2706 playedSamples = mmtime.u.sample; // current playout position in the playout buffer
2707
2708 // derive remaining amount (in ms) of data in the playout buffer
2709 msecInPlayoutBuffer = ((writtenSamples - playedSamples)/nSamplesPerMs);
2710
2711 playedDifference = (long) (_playedSamplesOld - playedSamples);
2712
2713 if (playedDifference > 64000)
2714 {
2715 // If the sound card's number-of-played-out-samples counter wraps around before
2716 // writtenSamples wraps around, this needs to be adjusted. This can happen on
2717 // sound cards that use less than 32 bits to keep track of the number of played-out
2718 // samples. To avoid being fooled by sound cards that sometimes produce false
2719 // output, we compare the old value minus the new value with a large value. This is
2720 // necessary because some sound cards produce an output like 153, 198, 175, 230, which
2721 // would trigger the wrap-around handling if we didn't compare with a large value.
2722 // The value 64000 is chosen because 2^16=65536, so we allow wrap-around at 16 bits.
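//
// Example: a card with a 16-bit position counter that reported 65000 samples last
// time and 200 samples now gives playedDifference = 64800 (> 64000); the loop below
// then finds i = 15 and _writtenSamples is reduced by POW2(16) = 65536 so that it
// stays in step with the wrapped counter.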
2723
2724 i = 31;
2725 while((_playedSamplesOld <= (unsigned long)POW2(i)) && (i > 14)) {
2726 i--;
2727 }
2728
2729 if((i < 31) && (i > 14)) {
2730 // Avoid adjusting when there is a 32-bit wrap-around since that is
2731 // expected behavior.
2732 //
2733 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "msecleft() => wrap around occurred (%d bits used by sound card)", (i+1));
2734
2735 _writtenSamples = _writtenSamples - POW2(i + 1);
2736 writtenSamples = _writtenSamples;
2737 msecInPlayoutBuffer = ((writtenSamples - playedSamples)/nSamplesPerMs);
2738 }
2739 }
2740 else if ((_writtenSamplesOld > POW2(31)) && (writtenSamples < 96000))
2741 {
2742 // Wrap-around as expected after having used all 32 bits. (But we still
2743 // test whether the wrap-around happened earlier, which it should not.)
2744
2745 i = 31;
2746 while (_writtenSamplesOld <= (unsigned long)POW2(i)) {
2747 i--;
2748 }
2749
2750 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, " msecleft() (wrap around occurred after having used all 32 bits)");
2751
2752 _writtenSamplesOld = writtenSamples;
2753 _playedSamplesOld = playedSamples;
2754 msecInPlayoutBuffer = (int)((writtenSamples + POW2(i + 1) - playedSamples)/nSamplesPerMs);
2755
2756 }
2757 else if ((writtenSamples < 96000) && (playedSamples > POW2(31)))
2758 {
2759 // Wrap-around has, as expected, happened for writtenSamples before
2760 // playedSamples, so we have to adjust for this until playedSamples
2761 // has also wrapped around.
2762
2763 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, " msecleft() (wrap around occurred: correction of output is done)");
2764
2765 _writtenSamplesOld = writtenSamples;
2766 _playedSamplesOld = playedSamples;
2767 msecInPlayoutBuffer = (int)((writtenSamples + POW2(32) - playedSamples)/nSamplesPerMs);
2768 }
2769
2770 _writtenSamplesOld = writtenSamples;
2771 _playedSamplesOld = playedSamples;
2772
2773
2774 // We use the following formula to verify that playout works as it should:
2775 // y = playedSamples/48 - timeGetTime();
2776 // y represents the clock drift between the system clock and the sound card clock - it should be fairly stable.
2777 // When the exponential mean value of diff(y) moves away from zero, something is wrong.
2778 // The exponential formula will accept 1% clock drift but not more.
2779 // A driver error means that we will play too little audio and have a large negative clock drift.
2780 // We kick in our alternative method when the clock drift reaches 20%.
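//
// Note on the filter below: _dc_diff_mean = (990*_dc_diff_mean)/1000 + 10*diff keeps
// (ignoring integer rounding) 1000 times an exponential moving average of diff with
// smoothing factor 0.01, so the -200 trigger level corresponds to an average diff of
// roughly -0.2 ms per update.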
2781
2782 int diff, y;
2783 unsigned int time = 0;
2784
2785 // If we have other problems that cause playout glitches,
2786 // we don't want to switch playout method.
2787 // Check if the playout buffer is extremely low, or if we haven't been able to
2788 // execute our code in more than 40 ms.
2789
2790 time = timeGetTime();
2791
2792 if ((msecInPlayoutBuffer < 20) || (time - _dc_prevtime > 40))
2793 {
2794 _dc_penalty_counter = 100;
2795 }
2796
2797 if ((playedSamples != 0))
2798 {
2799 y = playedSamples/48 - time;
2800 if ((_dc_y_prev != 0) && (_dc_penalty_counter == 0))
2801 {
2802 diff = y - _dc_y_prev;
2803 _dc_diff_mean = (990*_dc_diff_mean)/1000 + 10*diff;
2804 }
2805 _dc_y_prev = y;
2806 }
2807
2808 if (_dc_penalty_counter)
2809 {
2810 _dc_penalty_counter--;
2811 }
2812
2813 if (_dc_diff_mean < -200)
2814 {
2815 // Always reset the filter
2816 _dc_diff_mean = 0;
2817
2818 // A problem has been detected. Switch delay method and set the min buffer to 80.
2819 // Reset the filter and keep monitoring the filter output.
2820 // If the issue is detected a second time, increase the min buffer to 100.
2821 // If that does not help, we must modify this scheme further.
2822
2823 _useHeader++;
2824 if (_useHeader == 1)
2825 {
2826 _minPlayBufDelay = 80;
2827 _playWarning = 1; // only warn first time
2828 WEBRTC_TRACE(kTraceInfo, kTraceUtility, -1, "Modification #1: _useHeader = %d, _minPlayBufDelay = %d", _useHeader, _minPlayBufDelay);
2829 }
2830 else if (_useHeader == 2)
2831 {
2832 _minPlayBufDelay = 100; // add some more safety
2833 WEBRTC_TRACE(kTraceInfo, kTraceUtility, -1, "Modification #2: _useHeader = %d, _minPlayBufDelay = %d", _useHeader, _minPlayBufDelay);
2834 }
2835 else
2836 {
2837 // This state should not be entered... (HA)
2838 WEBRTC_TRACE (kTraceWarning, kTraceUtility, -1, "further actions are required!");
2839 }
2840 if (_playWarning == 1)
2841 {
2842 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending playout warning exists");
2843 }
2844 _playWarning = 1; // triggers callback from module process thread
2845 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "kPlayoutWarning message posted: switching to alternative playout delay method");
2846 }
2847 _dc_prevtime = time;
2848 _dc_prevplay = playedSamples;
2849
2850 // Try a very rough method of looking at how many buffers are still playing
2851 ms_Header = 0;
2852 for (i = 0; i < N_BUFFERS_OUT; i++) {
2853 if ((_waveHeaderOut[i].dwFlags & WHDR_INQUEUE)!=0) {
2854 ms_Header += 10;
2855 }
2856 }
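// Note: ms_Header assumes that every output header still flagged WHDR_INQUEUE
// represents 10 ms of queued audio (the nominal size of one playout buffer).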
2857
2858 if ((ms_Header-50) > msecInPlayoutBuffer) {
2859 // Test for cases when GetPosition appears to be screwed up (currently just log....)
2860 TCHAR infoStr[300];
2861 if (_no_of_msecleft_warnings%20==0)
2862 {
2863 StringCchPrintf(infoStr, 300, TEXT("writtenSamples=%i, playedSamples=%i, msecInPlayoutBuffer=%i, ms_Header=%i"), writtenSamples, playedSamples, msecInPlayoutBuffer, ms_Header);
2864 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "%S", infoStr);
2865 }
2866 _no_of_msecleft_warnings++;
2867 }
2868
2869 // If this is true we have had a problem with the playout
2870 if (_useHeader > 0)
2871 {
2872 return (ms_Header);
2873 }
2874
2875
2876 if (ms_Header < msecInPlayoutBuffer)
2877 {
2878 if (_no_of_msecleft_warnings % 100 == 0)
2879 {
2880 TCHAR str[300];
2881 StringCchPrintf(str, 300, TEXT("_no_of_msecleft_warnings=%i, msecInPlayoutBuffer=%i ms_Header=%i (minBuffer=%i buffersize=%i writtenSamples=%i playedSamples=%i)"),
2882 _no_of_msecleft_warnings, msecInPlayoutBuffer, ms_Header, _minPlayBufDelay, _playBufDelay, writtenSamples, playedSamples);
2883 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "%S", str);
2884 }
2885 _no_of_msecleft_warnings++;
2886 ms_Header -= 6; // Round off as we only have 10ms resolution + Header info is usually slightly delayed compared to GetPosition
2887
2888 if (ms_Header < 0)
2889 ms_Header = 0;
2890
2891 return (ms_Header);
2892 }
2893 else
2894 {
2895 return (msecInPlayoutBuffer);
2896 }
2897 }
2898
2899 // ----------------------------------------------------------------------------
2900 // GetRecordingBufferDelay
2901 // ----------------------------------------------------------------------------
2902
2903 int32_t AudioDeviceWindowsWave::GetRecordingBufferDelay(uint32_t& readSamples, uint32_t& recSamples)
2904 {
2905 long recDifference;
2906 MMTIME mmtime;
2907 MMRESULT mmr;
2908
2909 const uint16_t nSamplesPerMs = (uint16_t)(N_REC_SAMPLES_PER_SEC/1000); // default is 48000/1000 = 48
2910
2911 // Retrieve the current input position of the given waveform-audio input device
2912 //
2913 mmtime.wType = TIME_SAMPLES;
2914 mmr = waveInGetPosition(_hWaveIn, &mmtime, sizeof(mmtime));
2915 if (MMSYSERR_NOERROR != mmr)
2916 {
2917 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetPosition() failed (err=%d)", mmr);
2918 TraceWaveInError(mmr);
2919 }
2920
2921 readSamples = _read_samples; // updated for each full frame in RecProc()
2922 recSamples = mmtime.u.sample; // remaining time in input queue (recorded but not read yet)
2923
2924 recDifference = (long) (_rec_samples_old - recSamples);
2925
2926 if( recDifference > 64000) {
2927 WEBRTC_TRACE (kTraceDebug, kTraceUtility, -1,"WRAP 1 (recDifference =%d)", recDifference);
2928 // If the sound card's number-of-recorded-samples counter wraps around before
2929 // readSamples wraps around, this needs to be adjusted. This can happen on
2930 // sound cards that use less than 32 bits to keep track of the number of recorded
2931 // samples. To avoid being fooled by sound cards that sometimes produce false
2932 // output, we compare the old value minus the new value with a large value. This is
2933 // necessary because some sound cards produce an output like 153, 198, 175, 230, which
2934 // would trigger the wrap-around handling if we didn't compare with a large value.
2935 // The value 64000 is chosen because 2^16=65536, so we allow wrap-around at 16 bits.
2936 //
2937 int i = 31;
2938 while((_rec_samples_old <= (unsigned long)POW2(i)) && (i > 14))
2939 i--;
2940
2941 if((i < 31) && (i > 14)) {
2942 // Avoid adjusting when there is a 32-bit wrap-around since that is
2943 // expected behavior.
2944 //
2945 _read_samples = _read_samples - POW2(i + 1);
2946 readSamples = _read_samples;
2947 _wrapCounter++;
2948 } else {
2949 WEBRTC_TRACE (kTraceWarning, kTraceUtility, -1,"AEC (_rec_samples_old %d recSamples %d)",_rec_samples_old, recSamples);
2950 }
2951 }
2952
2953 if((_wrapCounter>200)){
2954 // Do nothing, handled later
2955 }
2956 else if((_rec_samples_old > POW2(31)) && (recSamples < 96000)) {
2957 WEBRTC_TRACE (kTraceDebug, kTraceUtility, -1,"WRAP 2 (_rec_samples_old %d recSamples %d)",_rec_samples_old, recSamples);
2958 // Wrap around as expected after having used all 32 bits.
2959 _read_samples_old = readSamples;
2960 _rec_samples_old = recSamples;
2961 _wrapCounter++;
2962 return (int)((recSamples + POW2(32) - readSamples)/nSamplesPerMs);
2963
2964
2965 } else if((recSamples < 96000) && (readSamples > POW2(31))) {
2966 WEBRTC_TRACE (kTraceDebug, kTraceUtility, -1,"WRAP 3 (readSamples %d recSamples %d)",readSamples, recSamples);
2967 // Wrap-around has, as expected, happened for recSamples before
2968 // readSamples, so we have to adjust for this until readSamples
2969 // has also wrapped around.
2970 _read_samples_old = readSamples;
2971 _rec_samples_old = recSamples;
2972 _wrapCounter++;
2973 return (int)((recSamples + POW2(32) - readSamples)/nSamplesPerMs);
2974 }
2975
2976 _read_samples_old = _read_samples;
2977 _rec_samples_old = recSamples;
2978 int res=(((int)_rec_samples_old - (int)_read_samples_old)/nSamplesPerMs);
2979
2980 if((res > 2000)||(res < 0)||(_wrapCounter>200)){
2981 // Reset everything
2982 WEBRTC_TRACE (kTraceWarning, kTraceUtility, -1,"msec_read error (res %d wrapCounter %d)",res, _wrapCounter);
2983 MMTIME mmtime;
2984 mmtime.wType = TIME_SAMPLES;
2985
2986 mmr=waveInGetPosition(_hWaveIn, &mmtime, sizeof(mmtime));
2987 if (mmr != MMSYSERR_NOERROR) {
2988 WEBRTC_TRACE (kTraceWarning, kTraceUtility, -1, "waveInGetPosition failed (mmr=%d)", mmr);
2989 }
2990 _read_samples=mmtime.u.sample;
2991 _read_samples_old=_read_samples;
2992 _rec_samples_old=mmtime.u.sample;
2993
2994 // Guess a decent value
2995 res = 20;
2996 }
2997
2998 _wrapCounter = 0;
2999 return res;
3000 }
3001
3002 // ============================================================================
3003 // Thread Methods
3004 // ============================================================================
3005
3006 // ----------------------------------------------------------------------------
3007 // ThreadFunc
3008 // ----------------------------------------------------------------------------
3009
3010 bool AudioDeviceWindowsWave::ThreadFunc(void* pThis)
3011 {
3012 return (static_cast<AudioDeviceWindowsWave*>(pThis)->ThreadProcess());
3013 }
3014
3015 // ----------------------------------------------------------------------------
3016 // ThreadProcess
3017 // ----------------------------------------------------------------------------
3018
3019 bool AudioDeviceWindowsWave::ThreadProcess()
3020 {
3021 uint32_t time(0);
3022 uint32_t playDiff(0);
3023 uint32_t recDiff(0);
3024
3025 LONGLONG playTime(0);
3026 LONGLONG recTime(0);
3027
3028 switch (_timeEvent.Wait(1000))
3029 {
3030 case kEventSignaled:
3031 break;
3032 case kEventError:
3033 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "EventWrapper::Wait() failed => restarting timer");
3034 _timeEvent.StopTimer();
3035 _timeEvent.StartTimer(true, TIMER_PERIOD_MS);
3036 return true;
3037 case kEventTimeout:
3038 return true;
3039 }
3040
3041 time = TickTime::MillisecondTimestamp();
3042
3043 if (_startPlay)
3044 {
3045 if (PrepareStartPlayout() == 0)
3046 {
3047 _prevTimerCheckTime = time;
3048 _prevPlayTime = time;
3049 _startPlay = false;
3050 _playing = true;
3051 _playStartEvent.Set();
3052 }
3053 }
3054
3055 if (_startRec)
3056 {
3057 if (PrepareStartRecording() == 0)
3058 {
3059 _prevTimerCheckTime = time;
3060 _prevRecTime = time;
3061 _prevRecByteCheckTime = time;
3062 _startRec = false;
3063 _recording = true;
3064 _recStartEvent.Set();
3065 }
3066 }
3067
3068 if (_playing)
3069 {
3070 playDiff = time - _prevPlayTime;
3071 }
3072
3073 if (_recording)
3074 {
3075 recDiff = time - _prevRecTime;
3076 }
3077
3078 if (_playing || _recording)
3079 {
3080 RestartTimerIfNeeded(time);
3081 }
3082
3083 if (_playing &&
3084 ((playDiff > (uint32_t)(_dTcheckPlayBufDelay - 1)) ||
3085 (playDiff < 0)))
3086 {
3087 Lock();
3088 if (_playing)
3089 {
3090 if (PlayProc(playTime) == -1)
3091 {
3092 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "PlayProc() failed");
3093 }
3094 _prevPlayTime = time;
3095 if (playTime != 0)
3096 _playAcc += playTime;
3097 }
3098 UnLock();
3099 }
3100
3101 if (_playing && (playDiff > 12))
3102 {
3103 // It has been a long time since we were able to play out, try to
3104 // compensate by calling PlayProc again.
3105 //
3106 Lock();
3107 if (_playing)
3108 {
3109 if (PlayProc(playTime) == -1)
3110 {
3111 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "PlayProc() failed");
3112 }
3113 _prevPlayTime = time;
3114 if (playTime != 0)
3115 _playAcc += playTime;
3116 }
3117 UnLock();
3118 }
3119
3120 if (_recording &&
3121 ((recDiff > REC_CHECK_TIME_PERIOD_MS) ||
3122 (recDiff < 0)))
3123 {
3124 Lock();
3125 if (_recording)
3126 {
3127 int32_t nRecordedBytes(0);
3128 uint16_t maxIter(10);
3129
3130 // Deliver all available recorded buffers and update the CPU load measurement.
3131 // We use a while loop here to compensate for the fact that the multi-media timer
3132 // can sometimes enter a "bad state" after hibernation where the resolution is
3133 // reduced from ~1ms to ~10-15 ms.
3134 //
3135 while ((nRecordedBytes = RecProc(recTime)) > 0)
3136 {
3137 maxIter--;
3138 _recordedBytes += nRecordedBytes;
3139 if (recTime && _perfFreq.QuadPart)
3140 {
3141 // Measure the average CPU load:
3142 // This is a simplified expression where an exponential filter is used:
3143 // _avgCPULoad = 0.99 * _avgCPULoad + 0.01 * newCPU,
3144 // newCPU = (recTime+playAcc)/f is time in seconds
3145 // newCPU / 0.01 is the fraction of a 10 ms period
3146 // The two 0.01 factors cancel each other.
3147 // NOTE - assumes 10ms audio buffers.
3148 //
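// Example: if each 10 ms frame costs 1 ms of processing, recTime + _playAcc equals
// _perfFreq.QuadPart/1000 ticks, every update adds 0.001, and the filter settles at
// 0.001/0.01 = 0.10, which CPULoad() reports as 10 (percent).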
3149 _avgCPULoad = (float)(_avgCPULoad*.99 + (recTime+_playAcc)/(double)(_perfFreq.QuadPart));
3150 _playAcc = 0;
3151 }
3152 if (maxIter == 0)
3153 {
3154 // If we get this message often, our compensation scheme is not sufficient.
3155 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "failed to compensate for reduced MM-timer resolution");
3156 }
3157 }
3158
3159 if (nRecordedBytes == -1)
3160 {
3161 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "RecProc() failed");
3162 }
3163
3164 _prevRecTime = time;
3165
3166 // Monitor the recording process and generate error/warning callbacks if needed
3167 MonitorRecording(time);
3168 }
3169 UnLock();
3170 }
3171
3172 if (!_recording)
3173 {
3174 _prevRecByteCheckTime = time;
3175 _avgCPULoad = 0;
3176 }
3177
3178 return true;
3179 }
3180
3181 // ----------------------------------------------------------------------------
3182 // RecProc
3183 // ----------------------------------------------------------------------------
3184
3185 int32_t AudioDeviceWindowsWave::RecProc(LONGLONG& consumedTime)
3186 {
3187 MMRESULT res;
3188 uint32_t bufCount(0);
3189 uint32_t nBytesRecorded(0);
3190
3191 consumedTime = 0;
3192
3193 // count modulo N_BUFFERS_IN (0,1,2,...,(N_BUFFERS_IN-1),0,1,2,..)
3194 if (_recBufCount == N_BUFFERS_IN)
3195 {
3196 _recBufCount = 0;
3197 }
3198
3199 bufCount = _recBufCount;
3200
3201 // take mono/stereo mode into account when deriving size of a full buffer
3202 const uint16_t bytesPerSample = 2*_recChannels;
3203 const uint32_t fullBufferSizeInBytes = bytesPerSample * REC_BUF_SIZE_IN_SAMPLES;
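// Example (assuming 10 ms buffers at 48 kHz, i.e. REC_BUF_SIZE_IN_SAMPLES = 480):
// stereo 16-bit input gives bytesPerSample = 4 and fullBufferSizeInBytes = 1920.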
3204
3205 // read number of recorded bytes for the given input-buffer
3206 nBytesRecorded = _waveHeaderIn[bufCount].dwBytesRecorded;
3207
3208 if (nBytesRecorded == fullBufferSizeInBytes ||
3209 (nBytesRecorded > 0))
3210 {
3211 int32_t msecOnPlaySide;
3212 int32_t msecOnRecordSide;
3213 uint32_t writtenSamples;
3214 uint32_t playedSamples;
3215 uint32_t readSamples, recSamples;
3216 bool send = true;
3217
3218 uint32_t nSamplesRecorded = (nBytesRecorded/bytesPerSample); // divide by 2 or 4 depending on mono or stereo
3219
3220 if (nBytesRecorded == fullBufferSizeInBytes)
3221 {
3222 _timesdwBytes = 0;
3223 }
3224 else
3225 {
3226 // Test if it is stuck on this buffer
3227 _timesdwBytes++;
3228 if (_timesdwBytes < 5)
3229 {
3230 // keep trying
3231 return (0);
3232 }
3233 else
3234 {
3235 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id,"nBytesRecorded=%d => don't use", nBytesRecorded);
3236 _timesdwBytes = 0;
3237 send = false;
3238 }
3239 }
3240
3241 // store the recorded buffer (no action will be taken if the #recorded samples is not a full buffer)
3242 _ptrAudioBuffer->SetRecordedBuffer(_waveHeaderIn[bufCount].lpData, nSamplesRecorded);
3243
3244 // update #samples read
3245 _read_samples += nSamplesRecorded;
3246
3247 // Check how large the playout and recording buffers are on the sound card.
3248 // This info is needed by the AEC.
3249 //
3250 msecOnPlaySide = GetPlayoutBufferDelay(writtenSamples, playedSamples);
3251 msecOnRecordSide = GetRecordingBufferDelay(readSamples, recSamples);
3252
3253 // If we use the alternative playout delay method, skip the clock drift compensation
3254 // since it will be an unreliable estimate and might degrade AEC performance.
3255 int32_t drift = (_useHeader > 0) ? 0 : GetClockDrift(playedSamples, recSamples);
3256
3257 _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, drift);
3258
3259 _ptrAudioBuffer->SetTypingStatus(KeyPressed());
3260
3261 // Store the play and rec delay values for video synchronization
3262 _sndCardPlayDelay = msecOnPlaySide;
3263 _sndCardRecDelay = msecOnRecordSide;
3264
3265 LARGE_INTEGER t1={0},t2={0};
3266
3267 if (send)
3268 {
3269 QueryPerformanceCounter(&t1);
3270
3271 // deliver recorded samples at specified sample rate, mic level etc. to the observer using callback
3272 UnLock();
3273 _ptrAudioBuffer->DeliverRecordedData();
3274 Lock();
3275
3276 QueryPerformanceCounter(&t2);
3277
3278 if (InputSanityCheckAfterUnlockedPeriod() == -1)
3279 {
3280 // assert(false);
3281 return -1;
3282 }
3283 }
3284
3285 if (_AGC)
3286 {
3287 uint32_t newMicLevel = _ptrAudioBuffer->NewMicLevel();
3288 if (newMicLevel != 0)
3289 {
3290 // The VQE will only deliver non-zero microphone levels when a change is needed.
3291 WEBRTC_TRACE(kTraceStream, kTraceUtility, _id,"AGC change of volume: => new=%u", newMicLevel);
3292
3293 // We store this outside of the audio buffer to avoid
3294 // having it overwritten by the getter thread.
3295 _newMicLevel = newMicLevel;
3296 SetEvent(_hSetCaptureVolumeEvent);
3297 }
3298 }
3299
3300 // return utilized buffer to queue after specified delay (default is 4)
3301 if (_recDelayCount > (_recPutBackDelay-1))
3302 {
3303 // delay the buffer counter to compensate for the "put-back-delay"
3304 bufCount = (bufCount + N_BUFFERS_IN - _recPutBackDelay) % N_BUFFERS_IN;
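// Example: with the default _recPutBackDelay of 4, the header returned to the driver
// below is the one that was filled four buffers before the buffer just delivered.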
3305
3306 // reset counter so we can make new detection
3307 _waveHeaderIn[bufCount].dwBytesRecorded = 0;
3308
3309 // return the utilized wave-header after certain delay (given by _recPutBackDelay)
3310 res = waveInUnprepareHeader(_hWaveIn, &(_waveHeaderIn[bufCount]), sizeof(WAVEHDR));
3311 if (MMSYSERR_NOERROR != res)
3312 {
3313 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInUnprepareHeader(%d) failed (err=%d)", bufCount, res);
3314 TraceWaveInError(res);
3315 }
3316
3317 // ensure that the utilized header can be used again
3318 res = waveInPrepareHeader(_hWaveIn, &(_waveHeaderIn[bufCount]), sizeof(WAVEHDR));
3319 if (res != MMSYSERR_NOERROR)
3320 {
3321 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveInPrepareHeader(%d) failed (err=%d)", bufCount, res);
3322 TraceWaveInError(res);
3323 return -1;
3324 }
3325
3326 // add the utilized buffer to the queue again
3327 res = waveInAddBuffer(_hWaveIn, &(_waveHeaderIn[bufCount]), sizeof(WAVEHDR));
3328 if (res != MMSYSERR_NOERROR)
3329 {
3330 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveInAddBuffer(%d) failed (err=%d)", bufCount, res);
3331 TraceWaveInError(res);
3332 if (_recPutBackDelay < 50)
3333 {
3334 _recPutBackDelay++;
3335 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "_recPutBackDelay increased to %d", _recPutBackDelay);
3336 }
3337 else
3338 {
3339 if (_recError == 1)
3340 {
3341 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending recording error exists");
3342 }
3343 _recError = 1; // triggers callback from module process thread
3344 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kRecordingError message posted: _recPutBackDelay=%u", _recPutBackDelay);
3345 }
3346 }
3347 } // if (_recDelayCount > (_recPutBackDelay-1))
3348
3349 if (_recDelayCount < (_recPutBackDelay+1))
3350 {
3351 _recDelayCount++;
3352 }
3353
3354 // increase main buffer count since one complete buffer has now been delivered
3355 _recBufCount++;
3356
3357 if (send) {
3358 // Calculate processing time
3359 consumedTime = (int)(t2.QuadPart-t1.QuadPart);
3360 // handle wraps, time should not be higher than a second
3361 if ((consumedTime > _perfFreq.QuadPart) || (consumedTime < 0))
3362 consumedTime = 0;
3363 }
3364
3365 } // if (nBytesRecorded == fullBufferSizeInBytes || (nBytesRecorded > 0))
3366
3367 return nBytesRecorded;
3368 }
3369
3370 // ----------------------------------------------------------------------------
3371 // PlayProc
3372 // ----------------------------------------------------------------------------
3373
3374 int AudioDeviceWindowsWave::PlayProc(LONGLONG& consumedTime)
3375 {
3376 int32_t remTimeMS(0);
3377 int8_t playBuffer[4*PLAY_BUF_SIZE_IN_SAMPLES];
3378 uint32_t writtenSamples(0);
3379 uint32_t playedSamples(0);
3380
3381 LARGE_INTEGER t1;
3382 LARGE_INTEGER t2;
3383
3384 consumedTime = 0;
3385 _waitCounter++;
3386
3387 // Get number of ms of sound that remains in the sound card buffer for playback.
3388 //
3389 remTimeMS = GetPlayoutBufferDelay(writtenSamples, playedSamples);
3390
3391 // The threshold can be adaptive or fixed. The adaptive scheme is updated
3392 // also in fixed mode, but the updated threshold is not used then.
3393 //
3394 const uint16_t thresholdMS =
3395 (_playBufType == AudioDeviceModule::kAdaptiveBufferSize) ? _playBufDelay : _playBufDelayFixed;
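// _playBufType and _playBufDelayFixed come from the public ADM API, where a client
// chooses between the adaptive and the fixed scheme before playout starts. Hedged
// usage sketch (method name assumed from the AudioDeviceModule interface of this
// WebRTC generation; illustrative only):
//
//   // Let the driver adapt the playout buffer size at run time:
//   adm->SetPlayoutBuffer(AudioDeviceModule::kAdaptiveBufferSize, 0);
//   // ...or pin it to a fixed 80 ms:
//   adm->SetPlayoutBuffer(AudioDeviceModule::kFixedBufferSize, 80);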
3396
3397 if (remTimeMS < thresholdMS + 9)
3398 {
3399 _dTcheckPlayBufDelay = 5;
3400
3401 if (remTimeMS == 0)
3402 {
3403 WEBRTC_TRACE(kTraceInfo, kTraceUtility, _id, "playout buffer is empty => we must adapt...");
3404 if (_waitCounter > 30)
3405 {
3406 _erZeroCounter++;
3407 if (_erZeroCounter == 2)
3408 {
3409 _playBufDelay += 15;
3410 _minPlayBufDelay += 20;
3411 _waitCounter = 50;
3412 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "New playout states (er=0,erZero=2): minPlayBufDelay=%u, playBufDelay=%u", _minPlayBufDelay, _playBufDelay);
3413 }
3414 else if (_erZeroCounter == 3)
3415 {
3416 _erZeroCounter = 0;
3417 _playBufDelay += 30;
3418 _minPlayBufDelay += 25;
3419 _waitCounter = 0;
3420 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "New playout states (er=0, erZero=3): minPlayBufDelay=%u, playBufDelay=%u", _minPlayBufDelay, _playBufDelay);
3421 }
3422 else
3423 {
3424 _minPlayBufDelay += 10;
3425 _playBufDelay += 15;
3426 _waitCounter = 50;
3427 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "New playout states (er=0, erZero=1): minPlayBufDelay=%u, playBufDelay=%u", _minPlayBufDelay, _playBufDelay);
3428 }
3429 }
3430 }
3431 else if (remTimeMS < _minPlayBufDelay)
3432 {
3433 // If there is less audio in the playout buffer than the adaptive minimum
3434 // delay (_minPlayBufDelay), increase the buffer-size limit value. _waitCounter
3435 // prevents _playBufDelay from being increased every time this function is called.
3436
3437 if (_waitCounter > 30)
3438 {
3439 _playBufDelay += 10;
3440 if (_intro == 0)
3441 _waitCounter = 0;
3442 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Playout threshold is increased: playBufDelay=%u", _playBufDelay);
3443 }
3444 }
3445 else if (remTimeMS < thresholdMS - 9)
3446 {
3447 _erZeroCounter = 0;
3448 }
3449 else
3450 {
3451 _erZeroCounter = 0;
3452 _dTcheckPlayBufDelay = 10;
3453 }
3454
3455 QueryPerformanceCounter(&t1); // measure time: START
3456
3457 // Ask for new PCM data to be played out using the AudioDeviceBuffer.
3458 // Ensure that this callback is executed without taking the audio-thread lock.
3459 //
3460 UnLock();
3461 uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(PLAY_BUF_SIZE_IN_SAMPLES);
3462 Lock();
3463
3464 if (OutputSanityCheckAfterUnlockedPeriod() == -1)
3465 {
3466 // assert(false);
3467 return -1;
3468 }
3469
3470 nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
3471 if (nSamples != PLAY_BUF_SIZE_IN_SAMPLES)
3472 {
3473 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "invalid number of output samples(%d)", nSamples);
3474 }
3475
3476 QueryPerformanceCounter(&t2); // measure time: STOP
3477 consumedTime = (int)(t2.QuadPart - t1.QuadPart);
3478
3479 Write(playBuffer, PLAY_BUF_SIZE_IN_SAMPLES);
3480
3481 } // if (remTimeMS < thresholdMS + 9)
3482 else if (thresholdMS + 9 < remTimeMS)
3483 {
3484 _erZeroCounter = 0;
3485 _dTcheckPlayBufDelay = 2; // check buffer more often
3486 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Need to check playout buffer more often (dT=%u, remTimeMS=%u)", _dTcheckPlayBufDelay, remTimeMS);
3487 }
3488
3489 // If the buffer size has been stable for 20 seconds, try to decrease it
3490 if (_waitCounter > 2000)
3491 {
3492 _intro = 0;
3493 _playBufDelay--;
3494 _waitCounter = 1990;
3495 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Playout threshold is decreased: playBufDelay=%u", _playBufDelay);
3496 }
3497
3498 // Do not let the playout threshold drop below the adaptive minimum delay
3499 if (_playBufDelay < _minPlayBufDelay)
3500 {
3501 _playBufDelay = _minPlayBufDelay;
3502 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Playout threshold is limited to %u", _minPlayBufDelay);
3503 }
3504
3505 // Cap the playout threshold (sound card playback delay) at 150 ms
3506 if (_playBufDelay > 150)
3507 {
3508 _playBufDelay = 150;
3509 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Playout threshold is limited to %d", _playBufDelay);
3510 }
3511
3512 // Cap the adaptive minimum delay (_minPlayBufDelay) at _MAX_minBuffer (65 ms).
3513 // Deactivated in "useHeader mode" (_useHeader > 0).
3514 if (_minPlayBufDelay > _MAX_minBuffer &&
3515 (_useHeader == 0))
3516 {
3517 _minPlayBufDelay = _MAX_minBuffer;
3518 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Minimum playout threshold is limited to %d", _MAX_minBuffer);
3519 }
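// Net effect of the three clamps above, expressed as a formula (illustrative):
//
//   _playBufDelay    = min(150, max(_playBufDelay, _minPlayBufDelay))
//   _minPlayBufDelay = min(_minPlayBufDelay, _MAX_minBuffer)   // only when _useHeader == 0
//
// Worked example: if the adaptive scheme pushed _playBufDelay to 160 ms with
// _minPlayBufDelay at 45 ms, the threshold used from now on is 150 ms.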
3520
3521 return (0);
3522 }
3523
3524 // ----------------------------------------------------------------------------
3525 // Write
3526 // ----------------------------------------------------------------------------
3527
3528 int32_t AudioDeviceWindowsWave::Write(int8_t* data, uint16_t nSamples)
3529 {
3530 if (_hWaveOut == NULL)
3531 {
3532 return -1;
3533 }
3534
3535 if (_playIsInitialized)
3536 {
3537 MMRESULT res;
3538
3539 const uint16_t bufCount(_playBufCount);
3540
3541 // Place data in the memory associated with _waveHeaderOut[bufCount]
3542 //
3543 const int16_t nBytes = (2*_playChannels)*nSamples;
3544 memcpy(&_playBuffer[bufCount][0], &data[0], nBytes);
3545
3546 // Send a data block to the given waveform-audio output device.
3547 //
3548 // When the buffer is finished, the WHDR_DONE bit is set in the dwFlags
3549 // member of the WAVEHDR structure. The buffer must be prepared with the
3550 // waveOutPrepareHeader function before it is passed to waveOutWrite.
3551 // Unless the device is paused by calling the waveOutPause function,
3552 // playback begins when the first data block is sent to the device.
3553 //
3554 res = waveOutWrite(_hWaveOut, &_waveHeaderOut[bufCount], sizeof(_waveHeaderOut[bufCount]));
3555 if (MMSYSERR_NOERROR != res)
3556 {
3557 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveOutWrite(%d) failed (err=%d)", bufCount, res);
3558 TraceWaveOutError(res);
3559
3560 _writeErrors++;
3561 if (_writeErrors > 10)
3562 {
3563 if (_playError == 1)
3564 {
3565 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending playout error exists");
3566 }
3567 _playError = 1; // triggers callback from module process thread
3568 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kPlayoutError message posted: _writeErrors=%u", _writeErrors);
3569 }
3570
3571 return -1;
3572 }
3573
3574 _playBufCount = (_playBufCount+1) % N_BUFFERS_OUT; // increase buffer counter modulo size of total buffer
3575 _writtenSamples += nSamples; // each sample is 2 or 4 bytes
3576 _writeErrors = 0;
3577 }
3578
3579 return 0;
3580 }
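// Write() assumes that every _waveHeaderOut[i] was set up once with
// waveOutPrepareHeader() during playout initialization and is only re-queued here.
// Hedged sketch of that one-time setup for a single output header (illustrative
// only; the real initialization is done elsewhere in this file):
//
//   WAVEHDR& hdr = _waveHeaderOut[i];
//   ZeroMemory(&hdr, sizeof(hdr));
//   hdr.lpData = reinterpret_cast<LPSTR>(&_playBuffer[i][0]);
//   hdr.dwBufferLength = 2 * _playChannels * PLAY_BUF_SIZE_IN_SAMPLES;
//   MMRESULT res = waveOutPrepareHeader(_hWaveOut, &hdr, sizeof(WAVEHDR));
//   // res must be MMSYSERR_NOERROR before the header may be passed to waveOutWrite().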
3581
3582 // ----------------------------------------------------------------------------
3583 // GetClockDrift
3584 // ----------------------------------------------------------------------------
3585
3586 int32_t AudioDeviceWindowsWave::GetClockDrift(const uint32_t plSamp, const uint32_t rcSamp)
3587 {
3588 int drift = 0;
3589 unsigned int plSampDiff = 0, rcSampDiff = 0;
3590
3591 if (plSamp >= _plSampOld)
3592 {
3593 plSampDiff = plSamp - _plSampOld;
3594 }
3595 else
3596 {
3597 // Wrap
3598 int i = 31;
3599 while(_plSampOld <= (unsigned int)POW2(i))
3600 {
3601 i--;
3602 }
3603
3604 // Add the amount remaining prior to wrapping
3605 plSampDiff = plSamp + POW2(i + 1) - _plSampOld;
3606 }
3607
3608 if (rcSamp >= _rcSampOld)
3609 {
3610 rcSampDiff = rcSamp - _rcSampOld;
3611 }
3612 else
3613 { // Wrap
3614 int i = 31;
3615 while(_rcSampOld <= (unsigned int)POW2(i))
3616 {
3617 i--;
3618 }
3619
3620 rcSampDiff = rcSamp + POW2(i + 1) - _rcSampOld;
3621 }
3622
3623 drift = plSampDiff - rcSampDiff;
3624
3625 _plSampOld = plSamp;
3626 _rcSampOld = rcSamp;
3627
3628 return drift;
3629 }
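// Worked example of the wrap handling above. POW2(A) expands to 2 << (A - 1),
// i.e. 2^A, and the loop finds the power of two the counter wrapped around:
//
//   _plSampOld = 100000;   // 2^16 < 100000 <= 2^17, so the loop stops at i = 16
//   plSamp     = 500;      // counter has wrapped around 2^17 = 131072
//   plSampDiff = 500 + 131072 - 100000 = 31572 samples played since the last call
//
// The same compensation is applied to the recording counter; the returned drift is
// simply the difference between samples played out and samples recorded.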
3630
3631 // ----------------------------------------------------------------------------
3632 // MonitorRecording
3633 // ----------------------------------------------------------------------------
3634
3635 int32_t AudioDeviceWindowsWave::MonitorRecording(const uint32_t time)
3636 {
3637 const uint16_t bytesPerSample = 2*_recChannels;
3638 const uint32_t nRecordedSamples = _recordedBytes/bytesPerSample;
3639
3640 if (nRecordedSamples > 5*N_REC_SAMPLES_PER_SEC)
3641 {
3642 // 5 seconds of audio has been recorded...
3643 if ((time - _prevRecByteCheckTime) > 5700)
3644 {
3645 // ...and it was more than 5.7 seconds since we last did this check <=>
3646 // we have not been able to record 5 seconds of audio in 5.7 seconds,
3647 // hence a problem should be reported.
3648 // This problem can be related to USB overload.
3649 //
3650 if (_recWarning == 1)
3651 {
3652 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending recording warning exists");
3653 }
3654 _recWarning = 1; // triggers callback from module process thread
3655 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "kRecordingWarning message posted: time-_prevRecByteCheckTime=%d", time - _prevRecByteCheckTime);
3656 }
3657
3658 _recordedBytes = 0; // restart "check again when 5 seconds are recorded"
3659 _prevRecByteCheckTime = time; // reset timer to measure time for recording of 5 seconds
3660 }
3661
3662 if ((time - _prevRecByteCheckTime) > 8000)
3663 {
3664 // It has been more than 8 seconds since we were able to confirm that 5 seconds of
3665 // audio was recorded, hence we have not been able to record 5 seconds in
3666 // 8 seconds => the complete recording process is most likely dead.
3667 //
3668 if (_recError == 1)
3669 {
3670 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending recording error exists");
3671 }
3672 _recError = 1; // triggers callback from module process thread
3673 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kRecordingError message posted: time-_prevRecByteCheckTime=%d", time - _prevRecByteCheckTime);
3674
3675 _prevRecByteCheckTime = time;
3676 }
3677
3678 return 0;
3679 }
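// Worked timeline for the checks above (all times relative to the last reset of
// _prevRecByteCheckTime):
//
//   t = 4500 ms: 5 s of audio confirmed            => healthy, counters are reset
//   t = 6000 ms: 5 s confirmed but 6000 > 5700     => kRecordingWarning is posted
//   t = 8500 ms: nothing confirmed and 8500 > 8000 => kRecordingError is posted
//
// The warning typically points at temporary overload (e.g. USB), while the error
// indicates that the capture path has most likely stopped delivering data.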
3680
3681 // ----------------------------------------------------------------------------
3682 // RestartTimerIfNeeded
3683 //
3684 // Restart the timer if needed (it seems to get messed up after hibernation).
3685 // ----------------------------------------------------------------------------
3686
3687 int32_t AudioDeviceWindowsWave::RestartTimerIfNeeded(const uint32_t time)
3688 {
3689 const uint32_t diffMS = time - _prevTimerCheckTime;
3690 _prevTimerCheckTime = time;
3691
3692 if (diffMS > 7)
3693 {
3694 // one timer-issue detected...
3695 _timerFaults++;
3696 if (_timerFaults > 5 && _timerRestartAttempts < 2)
3697 {
3698 // Reinitialize timer event if event fails to execute at least every 5ms.
3699 // On some machines this helps and the timer starts working as it should again,
3700 // but not on all machines (we have seen issues on e.g. the IBM T60).
3701 // Therefore, the scheme below ensures that we make at most 2 attempts to restart the timer.
3702 // For the cases where restart does not do the trick, we compensate for the reduced
3703 // resolution on both the recording and playout sides.
3704 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, " timer issue detected => timer is restarted");
3705 _timeEvent.StopTimer();
3706 _timeEvent.StartTimer(true, TIMER_PERIOD_MS);
3707 // make sure the timer gets time to start up and we don't stop/start it several times over and over again
3708 _timerFaults = -20;
3709 _timerRestartAttempts++;
3710 }
3711 }
3712 else
3713 {
3714 // restart timer-check scheme since we are OK
3715 _timerFaults = 0;
3716 _timerRestartAttempts = 0;
3717 }
3718
3719 return 0;
3720 }
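// The timer that drives this thread is an EventTimerWrapper, so a "restart" is just
// StopTimer() followed by a periodic StartTimer(). Hedged stand-alone sketch of the
// same pattern (illustrative only, assuming the system_wrappers event API already
// used by this file):
//
//   EventTimerWrapper* timer = EventTimerWrapper::Create();
//   timer->StartTimer(true, TIMER_PERIOD_MS);                 // periodic tick
//   EventTypeWrapper ret = timer->Wait(2 * TIMER_PERIOD_MS);  // kEventSignaled on each tick
//   timer->StopTimer();
//   delete timer;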
3721
3722
3723 bool AudioDeviceWindowsWave::KeyPressed() const {
3724
3725 int key_down = 0;
3726 for (int key = VK_SPACE; key < VK_NUMLOCK; key++) {
3727 short res = GetAsyncKeyState(key);
3728 key_down |= res & 0x1; // Get the LSB
3729 }
3730 return (key_down > 0);
3731 }
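// GetAsyncKeyState() packs two flags into its return value: the most significant bit
// reports that the key is down right now, while the least significant bit reports that
// the key was pressed since the previous query (the LSB is what the loop above
// accumulates). Hedged single-key sketch (illustrative only):
//
//   SHORT state = GetAsyncKeyState(VK_SPACE);
//   bool isDownNow       = (state & 0x8000) != 0;  // MSB: currently held down
//   bool wasPressedSince = (state & 0x0001) != 0;  // LSB: pressed since last call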
3732 } // namespace webrtc
3733