• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #pragma warning(disable: 4995)  // name was marked as #pragma deprecated
12 
13 #if (_MSC_VER >= 1310) && (_MSC_VER < 1400)
14 // Reports the major and minor versions of the compiler.
15 // For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents version 13 and a 1.0 point release.
16 // The Visual C++ 2005 compiler version is 1400.
17 // Type cl /? at the command line to see the major and minor versions of your compiler along with the build number.
18 #pragma message(">> INFO: Windows Core Audio is not supported in VS 2003")
19 #endif
20 
21 #include "webrtc/modules/audio_device/audio_device_config.h"
22 
23 #ifdef WEBRTC_WINDOWS_CORE_AUDIO_BUILD
24 
25 #include "webrtc/modules/audio_device/win/audio_device_core_win.h"
26 
27 #include <assert.h>
28 #include <string.h>
29 
30 #include <windows.h>
31 #include <comdef.h>
32 #include <dmo.h>
33 #include <Functiondiscoverykeys_devpkey.h>
34 #include <mmsystem.h>
35 #include <strsafe.h>
36 #include <uuids.h>
37 
38 #include "webrtc/base/platform_thread.h"
39 #include "webrtc/system_wrappers/include/sleep.h"
40 #include "webrtc/system_wrappers/include/trace.h"
41 
// Macro that calls a COM method returning HRESULT value.
// NOTE: jumps via goto, so an `Exit:` label must exist in the calling function.
#define EXIT_ON_ERROR(hres)    do { if (FAILED(hres)) goto Exit; } while(0)

// Macro that continues to a COM error.
// NOTE: jumps via goto, so a `Next:` label must exist in the calling function.
#define CONTINUE_ON_ERROR(hres) do { if (FAILED(hres)) goto Next; } while(0)

// Macro that releases a COM object if not NULL and resets the pointer,
// making repeated invocations on the same pointer safe.
#define SAFE_RELEASE(p)     do { if ((p)) { (p)->Release(); (p) = NULL; } } while(0)

// Rounds to the nearest integer, halves rounded away from zero.
// NOTE(review): the argument is evaluated twice - do not pass expressions
// with side effects.
#define ROUND(x) ((x) >=0 ? (int)((x) + 0.5) : (int)((x) - 0.5))

// REFERENCE_TIME time units per millisecond
#define REFTIMES_PER_MILLISEC  10000
55 
// Debugger thread-naming payload in the layout expected by Visual Studio.
// Presumably passed to RaiseException() to set a thread name for the
// debugger (the usage site is outside this chunk - confirm).
typedef struct tagTHREADNAME_INFO
{
   DWORD dwType;        // must be 0x1000
   LPCSTR szName;       // pointer to name (in user addr space)
   DWORD dwThreadID;    // thread ID (-1=caller thread)
   DWORD dwFlags;       // reserved for future use, must be zero
} THREADNAME_INFO;
63 
64 namespace webrtc {
65 namespace {
66 
67 enum { COM_THREADING_MODEL = COINIT_MULTITHREADED };
68 
69 enum
70 {
71     kAecCaptureStreamIndex = 0,
72     kAecRenderStreamIndex = 1
73 };
74 
75 // An implementation of IMediaBuffer, as required for
76 // IMediaObject::ProcessOutput(). After consuming data provided by
77 // ProcessOutput(), call SetLength() to update the buffer availability.
78 //
79 // Example implementation:
80 // http://msdn.microsoft.com/en-us/library/dd376684(v=vs.85).aspx
81 class MediaBufferImpl : public IMediaBuffer
82 {
83 public:
MediaBufferImpl(DWORD maxLength)84     explicit MediaBufferImpl(DWORD maxLength)
85         : _data(new BYTE[maxLength]),
86           _length(0),
87           _maxLength(maxLength),
88           _refCount(0)
89     {}
90 
91     // IMediaBuffer methods.
STDMETHOD(GetBufferAndLength (BYTE ** ppBuffer,DWORD * pcbLength))92     STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength))
93     {
94         if (!ppBuffer || !pcbLength)
95         {
96             return E_POINTER;
97         }
98 
99         *ppBuffer = _data;
100         *pcbLength = _length;
101 
102         return S_OK;
103     }
104 
STDMETHOD(GetMaxLength (DWORD * pcbMaxLength))105     STDMETHOD(GetMaxLength(DWORD* pcbMaxLength))
106     {
107         if (!pcbMaxLength)
108         {
109             return E_POINTER;
110         }
111 
112         *pcbMaxLength = _maxLength;
113         return S_OK;
114     }
115 
STDMETHOD(SetLength (DWORD cbLength))116     STDMETHOD(SetLength(DWORD cbLength))
117     {
118         if (cbLength > _maxLength)
119         {
120             return E_INVALIDARG;
121         }
122 
123         _length = cbLength;
124         return S_OK;
125     }
126 
127     // IUnknown methods.
STDMETHOD_(ULONG,AddRef ())128     STDMETHOD_(ULONG, AddRef())
129     {
130         return InterlockedIncrement(&_refCount);
131     }
132 
STDMETHOD(QueryInterface (REFIID riid,void ** ppv))133     STDMETHOD(QueryInterface(REFIID riid, void** ppv))
134     {
135         if (!ppv)
136         {
137             return E_POINTER;
138         }
139         else if (riid != IID_IMediaBuffer && riid != IID_IUnknown)
140         {
141             return E_NOINTERFACE;
142         }
143 
144         *ppv = static_cast<IMediaBuffer*>(this);
145         AddRef();
146         return S_OK;
147     }
148 
STDMETHOD_(ULONG,Release ())149     STDMETHOD_(ULONG, Release())
150     {
151         LONG refCount = InterlockedDecrement(&_refCount);
152         if (refCount == 0)
153         {
154             delete this;
155         }
156 
157         return refCount;
158     }
159 
160 private:
~MediaBufferImpl()161     ~MediaBufferImpl()
162     {
163         delete [] _data;
164     }
165 
166     BYTE* _data;
167     DWORD _length;
168     const DWORD _maxLength;
169     LONG _refCount;
170 };
171 }  // namespace
172 
173 // ============================================================================
174 //                              Static Methods
175 // ============================================================================
176 
177 // ----------------------------------------------------------------------------
178 //  CoreAudioIsSupported
179 // ----------------------------------------------------------------------------
180 
// Run-time probe that decides whether WASAPI (Core Audio) can be used:
// 1) checks the OS version, 2) initializes COM as MTA, 3) verifies that the
// MMDevice API is reachable, and 4) exercises a temporary
// AudioDeviceWindowsCore instance against every playout and recording
// device. Returns true only if every step succeeds.
bool AudioDeviceWindowsCore::CoreAudioIsSupported()
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1, "%s", __FUNCTION__);

    bool MMDeviceIsAvailable(false);
    bool coreAudioIsSupported(false);

    HRESULT hr(S_OK);
    TCHAR buf[MAXERRORLENGTH];
    TCHAR errorText[MAXERRORLENGTH];

    // 1) Check if Windows version is Vista SP1 or later.
    //
    // CoreAudio is only available on Vista SP1 and later.
    //
    OSVERSIONINFOEX osvi;
    DWORDLONG dwlConditionMask = 0;
    int op = VER_LESS_EQUAL;

    // Initialize the OSVERSIONINFOEX structure.
    ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
    osvi.dwMajorVersion = 6;
    osvi.dwMinorVersion = 0;
    osvi.wServicePackMajor = 0;
    osvi.wServicePackMinor = 0;
    osvi.wProductType = VER_NT_WORKSTATION;

    // Initialize the condition mask.
    VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
    VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
    VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);

    DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
                       VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
                       VER_PRODUCT_TYPE;

    // Perform the test. A non-zero result means the running workstation OS
    // is <= Vista 6.0 RTM (or XP), i.e. too old for Core Audio.
    BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask,
                                            dwlConditionMask);
    if (isVistaRTMorXP != 0)
    {
        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
            "*** Windows Core Audio is only supported on Vista SP1 or later "
            "=> will revert to the Wave API ***");
        return false;
    }

    // 2) Initializes the COM library for use by the calling thread.

    // The COM init wrapper sets the thread's concurrency model to MTA,
    // and creates a new apartment for the thread if one is required. The
    // wrapper also ensures that each call to CoInitializeEx is balanced
    // by a corresponding call to CoUninitialize.
    //
    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
    if (!comInit.succeeded()) {
      // Things will work even if an STA thread is calling this method but we
      // want to ensure that MTA is used and therefore return false here.
      return false;
    }

    // 3) Check if the MMDevice API is available.
    //
    // The Windows Multimedia Device (MMDevice) API enables audio clients to
    // discover audio endpoint devices, determine their capabilities, and create
    // driver instances for those devices.
    // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
    // The MMDevice API consists of several interfaces. The first of these is the
    // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice API,
    // a client obtains a reference to the IMMDeviceEnumerator interface of a
    // device-enumerator object by calling the CoCreateInstance function.
    //
    // Through the IMMDeviceEnumerator interface, the client can obtain references
    // to the other interfaces in the MMDevice API. The MMDevice API implements
    // the following interfaces:
    //
    // IMMDevice            Represents an audio device.
    // IMMDeviceCollection  Represents a collection of audio devices.
    // IMMDeviceEnumerator  Provides methods for enumerating audio devices.
    // IMMEndpoint          Represents an audio endpoint device.
    //
    IMMDeviceEnumerator* pIMMD(NULL);
    const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
    const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);

    hr = CoCreateInstance(
            CLSID_MMDeviceEnumerator,   // GUID value of MMDeviceEnumerator coclass
            NULL,
            CLSCTX_ALL,
            IID_IMMDeviceEnumerator,    // GUID value of the IMMDeviceEnumerator interface
            (void**)&pIMMD );

    if (FAILED(hr))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
            "AudioDeviceWindowsCore::CoreAudioIsSupported() Failed to create the required COM object", hr);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1,
            "AudioDeviceWindowsCore::CoreAudioIsSupported() CoCreateInstance(MMDeviceEnumerator) failed (hr=0x%x)", hr);

        const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
                              FORMAT_MESSAGE_IGNORE_INSERTS;
        const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);

        // Gets the system's human readable message string for this HRESULT.
        // All error message in English by default.
        // NOTE(review): FormatMessageW fills a WCHAR buffer but errorText is
        // declared TCHAR, and ::isspace below receives that character;
        // this presumably assumes a UNICODE build (iswspace would be the
        // wide-character-correct call) - confirm.
        DWORD messageLength = ::FormatMessageW(dwFlags,
                                               0,
                                               hr,
                                               dwLangID,
                                               errorText,
                                               MAXERRORLENGTH,
                                               NULL);

        assert(messageLength <= MAXERRORLENGTH);

        // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.).
        for (; messageLength && ::isspace(errorText[messageLength - 1]);
             --messageLength)
        {
            errorText[messageLength - 1] = '\0';
        }

        StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
        StringCchCat(buf, MAXERRORLENGTH, errorText);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1, "%S", buf);
    }
    else
    {
        MMDeviceIsAvailable = true;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1,
            "AudioDeviceWindowsCore::CoreAudioIsSupported() CoCreateInstance(MMDeviceEnumerator) succeeded", hr);
        SAFE_RELEASE(pIMMD);
    }

    // 4) Verify that we can create and initialize our Core Audio class.
    //
    // Also, perform a limited "API test" to ensure that Core Audio is supported for all devices.
    //
    if (MMDeviceIsAvailable)
    {
        coreAudioIsSupported = false;

        AudioDeviceWindowsCore* p = new AudioDeviceWindowsCore(-1);
        if (p == NULL)
        {
            return false;
        }

        // |ok| accumulates failures from every probe below; any non-zero
        // contribution marks Core Audio as unsupported.
        int ok(0);
        int temp_ok(0);
        bool available(false);

        ok |= p->Init();

        // Probe every recording device: select it, verify availability and
        // initialize the microphone path.
        int16_t numDevsRec = p->RecordingDevices();
        for (uint16_t i = 0; i < numDevsRec; i++)
        {
            ok |= p->SetRecordingDevice(i);
            temp_ok = p->RecordingIsAvailable(available);
            ok |= temp_ok;
            ok |= (available == false);
            if (available)
            {
                ok |= p->InitMicrophone();
            }
            if (ok)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                    "AudioDeviceWindowsCore::CoreAudioIsSupported() Failed to use Core Audio Recording for device id=%i", i);
            }
        }

        // Probe every playout device the same way.
        int16_t numDevsPlay = p->PlayoutDevices();
        for (uint16_t i = 0; i < numDevsPlay; i++)
        {
            ok |= p->SetPlayoutDevice(i);
            temp_ok = p->PlayoutIsAvailable(available);
            ok |= temp_ok;
            ok |= (available == false);
            if (available)
            {
                ok |= p->InitSpeaker();
            }
            if (ok)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1 ,
                    "AudioDeviceWindowsCore::CoreAudioIsSupported() Failed to use Core Audio Playout for device id=%i", i);
            }
        }

        ok |= p->Terminate();

        if (ok == 0)
        {
            coreAudioIsSupported = true;
        }

        delete p;
    }

    if (coreAudioIsSupported)
    {
        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, "*** Windows Core Audio is supported ***");
    }
    else
    {
        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, "*** Windows Core Audio is NOT supported => will revert to the Wave API ***");
    }

    return (coreAudioIsSupported);
}
395 
396 // ============================================================================
397 //                            Construction & Destruction
398 // ============================================================================
399 
400 // ----------------------------------------------------------------------------
401 //  AudioDeviceWindowsCore() - ctor
402 // ----------------------------------------------------------------------------
403 
// Constructs the device object: initializes COM (MTA) for the lifetime of
// the instance, optionally loads the Avrt DLL (multimedia thread priority
// support), creates all internal synchronization events, allocates the
// IMMDeviceEnumerator, and creates the DMO used for built-in WASAPI AEC.
AudioDeviceWindowsCore::AudioDeviceWindowsCore(const int32_t id) :
    _comInit(ScopedCOMInitializer::kMTA),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _volumeMutex(*CriticalSectionWrapper::CreateCriticalSection()),
    _id(id),
    _ptrAudioBuffer(NULL),
    _ptrEnumerator(NULL),
    _ptrRenderCollection(NULL),
    _ptrCaptureCollection(NULL),
    _ptrDeviceOut(NULL),
    _ptrDeviceIn(NULL),
    _ptrClientOut(NULL),
    _ptrClientIn(NULL),
    _ptrRenderClient(NULL),
    _ptrCaptureClient(NULL),
    _ptrCaptureVolume(NULL),
    _ptrRenderSimpleVolume(NULL),
    _dmo(NULL),
    _mediaBuffer(NULL),
    _builtInAecEnabled(false),
    _playAudioFrameSize(0),
    _playSampleRate(0),
    _playBlockSize(0),
    _playChannels(2),
    _sndCardPlayDelay(0),
    _sndCardRecDelay(0),
    _writtenSamples(0),
    _readSamples(0),
    _playAcc(0),
    _recAudioFrameSize(0),
    _recSampleRate(0),
    _recBlockSize(0),
    _recChannels(2),
    _avrtLibrary(NULL),
    _winSupportAvrt(false),
    _hRenderSamplesReadyEvent(NULL),
    _hPlayThread(NULL),
    _hCaptureSamplesReadyEvent(NULL),
    _hRecThread(NULL),
    _hShutdownRenderEvent(NULL),
    _hShutdownCaptureEvent(NULL),
    _hRenderStartedEvent(NULL),
    _hCaptureStartedEvent(NULL),
    _hGetCaptureVolumeThread(NULL),
    _hSetCaptureVolumeThread(NULL),
    _hSetCaptureVolumeEvent(NULL),
    _hMmTask(NULL),
    _initialized(false),
    _recording(false),
    _playing(false),
    _recIsInitialized(false),
    _playIsInitialized(false),
    _speakerIsInitialized(false),
    _microphoneIsInitialized(false),
    _AGC(false),
    _playWarning(0),
    _playError(0),
    _recWarning(0),
    _recError(0),
    _playBufType(AudioDeviceModule::kAdaptiveBufferSize),
    _playBufDelay(80),
    _playBufDelayFixed(80),
    _usingInputDeviceIndex(false),
    _usingOutputDeviceIndex(false),
    _inputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
    _outputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
    _inputDeviceIndex(0),
    _outputDeviceIndex(0),
    _newMicLevel(0)
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__);
    assert(_comInit.succeeded());

    // Try to load the Avrt DLL
    if (!_avrtLibrary)
    {
        // Get handle to the Avrt DLL module.
        _avrtLibrary = LoadLibrary(TEXT("Avrt.dll"));
        if (_avrtLibrary)
        {
            // Handle is valid (should only happen if OS larger than vista & win7).
            // Try to get the function addresses.
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() The Avrt DLL module is now loaded");

            _PAvRevertMmThreadCharacteristics = (PAvRevertMmThreadCharacteristics)GetProcAddress(_avrtLibrary, "AvRevertMmThreadCharacteristics");
            _PAvSetMmThreadCharacteristicsA = (PAvSetMmThreadCharacteristicsA)GetProcAddress(_avrtLibrary, "AvSetMmThreadCharacteristicsA");
            _PAvSetMmThreadPriority = (PAvSetMmThreadPriority)GetProcAddress(_avrtLibrary, "AvSetMmThreadPriority");

            // Avrt support is only flagged when all three entry points
            // resolved; partial resolution leaves _winSupportAvrt false.
            if ( _PAvRevertMmThreadCharacteristics &&
                 _PAvSetMmThreadCharacteristicsA &&
                 _PAvSetMmThreadPriority)
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvRevertMmThreadCharacteristics() is OK");
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvSetMmThreadCharacteristicsA() is OK");
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvSetMmThreadPriority() is OK");
                _winSupportAvrt = true;
            }
        }
    }

    // Create our samples ready events - we want auto reset events that start in the not-signaled state.
    // The state of an auto-reset event object remains signaled until a single waiting thread is released,
    // at which time the system automatically sets the state to nonsignaled. If no threads are waiting,
    // the event object's state remains signaled.
    // (Except for _hShutdownCaptureEvent, which is used to shutdown multiple threads).
    _hRenderSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hCaptureSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hShutdownRenderEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hShutdownCaptureEvent = CreateEvent(NULL, TRUE, FALSE, NULL);  // manual-reset
    _hRenderStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hCaptureStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hSetCaptureVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL);

    _perfCounterFreq.QuadPart = 1;
    _perfCounterFactor = 0.0;
    _avgCPULoad = 0.0;

    // list of number of channels to use on recording side
    _recChannelsPrioList[0] = 2;    // stereo is prio 1
    _recChannelsPrioList[1] = 1;    // mono is prio 2

    // list of number of channels to use on playout side
    _playChannelsPrioList[0] = 2;    // stereo is prio 1
    _playChannelsPrioList[1] = 1;    // mono is prio 2

    HRESULT hr;

    // We know that this API will work since it has already been verified in
    // CoreAudioIsSupported, hence no need to check for errors here as well.

    // Retrive the IMMDeviceEnumerator API (should load the MMDevAPI.dll)
    // TODO(henrika): we should probably move this allocation to Init() instead
    // and deallocate in Terminate() to make the implementation more symmetric.
    CoCreateInstance(
      __uuidof(MMDeviceEnumerator),
      NULL,
      CLSCTX_ALL,
      __uuidof(IMMDeviceEnumerator),
      reinterpret_cast<void**>(&_ptrEnumerator));
    assert(NULL != _ptrEnumerator);

    // DMO initialization for built-in WASAPI AEC.
    {
        IMediaObject* ptrDMO = NULL;
        hr = CoCreateInstance(CLSID_CWMAudioAEC,
                              NULL,
                              CLSCTX_INPROC_SERVER,
                              IID_IMediaObject,
                              reinterpret_cast<void**>(&ptrDMO));
        if (FAILED(hr) || ptrDMO == NULL)
        {
            // Since we check that _dmo is non-NULL in EnableBuiltInAEC(), the
            // feature is prevented from being enabled.
            _builtInAecEnabled = false;
            _TraceCOMError(hr);
        }
        // _dmo presumably add-refs on assignment (scoped COM pointer), so the
        // local reference can be released here - confirm against its type.
        _dmo = ptrDMO;
        SAFE_RELEASE(ptrDMO);
    }
}
564 
565 // ----------------------------------------------------------------------------
566 //  AudioDeviceWindowsCore() - dtor
567 // ----------------------------------------------------------------------------
568 
// Destructor: stops all activity via Terminate(), releases the enumerator
// created in the constructor, closes every event handle, unloads the Avrt
// DLL if it was loaded, and frees the critical sections.
AudioDeviceWindowsCore::~AudioDeviceWindowsCore()
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", __FUNCTION__);

    Terminate();

    // The IMMDeviceEnumerator is created during construction. Must release
    // it here and not in Terminate() since we don't recreate it in Init().
    SAFE_RELEASE(_ptrEnumerator);

    // Not owned by this class; simply forget the pointer.
    _ptrAudioBuffer = NULL;

    if (NULL != _hRenderSamplesReadyEvent)
    {
        CloseHandle(_hRenderSamplesReadyEvent);
        _hRenderSamplesReadyEvent = NULL;
    }

    if (NULL != _hCaptureSamplesReadyEvent)
    {
        CloseHandle(_hCaptureSamplesReadyEvent);
        _hCaptureSamplesReadyEvent = NULL;
    }

    if (NULL != _hRenderStartedEvent)
    {
        CloseHandle(_hRenderStartedEvent);
        _hRenderStartedEvent = NULL;
    }

    if (NULL != _hCaptureStartedEvent)
    {
        CloseHandle(_hCaptureStartedEvent);
        _hCaptureStartedEvent = NULL;
    }

    if (NULL != _hShutdownRenderEvent)
    {
        CloseHandle(_hShutdownRenderEvent);
        _hShutdownRenderEvent = NULL;
    }

    if (NULL != _hShutdownCaptureEvent)
    {
        CloseHandle(_hShutdownCaptureEvent);
        _hShutdownCaptureEvent = NULL;
    }

    if (NULL != _hSetCaptureVolumeEvent)
    {
        CloseHandle(_hSetCaptureVolumeEvent);
        _hSetCaptureVolumeEvent = NULL;
    }

    if (_avrtLibrary)
    {
        BOOL freeOK = FreeLibrary(_avrtLibrary);
        if (!freeOK)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                "AudioDeviceWindowsCore::~AudioDeviceWindowsCore() failed to free the loaded Avrt DLL module correctly");
        }
        else
        {
            // NOTE(review): success path also logs at warning level -
            // presumably intentional so the unload is always visible; confirm.
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                "AudioDeviceWindowsCore::~AudioDeviceWindowsCore() the Avrt DLL module is now unloaded");
        }
    }

    // The critical sections were heap-allocated by the factory in the
    // constructor's init list; delete through the stored references.
    delete &_critSect;
    delete &_volumeMutex;
}
641 
642 // ============================================================================
643 //                                     API
644 // ============================================================================
645 
646 // ----------------------------------------------------------------------------
647 //  AttachAudioBuffer
648 // ----------------------------------------------------------------------------
649 
// Stores a (non-owned) pointer to the shared AudioDeviceBuffer and resets
// its format parameters; the real values are applied later by
// InitPlayout()/InitRecording().
void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
{

    _ptrAudioBuffer = audioBuffer;

    // Inform the AudioBuffer about default settings for this implementation.
    // Set all values to zero here since the actual settings will be done by
    // InitPlayout and InitRecording later.
    _ptrAudioBuffer->SetRecordingSampleRate(0);
    _ptrAudioBuffer->SetPlayoutSampleRate(0);
    _ptrAudioBuffer->SetRecordingChannels(0);
    _ptrAudioBuffer->SetPlayoutChannels(0);
}
663 
664 // ----------------------------------------------------------------------------
665 //  ActiveAudioLayer
666 // ----------------------------------------------------------------------------
667 
ActiveAudioLayer(AudioDeviceModule::AudioLayer & audioLayer) const668 int32_t AudioDeviceWindowsCore::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const
669 {
670     audioLayer = AudioDeviceModule::kWindowsCoreAudio;
671     return 0;
672 }
673 
674 // ----------------------------------------------------------------------------
675 //  Init
676 // ----------------------------------------------------------------------------
677 
Init()678 int32_t AudioDeviceWindowsCore::Init()
679 {
680 
681     CriticalSectionScoped lock(&_critSect);
682 
683     if (_initialized)
684     {
685         return 0;
686     }
687 
688     _playWarning = 0;
689     _playError = 0;
690     _recWarning = 0;
691     _recError = 0;
692 
693     // Enumerate all audio rendering and capturing endpoint devices.
694     // Note that, some of these will not be able to select by the user.
695     // The complete collection is for internal use only.
696     //
697     _EnumerateEndpointDevicesAll(eRender);
698     _EnumerateEndpointDevicesAll(eCapture);
699 
700     _initialized = true;
701 
702     return 0;
703 }
704 
705 // ----------------------------------------------------------------------------
706 //  Terminate
707 // ----------------------------------------------------------------------------
708 
// Tears down the session: clears all state flags and releases every cached
// COM interface. The device enumerator is deliberately NOT released here
// (it lives until the destructor, since Init() does not recreate it).
// Idempotent - calling while not initialized is a successful no-op.
int32_t AudioDeviceWindowsCore::Terminate()
{

    CriticalSectionScoped lock(&_critSect);

    if (!_initialized) {
        return 0;
    }

    _initialized = false;
    _speakerIsInitialized = false;
    _microphoneIsInitialized = false;
    _playing = false;
    _recording = false;

    // Release all COM interfaces acquired since Init(); SAFE_RELEASE also
    // NULLs each pointer so a later Init()/InitSpeaker() starts clean.
    SAFE_RELEASE(_ptrRenderCollection);
    SAFE_RELEASE(_ptrCaptureCollection);
    SAFE_RELEASE(_ptrDeviceOut);
    SAFE_RELEASE(_ptrDeviceIn);
    SAFE_RELEASE(_ptrClientOut);
    SAFE_RELEASE(_ptrClientIn);
    SAFE_RELEASE(_ptrRenderClient);
    SAFE_RELEASE(_ptrCaptureClient);
    SAFE_RELEASE(_ptrCaptureVolume);
    SAFE_RELEASE(_ptrRenderSimpleVolume);

    return 0;
}
737 
738 // ----------------------------------------------------------------------------
739 //  Initialized
740 // ----------------------------------------------------------------------------
741 
Initialized() const742 bool AudioDeviceWindowsCore::Initialized() const
743 {
744     return (_initialized);
745 }
746 
747 // ----------------------------------------------------------------------------
748 //  InitSpeaker
749 // ----------------------------------------------------------------------------
750 
InitSpeaker()751 int32_t AudioDeviceWindowsCore::InitSpeaker()
752 {
753 
754     CriticalSectionScoped lock(&_critSect);
755 
756     if (_playing)
757     {
758         return -1;
759     }
760 
761     if (_ptrDeviceOut == NULL)
762     {
763         return -1;
764     }
765 
766     if (_usingOutputDeviceIndex)
767     {
768         int16_t nDevices = PlayoutDevices();
769         if (_outputDeviceIndex > (nDevices - 1))
770         {
771             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "current device selection is invalid => unable to initialize");
772             return -1;
773         }
774     }
775 
776     int32_t ret(0);
777 
778     SAFE_RELEASE(_ptrDeviceOut);
779     if (_usingOutputDeviceIndex)
780     {
781         // Refresh the selected rendering endpoint device using current index
782         ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
783     }
784     else
785     {
786         ERole role;
787         (_outputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
788         // Refresh the selected rendering endpoint device using role
789         ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
790     }
791 
792     if (ret != 0 || (_ptrDeviceOut == NULL))
793     {
794         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to initialize the rendering enpoint device");
795         SAFE_RELEASE(_ptrDeviceOut);
796         return -1;
797     }
798 
799     IAudioSessionManager* pManager = NULL;
800     ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager),
801                                   CLSCTX_ALL,
802                                   NULL,
803                                   (void**)&pManager);
804     if (ret != 0 || pManager == NULL)
805     {
806         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
807                     "  failed to initialize the render manager");
808         SAFE_RELEASE(pManager);
809         return -1;
810     }
811 
812     SAFE_RELEASE(_ptrRenderSimpleVolume);
813     ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
814     if (ret != 0 || _ptrRenderSimpleVolume == NULL)
815     {
816         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
817                     "  failed to initialize the render simple volume");
818         SAFE_RELEASE(pManager);
819         SAFE_RELEASE(_ptrRenderSimpleVolume);
820         return -1;
821     }
822     SAFE_RELEASE(pManager);
823 
824     _speakerIsInitialized = true;
825 
826     return 0;
827 }
828 
829 // ----------------------------------------------------------------------------
830 //  InitMicrophone
831 // ----------------------------------------------------------------------------
832 
int32_t AudioDeviceWindowsCore::InitMicrophone()
{
    // Selects (or refreshes) the capture endpoint device and caches its
    // IAudioEndpointVolume interface in _ptrCaptureVolume for later
    // volume queries/changes. Returns 0 on success, -1 on failure.
    // Fails if recording is already active or no capture device is set.

    CriticalSectionScoped lock(&_critSect);

    if (_recording)
    {
        return -1;
    }

    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    if (_usingInputDeviceIndex)
    {
        // RecordingDevices() also refreshes the capture device list.
        int16_t nDevices = RecordingDevices();
        if (_inputDeviceIndex > (nDevices - 1))
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "current device selection is invalid => unable to initialize");
            return -1;
        }
    }

    int32_t ret(0);

    SAFE_RELEASE(_ptrDeviceIn);
    if (_usingInputDeviceIndex)
    {
        // Refresh the selected capture endpoint device using current index
        ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
    }
    else
    {
        ERole role;
        (_inputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
        // Refresh the selected capture endpoint device using role
        ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
    }

    if (ret != 0 || (_ptrDeviceIn == NULL))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to initialize the capturing enpoint device");
        SAFE_RELEASE(_ptrDeviceIn);
        return -1;
    }

    // Acquire the endpoint volume control used by SetMicrophoneVolume()
    // and MicrophoneVolume().
    ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume),
                                 CLSCTX_ALL,
                                 NULL,
                                 reinterpret_cast<void **>(&_ptrCaptureVolume));
    if (ret != 0 || _ptrCaptureVolume == NULL)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "  failed to initialize the capture volume");
        SAFE_RELEASE(_ptrCaptureVolume);
        return -1;
    }

    _microphoneIsInitialized = true;

    return 0;
}
897 
898 // ----------------------------------------------------------------------------
899 //  SpeakerIsInitialized
900 // ----------------------------------------------------------------------------
901 
SpeakerIsInitialized() const902 bool AudioDeviceWindowsCore::SpeakerIsInitialized() const
903 {
904 
905     return (_speakerIsInitialized);
906 }
907 
908 // ----------------------------------------------------------------------------
909 //  MicrophoneIsInitialized
910 // ----------------------------------------------------------------------------
911 
MicrophoneIsInitialized() const912 bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const
913 {
914 
915     return (_microphoneIsInitialized);
916 }
917 
918 // ----------------------------------------------------------------------------
919 //  SpeakerVolumeIsAvailable
920 // ----------------------------------------------------------------------------
921 
SpeakerVolumeIsAvailable(bool & available)922 int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available)
923 {
924 
925     CriticalSectionScoped lock(&_critSect);
926 
927     if (_ptrDeviceOut == NULL)
928     {
929         return -1;
930     }
931 
932     HRESULT hr = S_OK;
933     IAudioSessionManager* pManager = NULL;
934     ISimpleAudioVolume* pVolume = NULL;
935 
936     hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL, (void**)&pManager);
937     EXIT_ON_ERROR(hr);
938 
939     hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
940     EXIT_ON_ERROR(hr);
941 
942     float volume(0.0f);
943     hr = pVolume->GetMasterVolume(&volume);
944     if (FAILED(hr))
945     {
946         available = false;
947     }
948     available = true;
949 
950     SAFE_RELEASE(pManager);
951     SAFE_RELEASE(pVolume);
952 
953     return 0;
954 
955 Exit:
956     _TraceCOMError(hr);
957     SAFE_RELEASE(pManager);
958     SAFE_RELEASE(pVolume);
959     return -1;
960 }
961 
962 // ----------------------------------------------------------------------------
963 //  SetSpeakerVolume
964 // ----------------------------------------------------------------------------
965 
int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume)
{
    // Sets the render session volume. |volume| uses the webrtc-internal
    // range [MIN_CORE_SPEAKER_VOLUME, MAX_CORE_SPEAKER_VOLUME] and is
    // mapped onto Core Audio's scalar range [0.0, 1.0].
    // Returns 0 on success, -1 on failure.

    {
        CriticalSectionScoped lock(&_critSect);

        if (!_speakerIsInitialized)
        {
            return -1;
        }

        if (_ptrDeviceOut == NULL)
        {
            return -1;
        }
    }

    if (volume < (uint32_t)MIN_CORE_SPEAKER_VOLUME ||
        volume > (uint32_t)MAX_CORE_SPEAKER_VOLUME)
    {
        return -1;
    }

    HRESULT hr = S_OK;

    // scale input volume to valid range (0.0 to 1.0)
    const float fLevel = (float)volume/MAX_CORE_SPEAKER_VOLUME;
    // _volumeMutex (not _critSect) serializes access to the cached
    // ISimpleAudioVolume interface (_ptrRenderSimpleVolume).
    _volumeMutex.Enter();
    hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel,NULL);
    _volumeMutex.Leave();
    EXIT_ON_ERROR(hr);

    return 0;

Exit:
    _TraceCOMError(hr);
    return -1;
}
1004 
1005 // ----------------------------------------------------------------------------
1006 //  SpeakerVolume
1007 // ----------------------------------------------------------------------------
1008 
int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const
{
    // Reads the render session volume and scales Core Audio's [0.0, 1.0]
    // level into the webrtc-internal range [0, MAX_CORE_SPEAKER_VOLUME].
    // Returns 0 on success, -1 on failure.

    {
        CriticalSectionScoped lock(&_critSect);

        if (!_speakerIsInitialized)
        {
            return -1;
        }

        if (_ptrDeviceOut == NULL)
        {
            return -1;
        }
    }

    HRESULT hr = S_OK;
    float fLevel(0.0f);

    // _volumeMutex serializes access to the cached ISimpleAudioVolume.
    _volumeMutex.Enter();
    hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
    _volumeMutex.Leave();
    EXIT_ON_ERROR(hr);

    // scale input volume range [0.0,1.0] to valid output range
    volume = static_cast<uint32_t> (fLevel*MAX_CORE_SPEAKER_VOLUME);

    return 0;

Exit:
    _TraceCOMError(hr);
    return -1;
}
1043 
1044 // ----------------------------------------------------------------------------
1045 //  SetWaveOutVolume
1046 // ----------------------------------------------------------------------------
1047 
SetWaveOutVolume(uint16_t volumeLeft,uint16_t volumeRight)1048 int32_t AudioDeviceWindowsCore::SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight)
1049 {
1050     return -1;
1051 }
1052 
1053 // ----------------------------------------------------------------------------
1054 //  WaveOutVolume
1055 // ----------------------------------------------------------------------------
1056 
WaveOutVolume(uint16_t & volumeLeft,uint16_t & volumeRight) const1057 int32_t AudioDeviceWindowsCore::WaveOutVolume(uint16_t& volumeLeft, uint16_t& volumeRight) const
1058 {
1059     return -1;
1060 }
1061 
1062 // ----------------------------------------------------------------------------
1063 //  MaxSpeakerVolume
1064 //
1065 //  The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
1066 //  silence and 1.0 indicates full volume (no attenuation).
1067 //  We add our (webrtc-internal) own max level to match the Wave API and
1068 //  how it is used today in VoE.
1069 // ----------------------------------------------------------------------------
1070 
MaxSpeakerVolume(uint32_t & maxVolume) const1071 int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const
1072 {
1073 
1074     if (!_speakerIsInitialized)
1075     {
1076         return -1;
1077     }
1078 
1079     maxVolume = static_cast<uint32_t> (MAX_CORE_SPEAKER_VOLUME);
1080 
1081     return 0;
1082 }
1083 
1084 // ----------------------------------------------------------------------------
1085 //  MinSpeakerVolume
1086 // ----------------------------------------------------------------------------
1087 
MinSpeakerVolume(uint32_t & minVolume) const1088 int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const
1089 {
1090 
1091     if (!_speakerIsInitialized)
1092     {
1093         return -1;
1094     }
1095 
1096     minVolume = static_cast<uint32_t> (MIN_CORE_SPEAKER_VOLUME);
1097 
1098     return 0;
1099 }
1100 
1101 // ----------------------------------------------------------------------------
1102 //  SpeakerVolumeStepSize
1103 // ----------------------------------------------------------------------------
1104 
SpeakerVolumeStepSize(uint16_t & stepSize) const1105 int32_t AudioDeviceWindowsCore::SpeakerVolumeStepSize(uint16_t& stepSize) const
1106 {
1107 
1108     if (!_speakerIsInitialized)
1109     {
1110         return -1;
1111     }
1112 
1113     stepSize = CORE_SPEAKER_VOLUME_STEP_SIZE;
1114 
1115     return 0;
1116 }
1117 
1118 // ----------------------------------------------------------------------------
1119 //  SpeakerMuteIsAvailable
1120 // ----------------------------------------------------------------------------
1121 
int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available)
{
    // Reports in |available| whether the render endpoint's system mute
    // state can be read. Returns 0 on success, -1 if the endpoint-volume
    // interface could not be activated on the render device.

    CriticalSectionScoped lock(&_critSect);

    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Query the speaker system mute state.
    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume),
        CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    BOOL mute;
    hr = pVolume->GetMute(&mute);
    if (FAILED(hr))
        available = false;
    else
        available = true;

    SAFE_RELEASE(pVolume);

    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1156 
1157 // ----------------------------------------------------------------------------
1158 //  SetSpeakerMute
1159 // ----------------------------------------------------------------------------
1160 
int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable)
{
    // Sets the render endpoint's system mute state to |enable|.
    // Requires an initialized speaker. Returns 0 on success, -1 on failure.

    CriticalSectionScoped lock(&_critSect);

    if (!_speakerIsInitialized)
    {
        return -1;
    }

    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Set the speaker system mute state.
    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    const BOOL mute(enable);
    hr = pVolume->SetMute(mute, NULL);
    EXIT_ON_ERROR(hr);

    SAFE_RELEASE(pVolume);

    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1196 
1197 // ----------------------------------------------------------------------------
1198 //  SpeakerMute
1199 // ----------------------------------------------------------------------------
1200 
int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const
{
    // Reads the render endpoint's system mute state into |enabled|.
    // Requires an initialized speaker. Returns 0 on success, -1 on failure.
    // NOTE(review): unlike SetSpeakerMute(), this method does not take
    // _critSect before touching _ptrDeviceOut — confirm callers serialize
    // access to the device pointer.

    if (!_speakerIsInitialized)
    {
        return -1;
    }

    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Query the speaker system mute state.
    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    BOOL mute;
    hr = pVolume->GetMute(&mute);
    EXIT_ON_ERROR(hr);

    enabled = (mute == TRUE) ? true : false;

    SAFE_RELEASE(pVolume);

    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1236 
1237 // ----------------------------------------------------------------------------
1238 //  MicrophoneMuteIsAvailable
1239 // ----------------------------------------------------------------------------
1240 
int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available)
{
    // Reports in |available| whether the capture endpoint's system mute
    // state can be read. Returns 0 on success, -1 if the endpoint-volume
    // interface could not be activated on the capture device.

    CriticalSectionScoped lock(&_critSect);

    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Query the microphone system mute state.
    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    BOOL mute;
    hr = pVolume->GetMute(&mute);
    if (FAILED(hr))
        available = false;
    else
        available = true;

    SAFE_RELEASE(pVolume);
    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1273 
1274 // ----------------------------------------------------------------------------
1275 //  SetMicrophoneMute
1276 // ----------------------------------------------------------------------------
1277 
int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable)
{
    // Sets the capture endpoint's system mute state to |enable|.
    // Requires an initialized microphone. Returns 0 on success, -1 on failure.
    // NOTE(review): no _critSect is taken here, unlike SetSpeakerMute() —
    // confirm callers serialize access to _ptrDeviceIn.

    if (!_microphoneIsInitialized)
    {
        return -1;
    }

    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    HRESULT hr = S_OK;
    IAudioEndpointVolume* pVolume = NULL;

    // Set the microphone system mute state.
    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
    EXIT_ON_ERROR(hr);

    const BOOL mute(enable);
    hr = pVolume->SetMute(mute, NULL);
    EXIT_ON_ERROR(hr);

    SAFE_RELEASE(pVolume);
    return 0;

Exit:
    _TraceCOMError(hr);
    SAFE_RELEASE(pVolume);
    return -1;
}
1310 
1311 // ----------------------------------------------------------------------------
1312 //  MicrophoneMute
1313 // ----------------------------------------------------------------------------
1314 
MicrophoneMute(bool & enabled) const1315 int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const
1316 {
1317 
1318     if (!_microphoneIsInitialized)
1319     {
1320         return -1;
1321     }
1322 
1323     HRESULT hr = S_OK;
1324     IAudioEndpointVolume* pVolume = NULL;
1325 
1326     // Query the microphone system mute state.
1327     hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
1328     EXIT_ON_ERROR(hr);
1329 
1330     BOOL mute;
1331     hr = pVolume->GetMute(&mute);
1332     EXIT_ON_ERROR(hr);
1333 
1334     enabled = (mute == TRUE) ? true : false;
1335 
1336     SAFE_RELEASE(pVolume);
1337     return 0;
1338 
1339 Exit:
1340     _TraceCOMError(hr);
1341     SAFE_RELEASE(pVolume);
1342     return -1;
1343 }
1344 
1345 // ----------------------------------------------------------------------------
1346 //  MicrophoneBoostIsAvailable
1347 // ----------------------------------------------------------------------------
1348 
MicrophoneBoostIsAvailable(bool & available)1349 int32_t AudioDeviceWindowsCore::MicrophoneBoostIsAvailable(bool& available)
1350 {
1351 
1352     available = false;
1353     return 0;
1354 }
1355 
1356 // ----------------------------------------------------------------------------
1357 //  SetMicrophoneBoost
1358 // ----------------------------------------------------------------------------
1359 
SetMicrophoneBoost(bool enable)1360 int32_t AudioDeviceWindowsCore::SetMicrophoneBoost(bool enable)
1361 {
1362 
1363     if (!_microphoneIsInitialized)
1364     {
1365         return -1;
1366     }
1367 
1368     return -1;
1369 }
1370 
1371 // ----------------------------------------------------------------------------
1372 //  MicrophoneBoost
1373 // ----------------------------------------------------------------------------
1374 
MicrophoneBoost(bool & enabled) const1375 int32_t AudioDeviceWindowsCore::MicrophoneBoost(bool& enabled) const
1376 {
1377 
1378     if (!_microphoneIsInitialized)
1379     {
1380         return -1;
1381     }
1382 
1383     return -1;
1384 }
1385 
1386 // ----------------------------------------------------------------------------
1387 //  StereoRecordingIsAvailable
1388 // ----------------------------------------------------------------------------
1389 
StereoRecordingIsAvailable(bool & available)1390 int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available)
1391 {
1392 
1393     available = true;
1394     return 0;
1395 }
1396 
1397 // ----------------------------------------------------------------------------
1398 //  SetStereoRecording
1399 // ----------------------------------------------------------------------------
1400 
SetStereoRecording(bool enable)1401 int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable)
1402 {
1403 
1404     CriticalSectionScoped lock(&_critSect);
1405 
1406     if (enable)
1407     {
1408         _recChannelsPrioList[0] = 2;    // try stereo first
1409         _recChannelsPrioList[1] = 1;
1410         _recChannels = 2;
1411     }
1412     else
1413     {
1414         _recChannelsPrioList[0] = 1;    // try mono first
1415         _recChannelsPrioList[1] = 2;
1416         _recChannels = 1;
1417     }
1418 
1419     return 0;
1420 }
1421 
1422 // ----------------------------------------------------------------------------
1423 //  StereoRecording
1424 // ----------------------------------------------------------------------------
1425 
StereoRecording(bool & enabled) const1426 int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const
1427 {
1428 
1429     if (_recChannels == 2)
1430         enabled = true;
1431     else
1432         enabled = false;
1433 
1434     return 0;
1435 }
1436 
1437 // ----------------------------------------------------------------------------
1438 //  StereoPlayoutIsAvailable
1439 // ----------------------------------------------------------------------------
1440 
StereoPlayoutIsAvailable(bool & available)1441 int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available)
1442 {
1443 
1444     available = true;
1445     return 0;
1446 }
1447 
1448 // ----------------------------------------------------------------------------
1449 //  SetStereoPlayout
1450 // ----------------------------------------------------------------------------
1451 
SetStereoPlayout(bool enable)1452 int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable)
1453 {
1454 
1455     CriticalSectionScoped lock(&_critSect);
1456 
1457     if (enable)
1458     {
1459         _playChannelsPrioList[0] = 2;    // try stereo first
1460         _playChannelsPrioList[1] = 1;
1461         _playChannels = 2;
1462     }
1463     else
1464     {
1465         _playChannelsPrioList[0] = 1;    // try mono first
1466         _playChannelsPrioList[1] = 2;
1467         _playChannels = 1;
1468     }
1469 
1470     return 0;
1471 }
1472 
1473 // ----------------------------------------------------------------------------
1474 //  StereoPlayout
1475 // ----------------------------------------------------------------------------
1476 
StereoPlayout(bool & enabled) const1477 int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const
1478 {
1479 
1480     if (_playChannels == 2)
1481         enabled = true;
1482     else
1483         enabled = false;
1484 
1485     return 0;
1486 }
1487 
1488 // ----------------------------------------------------------------------------
1489 //  SetAGC
1490 // ----------------------------------------------------------------------------
1491 
SetAGC(bool enable)1492 int32_t AudioDeviceWindowsCore::SetAGC(bool enable)
1493 {
1494     CriticalSectionScoped lock(&_critSect);
1495     _AGC = enable;
1496     return 0;
1497 }
1498 
1499 // ----------------------------------------------------------------------------
1500 //  AGC
1501 // ----------------------------------------------------------------------------
1502 
AGC() const1503 bool AudioDeviceWindowsCore::AGC() const
1504 {
1505     CriticalSectionScoped lock(&_critSect);
1506     return _AGC;
1507 }
1508 
1509 // ----------------------------------------------------------------------------
1510 //  MicrophoneVolumeIsAvailable
1511 // ----------------------------------------------------------------------------
1512 
MicrophoneVolumeIsAvailable(bool & available)1513 int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available)
1514 {
1515 
1516     CriticalSectionScoped lock(&_critSect);
1517 
1518     if (_ptrDeviceIn == NULL)
1519     {
1520         return -1;
1521     }
1522 
1523     HRESULT hr = S_OK;
1524     IAudioEndpointVolume* pVolume = NULL;
1525 
1526     hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
1527     EXIT_ON_ERROR(hr);
1528 
1529     float volume(0.0f);
1530     hr = pVolume->GetMasterVolumeLevelScalar(&volume);
1531     if (FAILED(hr))
1532     {
1533         available = false;
1534     }
1535     available = true;
1536 
1537     SAFE_RELEASE(pVolume);
1538     return 0;
1539 
1540 Exit:
1541     _TraceCOMError(hr);
1542     SAFE_RELEASE(pVolume);
1543     return -1;
1544 }
1545 
1546 // ----------------------------------------------------------------------------
1547 //  SetMicrophoneVolume
1548 // ----------------------------------------------------------------------------
1549 
SetMicrophoneVolume(uint32_t volume)1550 int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume)
1551 {
1552     WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::SetMicrophoneVolume(volume=%u)", volume);
1553 
1554     {
1555         CriticalSectionScoped lock(&_critSect);
1556 
1557         if (!_microphoneIsInitialized)
1558         {
1559             return -1;
1560         }
1561 
1562         if (_ptrDeviceIn == NULL)
1563         {
1564             return -1;
1565         }
1566     }
1567 
1568     if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
1569         volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME))
1570     {
1571         return -1;
1572     }
1573 
1574     HRESULT hr = S_OK;
1575     // scale input volume to valid range (0.0 to 1.0)
1576     const float fLevel = static_cast<float>(volume)/MAX_CORE_MICROPHONE_VOLUME;
1577     _volumeMutex.Enter();
1578     _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
1579     _volumeMutex.Leave();
1580     EXIT_ON_ERROR(hr);
1581 
1582     return 0;
1583 
1584 Exit:
1585     _TraceCOMError(hr);
1586     return -1;
1587 }
1588 
1589 // ----------------------------------------------------------------------------
1590 //  MicrophoneVolume
1591 // ----------------------------------------------------------------------------
1592 
int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const
{
    // Reads the capture volume and scales Core Audio's [0.0, 1.0] level
    // into the webrtc-internal range [0, MAX_CORE_MICROPHONE_VOLUME].
    // Returns 0 on success, -1 on failure.
    {
        CriticalSectionScoped lock(&_critSect);

        if (!_microphoneIsInitialized)
        {
            return -1;
        }

        if (_ptrDeviceIn == NULL)
        {
            return -1;
        }
    }

    HRESULT hr = S_OK;
    float fLevel(0.0f);
    volume = 0;
    // _volumeMutex serializes access to the cached IAudioEndpointVolume.
    _volumeMutex.Enter();
    hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
    _volumeMutex.Leave();
    EXIT_ON_ERROR(hr);

    // scale input volume range [0.0,1.0] to valid output range
    volume = static_cast<uint32_t> (fLevel*MAX_CORE_MICROPHONE_VOLUME);

    return 0;

Exit:
    _TraceCOMError(hr);
    return -1;
}
1626 
1627 // ----------------------------------------------------------------------------
1628 //  MaxMicrophoneVolume
1629 //
1630 //  The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
1631 //  silence and 1.0 indicates full volume (no attenuation).
1632 //  We add our (webrtc-internal) own max level to match the Wave API and
1633 //  how it is used today in VoE.
1634 // ----------------------------------------------------------------------------
1635 
MaxMicrophoneVolume(uint32_t & maxVolume) const1636 int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const
1637 {
1638     WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1639 
1640     if (!_microphoneIsInitialized)
1641     {
1642         return -1;
1643     }
1644 
1645     maxVolume = static_cast<uint32_t> (MAX_CORE_MICROPHONE_VOLUME);
1646 
1647     return 0;
1648 }
1649 
1650 // ----------------------------------------------------------------------------
1651 //  MinMicrophoneVolume
1652 // ----------------------------------------------------------------------------
1653 
MinMicrophoneVolume(uint32_t & minVolume) const1654 int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const
1655 {
1656 
1657     if (!_microphoneIsInitialized)
1658     {
1659         return -1;
1660     }
1661 
1662     minVolume = static_cast<uint32_t> (MIN_CORE_MICROPHONE_VOLUME);
1663 
1664     return 0;
1665 }
1666 
1667 // ----------------------------------------------------------------------------
1668 //  MicrophoneVolumeStepSize
1669 // ----------------------------------------------------------------------------
1670 
MicrophoneVolumeStepSize(uint16_t & stepSize) const1671 int32_t AudioDeviceWindowsCore::MicrophoneVolumeStepSize(uint16_t& stepSize) const
1672 {
1673 
1674     if (!_microphoneIsInitialized)
1675     {
1676         return -1;
1677     }
1678 
1679     stepSize = CORE_MICROPHONE_VOLUME_STEP_SIZE;
1680 
1681     return 0;
1682 }
1683 
1684 // ----------------------------------------------------------------------------
1685 //  PlayoutDevices
1686 // ----------------------------------------------------------------------------
1687 
PlayoutDevices()1688 int16_t AudioDeviceWindowsCore::PlayoutDevices()
1689 {
1690 
1691     CriticalSectionScoped lock(&_critSect);
1692 
1693     if (_RefreshDeviceList(eRender) != -1)
1694     {
1695         return (_DeviceListCount(eRender));
1696     }
1697 
1698     return -1;
1699 }
1700 
1701 // ----------------------------------------------------------------------------
1702 //  SetPlayoutDevice I (II)
1703 // ----------------------------------------------------------------------------
1704 
SetPlayoutDevice(uint16_t index)1705 int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index)
1706 {
1707 
1708     if (_playIsInitialized)
1709     {
1710         return -1;
1711     }
1712 
1713     // Get current number of available rendering endpoint devices and refresh the rendering collection.
1714     UINT nDevices = PlayoutDevices();
1715 
1716     if (index < 0 || index > (nDevices-1))
1717     {
1718         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1));
1719         return -1;
1720     }
1721 
1722     CriticalSectionScoped lock(&_critSect);
1723 
1724     HRESULT hr(S_OK);
1725 
1726     assert(_ptrRenderCollection != NULL);
1727 
1728     //  Select an endpoint rendering device given the specified index
1729     SAFE_RELEASE(_ptrDeviceOut);
1730     hr = _ptrRenderCollection->Item(
1731                                  index,
1732                                  &_ptrDeviceOut);
1733     if (FAILED(hr))
1734     {
1735         _TraceCOMError(hr);
1736         SAFE_RELEASE(_ptrDeviceOut);
1737         return -1;
1738     }
1739 
1740     WCHAR szDeviceName[MAX_PATH];
1741     const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
1742 
1743     // Get the endpoint device's friendly-name
1744     if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
1745     {
1746         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
1747     }
1748 
1749     _usingOutputDeviceIndex = true;
1750     _outputDeviceIndex = index;
1751 
1752     return 0;
1753 }
1754 
1755 // ----------------------------------------------------------------------------
1756 //  SetPlayoutDevice II (II)
1757 // ----------------------------------------------------------------------------
1758 
int32_t AudioDeviceWindowsCore::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device)
{
    // Selects the default (eConsole) or default-communications
    // (eCommunications) render endpoint as the playout device.
    // Must be called before playout is initialized.
    if (_playIsInitialized)
    {
        return -1;
    }

    ERole role(eCommunications);

    if (device == AudioDeviceModule::kDefaultDevice)
    {
        role = eConsole;
    }
    else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
    {
        role = eCommunications;
    }

    CriticalSectionScoped lock(&_critSect);

    // Refresh the list of rendering endpoint devices
    _RefreshDeviceList(eRender);

    HRESULT hr(S_OK);

    assert(_ptrEnumerator != NULL);

    //  Select an endpoint rendering device given the specified role
    SAFE_RELEASE(_ptrDeviceOut);
    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
                           eRender,
                           role,
                           &_ptrDeviceOut);
    if (FAILED(hr))
    {
        _TraceCOMError(hr);
        SAFE_RELEASE(_ptrDeviceOut);
        return -1;
    }

    WCHAR szDeviceName[MAX_PATH];
    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];

    // Get the endpoint device's friendly-name
    if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
    }

    // Remember that the selection was made by role so later refreshes
    // (e.g. InitSpeaker) resolve the default device again.
    _usingOutputDeviceIndex = false;
    _outputDevice = device;

    return 0;
}
1813 
1814 // ----------------------------------------------------------------------------
1815 //  PlayoutDeviceName
1816 // ----------------------------------------------------------------------------
1817 
int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{
    // Returns the UTF-8 friendly name (and, optionally, endpoint ID string
    // in |guid|) of the render device at |index|. Passing index == -1
    // selects the Default Communication render device instead.
    // Returns 0 on success, -1 on failure.

    bool defaultCommunicationDevice(false);
    const int16_t nDevices(PlayoutDevices());  // also updates the list of devices

    // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
    if (index == (uint16_t)(-1))
    {
        defaultCommunicationDevice = true;
        index = 0;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Default Communication endpoint device will be used");
    }

    if ((index > (nDevices-1)) || (name == NULL))
    {
        return -1;
    }

    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    CriticalSectionScoped lock(&_critSect);

    int32_t ret(-1);
    WCHAR szDeviceName[MAX_PATH];
    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];

    // Get the endpoint device's friendly-name
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
    }

    if (ret == 0)
    {
        // Convert the endpoint device's friendly-name to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
    }

    if (guid != NULL && ret == 0)
    {
        // Convert the endpoint device's ID string to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    // NOTE(review): |ret| reflects the ID lookup, so a name-only success
    // with a failed ID lookup returns -1 — confirm callers expect this.
    return ret;
}
1893 
1894 // ----------------------------------------------------------------------------
1895 //  RecordingDeviceName
1896 // ----------------------------------------------------------------------------
1897 
RecordingDeviceName(uint16_t index,char name[kAdmMaxDeviceNameSize],char guid[kAdmMaxGuidSize])1898 int32_t AudioDeviceWindowsCore::RecordingDeviceName(
1899     uint16_t index,
1900     char name[kAdmMaxDeviceNameSize],
1901     char guid[kAdmMaxGuidSize])
1902 {
1903 
1904     bool defaultCommunicationDevice(false);
1905     const int16_t nDevices(RecordingDevices());  // also updates the list of devices
1906 
1907     // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
1908     if (index == (uint16_t)(-1))
1909     {
1910         defaultCommunicationDevice = true;
1911         index = 0;
1912         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Default Communication endpoint device will be used");
1913     }
1914 
1915     if ((index > (nDevices-1)) || (name == NULL))
1916     {
1917         return -1;
1918     }
1919 
1920     memset(name, 0, kAdmMaxDeviceNameSize);
1921 
1922     if (guid != NULL)
1923     {
1924         memset(guid, 0, kAdmMaxGuidSize);
1925     }
1926 
1927     CriticalSectionScoped lock(&_critSect);
1928 
1929     int32_t ret(-1);
1930     WCHAR szDeviceName[MAX_PATH];
1931     const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
1932 
1933     // Get the endpoint device's friendly-name
1934     if (defaultCommunicationDevice)
1935     {
1936         ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName, bufferLen);
1937     }
1938     else
1939     {
1940         ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
1941     }
1942 
1943     if (ret == 0)
1944     {
1945         // Convert the endpoint device's friendly-name to UTF-8
1946         if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
1947         {
1948             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
1949         }
1950     }
1951 
1952     // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
1953     if (defaultCommunicationDevice)
1954     {
1955         ret = _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
1956     }
1957     else
1958     {
1959         ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
1960     }
1961 
1962     if (guid != NULL && ret == 0)
1963     {
1964         // Convert the endpoint device's ID string to UTF-8
1965         if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
1966         {
1967             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
1968         }
1969     }
1970 
1971     return ret;
1972 }
1973 
1974 // ----------------------------------------------------------------------------
1975 //  RecordingDevices
1976 // ----------------------------------------------------------------------------
1977 
RecordingDevices()1978 int16_t AudioDeviceWindowsCore::RecordingDevices()
1979 {
1980 
1981     CriticalSectionScoped lock(&_critSect);
1982 
1983     if (_RefreshDeviceList(eCapture) != -1)
1984     {
1985         return (_DeviceListCount(eCapture));
1986     }
1987 
1988     return -1;
1989 }
1990 
1991 // ----------------------------------------------------------------------------
1992 //  SetRecordingDevice I (II)
1993 // ----------------------------------------------------------------------------
1994 
SetRecordingDevice(uint16_t index)1995 int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index)
1996 {
1997 
1998     if (_recIsInitialized)
1999     {
2000         return -1;
2001     }
2002 
2003     // Get current number of available capture endpoint devices and refresh the capture collection.
2004     UINT nDevices = RecordingDevices();
2005 
2006     if (index < 0 || index > (nDevices-1))
2007     {
2008         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1));
2009         return -1;
2010     }
2011 
2012     CriticalSectionScoped lock(&_critSect);
2013 
2014     HRESULT hr(S_OK);
2015 
2016     assert(_ptrCaptureCollection != NULL);
2017 
2018     // Select an endpoint capture device given the specified index
2019     SAFE_RELEASE(_ptrDeviceIn);
2020     hr = _ptrCaptureCollection->Item(
2021                                  index,
2022                                  &_ptrDeviceIn);
2023     if (FAILED(hr))
2024     {
2025         _TraceCOMError(hr);
2026         SAFE_RELEASE(_ptrDeviceIn);
2027         return -1;
2028     }
2029 
2030     WCHAR szDeviceName[MAX_PATH];
2031     const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
2032 
2033     // Get the endpoint device's friendly-name
2034     if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
2035     {
2036         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
2037     }
2038 
2039     _usingInputDeviceIndex = true;
2040     _inputDeviceIndex = index;
2041 
2042     return 0;
2043 }
2044 
2045 // ----------------------------------------------------------------------------
2046 //  SetRecordingDevice II (II)
2047 // ----------------------------------------------------------------------------
2048 
SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device)2049 int32_t AudioDeviceWindowsCore::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device)
2050 {
2051     if (_recIsInitialized)
2052     {
2053         return -1;
2054     }
2055 
2056     ERole role(eCommunications);
2057 
2058     if (device == AudioDeviceModule::kDefaultDevice)
2059     {
2060         role = eConsole;
2061     }
2062     else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
2063     {
2064         role = eCommunications;
2065     }
2066 
2067     CriticalSectionScoped lock(&_critSect);
2068 
2069     // Refresh the list of capture endpoint devices
2070     _RefreshDeviceList(eCapture);
2071 
2072     HRESULT hr(S_OK);
2073 
2074     assert(_ptrEnumerator != NULL);
2075 
2076     //  Select an endpoint capture device given the specified role
2077     SAFE_RELEASE(_ptrDeviceIn);
2078     hr = _ptrEnumerator->GetDefaultAudioEndpoint(
2079                            eCapture,
2080                            role,
2081                            &_ptrDeviceIn);
2082     if (FAILED(hr))
2083     {
2084         _TraceCOMError(hr);
2085         SAFE_RELEASE(_ptrDeviceIn);
2086         return -1;
2087     }
2088 
2089     WCHAR szDeviceName[MAX_PATH];
2090     const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
2091 
2092     // Get the endpoint device's friendly-name
2093     if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
2094     {
2095         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
2096     }
2097 
2098     _usingInputDeviceIndex = false;
2099     _inputDevice = device;
2100 
2101     return 0;
2102 }
2103 
2104 // ----------------------------------------------------------------------------
2105 //  PlayoutIsAvailable
2106 // ----------------------------------------------------------------------------
2107 
PlayoutIsAvailable(bool & available)2108 int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available)
2109 {
2110 
2111     available = false;
2112 
2113     // Try to initialize the playout side
2114     int32_t res = InitPlayout();
2115 
2116     // Cancel effect of initialization
2117     StopPlayout();
2118 
2119     if (res != -1)
2120     {
2121         available = true;
2122     }
2123 
2124     return 0;
2125 }
2126 
2127 // ----------------------------------------------------------------------------
2128 //  RecordingIsAvailable
2129 // ----------------------------------------------------------------------------
2130 
RecordingIsAvailable(bool & available)2131 int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available)
2132 {
2133 
2134     available = false;
2135 
2136     // Try to initialize the recording side
2137     int32_t res = InitRecording();
2138 
2139     // Cancel effect of initialization
2140     StopRecording();
2141 
2142     if (res != -1)
2143     {
2144         available = true;
2145     }
2146 
2147     return 0;
2148 }
2149 
2150 // ----------------------------------------------------------------------------
2151 //  InitPlayout
2152 // ----------------------------------------------------------------------------
2153 
// Initializes the playout (render) side: activates an IAudioClient on the
// selected render endpoint, negotiates a 16-bit PCM shared-mode format by
// probing sample rates and channel counts in priority order, initializes an
// event-driven rendering stream, and acquires an IAudioRenderClient.
// Returns 0 on success (or if already initialized), -1 on failure. Must not
// be called while playout is active. On error, COM resources acquired here
// are released via the Exit label.
int32_t AudioDeviceWindowsCore::InitPlayout()
{

    CriticalSectionScoped lock(&_critSect);

    // Re-initialization while streaming is not allowed.
    if (_playing)
    {
        return -1;
    }

    // Already initialized: nothing to do.
    if (_playIsInitialized)
    {
        return 0;
    }

    // A render endpoint device must have been selected first.
    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    // Initialize the speaker (devices might have been added or removed)
    if (InitSpeaker() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitSpeaker() failed");
    }

    // Ensure that the updated rendering endpoint device is valid
    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    if (_builtInAecEnabled && _recIsInitialized)
    {
        // Ensure the correct render device is configured in case
        // InitRecording() was called before InitPlayout().
        if (SetDMOProperties() == -1)
        {
            return -1;
        }
    }

    HRESULT hr = S_OK;
    WAVEFORMATEX* pWfxOut = NULL;           // mix format, freed with CoTaskMemFree
    WAVEFORMATEX Wfx = WAVEFORMATEX();      // format we request from the engine
    WAVEFORMATEX* pWfxClosestMatch = NULL;  // suggested by IsFormatSupported, freed below

    // Create COM object with IAudioClient interface.
    SAFE_RELEASE(_ptrClientOut);
    hr = _ptrDeviceOut->Activate(
                          __uuidof(IAudioClient),
                          CLSCTX_ALL,
                          NULL,
                          (void**)&_ptrClientOut);
    EXIT_ON_ERROR(hr);

    // Retrieve the stream format that the audio engine uses for its internal
    // processing (mixing) of shared-mode streams. Logged for diagnostics only;
    // the actual stream format is negotiated below.
    hr = _ptrClientOut->GetMixFormat(&pWfxOut);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Audio Engine's current rendering mix format:");
        // format type
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag     : 0x%X (%u)", pWfxOut->wFormatTag, pWfxOut->wFormatTag);
        // number of channels (i.e. mono, stereo...)
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels      : %d", pWfxOut->nChannels);
        // sample rate
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec : %d", pWfxOut->nSamplesPerSec);
        // for buffer estimation
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec: %d", pWfxOut->nAvgBytesPerSec);
        // block size of data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign    : %d", pWfxOut->nBlockAlign);
        // number of bits per sample of mono data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample : %d", pWfxOut->wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize         : %d", pWfxOut->cbSize);
    }

    // Set wave format: 16-bit PCM; rate and channel count are filled in by
    // the probing loop below.
    Wfx.wFormatTag = WAVE_FORMAT_PCM;
    Wfx.wBitsPerSample = 16;
    Wfx.cbSize = 0;

    const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
    hr = S_FALSE;

    // Iterate over frequencies and channels, in order of priority
    for (int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
    {
        for (int chan = 0; chan < sizeof(_playChannelsPrioList)/sizeof(_playChannelsPrioList[0]); chan++)
        {
            Wfx.nChannels = _playChannelsPrioList[chan];
            Wfx.nSamplesPerSec = freqs[freq];
            Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
            Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
            // If the method succeeds and the audio endpoint device supports the specified stream format,
            // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
            // it returns S_FALSE.
            hr = _ptrClientOut->IsFormatSupported(
                                  AUDCLNT_SHAREMODE_SHARED,
                                  &Wfx,
                                  &pWfxClosestMatch);
            if (hr == S_OK)
            {
                break;
            }
            else
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels=%d, nSamplesPerSec=%d is not supported",
                    Wfx.nChannels, Wfx.nSamplesPerSec);
            }
        }
        if (hr == S_OK)
            break;
    }

    // TODO(andrew): what happens in the event of failure in the above loop?
    //   Is _ptrClientOut->Initialize expected to fail?
    //   Same in InitRecording().
    if (hr == S_OK)
    {
        // Cache the negotiated format; block sizes are per 10 ms of audio.
        _playAudioFrameSize = Wfx.nBlockAlign;
        _playBlockSize = Wfx.nSamplesPerSec/100;
        _playSampleRate = Wfx.nSamplesPerSec;
        _devicePlaySampleRate = Wfx.nSamplesPerSec; // The device itself continues to run at 44.1 kHz.
        _devicePlayBlockSize = Wfx.nSamplesPerSec/100;
        _playChannels = Wfx.nChannels;

        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "VoE selected this rendering format:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag         : 0x%X (%u)", Wfx.wFormatTag, Wfx.wFormatTag);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels          : %d", Wfx.nChannels);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec     : %d", Wfx.nSamplesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec    : %d", Wfx.nAvgBytesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign        : %d", Wfx.nBlockAlign);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample     : %d", Wfx.wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize             : %d", Wfx.cbSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Additional settings:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playAudioFrameSize: %d", _playAudioFrameSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playBlockSize     : %d", _playBlockSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playChannels      : %d", _playChannels);
    }

    // Create a rendering stream.
    //
    // ****************************************************************************
    // For a shared-mode stream that uses event-driven buffering, the caller must
    // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method
    // determines how large a buffer to allocate based on the scheduling period
    // of the audio engine. Although the client's buffer processing thread is
    // event driven, the basic buffer management process, as described previously,
    // is unaltered.
    // Each time the thread awakens, it should call IAudioClient::GetCurrentPadding
    // to determine how much data to write to a rendering buffer or read from a capture
    // buffer. In contrast to the two buffers that the Initialize method allocates
    // for an exclusive-mode stream that uses event-driven buffering, a shared-mode
    // stream requires a single buffer.
    // ****************************************************************************
    //
    REFERENCE_TIME hnsBufferDuration = 0;  // ask for minimum buffer size (default)
    if (_devicePlaySampleRate == 44100)
    {
        // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate.
        // There seems to be a larger risk of underruns for 44.1 compared
        // with the default rate (48kHz). When using default, we set the requested
        // buffer duration to 0, which sets the buffer to the minimum size
        // required by the engine thread. The actual buffer size can then be
        // read by GetBufferSize() and it is 20ms on most machines.
        hnsBufferDuration = 30*10000;
    }
    hr = _ptrClientOut->Initialize(
                          AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,    // processing of the audio buffer by the client will be event driven
                          hnsBufferDuration,                    // requested buffer capacity as a time value (in 100-nanosecond units)
                          0,                                    // periodicity
                          &Wfx,                                 // selected wave format
                          NULL);                                // session GUID

    if (FAILED(hr))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "IAudioClient::Initialize() failed:");
        if (pWfxClosestMatch != NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "closest mix format: #channels=%d, samples/sec=%d, bits/sample=%d",
                pWfxClosestMatch->nChannels, pWfxClosestMatch->nSamplesPerSec, pWfxClosestMatch->wBitsPerSample);
        }
        else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "no format suggested");
        }
    }
    EXIT_ON_ERROR(hr);

    if (_ptrAudioBuffer)
    {
        // Update the audio buffer with the selected parameters
        _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate);
        _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
    }
    else
    {
        // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
        // has been created, hence the AudioDeviceBuffer does not exist.
        // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceBuffer must be attached before streaming can start");
    }

    // Get the actual size of the shared (endpoint buffer).
    // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
    UINT bufferFrameCount(0);
    hr = _ptrClientOut->GetBufferSize(
                          &bufferFrameCount);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "IAudioClient::GetBufferSize() => %u (<=> %u bytes)",
            bufferFrameCount, bufferFrameCount*_playAudioFrameSize);
    }

    // Set the event handle that the system signals when an audio buffer is ready
    // to be processed by the client.
    hr = _ptrClientOut->SetEventHandle(
                          _hRenderSamplesReadyEvent);
    EXIT_ON_ERROR(hr);

    // Get an IAudioRenderClient interface.
    SAFE_RELEASE(_ptrRenderClient);
    hr = _ptrClientOut->GetService(
                          __uuidof(IAudioRenderClient),
                          (void**)&_ptrRenderClient);
    EXIT_ON_ERROR(hr);

    // Mark playout side as initialized
    _playIsInitialized = true;

    // CoTaskMemFree handles NULL pointers, so these are safe unconditionally.
    CoTaskMemFree(pWfxOut);
    CoTaskMemFree(pWfxClosestMatch);

    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "render side is now initialized");
    return 0;

Exit:
    // Common error path: log the COM error, free format buffers, and release
    // the partially-acquired COM interfaces.
    _TraceCOMError(hr);
    CoTaskMemFree(pWfxOut);
    CoTaskMemFree(pWfxClosestMatch);
    SAFE_RELEASE(_ptrClientOut);
    SAFE_RELEASE(_ptrRenderClient);
    return -1;
}
2400 
2401 // Capture initialization when the built-in AEC DirectX Media Object (DMO) is
2402 // used. Called from InitRecording(), most of which is skipped over. The DMO
2403 // handles device initialization itself.
2404 // Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx
// Capture initialization when the built-in AEC DirectX Media Object (DMO) is
// used. Called from InitRecording(), most of which is skipped over. The DMO
// handles device initialization itself.
// Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx
//
// Configures the DMO output to mono 16-bit PCM @ 16 kHz, mirrors that format
// into the VoE capture parameters, allocates the intermediate media buffer,
// and marks the capture side as initialized. Returns 0 on success, -1 on
// failure.
int32_t AudioDeviceWindowsCore::InitRecordingDMO()
{
    assert(_builtInAecEnabled);
    assert(_dmo != NULL);

    // Propagate the current render/capture device selection to the DMO.
    if (SetDMOProperties() == -1)
    {
        return -1;
    }

    DMO_MEDIA_TYPE mt = {0};
    // Allocate pbFormat large enough to hold a WAVEFORMATEX.
    HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));
    if (FAILED(hr))
    {
        MoFreeMediaType(&mt);
        _TraceCOMError(hr);
        return -1;
    }
    mt.majortype = MEDIATYPE_Audio;
    mt.subtype = MEDIASUBTYPE_PCM;
    mt.formattype = FORMAT_WaveFormatEx;

    // Supported formats
    // nChannels: 1 (in AEC-only mode)
    // nSamplesPerSec: 8000, 11025, 16000, 22050
    // wBitsPerSample: 16
    WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat);
    ptrWav->wFormatTag = WAVE_FORMAT_PCM;
    ptrWav->nChannels = 1;
    // 16000 is the highest we can support with our resampler.
    ptrWav->nSamplesPerSec = 16000;
    ptrWav->nAvgBytesPerSec = 32000;
    ptrWav->nBlockAlign = 2;
    ptrWav->wBitsPerSample = 16;
    ptrWav->cbSize = 0;

    // Set the VoE format equal to the AEC output format.
    _recAudioFrameSize = ptrWav->nBlockAlign;
    _recSampleRate = ptrWav->nSamplesPerSec;
    _recBlockSize = ptrWav->nSamplesPerSec / 100;  // 10 ms worth of frames
    _recChannels = ptrWav->nChannels;

    // Set the DMO output format parameters.
    hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0);
    // The media type is copied by SetOutputType; free ours regardless of result.
    MoFreeMediaType(&mt);
    if (FAILED(hr))
    {
        _TraceCOMError(hr);
        return -1;
    }

    if (_ptrAudioBuffer)
    {
        _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
        _ptrAudioBuffer->SetRecordingChannels(_recChannels);
    }
    else
    {
        // Refer to InitRecording() for comments.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "AudioDeviceBuffer must be attached before streaming can start");
    }

    // One 10 ms block of capture audio at the configured format.
    _mediaBuffer = new MediaBufferImpl(_recBlockSize * _recAudioFrameSize);

    // Optional, but if called, must be after media types are set.
    hr = _dmo->AllocateStreamingResources();
    if (FAILED(hr))
    {
         _TraceCOMError(hr);
        return -1;
    }

    _recIsInitialized = true;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "Capture side is now initialized");

    return 0;
}
2484 
2485 // ----------------------------------------------------------------------------
2486 //  InitRecording
2487 // ----------------------------------------------------------------------------
2488 
InitRecording()2489 int32_t AudioDeviceWindowsCore::InitRecording()
2490 {
2491 
2492     CriticalSectionScoped lock(&_critSect);
2493 
2494     if (_recording)
2495     {
2496         return -1;
2497     }
2498 
2499     if (_recIsInitialized)
2500     {
2501         return 0;
2502     }
2503 
2504     if (QueryPerformanceFrequency(&_perfCounterFreq) == 0)
2505     {
2506         return -1;
2507     }
2508     _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;
2509 
2510     if (_ptrDeviceIn == NULL)
2511     {
2512         return -1;
2513     }
2514 
2515     // Initialize the microphone (devices might have been added or removed)
2516     if (InitMicrophone() == -1)
2517     {
2518         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitMicrophone() failed");
2519     }
2520 
2521     // Ensure that the updated capturing endpoint device is valid
2522     if (_ptrDeviceIn == NULL)
2523     {
2524         return -1;
2525     }
2526 
2527     if (_builtInAecEnabled)
2528     {
2529         // The DMO will configure the capture device.
2530         return InitRecordingDMO();
2531     }
2532 
2533     HRESULT hr = S_OK;
2534     WAVEFORMATEX* pWfxIn = NULL;
2535     WAVEFORMATEX Wfx = WAVEFORMATEX();
2536     WAVEFORMATEX* pWfxClosestMatch = NULL;
2537 
2538     // Create COM object with IAudioClient interface.
2539     SAFE_RELEASE(_ptrClientIn);
2540     hr = _ptrDeviceIn->Activate(
2541                           __uuidof(IAudioClient),
2542                           CLSCTX_ALL,
2543                           NULL,
2544                           (void**)&_ptrClientIn);
2545     EXIT_ON_ERROR(hr);
2546 
2547     // Retrieve the stream format that the audio engine uses for its internal
2548     // processing (mixing) of shared-mode streams.
2549     hr = _ptrClientIn->GetMixFormat(&pWfxIn);
2550     if (SUCCEEDED(hr))
2551     {
2552         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Audio Engine's current capturing mix format:");
2553         // format type
2554         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag     : 0x%X (%u)", pWfxIn->wFormatTag, pWfxIn->wFormatTag);
2555         // number of channels (i.e. mono, stereo...)
2556         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels      : %d", pWfxIn->nChannels);
2557         // sample rate
2558         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec : %d", pWfxIn->nSamplesPerSec);
2559         // for buffer estimation
2560         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec: %d", pWfxIn->nAvgBytesPerSec);
2561         // block size of data
2562         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign    : %d", pWfxIn->nBlockAlign);
2563         // number of bits per sample of mono data
2564         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample : %d", pWfxIn->wBitsPerSample);
2565         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize         : %d", pWfxIn->cbSize);
2566     }
2567 
2568     // Set wave format
2569     Wfx.wFormatTag = WAVE_FORMAT_PCM;
2570     Wfx.wBitsPerSample = 16;
2571     Wfx.cbSize = 0;
2572 
2573     const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000};
2574     hr = S_FALSE;
2575 
2576     // Iterate over frequencies and channels, in order of priority
2577     for (int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
2578     {
2579         for (int chan = 0; chan < sizeof(_recChannelsPrioList)/sizeof(_recChannelsPrioList[0]); chan++)
2580         {
2581             Wfx.nChannels = _recChannelsPrioList[chan];
2582             Wfx.nSamplesPerSec = freqs[freq];
2583             Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
2584             Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
2585             // If the method succeeds and the audio endpoint device supports the specified stream format,
2586             // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
2587             // it returns S_FALSE.
2588             hr = _ptrClientIn->IsFormatSupported(
2589                                   AUDCLNT_SHAREMODE_SHARED,
2590                                   &Wfx,
2591                                   &pWfxClosestMatch);
2592             if (hr == S_OK)
2593             {
2594                 break;
2595             }
2596             else
2597             {
2598                 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels=%d, nSamplesPerSec=%d is not supported",
2599                     Wfx.nChannels, Wfx.nSamplesPerSec);
2600             }
2601         }
2602         if (hr == S_OK)
2603             break;
2604     }
2605 
2606     if (hr == S_OK)
2607     {
2608         _recAudioFrameSize = Wfx.nBlockAlign;
2609         _recSampleRate = Wfx.nSamplesPerSec;
2610         _recBlockSize = Wfx.nSamplesPerSec/100;
2611         _recChannels = Wfx.nChannels;
2612 
2613         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "VoE selected this capturing format:");
2614         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag        : 0x%X (%u)", Wfx.wFormatTag, Wfx.wFormatTag);
2615         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels         : %d", Wfx.nChannels);
2616         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec    : %d", Wfx.nSamplesPerSec);
2617         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec   : %d", Wfx.nAvgBytesPerSec);
2618         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign       : %d", Wfx.nBlockAlign);
2619         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample    : %d", Wfx.wBitsPerSample);
2620         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize            : %d", Wfx.cbSize);
2621         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Additional settings:");
2622         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recAudioFrameSize: %d", _recAudioFrameSize);
2623         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recBlockSize     : %d", _recBlockSize);
2624         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recChannels      : %d", _recChannels);
2625     }
2626 
2627     // Create a capturing stream.
2628     hr = _ptrClientIn->Initialize(
2629                           AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
2630                           AUDCLNT_STREAMFLAGS_EVENTCALLBACK |   // processing of the audio buffer by the client will be event driven
2631                           AUDCLNT_STREAMFLAGS_NOPERSIST,        // volume and mute settings for an audio session will not persist across system restarts
2632                           0,                                    // required for event-driven shared mode
2633                           0,                                    // periodicity
2634                           &Wfx,                                 // selected wave format
2635                           NULL);                                // session GUID
2636 
2637 
2638     if (hr != S_OK)
2639     {
2640         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "IAudioClient::Initialize() failed:");
2641         if (pWfxClosestMatch != NULL)
2642         {
2643             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "closest mix format: #channels=%d, samples/sec=%d, bits/sample=%d",
2644                 pWfxClosestMatch->nChannels, pWfxClosestMatch->nSamplesPerSec, pWfxClosestMatch->wBitsPerSample);
2645         }
2646         else
2647         {
2648             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "no format suggested");
2649         }
2650     }
2651     EXIT_ON_ERROR(hr);
2652 
2653     if (_ptrAudioBuffer)
2654     {
2655         // Update the audio buffer with the selected parameters
2656         _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
2657         _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
2658     }
2659     else
2660     {
2661         // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
2662         // has been created, hence the AudioDeviceBuffer does not exist.
2663         // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
2664         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceBuffer must be attached before streaming can start");
2665     }
2666 
2667     // Get the actual size of the shared (endpoint buffer).
2668     // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
2669     UINT bufferFrameCount(0);
2670     hr = _ptrClientIn->GetBufferSize(
2671                           &bufferFrameCount);
2672     if (SUCCEEDED(hr))
2673     {
2674         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "IAudioClient::GetBufferSize() => %u (<=> %u bytes)",
2675             bufferFrameCount, bufferFrameCount*_recAudioFrameSize);
2676     }
2677 
2678     // Set the event handle that the system signals when an audio buffer is ready
2679     // to be processed by the client.
2680     hr = _ptrClientIn->SetEventHandle(
2681                           _hCaptureSamplesReadyEvent);
2682     EXIT_ON_ERROR(hr);
2683 
2684     // Get an IAudioCaptureClient interface.
2685     SAFE_RELEASE(_ptrCaptureClient);
2686     hr = _ptrClientIn->GetService(
2687                           __uuidof(IAudioCaptureClient),
2688                           (void**)&_ptrCaptureClient);
2689     EXIT_ON_ERROR(hr);
2690 
2691     // Mark capture side as initialized
2692     _recIsInitialized = true;
2693 
2694     CoTaskMemFree(pWfxIn);
2695     CoTaskMemFree(pWfxClosestMatch);
2696 
2697     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "capture side is now initialized");
2698     return 0;
2699 
2700 Exit:
2701     _TraceCOMError(hr);
2702     CoTaskMemFree(pWfxIn);
2703     CoTaskMemFree(pWfxClosestMatch);
2704     SAFE_RELEASE(_ptrClientIn);
2705     SAFE_RELEASE(_ptrCaptureClient);
2706     return -1;
2707 }
2708 
2709 // ----------------------------------------------------------------------------
2710 //  StartRecording
2711 // ----------------------------------------------------------------------------
2712 
// Spawns the capture thread (and, for AGC support, the volume getter and
// setter threads) and blocks until the capture thread signals that the
// WASAPI stream has actually started.
// Returns 0 on success (or when recording is already started), -1 on failure.
// InitRecording() must have completed successfully first.
int32_t AudioDeviceWindowsCore::StartRecording()
{

    if (!_recIsInitialized)
    {
        return -1;
    }

    // A live thread handle means a previous StartRecording() is still in
    // effect; treat this call as a successful no-op.
    if (_hRecThread != NULL)
    {
        return 0;
    }

    if (_recording)
    {
        return 0;
    }

    {
        CriticalSectionScoped critScoped(&_critSect);

        // Create thread which will drive the capturing
        LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
        if (_builtInAecEnabled)
        {
            // Redirect to the DMO polling method.
            lpStartAddress = WSAPICaptureThreadPollDMO;

            if (!_playing)
            {
                // The DMO won't provide us captured output data unless we
                // give it render data to process.
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "Playout must be started before recording when using the "
                    "built-in AEC");
                return -1;
            }
        }

        assert(_hRecThread == NULL);
        _hRecThread = CreateThread(NULL,
                                   0,
                                   lpStartAddress,
                                   this,
                                   0,
                                   NULL);
        if (_hRecThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "failed to create the recording thread");
            return -1;
        }

        // Set thread priority to highest possible
        SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);

        // NOTE(review): if either volume thread below fails to start we
        // return -1 while the already-created capture thread keeps running
        // and its handle stays open in _hRecThread — the error path does no
        // rollback. Confirm whether StopRecording() is expected to recover
        // from this state.
        assert(_hGetCaptureVolumeThread == NULL);
        _hGetCaptureVolumeThread = CreateThread(NULL,
                                                0,
                                                GetCaptureVolumeThread,
                                                this,
                                                0,
                                                NULL);
        if (_hGetCaptureVolumeThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to create the volume getter thread");
            return -1;
        }

        assert(_hSetCaptureVolumeThread == NULL);
        _hSetCaptureVolumeThread = CreateThread(NULL,
                                                0,
                                                SetCaptureVolumeThread,
                                                this,
                                                0,
                                                NULL);
        if (_hSetCaptureVolumeThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to create the volume setter thread");
            return -1;
        }
    }  // critScoped

    // Wait (at most 1 second) for the capture thread to report that the
    // audio stream is running before declaring recording started.
    DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
    if (ret != WAIT_OBJECT_0)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "capturing did not start up properly");
        return -1;
    }
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "capture audio stream has now started...");

    // Reset CPU-load statistics for the new capture session.
    _avgCPULoad = 0.0f;
    _playAcc = 0;
    _recording = true;

    return 0;
}
2814 
2815 // ----------------------------------------------------------------------------
2816 //  StopRecording
2817 // ----------------------------------------------------------------------------
2818 
// Stops capturing: signals the shutdown event, joins the capture and volume
// threads, closes their handles and (when the built-in AEC is enabled) frees
// the DMO streaming resources.
// Returns 0 on success (or when recording was never initialized), -1 if any
// thread failed to stop within its timeout or the DMO call failed.
int32_t AudioDeviceWindowsCore::StopRecording()
{
    int32_t err = 0;

    if (!_recIsInitialized)
    {
        return 0;
    }

    _Lock();

    if (_hRecThread == NULL)
    {
        // Initialized but never started: only the WASAPI interfaces need
        // to be released.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "no capturing stream is active => close down WASAPI only");
        SAFE_RELEASE(_ptrClientIn);
        SAFE_RELEASE(_ptrCaptureClient);
        _recIsInitialized = false;
        _recording = false;
        _UnLock();
        return 0;
    }

    // Stop the driving thread...
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "closing down the webrtc_core_audio_capture_thread...");
    // Manual-reset event; it will remain signalled to stop all capture threads.
    SetEvent(_hShutdownCaptureEvent);

    // Release the lock while joining: the capture thread takes the same lock
    // on its way out and would otherwise deadlock against us.
    _UnLock();
    DWORD ret = WaitForSingleObject(_hRecThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "failed to close down webrtc_core_audio_capture_thread");
        err = -1;
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "webrtc_core_audio_capture_thread is now closed");
    }

    ret = WaitForSingleObject(_hGetCaptureVolumeThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        // the thread did not stop as it should
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to close down volume getter thread");
        err = -1;
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "  volume getter thread is now closed");
    }

    ret = WaitForSingleObject(_hSetCaptureVolumeThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        // the thread did not stop as it should
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to close down volume setter thread");
        err = -1;
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "  volume setter thread is now closed");
    }
    _Lock();

    ResetEvent(_hShutdownCaptureEvent); // Must be manually reset.
    // Ensure that the thread has released these interfaces properly.
    // (The capture thread is expected to have NULLed them on a clean exit.)
    assert(err == -1 || _ptrClientIn == NULL);
    assert(err == -1 || _ptrCaptureClient == NULL);

    _recIsInitialized = false;
    _recording = false;

    // These will create thread leaks in the result of an error,
    // but we can at least resume the call.
    CloseHandle(_hRecThread);
    _hRecThread = NULL;

    CloseHandle(_hGetCaptureVolumeThread);
    _hGetCaptureVolumeThread = NULL;

    CloseHandle(_hSetCaptureVolumeThread);
    _hSetCaptureVolumeThread = NULL;

    if (_builtInAecEnabled)
    {
        assert(_dmo != NULL);
        // This is necessary. Otherwise the DMO can generate garbage render
        // audio even after rendering has stopped.
        HRESULT hr = _dmo->FreeStreamingResources();
        if (FAILED(hr))
        {
            _TraceCOMError(hr);
            err = -1;
        }
    }

    // Reset the recording delay value.
    _sndCardRecDelay = 0;

    _UnLock();

    return err;
}
2930 
2931 // ----------------------------------------------------------------------------
2932 //  RecordingIsInitialized
2933 // ----------------------------------------------------------------------------
2934 
RecordingIsInitialized() const2935 bool AudioDeviceWindowsCore::RecordingIsInitialized() const
2936 {
2937     return (_recIsInitialized);
2938 }
2939 
2940 // ----------------------------------------------------------------------------
2941 //  Recording
2942 // ----------------------------------------------------------------------------
2943 
Recording() const2944 bool AudioDeviceWindowsCore::Recording() const
2945 {
2946     return (_recording);
2947 }
2948 
2949 // ----------------------------------------------------------------------------
2950 //  PlayoutIsInitialized
2951 // ----------------------------------------------------------------------------
2952 
PlayoutIsInitialized() const2953 bool AudioDeviceWindowsCore::PlayoutIsInitialized() const
2954 {
2955 
2956     return (_playIsInitialized);
2957 }
2958 
2959 // ----------------------------------------------------------------------------
2960 //  StartPlayout
2961 // ----------------------------------------------------------------------------
2962 
// Spawns the render thread and blocks (at most 1 second) until it signals
// that the WASAPI render stream has started.
// Returns 0 on success (or when playout is already started), -1 on failure.
// InitPlayout() must have completed successfully first.
int32_t AudioDeviceWindowsCore::StartPlayout()
{

    if (!_playIsInitialized)
    {
        return -1;
    }

    // A live thread handle means a previous StartPlayout() is still in
    // effect; treat this call as a successful no-op.
    if (_hPlayThread != NULL)
    {
        return 0;
    }

    if (_playing)
    {
        return 0;
    }

    {
        CriticalSectionScoped critScoped(&_critSect);

        // Create thread which will drive the rendering.
        assert(_hPlayThread == NULL);
        _hPlayThread = CreateThread(
                         NULL,
                         0,
                         WSAPIRenderThread,
                         this,
                         0,
                         NULL);
        if (_hPlayThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "failed to create the playout thread");
            return -1;
        }

        // Set thread priority to highest possible.
        SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL);
    }  // critScoped

    // Wait for the render thread to report that the audio stream is running.
    // NOTE(review): on timeout we return -1 but leave _hPlayThread set, so a
    // later StartPlayout() returns 0 without restarting — confirm that
    // StopPlayout() is the expected recovery path.
    DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000);
    if (ret != WAIT_OBJECT_0)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "rendering did not start up properly");
        return -1;
    }

    _playing = true;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "rendering audio stream has now started...");

    return 0;
}
3018 
3019 // ----------------------------------------------------------------------------
3020 //  StopPlayout
3021 // ----------------------------------------------------------------------------
3022 
// Stops rendering: signals the shutdown event, joins the render thread,
// releases the WASAPI render interfaces and closes the thread handle.
// Returns 0 on success (or when playout was never initialized), -1 if the
// render thread failed to stop within 2 seconds.
int32_t AudioDeviceWindowsCore::StopPlayout()
{

    if (!_playIsInitialized)
    {
        return 0;
    }

    {
        CriticalSectionScoped critScoped(&_critSect) ;

        if (_hPlayThread == NULL)
        {
            // Initialized but never started: only the WASAPI interfaces
            // need to be released.
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                "no rendering stream is active => close down WASAPI only");
            SAFE_RELEASE(_ptrClientOut);
            SAFE_RELEASE(_ptrRenderClient);
            _playIsInitialized = false;
            _playing = false;
            return 0;
        }

        // stop the driving thread...
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "closing down the webrtc_core_audio_render_thread...");
        SetEvent(_hShutdownRenderEvent);
    }  // critScoped

    // Join outside the critical section: the render thread takes the same
    // lock on its way out and would otherwise deadlock against us.
    DWORD ret = WaitForSingleObject(_hPlayThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        // the thread did not stop as it should
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "failed to close down webrtc_core_audio_render_thread");
        CloseHandle(_hPlayThread);
        _hPlayThread = NULL;
        _playIsInitialized = false;
        _playing = false;
        return -1;
    }

    {
        CriticalSectionScoped critScoped(&_critSect);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "webrtc_core_audio_render_thread is now closed");

        // to reset this event manually at each time we finish with it,
        // in case that the render thread has exited before StopPlayout(),
        // this event might be caught by the new render thread within same VoE instance.
        ResetEvent(_hShutdownRenderEvent);

        SAFE_RELEASE(_ptrClientOut);
        SAFE_RELEASE(_ptrRenderClient);

        _playIsInitialized = false;
        _playing = false;

        CloseHandle(_hPlayThread);
        _hPlayThread = NULL;

        if (_builtInAecEnabled && _recording)
        {
            // The DMO won't provide us captured output data unless we
            // give it render data to process.
            //
            // We still permit the playout to shutdown, and trace a warning.
            // Otherwise, VoE can get into a state which will never permit
            // playout to stop properly.
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                "Recording should be stopped before playout when using the "
                "built-in AEC");
        }

        // Reset the playout delay value.
        _sndCardPlayDelay = 0;
    }  // critScoped

    return 0;
}
3102 
3103 // ----------------------------------------------------------------------------
3104 //  PlayoutDelay
3105 // ----------------------------------------------------------------------------
3106 
PlayoutDelay(uint16_t & delayMS) const3107 int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const
3108 {
3109     CriticalSectionScoped critScoped(&_critSect);
3110     delayMS = static_cast<uint16_t>(_sndCardPlayDelay);
3111     return 0;
3112 }
3113 
3114 // ----------------------------------------------------------------------------
3115 //  RecordingDelay
3116 // ----------------------------------------------------------------------------
3117 
RecordingDelay(uint16_t & delayMS) const3118 int32_t AudioDeviceWindowsCore::RecordingDelay(uint16_t& delayMS) const
3119 {
3120     CriticalSectionScoped critScoped(&_critSect);
3121     delayMS = static_cast<uint16_t>(_sndCardRecDelay);
3122     return 0;
3123 }
3124 
3125 // ----------------------------------------------------------------------------
3126 //  Playing
3127 // ----------------------------------------------------------------------------
3128 
Playing() const3129 bool AudioDeviceWindowsCore::Playing() const
3130 {
3131     return (_playing);
3132 }
3133 // ----------------------------------------------------------------------------
3134 //  SetPlayoutBuffer
3135 // ----------------------------------------------------------------------------
3136 
SetPlayoutBuffer(const AudioDeviceModule::BufferType type,uint16_t sizeMS)3137 int32_t AudioDeviceWindowsCore::SetPlayoutBuffer(const AudioDeviceModule::BufferType type, uint16_t sizeMS)
3138 {
3139 
3140     CriticalSectionScoped lock(&_critSect);
3141 
3142     _playBufType = type;
3143 
3144     if (type == AudioDeviceModule::kFixedBufferSize)
3145     {
3146         _playBufDelayFixed = sizeMS;
3147     }
3148 
3149     return 0;
3150 }
3151 
3152 // ----------------------------------------------------------------------------
3153 //  PlayoutBuffer
3154 // ----------------------------------------------------------------------------
3155 
PlayoutBuffer(AudioDeviceModule::BufferType & type,uint16_t & sizeMS) const3156 int32_t AudioDeviceWindowsCore::PlayoutBuffer(AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const
3157 {
3158     CriticalSectionScoped lock(&_critSect);
3159     type = _playBufType;
3160 
3161     if (type == AudioDeviceModule::kFixedBufferSize)
3162     {
3163         sizeMS = _playBufDelayFixed;
3164     }
3165     else
3166     {
3167         // Use same value as for PlayoutDelay
3168         sizeMS = static_cast<uint16_t>(_sndCardPlayDelay);
3169     }
3170 
3171     return 0;
3172 }
3173 
3174 // ----------------------------------------------------------------------------
3175 //  CPULoad
3176 // ----------------------------------------------------------------------------
3177 
CPULoad(uint16_t & load) const3178 int32_t AudioDeviceWindowsCore::CPULoad(uint16_t& load) const
3179 {
3180 
3181     load = static_cast<uint16_t> (100*_avgCPULoad);
3182 
3183     return 0;
3184 }
3185 
3186 // ----------------------------------------------------------------------------
3187 //  PlayoutWarning
3188 // ----------------------------------------------------------------------------
3189 
PlayoutWarning() const3190 bool AudioDeviceWindowsCore::PlayoutWarning() const
3191 {
3192     return ( _playWarning > 0);
3193 }
3194 
3195 // ----------------------------------------------------------------------------
3196 //  PlayoutError
3197 // ----------------------------------------------------------------------------
3198 
PlayoutError() const3199 bool AudioDeviceWindowsCore::PlayoutError() const
3200 {
3201     return ( _playError > 0);
3202 }
3203 
3204 // ----------------------------------------------------------------------------
3205 //  RecordingWarning
3206 // ----------------------------------------------------------------------------
3207 
RecordingWarning() const3208 bool AudioDeviceWindowsCore::RecordingWarning() const
3209 {
3210     return ( _recWarning > 0);
3211 }
3212 
3213 // ----------------------------------------------------------------------------
3214 //  RecordingError
3215 // ----------------------------------------------------------------------------
3216 
RecordingError() const3217 bool AudioDeviceWindowsCore::RecordingError() const
3218 {
3219     return ( _recError > 0);
3220 }
3221 
3222 // ----------------------------------------------------------------------------
3223 //  ClearPlayoutWarning
3224 // ----------------------------------------------------------------------------
3225 
ClearPlayoutWarning()3226 void AudioDeviceWindowsCore::ClearPlayoutWarning()
3227 {
3228     _playWarning = 0;
3229 }
3230 
3231 // ----------------------------------------------------------------------------
3232 //  ClearPlayoutError
3233 // ----------------------------------------------------------------------------
3234 
ClearPlayoutError()3235 void AudioDeviceWindowsCore::ClearPlayoutError()
3236 {
3237     _playError = 0;
3238 }
3239 
3240 // ----------------------------------------------------------------------------
3241 //  ClearRecordingWarning
3242 // ----------------------------------------------------------------------------
3243 
ClearRecordingWarning()3244 void AudioDeviceWindowsCore::ClearRecordingWarning()
3245 {
3246     _recWarning = 0;
3247 }
3248 
3249 // ----------------------------------------------------------------------------
3250 //  ClearRecordingError
3251 // ----------------------------------------------------------------------------
3252 
ClearRecordingError()3253 void AudioDeviceWindowsCore::ClearRecordingError()
3254 {
3255     _recError = 0;
3256 }
3257 
3258 // ============================================================================
3259 //                                 Private Methods
3260 // ============================================================================
3261 
3262 // ----------------------------------------------------------------------------
3263 //  [static] WSAPIRenderThread
3264 // ----------------------------------------------------------------------------
3265 
WSAPIRenderThread(LPVOID context)3266 DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context)
3267 {
3268     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3269         DoRenderThread();
3270 }
3271 
3272 // ----------------------------------------------------------------------------
3273 //  [static] WSAPICaptureThread
3274 // ----------------------------------------------------------------------------
3275 
WSAPICaptureThread(LPVOID context)3276 DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context)
3277 {
3278     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3279         DoCaptureThread();
3280 }
3281 
WSAPICaptureThreadPollDMO(LPVOID context)3282 DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context)
3283 {
3284     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3285         DoCaptureThreadPollDMO();
3286 }
3287 
GetCaptureVolumeThread(LPVOID context)3288 DWORD WINAPI AudioDeviceWindowsCore::GetCaptureVolumeThread(LPVOID context)
3289 {
3290     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3291         DoGetCaptureVolumeThread();
3292 }
3293 
SetCaptureVolumeThread(LPVOID context)3294 DWORD WINAPI AudioDeviceWindowsCore::SetCaptureVolumeThread(LPVOID context)
3295 {
3296     return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3297         DoSetCaptureVolumeThread();
3298 }
3299 
DoGetCaptureVolumeThread()3300 DWORD AudioDeviceWindowsCore::DoGetCaptureVolumeThread()
3301 {
3302     HANDLE waitObject = _hShutdownCaptureEvent;
3303 
3304     while (1)
3305     {
3306         if (AGC())
3307         {
3308             uint32_t currentMicLevel = 0;
3309             if (MicrophoneVolume(currentMicLevel) == 0)
3310             {
3311                 // This doesn't set the system volume, just stores it.
3312                 _Lock();
3313                 if (_ptrAudioBuffer)
3314                 {
3315                     _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
3316                 }
3317                 _UnLock();
3318             }
3319         }
3320 
3321         DWORD waitResult = WaitForSingleObject(waitObject,
3322                                                GET_MIC_VOLUME_INTERVAL_MS);
3323         switch (waitResult)
3324         {
3325             case WAIT_OBJECT_0: // _hShutdownCaptureEvent
3326                 return 0;
3327             case WAIT_TIMEOUT:  // timeout notification
3328                 break;
3329             default:            // unexpected error
3330                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3331                     "  unknown wait termination on get volume thread");
3332                 return 1;
3333         }
3334     }
3335 }
3336 
DoSetCaptureVolumeThread()3337 DWORD AudioDeviceWindowsCore::DoSetCaptureVolumeThread()
3338 {
3339     HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hSetCaptureVolumeEvent};
3340 
3341     while (1)
3342     {
3343         DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
3344         switch (waitResult)
3345         {
3346             case WAIT_OBJECT_0:      // _hShutdownCaptureEvent
3347                 return 0;
3348             case WAIT_OBJECT_0 + 1:  // _hSetCaptureVolumeEvent
3349                 break;
3350             default:                 // unexpected error
3351                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3352                     "  unknown wait termination on set volume thread");
3353                     return 1;
3354         }
3355 
3356         _Lock();
3357         uint32_t newMicLevel = _newMicLevel;
3358         _UnLock();
3359 
3360         if (SetMicrophoneVolume(newMicLevel) == -1)
3361         {
3362             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3363                 "  the required modification of the microphone volume failed");
3364         }
3365     }
3366 }
3367 
3368 // ----------------------------------------------------------------------------
3369 //  DoRenderThread
3370 // ----------------------------------------------------------------------------
3371 
DoRenderThread()3372 DWORD AudioDeviceWindowsCore::DoRenderThread()
3373 {
3374 
3375     bool keepPlaying = true;
3376     HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
3377     HRESULT hr = S_OK;
3378     HANDLE hMmTask = NULL;
3379 
3380     LARGE_INTEGER t1;
3381     LARGE_INTEGER t2;
3382     int32_t time(0);
3383 
3384     // Initialize COM as MTA in this thread.
3385     ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
3386     if (!comInit.succeeded()) {
3387       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3388           "failed to initialize COM in render thread");
3389       return 1;
3390     }
3391 
3392     rtc::SetCurrentThreadName("webrtc_core_audio_render_thread");
3393 
3394     // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread priority.
3395     //
3396     if (_winSupportAvrt)
3397     {
3398         DWORD taskIndex(0);
3399         hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
3400         if (hMmTask)
3401         {
3402             if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL))
3403             {
3404                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to boost play-thread using MMCSS");
3405             }
3406             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "render thread is now registered with MMCSS (taskIndex=%d)", taskIndex);
3407         }
3408         else
3409         {
3410             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to enable MMCSS on render thread (err=%d)", GetLastError());
3411             _TraceCOMError(GetLastError());
3412         }
3413     }
3414 
3415     _Lock();
3416 
3417     IAudioClock* clock = NULL;
3418 
3419     // Get size of rendering buffer (length is expressed as the number of audio frames the buffer can hold).
3420     // This value is fixed during the rendering session.
3421     //
3422     UINT32 bufferLength = 0;
3423     hr = _ptrClientOut->GetBufferSize(&bufferLength);
3424     EXIT_ON_ERROR(hr);
3425     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] size of buffer       : %u", bufferLength);
3426 
3427     // Get maximum latency for the current stream (will not change for the lifetime  of the IAudioClient object).
3428     //
3429     REFERENCE_TIME latency;
3430     _ptrClientOut->GetStreamLatency(&latency);
3431     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] max stream latency   : %u (%3.2f ms)",
3432         (DWORD)latency, (double)(latency/10000.0));
3433 
3434     // Get the length of the periodic interval separating successive processing passes by
3435     // the audio engine on the data in the endpoint buffer.
3436     //
3437     // The period between processing passes by the audio engine is fixed for a particular
3438     // audio endpoint device and represents the smallest processing quantum for the audio engine.
3439     // This period plus the stream latency between the buffer and endpoint device represents
3440     // the minimum possible latency that an audio application can achieve.
3441     // Typical value: 100000 <=> 0.01 sec = 10ms.
3442     //
3443     REFERENCE_TIME devPeriod = 0;
3444     REFERENCE_TIME devPeriodMin = 0;
3445     _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin);
3446     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] device period        : %u (%3.2f ms)",
3447         (DWORD)devPeriod, (double)(devPeriod/10000.0));
3448 
3449     // Derive initial rendering delay.
3450     // Example: 10*(960/480) + 15 = 20 + 15 = 35ms
3451     //
3452     int playout_delay = 10 * (bufferLength / _playBlockSize) +
3453         (int)((latency + devPeriod) / 10000);
3454     _sndCardPlayDelay = playout_delay;
3455     _writtenSamples = 0;
3456     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
3457                  "[REND] initial delay        : %u", playout_delay);
3458 
3459     double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_devicePlayBlockSize);
3460     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] endpointBufferSizeMS : %3.2f", endpointBufferSizeMS);
3461 
3462     // Before starting the stream, fill the rendering buffer with silence.
3463     //
3464     BYTE *pData = NULL;
3465     hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
3466     EXIT_ON_ERROR(hr);
3467 
3468     hr = _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
3469     EXIT_ON_ERROR(hr);
3470 
3471     _writtenSamples += bufferLength;
3472 
3473     hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock);
3474     if (FAILED(hr)) {
3475       WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3476                    "failed to get IAudioClock interface from the IAudioClient");
3477     }
3478 
3479     // Start up the rendering audio stream.
3480     hr = _ptrClientOut->Start();
3481     EXIT_ON_ERROR(hr);
3482 
3483     _UnLock();
3484 
3485     // Set event which will ensure that the calling thread modifies the playing state to true.
3486     //
3487     SetEvent(_hRenderStartedEvent);
3488 
3489     // >> ------------------ THREAD LOOP ------------------
3490 
3491     while (keepPlaying)
3492     {
3493         // Wait for a render notification event or a shutdown event
3494         DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
3495         switch (waitResult)
3496         {
3497         case WAIT_OBJECT_0 + 0:     // _hShutdownRenderEvent
3498             keepPlaying = false;
3499             break;
3500         case WAIT_OBJECT_0 + 1:     // _hRenderSamplesReadyEvent
3501             break;
3502         case WAIT_TIMEOUT:          // timeout notification
3503             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "render event timed out after 0.5 seconds");
3504             goto Exit;
3505         default:                    // unexpected error
3506             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "unknown wait termination on render side");
3507             goto Exit;
3508         }
3509 
3510         while (keepPlaying)
3511         {
3512             _Lock();
3513 
3514             // Sanity check to ensure that essential states are not modified
3515             // during the unlocked period.
3516             if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
3517             {
3518                 _UnLock();
3519                 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
3520                     "output state has been modified during unlocked period");
3521                 goto Exit;
3522             }
3523 
3524             // Get the number of frames of padding (queued up to play) in the endpoint buffer.
3525             UINT32 padding = 0;
3526             hr = _ptrClientOut->GetCurrentPadding(&padding);
3527             EXIT_ON_ERROR(hr);
3528 
3529             // Derive the amount of available space in the output buffer
3530             uint32_t framesAvailable = bufferLength - padding;
3531             // WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "#avaliable audio frames = %u", framesAvailable);
3532 
3533             // Do we have 10 ms available in the render buffer?
3534             if (framesAvailable < _playBlockSize)
3535             {
3536                 // Not enough space in render buffer to store next render packet.
3537                 _UnLock();
3538                 break;
3539             }
3540 
3541             // Write n*10ms buffers to the render buffer
3542             const uint32_t n10msBuffers = (framesAvailable / _playBlockSize);
3543             for (uint32_t n = 0; n < n10msBuffers; n++)
3544             {
3545                 // Get pointer (i.e., grab the buffer) to next space in the shared render buffer.
3546                 hr = _ptrRenderClient->GetBuffer(_playBlockSize, &pData);
3547                 EXIT_ON_ERROR(hr);
3548 
3549                 QueryPerformanceCounter(&t1);    // measure time: START
3550 
3551                 if (_ptrAudioBuffer)
3552                 {
3553                     // Request data to be played out (#bytes = _playBlockSize*_audioFrameSize)
3554                     _UnLock();
3555                     int32_t nSamples =
3556                     _ptrAudioBuffer->RequestPlayoutData(_playBlockSize);
3557                     _Lock();
3558 
3559                     if (nSamples == -1)
3560                     {
3561                         _UnLock();
3562                         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
3563                                      "failed to read data from render client");
3564                         goto Exit;
3565                     }
3566 
3567                     // Sanity check to ensure that essential states are not modified during the unlocked period
3568                     if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
3569                     {
3570                         _UnLock();
3571                         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, "output state has been modified during unlocked period");
3572                         goto Exit;
3573                     }
3574                     if (nSamples != static_cast<int32_t>(_playBlockSize))
3575                     {
3576                         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "nSamples(%d) != _playBlockSize(%d)", nSamples, _playBlockSize);
3577                     }
3578 
3579                     // Get the actual (stored) data
3580                     nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData);
3581                 }
3582 
3583                 QueryPerformanceCounter(&t2);    // measure time: STOP
3584                 time = (int)(t2.QuadPart-t1.QuadPart);
3585                 _playAcc += time;
3586 
3587                 DWORD dwFlags(0);
3588                 hr = _ptrRenderClient->ReleaseBuffer(_playBlockSize, dwFlags);
3589                 // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx
3590                 // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED.
3591                 EXIT_ON_ERROR(hr);
3592 
3593                 _writtenSamples += _playBlockSize;
3594             }
3595 
3596             // Check the current delay on the playout side.
3597             if (clock) {
3598               UINT64 pos = 0;
3599               UINT64 freq = 1;
3600               clock->GetPosition(&pos, NULL);
3601               clock->GetFrequency(&freq);
3602               playout_delay = ROUND((double(_writtenSamples) /
3603                   _devicePlaySampleRate - double(pos) / freq) * 1000.0);
3604               _sndCardPlayDelay = playout_delay;
3605             }
3606 
3607             _UnLock();
3608         }
3609     }
3610 
3611     // ------------------ THREAD LOOP ------------------ <<
3612 
3613     SleepMs(static_cast<DWORD>(endpointBufferSizeMS+0.5));
3614     hr = _ptrClientOut->Stop();
3615 
3616 Exit:
3617     SAFE_RELEASE(clock);
3618 
3619     if (FAILED(hr))
3620     {
3621         _ptrClientOut->Stop();
3622         _UnLock();
3623         _TraceCOMError(hr);
3624     }
3625 
3626     if (_winSupportAvrt)
3627     {
3628         if (NULL != hMmTask)
3629         {
3630             _PAvRevertMmThreadCharacteristics(hMmTask);
3631         }
3632     }
3633 
3634     _Lock();
3635 
3636     if (keepPlaying)
3637     {
3638         if (_ptrClientOut != NULL)
3639         {
3640             hr = _ptrClientOut->Stop();
3641             if (FAILED(hr))
3642             {
3643                 _TraceCOMError(hr);
3644             }
3645             hr = _ptrClientOut->Reset();
3646             if (FAILED(hr))
3647             {
3648                 _TraceCOMError(hr);
3649             }
3650         }
3651         // Trigger callback from module process thread
3652         _playError = 1;
3653         WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kPlayoutError message posted: rendering thread has ended pre-maturely");
3654     }
3655     else
3656     {
3657         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_Rendering thread is now terminated properly");
3658     }
3659 
3660     _UnLock();
3661 
3662     return (DWORD)hr;
3663 }
3664 
InitCaptureThreadPriority()3665 DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority()
3666 {
3667     _hMmTask = NULL;
3668 
3669     rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread");
3670 
3671     // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
3672     // priority.
3673     if (_winSupportAvrt)
3674     {
3675         DWORD taskIndex(0);
3676         _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
3677         if (_hMmTask)
3678         {
3679             if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL))
3680             {
3681                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3682                     "failed to boost rec-thread using MMCSS");
3683             }
3684             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
3685                 "capture thread is now registered with MMCSS (taskIndex=%d)",
3686                 taskIndex);
3687         }
3688         else
3689         {
3690             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3691                 "failed to enable MMCSS on capture thread (err=%d)",
3692                 GetLastError());
3693             _TraceCOMError(GetLastError());
3694         }
3695     }
3696 
3697     return S_OK;
3698 }
3699 
RevertCaptureThreadPriority()3700 void AudioDeviceWindowsCore::RevertCaptureThreadPriority()
3701 {
3702     if (_winSupportAvrt)
3703     {
3704         if (NULL != _hMmTask)
3705         {
3706             _PAvRevertMmThreadCharacteristics(_hMmTask);
3707         }
3708     }
3709 
3710     _hMmTask = NULL;
3711 }
3712 
// Capture thread body used when the built-in AEC DMO is enabled. Instead of
// waiting on WASAPI buffer events, it polls the DMO every 5 ms for
// AEC-processed 10 ms frames and delivers them through _ptrAudioBuffer.
// Returns 0/S_OK on clean shutdown, a failure code otherwise; on failure it
// also posts _recError so the module process thread can issue a callback.
DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO()
{
    assert(_mediaBuffer != NULL);
    bool keepRecording = true;

    // Initialize COM as MTA in this thread.
    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
    if (!comInit.succeeded()) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
        "failed to initialize COM in polling DMO thread");
      return 1;
    }

    // Name the thread and request an MMCSS priority boost (best effort).
    HRESULT hr = InitCaptureThreadPriority();
    if (FAILED(hr))
    {
        return hr;
    }

    // Set event which will ensure that the calling thread modifies the
    // recording state to true.
    SetEvent(_hCaptureStartedEvent);

    // >> ---------------------------- THREAD LOOP ----------------------------
    while (keepRecording)
    {
        // Poll the DMO every 5 ms.
        // (The same interval used in the Wave implementation.)
        DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
        switch (waitResult)
        {
        case WAIT_OBJECT_0:         // _hShutdownCaptureEvent
            keepRecording = false;
            break;
        case WAIT_TIMEOUT:          // timeout notification
            break;
        default:                    // unexpected error
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                "Unknown wait termination on capture side");
            hr = -1; // To signal an error callback.
            keepRecording = false;
            break;
        }

        // Inner loop: drain the DMO until it reports no more complete frames.
        while (keepRecording)
        {
            // Holds _critSect for this iteration; note that the lock is
            // temporarily dropped below via _UnLock()/_Lock() around the
            // delivery callback.
            CriticalSectionScoped critScoped(&_critSect);

            DWORD dwStatus = 0;
            {
                DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
                dmoBuffer.pBuffer = _mediaBuffer;
                dmoBuffer.pBuffer->AddRef();

                // Poll the DMO for AEC processed capture data. The DMO will
                // copy available data to |dmoBuffer|, and should only return
                // 10 ms frames. The value of |dwStatus| should be ignored.
                hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
                SAFE_RELEASE(dmoBuffer.pBuffer);
                // Keep the per-buffer status flags; they are inspected below
                // to decide whether ProcessOutput must be called again.
                dwStatus = dmoBuffer.dwStatus;
            }
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
                keepRecording = false;
                assert(false);
                break;
            }

            ULONG bytesProduced = 0;
            BYTE* data;
            // Get a pointer to the data buffer. This should be valid until
            // the next call to ProcessOutput.
            hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
                keepRecording = false;
                assert(false);
                break;
            }

            // TODO(andrew): handle AGC.

            if (bytesProduced > 0)
            {
                const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
                // TODO(andrew): verify that this is always satisfied. It might
                // be that ProcessOutput will try to return more than 10 ms if
                // we fail to call it frequently enough.
                assert(kSamplesProduced == static_cast<int>(_recBlockSize));
                assert(sizeof(BYTE) == sizeof(int8_t));
                _ptrAudioBuffer->SetRecordedBuffer(
                    reinterpret_cast<int8_t*>(data),
                    kSamplesProduced);
                // The DMO already performed AEC, so no play/rec delays are
                // reported to the VQE here.
                _ptrAudioBuffer->SetVQEData(0, 0, 0);

                _UnLock();  // Release lock while making the callback.
                _ptrAudioBuffer->DeliverRecordedData();
                _Lock();
            }

            // Reset length to indicate buffer availability.
            hr = _mediaBuffer->SetLength(0);
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
                keepRecording = false;
                assert(false);
                break;
            }

            if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE))
            {
                // The DMO cannot currently produce more data. This is the
                // normal case; otherwise it means the DMO had more than 10 ms
                // of data available and ProcessOutput should be called again.
                break;
            }
        }
    }
    // ---------------------------- THREAD LOOP ---------------------------- <<

    RevertCaptureThreadPriority();

    if (FAILED(hr))
    {
        // Trigger callback from module process thread
        _recError = 1;
        WEBRTC_TRACE(kTraceError, kTraceUtility, _id,
            "kRecordingError message posted: capturing thread has ended "
            "prematurely");
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "Capturing thread is now terminated properly");
    }

    return hr;
}
3854 
3855 
3856 // ----------------------------------------------------------------------------
3857 //  DoCaptureThread
3858 // ----------------------------------------------------------------------------
3859 
DoCaptureThread()3860 DWORD AudioDeviceWindowsCore::DoCaptureThread()
3861 {
3862 
3863     bool keepRecording = true;
3864     HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent};
3865     HRESULT hr = S_OK;
3866 
3867     LARGE_INTEGER t1;
3868     LARGE_INTEGER t2;
3869     int32_t time(0);
3870 
3871     BYTE* syncBuffer = NULL;
3872     UINT32 syncBufIndex = 0;
3873 
3874     _readSamples = 0;
3875 
3876     // Initialize COM as MTA in this thread.
3877     ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
3878     if (!comInit.succeeded()) {
3879       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3880         "failed to initialize COM in capture thread");
3881       return 1;
3882     }
3883 
3884     hr = InitCaptureThreadPriority();
3885     if (FAILED(hr))
3886     {
3887         return hr;
3888     }
3889 
3890     _Lock();
3891 
3892     // Get size of capturing buffer (length is expressed as the number of audio frames the buffer can hold).
3893     // This value is fixed during the capturing session.
3894     //
3895     UINT32 bufferLength = 0;
3896     if (_ptrClientIn == NULL)
3897     {
3898       WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3899         "input state has been modified before capture loop starts.");
3900       return 1;
3901     }
3902     hr = _ptrClientIn->GetBufferSize(&bufferLength);
3903     EXIT_ON_ERROR(hr);
3904     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] size of buffer       : %u", bufferLength);
3905 
3906     // Allocate memory for sync buffer.
3907     // It is used for compensation between native 44.1 and internal 44.0 and
3908     // for cases when the capture buffer is larger than 10ms.
3909     //
3910     const UINT32 syncBufferSize = 2*(bufferLength * _recAudioFrameSize);
3911     syncBuffer = new BYTE[syncBufferSize];
3912     if (syncBuffer == NULL)
3913     {
3914         return (DWORD)E_POINTER;
3915     }
3916     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] size of sync buffer  : %u [bytes]", syncBufferSize);
3917 
3918     // Get maximum latency for the current stream (will not change for the lifetime of the IAudioClient object).
3919     //
3920     REFERENCE_TIME latency;
3921     _ptrClientIn->GetStreamLatency(&latency);
3922     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] max stream latency   : %u (%3.2f ms)",
3923         (DWORD)latency, (double)(latency / 10000.0));
3924 
3925     // Get the length of the periodic interval separating successive processing passes by
3926     // the audio engine on the data in the endpoint buffer.
3927     //
3928     REFERENCE_TIME devPeriod = 0;
3929     REFERENCE_TIME devPeriodMin = 0;
3930     _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin);
3931     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] device period        : %u (%3.2f ms)",
3932         (DWORD)devPeriod, (double)(devPeriod / 10000.0));
3933 
3934     double extraDelayMS = (double)((latency + devPeriod) / 10000.0);
3935     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] extraDelayMS         : %3.2f", extraDelayMS);
3936 
3937     double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_recBlockSize);
3938     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] endpointBufferSizeMS : %3.2f", endpointBufferSizeMS);
3939 
3940     // Start up the capturing stream.
3941     //
3942     hr = _ptrClientIn->Start();
3943     EXIT_ON_ERROR(hr);
3944 
3945     _UnLock();
3946 
3947     // Set event which will ensure that the calling thread modifies the recording state to true.
3948     //
3949     SetEvent(_hCaptureStartedEvent);
3950 
3951     // >> ---------------------------- THREAD LOOP ----------------------------
3952 
3953     while (keepRecording)
3954     {
3955         // Wait for a capture notification event or a shutdown event
3956         DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
3957         switch (waitResult)
3958         {
3959         case WAIT_OBJECT_0 + 0:        // _hShutdownCaptureEvent
3960             keepRecording = false;
3961             break;
3962         case WAIT_OBJECT_0 + 1:        // _hCaptureSamplesReadyEvent
3963             break;
3964         case WAIT_TIMEOUT:            // timeout notification
3965             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "capture event timed out after 0.5 seconds");
3966             goto Exit;
3967         default:                    // unexpected error
3968             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "unknown wait termination on capture side");
3969             goto Exit;
3970         }
3971 
3972         while (keepRecording)
3973         {
3974             BYTE *pData = 0;
3975             UINT32 framesAvailable = 0;
3976             DWORD flags = 0;
3977             UINT64 recTime = 0;
3978             UINT64 recPos = 0;
3979 
3980             _Lock();
3981 
3982             // Sanity check to ensure that essential states are not modified
3983             // during the unlocked period.
3984             if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
3985             {
3986                 _UnLock();
3987                 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
3988                     "input state has been modified during unlocked period");
3989                 goto Exit;
3990             }
3991 
3992             //  Find out how much capture data is available
3993             //
3994             hr = _ptrCaptureClient->GetBuffer(&pData,           // packet which is ready to be read by used
3995                                               &framesAvailable, // #frames in the captured packet (can be zero)
3996                                               &flags,           // support flags (check)
3997                                               &recPos,          // device position of first audio frame in data packet
3998                                               &recTime);        // value of performance counter at the time of recording the first audio frame
3999 
4000             if (SUCCEEDED(hr))
4001             {
4002                 if (AUDCLNT_S_BUFFER_EMPTY == hr)
4003                 {
4004                     // Buffer was empty => start waiting for a new capture notification event
4005                     _UnLock();
4006                     break;
4007                 }
4008 
4009                 if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
4010                 {
4011                     // Treat all of the data in the packet as silence and ignore the actual data values.
4012                     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "AUDCLNT_BUFFERFLAGS_SILENT");
4013                     pData = NULL;
4014                 }
4015 
4016                 assert(framesAvailable != 0);
4017 
4018                 if (pData)
4019                 {
4020                     CopyMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], pData, framesAvailable*_recAudioFrameSize);
4021                 }
4022                 else
4023                 {
4024                     ZeroMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], framesAvailable*_recAudioFrameSize);
4025                 }
4026                 assert(syncBufferSize >= (syncBufIndex*_recAudioFrameSize)+framesAvailable*_recAudioFrameSize);
4027 
4028                 // Release the capture buffer
4029                 //
4030                 hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable);
4031                 EXIT_ON_ERROR(hr);
4032 
4033                 _readSamples += framesAvailable;
4034                 syncBufIndex += framesAvailable;
4035 
4036                 QueryPerformanceCounter(&t1);
4037 
4038                 // Get the current recording and playout delay.
4039                 uint32_t sndCardRecDelay = (uint32_t)
4040                     (((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime)
4041                         / 10000) + (10*syncBufIndex) / _recBlockSize - 10);
4042                 uint32_t sndCardPlayDelay =
4043                     static_cast<uint32_t>(_sndCardPlayDelay);
4044 
4045                 _sndCardRecDelay = sndCardRecDelay;
4046 
4047                 while (syncBufIndex >= _recBlockSize)
4048                 {
4049                     if (_ptrAudioBuffer)
4050                     {
4051                         _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer, _recBlockSize);
4052                         _ptrAudioBuffer->SetVQEData(sndCardPlayDelay,
4053                                                     sndCardRecDelay,
4054                                                     0);
4055 
4056                         _ptrAudioBuffer->SetTypingStatus(KeyPressed());
4057 
4058                         QueryPerformanceCounter(&t1);    // measure time: START
4059 
4060                         _UnLock();  // release lock while making the callback
4061                         _ptrAudioBuffer->DeliverRecordedData();
4062                         _Lock();    // restore the lock
4063 
4064                         QueryPerformanceCounter(&t2);    // measure time: STOP
4065 
4066                         // Measure "average CPU load".
4067                         // Basically what we do here is to measure how many percent of our 10ms period
4068                         // is used for encoding and decoding. This value shuld be used as a warning indicator
4069                         // only and not seen as an absolute value. Running at ~100% will lead to bad QoS.
4070                         time = (int)(t2.QuadPart - t1.QuadPart);
4071                         _avgCPULoad = (float)(_avgCPULoad*.99 + (time + _playAcc) / (double)(_perfCounterFreq.QuadPart));
4072                         _playAcc = 0;
4073 
4074                         // Sanity check to ensure that essential states are not modified during the unlocked period
4075                         if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
4076                         {
4077                             _UnLock();
4078                             WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, "input state has been modified during unlocked period");
4079                             goto Exit;
4080                         }
4081                     }
4082 
4083                     // store remaining data which was not able to deliver as 10ms segment
4084                     MoveMemory(&syncBuffer[0], &syncBuffer[_recBlockSize*_recAudioFrameSize], (syncBufIndex-_recBlockSize)*_recAudioFrameSize);
4085                     syncBufIndex -= _recBlockSize;
4086                     sndCardRecDelay -= 10;
4087                 }
4088 
4089                 if (_AGC)
4090                 {
4091                     uint32_t newMicLevel = _ptrAudioBuffer->NewMicLevel();
4092                     if (newMicLevel != 0)
4093                     {
4094                         // The VQE will only deliver non-zero microphone levels when a change is needed.
4095                         // Set this new mic level (received from the observer as return value in the callback).
4096                         WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "AGC change of volume: new=%u",  newMicLevel);
4097                         // We store this outside of the audio buffer to avoid
4098                         // having it overwritten by the getter thread.
4099                         _newMicLevel = newMicLevel;
4100                         SetEvent(_hSetCaptureVolumeEvent);
4101                     }
4102                 }
4103             }
4104             else
4105             {
4106                 // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the audio samples
4107                 // must wait for the next processing pass. The client might benefit from keeping a count
4108                 // of the failed GetBuffer calls. If GetBuffer returns this error repeatedly, the client
4109                 // can start a new processing loop after shutting down the current client by calling
4110                 // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio client.
4111                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
4112                     "IAudioCaptureClient::GetBuffer returned AUDCLNT_E_BUFFER_ERROR, hr = 0x%08X",  hr);
4113                 goto Exit;
4114             }
4115 
4116             _UnLock();
4117         }
4118     }
4119 
4120     // ---------------------------- THREAD LOOP ---------------------------- <<
4121 
4122     if (_ptrClientIn)
4123     {
4124         hr = _ptrClientIn->Stop();
4125     }
4126 
4127 Exit:
4128     if (FAILED(hr))
4129     {
4130         _ptrClientIn->Stop();
4131         _UnLock();
4132         _TraceCOMError(hr);
4133     }
4134 
4135     RevertCaptureThreadPriority();
4136 
4137     _Lock();
4138 
4139     if (keepRecording)
4140     {
4141         if (_ptrClientIn != NULL)
4142         {
4143             hr = _ptrClientIn->Stop();
4144             if (FAILED(hr))
4145             {
4146                 _TraceCOMError(hr);
4147             }
4148             hr = _ptrClientIn->Reset();
4149             if (FAILED(hr))
4150             {
4151                 _TraceCOMError(hr);
4152             }
4153         }
4154 
4155         // Trigger callback from module process thread
4156         _recError = 1;
4157         WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kRecordingError message posted: capturing thread has ended pre-maturely");
4158     }
4159     else
4160     {
4161         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_Capturing thread is now terminated properly");
4162     }
4163 
4164     SAFE_RELEASE(_ptrClientIn);
4165     SAFE_RELEASE(_ptrCaptureClient);
4166 
4167     _UnLock();
4168 
4169     if (syncBuffer)
4170     {
4171         delete [] syncBuffer;
4172     }
4173 
4174     return (DWORD)hr;
4175 }
4176 
EnableBuiltInAEC(bool enable)4177 int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable)
4178 {
4179 
4180     if (_recIsInitialized)
4181     {
4182         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
4183             "Attempt to set Windows AEC with recording already initialized");
4184         return -1;
4185     }
4186 
4187     if (_dmo == NULL)
4188     {
4189         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
4190             "Built-in AEC DMO was not initialized properly at create time");
4191         return -1;
4192     }
4193 
4194     _builtInAecEnabled = enable;
4195     return 0;
4196 }
4197 
// Returns the cached flag last set via EnableBuiltInAEC(); this does not
// query the DMO itself.
bool AudioDeviceWindowsCore::BuiltInAECIsEnabled() const
{
    return _builtInAecEnabled;
}
4202 
SetDMOProperties()4203 int AudioDeviceWindowsCore::SetDMOProperties()
4204 {
4205     HRESULT hr = S_OK;
4206     assert(_dmo != NULL);
4207 
4208     rtc::scoped_refptr<IPropertyStore> ps;
4209     {
4210         IPropertyStore* ptrPS = NULL;
4211         hr = _dmo->QueryInterface(IID_IPropertyStore,
4212                                   reinterpret_cast<void**>(&ptrPS));
4213         if (FAILED(hr) || ptrPS == NULL)
4214         {
4215             _TraceCOMError(hr);
4216             return -1;
4217         }
4218         ps = ptrPS;
4219         SAFE_RELEASE(ptrPS);
4220     }
4221 
4222     // Set the AEC system mode.
4223     // SINGLE_CHANNEL_AEC - AEC processing only.
4224     if (SetVtI4Property(ps,
4225                         MFPKEY_WMAAECMA_SYSTEM_MODE,
4226                         SINGLE_CHANNEL_AEC))
4227     {
4228         return -1;
4229     }
4230 
4231     // Set the AEC source mode.
4232     // VARIANT_TRUE - Source mode (we poll the AEC for captured data).
4233     if (SetBoolProperty(ps,
4234                         MFPKEY_WMAAECMA_DMO_SOURCE_MODE,
4235                         VARIANT_TRUE) == -1)
4236     {
4237         return -1;
4238     }
4239 
4240     // Enable the feature mode.
4241     // This lets us override all the default processing settings below.
4242     if (SetBoolProperty(ps,
4243                         MFPKEY_WMAAECMA_FEATURE_MODE,
4244                         VARIANT_TRUE) == -1)
4245     {
4246         return -1;
4247     }
4248 
4249     // Disable analog AGC (default enabled).
4250     if (SetBoolProperty(ps,
4251                         MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER,
4252                         VARIANT_FALSE) == -1)
4253     {
4254         return -1;
4255     }
4256 
4257     // Disable noise suppression (default enabled).
4258     // 0 - Disabled, 1 - Enabled
4259     if (SetVtI4Property(ps,
4260                         MFPKEY_WMAAECMA_FEATR_NS,
4261                         0) == -1)
4262     {
4263         return -1;
4264     }
4265 
4266     // Relevant parameters to leave at default settings:
4267     // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled).
4268     // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled).
4269     // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms).
4270     //   TODO(andrew): investigate decresing the length to 128 ms.
4271     // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0).
4272     //   0 is automatic; defaults to 160 samples (or 10 ms frames at the
4273     //   selected 16 kHz) as long as mic array processing is disabled.
4274     // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled).
4275     // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled).
4276 
4277     // Set the devices selected by VoE. If using a default device, we need to
4278     // search for the device index.
4279     int inDevIndex = _inputDeviceIndex;
4280     int outDevIndex = _outputDeviceIndex;
4281     if (!_usingInputDeviceIndex)
4282     {
4283         ERole role = eCommunications;
4284         if (_inputDevice == AudioDeviceModule::kDefaultDevice)
4285         {
4286             role = eConsole;
4287         }
4288 
4289         if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1)
4290         {
4291             return -1;
4292         }
4293     }
4294 
4295     if (!_usingOutputDeviceIndex)
4296     {
4297         ERole role = eCommunications;
4298         if (_outputDevice == AudioDeviceModule::kDefaultDevice)
4299         {
4300             role = eConsole;
4301         }
4302 
4303         if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1)
4304         {
4305             return -1;
4306         }
4307     }
4308 
4309     DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) +
4310                      static_cast<uint32_t>(0x0000ffff & inDevIndex);
4311     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
4312         "Capture device index: %d, render device index: %d",
4313         inDevIndex, outDevIndex);
4314     if (SetVtI4Property(ps,
4315                         MFPKEY_WMAAECMA_DEVICE_INDEXES,
4316                         devIndex) == -1)
4317     {
4318         return -1;
4319     }
4320 
4321     return 0;
4322 }
4323 
SetBoolProperty(IPropertyStore * ptrPS,REFPROPERTYKEY key,VARIANT_BOOL value)4324 int AudioDeviceWindowsCore::SetBoolProperty(IPropertyStore* ptrPS,
4325                                             REFPROPERTYKEY key,
4326                                             VARIANT_BOOL value)
4327 {
4328     PROPVARIANT pv;
4329     PropVariantInit(&pv);
4330     pv.vt = VT_BOOL;
4331     pv.boolVal = value;
4332     HRESULT hr = ptrPS->SetValue(key, pv);
4333     PropVariantClear(&pv);
4334     if (FAILED(hr))
4335     {
4336         _TraceCOMError(hr);
4337         return -1;
4338     }
4339     return 0;
4340 }
4341 
SetVtI4Property(IPropertyStore * ptrPS,REFPROPERTYKEY key,LONG value)4342 int AudioDeviceWindowsCore::SetVtI4Property(IPropertyStore* ptrPS,
4343                                             REFPROPERTYKEY key,
4344                                             LONG value)
4345 {
4346     PROPVARIANT pv;
4347     PropVariantInit(&pv);
4348     pv.vt = VT_I4;
4349     pv.lVal = value;
4350     HRESULT hr = ptrPS->SetValue(key, pv);
4351     PropVariantClear(&pv);
4352     if (FAILED(hr))
4353     {
4354         _TraceCOMError(hr);
4355         return -1;
4356     }
4357     return 0;
4358 }
4359 
4360 // ----------------------------------------------------------------------------
4361 //  _RefreshDeviceList
4362 //
4363 //  Creates a new list of endpoint rendering or capture devices after
4364 //  deleting any previously created (and possibly out-of-date) list of
4365 //  such devices.
4366 // ----------------------------------------------------------------------------
4367 
_RefreshDeviceList(EDataFlow dir)4368 int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir)
4369 {
4370     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4371 
4372     HRESULT hr = S_OK;
4373     IMMDeviceCollection *pCollection = NULL;
4374 
4375     assert(dir == eRender || dir == eCapture);
4376     assert(_ptrEnumerator != NULL);
4377 
4378     // Create a fresh list of devices using the specified direction
4379     hr = _ptrEnumerator->EnumAudioEndpoints(
4380                            dir,
4381                            DEVICE_STATE_ACTIVE,
4382                            &pCollection);
4383     if (FAILED(hr))
4384     {
4385         _TraceCOMError(hr);
4386         SAFE_RELEASE(pCollection);
4387         return -1;
4388     }
4389 
4390     if (dir == eRender)
4391     {
4392         SAFE_RELEASE(_ptrRenderCollection);
4393         _ptrRenderCollection = pCollection;
4394     }
4395     else
4396     {
4397         SAFE_RELEASE(_ptrCaptureCollection);
4398         _ptrCaptureCollection = pCollection;
4399     }
4400 
4401     return 0;
4402 }
4403 
4404 // ----------------------------------------------------------------------------
4405 //  _DeviceListCount
4406 //
4407 //  Gets a count of the endpoint rendering or capture devices in the
4408 //  current list of such devices.
4409 // ----------------------------------------------------------------------------
4410 
_DeviceListCount(EDataFlow dir)4411 int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir)
4412 {
4413     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4414 
4415     HRESULT hr = S_OK;
4416     UINT count = 0;
4417 
4418     assert(eRender == dir || eCapture == dir);
4419 
4420     if (eRender == dir && NULL != _ptrRenderCollection)
4421     {
4422         hr = _ptrRenderCollection->GetCount(&count);
4423     }
4424     else if (NULL != _ptrCaptureCollection)
4425     {
4426         hr = _ptrCaptureCollection->GetCount(&count);
4427     }
4428 
4429     if (FAILED(hr))
4430     {
4431         _TraceCOMError(hr);
4432         return -1;
4433     }
4434 
4435     return static_cast<int16_t> (count);
4436 }
4437 
4438 // ----------------------------------------------------------------------------
4439 //  _GetListDeviceName
4440 //
4441 //  Gets the friendly name of an endpoint rendering or capture device
4442 //  from the current list of such devices. The caller uses an index
4443 //  into the list to identify the device.
4444 //
4445 //  Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
4446 //  in _RefreshDeviceList().
4447 // ----------------------------------------------------------------------------
4448 
_GetListDeviceName(EDataFlow dir,int index,LPWSTR szBuffer,int bufferLen)4449 int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
4450 {
4451     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4452 
4453     HRESULT hr = S_OK;
4454     IMMDevice *pDevice = NULL;
4455 
4456     assert(dir == eRender || dir == eCapture);
4457 
4458     if (eRender == dir && NULL != _ptrRenderCollection)
4459     {
4460         hr = _ptrRenderCollection->Item(index, &pDevice);
4461     }
4462     else if (NULL != _ptrCaptureCollection)
4463     {
4464         hr = _ptrCaptureCollection->Item(index, &pDevice);
4465     }
4466 
4467     if (FAILED(hr))
4468     {
4469         _TraceCOMError(hr);
4470         SAFE_RELEASE(pDevice);
4471         return -1;
4472     }
4473 
4474     int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
4475     SAFE_RELEASE(pDevice);
4476     return res;
4477 }
4478 
4479 // ----------------------------------------------------------------------------
4480 //  _GetDefaultDeviceName
4481 //
4482 //  Gets the friendly name of an endpoint rendering or capture device
4483 //  given a specified device role.
4484 //
4485 //  Uses: _ptrEnumerator
4486 // ----------------------------------------------------------------------------
4487 
_GetDefaultDeviceName(EDataFlow dir,ERole role,LPWSTR szBuffer,int bufferLen)4488 int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
4489 {
4490     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4491 
4492     HRESULT hr = S_OK;
4493     IMMDevice *pDevice = NULL;
4494 
4495     assert(dir == eRender || dir == eCapture);
4496     assert(role == eConsole || role == eCommunications);
4497     assert(_ptrEnumerator != NULL);
4498 
4499     hr = _ptrEnumerator->GetDefaultAudioEndpoint(
4500                            dir,
4501                            role,
4502                            &pDevice);
4503 
4504     if (FAILED(hr))
4505     {
4506         _TraceCOMError(hr);
4507         SAFE_RELEASE(pDevice);
4508         return -1;
4509     }
4510 
4511     int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
4512     SAFE_RELEASE(pDevice);
4513     return res;
4514 }
4515 
4516 // ----------------------------------------------------------------------------
4517 //  _GetListDeviceID
4518 //
4519 //  Gets the unique ID string of an endpoint rendering or capture device
4520 //  from the current list of such devices. The caller uses an index
4521 //  into the list to identify the device.
4522 //
4523 //  Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
4524 //  in _RefreshDeviceList().
4525 // ----------------------------------------------------------------------------
4526 
_GetListDeviceID(EDataFlow dir,int index,LPWSTR szBuffer,int bufferLen)4527 int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
4528 {
4529     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4530 
4531     HRESULT hr = S_OK;
4532     IMMDevice *pDevice = NULL;
4533 
4534     assert(dir == eRender || dir == eCapture);
4535 
4536     if (eRender == dir && NULL != _ptrRenderCollection)
4537     {
4538         hr = _ptrRenderCollection->Item(index, &pDevice);
4539     }
4540     else if (NULL != _ptrCaptureCollection)
4541     {
4542         hr = _ptrCaptureCollection->Item(index, &pDevice);
4543     }
4544 
4545     if (FAILED(hr))
4546     {
4547         _TraceCOMError(hr);
4548         SAFE_RELEASE(pDevice);
4549         return -1;
4550     }
4551 
4552     int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
4553     SAFE_RELEASE(pDevice);
4554     return res;
4555 }
4556 
4557 // ----------------------------------------------------------------------------
4558 //  _GetDefaultDeviceID
4559 //
//  Gets the unique device ID of an endpoint rendering or capture device
4561 //  given a specified device role.
4562 //
4563 //  Uses: _ptrEnumerator
4564 // ----------------------------------------------------------------------------
4565 
_GetDefaultDeviceID(EDataFlow dir,ERole role,LPWSTR szBuffer,int bufferLen)4566 int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
4567 {
4568     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4569 
4570     HRESULT hr = S_OK;
4571     IMMDevice *pDevice = NULL;
4572 
4573     assert(dir == eRender || dir == eCapture);
4574     assert(role == eConsole || role == eCommunications);
4575     assert(_ptrEnumerator != NULL);
4576 
4577     hr = _ptrEnumerator->GetDefaultAudioEndpoint(
4578                            dir,
4579                            role,
4580                            &pDevice);
4581 
4582     if (FAILED(hr))
4583     {
4584         _TraceCOMError(hr);
4585         SAFE_RELEASE(pDevice);
4586         return -1;
4587     }
4588 
4589     int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
4590     SAFE_RELEASE(pDevice);
4591     return res;
4592 }
4593 
// Finds the position of the default device (for |dir|/|role|) inside the
// cached device collection by comparing unique endpoint ID strings.
// On success writes the zero-based collection index to |*index| and returns
// 0; returns -1 when the default ID cannot be obtained, the collection is
// missing, a COM call fails, or no collection entry matches.
int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir,
                                                       ERole role,
                                                       int* index)
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);

    HRESULT hr = S_OK;
    WCHAR szDefaultDeviceID[MAX_PATH] = {0};
    WCHAR szDeviceID[MAX_PATH] = {0};

    // Both ID buffers must be the same size so wcsncmp below compares the
    // full buffers.
    const size_t kDeviceIDLength = sizeof(szDeviceID)/sizeof(szDeviceID[0]);
    assert(kDeviceIDLength ==
        sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));

    // Get the unique ID string of the default endpoint; this is the value we
    // search for in the collection.
    if (_GetDefaultDeviceID(dir,
                            role,
                            szDefaultDeviceID,
                            kDeviceIDLength) == -1)
    {
        return -1;
    }

    IMMDeviceCollection* collection = _ptrCaptureCollection;
    if (dir == eRender)
    {
        collection = _ptrRenderCollection;
    }

    if (!collection)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "Device collection not valid");
        return -1;
    }

    UINT count = 0;
    hr = collection->GetCount(&count);
    if (FAILED(hr))
    {
        _TraceCOMError(hr);
        return -1;
    }

    // Linear scan: compare each device's ID against the default device's ID.
    *index = -1;
    for (UINT i = 0; i < count; i++)
    {
        memset(szDeviceID, 0, sizeof(szDeviceID));
        rtc::scoped_refptr<IMMDevice> device;
        {
            IMMDevice* ptrDevice = NULL;
            hr = collection->Item(i, &ptrDevice);
            if (FAILED(hr) || ptrDevice == NULL)
            {
                _TraceCOMError(hr);
                return -1;
            }
            // scoped_refptr assignment takes its own reference, so the raw
            // reference returned by Item() is released immediately.
            device = ptrDevice;
            SAFE_RELEASE(ptrDevice);
        }

        if (_GetDeviceID(device, szDeviceID, kDeviceIDLength) == -1)
        {
           return -1;
        }

        if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0)
        {
            // Found a match.
            *index = i;
            break;
        }

    }

    if (*index == -1)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "Unable to find collection index for default device");
        return -1;
    }

    return 0;
}
4677 
4678 // ----------------------------------------------------------------------------
4679 //  _GetDeviceName
4680 // ----------------------------------------------------------------------------
4681 
// Copies the friendly name of |pDevice| into |pszBuffer| (capacity
// |bufferLen| characters, truncating if needed). If |pDevice| is NULL or any
// property query fails, a "<Device not available>" placeholder is copied
// instead. Always returns 0.
int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice,
                                               LPWSTR pszBuffer,
                                               int bufferLen)
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);

    static const WCHAR szDefault[] = L"<Device not available>";

    // hr acts as the running success/failure state through the sequence of
    // property-store calls below; it starts failed so a NULL device falls
    // straight through to the placeholder copy.
    HRESULT hr = E_FAIL;
    IPropertyStore *pProps = NULL;
    PROPVARIANT varName;

    assert(pszBuffer != NULL);
    assert(bufferLen > 0);

    if (pDevice != NULL)
    {
        hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
        if (FAILED(hr))
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "IMMDevice::OpenPropertyStore failed, hr = 0x%08X", hr);
        }
    }

    // Initialize container for property value.
    PropVariantInit(&varName);

    if (SUCCEEDED(hr))
    {
        // Get the endpoint device's friendly-name property.
        hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
        if (FAILED(hr))
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "IPropertyStore::GetValue failed, hr = 0x%08X", hr);
        }
    }

    // GetValue() can succeed without producing a value; treat that as failure.
    if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt))
    {
        hr = E_FAIL;
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "IPropertyStore::GetValue returned no value, hr = 0x%08X", hr);
    }

    if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt))
    {
        // The returned value is not a wide null terminated string.
        hr = E_UNEXPECTED;
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "IPropertyStore::GetValue returned unexpected type, hr = 0x%08X", hr);
    }

    if (SUCCEEDED(hr) && (varName.pwszVal != NULL))
    {
        // Copy the valid device name to the provided output buffer.
        wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
    }
    else
    {
        // Failed to find the device name.
        wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
    }

    PropVariantClear(&varName);
    SAFE_RELEASE(pProps);

    return 0;
}
4752 
4753 // ----------------------------------------------------------------------------
4754 //  _GetDeviceID
4755 // ----------------------------------------------------------------------------
4756 
_GetDeviceID(IMMDevice * pDevice,LPWSTR pszBuffer,int bufferLen)4757 int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen)
4758 {
4759     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4760 
4761     static const WCHAR szDefault[] = L"<Device not available>";
4762 
4763     HRESULT hr = E_FAIL;
4764     LPWSTR pwszID = NULL;
4765 
4766     assert(pszBuffer != NULL);
4767     assert(bufferLen > 0);
4768 
4769     if (pDevice != NULL)
4770     {
4771         hr = pDevice->GetId(&pwszID);
4772     }
4773 
4774     if (hr == S_OK)
4775     {
4776         // Found the device ID.
4777         wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
4778     }
4779     else
4780     {
4781         // Failed to find the device ID.
4782         wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
4783     }
4784 
4785     CoTaskMemFree(pwszID);
4786     return 0;
4787 }
4788 
4789 // ----------------------------------------------------------------------------
4790 //  _GetDefaultDevice
4791 // ----------------------------------------------------------------------------
4792 
_GetDefaultDevice(EDataFlow dir,ERole role,IMMDevice ** ppDevice)4793 int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir, ERole role, IMMDevice** ppDevice)
4794 {
4795     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4796 
4797     HRESULT hr(S_OK);
4798 
4799     assert(_ptrEnumerator != NULL);
4800 
4801     hr = _ptrEnumerator->GetDefaultAudioEndpoint(
4802                                    dir,
4803                                    role,
4804                                    ppDevice);
4805     if (FAILED(hr))
4806     {
4807         _TraceCOMError(hr);
4808         return -1;
4809     }
4810 
4811     return 0;
4812 }
4813 
4814 // ----------------------------------------------------------------------------
4815 //  _GetListDevice
4816 // ----------------------------------------------------------------------------
4817 
_GetListDevice(EDataFlow dir,int index,IMMDevice ** ppDevice)4818 int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir, int index, IMMDevice** ppDevice)
4819 {
4820     HRESULT hr(S_OK);
4821 
4822     assert(_ptrEnumerator != NULL);
4823 
4824     IMMDeviceCollection *pCollection = NULL;
4825 
4826     hr = _ptrEnumerator->EnumAudioEndpoints(
4827                                dir,
4828                                DEVICE_STATE_ACTIVE,        // only active endpoints are OK
4829                                &pCollection);
4830     if (FAILED(hr))
4831     {
4832         _TraceCOMError(hr);
4833         SAFE_RELEASE(pCollection);
4834         return -1;
4835     }
4836 
4837     hr = pCollection->Item(
4838                         index,
4839                         ppDevice);
4840     if (FAILED(hr))
4841     {
4842         _TraceCOMError(hr);
4843         SAFE_RELEASE(pCollection);
4844         return -1;
4845     }
4846 
4847     return 0;
4848 }
4849 
4850 // ----------------------------------------------------------------------------
4851 //  _EnumerateEndpointDevicesAll
4852 // ----------------------------------------------------------------------------
4853 
_EnumerateEndpointDevicesAll(EDataFlow dataFlow) const4854 int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(EDataFlow dataFlow) const
4855 {
4856     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4857 
4858     assert(_ptrEnumerator != NULL);
4859 
4860     HRESULT hr = S_OK;
4861     IMMDeviceCollection *pCollection = NULL;
4862     IMMDevice *pEndpoint = NULL;
4863     IPropertyStore *pProps = NULL;
4864     IAudioEndpointVolume* pEndpointVolume = NULL;
4865     LPWSTR pwszID = NULL;
4866 
4867     // Generate a collection of audio endpoint devices in the system.
4868     // Get states for *all* endpoint devices.
4869     // Output: IMMDeviceCollection interface.
4870     hr = _ptrEnumerator->EnumAudioEndpoints(
4871                                  dataFlow,            // data-flow direction (input parameter)
4872                                  DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED,
4873                                  &pCollection);        // release interface when done
4874 
4875     EXIT_ON_ERROR(hr);
4876 
4877     // use the IMMDeviceCollection interface...
4878 
4879     UINT count = 0;
4880 
4881     // Retrieve a count of the devices in the device collection.
4882     hr = pCollection->GetCount(&count);
4883     EXIT_ON_ERROR(hr);
4884     if (dataFlow == eRender)
4885         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#rendering endpoint devices (counting all): %u", count);
4886     else if (dataFlow == eCapture)
4887         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#capturing endpoint devices (counting all): %u", count);
4888 
4889     if (count == 0)
4890     {
4891         return 0;
4892     }
4893 
4894     // Each loop prints the name of an endpoint device.
4895     for (ULONG i = 0; i < count; i++)
4896     {
4897         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Endpoint %d:", i);
4898 
4899         // Get pointer to endpoint number i.
4900         // Output: IMMDevice interface.
4901         hr = pCollection->Item(
4902                             i,
4903                             &pEndpoint);
4904         CONTINUE_ON_ERROR(hr);
4905 
4906         // use the IMMDevice interface of the specified endpoint device...
4907 
4908         // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
4909         hr = pEndpoint->GetId(&pwszID);
4910         CONTINUE_ON_ERROR(hr);
4911         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "ID string    : %S", pwszID);
4912 
4913         // Retrieve an interface to the device's property store.
4914         // Output: IPropertyStore interface.
4915         hr = pEndpoint->OpenPropertyStore(
4916                           STGM_READ,
4917                           &pProps);
4918         CONTINUE_ON_ERROR(hr);
4919 
4920         // use the IPropertyStore interface...
4921 
4922         PROPVARIANT varName;
4923         // Initialize container for property value.
4924         PropVariantInit(&varName);
4925 
4926         // Get the endpoint's friendly-name property.
4927         // Example: "Speakers (Realtek High Definition Audio)"
4928         hr = pProps->GetValue(
4929                        PKEY_Device_FriendlyName,
4930                        &varName);
4931         CONTINUE_ON_ERROR(hr);
4932         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", varName.pwszVal);
4933 
4934         // Get the endpoint's current device state
4935         DWORD dwState;
4936         hr = pEndpoint->GetState(&dwState);
4937         CONTINUE_ON_ERROR(hr);
4938         if (dwState & DEVICE_STATE_ACTIVE)
4939             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : *ACTIVE*", dwState);
4940         if (dwState & DEVICE_STATE_DISABLED)
4941             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : DISABLED", dwState);
4942         if (dwState & DEVICE_STATE_NOTPRESENT)
4943             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : NOTPRESENT", dwState);
4944         if (dwState & DEVICE_STATE_UNPLUGGED)
4945             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : UNPLUGGED", dwState);
4946 
4947         // Check the hardware volume capabilities.
4948         DWORD dwHwSupportMask = 0;
4949         hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
4950                                NULL, (void**)&pEndpointVolume);
4951         CONTINUE_ON_ERROR(hr);
4952         hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
4953         CONTINUE_ON_ERROR(hr);
4954         if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
4955             // The audio endpoint device supports a hardware volume control
4956             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "hwmask (0x%x) : HARDWARE_SUPPORT_VOLUME", dwHwSupportMask);
4957         if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
4958             // The audio endpoint device supports a hardware mute control
4959             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "hwmask (0x%x) : HARDWARE_SUPPORT_MUTE", dwHwSupportMask);
4960         if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
4961             // The audio endpoint device supports a hardware peak meter
4962             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "hwmask (0x%x) : HARDWARE_SUPPORT_METER", dwHwSupportMask);
4963 
4964         // Check the channel count (#channels in the audio stream that enters or leaves the audio endpoint device)
4965         UINT nChannelCount(0);
4966         hr = pEndpointVolume->GetChannelCount(
4967                                 &nChannelCount);
4968         CONTINUE_ON_ERROR(hr);
4969         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#channels    : %u", nChannelCount);
4970 
4971         if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
4972         {
4973             // Get the volume range.
4974             float fLevelMinDB(0.0);
4975             float fLevelMaxDB(0.0);
4976             float fVolumeIncrementDB(0.0);
4977             hr = pEndpointVolume->GetVolumeRange(
4978                                     &fLevelMinDB,
4979                                     &fLevelMaxDB,
4980                                     &fVolumeIncrementDB);
4981             CONTINUE_ON_ERROR(hr);
4982             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "volume range : %4.2f (min), %4.2f (max), %4.2f (inc) [dB]",
4983                 fLevelMinDB, fLevelMaxDB, fVolumeIncrementDB);
4984 
4985             // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is divided
4986             // into n uniform intervals of size vinc = fVolumeIncrementDB, where
4987             // n = (vmax ?vmin) / vinc.
4988             // The values vmin, vmax, and vinc are measured in decibels. The client can set
4989             // the volume level to one of n + 1 discrete values in the range from vmin to vmax.
4990             int n = (int)((fLevelMaxDB-fLevelMinDB)/fVolumeIncrementDB);
4991             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#intervals   : %d", n);
4992 
4993             // Get information about the current step in the volume range.
4994             // This method represents the volume level of the audio stream that enters or leaves
4995             // the audio endpoint device as an index or "step" in a range of discrete volume levels.
4996             // Output value nStepCount is the number of steps in the range. Output value nStep
4997             // is the step index of the current volume level. If the number of steps is n = nStepCount,
4998             // then step index nStep can assume values from 0 (minimum volume) to n ?1 (maximum volume).
4999             UINT nStep(0);
5000             UINT nStepCount(0);
5001             hr = pEndpointVolume->GetVolumeStepInfo(
5002                                     &nStep,
5003                                     &nStepCount);
5004             CONTINUE_ON_ERROR(hr);
5005             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "volume steps : %d (nStep), %d (nStepCount)", nStep, nStepCount);
5006         }
5007 Next:
5008         if (FAILED(hr)) {
5009           WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
5010                        "Error when logging device information");
5011         }
5012         CoTaskMemFree(pwszID);
5013         pwszID = NULL;
5014         PropVariantClear(&varName);
5015         SAFE_RELEASE(pProps);
5016         SAFE_RELEASE(pEndpoint);
5017         SAFE_RELEASE(pEndpointVolume);
5018     }
5019     SAFE_RELEASE(pCollection);
5020     return 0;
5021 
5022 Exit:
5023     _TraceCOMError(hr);
5024     CoTaskMemFree(pwszID);
5025     pwszID = NULL;
5026     SAFE_RELEASE(pCollection);
5027     SAFE_RELEASE(pEndpoint);
5028     SAFE_RELEASE(pEndpointVolume);
5029     SAFE_RELEASE(pProps);
5030     return -1;
5031 }
5032 
5033 // ----------------------------------------------------------------------------
5034 //  _TraceCOMError
5035 // ----------------------------------------------------------------------------
5036 
_TraceCOMError(HRESULT hr) const5037 void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const
5038 {
5039     TCHAR buf[MAXERRORLENGTH];
5040     TCHAR errorText[MAXERRORLENGTH];
5041 
5042     const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
5043                           FORMAT_MESSAGE_IGNORE_INSERTS;
5044     const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
5045 
5046     // Gets the system's human readable message string for this HRESULT.
5047     // All error message in English by default.
5048     DWORD messageLength = ::FormatMessageW(dwFlags,
5049                                            0,
5050                                            hr,
5051                                            dwLangID,
5052                                            errorText,
5053                                            MAXERRORLENGTH,
5054                                            NULL);
5055 
5056     assert(messageLength <= MAXERRORLENGTH);
5057 
5058     // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.).
5059     for (; messageLength && ::isspace(errorText[messageLength - 1]);
5060          --messageLength)
5061     {
5062         errorText[messageLength - 1] = '\0';
5063     }
5064 
5065     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
5066         "Core Audio method failed (hr=0x%x)", hr);
5067     StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
5068     StringCchCat(buf, MAXERRORLENGTH, errorText);
5069     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "%s", WideToUTF8(buf));
5070 }
5071 
5072 // ----------------------------------------------------------------------------
5073 //  WideToUTF8
5074 // ----------------------------------------------------------------------------
5075 
WideToUTF8(const TCHAR * src) const5076 char* AudioDeviceWindowsCore::WideToUTF8(const TCHAR* src) const {
5077 #ifdef UNICODE
5078     const size_t kStrLen = sizeof(_str);
5079     memset(_str, 0, kStrLen);
5080     // Get required size (in bytes) to be able to complete the conversion.
5081     int required_size = WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, 0, 0, 0);
5082     if (required_size <= kStrLen)
5083     {
5084         // Process the entire input string, including the terminating null char.
5085         if (WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, kStrLen, 0, 0) == 0)
5086             memset(_str, 0, kStrLen);
5087     }
5088     return _str;
5089 #else
5090     return const_cast<char*>(src);
5091 #endif
5092 }
5093 
5094 
KeyPressed() const5095 bool AudioDeviceWindowsCore::KeyPressed() const{
5096 
5097   int key_down = 0;
5098   for (int key = VK_SPACE; key < VK_NUMLOCK; key++) {
5099     short res = GetAsyncKeyState(key);
5100     key_down |= res & 0x1; // Get the LSB
5101   }
5102   return (key_down > 0);
5103 }
5104 }  // namespace webrtc
5105 
5106 #endif  // WEBRTC_WINDOWS_CORE_AUDIO_BUILD
5107