1 /*
2  *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include "modules/audio_device/win/core_audio_utility_win.h"
12 
13 #include <functiondiscoverykeys_devpkey.h>
14 #include <stdio.h>
15 #include <tchar.h>
16 
17 #include <iomanip>
18 #include <string>
19 #include <utility>
20 
21 #include "rtc_base/arraysize.h"
22 #include "rtc_base/logging.h"
23 #include "rtc_base/platform_thread_types.h"
24 #include "rtc_base/string_utils.h"
25 #include "rtc_base/strings/string_builder.h"
26 #include "rtc_base/win/windows_version.h"
27 
28 using Microsoft::WRL::ComPtr;
29 using webrtc::AudioDeviceName;
30 using webrtc::AudioParameters;
31 
32 namespace webrtc {
33 namespace webrtc_win {
34 namespace {
35 
36 using core_audio_utility::ErrorToString;
37 
38 // Converts from channel mask to list of included channels.
39 // Each audio data format contains channels for one or more of the positions
40 // listed below. The number of channels simply equals the number of nonzero
41 // flag bits in the |channel_mask|. The relative positions of the channels
42 // within each block of audio data always follow the same relative ordering
43 // as the flag bits in the table below. For example, if |channel_mask| contains
44 // the value 0x00000033, the format defines four audio channels that are
45 // assigned for playback to the front-left, front-right, back-left,
46 // and back-right speakers, respectively. The channel data should be interleaved
47 // in that order within each block.
48 std::string ChannelMaskToString(DWORD channel_mask) {
49   std::string ss;
50   int n = 0;
51   if (channel_mask & SPEAKER_FRONT_LEFT) {
52     ss += "FRONT_LEFT | ";
53     ++n;
54   }
55   if (channel_mask & SPEAKER_FRONT_RIGHT) {
56     ss += "FRONT_RIGHT | ";
57     ++n;
58   }
59   if (channel_mask & SPEAKER_FRONT_CENTER) {
60     ss += "FRONT_CENTER | ";
61     ++n;
62   }
63   if (channel_mask & SPEAKER_LOW_FREQUENCY) {
64     ss += "LOW_FREQUENCY | ";
65     ++n;
66   }
67   if (channel_mask & SPEAKER_BACK_LEFT) {
68     ss += "BACK_LEFT | ";
69     ++n;
70   }
71   if (channel_mask & SPEAKER_BACK_RIGHT) {
72     ss += "BACK_RIGHT | ";
73     ++n;
74   }
75   if (channel_mask & SPEAKER_FRONT_LEFT_OF_CENTER) {
76     ss += "FRONT_LEFT_OF_CENTER | ";
77     ++n;
78   }
79   if (channel_mask & SPEAKER_FRONT_RIGHT_OF_CENTER) {
80     ss += "FRONT_RIGHT_OF_CENTER | ";
81     ++n;
82   }
83   if (channel_mask & SPEAKER_BACK_CENTER) {
84     ss += "BACK_CENTER | ";
85     ++n;
86   }
87   if (channel_mask & SPEAKER_SIDE_LEFT) {
88     ss += "SIDE_LEFT | ";
89     ++n;
90   }
91   if (channel_mask & SPEAKER_SIDE_RIGHT) {
92     ss += "SIDE_RIGHT | ";
93     ++n;
94   }
95   if (channel_mask & SPEAKER_TOP_CENTER) {
96     ss += "TOP_CENTER | ";
97     ++n;
98   }
99   if (channel_mask & SPEAKER_TOP_FRONT_LEFT) {
100     ss += "TOP_FRONT_LEFT | ";
101     ++n;
102   }
103   if (channel_mask & SPEAKER_TOP_FRONT_CENTER) {
104     ss += "TOP_FRONT_CENTER | ";
105     ++n;
106   }
107   if (channel_mask & SPEAKER_TOP_FRONT_RIGHT) {
108     ss += "TOP_FRONT_RIGHT | ";
109     ++n;
110   }
111   if (channel_mask & SPEAKER_TOP_BACK_LEFT) {
112     ss += "TOP_BACK_LEFT | ";
113     ++n;
114   }
115   if (channel_mask & SPEAKER_TOP_BACK_CENTER) {
116     ss += "TOP_BACK_CENTER | ";
117     ++n;
118   }
119   if (channel_mask & SPEAKER_TOP_BACK_RIGHT) {
120     ss += "TOP_BACK_RIGHT | ";
121     ++n;
122   }
123 
124   if (!ss.empty()) {
125     // Delete last appended " | " substring.
126     ss.erase(ss.end() - 3, ss.end());
127   }
128   ss += " (";
129   ss += std::to_string(n);
130   ss += ")";
131   return ss;
132 }
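// Illustrative example of the mapping above (assuming the standard ksmedia.h
// speaker definitions): a stereo mask, KSAUDIO_SPEAKER_STEREO ==
// (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT), is converted to the string
// "FRONT_LEFT | FRONT_RIGHT (2)".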
133 
134 #if !defined(KSAUDIO_SPEAKER_1POINT1)
135 // These values are only defined in ksmedia.h after a certain version. To
136 // build cleanly for older Windows versions, this block defines the ones
137 // that are missing.
138 #define KSAUDIO_SPEAKER_1POINT1 (SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY)
139 #define KSAUDIO_SPEAKER_2POINT1 \
140   (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_LOW_FREQUENCY)
141 #define KSAUDIO_SPEAKER_3POINT0 \
142   (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER)
143 #define KSAUDIO_SPEAKER_3POINT1                                      \
144   (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
145    SPEAKER_LOW_FREQUENCY)
146 #define KSAUDIO_SPEAKER_5POINT0                                      \
147   (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
148    SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT)
149 #define KSAUDIO_SPEAKER_7POINT0                                      \
150   (SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | \
151    SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT |      \
152    SPEAKER_SIDE_RIGHT)
153 #endif
154 
155 #if !defined(AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY)
156 #define AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000
157 #define AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000
158 #endif
159 
160 // Converts the most common format tags defined in mmreg.h into string
161 // equivalents. Mainly intended for log messages.
162 const char* WaveFormatTagToString(WORD format_tag) {
163   switch (format_tag) {
164     case WAVE_FORMAT_UNKNOWN:
165       return "WAVE_FORMAT_UNKNOWN";
166     case WAVE_FORMAT_PCM:
167       return "WAVE_FORMAT_PCM";
168     case WAVE_FORMAT_IEEE_FLOAT:
169       return "WAVE_FORMAT_IEEE_FLOAT";
170     case WAVE_FORMAT_EXTENSIBLE:
171       return "WAVE_FORMAT_EXTENSIBLE";
172     default:
173       return "UNKNOWN";
174   }
175 }
176 
177 const char* RoleToString(const ERole role) {
178   switch (role) {
179     case eConsole:
180       return "Console";
181     case eMultimedia:
182       return "Multimedia";
183     case eCommunications:
184       return "Communications";
185     default:
186       return "Unsupported";
187   }
188 }
189 
190 const char* FlowToString(const EDataFlow flow) {
191   switch (flow) {
192     case eRender:
193       return "Render";
194     case eCapture:
195       return "Capture";
196     case eAll:
197       return "Render or Capture";
198     default:
199       return "Unsupported";
200   }
201 }
202 
203 bool LoadAudiosesDll() {
204   static const wchar_t* const kAudiosesDLL =
205       L"%WINDIR%\\system32\\audioses.dll";
206   wchar_t path[MAX_PATH] = {0};
207   ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
208   RTC_DLOG(INFO) << rtc::ToUtf8(path);
209   return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
210           nullptr);
211 }
212 
213 bool LoadAvrtDll() {
214   static const wchar_t* const kAvrtDLL = L"%WINDIR%\\system32\\Avrt.dll";
215   wchar_t path[MAX_PATH] = {0};
216   ExpandEnvironmentStringsW(kAvrtDLL, path, arraysize(path));
217   RTC_DLOG(INFO) << rtc::ToUtf8(path);
218   return (LoadLibraryExW(path, nullptr, LOAD_WITH_ALTERED_SEARCH_PATH) !=
219           nullptr);
220 }
221 
222 ComPtr<IMMDeviceEnumerator> CreateDeviceEnumeratorInternal(
223     bool allow_reinitialize) {
224   ComPtr<IMMDeviceEnumerator> device_enumerator;
225   _com_error error =
226       ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
227                          IID_PPV_ARGS(&device_enumerator));
228   if (FAILED(error.Error())) {
229     RTC_LOG(LS_ERROR) << "CoCreateInstance failed: " << ErrorToString(error);
230   }
231 
232   if (error.Error() == CO_E_NOTINITIALIZED && allow_reinitialize) {
233     RTC_LOG(LS_ERROR) << "CoCreateInstance failed with CO_E_NOTINITIALIZED";
234     // We have seen crashes which indicates that this method can in fact
235     // fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
236     // modules. Calling CoInitializeEx() is an attempt to resolve the reported
237     // issues. See http://crbug.com/378465 for details.
238     error = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
239     if (SUCCEEDED(error.Error())) {
240       error = ::CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
241                                  CLSCTX_ALL, IID_PPV_ARGS(&device_enumerator));
242       if (FAILED(error.Error())) {
243         RTC_LOG(LS_ERROR) << "CoCreateInstance failed: "
244                           << ErrorToString(error);
245       }
246     }
247   }
248   return device_enumerator;
249 }
250 
251 bool IsSupportedInternal() {
252   // The Core Audio APIs are implemented in the user-mode system components
253   // Audioses.dll and Mmdevapi.dll. Dependency Walker shows that it is
254   // enough to verify possibility to load the Audioses DLL since it depends
255   // on Mmdevapi.dll. See http://crbug.com/166397 why this extra step is
256   // required to guarantee Core Audio support.
257   if (!LoadAudiosesDll())
258     return false;
259 
260   // Being able to load the Audioses.dll does not seem to be sufficient for
261   // all devices to guarantee Core Audio support. To be 100% sure, we also
262   // verify that it is possible to create the IMMDeviceEnumerator interface.
263   // If this works as well, we should be home free.
264   ComPtr<IMMDeviceEnumerator> device_enumerator =
265       CreateDeviceEnumeratorInternal(false);
266   if (!device_enumerator) {
267     RTC_LOG(LS_ERROR)
268         << "Failed to create Core Audio device enumerator on thread with ID "
269         << rtc::CurrentThreadId();
270     return false;
271   }
272 
273   return true;
274 }
275 
276 bool IsDeviceActive(IMMDevice* device) {
277   DWORD state = DEVICE_STATE_DISABLED;
278   return SUCCEEDED(device->GetState(&state)) && (state & DEVICE_STATE_ACTIVE);
279 }
280 
281 // Retrieve an audio device specified by |device_id| or a default device
282 // specified by data-flow direction and role if |device_id| is default.
283 ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id,
284                                        EDataFlow data_flow,
285                                        ERole role) {
286   RTC_DLOG(INFO) << "CreateDeviceInternal: "
287                     "id="
288                  << device_id << ", flow=" << FlowToString(data_flow)
289                  << ", role=" << RoleToString(role);
290   ComPtr<IMMDevice> audio_endpoint_device;
291 
292   // Create the IMMDeviceEnumerator interface.
293   ComPtr<IMMDeviceEnumerator> device_enum(CreateDeviceEnumeratorInternal(true));
294   if (!device_enum.Get())
295     return audio_endpoint_device;
296 
297   _com_error error(S_FALSE);
298   if (device_id == AudioDeviceName::kDefaultDeviceId) {
299     // Get the default audio endpoint for the specified data-flow direction and
300     // role. Note that, if only a single rendering or capture device is
301     // available, the system always assigns all three rendering or capture roles
302     // to that device. If the method fails to find a rendering or capture device
303     // for the specified role, this means that no rendering or capture device is
304     // available at all. If no device is available, the method sets the output
305     // pointer to NULL and returns ERROR_NOT_FOUND.
306     error = device_enum->GetDefaultAudioEndpoint(
307         data_flow, role, audio_endpoint_device.GetAddressOf());
308     if (FAILED(error.Error())) {
309       RTC_LOG(LS_ERROR)
310           << "IMMDeviceEnumerator::GetDefaultAudioEndpoint failed: "
311           << ErrorToString(error);
312     }
313   } else {
314     // Ask for an audio endpoint device that is identified by an endpoint ID
315     // string.
316     error = device_enum->GetDevice(rtc::ToUtf16(device_id).c_str(),
317                                    audio_endpoint_device.GetAddressOf());
318     if (FAILED(error.Error())) {
319       RTC_LOG(LS_ERROR) << "IMMDeviceEnumerator::GetDevice failed: "
320                         << ErrorToString(error);
321     }
322   }
323 
324   // Verify that the audio endpoint device is active, i.e., that the audio
325   // adapter that connects to the endpoint device is present and enabled.
326   if (SUCCEEDED(error.Error()) && audio_endpoint_device.Get() &&
327       !IsDeviceActive(audio_endpoint_device.Get())) {
328     RTC_LOG(LS_WARNING) << "Selected endpoint device is not active";
329     audio_endpoint_device.Reset();
330   }
331 
332   return audio_endpoint_device;
333 }
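// Illustrative call patterns for the helper above (hedged sketch; the id
// string is a placeholder in the same format as the example further below):
//
//   // Default communications capture device:
//   CreateDeviceInternal(AudioDeviceName::kDefaultDeviceId, eCapture,
//                        eCommunications);
//   // Endpoint selected by its unique id string:
//   CreateDeviceInternal("{0.0.1.00000000}.{...}", eRender, eConsole);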
334 
335 std::string GetDeviceIdInternal(IMMDevice* device) {
336   // Retrieve unique name of endpoint device.
337   // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
338   LPWSTR device_id;
339   if (SUCCEEDED(device->GetId(&device_id))) {
340     std::string device_id_utf8 = rtc::ToUtf8(device_id, wcslen(device_id));
341     CoTaskMemFree(device_id);
342     return device_id_utf8;
343   } else {
344     return std::string();
345   }
346 }
347 
348 std::string GetDeviceFriendlyNameInternal(IMMDevice* device) {
349   // Retrieve user-friendly name of endpoint device.
350   // Example: "Microphone (Realtek High Definition Audio)".
351   ComPtr<IPropertyStore> properties;
352   HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.GetAddressOf());
353   if (FAILED(hr))
354     return std::string();
355 
356   ScopedPropVariant friendly_name_pv;
357   hr = properties->GetValue(PKEY_Device_FriendlyName,
358                             friendly_name_pv.Receive());
359   if (FAILED(hr))
360     return std::string();
361 
362   if (friendly_name_pv.get().vt == VT_LPWSTR &&
363       friendly_name_pv.get().pwszVal) {
364     return rtc::ToUtf8(friendly_name_pv.get().pwszVal,
365                        wcslen(friendly_name_pv.get().pwszVal));
366   } else {
367     return std::string();
368   }
369 }
370 
371 ComPtr<IAudioSessionManager2> CreateSessionManager2Internal(
372     IMMDevice* audio_device) {
373   if (!audio_device)
374     return ComPtr<IAudioSessionManager2>();
375 
376   ComPtr<IAudioSessionManager2> audio_session_manager;
377   _com_error error =
378       audio_device->Activate(__uuidof(IAudioSessionManager2), CLSCTX_ALL,
379                              nullptr, &audio_session_manager);
380   if (FAILED(error.Error())) {
381     RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioSessionManager2) failed: "
382                       << ErrorToString(error);
383   }
384   return audio_session_manager;
385 }
386 
387 ComPtr<IAudioSessionEnumerator> CreateSessionEnumeratorInternal(
388     IMMDevice* audio_device) {
389   if (!audio_device) {
390     return ComPtr<IAudioSessionEnumerator>();
391   }
392 
393   ComPtr<IAudioSessionEnumerator> audio_session_enumerator;
394   ComPtr<IAudioSessionManager2> audio_session_manager =
395       CreateSessionManager2Internal(audio_device);
396   if (!audio_session_manager.Get()) {
397     return audio_session_enumerator;
398   }
399   _com_error error =
400       audio_session_manager->GetSessionEnumerator(&audio_session_enumerator);
401   if (FAILED(error.Error())) {
402     RTC_LOG(LS_ERROR)
403         << "IAudioSessionManager2::GetSessionEnumerator failed: "
404         << ErrorToString(error);
405     return ComPtr<IAudioSessionEnumerator>();
406   }
407   return audio_session_enumerator;
408 }
409 
410 // Creates and activates an IAudioClient COM object given the selected
411 // endpoint device.
412 ComPtr<IAudioClient> CreateClientInternal(IMMDevice* audio_device) {
413   if (!audio_device)
414     return ComPtr<IAudioClient>();
415 
416   ComPtr<IAudioClient> audio_client;
417   _com_error error = audio_device->Activate(__uuidof(IAudioClient), CLSCTX_ALL,
418                                             nullptr, &audio_client);
419   if (FAILED(error.Error())) {
420     RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient) failed: "
421                       << ErrorToString(error);
422   }
423   return audio_client;
424 }
425 
426 ComPtr<IAudioClient2> CreateClient2Internal(IMMDevice* audio_device) {
427   if (!audio_device)
428     return ComPtr<IAudioClient2>();
429 
430   ComPtr<IAudioClient2> audio_client;
431   _com_error error = audio_device->Activate(__uuidof(IAudioClient2), CLSCTX_ALL,
432                                             nullptr, &audio_client);
433   if (FAILED(error.Error())) {
434     RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient2) failed: "
435                       << ErrorToString(error);
436   }
437   return audio_client;
438 }
439 
440 ComPtr<IAudioClient3> CreateClient3Internal(IMMDevice* audio_device) {
441   if (!audio_device)
442     return ComPtr<IAudioClient3>();
443 
444   ComPtr<IAudioClient3> audio_client;
445   _com_error error = audio_device->Activate(__uuidof(IAudioClient3), CLSCTX_ALL,
446                                             nullptr, &audio_client);
447   if (FAILED(error.Error())) {
448     RTC_LOG(LS_ERROR) << "IMMDevice::Activate(IAudioClient3) failed: "
449                       << ErrorToString(error);
450   }
451   return audio_client;
452 }
453 
454 ComPtr<IMMDeviceCollection> CreateCollectionInternal(EDataFlow data_flow) {
455   ComPtr<IMMDeviceEnumerator> device_enumerator(
456       CreateDeviceEnumeratorInternal(true));
457   if (!device_enumerator) {
458     return ComPtr<IMMDeviceCollection>();
459   }
460 
461   // Generate a collection of active (present and not disabled) audio endpoint
462   // devices for the specified data-flow direction.
463   // This method will succeed even if all devices are disabled.
464   ComPtr<IMMDeviceCollection> collection;
465   _com_error error = device_enumerator->EnumAudioEndpoints(
466       data_flow, DEVICE_STATE_ACTIVE, collection.GetAddressOf());
467   if (FAILED(error.Error())) {
468     RTC_LOG(LS_ERROR) << "IMMDeviceEnumerator::EnumAudioEndpoints failed: "
469                       << ErrorToString(error);
470   }
471   return collection;
472 }
473 
474 bool GetDeviceNamesInternal(EDataFlow data_flow,
475                             webrtc::AudioDeviceNames* device_names) {
476   RTC_DLOG(LS_INFO) << "GetDeviceNamesInternal: flow="
477                     << FlowToString(data_flow);
478 
479   // Generate a collection of active audio endpoint devices for the specified
480   // direction.
481   ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
482   if (!collection.Get()) {
483     RTC_LOG(LS_ERROR) << "Failed to create a collection of active devices";
484     return false;
485   }
486 
487   // Retrieve the number of active (present, not disabled and plugged in) audio
488   // devices for the specified direction.
489   UINT number_of_active_devices = 0;
490   _com_error error = collection->GetCount(&number_of_active_devices);
491   if (FAILED(error.Error())) {
492     RTC_LOG(LS_ERROR) << "IMMDeviceCollection::GetCount failed: "
493                       << ErrorToString(error);
494     return false;
495   }
496 
497   if (number_of_active_devices == 0) {
498     RTC_DLOG(LS_WARNING) << "Found no active devices";
499     return false;
500   }
501 
502   // Loop over all active devices and add friendly name and unique id to the
503   // |device_names| queue. For now, devices are added at indexes 0, 1, ..., N-1
504   // but they will be moved to 2, 3, ..., N+1 at the next stage when the
505   // default and default communication devices are added at indexes 0 and 1.
506   ComPtr<IMMDevice> audio_device;
507   for (UINT i = 0; i < number_of_active_devices; ++i) {
508     // Retrieve a pointer to the specified item in the device collection.
509     error = collection->Item(i, audio_device.GetAddressOf());
510     if (FAILED(error.Error())) {
511       // Skip this item and try to get the next item instead; will result in an
512       // incomplete list of devices.
513       RTC_LOG(LS_WARNING) << "IMMDeviceCollection::Item failed: "
514                           << ErrorToString(error);
515       continue;
516     }
517     if (!audio_device.Get()) {
518       RTC_LOG(LS_WARNING) << "Invalid audio device";
519       continue;
520     }
521 
522     // Retrieve the complete device name for the given audio device endpoint.
523     AudioDeviceName device_name(
524         GetDeviceFriendlyNameInternal(audio_device.Get()),
525         GetDeviceIdInternal(audio_device.Get()));
526     // Add combination of user-friendly and unique name to the output list.
527     device_names->push_back(device_name);
528   }
529 
530   // Log a warning if the list of devices is not complete, but keep trying
531   // to add the default and default communications devices at the front.
532   if (device_names->size() != number_of_active_devices) {
533     RTC_DLOG(LS_WARNING)
534         << "List of device names does not contain all active devices";
535   }
536 
537   // Avoid adding default and default communication devices if no active device
538   // could be added to the queue. We might as well break here and return false
539   // since no active devices were identified.
540   if (device_names->empty()) {
541     RTC_DLOG(LS_ERROR) << "List of active devices is empty";
542     return false;
543   }
544 
545   // Prepend the queue with two more elements: one for the default device and
546   // one for the default communication device (can correspond to the same unique
547   // id if only one active device exists). The first element (index 0) is the
548   // default device and the second element (index 1) is the default
549   // communication device.
550   ERole role[] = {eCommunications, eConsole};
551   ComPtr<IMMDevice> default_device;
552   AudioDeviceName default_device_name;
553   for (size_t i = 0; i < arraysize(role); ++i) {
554     default_device = CreateDeviceInternal(AudioDeviceName::kDefaultDeviceId,
555                                           data_flow, role[i]);
556     if (!default_device.Get()) {
557       // Add empty strings to device name if the device could not be created.
558       RTC_DLOG(LS_WARNING) << "Failed to add device with role: "
559                            << RoleToString(role[i]);
560       default_device_name.device_name = std::string();
561       default_device_name.unique_id = std::string();
562     } else {
563       // Populate the device name with friendly name and unique id.
564       std::string device_name;
565       device_name += (role[i] == eConsole ? "Default - " : "Communication - ");
566       device_name += GetDeviceFriendlyNameInternal(default_device.Get());
567       std::string unique_id = GetDeviceIdInternal(default_device.Get());
568       default_device_name.device_name = std::move(device_name);
569       default_device_name.unique_id = std::move(unique_id);
570     }
571 
572     // Add combination of user-friendly and unique name to the output queue.
573     // The last element (<=> eConsole) will be at the front of the queue, hence
574     // at index 0. Empty strings will be added for cases where no default
575     // devices were found.
576     device_names->push_front(default_device_name);
577   }
578 
579   // Example of log output when only one device is active. Note that the queue
580   // contains two extra elements at index 0 (Default) and 1 (Communication) to
581   // allow selection of device by role instead of id. All elements correspond
582   // to the same unique id.
583   // [0] friendly name: Default - Headset Microphone (2- Arctis 7 Chat)
584   // [0] unique id    : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
585   // [1] friendly name: Communication - Headset Microphone (2- Arctis 7 Chat)
586   // [1] unique id    : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
587   // [2] friendly name: Headset Microphone (2- Arctis 7 Chat)
588   // [2] unique id    : {0.0.1.00000000}.{ff9eed76-196e-467a-b295-26986e69451c}
589   for (size_t i = 0; i < device_names->size(); ++i) {
590     RTC_DLOG(INFO) << "[" << i
591                    << "] friendly name: " << (*device_names)[i].device_name;
592     RTC_DLOG(INFO) << "[" << i
593                    << "] unique id    : " << (*device_names)[i].unique_id;
594   }
595 
596   return true;
597 }
598 
599 HRESULT GetPreferredAudioParametersInternal(IAudioClient* client,
600                                             AudioParameters* params,
601                                             int fixed_sample_rate) {
602   WAVEFORMATPCMEX mix_format;
603   HRESULT hr = core_audio_utility::GetSharedModeMixFormat(client, &mix_format);
604   if (FAILED(hr))
605     return hr;
606 
607   REFERENCE_TIME default_period = 0;
608   hr = core_audio_utility::GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED,
609                                            &default_period);
610   if (FAILED(hr))
611     return hr;
612 
613   int sample_rate = mix_format.Format.nSamplesPerSec;
614   // Override default sample rate if |fixed_sample_rate| is set and different
615   // from the default rate.
616   if (fixed_sample_rate > 0 && fixed_sample_rate != sample_rate) {
617     RTC_DLOG(INFO) << "Using fixed sample rate instead of the preferred: "
618                    << sample_rate << " is replaced by " << fixed_sample_rate;
619     sample_rate = fixed_sample_rate;
620   }
621   // TODO(henrika): utilize full mix_format.Format.wBitsPerSample.
622   // const size_t bits_per_sample = AudioParameters::kBitsPerSample;
623   // TODO(henrika): improve channel layout support.
624   const size_t channels = mix_format.Format.nChannels;
625 
626   // Use the native device period to derive the smallest possible buffer size
627   // in shared mode.
628   double device_period_in_seconds =
629       static_cast<double>(
630           core_audio_utility::ReferenceTimeToTimeDelta(default_period).ms()) /
631       1000.0L;
632   const size_t frames_per_buffer =
633       static_cast<size_t>(sample_rate * device_period_in_seconds + 0.5);
634 
635   AudioParameters audio_params(sample_rate, channels, frames_per_buffer);
636   *params = audio_params;
637   RTC_DLOG(INFO) << audio_params.ToString();
638 
639   return hr;
640 }
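// Worked example for the calculation above (assuming a 48 kHz mix format and
// a typical 10 ms shared-mode device period): frames_per_buffer =
// 48000 * 0.010 + 0.5, truncated to 480 audio frames per buffer.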
641 
642 }  // namespace
643 
644 namespace core_audio_utility {
645 
646 // core_audio_utility::WaveFormatWrapper implementation.
647 WAVEFORMATEXTENSIBLE* WaveFormatWrapper::GetExtensible() const {
648   RTC_CHECK(IsExtensible());
649   return reinterpret_cast<WAVEFORMATEXTENSIBLE*>(ptr_);
650 }
651 
652 bool WaveFormatWrapper::IsExtensible() const {
653   return ptr_->wFormatTag == WAVE_FORMAT_EXTENSIBLE && ptr_->cbSize >= 22;
654 }
655 
656 bool WaveFormatWrapper::IsPcm() const {
657   return IsExtensible() ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_PCM
658                         : ptr_->wFormatTag == WAVE_FORMAT_PCM;
659 }
660 
661 bool WaveFormatWrapper::IsFloat() const {
662   return IsExtensible()
663              ? GetExtensible()->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT
664              : ptr_->wFormatTag == WAVE_FORMAT_IEEE_FLOAT;
665 }
666 
667 size_t WaveFormatWrapper::size() const {
668   return sizeof(*ptr_) + ptr_->cbSize;
669 }
670 
671 bool IsSupported() {
672   RTC_DLOG(INFO) << "IsSupported";
673   static bool g_is_supported = IsSupportedInternal();
674   return g_is_supported;
675 }
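// Hedged usage sketch: clients would typically call IsSupported() once as a
// guard before using any other helper in this file, e.g.
//
//   if (!webrtc::webrtc_win::core_audio_utility::IsSupported()) {
//     // Core Audio is unavailable; fall back to another audio backend.
//   }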
676 
677 bool IsMMCSSSupported() {
678   RTC_DLOG(INFO) << "IsMMCSSSupported";
679   return LoadAvrtDll();
680 }
681 
682 int NumberOfActiveDevices(EDataFlow data_flow) {
683   // Generate a collection of active audio endpoint devices for the specified
684   // data-flow direction.
685   ComPtr<IMMDeviceCollection> collection = CreateCollectionInternal(data_flow);
686   if (!collection.Get()) {
687     return 0;
688   }
689 
690   // Retrieve the number of active audio devices for the specified direction.
691   UINT number_of_active_devices = 0;
692   collection->GetCount(&number_of_active_devices);
693   std::string str;
694   if (data_flow == eCapture) {
695     str = "Number of capture devices: ";
696   } else if (data_flow == eRender) {
697     str = "Number of render devices: ";
698   } else if (data_flow == eAll) {
699     str = "Total number of devices: ";
700   }
701   RTC_DLOG(INFO) << str << number_of_active_devices;
702   return static_cast<int>(number_of_active_devices);
703 }
704 
705 uint32_t GetAudioClientVersion() {
706   uint32_t version = 1;
707   if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN10) {
708     version = 3;
709   } else if (rtc::rtc_win::GetVersion() >= rtc::rtc_win::VERSION_WIN8) {
710     version = 2;
711   }
712   return version;
713 }
714 
715 ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator() {
716   RTC_DLOG(INFO) << "CreateDeviceEnumerator";
717   return CreateDeviceEnumeratorInternal(true);
718 }
719 
720 std::string GetDefaultInputDeviceID() {
721   RTC_DLOG(INFO) << "GetDefaultInputDeviceID";
722   ComPtr<IMMDevice> device(
723       CreateDevice(AudioDeviceName::kDefaultDeviceId, eCapture, eConsole));
724   return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
725 }
726 
727 std::string GetDefaultOutputDeviceID() {
728   RTC_DLOG(INFO) << "GetDefaultOutputDeviceID";
729   ComPtr<IMMDevice> device(
730       CreateDevice(AudioDeviceName::kDefaultDeviceId, eRender, eConsole));
731   return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
732 }
733 
734 std::string GetCommunicationsInputDeviceID() {
735   RTC_DLOG(INFO) << "GetCommunicationsInputDeviceID";
736   ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
737                                         eCapture, eCommunications));
738   return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
739 }
740 
741 std::string GetCommunicationsOutputDeviceID() {
742   RTC_DLOG(INFO) << "GetCommunicationsOutputDeviceID";
743   ComPtr<IMMDevice> device(CreateDevice(AudioDeviceName::kDefaultDeviceId,
744                                         eRender, eCommunications));
745   return device.Get() ? GetDeviceIdInternal(device.Get()) : std::string();
746 }
747 
748 ComPtr<IMMDevice> CreateDevice(const std::string& device_id,
749                                EDataFlow data_flow,
750                                ERole role) {
751   RTC_DLOG(INFO) << "CreateDevice";
752   return CreateDeviceInternal(device_id, data_flow, role);
753 }
754 
755 AudioDeviceName GetDeviceName(IMMDevice* device) {
756   RTC_DLOG(INFO) << "GetDeviceName";
757   RTC_DCHECK(device);
758   AudioDeviceName device_name(GetDeviceFriendlyNameInternal(device),
759                               GetDeviceIdInternal(device));
760   RTC_DLOG(INFO) << "friendly name: " << device_name.device_name;
761   RTC_DLOG(INFO) << "unique id    : " << device_name.unique_id;
762   return device_name;
763 }
764 
765 std::string GetFriendlyName(const std::string& device_id,
766                             EDataFlow data_flow,
767                             ERole role) {
768   RTC_DLOG(INFO) << "GetFriendlyName";
769   ComPtr<IMMDevice> audio_device = CreateDevice(device_id, data_flow, role);
770   if (!audio_device.Get())
771     return std::string();
772 
773   AudioDeviceName device_name = GetDeviceName(audio_device.Get());
774   return device_name.device_name;
775 }
776 
777 EDataFlow GetDataFlow(IMMDevice* device) {
778   RTC_DLOG(INFO) << "GetDataFlow";
779   RTC_DCHECK(device);
780   ComPtr<IMMEndpoint> endpoint;
781   _com_error error = device->QueryInterface(endpoint.GetAddressOf());
782   if (FAILED(error.Error())) {
783     RTC_LOG(LS_ERROR) << "IMMDevice::QueryInterface failed: "
784                       << ErrorToString(error);
785     return eAll;
786   }
787 
788   EDataFlow data_flow;
789   error = endpoint->GetDataFlow(&data_flow);
790   if (FAILED(error.Error())) {
791     RTC_LOG(LS_ERROR) << "IMMEndpoint::GetDataFlow failed: "
792                       << ErrorToString(error);
793     return eAll;
794   }
795   return data_flow;
796 }
797 
798 bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names) {
799   RTC_DLOG(INFO) << "GetInputDeviceNames";
800   RTC_DCHECK(device_names);
801   RTC_DCHECK(device_names->empty());
802   return GetDeviceNamesInternal(eCapture, device_names);
803 }
804 
805 bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names) {
806   RTC_DLOG(INFO) << "GetOutputDeviceNames";
807   RTC_DCHECK(device_names);
808   RTC_DCHECK(device_names->empty());
809   return GetDeviceNamesInternal(eRender, device_names);
810 }
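// Hedged usage sketch (assumes COM is initialized on the calling thread, as
// required by the Core Audio APIs):
//
//   webrtc::AudioDeviceNames names;
//   if (webrtc::webrtc_win::core_audio_utility::GetOutputDeviceNames(&names)) {
//     // names[0] is the default device, names[1] the default communication
//     // device, followed by all active render devices.
//   }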
811 
812 ComPtr<IAudioSessionManager2> CreateSessionManager2(IMMDevice* device) {
813   RTC_DLOG(INFO) << "CreateSessionManager2";
814   return CreateSessionManager2Internal(device);
815 }
816 
817 Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
818     IMMDevice* device) {
819   RTC_DLOG(INFO) << "CreateSessionEnumerator";
820   return CreateSessionEnumeratorInternal(device);
821 }
822 
823 int NumberOfActiveSessions(IMMDevice* device) {
824   RTC_DLOG(INFO) << "NumberOfActiveSessions";
825   ComPtr<IAudioSessionEnumerator> session_enumerator =
826       CreateSessionEnumerator(device);
827 
828   // Iterate over all audio sessions for the given device.
829   int session_count = 0;
830   _com_error error = session_enumerator->GetCount(&session_count);
831   if (FAILED(error.Error())) {
832     RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetCount failed: "
833                       << ErrorToString(error);
834     return 0;
835   }
836   RTC_DLOG(INFO) << "Total number of audio sessions: " << session_count;
837 
838   int num_active = 0;
839   for (int session = 0; session < session_count; session++) {
840     // Acquire the session control interface.
841     ComPtr<IAudioSessionControl> session_control;
842     error = session_enumerator->GetSession(session, &session_control);
843     if (FAILED(error.Error())) {
844       RTC_LOG(LS_ERROR) << "IAudioSessionEnumerator::GetSession failed: "
845                         << ErrorToString(error);
846       return 0;
847     }
848 
849     // Log the display name of the audio session for debugging purposes.
850     LPWSTR display_name;
851     if (SUCCEEDED(session_control->GetDisplayName(&display_name))) {
852       RTC_DLOG(INFO) << "display name: "
853                      << rtc::ToUtf8(display_name, wcslen(display_name));
854       CoTaskMemFree(display_name);
855     }
856 
857     // Get the current state and check if the state is active or not.
858     AudioSessionState state;
859     error = session_control->GetState(&state);
860     if (FAILED(error.Error())) {
861       RTC_LOG(LS_ERROR) << "IAudioSessionControl::GetState failed: "
862                         << ErrorToString(error);
863       return 0;
864     }
865     if (state == AudioSessionStateActive) {
866       ++num_active;
867     }
868   }
869 
870   RTC_DLOG(INFO) << "Number of active audio sessions: " << num_active;
871   return num_active;
872 }
873 
874 ComPtr<IAudioClient> CreateClient(const std::string& device_id,
875                                   EDataFlow data_flow,
876                                   ERole role) {
877   RTC_DLOG(INFO) << "CreateClient";
878   ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
879   return CreateClientInternal(device.Get());
880 }
881 
882 ComPtr<IAudioClient2> CreateClient2(const std::string& device_id,
883                                     EDataFlow data_flow,
884                                     ERole role) {
885   RTC_DLOG(INFO) << "CreateClient2";
886   ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
887   return CreateClient2Internal(device.Get());
888 }
889 
890 ComPtr<IAudioClient3> CreateClient3(const std::string& device_id,
891                                     EDataFlow data_flow,
892                                     ERole role) {
893   RTC_DLOG(INFO) << "CreateClient3";
894   ComPtr<IMMDevice> device(CreateDevice(device_id, data_flow, role));
895   return CreateClient3Internal(device.Get());
896 }
897 
898 HRESULT SetClientProperties(IAudioClient2* client) {
899   RTC_DLOG(INFO) << "SetClientProperties";
900   RTC_DCHECK(client);
901   if (GetAudioClientVersion() < 2) {
902     RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher";
903     return AUDCLNT_E_UNSUPPORTED_FORMAT;
904   }
905   AudioClientProperties props = {0};
906   props.cbSize = sizeof(AudioClientProperties);
907   // Real-time VoIP communication.
908   // TODO(henrika): other categories?
909   props.eCategory = AudioCategory_Communications;
910   // Hardware-offloaded audio processing allows the main audio processing tasks
911   // to be performed outside the computer's main CPU. Check support and log the
912   // result but hard-code |bIsOffload| to FALSE for now.
913   // TODO(henrika): evaluate hardware-offloading. Might complicate usage of
914   // IAudioClient::GetMixFormat().
915   BOOL supports_offload = FALSE;
916   _com_error error =
917       client->IsOffloadCapable(props.eCategory, &supports_offload);
918   if (FAILED(error.Error())) {
919     RTC_LOG(LS_ERROR) << "IAudioClient2::IsOffloadCapable failed: "
920                       << ErrorToString(error);
921   }
922   RTC_DLOG(INFO) << "supports_offload: " << supports_offload;
923   props.bIsOffload = false;
924 #if (NTDDI_VERSION < NTDDI_WINBLUE)
925   RTC_DLOG(INFO) << "options: Not supported in this build";
926 #else
927   // TODO(henrika): pros and cons compared with AUDCLNT_STREAMOPTIONS_NONE?
928   props.Options |= AUDCLNT_STREAMOPTIONS_NONE;
929   // Requires System.Devices.AudioDevice.RawProcessingSupported.
930   // The application can choose to *always ignore* the OEM AEC/AGC by setting
931   // the AUDCLNT_STREAMOPTIONS_RAW flag in the call to SetClientProperties.
932   // This flag will preserve the user experience aspect of Communications
933   // streams, but will not insert any OEM provided communications specific
934   // processing in the audio signal path.
935   // props.Options |= AUDCLNT_STREAMOPTIONS_RAW;
936 
937   // If it is important to avoid resampling in the audio engine, set this flag.
938   // AUDCLNT_STREAMOPTIONS_MATCH_FORMAT (or anything in IAudioClient3) is not
939   // an appropriate interface to use for communications scenarios.
940   // This interface is mainly meant for pro audio scenarios.
941   // props.Options |= AUDCLNT_STREAMOPTIONS_MATCH_FORMAT;
942   RTC_DLOG(INFO) << "options: 0x" << rtc::ToHex(props.Options);
943 #endif
944   error = client->SetClientProperties(&props);
945   if (FAILED(error.Error())) {
946     RTC_LOG(LS_ERROR) << "IAudioClient2::SetClientProperties failed: "
947                       << ErrorToString(error);
948   }
949   return error.Error();
950 }
951 
952 HRESULT GetBufferSizeLimits(IAudioClient2* client,
953                             const WAVEFORMATEXTENSIBLE* format,
954                             REFERENCE_TIME* min_buffer_duration,
955                             REFERENCE_TIME* max_buffer_duration) {
956   RTC_DLOG(INFO) << "GetBufferSizeLimits";
957   RTC_DCHECK(client);
958   if (GetAudioClientVersion() < 2) {
959     RTC_LOG(LS_WARNING) << "Requires IAudioClient2 or higher";
960     return AUDCLNT_E_UNSUPPORTED_FORMAT;
961   }
962   REFERENCE_TIME min_duration = 0;
963   REFERENCE_TIME max_duration = 0;
964   _com_error error =
965       client->GetBufferSizeLimits(reinterpret_cast<const WAVEFORMATEX*>(format),
966                                   TRUE, &min_duration, &max_duration);
967   if (error.Error() == AUDCLNT_E_OFFLOAD_MODE_ONLY) {
968     // This API seems to be supported in off-load mode only but it is not
969     // documented as a valid error code. Making a special note about it here.
970     RTC_LOG(LS_ERROR) << "IAudioClient2::GetBufferSizeLimits failed: "
971                          "AUDCLNT_E_OFFLOAD_MODE_ONLY";
972   } else if (FAILED(error.Error())) {
973     RTC_LOG(LS_ERROR) << "IAudioClient2::GetBufferSizeLimits failed: "
974                       << ErrorToString(error);
975   } else {
976     *min_buffer_duration = min_duration;
977     *max_buffer_duration = max_duration;
978     RTC_DLOG(INFO) << "min_buffer_duration: " << *min_buffer_duration;
979     RTC_DLOG(INFO) << "max_buffer_duration: " << *max_buffer_duration;
980   }
981   return error.Error();
982 }
983 
984 HRESULT GetSharedModeMixFormat(IAudioClient* client,
985                                WAVEFORMATEXTENSIBLE* format) {
986   RTC_DLOG(INFO) << "GetSharedModeMixFormat";
987   RTC_DCHECK(client);
988 
989   // The GetMixFormat method retrieves the stream format that the audio engine
990   // uses for its internal processing of shared-mode streams. The method
991   // allocates the storage for the structure and this memory will be released
992   // when |mix_format| goes out of scope. The GetMixFormat method retrieves a
993   // format descriptor that is in the form of a WAVEFORMATEXTENSIBLE structure
994   // instead of a standalone WAVEFORMATEX structure. The method outputs a
995   // pointer to the WAVEFORMATEX structure that is embedded at the start of
996   // this WAVEFORMATEXTENSIBLE structure.
997   // Note that crbug/803056 indicates that some devices can return a format
998   // where only the WAVEFORMATEX part is initialized, and we must be able to
999   // account for that.
1000   ScopedCoMem<WAVEFORMATEXTENSIBLE> mix_format;
1001   _com_error error =
1002       client->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&mix_format));
1003   if (FAILED(error.Error())) {
1004     RTC_LOG(LS_ERROR) << "IAudioClient::GetMixFormat failed: "
1005                       << ErrorToString(error);
1006     return error.Error();
1007   }
1008 
1009   // Use a wave format wrapper to make things simpler.
1010   WaveFormatWrapper wrapped_format(mix_format.Get());
1011 
1012   // Verify that the reported format can be mixed by the audio engine in
1013   // shared mode.
1014   if (!wrapped_format.IsPcm() && !wrapped_format.IsFloat()) {
1015     RTC_DLOG(LS_ERROR)
1016         << "Only pure PCM or float audio streams can be mixed in shared mode";
1017     return AUDCLNT_E_UNSUPPORTED_FORMAT;
1018   }
1019 
1020   // Log a warning for the rare case where |mix_format| only contains a
1021   // stand-alone WAVEFORMATEX structure but don't return.
1022   if (!wrapped_format.IsExtensible()) {
1023     RTC_DLOG(WARNING)
1024         << "The returned format contains no extended information. "
1025            "The size is "
1026         << wrapped_format.size() << " bytes.";
1027   }
1028 
1029   // Copy the correct number of bytes into |*format| taking into account if
1030   // the returned structure is correctly extended or not.
1031   RTC_CHECK_LE(wrapped_format.size(), sizeof(WAVEFORMATEXTENSIBLE));
1032   memcpy(format, wrapped_format.get(), wrapped_format.size());
1033   RTC_DLOG(INFO) << WaveFormatToString(format);
1034 
1035   return error.Error();
1036 }
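// Illustrative example of a shared-mode mix format (values vary per device;
// the audio engine typically mixes in 32-bit float): nChannels=2,
// nSamplesPerSec=48000, wBitsPerSample=32,
// SubFormat=KSDATAFORMAT_SUBTYPE_IEEE_FLOAT.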
1037 
1038 bool IsFormatSupported(IAudioClient* client,
1039                        AUDCLNT_SHAREMODE share_mode,
1040                        const WAVEFORMATEXTENSIBLE* format) {
1041   RTC_DLOG(INFO) << "IsFormatSupported";
1042   RTC_DCHECK(client);
1043   ScopedCoMem<WAVEFORMATEX> closest_match;
1044   // This method provides a way for a client to determine, before calling
1045   // IAudioClient::Initialize, whether the audio engine supports a particular
1046   // stream format or not. In shared mode, the audio engine always supports
1047   // the mix format (see GetSharedModeMixFormat).
1048   // TODO(henrika): verify support for exclusive mode as well?
1049   _com_error error = client->IsFormatSupported(
1050       share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
1051       &closest_match);
1052   RTC_LOG(INFO) << WaveFormatToString(
1053       const_cast<WAVEFORMATEXTENSIBLE*>(format));
1054   if ((error.Error() == S_OK) && (closest_match == nullptr)) {
1055     RTC_DLOG(INFO)
1056         << "The audio endpoint device supports the specified stream format";
1057   } else if ((error.Error() == S_FALSE) && (closest_match != nullptr)) {
1058     // Call succeeded with a closest match to the specified format. This log can
1059     // only be triggered for shared mode.
1060     RTC_LOG(LS_WARNING)
1061         << "Exact format is not supported, but a closest match exists";
1062     RTC_LOG(INFO) << WaveFormatToString(closest_match.Get());
1063   } else if ((error.Error() == AUDCLNT_E_UNSUPPORTED_FORMAT) &&
1064              (closest_match == nullptr)) {
1065     // The audio engine does not support the caller-specified format or any
1066     // similar format.
1067     RTC_DLOG(INFO) << "The audio endpoint device does not support the "
1068                       "specified stream format";
1069   } else {
1070     RTC_LOG(LS_ERROR) << "IAudioClient::IsFormatSupported failed: "
1071                       << ErrorToString(error);
1072   }
1073 
1074   return (error.Error() == S_OK);
1075 }
1076 
1077 HRESULT GetDevicePeriod(IAudioClient* client,
1078                         AUDCLNT_SHAREMODE share_mode,
1079                         REFERENCE_TIME* device_period) {
1080   RTC_DLOG(INFO) << "GetDevicePeriod";
1081   RTC_DCHECK(client);
1082   // The |default_period| parameter specifies the default scheduling period
1083   // for a shared-mode stream. The |minimum_period| parameter specifies the
1084   // minimum scheduling period for an exclusive-mode stream.
1085   // The time is expressed in 100-nanosecond units.
1086   REFERENCE_TIME default_period = 0;
1087   REFERENCE_TIME minimum_period = 0;
1088   _com_error error = client->GetDevicePeriod(&default_period, &minimum_period);
1089   if (FAILED(error.Error())) {
1090     RTC_LOG(LS_ERROR) << "IAudioClient::GetDevicePeriod failed: "
1091                       << ErrorToString(error);
1092     return error.Error();
1093   }
1094 
1095   *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period
1096                                                             : minimum_period;
1097   RTC_LOG(INFO) << "device_period: "
1098                 << ReferenceTimeToTimeDelta(*device_period).ms() << " [ms]";
1099   RTC_LOG(INFO) << "minimum_period: "
1100                 << ReferenceTimeToTimeDelta(minimum_period).ms() << " [ms]";
1101   return error.Error();
1102 }
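// Worked example of the unit conversion above: a REFERENCE_TIME of 100000
// (100-nanosecond units) equals 100000 * 100 ns = 10 ms, a common default
// shared-mode scheduling period.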
1103 
1104 HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
1105                                   const WAVEFORMATEXTENSIBLE* format,
1106                                   uint32_t* default_period_in_frames,
1107                                   uint32_t* fundamental_period_in_frames,
1108                                   uint32_t* min_period_in_frames,
1109                                   uint32_t* max_period_in_frames) {
1110   RTC_DLOG(INFO) << "GetSharedModeEnginePeriod";
1111   RTC_DCHECK(client3);
1112 
1113   UINT32 default_period = 0;
1114   UINT32 fundamental_period = 0;
1115   UINT32 min_period = 0;
1116   UINT32 max_period = 0;
1117   _com_error error = client3->GetSharedModeEnginePeriod(
1118       reinterpret_cast<const WAVEFORMATEX*>(format), &default_period,
1119       &fundamental_period, &min_period, &max_period);
1120   if (FAILED(error.Error())) {
1121     RTC_LOG(LS_ERROR) << "IAudioClient3::GetSharedModeEnginePeriod failed: "
1122                       << ErrorToString(error);
1123     return error.Error();
1124   }
1125 
1126   WAVEFORMATEX format_ex = format->Format;
1127   const DWORD sample_rate = format_ex.nSamplesPerSec;
1128   RTC_LOG(INFO) << "default_period_in_frames: " << default_period << " ("
1129                 << FramesToMilliseconds(default_period, sample_rate) << " ms)";
1130   RTC_LOG(INFO) << "fundamental_period_in_frames: " << fundamental_period
1131                 << " (" << FramesToMilliseconds(fundamental_period, sample_rate)
1132                 << " ms)";
1133   RTC_LOG(INFO) << "min_period_in_frames: " << min_period << " ("
1134                 << FramesToMilliseconds(min_period, sample_rate) << " ms)";
1135   RTC_LOG(INFO) << "max_period_in_frames: " << max_period << " ("
1136                 << FramesToMilliseconds(max_period, sample_rate) << " ms)";
1137   *default_period_in_frames = default_period;
1138   *fundamental_period_in_frames = fundamental_period;
1139   *min_period_in_frames = min_period;
1140   *max_period_in_frames = max_period;
1141   return error.Error();
1142 }
1143 
1144 HRESULT GetPreferredAudioParameters(IAudioClient* client,
1145                                     AudioParameters* params) {
1146   RTC_DLOG(INFO) << "GetPreferredAudioParameters";
1147   RTC_DCHECK(client);
1148   return GetPreferredAudioParametersInternal(client, params, -1);
1149 }
1150 
1151 HRESULT GetPreferredAudioParameters(IAudioClient* client,
1152                                     webrtc::AudioParameters* params,
1153                                     uint32_t sample_rate) {
1154   RTC_DLOG(INFO) << "GetPreferredAudioParameters: " << sample_rate;
1155   RTC_DCHECK(client);
1156   return GetPreferredAudioParametersInternal(client, params, sample_rate);
1157 }
1158 
1159 HRESULT SharedModeInitialize(IAudioClient* client,
1160                              const WAVEFORMATEXTENSIBLE* format,
1161                              HANDLE event_handle,
1162                              REFERENCE_TIME buffer_duration,
1163                              bool auto_convert_pcm,
1164                              uint32_t* endpoint_buffer_size) {
1165   RTC_DLOG(INFO) << "SharedModeInitialize: buffer_duration=" << buffer_duration
1166                  << ", auto_convert_pcm=" << auto_convert_pcm;
1167   RTC_DCHECK(client);
1168   RTC_DCHECK_GE(buffer_duration, 0);
1169   if (buffer_duration != 0) {
1170     RTC_DLOG(LS_WARNING) << "Non-default buffer size is used";
1171   }
1172   if (auto_convert_pcm) {
1173     RTC_DLOG(LS_WARNING) << "Sample rate converter can be utilized";
1174   }
1175   // The AUDCLNT_STREAMFLAGS_NOPERSIST flag disables persistence of the volume
1176   // and mute settings for a session that contains rendering streams.
1177   // By default, the volume level and muting state for a rendering session are
1178   // persistent across system restarts. The volume level and muting state for a
1179   // capture session are never persistent.
1180   DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
1181 
1182   // Enable event-driven streaming if a valid event handle is provided.
1183   // After the stream starts, the audio engine will signal the event handle
1184   // to notify the client each time a buffer becomes ready to process.
1185   // Event-driven buffering is supported for both rendering and capturing.
1186   // Both shared-mode and exclusive-mode streams can use event-driven buffering.
1187   bool use_event =
1188       (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE);
1189   if (use_event) {
1190     stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
1191     RTC_DLOG(INFO) << "The stream is initialized to be event driven";
1192   }
1193 
1194   // Check if sample-rate conversion is requested.
1195   if (auto_convert_pcm) {
1196     // Add channel matrixer (not utilized here) and rate converter to convert
1197     // from our (the client's) format to the audio engine mix format.
1198     // Currently only supported for testing, i.e., not possible to enable using
1199     // public APIs.
1200     RTC_DLOG(INFO) << "The stream is initialized to support rate conversion";
1201     stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
1202     stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
1203   }
1204   RTC_DLOG(INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
1205 
1206   // Initialize the shared mode client for minimal delay if |buffer_duration|
1207   // is 0 or possibly a higher delay (more robust) if |buffer_duration| is
1208   // larger than 0. The actual size is given by IAudioClient::GetBufferSize().
1209   _com_error error = client->Initialize(
1210       AUDCLNT_SHAREMODE_SHARED, stream_flags, buffer_duration, 0,
1211       reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
1212   if (FAILED(error.Error())) {
1213     RTC_LOG(LS_ERROR) << "IAudioClient::Initialize failed: "
1214                       << ErrorToString(error);
1215     return error.Error();
1216   }
1217 
1218   // If a stream is initialized to be event driven and in shared mode, the
1219   // associated application must also obtain a handle by making a call to
1220   // IAudioClient::SetEventHandle.
1221   if (use_event) {
1222     error = client->SetEventHandle(event_handle);
1223     if (FAILED(error.Error())) {
1224       RTC_LOG(LS_ERROR) << "IAudioClient::SetEventHandle failed: "
1225                         << ErrorToString(error);
1226       return error.Error();
1227     }
1228   }
1229 
1230   UINT32 buffer_size_in_frames = 0;
1231   // Retrieves the size (maximum capacity) of the endpoint buffer. The size is
1232   // expressed as the number of audio frames the buffer can hold.
1233   // For rendering clients, the buffer length determines the maximum amount of
1234   // rendering data that the application can write to the endpoint buffer
1235   // during a single processing pass. For capture clients, the buffer length
1236   // determines the maximum amount of capture data that the audio engine can
1237   // read from the endpoint buffer during a single processing pass.
1238   error = client->GetBufferSize(&buffer_size_in_frames);
1239   if (FAILED(error.Error())) {
1240     RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
1241                       << ErrorToString(error);
1242     return error.Error();
1243   }
1244 
1245   *endpoint_buffer_size = buffer_size_in_frames;
1246   RTC_DLOG(INFO) << "endpoint buffer size: " << buffer_size_in_frames
1247                  << " [audio frames]";
1248   const double size_in_ms = static_cast<double>(buffer_size_in_frames) /
1249                             (format->Format.nSamplesPerSec / 1000.0);
1250   RTC_DLOG(INFO) << "endpoint buffer size: "
1251                  << static_cast<int>(size_in_ms + 0.5) << " [ms]";
1252   RTC_DLOG(INFO) << "bytes per audio frame: " << format->Format.nBlockAlign;
1253   RTC_DLOG(INFO) << "endpoint buffer size: "
1254                  << buffer_size_in_frames * format->Format.nChannels *
1255                         (format->Format.wBitsPerSample / 8)
1256                  << " [bytes]";
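  // For PCM formats, nChannels * (wBitsPerSample / 8) equals nBlockAlign, so
  // the byte count logged above is simply buffer_size_in_frames * nBlockAlign.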
1257 
1258   // TODO(henrika): utilize when delay measurements are added.
1259   REFERENCE_TIME latency = 0;
1260   error = client->GetStreamLatency(&latency);
1261   RTC_DLOG(INFO) << "stream latency: " << ReferenceTimeToTimeDelta(latency).ms()
1262                  << " [ms]";
1263   return error.Error();
1264 }
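// Illustrative call sequence for the shared-mode initialization above (hedged
// sketch; CreateClient() and GetSharedModeMixFormat() refer to helpers
// declared earlier in this utility and their exact signatures may differ):
//
//   ComPtr<IAudioClient> client = CreateClient(device_id, eRender, eConsole);
//   WAVEFORMATEXTENSIBLE format;
//   GetSharedModeMixFormat(client.Get(), &format);
//   uint32_t endpoint_buffer_size = 0;
//   SharedModeInitialize(client.Get(), &format, event_handle,
//                        /*buffer_duration=*/0, /*auto_convert_pcm=*/false,
//                        &endpoint_buffer_size);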
1265 
1266 HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
1267                                        const WAVEFORMATEXTENSIBLE* format,
1268                                        HANDLE event_handle,
1269                                        uint32_t period_in_frames,
1270                                        bool auto_convert_pcm,
1271                                        uint32_t* endpoint_buffer_size) {
1272   RTC_DLOG(INFO) << "SharedModeInitializeLowLatency: period_in_frames="
1273                  << period_in_frames
1274                  << ", auto_convert_pcm=" << auto_convert_pcm;
1275   RTC_DCHECK(client);
1276   RTC_DCHECK_GT(period_in_frames, 0);
1277   if (auto_convert_pcm) {
1278     RTC_DLOG(LS_WARNING) << "Sample rate converter is enabled";
1279   }
1280 
1281   // Define stream flags.
1282   DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
1283   bool use_event =
1284       (event_handle != nullptr && event_handle != INVALID_HANDLE_VALUE);
1285   if (use_event) {
1286     stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
1287     RTC_DLOG(INFO) << "The stream is initialized to be event driven";
1288   }
1289   if (auto_convert_pcm) {
1290     stream_flags |= AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
1291     stream_flags |= AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
1292   }
1293   RTC_DLOG(INFO) << "stream_flags: 0x" << rtc::ToHex(stream_flags);
1294 
1295   // Initialize the shared mode client for lowest possible latency.
1296   // It is assumed that GetSharedModeEnginePeriod() has been used to query the
1297   // smallest possible engine period and that it is given by |period_in_frames|.
1298   _com_error error = client->InitializeSharedAudioStream(
1299       stream_flags, period_in_frames,
1300       reinterpret_cast<const WAVEFORMATEX*>(format), nullptr);
1301   if (FAILED(error.Error())) {
1302     RTC_LOG(LS_ERROR) << "IAudioClient3::InitializeSharedAudioStream failed: "
1303                       << ErrorToString(error);
1304     return error.Error();
1305   }
1306 
1307   // Set the event handle.
1308   if (use_event) {
1309     error = client->SetEventHandle(event_handle);
1310     if (FAILED(error.Error())) {
1311       RTC_LOG(LS_ERROR) << "IAudioClient::SetEventHandle failed: "
1312                         << ErrorToString(error);
1313       return error.Error();
1314     }
1315   }
1316 
1317   UINT32 buffer_size_in_frames = 0;
1318   // Retrieve the size (maximum capacity) of the endpoint buffer.
1319   error = client->GetBufferSize(&buffer_size_in_frames);
1320   if (FAILED(error.Error())) {
1321     RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
1322                       << ErrorToString(error);
1323     return error.Error();
1324   }
1325 
1326   *endpoint_buffer_size = buffer_size_in_frames;
1327   RTC_DLOG(INFO) << "endpoint buffer size: " << buffer_size_in_frames
1328                  << " [audio frames]";
1329   const double size_in_ms = static_cast<double>(buffer_size_in_frames) /
1330                             (format->Format.nSamplesPerSec / 1000.0);
1331   RTC_DLOG(INFO) << "endpoint buffer size: "
1332                  << static_cast<int>(size_in_ms + 0.5) << " [ms]";
1333   RTC_DLOG(INFO) << "bytes per audio frame: " << format->Format.nBlockAlign;
1334   RTC_DLOG(INFO) << "endpoint buffer size: "
1335                  << buffer_size_in_frames * format->Format.nChannels *
1336                         (format->Format.wBitsPerSample / 8)
1337                  << " [bytes]";
1338 
1339   // TODO(henrika): utilize when delay measurements are added.
1340   REFERENCE_TIME latency = 0;
1341   error = client->GetStreamLatency(&latency);
1342   if (FAILED(error.Error())) {
1343     RTC_LOG(LS_WARNING) << "IAudioClient::GetStreamLatency failed: "
1344                         << ErrorToString(error);
1345   } else {
1346     RTC_DLOG(INFO) << "stream latency: "
1347                    << ReferenceTimeToTimeDelta(latency).ms() << " [ms]";
1348   }
1349   return error.Error();
1350 }
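// Hedged sketch of the intended pairing with GetSharedModeEnginePeriod();
// the parameter names below are illustrative only:
//
//   uint32_t default_period = 0, fundamental_period = 0;
//   uint32_t min_period = 0, max_period = 0;
//   GetSharedModeEnginePeriod(client3.Get(), &format, &default_period,
//                             &fundamental_period, &min_period, &max_period);
//   uint32_t endpoint_buffer_size = 0;
//   SharedModeInitializeLowLatency(client3.Get(), &format, event_handle,
//                                  min_period, /*auto_convert_pcm=*/false,
//                                  &endpoint_buffer_size);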
1351 
1352 ComPtr<IAudioRenderClient> CreateRenderClient(IAudioClient* client) {
1353   RTC_DLOG(INFO) << "CreateRenderClient";
1354   RTC_DCHECK(client);
1355   // Get access to the IAudioRenderClient interface. This interface
1356   // enables us to write output data to a rendering endpoint buffer.
1357   ComPtr<IAudioRenderClient> audio_render_client;
1358   _com_error error = client->GetService(IID_PPV_ARGS(&audio_render_client));
1359   if (FAILED(error.Error())) {
1360     RTC_LOG(LS_ERROR)
1361         << "IAudioClient::GetService(IID_IAudioRenderClient) failed: "
1362         << ErrorToString(error);
1363     return ComPtr<IAudioRenderClient>();
1364   }
1365   return audio_render_client;
1366 }
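// Illustrative render pass built on this helper (hedged sketch; in the
// event-driven case it runs each time the stream event is signaled):
//
//   ComPtr<IAudioRenderClient> render_client = CreateRenderClient(client.Get());
//   UINT32 padding = 0;
//   client->GetCurrentPadding(&padding);
//   UINT32 frames = endpoint_buffer_size - padding;
//   BYTE* data = nullptr;
//   if (SUCCEEDED(render_client->GetBuffer(frames, &data))) {
//     // ... write |frames| * nBlockAlign bytes of PCM into |data| ...
//     render_client->ReleaseBuffer(frames, 0);
//   }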
1367 
1368 ComPtr<IAudioCaptureClient> CreateCaptureClient(IAudioClient* client) {
1369   RTC_DLOG(INFO) << "CreateCaptureClient";
1370   RTC_DCHECK(client);
1371   // Get access to the IAudioCaptureClient interface. This interface
1372   // enables us to read input data from a capturing endpoint buffer.
1373   ComPtr<IAudioCaptureClient> audio_capture_client;
1374   _com_error error = client->GetService(IID_PPV_ARGS(&audio_capture_client));
1375   if (FAILED(error.Error())) {
1376     RTC_LOG(LS_ERROR)
1377         << "IAudioClient::GetService(IID_IAudioCaptureClient) failed: "
1378         << ErrorToString(error);
1379     return ComPtr<IAudioCaptureClient>();
1380   }
1381   return audio_capture_client;
1382 }
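// Illustrative capture pass built on this helper (hedged sketch):
//
//   ComPtr<IAudioCaptureClient> capture_client =
//       CreateCaptureClient(client.Get());
//   UINT32 num_frames_in_next_packet = 0;
//   capture_client->GetNextPacketSize(&num_frames_in_next_packet);
//   while (num_frames_in_next_packet > 0) {
//     BYTE* data = nullptr;
//     UINT32 num_frames_to_read = 0;
//     DWORD flags = 0;
//     capture_client->GetBuffer(&data, &num_frames_to_read, &flags, nullptr,
//                               nullptr);
//     // ... consume |num_frames_to_read| frames from |data| ...
//     capture_client->ReleaseBuffer(num_frames_to_read);
//     capture_client->GetNextPacketSize(&num_frames_in_next_packet);
//   }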
1383 
1384 ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client) {
1385   RTC_DLOG(INFO) << "CreateAudioClock";
1386   RTC_DCHECK(client);
1387   // Get access to the IAudioClock interface. This interface enables us to
1388   // monitor a stream's data rate and the current position in the stream.
1389   ComPtr<IAudioClock> audio_clock;
1390   _com_error error = client->GetService(IID_PPV_ARGS(&audio_clock));
1391   if (FAILED(error.Error())) {
1392     RTC_LOG(LS_ERROR) << "IAudioClient::GetService(IID_IAudioClock) failed: "
1393                       << ErrorToString(error);
1394     return ComPtr<IAudioClock>();
1395   }
1396   return audio_clock;
1397 }
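// Hedged sketch of how the clock can be queried for the stream position,
// e.g. as input to future delay measurements:
//
//   ComPtr<IAudioClock> audio_clock = CreateAudioClock(client.Get());
//   UINT64 device_frequency = 0;
//   UINT64 position = 0;
//   audio_clock->GetFrequency(&device_frequency);
//   audio_clock->GetPosition(&position, nullptr);
//   double seconds_played = static_cast<double>(position) / device_frequency;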
1398 
1399 ComPtr<IAudioSessionControl> CreateAudioSessionControl(IAudioClient* client) {
1400   RTC_DLOG(INFO) << "CreateAudioSessionControl";
1401   RTC_DCHECK(client);
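  // Get access to the IAudioSessionControl interface. This interface enables
  // a client to configure the control parameters for an audio session and to
  // monitor events in the session.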
1402   ComPtr<IAudioSessionControl> audio_session_control;
1403   _com_error error = client->GetService(IID_PPV_ARGS(&audio_session_control));
1404   if (FAILED(error.Error())) {
1405     RTC_LOG(LS_ERROR) << "IAudioClient::GetService(IID_IAudioSessionControl)"
1406                       << " failed: " << ErrorToString(error);
1407     return ComPtr<IAudioSessionControl>();
1408   }
1409   return audio_session_control;
1410 }
1411 
1412 ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(IAudioClient* client) {
1413   RTC_DLOG(INFO) << "CreateSimpleAudioVolume";
1414   RTC_DCHECK(client);
1415   // Get access to the ISimpleAudioVolume interface. This interface enables a
1416   // client to control the master volume level of an audio session.
1417   ComPtr<ISimpleAudioVolume> simple_audio_volume;
1418   _com_error error = client->GetService(IID_PPV_ARGS(&simple_audio_volume));
1419   if (FAILED(error.Error())) {
1420     RTC_LOG(LS_ERROR)
1421         << "IAudioClient::GetService(IID_ISimpleAudioVolume) failed: "
1422         << ErrorToString(error);
1423     return ComPtr<ISimpleAudioVolume>();
1424   }
1425   return simple_audio_volume;
1426 }
1427 
1428 bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
1429                                          IAudioRenderClient* render_client) {
1430   RTC_DLOG(INFO) << "FillRenderEndpointBufferWithSilence";
1431   RTC_DCHECK(client);
1432   RTC_DCHECK(render_client);
1433   UINT32 endpoint_buffer_size = 0;
1434   _com_error error = client->GetBufferSize(&endpoint_buffer_size);
1435   if (FAILED(error.Error())) {
1436     RTC_LOG(LS_ERROR) << "IAudioClient::GetBufferSize failed: "
1437                       << ErrorToString(error);
1438     return false;
1439   }
1440 
1441   UINT32 num_queued_frames = 0;
1442   // Get number of audio frames that are queued up to play in the endpoint
1443   // buffer.
1444   error = client->GetCurrentPadding(&num_queued_frames);
1445   if (FAILED(error.Error())) {
1446     RTC_LOG(LS_ERROR) << "IAudioClient::GetCurrentPadding failed: "
1447                       << ErrorToString(error);
1448     return false;
1449   }
1450   RTC_DLOG(INFO) << "num_queued_frames: " << num_queued_frames;
1451 
1452   BYTE* data = nullptr;
1453   int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
1454   RTC_DLOG(INFO) << "num_frames_to_fill: " << num_frames_to_fill;
1455   error = render_client->GetBuffer(num_frames_to_fill, &data);
1456   if (FAILED(error.Error())) {
1457     RTC_LOG(LS_ERROR) << "IAudioRenderClient::GetBuffer failed: "
1458                       << ErrorToString(error);
1459     return false;
1460   }
1461 
1462   // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
1463   // explicitly write silence data to the rendering buffer.
1464   error = render_client->ReleaseBuffer(num_frames_to_fill,
1465                                        AUDCLNT_BUFFERFLAGS_SILENT);
1466   if (FAILED(error.Error())) {
1467     RTC_LOG(LS_ERROR) << "IAudioRenderClient::ReleaseBuffer failed: "
1468                       << ErrorToString(error);
1469     return false;
1470   }
1471 
1472   return true;
1473 }
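// Illustrative usage (hedged): pre-fill the endpoint buffer right after
// initialization and before IAudioClient::Start() so the first packet the
// engine reads is silence rather than stale data:
//
//   if (FillRenderEndpointBufferWithSilence(client.Get(), render_client.Get()))
//     client->Start();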
1474 
1475 std::string WaveFormatToString(const WaveFormatWrapper format) {
1476   char ss_buf[1024];
1477   rtc::SimpleStringBuilder ss(ss_buf);
1478   // Start with the WAVEFORMATEX part (which always exists).
1479   ss.AppendFormat("wFormatTag: %s (0x%X)",
1480                   WaveFormatTagToString(format->wFormatTag),
1481                   format->wFormatTag);
1482   ss.AppendFormat(", nChannels: %d", format->nChannels);
1483   ss.AppendFormat(", nSamplesPerSec: %d", format->nSamplesPerSec);
1484   ss.AppendFormat(", nAvgBytesPerSec: %d", format->nAvgBytesPerSec);
1485   ss.AppendFormat(", nBlockAlign: %d", format->nBlockAlign);
1486   ss.AppendFormat(", wBitsPerSample: %d", format->wBitsPerSample);
1487   ss.AppendFormat(", cbSize: %d", format->cbSize);
1488   if (!format.IsExtensible())
1489     return ss.str();
1490 
1491   // Append the WAVEFORMATEXTENSIBLE part (which we know exists).
1492   ss.AppendFormat(
1493       " [+] wValidBitsPerSample: %d, dwChannelMask: %s",
1494       format.GetExtensible()->Samples.wValidBitsPerSample,
1495       ChannelMaskToString(format.GetExtensible()->dwChannelMask).c_str());
1496   if (format.IsPcm()) {
1497     ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_PCM");
1498   } else if (format.IsFloat()) {
1499     ss.AppendFormat("%s", ", SubFormat: KSDATAFORMAT_SUBTYPE_IEEE_FLOAT");
1500   } else {
1501     ss.AppendFormat("%s", ", SubFormat: NOT_SUPPORTED");
1502   }
1503   return ss.str();
1504 }
1505 
1506 webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time) {
1507   // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
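  // Example: a REFERENCE_TIME of 100000 (100-ns units) corresponds to
  // 100000 * 0.1 us = 10000 us = 10 ms.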
1508   return webrtc::TimeDelta::Micros(0.1 * time + 0.5);
1509 }
1510 
1511 double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate) {
1512   // Convert the current period in frames into milliseconds.
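  // Example: 480 frames at 48000 Hz <=> 480 / 48.0 = 10 ms.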
1513   return static_cast<double>(num_frames) / (sample_rate / 1000.0);
1514 }
1515 
1516 std::string ErrorToString(const _com_error& error) {
1517   char ss_buf[1024];
1518   rtc::SimpleStringBuilder ss(ss_buf);
1519   ss.AppendFormat("(HRESULT: 0x%08X)", error.Error());
1520   return ss.str();
1521 }
1522 
1523 }  // namespace core_audio_utility
1524 }  // namespace webrtc_win
1525 }  // namespace webrtc
1526