/*
 * Copyright (C) 2008 Ole André Vadla Ravnås <ole.andre.ravnas@tandberg.com>
 * Copyright (C) 2013 Collabora Ltd.
 *   Author: Sebastian Dröge <sebastian.droege@collabora.co.uk>
 * Copyright (C) 2018 Centricular Ltd.
 *   Author: Nirbheek Chauhan <nirbheek@centricular.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * SECTION:element-wasapisink
 * @title: wasapisink
 *
 * Provides audio playback using the Windows Audio Session API available with
 * Vista and newer.
 *
 * ## Example pipelines
 * |[
 * gst-launch-1.0 -v audiotestsrc samplesperbuffer=160 ! wasapisink
 * ]| Generate 20 ms buffers and render to the default audio device.
 *
 * |[
 * gst-launch-1.0 -v audiotestsrc samplesperbuffer=160 ! wasapisink low-latency=true
 * ]| Same as above, but with the minimum possible latency.
 *
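 * The element is usually configured through its properties. As a rough
 * programmatic sketch (illustrative only; error handling, bus watching and
 * main-loop setup are omitted):
 * |[
 * GstElement *pipeline, *sink;
 *
 * gst_init (NULL, NULL);
 * pipeline = gst_parse_launch ("audiotestsrc ! wasapisink name=sink", NULL);
 * sink = gst_bin_get_by_name (GST_BIN (pipeline), "sink");
 * g_object_set (sink, "low-latency", TRUE, NULL);
 * gst_object_unref (sink);
 * gst_element_set_state (pipeline, GST_STATE_PLAYING);
 * ]|
 *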
 */
#ifdef HAVE_CONFIG_H
#  include <config.h>
#endif

#include "gstwasapisink.h"

#include <avrt.h>

GST_DEBUG_CATEGORY_STATIC (gst_wasapi_sink_debug);
#define GST_CAT_DEFAULT gst_wasapi_sink_debug

static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_WASAPI_STATIC_CAPS));

#define DEFAULT_ROLE          GST_WASAPI_DEVICE_ROLE_CONSOLE
#define DEFAULT_MUTE          FALSE
#define DEFAULT_EXCLUSIVE     FALSE
#define DEFAULT_LOW_LATENCY   FALSE
#define DEFAULT_AUDIOCLIENT3  TRUE

enum
{
  PROP_0,
  PROP_ROLE,
  PROP_MUTE,
  PROP_DEVICE,
  PROP_EXCLUSIVE,
  PROP_LOW_LATENCY,
  PROP_AUDIOCLIENT3
};

static void gst_wasapi_sink_dispose (GObject * object);
static void gst_wasapi_sink_finalize (GObject * object);
static void gst_wasapi_sink_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec);
static void gst_wasapi_sink_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec);

static GstCaps *gst_wasapi_sink_get_caps (GstBaseSink * bsink,
    GstCaps * filter);

static gboolean gst_wasapi_sink_prepare (GstAudioSink * asink,
    GstAudioRingBufferSpec * spec);
static gboolean gst_wasapi_sink_unprepare (GstAudioSink * asink);
static gboolean gst_wasapi_sink_open (GstAudioSink * asink);
static gboolean gst_wasapi_sink_close (GstAudioSink * asink);
static gint gst_wasapi_sink_write (GstAudioSink * asink,
    gpointer data, guint length);
static guint gst_wasapi_sink_delay (GstAudioSink * asink);
static void gst_wasapi_sink_reset (GstAudioSink * asink);

#define gst_wasapi_sink_parent_class parent_class
G_DEFINE_TYPE (GstWasapiSink, gst_wasapi_sink, GST_TYPE_AUDIO_SINK);

static void
gst_wasapi_sink_class_init (GstWasapiSinkClass * klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
  GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
  GstBaseSinkClass *gstbasesink_class = GST_BASE_SINK_CLASS (klass);
  GstAudioSinkClass *gstaudiosink_class = GST_AUDIO_SINK_CLASS (klass);

  gobject_class->dispose = gst_wasapi_sink_dispose;
  gobject_class->finalize = gst_wasapi_sink_finalize;
  gobject_class->set_property = gst_wasapi_sink_set_property;
  gobject_class->get_property = gst_wasapi_sink_get_property;

  g_object_class_install_property (gobject_class,
      PROP_ROLE,
      g_param_spec_enum ("role", "Role",
          "Role of the device: communications, multimedia, etc",
          GST_WASAPI_DEVICE_TYPE_ROLE, DEFAULT_ROLE, G_PARAM_READWRITE |
          G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY));

  g_object_class_install_property (gobject_class,
      PROP_MUTE,
      g_param_spec_boolean ("mute", "Mute", "Mute state of this stream",
          DEFAULT_MUTE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
          GST_PARAM_MUTABLE_PLAYING));

  g_object_class_install_property (gobject_class,
      PROP_DEVICE,
      g_param_spec_string ("device", "Device",
          "WASAPI playback device as a GUID string",
          NULL, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class,
      PROP_EXCLUSIVE,
      g_param_spec_boolean ("exclusive", "Exclusive mode",
          "Open the device in exclusive mode",
          DEFAULT_EXCLUSIVE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class,
      PROP_LOW_LATENCY,
      g_param_spec_boolean ("low-latency", "Low latency",
          "Optimize all settings for lowest latency. Always safe to enable.",
          DEFAULT_LOW_LATENCY, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class,
      PROP_AUDIOCLIENT3,
      g_param_spec_boolean ("use-audioclient3", "Use the AudioClient3 API",
          "Use the Windows 10 AudioClient3 API when available and if the "
          "low-latency property is set to TRUE",
          DEFAULT_AUDIOCLIENT3, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  gst_element_class_add_static_pad_template (gstelement_class, &sink_template);
  gst_element_class_set_static_metadata (gstelement_class, "WasapiSink",
      "Sink/Audio/Hardware",
      "Stream audio to an audio playback device through WASAPI",
      "Nirbheek Chauhan <nirbheek@centricular.com>, "
      "Ole André Vadla Ravnås <ole.andre.ravnas@tandberg.com>");

  gstbasesink_class->get_caps = GST_DEBUG_FUNCPTR (gst_wasapi_sink_get_caps);

  gstaudiosink_class->prepare = GST_DEBUG_FUNCPTR (gst_wasapi_sink_prepare);
  gstaudiosink_class->unprepare = GST_DEBUG_FUNCPTR (gst_wasapi_sink_unprepare);
  gstaudiosink_class->open = GST_DEBUG_FUNCPTR (gst_wasapi_sink_open);
  gstaudiosink_class->close = GST_DEBUG_FUNCPTR (gst_wasapi_sink_close);
  gstaudiosink_class->write = GST_DEBUG_FUNCPTR (gst_wasapi_sink_write);
  gstaudiosink_class->delay = GST_DEBUG_FUNCPTR (gst_wasapi_sink_delay);
  gstaudiosink_class->reset = GST_DEBUG_FUNCPTR (gst_wasapi_sink_reset);

  GST_DEBUG_CATEGORY_INIT (gst_wasapi_sink_debug, "wasapisink",
      0, "Windows audio session API sink");
}

static void
gst_wasapi_sink_init (GstWasapiSink * self)
{
  self->role = DEFAULT_ROLE;
  self->mute = DEFAULT_MUTE;
  self->sharemode = AUDCLNT_SHAREMODE_SHARED;
  self->low_latency = DEFAULT_LOW_LATENCY;
  self->try_audioclient3 = DEFAULT_AUDIOCLIENT3;
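  /* Auto-reset event that WASAPI signals in event-driven mode whenever the
   * device needs more data; it is registered with the client in prepare() via
   * IAudioClient::SetEventHandle() and waited on in write() */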
  self->event_handle = CreateEvent (NULL, FALSE, FALSE, NULL);
  self->client_needs_restart = FALSE;

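  /* Join the multithreaded COM apartment on this thread; the matching
   * CoUninitialize() is in finalize() */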
  CoInitializeEx (NULL, COINIT_MULTITHREADED);
}

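/* dispose() can run more than once, so every handle and COM interface is
 * NULL-checked before being released and then cleared */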
static void
gst_wasapi_sink_dispose (GObject * object)
{
  GstWasapiSink *self = GST_WASAPI_SINK (object);

  if (self->event_handle != NULL) {
    CloseHandle (self->event_handle);
    self->event_handle = NULL;
  }

  if (self->client != NULL) {
    IUnknown_Release (self->client);
    self->client = NULL;
  }

  if (self->render_client != NULL) {
    IUnknown_Release (self->render_client);
    self->render_client = NULL;
  }

  G_OBJECT_CLASS (gst_wasapi_sink_parent_class)->dispose (object);
}

static void
gst_wasapi_sink_finalize (GObject * object)
{
  GstWasapiSink *self = GST_WASAPI_SINK (object);

  CoTaskMemFree (self->mix_format);
  self->mix_format = NULL;

  CoUninitialize ();

  if (self->cached_caps != NULL) {
    gst_caps_unref (self->cached_caps);
    self->cached_caps = NULL;
  }

  g_clear_pointer (&self->positions, g_free);
  g_clear_pointer (&self->device_strid, g_free);
  self->mute = FALSE;

  G_OBJECT_CLASS (gst_wasapi_sink_parent_class)->finalize (object);
}

static void
gst_wasapi_sink_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstWasapiSink *self = GST_WASAPI_SINK (object);

  switch (prop_id) {
    case PROP_ROLE:
      self->role = gst_wasapi_device_role_to_erole (g_value_get_enum (value));
      break;
    case PROP_MUTE:
      self->mute = g_value_get_boolean (value);
      break;
    case PROP_DEVICE:
    {
      const gchar *device = g_value_get_string (value);
      g_free (self->device_strid);
      self->device_strid =
          device ? g_utf8_to_utf16 (device, -1, NULL, NULL, NULL) : NULL;
      break;
    }
    case PROP_EXCLUSIVE:
      self->sharemode = g_value_get_boolean (value)
          ? AUDCLNT_SHAREMODE_EXCLUSIVE : AUDCLNT_SHAREMODE_SHARED;
      break;
    case PROP_LOW_LATENCY:
      self->low_latency = g_value_get_boolean (value);
      break;
    case PROP_AUDIOCLIENT3:
      self->try_audioclient3 = g_value_get_boolean (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}

static void
gst_wasapi_sink_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec)
{
  GstWasapiSink *self = GST_WASAPI_SINK (object);

  switch (prop_id) {
    case PROP_ROLE:
      g_value_set_enum (value, gst_wasapi_erole_to_device_role (self->role));
      break;
    case PROP_MUTE:
      g_value_set_boolean (value, self->mute);
      break;
    case PROP_DEVICE:
      g_value_take_string (value, self->device_strid ?
          g_utf16_to_utf8 (self->device_strid, -1, NULL, NULL, NULL) : NULL);
      break;
    case PROP_EXCLUSIVE:
      g_value_set_boolean (value,
          self->sharemode == AUDCLNT_SHAREMODE_EXCLUSIVE);
      break;
    case PROP_LOW_LATENCY:
      g_value_set_boolean (value, self->low_latency);
      break;
    case PROP_AUDIOCLIENT3:
      g_value_set_boolean (value, self->try_audioclient3);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}

static gboolean
gst_wasapi_sink_can_audioclient3 (GstWasapiSink * self)
{
  /* AudioClient3 API only makes sense in shared mode */
  if (self->sharemode != AUDCLNT_SHAREMODE_SHARED)
    return FALSE;

  if (!self->try_audioclient3) {
    GST_INFO_OBJECT (self, "AudioClient3 disabled by user");
    return FALSE;
  }

  if (!gst_wasapi_util_have_audioclient3 ()) {
    GST_INFO_OBJECT (self, "AudioClient3 not available on this OS");
    return FALSE;
  }

  /* Only use audioclient3 when low-latency is requested because otherwise
   * very slow machines and VMs with 1 CPU allocated will get glitches:
   * https://bugzilla.gnome.org/show_bug.cgi?id=794497 */
  if (!self->low_latency) {
    GST_INFO_OBJECT (self, "AudioClient3 disabled because low-latency mode "
        "was not requested");
    return FALSE;
  }

  return TRUE;
}

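/* Probe the device mix format, translate it into caps and cache the result.
 * Before the device has been opened we can only return the template caps. */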
static GstCaps *
gst_wasapi_sink_get_caps (GstBaseSink * bsink, GstCaps * filter)
{
  GstWasapiSink *self = GST_WASAPI_SINK (bsink);
  WAVEFORMATEX *format = NULL;
  GstCaps *caps = NULL;

  GST_DEBUG_OBJECT (self, "entering get caps");

  if (self->cached_caps) {
    caps = gst_caps_ref (self->cached_caps);
  } else {
    GstCaps *template_caps;
    gboolean ret;

    template_caps = gst_pad_get_pad_template_caps (bsink->sinkpad);

    if (!self->client) {
      caps = template_caps;
      goto out;
    }

    ret = gst_wasapi_util_get_device_format (GST_ELEMENT (self),
        self->sharemode, self->device, self->client, &format);
    if (!ret) {
      GST_ELEMENT_ERROR (self, STREAM, FORMAT, (NULL),
          ("failed to detect format"));
      gst_caps_unref (template_caps);
      return NULL;
    }

    gst_wasapi_util_parse_waveformatex ((WAVEFORMATEXTENSIBLE *) format,
        template_caps, &caps, &self->positions);
    if (caps == NULL) {
      GST_ELEMENT_ERROR (self, STREAM, FORMAT, (NULL), ("unknown format"));
      gst_caps_unref (template_caps);
      return NULL;
    }

    {
      gchar *pos_str = gst_audio_channel_positions_to_string (self->positions,
          format->nChannels);
      GST_INFO_OBJECT (self, "positions are: %s", pos_str);
      g_free (pos_str);
    }

    self->mix_format = format;
    gst_caps_replace (&self->cached_caps, caps);
    gst_caps_unref (template_caps);
  }

  if (filter) {
    GstCaps *filtered =
        gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (caps);
    caps = filtered;
  }

out:
  GST_DEBUG_OBJECT (self, "returning caps %" GST_PTR_FORMAT, caps);
  return caps;
}

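/* Look up the render device (either the configured GUID or the default device
 * for the configured role) and activate an IAudioClient on it. Stream setup is
 * deferred to prepare(). */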
static gboolean
gst_wasapi_sink_open (GstAudioSink * asink)
{
  GstWasapiSink *self = GST_WASAPI_SINK (asink);
  gboolean res = FALSE;
  IMMDevice *device = NULL;
  IAudioClient *client = NULL;

  GST_DEBUG_OBJECT (self, "opening device");

  if (self->client)
    return TRUE;

  /* FIXME: Switching the default device does not switch the stream to it,
   * even if the old device was unplugged. We need to handle this somehow.
   * For example, perhaps we should automatically switch to the new device if
   * the default device is changed and a device isn't explicitly selected. */
  if (!gst_wasapi_util_get_device_client (GST_ELEMENT (self), eRender,
          self->role, self->device_strid, &device, &client)) {
    if (!self->device_strid)
      GST_ELEMENT_ERROR (self, RESOURCE, OPEN_WRITE, (NULL),
          ("Failed to get default device"));
    else
      GST_ELEMENT_ERROR (self, RESOURCE, OPEN_WRITE, (NULL),
          ("Failed to open device %S", self->device_strid));
    goto beach;
  }

  self->client = client;
  self->device = device;
  res = TRUE;

beach:

  return res;
}

static gboolean
gst_wasapi_sink_close (GstAudioSink * asink)
{
  GstWasapiSink *self = GST_WASAPI_SINK (asink);

  if (self->device != NULL) {
    IUnknown_Release (self->device);
    self->device = NULL;
  }

  if (self->client != NULL) {
    IUnknown_Release (self->client);
    self->client = NULL;
  }

  return TRUE;
}

/* Get the amount of empty space in the device buffer that we can write into */
static gint
gst_wasapi_sink_get_can_frames (GstWasapiSink * self)
{
  HRESULT hr;
  guint n_frames_padding;

  /* There is no padding in exclusive mode since there is no ringbuffer */
  if (self->sharemode == AUDCLNT_SHAREMODE_EXCLUSIVE) {
    GST_DEBUG_OBJECT (self, "exclusive mode, can write: %i",
        self->buffer_frame_count);
    return self->buffer_frame_count;
  }

  /* Frames the card hasn't rendered yet */
  hr = IAudioClient_GetCurrentPadding (self->client, &n_frames_padding);
  HR_FAILED_ELEMENT_ERROR_RET (hr, IAudioClient::GetCurrentPadding, self, -1);

  GST_DEBUG_OBJECT (self, "%i unread frames (padding)", n_frames_padding);

  /* We can write out this many frames */
  return self->buffer_frame_count - n_frames_padding;
}

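/* Initialize the audio client with the negotiated format, size the ringbuffer
 * segments from the actual device period, pre-fill the device buffer with
 * silence and start the stream */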
static gboolean
gst_wasapi_sink_prepare (GstAudioSink * asink, GstAudioRingBufferSpec * spec)
{
  GstWasapiSink *self = GST_WASAPI_SINK (asink);
  gboolean res = FALSE;
  REFERENCE_TIME latency_rt;
  guint bpf, rate, devicep_frames;
  HRESULT hr;

  CoInitializeEx (NULL, COINIT_MULTITHREADED);

  if (gst_wasapi_sink_can_audioclient3 (self)) {
    if (!gst_wasapi_util_initialize_audioclient3 (GST_ELEMENT (self), spec,
            (IAudioClient3 *) self->client, self->mix_format, self->low_latency,
            FALSE, &devicep_frames))
      goto beach;
  } else {
    if (!gst_wasapi_util_initialize_audioclient (GST_ELEMENT (self), spec,
            self->client, self->mix_format, self->sharemode, self->low_latency,
            FALSE, &devicep_frames))
      goto beach;
  }

  bpf = GST_AUDIO_INFO_BPF (&spec->info);
  rate = GST_AUDIO_INFO_RATE (&spec->info);

  /* Total size of the allocated buffer that we will write to */
  hr = IAudioClient_GetBufferSize (self->client, &self->buffer_frame_count);
  HR_FAILED_GOTO (hr, IAudioClient::GetBufferSize, beach);

  GST_INFO_OBJECT (self, "buffer size is %i frames, device period is %i "
      "frames, bpf is %i bytes, rate is %i Hz", self->buffer_frame_count,
      devicep_frames, bpf, rate);

  /* Actual latency-time/buffer-time will be different now */
  spec->segsize = devicep_frames * bpf;

  /* We need a minimum of 2 segments to ensure glitch-free playback */
  spec->segtotal = MAX (self->buffer_frame_count * bpf / spec->segsize, 2);

  GST_INFO_OBJECT (self, "segsize is %i, segtotal is %i", spec->segsize,
      spec->segtotal);

  /* Get latency for logging */
  hr = IAudioClient_GetStreamLatency (self->client, &latency_rt);
  HR_FAILED_GOTO (hr, IAudioClient::GetStreamLatency, beach);

  GST_INFO_OBJECT (self, "wasapi stream latency: %" G_GINT64_FORMAT " (%"
      G_GINT64_FORMAT "ms)", latency_rt, latency_rt / 10000);

  /* Set the event handler which will trigger writes */
  hr = IAudioClient_SetEventHandle (self->client, self->event_handle);
  HR_FAILED_GOTO (hr, IAudioClient::SetEventHandle, beach);

  /* Get render sink client and start it up */
  if (!gst_wasapi_util_get_render_client (GST_ELEMENT (self), self->client,
          &self->render_client)) {
    goto beach;
  }

  GST_INFO_OBJECT (self, "got render client");

  /* To avoid start-up glitches, before starting the streaming, we fill the
   * buffer with silence as recommended by the documentation:
   * https://msdn.microsoft.com/en-us/library/windows/desktop/dd370879%28v=vs.85%29.aspx */
  {
    gint n_frames, len;
    gint16 *dst = NULL;

    n_frames = gst_wasapi_sink_get_can_frames (self);
    if (n_frames < 1) {
      GST_ELEMENT_ERROR (self, RESOURCE, WRITE, (NULL),
          ("should have more than %i frames to write", n_frames));
      goto beach;
    }

    len = n_frames * self->mix_format->nBlockAlign;

    hr = IAudioRenderClient_GetBuffer (self->render_client, n_frames,
        (BYTE **) & dst);
    HR_FAILED_GOTO (hr, IAudioRenderClient::GetBuffer, beach);

    GST_DEBUG_OBJECT (self, "pre-wrote %i bytes of silence", len);

    hr = IAudioRenderClient_ReleaseBuffer (self->render_client, n_frames,
        AUDCLNT_BUFFERFLAGS_SILENT);
    HR_FAILED_GOTO (hr, IAudioRenderClient::ReleaseBuffer, beach);
  }

  hr = IAudioClient_Start (self->client);
  HR_FAILED_GOTO (hr, IAudioClient::Start, beach);
  self->client_needs_restart = FALSE;

  gst_audio_ring_buffer_set_channel_positions (GST_AUDIO_BASE_SINK
      (self)->ringbuffer, self->positions);

  res = TRUE;

beach:
  /* unprepare() is not called if prepare() fails, but we want it to be, so call
   * it manually when needed */
  if (!res)
    gst_wasapi_sink_unprepare (asink);

  return res;
}

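/* Stop streaming and drop the render client. The IAudioClient itself is only
 * released in close(). */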
static gboolean
gst_wasapi_sink_unprepare (GstAudioSink * asink)
{
  GstWasapiSink *self = GST_WASAPI_SINK (asink);

  if (self->client != NULL) {
    IAudioClient_Stop (self->client);
  }

  if (self->render_client != NULL) {
    IUnknown_Release (self->render_client);
    self->render_client = NULL;
  }

  CoUninitialize ();

  return TRUE;
}

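/* Wait, if necessary, for space in the device buffer and then copy as many
 * frames as fit; in exclusive mode the whole buffer must be filled in one go.
 * Returns the number of bytes written, or -1 on error. */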
static gint
gst_wasapi_sink_write (GstAudioSink * asink, gpointer data, guint length)
{
  GstWasapiSink *self = GST_WASAPI_SINK (asink);
  HRESULT hr;
  gint16 *dst = NULL;
  DWORD dwWaitResult;
  gint can_frames, written_len = 0;
  guint have_frames, n_frames, write_len;

  GST_OBJECT_LOCK (self);
  if (self->client_needs_restart) {
    hr = IAudioClient_Start (self->client);
    HR_FAILED_ELEMENT_ERROR_AND (hr, IAudioClient::Start, self,
        GST_OBJECT_UNLOCK (self); goto err);
    self->client_needs_restart = FALSE;
  }
  GST_OBJECT_UNLOCK (self);

  /* We have N frames to be written out */
  have_frames = length / (self->mix_format->nBlockAlign);

  if (self->sharemode == AUDCLNT_SHAREMODE_EXCLUSIVE) {
    /* In exclusive mode we always have to wait */
    dwWaitResult = WaitForSingleObject (self->event_handle, INFINITE);
    if (dwWaitResult != WAIT_OBJECT_0) {
      GST_ERROR_OBJECT (self, "Error waiting for event handle: %x",
          (guint) dwWaitResult);
      goto err;
    }

    can_frames = gst_wasapi_sink_get_can_frames (self);
    if (can_frames < 0) {
      GST_ERROR_OBJECT (self, "Error getting frames to write to");
      goto err;
    }
    /* In exclusive mode we need to fill the whole buffer in one go or
     * GetBuffer will error out */
    if (can_frames != have_frames) {
      GST_ERROR_OBJECT (self,
          "Need exactly %i frames to write in exclusive mode, but got %i",
          can_frames, have_frames);
      goto err;
    }
  } else {
    /* In shared mode we can write parts of the buffer, so only wait
     * in case we can't write anything */
    can_frames = gst_wasapi_sink_get_can_frames (self);
    if (can_frames < 0) {
      GST_ERROR_OBJECT (self, "Error getting frames to write to");
      goto err;
    }

    if (can_frames == 0) {
      dwWaitResult = WaitForSingleObject (self->event_handle, INFINITE);
      if (dwWaitResult != WAIT_OBJECT_0) {
        GST_ERROR_OBJECT (self, "Error waiting for event handle: %x",
            (guint) dwWaitResult);
        goto err;
      }
      can_frames = gst_wasapi_sink_get_can_frames (self);
      if (can_frames < 0) {
        GST_ERROR_OBJECT (self, "Error getting frames to write to");
        goto err;
      }
    }
  }

  /* We will write out this many frames, and this much length */
  n_frames = MIN (can_frames, have_frames);
  write_len = n_frames * self->mix_format->nBlockAlign;

  GST_DEBUG_OBJECT (self, "total: %i, have_frames: %i (%i bytes), "
      "can_frames: %i, will write: %i (%i bytes)", self->buffer_frame_count,
      have_frames, length, can_frames, n_frames, write_len);

  hr = IAudioRenderClient_GetBuffer (self->render_client, n_frames,
      (BYTE **) & dst);
  HR_FAILED_ELEMENT_ERROR_AND (hr, IAudioRenderClient::GetBuffer, self,
      goto err);

  memcpy (dst, data, write_len);

  hr = IAudioRenderClient_ReleaseBuffer (self->render_client, n_frames,
      self->mute ? AUDCLNT_BUFFERFLAGS_SILENT : 0);
  HR_FAILED_ELEMENT_ERROR_AND (hr, IAudioRenderClient::ReleaseBuffer, self,
      goto err);

  written_len = write_len;

out:
  return written_len;

err:
  written_len = -1;
  goto out;
}

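/* Report the number of frames queued in the device buffer but not yet
 * rendered, as returned by IAudioClient::GetCurrentPadding() */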
static guint
gst_wasapi_sink_delay (GstAudioSink * asink)
{
  GstWasapiSink *self = GST_WASAPI_SINK (asink);
  guint delay = 0;
  HRESULT hr;

  hr = IAudioClient_GetCurrentPadding (self->client, &delay);
  HR_FAILED_RET (hr, IAudioClient::GetCurrentPadding, 0);

  return delay;
}

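/* Flush: stop and reset the client; playback is restarted lazily on the next
 * write() via the client_needs_restart flag */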
static void
gst_wasapi_sink_reset (GstAudioSink * asink)
{
  GstWasapiSink *self = GST_WASAPI_SINK (asink);
  HRESULT hr;

  GST_INFO_OBJECT (self, "reset called");

  if (!self->client)
    return;

  GST_OBJECT_LOCK (self);
  hr = IAudioClient_Stop (self->client);
  HR_FAILED_AND (hr, IAudioClient::Stop,);

  hr = IAudioClient_Reset (self->client);
  HR_FAILED_AND (hr, IAudioClient::Reset,);

  self->client_needs_restart = TRUE;
  GST_OBJECT_UNLOCK (self);
}