1 /***
2   This file is part of PulseAudio.
3 
4   Copyright 2004-2006 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6 
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11 
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16 
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20 
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24 
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 
29 #include <pulse/introspect.h>
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
37 
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/core-subscribe.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/play-memblockq.h>
48 #include <pulsecore/flist.h>
49 
50 #include "sink.h"
51 
52 #define MAX_MIX_CHANNELS 32
53 #define MIX_BUFFER_LENGTH (pa_page_size())
54 #define ABSOLUTE_MIN_LATENCY (500)
55 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
56 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
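
/* All latency limits above are in microseconds (pa_usec_t): the absolute
 * minimum is 500 us, the absolute maximum 10 s and the default fixed latency
 * 250 ms; PA_USEC_PER_SEC and PA_USEC_PER_MSEC are the conversion factors
 * from pulse/timeval.h. */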
57 
58 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
59 
60 struct pa_sink_volume_change {
61     pa_usec_t at;
62     pa_cvolume hw_volume;
63 
64     PA_LLIST_FIELDS(pa_sink_volume_change);
65 };
66 
67 struct set_state_data {
68     pa_sink_state_t state;
69     pa_suspend_cause_t suspend_cause;
70 };
71 
72 static void sink_free(pa_object *s);
73 
74 static void pa_sink_volume_change_push(pa_sink *s);
75 static void pa_sink_volume_change_flush(pa_sink *s);
76 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
77 
78 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
79     pa_assert(data);
80 
81     pa_zero(*data);
82     data->proplist = pa_proplist_new();
83     data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
84 
85     return data;
86 }
87 
88 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
89     pa_assert(data);
90 
91     pa_xfree(data->name);
92     data->name = pa_xstrdup(name);
93 }
94 
95 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
96     pa_assert(data);
97 
98     if ((data->sample_spec_is_set = !!spec))
99         data->sample_spec = *spec;
100 }
101 
102 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
103     pa_assert(data);
104 
105     if ((data->channel_map_is_set = !!map))
106         data->channel_map = *map;
107 }
108 
109 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
110     pa_assert(data);
111 
112     data->alternate_sample_rate_is_set = true;
113     data->alternate_sample_rate = alternate_sample_rate;
114 }
115 
116 void pa_sink_new_data_set_avoid_resampling(pa_sink_new_data *data, bool avoid_resampling) {
117     pa_assert(data);
118 
119     data->avoid_resampling_is_set = true;
120     data->avoid_resampling = avoid_resampling;
121 }
122 
123 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
124     pa_assert(data);
125 
126     if ((data->volume_is_set = !!volume))
127         data->volume = *volume;
128 }
129 
130 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
131     pa_assert(data);
132 
133     data->muted_is_set = true;
134     data->muted = mute;
135 }
136 
137 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
138     pa_assert(data);
139 
140     pa_xfree(data->active_port);
141     data->active_port = pa_xstrdup(port);
142 }
143 
144 void pa_sink_new_data_done(pa_sink_new_data *data) {
145     pa_assert(data);
146 
147     pa_proplist_free(data->proplist);
148 
149     if (data->ports)
150         pa_hashmap_free(data->ports);
151 
152     pa_xfree(data->name);
153     pa_xfree(data->active_port);
154 }
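
/* A minimal usage sketch of the pa_sink_new_data lifecycle, roughly as a sink
 * implementation would drive it (the names ss, map, u->core and "example_sink"
 * are placeholders, not anything defined in this file):
 *
 *     pa_sink_new_data data;
 *     pa_sink *sink;
 *
 *     pa_sink_new_data_init(&data);
 *     pa_sink_new_data_set_name(&data, "example_sink");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     pa_sink_new_data_set_channel_map(&data, &map);
 *     sink = pa_sink_new(u->core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);
 */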
155 
156 /* Called from main context */
157 static void reset_callbacks(pa_sink *s) {
158     pa_assert(s);
159 
160     s->set_state_in_main_thread = NULL;
161     s->set_state_in_io_thread = NULL;
162     s->get_volume = NULL;
163     s->set_volume = NULL;
164     s->write_volume = NULL;
165     s->get_mute = NULL;
166     s->set_mute = NULL;
167     s->request_rewind = NULL;
168     s->update_requested_latency = NULL;
169     s->set_port = NULL;
170     s->get_formats = NULL;
171     s->set_formats = NULL;
172     s->reconfigure = NULL;
173 }
174 
175 /* Called from main context */
176 pa_sink* pa_sink_new(
177         pa_core *core,
178         pa_sink_new_data *data,
179         pa_sink_flags_t flags) {
180 
181     pa_sink *s;
182     const char *name;
183     char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
184     pa_source_new_data source_data;
185     const char *dn;
186     char *pt;
187 
188     pa_assert(core);
189     pa_assert(data);
190     pa_assert(data->name);
191     pa_assert_ctl_context();
192 
193     s = pa_msgobject_new(pa_sink);
194 
195     if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
196         pa_log_debug("Failed to register name %s.", data->name);
197         pa_xfree(s);
198         return NULL;
199     }
200 
201     pa_sink_new_data_set_name(data, name);
202 
203     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
204         pa_xfree(s);
205         pa_namereg_unregister(core, name);
206         return NULL;
207     }
208 
209     /* FIXME, need to free s here on failure */
210 
211     pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
212     pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
213 
214     pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
215 
216     if (!data->channel_map_is_set)
217         pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
218 
219     pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
220     pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
221 
222     /* FIXME: There should probably be a general function for checking whether
223      * the sink volume is allowed to be set, like there is for sink inputs. */
224     pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
225 
226     if (!data->volume_is_set) {
227         pa_cvolume_reset(&data->volume, data->sample_spec.channels);
228         data->save_volume = false;
229     }
230 
231     pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
232     pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
233 
234     if (!data->muted_is_set)
235         data->muted = false;
236 
237     if (data->card)
238         pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
239 
240     pa_device_init_description(data->proplist, data->card);
241     pa_device_init_icon(data->proplist, true);
242     pa_device_init_intended_roles(data->proplist);
243 
244     if (!data->active_port) {
245         pa_device_port *p = pa_device_port_find_best(data->ports);
246         if (p)
247             pa_sink_new_data_set_port(data, p->name);
248     }
249 
250     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
251         pa_xfree(s);
252         pa_namereg_unregister(core, name);
253         return NULL;
254     }
255 
256     s->parent.parent.free = sink_free;
257     s->parent.process_msg = pa_sink_process_msg;
258 
259     s->core = core;
260     s->state = PA_SINK_INIT;
261     s->flags = flags;
262     s->priority = 0;
263     s->suspend_cause = data->suspend_cause;
264     s->name = pa_xstrdup(name);
265     s->proplist = pa_proplist_copy(data->proplist);
266     s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
267     s->module = data->module;
268     s->card = data->card;
269 
270     s->priority = pa_device_init_priority(s->proplist);
271 
272     s->sample_spec = data->sample_spec;
273     s->channel_map = data->channel_map;
274     s->default_sample_rate = s->sample_spec.rate;
275 
276     if (data->alternate_sample_rate_is_set)
277         s->alternate_sample_rate = data->alternate_sample_rate;
278     else
279         s->alternate_sample_rate = s->core->alternate_sample_rate;
280 
281     if (data->avoid_resampling_is_set)
282         s->avoid_resampling = data->avoid_resampling;
283     else
284         s->avoid_resampling = s->core->avoid_resampling;
285 
286     s->inputs = pa_idxset_new(NULL, NULL);
287     s->n_corked = 0;
288     s->input_to_master = NULL;
289 
290     s->reference_volume = s->real_volume = data->volume;
291     pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
292     s->base_volume = PA_VOLUME_NORM;
293     s->n_volume_steps = PA_VOLUME_NORM+1;
294     s->muted = data->muted;
295     s->refresh_volume = s->refresh_muted = false;
296 
297     reset_callbacks(s);
298     s->userdata = NULL;
299 
300     s->asyncmsgq = NULL;
301 
302     /* As a minor optimization we just steal the list instead of
303      * copying it here */
304     s->ports = data->ports;
305     data->ports = NULL;
306 
307     s->active_port = NULL;
308     s->save_port = false;
309 
310     if (data->active_port)
311         if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
312             s->save_port = data->save_port;
313 
314     /* Hopefully the active port has already been assigned in the previous call
315        to pa_device_port_find_best, but better safe than sorry */
316     if (!s->active_port)
317         s->active_port = pa_device_port_find_best(s->ports);
318 
319     if (s->active_port)
320         s->port_latency_offset = s->active_port->latency_offset;
321     else
322         s->port_latency_offset = 0;
323 
324     s->save_volume = data->save_volume;
325     s->save_muted = data->save_muted;
326 
327     pa_silence_memchunk_get(
328             &core->silence_cache,
329             core->mempool,
330             &s->silence,
331             &s->sample_spec,
332             0);
333 
334     s->thread_info.rtpoll = NULL;
335     s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
336                                                 (pa_free_cb_t) pa_sink_input_unref);
337     s->thread_info.soft_volume = s->soft_volume;
338     s->thread_info.soft_muted = s->muted;
339     s->thread_info.state = s->state;
340     s->thread_info.rewind_nbytes = 0;
341     s->thread_info.rewind_requested = false;
342     s->thread_info.max_rewind = 0;
343     s->thread_info.max_request = 0;
344     s->thread_info.requested_latency_valid = false;
345     s->thread_info.requested_latency = 0;
346     s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
347     s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
348     s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
349 
350     PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
351     s->thread_info.volume_changes_tail = NULL;
352     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
353     s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
354     s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
355     s->thread_info.port_latency_offset = s->port_latency_offset;
356 
357     /* FIXME: This should probably be moved to pa_sink_put() */
358     pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
359 
360     if (s->card)
361         pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
362 
363     pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
364     pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
365                 s->index,
366                 s->name,
367                 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
368                 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
369                 pt);
370     pa_xfree(pt);
371 
372     pa_source_new_data_init(&source_data);
373     pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
374     pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
375     pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
376     pa_source_new_data_set_avoid_resampling(&source_data, s->avoid_resampling);
377     source_data.name = pa_sprintf_malloc("%s.monitor", name);
378     source_data.driver = data->driver;
379     source_data.module = data->module;
380     source_data.card = data->card;
381 
382     dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
383     pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
384     pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
385 
386     s->monitor_source = pa_source_new(core, &source_data,
387                                       ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
388                                       ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
389 
390     pa_source_new_data_done(&source_data);
391 
392     if (!s->monitor_source) {
393         pa_sink_unlink(s);
394         pa_sink_unref(s);
395         return NULL;
396     }
397 
398     s->monitor_source->monitor_of = s;
399 
400     pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
401     pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
402     pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
403 
404     return s;
405 }
406 
407 /* Called from main context */
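/* Applies a new state and/or suspend cause in two steps: first through the
 * optional set_state_in_main_thread() callback, then by sending
 * PA_SINK_MESSAGE_SET_STATE to the IO thread. Either step is allowed to fail
 * only while resuming; on failure the sink is put back into SUSPENDED with no
 * suspend cause. */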
408 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
409     int ret = 0;
410     bool state_changed;
411     bool suspend_cause_changed;
412     bool suspending;
413     bool resuming;
414     pa_sink_state_t old_state;
415     pa_suspend_cause_t old_suspend_cause;
416 
417     pa_assert(s);
418     pa_assert_ctl_context();
419 
420     state_changed = state != s->state;
421     suspend_cause_changed = suspend_cause != s->suspend_cause;
422 
423     if (!state_changed && !suspend_cause_changed)
424         return 0;
425 
426     suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
427     resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
428 
429     /* If we are resuming, suspend_cause must be 0. */
430     pa_assert(!resuming || !suspend_cause);
431 
432     /* Here's something to think about: what to do with the suspend cause if
433      * resuming the sink fails? The old suspend cause will be incorrect, so we
434      * can't use that. On the other hand, if we set no suspend cause (as is the
435      * case currently), then it looks strange to have a sink suspended without
436      * any cause. It might be a good idea to add a new "resume failed" suspend
437      * cause, or it might just add unnecessary complexity, given that the
438      * current approach of not setting any suspend cause works well enough. */
439 
440     if (s->set_state_in_main_thread) {
441         if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
442             /* set_state_in_main_thread() is allowed to fail only when resuming. */
443             pa_assert(resuming);
444 
445             /* If resuming fails, we set the state to SUSPENDED and
446              * suspend_cause to 0. */
447             state = PA_SINK_SUSPENDED;
448             suspend_cause = 0;
449             state_changed = false;
450             suspend_cause_changed = suspend_cause != s->suspend_cause;
451             resuming = false;
452 
453             /* We know the state isn't changing. If the suspend cause isn't
454              * changing either, then there's nothing more to do. */
455             if (!suspend_cause_changed)
456                 return ret;
457         }
458     }
459 
460     if (s->asyncmsgq) {
461         struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
462 
463         if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
464             /* SET_STATE is allowed to fail only when resuming. */
465             pa_assert(resuming);
466 
467             if (s->set_state_in_main_thread)
468                 s->set_state_in_main_thread(s, PA_SINK_SUSPENDED, 0);
469 
470             /* If resuming fails, we set the state to SUSPENDED and
471              * suspend_cause to 0. */
472             state = PA_SINK_SUSPENDED;
473             suspend_cause = 0;
474             state_changed = false;
475             suspend_cause_changed = suspend_cause != s->suspend_cause;
476             resuming = false;
477 
478             /* We know the state isn't changing. If the suspend cause isn't
479              * changing either, then there's nothing more to do. */
480             if (!suspend_cause_changed)
481                 return ret;
482         }
483     }
484 
485     old_suspend_cause = s->suspend_cause;
486     if (suspend_cause_changed) {
487         char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
488         char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
489 
490         pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
491                      pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
492         s->suspend_cause = suspend_cause;
493     }
494 
495     old_state = s->state;
496     if (state_changed) {
497         pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
498         s->state = state;
499 
500         /* If we enter UNLINKED state, then we don't send change notifications.
501          * pa_sink_unlink() will send unlink notifications instead. */
502         if (state != PA_SINK_UNLINKED) {
503             pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
504             pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
505         }
506     }
507 
508     if (suspending || resuming || suspend_cause_changed) {
509         pa_sink_input *i;
510         uint32_t idx;
511 
512         /* We're suspending or resuming, tell everyone about it */
513 
514         PA_IDXSET_FOREACH(i, s->inputs, idx)
515             if (s->state == PA_SINK_SUSPENDED &&
516                 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
517                 pa_sink_input_kill(i);
518             else if (i->suspend)
519                 i->suspend(i, old_state, old_suspend_cause);
520     }
521 
522     if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
523         pa_source_sync_suspend(s->monitor_source);
524 
525     return ret;
526 }
527 
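/* Most of the callback setters below follow a common pattern: store the
 * function pointer, translate its presence or absence into the matching sink
 * flag (HW_VOLUME_CTRL, DEFERRED_VOLUME, HW_MUTE_CTRL), and post a change
 * event if the flags changed after the sink has left the INIT state. */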
528 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
529     pa_assert(s);
530 
531     s->get_volume = cb;
532 }
533 
534 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
535     pa_sink_flags_t flags;
536 
537     pa_assert(s);
538     pa_assert(!s->write_volume || cb);
539 
540     s->set_volume = cb;
541 
542     /* Save the current flags so we can tell if they've changed */
543     flags = s->flags;
544 
545     if (cb) {
546         /* The sink implementor is responsible for setting decibel volume support */
547         s->flags |= PA_SINK_HW_VOLUME_CTRL;
548     } else {
549         s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
550         /* See note below in pa_sink_put() about volume sharing and decibel volumes */
551         pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
552     }
553 
554     /* If the flags have changed after init, let any clients know via a change event */
555     if (s->state != PA_SINK_INIT && flags != s->flags)
556         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
557 }
558 
559 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
560     pa_sink_flags_t flags;
561 
562     pa_assert(s);
563     pa_assert(!cb || s->set_volume);
564 
565     s->write_volume = cb;
566 
567     /* Save the current flags so we can tell if they've changed */
568     flags = s->flags;
569 
570     if (cb)
571         s->flags |= PA_SINK_DEFERRED_VOLUME;
572     else
573         s->flags &= ~PA_SINK_DEFERRED_VOLUME;
574 
575     /* If the flags have changed after init, let any clients know via a change event */
576     if (s->state != PA_SINK_INIT && flags != s->flags)
577         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
578 }
579 
580 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
581     pa_assert(s);
582 
583     s->get_mute = cb;
584 }
585 
586 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
587     pa_sink_flags_t flags;
588 
589     pa_assert(s);
590 
591     s->set_mute = cb;
592 
593     /* Save the current flags so we can tell if they've changed */
594     flags = s->flags;
595 
596     if (cb)
597         s->flags |= PA_SINK_HW_MUTE_CTRL;
598     else
599         s->flags &= ~PA_SINK_HW_MUTE_CTRL;
600 
601     /* If the flags have changed after init, let any clients know via a change event */
602     if (s->state != PA_SINK_INIT && flags != s->flags)
603         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
604 }
605 
606 static void enable_flat_volume(pa_sink *s, bool enable) {
607     pa_sink_flags_t flags;
608 
609     pa_assert(s);
610 
611     /* Always follow the overall user preference here */
612     enable = enable && s->core->flat_volumes;
613 
614     /* Save the current flags so we can tell if they've changed */
615     flags = s->flags;
616 
617     if (enable)
618         s->flags |= PA_SINK_FLAT_VOLUME;
619     else
620         s->flags &= ~PA_SINK_FLAT_VOLUME;
621 
622     /* If the flags have changed after init, let any clients know via a change event */
623     if (s->state != PA_SINK_INIT && flags != s->flags)
624         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
625 }
626 
627 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
628     pa_sink_flags_t flags;
629 
630     pa_assert(s);
631 
632     /* Save the current flags so we can tell if they've changed */
633     flags = s->flags;
634 
635     if (enable) {
636         s->flags |= PA_SINK_DECIBEL_VOLUME;
637         enable_flat_volume(s, true);
638     } else {
639         s->flags &= ~PA_SINK_DECIBEL_VOLUME;
640         enable_flat_volume(s, false);
641     }
642 
643     /* If the flags have changed after init, let any clients know via a change event */
644     if (s->state != PA_SINK_INIT && flags != s->flags)
645         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
646 }
647 
648 /* Called from main context */
649 void pa_sink_put(pa_sink* s) {
650     pa_sink_assert_ref(s);
651     pa_assert_ctl_context();
652 
653     pa_assert(s->state == PA_SINK_INIT);
654     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
655 
656     /* The following fields must be initialized properly when calling _put() */
657     pa_assert(s->asyncmsgq);
658     pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
659 
660     /* Generally, flags should be initialized via pa_sink_new(). As a
661      * special exception we allow some volume related flags to be set
662      * between _new() and _put() by the callback setter functions above.
663      *
664      * Thus we implement a couple of safeguards here which ensure the above
665      * setters were used (or at least the implementor made manual changes
666      * in a compatible way).
667      *
668      * Note: All of these flags set here can change over the life time
669      * of the sink. */
670     pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
671     pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
672     pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
673 
674     /* XXX: Currently decibel volume is disabled for all sinks that use volume
675      * sharing. When the master sink supports decibel volume, it would be good
676      * to have the flag also in the filter sink, but currently we don't do that
677      * so that the flags of the filter sink never change when it's moved from
678      * a master sink to another. One solution for this problem would be to
679      * remove user-visible volume altogether from filter sinks when volume
680      * sharing is used, but the current approach was easier to implement... */
681     /* We always support decibel volumes in software, otherwise we leave it to
682      * the sink implementor to set this flag as needed.
683      *
684      * Note: This flag can also change over the life time of the sink. */
685     if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
686         pa_sink_enable_decibel_volume(s, true);
687         s->soft_volume = s->reference_volume;
688     }
689 
690     /* If the sink implementor supports dB volumes by itself, we should always
691      * try to enable flat volumes too */
692     if ((s->flags & PA_SINK_DECIBEL_VOLUME))
693         enable_flat_volume(s, true);
694 
695     if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
696         pa_sink *root_sink = pa_sink_get_master(s);
697 
698         pa_assert(root_sink);
699 
700         s->reference_volume = root_sink->reference_volume;
701         pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
702 
703         s->real_volume = root_sink->real_volume;
704         pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
705     } else
706         /* We assume that if the sink implementor changed the default
707          * volume they did so in real_volume, because that is the usual
708          * place where they are supposed to place their changes.  */
709         s->reference_volume = s->real_volume;
710 
711     s->thread_info.soft_volume = s->soft_volume;
712     s->thread_info.soft_muted = s->muted;
713     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
714 
715     pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
716               || (s->base_volume == PA_VOLUME_NORM
717                   && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
718     pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
719     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
720     pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
721     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
722 
723     pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
724     pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
725     pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
726 
727     if (s->suspend_cause)
728         pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
729     else
730         pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
731 
732     pa_source_put(s->monitor_source);
733 
734     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
735     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
736 
737     /* It's good to fire the SINK_PUT hook before updating the default sink,
738      * because module-switch-on-connect will set the new sink as the default
739      * sink, and if we were to call pa_core_update_default_sink() before that,
740      * the default sink might change twice, causing unnecessary stream moving. */
741 
742     pa_core_update_default_sink(s->core);
743 
744     pa_core_move_streams_to_newly_available_preferred_sink(s->core, s);
745 }
746 
747 /* Called from main context */
748 void pa_sink_unlink(pa_sink* s) {
749     bool linked;
750     pa_sink_input *i, PA_UNUSED *j = NULL;
751 
752     pa_sink_assert_ref(s);
753     pa_assert_ctl_context();
754 
755     /* Please note that pa_sink_unlink() does more than simply
756      * reversing pa_sink_put(). It also undoes the registrations
757      * already done in pa_sink_new()! */
758 
759     if (s->unlink_requested)
760         return;
761 
762     s->unlink_requested = true;
763 
764     linked = PA_SINK_IS_LINKED(s->state);
765 
766     if (linked)
767         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
768 
769     if (s->state != PA_SINK_UNLINKED)
770         pa_namereg_unregister(s->core, s->name);
771     pa_idxset_remove_by_data(s->core->sinks, s, NULL);
772 
773     pa_core_update_default_sink(s->core);
774 
775     if (linked && s->core->rescue_streams)
776         pa_sink_move_streams_to_default_sink(s->core, s, false);
777 
778     if (s->card)
779         pa_idxset_remove_by_data(s->card->sinks, s, NULL);
780 
781     while ((i = pa_idxset_first(s->inputs, NULL))) {
782         pa_assert(i != j);
783         pa_sink_input_kill(i);
784         j = i;
785     }
786 
787     if (linked)
788         /* It's important to keep the suspend cause unchanged when unlinking,
789          * because if we remove the SESSION suspend cause here, the alsa sink
790          * will sync its volume with the hardware while another user is
791          * active, messing up the volume for that other user. */
792         sink_set_state(s, PA_SINK_UNLINKED, s->suspend_cause);
793     else
794         s->state = PA_SINK_UNLINKED;
795 
796     reset_callbacks(s);
797 
798     if (s->monitor_source)
799         pa_source_unlink(s->monitor_source);
800 
801     if (linked) {
802         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
803         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
804     }
805 }
806 
807 /* Called from main context */
808 static void sink_free(pa_object *o) {
809     pa_sink *s = PA_SINK(o);
810 
811     pa_assert(s);
812     pa_assert_ctl_context();
813     pa_assert(pa_sink_refcnt(s) == 0);
814     pa_assert(!PA_SINK_IS_LINKED(s->state));
815 
816     pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
817 
818     pa_sink_volume_change_flush(s);
819 
820     if (s->monitor_source) {
821         pa_source_unref(s->monitor_source);
822         s->monitor_source = NULL;
823     }
824 
825     pa_idxset_free(s->inputs, NULL);
826     pa_hashmap_free(s->thread_info.inputs);
827 
828     if (s->silence.memblock)
829         pa_memblock_unref(s->silence.memblock);
830 
831     pa_xfree(s->name);
832     pa_xfree(s->driver);
833 
834     if (s->proplist)
835         pa_proplist_free(s->proplist);
836 
837     if (s->ports)
838         pa_hashmap_free(s->ports);
839 
840     pa_xfree(s);
841 }
842 
843 /* Called from main context, and not while the IO thread is active, please */
844 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
845     pa_sink_assert_ref(s);
846     pa_assert_ctl_context();
847 
848     s->asyncmsgq = q;
849 
850     if (s->monitor_source)
851         pa_source_set_asyncmsgq(s->monitor_source, q);
852 }
853 
854 /* Called from main context, and not while the IO thread is active, please */
855 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
856     pa_sink_flags_t old_flags;
857     pa_sink_input *input;
858     uint32_t idx;
859 
860     pa_sink_assert_ref(s);
861     pa_assert_ctl_context();
862 
863     /* For now, allow only a minimal set of flags to be changed. */
864     pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
865 
866     old_flags = s->flags;
867     s->flags = (s->flags & ~mask) | (value & mask);
868 
869     if (s->flags == old_flags)
870         return;
871 
872     if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
873         pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
874 
875     if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
876         pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
877                      s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
878 
879     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
880     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
881 
882     if (s->monitor_source)
883         pa_source_update_flags(s->monitor_source,
884                                ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
885                                ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
886                                ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
887                                ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
888 
889     PA_IDXSET_FOREACH(input, s->inputs, idx) {
890         if (input->origin_sink)
891             pa_sink_update_flags(input->origin_sink, mask, value);
892     }
893 }
894 
895 /* Called from IO context, or before _put() from main context */
896 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
897     pa_sink_assert_ref(s);
898     pa_sink_assert_io_context(s);
899 
900     s->thread_info.rtpoll = p;
901 
902     if (s->monitor_source)
903         pa_source_set_rtpoll(s->monitor_source, p);
904 }
905 
906 /* Called from main context */
907 int pa_sink_update_status(pa_sink*s) {
908     pa_sink_assert_ref(s);
909     pa_assert_ctl_context();
910     pa_assert(PA_SINK_IS_LINKED(s->state));
911 
912     if (s->state == PA_SINK_SUSPENDED)
913         return 0;
914 
915     return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
916 }
917 
918 /* Called from main context */
919 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
920     pa_suspend_cause_t merged_cause;
921 
922     pa_sink_assert_ref(s);
923     pa_assert_ctl_context();
924     pa_assert(PA_SINK_IS_LINKED(s->state));
925     pa_assert(cause != 0);
926 
927     if (suspend)
928         merged_cause = s->suspend_cause | cause;
929     else
930         merged_cause = s->suspend_cause & ~cause;
931 
932     if (merged_cause)
933         return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
934     else
935         return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
936 }
937 
938 /* Called from main context */
939 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
940     pa_sink_input *i, *n;
941     uint32_t idx;
942 
943     pa_sink_assert_ref(s);
944     pa_assert_ctl_context();
945     pa_assert(PA_SINK_IS_LINKED(s->state));
946 
947     if (!q)
948         q = pa_queue_new();
949 
950     for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
951         n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
952 
953         pa_sink_input_ref(i);
954 
955         if (pa_sink_input_start_move(i) >= 0)
956             pa_queue_push(q, i);
957         else
958             pa_sink_input_unref(i);
959     }
960 
961     return q;
962 }
963 
964 /* Called from main context */
965 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
966     pa_sink_input *i;
967 
968     pa_sink_assert_ref(s);
969     pa_assert_ctl_context();
970     pa_assert(PA_SINK_IS_LINKED(s->state));
971     pa_assert(q);
972 
973     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
974         if (PA_SINK_INPUT_IS_LINKED(i->state)) {
975             if (pa_sink_input_finish_move(i, s, save) < 0)
976                 pa_sink_input_fail_move(i);
977 
978         }
979         pa_sink_input_unref(i);
980     }
981 
982     pa_queue_free(q, NULL);
983 }
984 
985 /* Called from main context */
986 void pa_sink_move_all_fail(pa_queue *q) {
987     pa_sink_input *i;
988 
989     pa_assert_ctl_context();
990     pa_assert(q);
991 
992     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
993         pa_sink_input_fail_move(i);
994         pa_sink_input_unref(i);
995     }
996 
997     pa_queue_free(q, NULL);
998 }
999 
1000 /* Called from IO thread context */
1001 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1002     pa_sink_input *i;
1003     void *state = NULL;
1004     size_t result = 0;
1005 
1006     pa_sink_assert_ref(s);
1007     pa_sink_assert_io_context(s);
1008 
1009     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1010         size_t uf = i->thread_info.underrun_for_sink;
1011 
1012         /* Propagate down the filter tree */
1013         if (i->origin_sink) {
1014             size_t filter_result, left_to_play_origin;
1015 
1016             /* The recursive call works in the origin sink domain ... */
1017             left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1018 
1019             /* .. and returns the time to sleep before waking up. We need the
1020              * underrun duration for comparisons, so we undo the subtraction on
1021              * the return value... */
1022             filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1023 
1024             /* ... and convert it back to the master sink domain */
1025             filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1026 
1027             /* Remember the longest underrun so far */
1028             if (filter_result > result)
1029                 result = filter_result;
1030         }
1031 
1032         if (uf == 0) {
1033             /* No underrun here, move on */
1034             continue;
1035         } else if (uf >= left_to_play) {
1036             /* The sink has possibly consumed all the data the sink input provided */
1037             pa_sink_input_process_underrun(i);
1038         } else if (uf > result) {
1039             /* Remember the longest underrun so far */
1040             result = uf;
1041         }
1042     }
1043 
1044     if (result > 0)
1045         pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1046                 (long) result, (long) left_to_play - result);
1047     return left_to_play - result;
1048 }
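
/* A worked example of the return value above, with illustrative numbers: if
 * left_to_play is 4096 bytes and the longest underrun among the inputs
 * happened 1024 bytes ago (result = 1024), the underrun is logged and
 * 4096 - 1024 = 3072 is returned, i.e. the part of the playback buffer that
 * is still known to be valid. */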
1049 
1050 /* Called from IO thread context */
1051 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1052     pa_sink_input *i;
1053     void *state = NULL;
1054 
1055     pa_sink_assert_ref(s);
1056     pa_sink_assert_io_context(s);
1057     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1058 
1059     /* If nobody requested this and this isn't actually a real rewind,
1060      * then we can short-cut it. Please note that this means that not
1061      * all rewind requests triggered upstream will always be translated
1062      * into actual requests! */
1063     if (!s->thread_info.rewind_requested && nbytes <= 0)
1064         return;
1065 
1066     s->thread_info.rewind_nbytes = 0;
1067     s->thread_info.rewind_requested = false;
1068 
1069     if (nbytes > 0) {
1070         pa_log_debug("Processing rewind...");
1071         if (s->flags & PA_SINK_DEFERRED_VOLUME)
1072             pa_sink_volume_change_rewind(s, nbytes);
1073     }
1074 
1075     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1076         pa_sink_input_assert_ref(i);
1077         pa_sink_input_process_rewind(i, nbytes);
1078     }
1079 
1080     if (nbytes > 0) {
1081         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1082             pa_source_process_rewind(s->monitor_source, nbytes);
1083     }
1084 }
1085 
1086 /* Called from IO thread context */
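/* Peeks up to maxinfo chunks from the sink inputs into the info array,
 * skipping blocks that are pure silence. *length is shrunk to the shortest
 * chunk that was peeked so that all returned entries cover the same span;
 * the return value is the number of entries filled in. */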
1087 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1088     pa_sink_input *i;
1089     unsigned n = 0;
1090     void *state = NULL;
1091     size_t mixlength = *length;
1092 
1093     pa_sink_assert_ref(s);
1094     pa_sink_assert_io_context(s);
1095     pa_assert(info);
1096 
1097     while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1098         pa_sink_input_assert_ref(i);
1099 
1100         pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1101 
1102         if (mixlength == 0 || info->chunk.length < mixlength)
1103             mixlength = info->chunk.length;
1104 
1105         if (pa_memblock_is_silence(info->chunk.memblock)) {
1106             pa_memblock_unref(info->chunk.memblock);
1107             continue;
1108         }
1109 
1110         info->userdata = pa_sink_input_ref(i);
1111 
1112         pa_assert(info->chunk.memblock);
1113         pa_assert(info->chunk.length > 0);
1114 
1115         info++;
1116         n++;
1117         maxinfo--;
1118     }
1119 
1120     if (mixlength > 0)
1121         *length = mixlength;
1122 
1123     return n;
1124 }
1125 
1126 /* Called from IO thread context */
1127 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1128     pa_sink_input *i;
1129     void *state;
1130     unsigned p = 0;
1131     unsigned n_unreffed = 0;
1132 
1133     pa_sink_assert_ref(s);
1134     pa_sink_assert_io_context(s);
1135     pa_assert(result);
1136     pa_assert(result->memblock);
1137     pa_assert(result->length > 0);
1138 
1139     /* We optimize for the case where the order of the inputs has not changed */
1140 
1141     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1142         unsigned j;
1143         pa_mix_info* m = NULL;
1144 
1145         pa_sink_input_assert_ref(i);
1146 
1147         /* Let's try to find the matching entry in the pa_mix_info array */
1148         for (j = 0; j < n; j ++) {
1149 
1150             if (info[p].userdata == i) {
1151                 m = info + p;
1152                 break;
1153             }
1154 
1155             p++;
1156             if (p >= n)
1157                 p = 0;
1158         }
1159 
1160         /* Drop read data */
1161         pa_sink_input_drop(i, result->length);
1162 
1163         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1164 
1165             if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1166                 void *ostate = NULL;
1167                 pa_source_output *o;
1168                 pa_memchunk c;
1169 
1170                 if (m && m->chunk.memblock) {
1171                     c = m->chunk;
1172                     pa_memblock_ref(c.memblock);
1173                     pa_assert(result->length <= c.length);
1174                     c.length = result->length;
1175 
1176                     pa_memchunk_make_writable(&c, 0);
1177                     pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1178                 } else {
1179                     c = s->silence;
1180                     pa_memblock_ref(c.memblock);
1181                     pa_assert(result->length <= c.length);
1182                     c.length = result->length;
1183                 }
1184 
1185                 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1186                     pa_source_output_assert_ref(o);
1187                     pa_assert(o->direct_on_input == i);
1188                     pa_source_post_direct(s->monitor_source, o, &c);
1189                 }
1190 
1191                 pa_memblock_unref(c.memblock);
1192             }
1193         }
1194 
1195         if (m) {
1196             if (m->chunk.memblock) {
1197                 pa_memblock_unref(m->chunk.memblock);
1198                 pa_memchunk_reset(&m->chunk);
1199             }
1200 
1201             pa_sink_input_unref(m->userdata);
1202             m->userdata = NULL;
1203 
1204             n_unreffed += 1;
1205         }
1206     }
1207 
1208     /* Now drop references to entries that are included in the
1209      * pa_mix_info array but don't exist anymore */
1210 
1211     if (n_unreffed < n) {
1212         for (; n > 0; info++, n--) {
1213             if (info->userdata)
1214                 pa_sink_input_unref(info->userdata);
1215             if (info->chunk.memblock)
1216                 pa_memblock_unref(info->chunk.memblock);
1217         }
1218     }
1219 
1220     if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1221         pa_source_post(s->monitor_source, result);
1222 }
1223 
1224 /* Called from IO thread context */
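/* Renders at most 'length' bytes of mixed audio into *result. Three cases are
 * handled below: no playable inputs (the cached silence block is returned),
 * exactly one input (its chunk is reused and only volume/mute is applied),
 * and multiple inputs (a fresh block is allocated and pa_mix() combines up to
 * MAX_MIX_CHANNELS streams). */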
1225 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1226     pa_mix_info info[MAX_MIX_CHANNELS];
1227     unsigned n;
1228     size_t block_size_max;
1229 
1230     pa_sink_assert_ref(s);
1231     pa_sink_assert_io_context(s);
1232     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1233     pa_assert(pa_frame_aligned(length, &s->sample_spec));
1234     pa_assert(result);
1235 
1236     pa_assert(!s->thread_info.rewind_requested);
1237     pa_assert(s->thread_info.rewind_nbytes == 0);
1238 
1239     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1240         result->memblock = pa_memblock_ref(s->silence.memblock);
1241         result->index = s->silence.index;
1242         result->length = PA_MIN(s->silence.length, length);
1243         return;
1244     }
1245 
1246     pa_sink_ref(s);
1247 
1248     if (length <= 0)
1249         length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1250 
1251     block_size_max = pa_mempool_block_size_max(s->core->mempool);
1252     if (length > block_size_max)
1253         length = pa_frame_align(block_size_max, &s->sample_spec);
1254 
1255     pa_assert(length > 0);
1256 
1257     n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1258 
1259     if (n == 0) {
1260 
1261         *result = s->silence;
1262         pa_memblock_ref(result->memblock);
1263 
1264         if (result->length > length)
1265             result->length = length;
1266 
1267     } else if (n == 1) {
1268         pa_cvolume volume;
1269 
1270         *result = info[0].chunk;
1271         pa_memblock_ref(result->memblock);
1272 
1273         if (result->length > length)
1274             result->length = length;
1275 
1276         pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1277 
1278         if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1279             pa_memblock_unref(result->memblock);
1280             pa_silence_memchunk_get(&s->core->silence_cache,
1281                                     s->core->mempool,
1282                                     result,
1283                                     &s->sample_spec,
1284                                     result->length);
1285         } else if (!pa_cvolume_is_norm(&volume)) {
1286             pa_memchunk_make_writable(result, 0);
1287             pa_volume_memchunk(result, &s->sample_spec, &volume);
1288         }
1289     } else {
1290         void *ptr;
1291         result->memblock = pa_memblock_new(s->core->mempool, length);
1292 
1293         ptr = pa_memblock_acquire(result->memblock);
1294         result->length = pa_mix(info, n,
1295                                 ptr, length,
1296                                 &s->sample_spec,
1297                                 &s->thread_info.soft_volume,
1298                                 s->thread_info.soft_muted);
1299         pa_memblock_release(result->memblock);
1300 
1301         result->index = 0;
1302     }
1303 
1304     inputs_drop(s, info, n, result);
1305 
1306     pa_sink_unref(s);
1307 }
1308 
1309 /* Called from IO thread context */
1310 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1311     pa_mix_info info[MAX_MIX_CHANNELS];
1312     unsigned n;
1313     size_t length, block_size_max;
1314 
1315     pa_sink_assert_ref(s);
1316     pa_sink_assert_io_context(s);
1317     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1318     pa_assert(target);
1319     pa_assert(target->memblock);
1320     pa_assert(target->length > 0);
1321     pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1322 
1323     pa_assert(!s->thread_info.rewind_requested);
1324     pa_assert(s->thread_info.rewind_nbytes == 0);
1325 
1326     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1327         pa_silence_memchunk(target, &s->sample_spec);
1328         return;
1329     }
1330 
1331     pa_sink_ref(s);
1332 
1333     length = target->length;
1334     block_size_max = pa_mempool_block_size_max(s->core->mempool);
1335     if (length > block_size_max)
1336         length = pa_frame_align(block_size_max, &s->sample_spec);
1337 
1338     pa_assert(length > 0);
1339 
1340     n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1341 
1342     if (n == 0) {
1343         if (target->length > length)
1344             target->length = length;
1345 
1346         pa_silence_memchunk(target, &s->sample_spec);
1347     } else if (n == 1) {
1348         pa_cvolume volume;
1349 
1350         if (target->length > length)
1351             target->length = length;
1352 
1353         pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1354 
1355         if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1356             pa_silence_memchunk(target, &s->sample_spec);
1357         else {
1358             pa_memchunk vchunk;
1359 
1360             vchunk = info[0].chunk;
1361             pa_memblock_ref(vchunk.memblock);
1362 
1363             if (vchunk.length > length)
1364                 vchunk.length = length;
1365 
1366             if (!pa_cvolume_is_norm(&volume)) {
1367                 pa_memchunk_make_writable(&vchunk, 0);
1368                 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1369             }
1370 
1371             pa_memchunk_memcpy(target, &vchunk);
1372             pa_memblock_unref(vchunk.memblock);
1373         }
1374 
1375     } else {
1376         void *ptr;
1377 
1378         ptr = pa_memblock_acquire(target->memblock);
1379 
1380         target->length = pa_mix(info, n,
1381                                 (uint8_t*) ptr + target->index, length,
1382                                 &s->sample_spec,
1383                                 &s->thread_info.soft_volume,
1384                                 s->thread_info.soft_muted);
1385 
1386         pa_memblock_release(target->memblock);
1387     }
1388 
1389     inputs_drop(s, info, n, target);
1390 
1391     pa_sink_unref(s);
1392 }
1393 
1394 /* Called from IO thread context */
1395 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1396     pa_memchunk chunk;
1397     size_t l, d;
1398 
1399     pa_sink_assert_ref(s);
1400     pa_sink_assert_io_context(s);
1401     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1402     pa_assert(target);
1403     pa_assert(target->memblock);
1404     pa_assert(target->length > 0);
1405     pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1406 
1407     pa_assert(!s->thread_info.rewind_requested);
1408     pa_assert(s->thread_info.rewind_nbytes == 0);
1409 
1410     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1411         pa_silence_memchunk(target, &s->sample_spec);
1412         return;
1413     }
1414 
1415     pa_sink_ref(s);
1416 
1417     l = target->length;
1418     d = 0;
1419     while (l > 0) {
1420         chunk = *target;
1421         chunk.index += d;
1422         chunk.length -= d;
1423 
1424         pa_sink_render_into(s, &chunk);
1425 
1426         d += chunk.length;
1427         l -= chunk.length;
1428     }
1429 
1430     pa_sink_unref(s);
1431 }
1432 
1433 /* Called from IO thread context */
1434 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1435     pa_sink_assert_ref(s);
1436     pa_sink_assert_io_context(s);
1437     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1438     pa_assert(length > 0);
1439     pa_assert(pa_frame_aligned(length, &s->sample_spec));
1440     pa_assert(result);
1441 
1442     pa_assert(!s->thread_info.rewind_requested);
1443     pa_assert(s->thread_info.rewind_nbytes == 0);
1444 
1445     pa_sink_ref(s);
1446 
1447     pa_sink_render(s, length, result);
1448 
1449     if (result->length < length) {
1450         pa_memchunk chunk;
1451 
1452         pa_memchunk_make_writable(result, length);
1453 
1454         chunk.memblock = result->memblock;
1455         chunk.index = result->index + result->length;
1456         chunk.length = length - result->length;
1457 
1458         pa_sink_render_into_full(s, &chunk);
1459 
1460         result->length = length;
1461     }
1462 
1463     pa_sink_unref(s);
1464 }
1465 
1466 /* Called from main thread */
1467 void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1468     pa_sample_spec desired_spec;
1469     uint32_t default_rate = s->default_sample_rate;
1470     uint32_t alternate_rate = s->alternate_sample_rate;
1471     uint32_t idx;
1472     pa_sink_input *i;
1473     bool default_rate_is_usable = false;
1474     bool alternate_rate_is_usable = false;
1475     bool avoid_resampling = s->avoid_resampling;
1476 
1477     if (pa_sample_spec_equal(spec, &s->sample_spec))
1478         return;
1479 
1480     if (!s->reconfigure)
1481         return;
1482 
1483     if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1484         pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1485         return;
1486     }
1487 
1488     if (PA_SINK_IS_RUNNING(s->state)) {
1489         pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
1490                     pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1491         return;
1492     }
1493 
1494     if (s->monitor_source) {
1495         if (PA_SOURCE_IS_RUNNING(s->monitor_source->state)) {
1496             pa_log_info("Cannot update sample spec, monitor source is RUNNING");
1497             return;
1498         }
1499     }
1500 
1501     if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1502         return;
1503 
1504     desired_spec = s->sample_spec;
1505 
1506     if (passthrough) {
1507         /* We have to try to use the sink input format and rate */
1508         desired_spec.format = spec->format;
1509         desired_spec.rate = spec->rate;
1510 
1511     } else if (avoid_resampling) {
1512         /* We just try to set the sink input's sample rate if it's not too low */
1513         if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1514             desired_spec.rate = spec->rate;
1515         desired_spec.format = spec->format;
1516 
1517     } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1518         /* We can directly try to use this rate */
1519         desired_spec.rate = spec->rate;
1520 
1521     }
1522 
1523     if (desired_spec.rate != spec->rate) {
1524         /* See if we can pick a rate that results in less resampling effort */
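        /* The divisibility checks below keep the stream within one sample
         * rate family: multiples of 11025 Hz (22050, 44100, 88200, ...)
         * versus multiples of 4000 Hz (8000, 16000, 48000, 96000, ...).
         * Resampling within a family is typically cheaper than across families. */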
1525         if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1526             default_rate_is_usable = true;
1527         if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1528             default_rate_is_usable = true;
1529         if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1530             alternate_rate_is_usable = true;
1531         if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1532             alternate_rate_is_usable = true;
1533 
1534         if (alternate_rate_is_usable && !default_rate_is_usable)
1535             desired_spec.rate = alternate_rate;
1536         else
1537             desired_spec.rate = default_rate;
1538     }
1539 
1540     if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1541         return;
1542 
1543     if (!passthrough && pa_sink_used_by(s) > 0)
1544         return;
1545 
1546     pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
1547                  s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1548     pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1549 
1550     s->reconfigure(s, &desired_spec, passthrough);
1551 
1552     /* update monitor source as well */
1553     if (s->monitor_source && !passthrough)
1554         pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
1555     pa_log_info("Reconfigured successfully");
1556 
1557     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1558         if (i->state == PA_SINK_INPUT_CORKED)
1559             pa_sink_input_update_resampler(i);
1560     }
1561 
1562     pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1563 }
1564 
1565 /* Called from main thread */
1566 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1567     int64_t usec = 0;
1568 
1569     pa_sink_assert_ref(s);
1570     pa_assert_ctl_context();
1571     pa_assert(PA_SINK_IS_LINKED(s->state));
1572 
1573     /* The returned value is supposed to be in the time domain of the sound card! */
1574 
1575     if (s->state == PA_SINK_SUSPENDED)
1576         return 0;
1577 
1578     if (!(s->flags & PA_SINK_LATENCY))
1579         return 0;
1580 
1581     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1582 
1583     /* the return value is unsigned, so check that the offset can be added to usec without
1584      * underflowing. */
1585     if (-s->port_latency_offset <= usec)
1586         usec += s->port_latency_offset;
1587     else
1588         usec = 0;
1589 
1590     return (pa_usec_t)usec;
1591 }
1592 
1593 /* Called from IO thread */
1594 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1595     int64_t usec = 0;
1596     pa_msgobject *o;
1597 
1598     pa_sink_assert_ref(s);
1599     pa_sink_assert_io_context(s);
1600     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1601 
1602     /* The returned value is supposed to be in the time domain of the sound card! */
1603 
1604     if (s->thread_info.state == PA_SINK_SUSPENDED)
1605         return 0;
1606 
1607     if (!(s->flags & PA_SINK_LATENCY))
1608         return 0;
1609 
1610     o = PA_MSGOBJECT(s);
1611 
1612     /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1613 
1614     o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1615 
1616     /* If allow_negative is false, the call should only return non-negative values. */
1617     usec += s->thread_info.port_latency_offset;
1618     if (!allow_negative && usec < 0)
1619         usec = 0;
1620 
1621     return usec;
1622 }
1623 
1624 /* Called from the main thread (and also from the IO thread while the main
1625  * thread is waiting).
1626  *
1627  * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1628  * set. Instead, flat volume mode is detected by checking whether the root sink
1629  * has the flag set. */
1630 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1631     pa_sink_assert_ref(s);
1632 
1633     s = pa_sink_get_master(s);
1634 
1635     if (PA_LIKELY(s))
1636         return (s->flags & PA_SINK_FLAT_VOLUME);
1637     else
1638         return false;
1639 }
1640 
1641 /* Called from the main thread (and also from the IO thread while the main
1642  * thread is waiting). */
1643 pa_sink *pa_sink_get_master(pa_sink *s) {
1644     pa_sink_assert_ref(s);
1645 
1646     while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1647         if (PA_UNLIKELY(!s->input_to_master))
1648             return NULL;
1649 
1650         s = s->input_to_master->sink;
1651     }
1652 
1653     return s;
1654 }
1655 
1656 /* Called from main context */
1657 bool pa_sink_is_filter(pa_sink *s) {
1658     pa_sink_assert_ref(s);
1659 
1660     return (s->input_to_master != NULL);
1661 }
1662 
1663 /* Called from main context */
1664 bool pa_sink_is_passthrough(pa_sink *s) {
1665     pa_sink_input *alt_i;
1666     uint32_t idx;
1667 
1668     pa_sink_assert_ref(s);
1669 
1670     /* one and only one PASSTHROUGH input can possibly be connected */
1671     if (pa_idxset_size(s->inputs) == 1) {
1672         alt_i = pa_idxset_first(s->inputs, &idx);
1673 
1674         if (pa_sink_input_is_passthrough(alt_i))
1675             return true;
1676     }
1677 
1678     return false;
1679 }
1680 
1681 /* Called from main context */
1682 void pa_sink_enter_passthrough(pa_sink *s) {
1683     pa_cvolume volume;
1684 
1685     /* The sink implementation is reconfigured for passthrough in
1686      * pa_sink_reconfigure(). This function sets the PA core objects to
1687      * passthrough mode. */
1688 
1689     /* disable the monitor in passthrough mode */
1690     if (s->monitor_source) {
1691         pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1692         pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1693     }
1694 
1695     /* Set the volume to the sink's base volume, capped at NORM */
1696     s->saved_volume = *pa_sink_get_volume(s, true);
1697     s->saved_save_volume = s->save_volume;
1698 
1699     pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1700     pa_sink_set_volume(s, &volume, true, false);
1701 
1702     pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1703 }
1704 
1705 /* Called from main context */
1706 void pa_sink_leave_passthrough(pa_sink *s) {
1707     /* Unsuspend monitor */
1708     if (s->monitor_source) {
1709         pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1710         pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1711     }
1712 
1713     /* Restore sink volume to what it was before we entered passthrough mode */
1714     pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1715 
1716     pa_cvolume_init(&s->saved_volume);
1717     s->saved_save_volume = false;
1718 
1719 }
1720 
1721 /* Called from main context. */
1722 static void compute_reference_ratio(pa_sink_input *i) {
1723     unsigned c = 0;
1724     pa_cvolume remapped;
1725     pa_cvolume ratio;
1726 
1727     pa_assert(i);
1728     pa_assert(pa_sink_flat_volume_enabled(i->sink));
1729 
1730     /*
1731      * Calculates the reference ratio from the sink's reference
1732      * volume. This basically calculates:
1733      *
1734      * i->reference_ratio = i->volume / i->sink->reference_volume
1735      */
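    /* For example, a stream whose volume is (linearly) half of the sink's
     * reference volume ends up with a reference ratio of one half on that
     * channel; multiplying the sink's reference volume by this ratio later
     * reconstructs the stream volume again. */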
1736 
1737     remapped = i->sink->reference_volume;
1738     pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1739 
1740     ratio = i->reference_ratio;
1741 
1742     for (c = 0; c < i->sample_spec.channels; c++) {
1743 
1744         /* We don't update when the sink volume is 0 anyway */
1745         if (remapped.values[c] <= PA_VOLUME_MUTED)
1746             continue;
1747 
1748         /* Don't update the reference ratio unless necessary */
1749         if (pa_sw_volume_multiply(
1750                     ratio.values[c],
1751                     remapped.values[c]) == i->volume.values[c])
1752             continue;
1753 
1754         ratio.values[c] = pa_sw_volume_divide(
1755                 i->volume.values[c],
1756                 remapped.values[c]);
1757     }
1758 
1759     pa_sink_input_set_reference_ratio(i, &ratio);
1760 }
1761 
1762 /* Called from main context. Only called for the root sink in volume sharing
1763  * cases, except for internal recursive calls. */
1764 static void compute_reference_ratios(pa_sink *s) {
1765     uint32_t idx;
1766     pa_sink_input *i;
1767 
1768     pa_sink_assert_ref(s);
1769     pa_assert_ctl_context();
1770     pa_assert(PA_SINK_IS_LINKED(s->state));
1771     pa_assert(pa_sink_flat_volume_enabled(s));
1772 
1773     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1774         compute_reference_ratio(i);
1775 
1776         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1777                 && PA_SINK_IS_LINKED(i->origin_sink->state))
1778             compute_reference_ratios(i->origin_sink);
1779     }
1780 }
1781 
1782 /* Called from main context. Only called for the root sink in volume sharing
1783  * cases, except for internal recursive calls. */
1784 static void compute_real_ratios(pa_sink *s) {
1785     pa_sink_input *i;
1786     uint32_t idx;
1787 
1788     pa_sink_assert_ref(s);
1789     pa_assert_ctl_context();
1790     pa_assert(PA_SINK_IS_LINKED(s->state));
1791     pa_assert(pa_sink_flat_volume_enabled(s));
1792 
1793     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1794         unsigned c;
1795         pa_cvolume remapped;
1796 
1797         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1798             /* The origin sink uses volume sharing, so this input's real ratio
1799              * is handled as a special case - the real ratio must be 0 dB, and
1800              * as a result i->soft_volume must equal i->volume_factor. */
1801             pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1802             i->soft_volume = i->volume_factor;
1803 
1804             if (PA_SINK_IS_LINKED(i->origin_sink->state))
1805                 compute_real_ratios(i->origin_sink);
1806 
1807             continue;
1808         }
1809 
1810         /*
1811          * This basically calculates:
1812          *
1813          * i->real_ratio := i->volume / s->real_volume
1814          * i->soft_volume := i->real_ratio * i->volume_factor
1815          */
1816 
1817         remapped = s->real_volume;
1818         pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1819 
1820         i->real_ratio.channels = i->sample_spec.channels;
1821         i->soft_volume.channels = i->sample_spec.channels;
1822 
1823         for (c = 0; c < i->sample_spec.channels; c++) {
1824 
1825             if (remapped.values[c] <= PA_VOLUME_MUTED) {
1826                 /* We leave i->real_ratio untouched */
1827                 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1828                 continue;
1829             }
1830 
1831             /* Don't lose accuracy unless necessary */
1832             if (pa_sw_volume_multiply(
1833                         i->real_ratio.values[c],
1834                         remapped.values[c]) != i->volume.values[c])
1835 
1836                 i->real_ratio.values[c] = pa_sw_volume_divide(
1837                         i->volume.values[c],
1838                         remapped.values[c]);
1839 
1840             i->soft_volume.values[c] = pa_sw_volume_multiply(
1841                     i->real_ratio.values[c],
1842                     i->volume_factor.values[c]);
1843         }
1844 
1845         /* We don't copy the soft_volume to the thread_info data
1846          * here. That must be done by the caller */
1847     }
1848 }
1849 
1850 static pa_cvolume *cvolume_remap_minimal_impact(
1851         pa_cvolume *v,
1852         const pa_cvolume *template,
1853         const pa_channel_map *from,
1854         const pa_channel_map *to) {
1855 
1856     pa_cvolume t;
1857 
1858     pa_assert(v);
1859     pa_assert(template);
1860     pa_assert(from);
1861     pa_assert(to);
1862     pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1863     pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1864 
1865     /* Much like pa_cvolume_remap(), but tries to minimize impact when
1866      * mapping from sink input to sink volumes:
1867      *
1868      * If template is a possible remapping from v it is used instead
1869      * of remapping anew.
1870      *
1871      * If the channel maps don't match we set an all-channel volume on
1872      * the sink to ensure that changing a volume on one stream has no
1873      * effect that cannot be compensated for in another stream that
1874      * does not have the same channel map as the sink. */
1875 
1876     if (pa_channel_map_equal(from, to))
1877         return v;
1878 
1879     t = *template;
1880     if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1881         *v = *template;
1882         return v;
1883     }
1884 
1885     pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1886     return v;
1887 }
1888 
1889 /* Called from main thread. Only called for the root sink in volume sharing
1890  * cases, except for internal recursive calls. */
1891 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1892     pa_sink_input *i;
1893     uint32_t idx;
1894 
1895     pa_sink_assert_ref(s);
1896     pa_assert(max_volume);
1897     pa_assert(channel_map);
1898     pa_assert(pa_sink_flat_volume_enabled(s));
1899 
1900     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1901         pa_cvolume remapped;
1902 
1903         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1904             if (PA_SINK_IS_LINKED(i->origin_sink->state))
1905                 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1906 
1907             /* Ignore this input. The origin sink uses volume sharing, so this
1908              * input's volume will be set to be equal to the root sink's real
1909              * volume. Obviously this input's current volume must not then
1910              * affect what the root sink's real volume will be. */
1911             continue;
1912         }
1913 
1914         remapped = i->volume;
1915         cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1916         pa_cvolume_merge(max_volume, max_volume, &remapped);
1917     }
1918 }
1919 
1920 /* Called from main thread. Only called for the root sink in volume sharing
1921  * cases, except for internal recursive calls. */
1922 static bool has_inputs(pa_sink *s) {
1923     pa_sink_input *i;
1924     uint32_t idx;
1925 
1926     pa_sink_assert_ref(s);
1927 
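    /* An input counts as a real stream unless it merely connects a
     * volume-sharing filter sink; for those we recurse into the filter
     * sink to see whether it has any real inputs of its own. */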
1928     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1929         if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1930             return true;
1931     }
1932 
1933     return false;
1934 }
1935 
1936 /* Called from main thread. Only called for the root sink in volume sharing
1937  * cases, except for internal recursive calls. */
1938 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1939     pa_sink_input *i;
1940     uint32_t idx;
1941 
1942     pa_sink_assert_ref(s);
1943     pa_assert(new_volume);
1944     pa_assert(channel_map);
1945 
1946     s->real_volume = *new_volume;
1947     pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1948 
1949     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1950         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1951             if (pa_sink_flat_volume_enabled(s)) {
1952                 pa_cvolume new_input_volume;
1953 
1954                 /* Follow the root sink's real volume. */
1955                 new_input_volume = *new_volume;
1956                 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
1957                 pa_sink_input_set_volume_direct(i, &new_input_volume);
1958                 compute_reference_ratio(i);
1959             }
1960 
1961             if (PA_SINK_IS_LINKED(i->origin_sink->state))
1962                 update_real_volume(i->origin_sink, new_volume, channel_map);
1963         }
1964     }
1965 }
1966 
1967 /* Called from main thread. Only called for the root sink in shared volume
1968  * cases. */
1969 static void compute_real_volume(pa_sink *s) {
1970     pa_sink_assert_ref(s);
1971     pa_assert_ctl_context();
1972     pa_assert(PA_SINK_IS_LINKED(s->state));
1973     pa_assert(pa_sink_flat_volume_enabled(s));
1974     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1975 
1976     /* This determines the maximum volume of all streams and sets
1977      * s->real_volume accordingly. */
1978 
1979     if (!has_inputs(s)) {
1980         /* In the special case that we have no sink inputs we leave the
1981          * volume unmodified. */
1982         update_real_volume(s, &s->reference_volume, &s->channel_map);
1983         return;
1984     }
1985 
1986     pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1987 
1988     /* First let's determine the new maximum volume of all inputs
1989      * connected to this sink */
1990     get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
1991     update_real_volume(s, &s->real_volume, &s->channel_map);
1992 
1993     /* Then, let's update the real ratios/soft volumes of all inputs
1994      * connected to this sink */
1995     compute_real_ratios(s);
1996 }
1997 
1998 /* Called from main thread. Only called for the root sink in shared volume
1999  * cases, except for internal recursive calls. */
2000 static void propagate_reference_volume(pa_sink *s) {
2001     pa_sink_input *i;
2002     uint32_t idx;
2003 
2004     pa_sink_assert_ref(s);
2005     pa_assert_ctl_context();
2006     pa_assert(PA_SINK_IS_LINKED(s->state));
2007     pa_assert(pa_sink_flat_volume_enabled(s));
2008 
2009     /* This is called whenever the sink volume changes for a reason other
2010      * than a sink input volume change. We need to fix up the
2011      * sink input volumes accordingly */
2012 
2013     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2014         pa_cvolume new_volume;
2015 
2016         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2017             if (PA_SINK_IS_LINKED(i->origin_sink->state))
2018                 propagate_reference_volume(i->origin_sink);
2019 
2020             /* Since the origin sink uses volume sharing, this input's volume
2021              * needs to be updated to match the root sink's real volume, but
2022              * that will be done later in update_real_volume(). */
2023             continue;
2024         }
2025 
2026         /* This basically calculates:
2027          *
2028          * i->volume := s->reference_volume * i->reference_ratio  */
2029 
2030         new_volume = s->reference_volume;
2031         pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2032         pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2033         pa_sink_input_set_volume_direct(i, &new_volume);
2034     }
2035 }
2036 
2037 /* Called from main thread. Only called for the root sink in volume sharing
2038  * cases, except for internal recursive calls. The return value indicates
2039  * whether any reference volume actually changed. */
2040 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2041     pa_cvolume volume;
2042     bool reference_volume_changed;
2043     pa_sink_input *i;
2044     uint32_t idx;
2045 
2046     pa_sink_assert_ref(s);
2047     pa_assert(PA_SINK_IS_LINKED(s->state));
2048     pa_assert(v);
2049     pa_assert(channel_map);
2050     pa_assert(pa_cvolume_valid(v));
2051 
2052     volume = *v;
2053     pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2054 
2055     reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2056     pa_sink_set_reference_volume_direct(s, &volume);
2057 
2058     s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2059 
2060     if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2061         /* If the root sink's volume doesn't change, then there can't be any
2062          * changes in the other sinks in the sink tree either.
2063          *
2064          * It's probably theoretically possible that even if the root sink's
2065          * volume changes slightly, some filter sink doesn't change its volume
2066          * due to rounding errors. If that happens, we still want to propagate
2067          * the changed root sink volume to the sinks connected to the
2068          * intermediate sink that didn't change its volume. This theoretical
2069          * possibility is the reason why we have that !(s->flags &
2070          * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2071          * notice even if we always returned false here whenever
2072          * reference_volume_changed is false. */
2073         return false;
2074 
2075     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2076         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2077                 && PA_SINK_IS_LINKED(i->origin_sink->state))
2078             update_reference_volume(i->origin_sink, v, channel_map, false);
2079     }
2080 
2081     return true;
2082 }
2083 
2084 /* Called from main thread */
2085 void pa_sink_set_volume(
2086         pa_sink *s,
2087         const pa_cvolume *volume,
2088         bool send_msg,
2089         bool save) {
2090 
2091     pa_cvolume new_reference_volume;
2092     pa_sink *root_sink;
2093 
2094     pa_sink_assert_ref(s);
2095     pa_assert_ctl_context();
2096     pa_assert(PA_SINK_IS_LINKED(s->state));
2097     pa_assert(!volume || pa_cvolume_valid(volume));
2098     pa_assert(volume || pa_sink_flat_volume_enabled(s));
2099     pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2100 
2101     /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2102      * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2103     if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2104         pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2105         return;
2106     }
2107 
2108     /* In case of volume sharing, the volume is set for the root sink first,
2109      * from which it's then propagated to the sharing sinks. */
2110     root_sink = pa_sink_get_master(s);
2111 
2112     if (PA_UNLIKELY(!root_sink))
2113         return;
2114 
2115     /* As a special exception we accept mono volumes on all sinks --
2116      * even on those with more complex channel maps */
2117 
2118     if (volume) {
2119         if (pa_cvolume_compatible(volume, &s->sample_spec))
2120             new_reference_volume = *volume;
2121         else {
2122             new_reference_volume = s->reference_volume;
2123             pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2124         }
2125 
2126         pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2127 
2128         if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2129             if (pa_sink_flat_volume_enabled(root_sink)) {
2130                 /* OK, propagate this volume change back to the inputs */
2131                 propagate_reference_volume(root_sink);
2132 
2133                 /* And now recalculate the real volume */
2134                 compute_real_volume(root_sink);
2135             } else
2136                 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2137         }
2138 
2139     } else {
2140         /* If volume is NULL we synchronize the sink's real and
2141          * reference volumes with the stream volumes. */
2142 
2143         pa_assert(pa_sink_flat_volume_enabled(root_sink));
2144 
2145         /* Ok, let's determine the new real volume */
2146         compute_real_volume(root_sink);
2147 
2148         /* Let's 'push' the reference volume if necessary */
2149         pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2150         /* If the sink and its root don't have the same number of channels, we need to remap */
2151         if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2152             pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2153         update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2154 
2155         /* Now that the reference volume is updated, we can update the streams'
2156          * reference ratios. */
2157         compute_reference_ratios(root_sink);
2158     }
2159 
2160     if (root_sink->set_volume) {
2161         /* If we have a function set_volume(), then we do not apply a
2162          * soft volume by default. However, set_volume() is free to
2163          * apply one to root_sink->soft_volume */
2164 
2165         pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2166         if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2167             root_sink->set_volume(root_sink);
2168 
2169     } else
2170         /* If we have no function set_volume(), then the soft volume
2171          * becomes the real volume */
2172         root_sink->soft_volume = root_sink->real_volume;
2173 
2174     /* This tells the sink that soft volume and/or real volume changed */
2175     if (send_msg)
2176         pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2177 }
2178 
2179 /* Called from the IO thread if sync volume is used, otherwise from the main thread.
2180  * Only to be called by the sink implementor. */
2181 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2182 
2183     pa_sink_assert_ref(s);
2184     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2185 
2186     if (s->flags & PA_SINK_DEFERRED_VOLUME)
2187         pa_sink_assert_io_context(s);
2188     else
2189         pa_assert_ctl_context();
2190 
2191     if (!volume)
2192         pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2193     else
2194         s->soft_volume = *volume;
2195 
2196     if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2197         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2198     else
2199         s->thread_info.soft_volume = s->soft_volume;
2200 }
2201 
2202 /* Called from the main thread. Only called for the root sink in volume sharing
2203  * cases, except for internal recursive calls. */
2204 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2205     pa_sink_input *i;
2206     uint32_t idx;
2207 
2208     pa_sink_assert_ref(s);
2209     pa_assert(old_real_volume);
2210     pa_assert_ctl_context();
2211     pa_assert(PA_SINK_IS_LINKED(s->state));
2212 
2213     /* This is called when the hardware's real volume changes due to
2214      * some external event. We copy the real volume into our
2215      * reference volume and then rebuild the stream volumes based on
2216      * i->real_ratio which should stay fixed. */
2217 
2218     if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2219         if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2220             return;
2221 
2222         /* 1. Make the real volume the reference volume */
2223         update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2224     }
2225 
2226     if (pa_sink_flat_volume_enabled(s)) {
2227 
2228         PA_IDXSET_FOREACH(i, s->inputs, idx) {
2229             pa_cvolume new_volume;
2230 
2231             /* 2. Since the sink's reference and real volumes are equal
2232              * now our ratios should be too. */
2233             pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2234 
2235             /* 3. Recalculate the new stream reference volume based on the
2236              * reference ratio and the sink's reference volume.
2237              *
2238              * This basically calculates:
2239              *
2240              * i->volume = s->reference_volume * i->reference_ratio
2241              *
2242              * This is identical to propagate_reference_volume() */
2243             new_volume = s->reference_volume;
2244             pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2245             pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2246             pa_sink_input_set_volume_direct(i, &new_volume);
2247 
2248             if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2249                     && PA_SINK_IS_LINKED(i->origin_sink->state))
2250                 propagate_real_volume(i->origin_sink, old_real_volume);
2251         }
2252     }
2253 
2254     /* Something got changed in the hardware. It probably makes sense
2255      * to save changed hw settings given that hw volume changes not
2256      * triggered by PA are almost certainly done by the user. */
2257     if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2258         s->save_volume = true;
2259 }
2260 
2261 /* Called from io thread */
2262 void pa_sink_update_volume_and_mute(pa_sink *s) {
2263     pa_assert(s);
2264     pa_sink_assert_io_context(s);
2265 
2266     pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2267 }
2268 
2269 /* Called from main thread */
2270 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2271     pa_sink_assert_ref(s);
2272     pa_assert_ctl_context();
2273     pa_assert(PA_SINK_IS_LINKED(s->state));
2274 
2275     if (s->refresh_volume || force_refresh) {
2276         struct pa_cvolume old_real_volume;
2277 
2278         pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2279 
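        /* Remember the previous hardware volume so propagate_real_volume()
         * can detect whether anything actually changed. */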
2280         old_real_volume = s->real_volume;
2281 
2282         if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2283             s->get_volume(s);
2284 
2285         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2286 
2287         update_real_volume(s, &s->real_volume, &s->channel_map);
2288         propagate_real_volume(s, &old_real_volume);
2289     }
2290 
2291     return &s->reference_volume;
2292 }
2293 
2294 /* Called from main thread. In volume sharing cases, only the root sink may
2295  * call this. */
2296 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2297     pa_cvolume old_real_volume;
2298 
2299     pa_sink_assert_ref(s);
2300     pa_assert_ctl_context();
2301     pa_assert(PA_SINK_IS_LINKED(s->state));
2302     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2303 
2304     /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2305 
2306     old_real_volume = s->real_volume;
2307     update_real_volume(s, new_real_volume, &s->channel_map);
2308     propagate_real_volume(s, &old_real_volume);
2309 }
2310 
2311 /* Called from main thread */
2312 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2313     bool old_muted;
2314 
2315     pa_sink_assert_ref(s);
2316     pa_assert_ctl_context();
2317 
2318     old_muted = s->muted;
2319 
2320     if (mute == old_muted) {
2321         s->save_muted |= save;
2322         return;
2323     }
2324 
2325     s->muted = mute;
2326     s->save_muted = save;
2327 
2328     if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2329         s->set_mute_in_progress = true;
2330         s->set_mute(s);
2331         s->set_mute_in_progress = false;
2332     }
2333 
2334     if (!PA_SINK_IS_LINKED(s->state))
2335         return;
2336 
2337     pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2338     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2339     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2340     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2341 }
2342 
2343 /* Called from main thread */
2344 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2345 
2346     pa_sink_assert_ref(s);
2347     pa_assert_ctl_context();
2348     pa_assert(PA_SINK_IS_LINKED(s->state));
2349 
2350     if ((s->refresh_muted || force_refresh) && s->get_mute) {
2351         bool mute;
2352 
2353         if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2354             if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2355                 pa_sink_mute_changed(s, mute);
2356         } else {
2357             if (s->get_mute(s, &mute) >= 0)
2358                 pa_sink_mute_changed(s, mute);
2359         }
2360     }
2361 
2362     return s->muted;
2363 }
2364 
2365 /* Called from main thread */
2366 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2367     pa_sink_assert_ref(s);
2368     pa_assert_ctl_context();
2369     pa_assert(PA_SINK_IS_LINKED(s->state));
2370 
2371     if (s->set_mute_in_progress)
2372         return;
2373 
2374     /* pa_sink_set_mute() does this same check, so this may appear redundant,
2375      * but we must have this here also, because the save parameter of
2376      * pa_sink_set_mute() would otherwise have unintended side effects (saving
2377      * the mute state when it shouldn't be saved). */
2378     if (new_muted == s->muted)
2379         return;
2380 
2381     pa_sink_set_mute(s, new_muted, true);
2382 }
2383 
2384 /* Called from main thread */
2385 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2386     pa_sink_assert_ref(s);
2387     pa_assert_ctl_context();
2388 
2389     if (p)
2390         pa_proplist_update(s->proplist, mode, p);
2391 
2392     if (PA_SINK_IS_LINKED(s->state)) {
2393         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2394         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2395     }
2396 
2397     return true;
2398 }
2399 
2400 /* Called from main thread */
2401 /* FIXME -- this should be dropped and merged into pa_sink_update_proplist() */
2402 void pa_sink_set_description(pa_sink *s, const char *description) {
2403     const char *old;
2404     pa_sink_assert_ref(s);
2405     pa_assert_ctl_context();
2406 
2407     if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2408         return;
2409 
2410     old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2411 
2412     if (old && description && pa_streq(old, description))
2413         return;
2414 
2415     if (description)
2416         pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2417     else
2418         pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2419 
2420     if (s->monitor_source) {
2421         char *n;
2422 
2423         n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2424         pa_source_set_description(s->monitor_source, n);
2425         pa_xfree(n);
2426     }
2427 
2428     if (PA_SINK_IS_LINKED(s->state)) {
2429         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2430         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2431     }
2432 }
2433 
2434 /* Called from main thread */
2435 unsigned pa_sink_linked_by(pa_sink *s) {
2436     unsigned ret;
2437 
2438     pa_sink_assert_ref(s);
2439     pa_assert_ctl_context();
2440     pa_assert(PA_SINK_IS_LINKED(s->state));
2441 
2442     ret = pa_idxset_size(s->inputs);
2443 
2444     /* We add in the number of streams connected to us here. Please
2445      * note the asymmetry to pa_sink_used_by()! */
2446 
2447     if (s->monitor_source)
2448         ret += pa_source_linked_by(s->monitor_source);
2449 
2450     return ret;
2451 }
2452 
2453 /* Called from main thread */
2454 unsigned pa_sink_used_by(pa_sink *s) {
2455     unsigned ret;
2456 
2457     pa_sink_assert_ref(s);
2458     pa_assert_ctl_context();
2459     pa_assert(PA_SINK_IS_LINKED(s->state));
2460 
2461     ret = pa_idxset_size(s->inputs);
2462     pa_assert(ret >= s->n_corked);
2463 
2464     /* Streams connected to our monitor source do not matter for
2465      * pa_sink_used_by()! */
2466 
2467     return ret - s->n_corked;
2468 }
2469 
2470 /* Called from main thread */
2471 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2472     unsigned ret;
2473     pa_sink_input *i;
2474     uint32_t idx;
2475 
2476     pa_sink_assert_ref(s);
2477     pa_assert_ctl_context();
2478 
2479     if (!PA_SINK_IS_LINKED(s->state))
2480         return 0;
2481 
2482     ret = 0;
2483 
2484     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2485         if (i == ignore_input)
2486             continue;
2487 
2488         /* We do not assert here. It is perfectly valid for a sink input to
2489          * be in the INIT state (i.e. created, marked done but not yet put)
2490          * and we should not care if it's unlinked as it won't contribute
2491          * towards our busy status.
2492          */
2493         if (!PA_SINK_INPUT_IS_LINKED(i->state))
2494             continue;
2495 
2496         if (i->state == PA_SINK_INPUT_CORKED)
2497             continue;
2498 
2499         if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2500             continue;
2501 
2502         ret ++;
2503     }
2504 
2505     if (s->monitor_source)
2506         ret += pa_source_check_suspend(s->monitor_source, ignore_output);
2507 
2508     return ret;
2509 }
2510 
2511 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2512     switch (state) {
2513         case PA_SINK_INIT:          return "INIT";
2514         case PA_SINK_IDLE:          return "IDLE";
2515         case PA_SINK_RUNNING:       return "RUNNING";
2516         case PA_SINK_SUSPENDED:     return "SUSPENDED";
2517         case PA_SINK_UNLINKED:      return "UNLINKED";
2518         case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2519     }
2520 
2521     pa_assert_not_reached();
2522 }
2523 
2524 /* Called from the IO thread */
2525 static void sync_input_volumes_within_thread(pa_sink *s) {
2526     pa_sink_input *i;
2527     void *state = NULL;
2528 
2529     pa_sink_assert_ref(s);
2530     pa_sink_assert_io_context(s);
2531 
2532     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2533         if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2534             continue;
2535 
2536         i->thread_info.soft_volume = i->soft_volume;
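        /* Request a rewind so that audio already mixed with the old soft
         * volume gets re-rendered with the new one. */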
2537         pa_sink_input_request_rewind(i, 0, true, false, false);
2538     }
2539 }
2540 
2541 /* Called from the IO thread. Only called for the root sink in volume sharing
2542  * cases, except for internal recursive calls. */
2543 static void set_shared_volume_within_thread(pa_sink *s) {
2544     pa_sink_input *i = NULL;
2545     void *state = NULL;
2546 
2547     pa_sink_assert_ref(s);
2548 
2549     PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2550 
2551     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2552         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2553             set_shared_volume_within_thread(i->origin_sink);
2554     }
2555 }
2556 
2557 /* Called from IO thread, except when it is not */
2558 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2559     pa_sink *s = PA_SINK(o);
2560     pa_sink_assert_ref(s);
2561 
2562     switch ((pa_sink_message_t) code) {
2563 
2564         case PA_SINK_MESSAGE_ADD_INPUT: {
2565             pa_sink_input *i = PA_SINK_INPUT(userdata);
2566 
2567             /* If you change anything here, make sure to change the
2568              * sink input handling a few lines down at
2569              * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2570 
2571             pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2572 
2573             /* Since the caller sleeps in pa_sink_input_put(), we can
2574              * safely access data outside of thread_info even though
2575              * it is mutable */
2576 
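            /* Mirror the main-thread sync_prev/sync_next pointers into
             * thread_info so that synchronized streams stay chained
             * together in the IO thread as well. */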
2577             if ((i->thread_info.sync_prev = i->sync_prev)) {
2578                 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2579                 pa_assert(i->sync_prev->sync_next == i);
2580                 i->thread_info.sync_prev->thread_info.sync_next = i;
2581             }
2582 
2583             if ((i->thread_info.sync_next = i->sync_next)) {
2584                 pa_assert(i->sink == i->thread_info.sync_next->sink);
2585                 pa_assert(i->sync_next->sync_prev == i);
2586                 i->thread_info.sync_next->thread_info.sync_prev = i;
2587             }
2588 
2589             pa_sink_input_attach(i);
2590 
2591             pa_sink_input_set_state_within_thread(i, i->state);
2592 
2593             /* The requested latency of the sink input needs to be fixed up and
2594              * then configured on the sink. If this causes the sink latency to
2595              * go down, the sink implementor is responsible for doing a rewind
2596              * in the update_requested_latency() callback to ensure that the
2597              * sink buffer doesn't contain more data than what the new latency
2598              * allows.
2599              *
2600              * XXX: Does it really make sense to push this responsibility to
2601              * the sink implementors? Wouldn't it be better to do it once in
2602              * the core than many times in the modules? */
2603 
2604             if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2605                 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2606 
2607             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2608             pa_sink_input_update_max_request(i, s->thread_info.max_request);
2609 
2610             /* We don't rewind here automatically. This is left to the
2611              * sink input implementor because some sink inputs need a
2612              * slow start, i.e. need some time to buffer client
2613              * samples before beginning streaming.
2614              *
2615              * XXX: Does it really make sense to push this functionality to
2616              * the sink implementors? Wouldn't it be better to do it once in
2617              * the core than many times in the modules? */
2618 
2619             /* In flat volume mode we need to update the volume as
2620              * well */
2621             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2622         }
2623 
2624         case PA_SINK_MESSAGE_REMOVE_INPUT: {
2625             pa_sink_input *i = PA_SINK_INPUT(userdata);
2626 
2627             /* If you change anything here, make sure to change the
2628              * sink input handling a few lines down at
2629              * PA_SINK_MESSAGE_START_MOVE, too. */
2630 
2631             pa_sink_input_detach(i);
2632 
2633             pa_sink_input_set_state_within_thread(i, i->state);
2634 
2635             /* Since the caller sleeps in pa_sink_input_unlink(),
2636              * we can safely access data outside of thread_info even
2637              * though it is mutable */
2638 
2639             pa_assert(!i->sync_prev);
2640             pa_assert(!i->sync_next);
2641 
2642             if (i->thread_info.sync_prev) {
2643                 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2644                 i->thread_info.sync_prev = NULL;
2645             }
2646 
2647             if (i->thread_info.sync_next) {
2648                 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2649                 i->thread_info.sync_next = NULL;
2650             }
2651 
2652             pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2653             pa_sink_invalidate_requested_latency(s, true);
2654             pa_sink_request_rewind(s, (size_t) -1);
2655 
2656             /* In flat volume mode we need to update the volume as
2657              * well */
2658             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2659         }
2660 
2661         case PA_SINK_MESSAGE_START_MOVE: {
2662             pa_sink_input *i = PA_SINK_INPUT(userdata);
2663 
2664             /* We don't support moving synchronized streams. */
2665             pa_assert(!i->sync_prev);
2666             pa_assert(!i->sync_next);
2667             pa_assert(!i->thread_info.sync_next);
2668             pa_assert(!i->thread_info.sync_prev);
2669 
2670             if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2671                 pa_usec_t usec = 0;
2672                 size_t sink_nbytes, total_nbytes;
2673 
2674                 /* The old sink probably has some audio from this
2675                  * stream in its buffer. We want to "take it back" as
2676                  * much as possible and play it to the new sink. We
2677                  * don't know at this point how much the old sink can
2678                  * rewind. We have to pick something, and that
2679                  * something is the full latency of the old sink here.
2680                  * So we rewind the stream buffer by the sink latency
2681                  * amount, which may be more than what we should
2682                  * rewind. This can result in a chunk of audio being
2683                  * played both to the old sink and the new sink.
2684                  *
2685                  * FIXME: Fix this code so that we don't have to make
2686                  * guesses about how much the sink will actually be
2687                  * able to rewind. If someone comes up with a solution
2688                  * for this, something to note is that the part of the
2689                  * latency that the old sink couldn't rewind should
2690                  * ideally be compensated after the stream has moved
2691                  * to the new sink by adding silence. The new sink
2692                  * most likely can't start playing the moved stream
2693                  * immediately, and that gap should be removed from
2694                  * the "compensation silence" (at least at the time of
2695                  * writing this, the move finish code will actually
2696                  * already take care of dropping the new sink's
2697                  * unrewindable latency, so taking into account the
2698                  * unrewindable latency of the old sink is the only
2699                  * problem).
2700                  *
2701                  * The render_memblockq contents are discarded,
2702                  * because when the sink changes, the format of the
2703                  * audio stored in the render_memblockq may change
2704                  * too, making the stored audio invalid. FIXME:
2705                  * However, the read and write indices are moved back
2706                  * the same amount, so if they are not the same now,
2707                  * they won't be the same after the rewind either. If
2708                  * the write index of the render_memblockq is ahead of
2709                  * the read index, then the render_memblockq will feed
2710                  * the new sink some silence first, which it shouldn't
2711                  * do. The write index should be flushed to be the
2712                  * same as the read index. */
2713 
2714                 /* Get the latency of the sink */
2715                 usec = pa_sink_get_latency_within_thread(s, false);
2716                 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2717                 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
2718 
2719                 if (total_nbytes > 0) {
2720                     i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
2721                     i->thread_info.rewrite_flush = true;
2722                     pa_sink_input_process_rewind(i, sink_nbytes);
2723                 }
2724             }
2725 
2726             pa_sink_input_detach(i);
2727 
2728             /* Let's remove the sink input ...*/
2729             pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2730 
2731             pa_sink_invalidate_requested_latency(s, true);
2732 
2733             pa_log_debug("Requesting rewind due to started move");
2734             pa_sink_request_rewind(s, (size_t) -1);
2735 
2736             /* In flat volume mode we need to update the volume as
2737              * well */
2738             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2739         }
2740 
2741         case PA_SINK_MESSAGE_FINISH_MOVE: {
2742             pa_sink_input *i = PA_SINK_INPUT(userdata);
2743 
2744             /* We don't support moving synchronized streams. */
2745             pa_assert(!i->sync_prev);
2746             pa_assert(!i->sync_next);
2747             pa_assert(!i->thread_info.sync_next);
2748             pa_assert(!i->thread_info.sync_prev);
2749 
2750             pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2751 
2752             pa_sink_input_attach(i);
2753 
2754             if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2755                 pa_usec_t usec = 0;
2756                 size_t nbytes;
2757 
2758                 /* In the ideal case the new sink would start playing
2759                  * the stream immediately. That requires the sink to
2760                  * be able to rewind all of its latency, which usually
2761                  * isn't possible, so there will probably be some gap
2762                  * before the moved stream becomes audible. We then
2763                  * have two possibilities: 1) start playing the stream
2764                  * from where it is now, or 2) drop the unrewindable
2765                  * latency of the sink from the stream. With option 1
2766                  * we won't lose any audio but the stream will have a
2767                  * pause. With option 2 we may lose some audio but the
2768                  * stream time will be somewhat in sync with the wall
2769                  * clock. Lennart seems to have chosen option 2 (one
2770                  * of the reasons might have been that option 1 is
2771                  * actually much harder to implement), so we drop the
2772                  * latency of the new sink from the moved stream and
2773                  * hope that the sink will undo most of that in the
2774                  * rewind. */
2775 
2776                 /* Get the latency of the sink */
2777                 usec = pa_sink_get_latency_within_thread(s, false);
2778                 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2779 
2780                 if (nbytes > 0)
2781                     pa_sink_input_drop(i, nbytes);
2782 
2783                 pa_log_debug("Requesting rewind due to finished move");
2784                 pa_sink_request_rewind(s, nbytes);
2785             }
2786 
2787             /* Updating the requested sink latency has to be done
2788              * after the sink rewind request, not before, because
2789              * otherwise the sink may limit the rewind amount
2790              * needlessly. */
2791 
2792             if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2793                 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2794 
2795             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2796             pa_sink_input_update_max_request(i, s->thread_info.max_request);
2797 
2798             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2799         }
2800 
2801         case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2802             pa_sink *root_sink = pa_sink_get_master(s);
2803 
2804             if (PA_LIKELY(root_sink))
2805                 set_shared_volume_within_thread(root_sink);
2806 
2807             return 0;
2808         }
2809 
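        /* Note on the deferred-volume path below: for sinks with
         * PA_SINK_DEFERRED_VOLUME the implementation's set_volume() callback
         * picks the new hardware volume and pa_sink_volume_change_push()
         * queues the actual write so it can be timed against the sink
         * latency (see the volume change queue further down). The handler
         * then falls through to the plain SET_VOLUME path, which propagates
         * the soft volume to thread_info and, via SYNC_VOLUMES, to the
         * inputs. */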
2810         case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2811 
2812             if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2813                 s->set_volume(s);
2814                 pa_sink_volume_change_push(s);
2815             }
2816             /* Fall through ... */
2817 
2818         case PA_SINK_MESSAGE_SET_VOLUME:
2819 
2820             if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2821                 s->thread_info.soft_volume = s->soft_volume;
2822                 pa_sink_request_rewind(s, (size_t) -1);
2823             }
2824 
2825             /* Fall through ... */
2826 
2827         case PA_SINK_MESSAGE_SYNC_VOLUMES:
2828             sync_input_volumes_within_thread(s);
2829             return 0;
2830 
2831         case PA_SINK_MESSAGE_GET_VOLUME:
2832 
2833             if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2834                 s->get_volume(s);
2835                 pa_sink_volume_change_flush(s);
2836                 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2837             }
2838 
2839             /* In case the sink implementor reset the SW volume. */
2840             if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2841                 s->thread_info.soft_volume = s->soft_volume;
2842                 pa_sink_request_rewind(s, (size_t) -1);
2843             }
2844 
2845             return 0;
2846 
2847         case PA_SINK_MESSAGE_SET_MUTE:
2848 
2849             if (s->thread_info.soft_muted != s->muted) {
2850                 s->thread_info.soft_muted = s->muted;
2851                 pa_sink_request_rewind(s, (size_t) -1);
2852             }
2853 
2854             if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2855                 s->set_mute(s);
2856 
2857             return 0;
2858 
2859         case PA_SINK_MESSAGE_GET_MUTE:
2860 
2861             if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2862                 return s->get_mute(s, userdata);
2863 
2864             return 0;
2865 
2866         case PA_SINK_MESSAGE_SET_STATE: {
2867             struct set_state_data *data = userdata;
2868             bool suspend_change =
2869                 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(data->state)) ||
2870                 (PA_SINK_IS_OPENED(s->thread_info.state) && data->state == PA_SINK_SUSPENDED);
2871 
2872             if (s->set_state_in_io_thread) {
2873                 int r;
2874 
2875                 if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)
2876                     return r;
2877             }
2878 
2879             s->thread_info.state = data->state;
2880 
2881             if (s->thread_info.state == PA_SINK_SUSPENDED) {
2882                 s->thread_info.rewind_nbytes = 0;
2883                 s->thread_info.rewind_requested = false;
2884             }
2885 
2886             if (suspend_change) {
2887                 pa_sink_input *i;
2888                 void *state = NULL;
2889 
2890                 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2891                     if (i->suspend_within_thread)
2892                         i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2893             }
2894 
2895             return 0;
2896         }
2897 
2898         case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2899 
2900             pa_usec_t *usec = userdata;
2901             *usec = pa_sink_get_requested_latency_within_thread(s);
2902 
2903             /* Yes, that's right, the IO thread will see -1 when no
2904              * explicit requested latency is configured, while the main
2905              * thread will see max_latency */
2906             if (*usec == (pa_usec_t) -1)
2907                 *usec = s->thread_info.max_latency;
2908 
2909             return 0;
2910         }
2911 
2912         case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2913             pa_usec_t *r = userdata;
2914 
2915             pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2916 
2917             return 0;
2918         }
2919 
2920         case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2921             pa_usec_t *r = userdata;
2922 
2923             r[0] = s->thread_info.min_latency;
2924             r[1] = s->thread_info.max_latency;
2925 
2926             return 0;
2927         }
2928 
2929         case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2930 
2931             *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2932             return 0;
2933 
2934         case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2935 
2936             pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2937             return 0;
2938 
2939         case PA_SINK_MESSAGE_GET_MAX_REWIND:
2940 
2941             *((size_t*) userdata) = s->thread_info.max_rewind;
2942             return 0;
2943 
2944         case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2945 
2946             *((size_t*) userdata) = s->thread_info.max_request;
2947             return 0;
2948 
2949         case PA_SINK_MESSAGE_SET_MAX_REWIND:
2950 
2951             pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2952             return 0;
2953 
2954         case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2955 
2956             pa_sink_set_max_request_within_thread(s, (size_t) offset);
2957             return 0;
2958 
2959         case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2960             /* This message is sent from the IO thread and handled in the main thread. */
2961             pa_assert_ctl_context();
2962 
2963             /* Make sure we're not messing with main thread when no longer linked */
2964             if (!PA_SINK_IS_LINKED(s->state))
2965                 return 0;
2966 
2967             pa_sink_get_volume(s, true);
2968             pa_sink_get_mute(s, true);
2969             return 0;
2970 
2971         case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
2972             s->thread_info.port_latency_offset = offset;
2973             return 0;
2974 
2975         case PA_SINK_MESSAGE_GET_LATENCY:
2976         case PA_SINK_MESSAGE_MAX:
2977             ;
2978     }
2979 
2980     return -1;
2981 }
2982 
2983 /* Called from main thread */
2984 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2985     pa_sink *sink;
2986     uint32_t idx;
2987     int ret = 0;
2988 
2989     pa_core_assert_ref(c);
2990     pa_assert_ctl_context();
2991     pa_assert(cause != 0);
2992 
2993     PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2994         int r;
2995 
2996         if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2997             ret = r;
2998     }
2999 
3000     return ret;
3001 }
3002 
3003 /* Called from IO thread */
3004 void pa_sink_detach_within_thread(pa_sink *s) {
3005     pa_sink_input *i;
3006     void *state = NULL;
3007 
3008     pa_sink_assert_ref(s);
3009     pa_sink_assert_io_context(s);
3010     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3011 
3012     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3013         pa_sink_input_detach(i);
3014 
3015     if (s->monitor_source)
3016         pa_source_detach_within_thread(s->monitor_source);
3017 }
3018 
3019 /* Called from IO thread */
3020 void pa_sink_attach_within_thread(pa_sink *s) {
3021     pa_sink_input *i;
3022     void *state = NULL;
3023 
3024     pa_sink_assert_ref(s);
3025     pa_sink_assert_io_context(s);
3026     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3027 
3028     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3029         pa_sink_input_attach(i);
3030 
3031     if (s->monitor_source)
3032         pa_source_attach_within_thread(s->monitor_source);
3033 }
3034 
3035 /* Called from IO thread */
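/* Ask the sink to rewrite part of its already rendered buffer. Passing
 * (size_t) -1 requests the maximum possible rewind; the amount is always
 * clamped to thread_info.max_rewind, and a new request only takes effect if
 * it asks for more than an already pending one. For example, the message
 * handlers above call pa_sink_request_rewind(s, (size_t) -1) after a volume
 * or mute change so that already rendered audio is mixed again. */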
3036 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
3037     pa_sink_assert_ref(s);
3038     pa_sink_assert_io_context(s);
3039     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3040 
3041     if (nbytes == (size_t) -1)
3042         nbytes = s->thread_info.max_rewind;
3043 
3044     nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
3045 
3046     if (s->thread_info.rewind_requested &&
3047         nbytes <= s->thread_info.rewind_nbytes)
3048         return;
3049 
3050     s->thread_info.rewind_nbytes = nbytes;
3051     s->thread_info.rewind_requested = true;
3052 
3053     if (s->request_rewind)
3054         s->request_rewind(s);
3055 }
3056 
3057 /* Called from IO thread */
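/* Compute the latency the sink should aim for: for fixed-latency sinks this
 * is simply the fixed latency clamped to the allowed range; for dynamic-
 * latency sinks it is the smallest latency requested by any connected sink
 * input or by the monitor source, again clamped to
 * [min_latency, max_latency]. (pa_usec_t) -1 means "no request". The result
 * is cached in thread_info.requested_latency, but only while the sink is
 * linked. */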
3058 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3059     pa_usec_t result = (pa_usec_t) -1;
3060     pa_sink_input *i;
3061     void *state = NULL;
3062     pa_usec_t monitor_latency;
3063 
3064     pa_sink_assert_ref(s);
3065     pa_sink_assert_io_context(s);
3066 
3067     if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3068         return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3069 
3070     if (s->thread_info.requested_latency_valid)
3071         return s->thread_info.requested_latency;
3072 
3073     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3074         if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3075             (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3076             result = i->thread_info.requested_sink_latency;
3077 
3078     monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3079 
3080     if (monitor_latency != (pa_usec_t) -1 &&
3081         (result == (pa_usec_t) -1 || result > monitor_latency))
3082         result = monitor_latency;
3083 
3084     if (result != (pa_usec_t) -1)
3085         result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3086 
3087     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3088         /* Only cache if properly initialized */
3089         s->thread_info.requested_latency = result;
3090         s->thread_info.requested_latency_valid = true;
3091     }
3092 
3093     return result;
3094 }
3095 
3096 /* Called from main thread */
3097 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3098     pa_usec_t usec = 0;
3099 
3100     pa_sink_assert_ref(s);
3101     pa_assert_ctl_context();
3102     pa_assert(PA_SINK_IS_LINKED(s->state));
3103 
3104     if (s->state == PA_SINK_SUSPENDED)
3105         return 0;
3106 
3107     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3108 
3109     return usec;
3110 }
3111 
3112 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3113 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3114     pa_sink_input *i;
3115     void *state = NULL;
3116 
3117     pa_sink_assert_ref(s);
3118     pa_sink_assert_io_context(s);
3119 
3120     if (max_rewind == s->thread_info.max_rewind)
3121         return;
3122 
3123     s->thread_info.max_rewind = max_rewind;
3124 
3125     if (PA_SINK_IS_LINKED(s->thread_info.state))
3126         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3127             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3128 
3129     if (s->monitor_source)
3130         pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3131 }
3132 
3133 /* Called from main thread */
3134 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3135     pa_sink_assert_ref(s);
3136     pa_assert_ctl_context();
3137 
3138     if (PA_SINK_IS_LINKED(s->state))
3139         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3140     else
3141         pa_sink_set_max_rewind_within_thread(s, max_rewind);
3142 }
3143 
3144 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3145 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3146     void *state = NULL;
3147 
3148     pa_sink_assert_ref(s);
3149     pa_sink_assert_io_context(s);
3150 
3151     if (max_request == s->thread_info.max_request)
3152         return;
3153 
3154     s->thread_info.max_request = max_request;
3155 
3156     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3157         pa_sink_input *i;
3158 
3159         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3160             pa_sink_input_update_max_request(i, s->thread_info.max_request);
3161     }
3162 }
3163 
3164 /* Called from main thread */
3165 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3166     pa_sink_assert_ref(s);
3167     pa_assert_ctl_context();
3168 
3169     if (PA_SINK_IS_LINKED(s->state))
3170         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3171     else
3172         pa_sink_set_max_request_within_thread(s, max_request);
3173 }
3174 
3175 /* Called from IO thread */
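/* Drop the cached requested latency (if any) and notify the sink
 * implementation and all inputs that it may have changed. Sinks without
 * PA_SINK_DYNAMIC_LATENCY have nothing to recompute, so a call with
 * dynamic == true is a no-op for them; latency range and fixed latency
 * updates pass dynamic == false so that the notifications still go out. */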
3176 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3177     pa_sink_input *i;
3178     void *state = NULL;
3179 
3180     pa_sink_assert_ref(s);
3181     pa_sink_assert_io_context(s);
3182 
3183     if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3184         s->thread_info.requested_latency_valid = false;
3185     else if (dynamic)
3186         return;
3187 
3188     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3189 
3190         if (s->update_requested_latency)
3191             s->update_requested_latency(s);
3192 
3193         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3194             if (i->update_sink_requested_latency)
3195                 i->update_sink_requested_latency(i);
3196     }
3197 }
3198 
3199 /* Called from main thread */
3200 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3201     pa_sink_assert_ref(s);
3202     pa_assert_ctl_context();
3203 
3204     /* min_latency == 0:           no limit
3205      * min_latency anything else:  specified limit
3206      *
3207      * Similar for max_latency */
3208 
3209     if (min_latency < ABSOLUTE_MIN_LATENCY)
3210         min_latency = ABSOLUTE_MIN_LATENCY;
3211 
3212     if (max_latency <= 0 ||
3213         max_latency > ABSOLUTE_MAX_LATENCY)
3214         max_latency = ABSOLUTE_MAX_LATENCY;
3215 
3216     pa_assert(min_latency <= max_latency);
3217 
3218     /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3219     pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3220                max_latency == ABSOLUTE_MAX_LATENCY) ||
3221               (s->flags & PA_SINK_DYNAMIC_LATENCY));
3222 
3223     if (PA_SINK_IS_LINKED(s->state)) {
3224         pa_usec_t r[2];
3225 
3226         r[0] = min_latency;
3227         r[1] = max_latency;
3228 
3229         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3230     } else
3231         pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3232 }
3233 
3234 /* Called from main thread */
3235 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3236     pa_sink_assert_ref(s);
3237     pa_assert_ctl_context();
3238     pa_assert(min_latency);
3239     pa_assert(max_latency);
3240 
3241     if (PA_SINK_IS_LINKED(s->state)) {
3242         pa_usec_t r[2] = { 0, 0 };
3243 
3244         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3245 
3246         *min_latency = r[0];
3247         *max_latency = r[1];
3248     } else {
3249         *min_latency = s->thread_info.min_latency;
3250         *max_latency = s->thread_info.max_latency;
3251     }
3252 }
3253 
3254 /* Called from IO thread */
3255 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3256     pa_sink_assert_ref(s);
3257     pa_sink_assert_io_context(s);
3258 
3259     pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3260     pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3261     pa_assert(min_latency <= max_latency);
3262 
3263     /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3264     pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3265                max_latency == ABSOLUTE_MAX_LATENCY) ||
3266               (s->flags & PA_SINK_DYNAMIC_LATENCY));
3267 
3268     if (s->thread_info.min_latency == min_latency &&
3269         s->thread_info.max_latency == max_latency)
3270         return;
3271 
3272     s->thread_info.min_latency = min_latency;
3273     s->thread_info.max_latency = max_latency;
3274 
3275     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3276         pa_sink_input *i;
3277         void *state = NULL;
3278 
3279         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3280             if (i->update_sink_latency_range)
3281                 i->update_sink_latency_range(i);
3282     }
3283 
3284     pa_sink_invalidate_requested_latency(s, false);
3285 
3286     pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3287 }
3288 
3289 /* Called from main thread */
3290 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3291     pa_sink_assert_ref(s);
3292     pa_assert_ctl_context();
3293 
3294     if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3295         pa_assert(latency == 0);
3296         return;
3297     }
3298 
3299     if (latency < ABSOLUTE_MIN_LATENCY)
3300         latency = ABSOLUTE_MIN_LATENCY;
3301 
3302     if (latency > ABSOLUTE_MAX_LATENCY)
3303         latency = ABSOLUTE_MAX_LATENCY;
3304 
3305     if (PA_SINK_IS_LINKED(s->state))
3306         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3307     else
3308         s->thread_info.fixed_latency = latency;
3309 
3310     pa_source_set_fixed_latency(s->monitor_source, latency);
3311 }
3312 
3313 /* Called from main thread */
3314 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3315     pa_usec_t latency;
3316 
3317     pa_sink_assert_ref(s);
3318     pa_assert_ctl_context();
3319 
3320     if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3321         return 0;
3322 
3323     if (PA_SINK_IS_LINKED(s->state))
3324         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3325     else
3326         latency = s->thread_info.fixed_latency;
3327 
3328     return latency;
3329 }
3330 
3331 /* Called from IO thread */
3332 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3333     pa_sink_assert_ref(s);
3334     pa_sink_assert_io_context(s);
3335 
3336     if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3337         pa_assert(latency == 0);
3338         s->thread_info.fixed_latency = 0;
3339 
3340         if (s->monitor_source)
3341             pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3342 
3343         return;
3344     }
3345 
3346     pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3347     pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3348 
3349     if (s->thread_info.fixed_latency == latency)
3350         return;
3351 
3352     s->thread_info.fixed_latency = latency;
3353 
3354     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3355         pa_sink_input *i;
3356         void *state = NULL;
3357 
3358         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3359             if (i->update_sink_fixed_latency)
3360                 i->update_sink_fixed_latency(i);
3361     }
3362 
3363     pa_sink_invalidate_requested_latency(s, false);
3364 
3365     pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3366 }
3367 
3368 /* Called from main context */
3369 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3370     pa_sink_assert_ref(s);
3371 
3372     s->port_latency_offset = offset;
3373 
3374     if (PA_SINK_IS_LINKED(s->state))
3375         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3376     else
3377         s->thread_info.port_latency_offset = offset;
3378 
3379     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3380 }
3381 
3382 /* Called from main context */
3383 size_t pa_sink_get_max_rewind(pa_sink *s) {
3384     size_t r;
3385     pa_assert_ctl_context();
3386     pa_sink_assert_ref(s);
3387 
3388     if (!PA_SINK_IS_LINKED(s->state))
3389         return s->thread_info.max_rewind;
3390 
3391     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3392 
3393     return r;
3394 }
3395 
3396 /* Called from main context */
3397 size_t pa_sink_get_max_request(pa_sink *s) {
3398     size_t r;
3399     pa_sink_assert_ref(s);
3400     pa_assert_ctl_context();
3401 
3402     if (!PA_SINK_IS_LINKED(s->state))
3403         return s->thread_info.max_request;
3404 
3405     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3406 
3407     return r;
3408 }
3409 
3410 /* Called from main context */
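/* Switch the active port of the sink. Returns 0 on success (also when the
 * requested port is already active), -PA_ERR_NOTIMPLEMENTED if the sink has
 * no set_port() callback, and -PA_ERR_NOENTITY if the port name is missing,
 * unknown, or the callback fails. On success the port latency offset is
 * applied and the default sink selection is re-evaluated. */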
3411 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3412     pa_device_port *port;
3413 
3414     pa_sink_assert_ref(s);
3415     pa_assert_ctl_context();
3416 
3417     if (!s->set_port) {
3418         pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3419         return -PA_ERR_NOTIMPLEMENTED;
3420     }
3421 
3422     if (!name)
3423         return -PA_ERR_NOENTITY;
3424 
3425     if (!(port = pa_hashmap_get(s->ports, name)))
3426         return -PA_ERR_NOENTITY;
3427 
3428     if (s->active_port == port) {
3429         s->save_port = s->save_port || save;
3430         return 0;
3431     }
3432 
3433     if (s->set_port(s, port) < 0)
3434         return -PA_ERR_NOENTITY;
3435 
3436     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3437 
3438     pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3439 
3440     s->active_port = port;
3441     s->save_port = save;
3442 
3443     pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3444 
3445     /* The active port affects the default sink selection. */
3446     pa_core_update_default_sink(s->core);
3447 
3448     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3449 
3450     return 0;
3451 }
3452 
3453 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3454     const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3455 
3456     pa_assert(p);
3457 
3458     if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3459         return true;
3460 
3461     if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3462 
3463         if (pa_streq(ff, "microphone"))
3464             t = "audio-input-microphone";
3465         else if (pa_streq(ff, "webcam"))
3466             t = "camera-web";
3467         else if (pa_streq(ff, "computer"))
3468             t = "computer";
3469         else if (pa_streq(ff, "handset"))
3470             t = "phone";
3471         else if (pa_streq(ff, "portable"))
3472             t = "multimedia-player";
3473         else if (pa_streq(ff, "tv"))
3474             t = "video-display";
3475 
3476         /*
3477          * The following icons are not part of the icon naming spec,
3478          * because Rodney Dawes sucks as the maintainer of that spec.
3479          *
3480          * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3481          */
3482         else if (pa_streq(ff, "headset"))
3483             t = "audio-headset";
3484         else if (pa_streq(ff, "headphone"))
3485             t = "audio-headphones";
3486         else if (pa_streq(ff, "speaker"))
3487             t = "audio-speakers";
3488         else if (pa_streq(ff, "hands-free"))
3489             t = "audio-handsfree";
3490     }
3491 
3492     if (!t)
3493         if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3494             if (pa_streq(c, "modem"))
3495                 t = "modem";
3496 
3497     if (!t) {
3498         if (is_sink)
3499             t = "audio-card";
3500         else
3501             t = "audio-input-microphone";
3502     }
3503 
3504     if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3505         if (strstr(profile, "analog"))
3506             s = "-analog";
3507         else if (strstr(profile, "iec958"))
3508             s = "-iec958";
3509         else if (strstr(profile, "hdmi"))
3510             s = "-hdmi";
3511     }
3512 
3513     bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3514 
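    /* The icon name is assembled as "<type><profile suffix>[-<bus>]". As an
     * illustrative example, a device with form factor "headphone", a profile
     * name containing "analog" and bus "usb" ends up with
     * "audio-headphones-analog-usb". */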
3515     pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3516 
3517     return true;
3518 }
3519 
3520 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3521     const char *s, *d = NULL, *k;
3522     pa_assert(p);
3523 
3524     if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3525         return true;
3526 
3527     if (card)
3528         if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3529             d = s;
3530 
3531     if (!d)
3532         if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3533             if (pa_streq(s, "internal"))
3534                 d = _("Built-in Audio");
3535 
3536     if (!d)
3537         if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3538             if (pa_streq(s, "modem"))
3539                 d = _("Modem");
3540 
3541     if (!d)
3542         d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3543 
3544     if (!d)
3545         return false;
3546 
3547     k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3548 
3549     if (d && k)
3550         pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3551     else if (d)
3552         pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3553 
3554     return true;
3555 }
3556 
3557 bool pa_device_init_intended_roles(pa_proplist *p) {
3558     const char *s;
3559     pa_assert(p);
3560 
3561     if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3562         return true;
3563 
3564     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3565         if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3566             || pa_streq(s, "headset")) {
3567             pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3568             return true;
3569         }
3570 
3571     return false;
3572 }
3573 
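/* Derive a rough priority for a device from its properties. The individual
 * contributions are simply added up: device class, form factor, bus and
 * profile name each add a fixed amount. As an illustrative example, a device
 * with class "sound", form factor "headphone", bus "usb" and a profile name
 * starting with "analog-" scores 9000 + 900 + 40 + 9 = 9949. */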
3574 unsigned pa_device_init_priority(pa_proplist *p) {
3575     const char *s;
3576     unsigned priority = 0;
3577 
3578     pa_assert(p);
3579 
3580     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3581 
3582         if (pa_streq(s, "sound"))
3583             priority += 9000;
3584         else if (!pa_streq(s, "modem"))
3585             priority += 1000;
3586     }
3587 
3588     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3589 
3590         if (pa_streq(s, "headphone"))
3591             priority += 900;
3592         else if (pa_streq(s, "hifi"))
3593             priority += 600;
3594         else if (pa_streq(s, "speaker"))
3595             priority += 500;
3596         else if (pa_streq(s, "portable"))
3597             priority += 450;
3598     }
3599 
3600     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3601 
3602         if (pa_streq(s, "bluetooth"))
3603             priority += 50;
3604         else if (pa_streq(s, "usb"))
3605             priority += 40;
3606         else if (pa_streq(s, "pci"))
3607             priority += 30;
3608     }
3609 
3610     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3611 
3612         if (pa_startswith(s, "analog-"))
3613             priority += 9;
3614         else if (pa_startswith(s, "iec958-"))
3615             priority += 8;
3616     }
3617 
3618     return priority;
3619 }
3620 
3621 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3622 
3623 /* Called from the IO thread. */
3624 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3625     pa_sink_volume_change *c;
3626     if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3627         c = pa_xnew(pa_sink_volume_change, 1);
3628 
3629     PA_LLIST_INIT(pa_sink_volume_change, c);
3630     c->at = 0;
3631     pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3632     return c;
3633 }
3634 
3635 /* Called from the IO thread. */
3636 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3637     pa_assert(c);
3638     if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3639         pa_xfree(c);
3640 }
3641 
3642 /* Called from the IO thread. */
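/* Queue a deferred hardware volume write. The target volume is the part of
 * real_volume not covered by soft_volume, and the write is scheduled at
 * "now + sink latency + volume_change_extra_delay", nudged by the safety
 * margin: increases are applied a bit later and decreases a bit earlier
 * (see the loop below). Queued changes that would only take effect after
 * the new one are dropped. */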
3643 static void pa_sink_volume_change_push(pa_sink *s) {
3644     pa_sink_volume_change *c = NULL;
3645     pa_sink_volume_change *nc = NULL;
3646     pa_sink_volume_change *pc = NULL;
3647     uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3648 
3649     const char *direction = NULL;
3650 
3651     pa_assert(s);
3652     nc = pa_sink_volume_change_new(s);
3653 
3654     /* NOTE: There are already more different volumes in pa_sink than I can remember.
3655      *       Adding one more volume for HW would let us get rid of this, but I am trying
3656      *       to survive with the ones we already have. */
3657     pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3658 
3659     if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3660         pa_log_debug("Volume not changing");
3661         pa_sink_volume_change_free(nc);
3662         return;
3663     }
3664 
3665     nc->at = pa_sink_get_latency_within_thread(s, false);
3666     nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3667 
3668     if (s->thread_info.volume_changes_tail) {
3669         for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3670             /* If volume is going up let's do it a bit late. If it is going
3671              * down let's do it a bit early. */
3672             if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3673                 if (nc->at + safety_margin > c->at) {
3674                     nc->at += safety_margin;
3675                     direction = "up";
3676                     break;
3677                 }
3678             }
2679             else if (nc->at - safety_margin > c->at) {
2680                 nc->at -= safety_margin;
2681                 direction = "down";
2682                 break;
2683             }
3684         }
3685     }
3686 
3687     if (c == NULL) {
3688         if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3689             nc->at += safety_margin;
3690             direction = "up";
3691         } else {
3692             nc->at -= safety_margin;
3693             direction = "down";
3694         }
3695         PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3696     }
3697     else {
3698         PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3699     }
3700 
3701     pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3702 
3703     /* We can ignore volume events that came earlier but should happen later than this. */
3704     PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3705         pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3706         pa_sink_volume_change_free(c);
3707     }
3708     nc->next = NULL;
3709     s->thread_info.volume_changes_tail = nc;
3710 }
3711 
3712 /* Called from the IO thread. */
3713 static void pa_sink_volume_change_flush(pa_sink *s) {
3714     pa_sink_volume_change *c = s->thread_info.volume_changes;
3715     pa_assert(s);
3716     s->thread_info.volume_changes = NULL;
3717     s->thread_info.volume_changes_tail = NULL;
3718     while (c) {
3719         pa_sink_volume_change *next = c->next;
3720         pa_sink_volume_change_free(c);
3721         c = next;
3722     }
3723 }
3724 
3725 /* Called from the IO thread. */
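/* Write out all queued hardware volume changes that are due by now. Returns
 * true if at least one change was applied (in which case write_volume() has
 * been called), and reports the time until the next pending change through
 * usec_to_next (0 when the queue is empty or the sink is not linked). */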
3726 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3727     pa_usec_t now;
3728     bool ret = false;
3729 
3730     pa_assert(s);
3731 
3732     if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3733         if (usec_to_next)
3734             *usec_to_next = 0;
3735         return ret;
3736     }
3737 
3738     pa_assert(s->write_volume);
3739 
3740     now = pa_rtclock_now();
3741 
3742     while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3743         pa_sink_volume_change *c = s->thread_info.volume_changes;
3744         PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3745         pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3746                      pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3747         ret = true;
3748         s->thread_info.current_hw_volume = c->hw_volume;
3749         pa_sink_volume_change_free(c);
3750     }
3751 
3752     if (ret)
3753         s->write_volume(s);
3754 
3755     if (s->thread_info.volume_changes) {
3756         if (usec_to_next)
3757             *usec_to_next = s->thread_info.volume_changes->at - now;
3758         if (pa_log_ratelimit(PA_LOG_DEBUG))
3759             pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3760     }
3761     else {
3762         if (usec_to_next)
3763             *usec_to_next = 0;
3764         s->thread_info.volume_changes_tail = NULL;
3765     }
3766     return ret;
3767 }
3768 
3769 /* Called from the IO thread. */
3770 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3771     /* All the queued volume events later than the current latency are shifted to happen earlier. */
3772     pa_sink_volume_change *c;
3773     pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3774     pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3775     pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3776 
3777     pa_log_debug("latency = %lld", (long long) limit);
3778     limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3779 
3780     PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3781         pa_usec_t modified_limit = limit;
3782         if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3783             modified_limit -= s->thread_info.volume_change_safety_margin;
3784         else
3785             modified_limit += s->thread_info.volume_change_safety_margin;
3786         if (c->at > modified_limit) {
3787             c->at -= rewound;
3788             if (c->at < modified_limit)
3789                 c->at = modified_limit;
3790         }
3791         prev_vol = pa_cvolume_avg(&c->hw_volume);
3792     }
3793     pa_sink_volume_change_apply(s, NULL);
3794 }
3795 
3796 /* Called from the main thread */
3797 /* Gets the list of formats supported by the sink. The members and idxset must
3798  * be freed by the caller. */
3799 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3800     pa_idxset *ret;
3801 
3802     pa_assert(s);
3803 
3804     if (s->get_formats) {
3805         /* Sink supports format query, all is good */
3806         ret = s->get_formats(s);
3807     } else {
3808         /* Sink doesn't support format query, so assume it does PCM */
3809         pa_format_info *f = pa_format_info_new();
3810         f->encoding = PA_ENCODING_PCM;
3811 
3812         ret = pa_idxset_new(NULL, NULL);
3813         pa_idxset_put(ret, f, NULL);
3814     }
3815 
3816     return ret;
3817 }
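/* Callers of pa_sink_get_formats() own the returned set and its members; an
 * illustrative cleanup, as done by pa_sink_check_format() below, is:
 *
 *     pa_idxset *formats = pa_sink_get_formats(s);
 *     ...
 *     pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
 */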
3818 
3819 /* Called from the main thread */
3820 /* Allows an external source to set what formats a sink supports if the sink
3821  * permits this. The function makes a copy of the formats on success. */
3822 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3823     pa_assert(s);
3824     pa_assert(formats);
3825 
3826     if (s->set_formats)
3827         /* Sink supports setting formats -- let's give it a shot */
3828         return s->set_formats(s, formats);
3829     else
3830         /* Sink doesn't support setting this -- bail out */
3831         return false;
3832 }
3833 
3834 /* Called from the main thread */
3835 /* Checks if the sink can accept this format */
3836 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3837     pa_idxset *formats = NULL;
3838     bool ret = false;
3839 
3840     pa_assert(s);
3841     pa_assert(f);
3842 
3843     formats = pa_sink_get_formats(s);
3844 
3845     if (formats) {
3846         pa_format_info *finfo_device;
3847         uint32_t i;
3848 
3849         PA_IDXSET_FOREACH(finfo_device, formats, i) {
3850             if (pa_format_info_is_compatible(finfo_device, f)) {
3851                 ret = true;
3852                 break;
3853             }
3854         }
3855 
3856         pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3857     }
3858 
3859     return ret;
3860 }
3861 
3862 /* Called from the main thread */
3863 /* Calculates the intersection between formats supported by the sink and
3864  * in_formats, and returns these, in the order of the sink's formats. */
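/* Illustrative example: if the sink reports { AC3, PCM } and in_formats is
 * { PCM, AC3 }, the result contains copies of AC3 and PCM in that order,
 * because the outer loop runs over the sink's formats. */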
3865 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3866     pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3867     pa_format_info *f_sink, *f_in;
3868     uint32_t i, j;
3869 
3870     pa_assert(s);
3871 
3872     if (!in_formats || pa_idxset_isempty(in_formats))
3873         goto done;
3874 
3875     sink_formats = pa_sink_get_formats(s);
3876 
3877     PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3878         PA_IDXSET_FOREACH(f_in, in_formats, j) {
3879             if (pa_format_info_is_compatible(f_sink, f_in))
3880                 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3881         }
3882     }
3883 
3884 done:
3885     if (sink_formats)
3886         pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3887 
3888     return out_formats;
3889 }
3890 
3891 /* Called from the main thread */
3892 void pa_sink_set_sample_format(pa_sink *s, pa_sample_format_t format) {
3893     pa_sample_format_t old_format;
3894 
3895     pa_assert(s);
3896     pa_assert(pa_sample_format_valid(format));
3897 
3898     old_format = s->sample_spec.format;
3899     if (old_format == format)
3900         return;
3901 
3902     pa_log_info("%s: format: %s -> %s",
3903                 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
3904 
3905     s->sample_spec.format = format;
3906 
3907     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3908 }
3909 
3910 /* Called from the main thread */
3911 void pa_sink_set_sample_rate(pa_sink *s, uint32_t rate) {
3912     uint32_t old_rate;
3913 
3914     pa_assert(s);
3915     pa_assert(pa_sample_rate_valid(rate));
3916 
3917     old_rate = s->sample_spec.rate;
3918     if (old_rate == rate)
3919         return;
3920 
3921     pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
3922 
3923     s->sample_spec.rate = rate;
3924 
3925     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3926 }
3927 
3928 /* Called from the main thread. */
3929 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
3930     pa_cvolume old_volume;
3931     char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3932     char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
3933 
3934     pa_assert(s);
3935     pa_assert(volume);
3936 
3937     old_volume = s->reference_volume;
3938 
3939     if (pa_cvolume_equal(volume, &old_volume))
3940         return;
3941 
3942     s->reference_volume = *volume;
3943     pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
3944                  pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
3945                                             s->flags & PA_SINK_DECIBEL_VOLUME),
3946                  pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
3947                                             s->flags & PA_SINK_DECIBEL_VOLUME));
3948 
3949     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3950     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);
3951 }
3952 
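/* Called from the main thread (it ends up in pa_sink_input_move_to(), which
 * requires the control context). Move all movable streams away from old_sink
 * to the current default sink. default_sink_changed == true means the default
 * sink changed while old_sink stays usable, so streams whose preferred sink
 * is old_sink are left alone; with false the old sink became unavailable and
 * everything that can move is moved. */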
3953 void pa_sink_move_streams_to_default_sink(pa_core *core, pa_sink *old_sink, bool default_sink_changed) {
3954     pa_sink_input *i;
3955     uint32_t idx;
3956 
3957     pa_assert(core);
3958     pa_assert(old_sink);
3959 
3960     if (core->state == PA_CORE_SHUTDOWN)
3961         return;
3962 
3963     if (core->default_sink == NULL || core->default_sink->unlink_requested)
3964         return;
3965 
3966     if (old_sink == core->default_sink)
3967         return;
3968 
3969     PA_IDXSET_FOREACH(i, old_sink->inputs, idx) {
3970         if (!PA_SINK_INPUT_IS_LINKED(i->state))
3971             continue;
3972 
3973         if (!i->sink)
3974             continue;
3975 
3976         /* Don't move sink-inputs which connect filter sinks to their target sinks */
3977         if (i->origin_sink)
3978             continue;
3979 
3980         /* If default_sink_changed is false, the old sink became unavailable, so all streams must be moved. */
3981         if (pa_safe_streq(old_sink->name, i->preferred_sink) && default_sink_changed)
3982             continue;
3983 
3984         if (!pa_sink_input_may_move_to(i, core->default_sink))
3985             continue;
3986 
3987         if (default_sink_changed)
3988             pa_log_info("The sink input %u \"%s\" is moving to %s due to change of the default sink.",
3989                         i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
3990         else
3991             pa_log_info("The sink input %u \"%s\" is moving to %s, because the old sink became unavailable.",
3992                         i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
3993 
3994         pa_sink_input_move_to(i, core->default_sink, false);
3995     }
3996 }
3997