1 /***
2   This file is part of PulseAudio.
3 
4   Copyright 2004-2006 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6 
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11 
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16 
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20 
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24 
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 
29 #include <pulse/introspect.h>
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
37 
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/stream-util.h>
44 #include <pulsecore/mix.h>
45 #include <pulsecore/core-subscribe.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/play-memblockq.h>
49 #include <pulsecore/flist.h>
50 
51 #include "sink.h"
52 
53 #define MAX_MIX_CHANNELS 32
54 #define MIX_BUFFER_LENGTH (pa_page_size())
55 #define ABSOLUTE_MIN_LATENCY (500)
56 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
57 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
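/* Notes on the constants above (added for orientation): MAX_MIX_CHANNELS caps
 * how many sink inputs are mixed in a single render pass, MIX_BUFFER_LENGTH
 * sizes the default render request to one memory page, and the three latency
 * values are expressed in microseconds. */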
58 
59 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
60 
61 struct pa_sink_volume_change {
62     pa_usec_t at;
63     pa_cvolume hw_volume;
64 
65     PA_LLIST_FIELDS(pa_sink_volume_change);
66 };
67 
68 struct set_state_data {
69     pa_sink_state_t state;
70     pa_suspend_cause_t suspend_cause;
71 };
72 
73 static void sink_free(pa_object *s);
74 
75 static void pa_sink_volume_change_push(pa_sink *s);
76 static void pa_sink_volume_change_flush(pa_sink *s);
77 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
78 
79 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
80     pa_assert(data);
81 
82     pa_zero(*data);
83     data->proplist = pa_proplist_new();
84     data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
85 
86     return data;
87 }
88 
89 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
90     pa_assert(data);
91 
92     pa_xfree(data->name);
93     data->name = pa_xstrdup(name);
94 }
95 
96 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
97     pa_assert(data);
98 
99     if ((data->sample_spec_is_set = !!spec))
100         data->sample_spec = *spec;
101 }
102 
103 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
104     pa_assert(data);
105 
106     if ((data->channel_map_is_set = !!map))
107         data->channel_map = *map;
108 }
109 
110 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
111     pa_assert(data);
112 
113     data->alternate_sample_rate_is_set = true;
114     data->alternate_sample_rate = alternate_sample_rate;
115 }
116 
117 void pa_sink_new_data_set_avoid_resampling(pa_sink_new_data *data, bool avoid_resampling) {
118     pa_assert(data);
119 
120     data->avoid_resampling_is_set = true;
121     data->avoid_resampling = avoid_resampling;
122 }
123 
124 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
125     pa_assert(data);
126 
127     if ((data->volume_is_set = !!volume))
128         data->volume = *volume;
129 }
130 
131 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
132     pa_assert(data);
133 
134     data->muted_is_set = true;
135     data->muted = mute;
136 }
137 
138 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
139     pa_assert(data);
140 
141     pa_xfree(data->active_port);
142     data->active_port = pa_xstrdup(port);
143 }
144 
145 void pa_sink_new_data_done(pa_sink_new_data *data) {
146     pa_assert(data);
147 
148     pa_proplist_free(data->proplist);
149 
150     if (data->ports)
151         pa_hashmap_free(data->ports);
152 
153     pa_xfree(data->name);
154     pa_xfree(data->active_port);
155 }
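
/* A rough usage sketch (not part of this file): a sink implementation module
 * typically fills a pa_sink_new_data on the stack before calling
 * pa_sink_new(), roughly like this. The name "my_sink", the sample spec/map
 * variables and the flag choice are placeholders:
 *
 *     pa_sink_new_data data;
 *     pa_sink_new_data_init(&data);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     pa_sink_new_data_set_name(&data, "my_sink");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     pa_sink_new_data_set_channel_map(&data, &map);
 *     sink = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);
 */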
156 
157 /* Called from main context */
158 static void reset_callbacks(pa_sink *s) {
159     pa_assert(s);
160 
161     s->set_state_in_main_thread = NULL;
162     s->set_state_in_io_thread = NULL;
163     s->get_volume = NULL;
164     s->set_volume = NULL;
165     s->write_volume = NULL;
166     s->get_mute = NULL;
167     s->set_mute = NULL;
168     s->request_rewind = NULL;
169     s->update_requested_latency = NULL;
170     s->set_port = NULL;
171     s->get_formats = NULL;
172     s->set_formats = NULL;
173     s->reconfigure = NULL;
174 }
175 
176 /* Called from main context */
177 pa_sink* pa_sink_new(
178         pa_core *core,
179         pa_sink_new_data *data,
180         pa_sink_flags_t flags) {
181 
182     pa_sink *s;
183     const char *name;
184     char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
185     pa_source_new_data source_data;
186     const char *dn;
187     char *pt;
188 
189     pa_assert(core);
190     pa_assert(data);
191     pa_assert(data->name);
192     pa_assert_ctl_context();
193 
194     s = pa_msgobject_new(pa_sink);
195 
196     if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
197         pa_log_debug("Failed to register name %s.", data->name);
198         pa_xfree(s);
199         return NULL;
200     }
201 
202     pa_sink_new_data_set_name(data, name);
203 
204     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
205         pa_xfree(s);
206         pa_namereg_unregister(core, name);
207         return NULL;
208     }
209 
210     /* FIXME, need to free s here on failure */
211 
212     pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
213     pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
214 
215     pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
216 
217     if (!data->channel_map_is_set)
218         pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
219 
220     pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
221     pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
222 
223     /* FIXME: There should probably be a general function for checking whether
224      * the sink volume is allowed to be set, like there is for sink inputs. */
225     pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
226 
227     if (!data->volume_is_set) {
228         pa_cvolume_reset(&data->volume, data->sample_spec.channels);
229         data->save_volume = false;
230     }
231 
232     pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
233     pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
234 
235     if (!data->muted_is_set)
236         data->muted = false;
237 
238     if (data->card)
239         pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
240 
241     pa_device_init_description(data->proplist, data->card);
242     pa_device_init_icon(data->proplist, true);
243     pa_device_init_intended_roles(data->proplist);
244 
245     if (!data->active_port) {
246         pa_device_port *p = pa_device_port_find_best(data->ports);
247         if (p)
248             pa_sink_new_data_set_port(data, p->name);
249     }
250 
251     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
252         pa_xfree(s);
253         pa_namereg_unregister(core, name);
254         return NULL;
255     }
256 
257     s->parent.parent.free = sink_free;
258     s->parent.process_msg = pa_sink_process_msg;
259 
260     s->core = core;
261     s->state = PA_SINK_INIT;
262     s->flags = flags;
263     s->priority = 0;
264     s->suspend_cause = data->suspend_cause;
265     s->name = pa_xstrdup(name);
266     s->proplist = pa_proplist_copy(data->proplist);
267     s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
268     s->module = data->module;
269     s->card = data->card;
270 
271     s->priority = pa_device_init_priority(s->proplist);
272 
273     s->sample_spec = data->sample_spec;
274     s->channel_map = data->channel_map;
275     s->default_sample_rate = s->sample_spec.rate;
276 
277     if (data->alternate_sample_rate_is_set)
278         s->alternate_sample_rate = data->alternate_sample_rate;
279     else
280         s->alternate_sample_rate = s->core->alternate_sample_rate;
281 
282     if (data->avoid_resampling_is_set)
283         s->avoid_resampling = data->avoid_resampling;
284     else
285         s->avoid_resampling = s->core->avoid_resampling;
286 
287     s->inputs = pa_idxset_new(NULL, NULL);
288     s->n_corked = 0;
289     s->input_to_master = NULL;
290 
291     s->reference_volume = s->real_volume = data->volume;
292     pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
293     s->base_volume = PA_VOLUME_NORM;
294     s->n_volume_steps = PA_VOLUME_NORM+1;
295     s->muted = data->muted;
296     s->refresh_volume = s->refresh_muted = false;
297 
298     reset_callbacks(s);
299     s->userdata = NULL;
300 
301     s->asyncmsgq = NULL;
302 
303     /* As a minor optimization we just steal the list instead of
304      * copying it here */
305     s->ports = data->ports;
306     data->ports = NULL;
307 
308     s->active_port = NULL;
309     s->save_port = false;
310 
311     if (data->active_port)
312         if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
313             s->save_port = data->save_port;
314 
315     /* Hopefully the active port has already been assigned in the previous call
316        to pa_device_port_find_best, but better safe than sorry */
317     if (!s->active_port)
318         s->active_port = pa_device_port_find_best(s->ports);
319 
320     if (s->active_port)
321         s->port_latency_offset = s->active_port->latency_offset;
322     else
323         s->port_latency_offset = 0;
324 
325     s->save_volume = data->save_volume;
326     s->save_muted = data->save_muted;
327 
328     pa_silence_memchunk_get(
329             &core->silence_cache,
330             core->mempool,
331             &s->silence,
332             &s->sample_spec,
333             0);
334 
335     s->thread_info.rtpoll = NULL;
336     s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
337                                                 (pa_free_cb_t) pa_sink_input_unref);
338     s->thread_info.soft_volume =  s->soft_volume;
339     s->thread_info.soft_muted = s->muted;
340     s->thread_info.state = s->state;
341     s->thread_info.rewind_nbytes = 0;
342     s->thread_info.last_rewind_nbytes = 0;
343     s->thread_info.rewind_requested = false;
344     s->thread_info.max_rewind = 0;
345     s->thread_info.max_request = 0;
346     s->thread_info.requested_latency_valid = false;
347     s->thread_info.requested_latency = 0;
348     s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
349     s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
350     s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
351 
352     PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
353     s->thread_info.volume_changes_tail = NULL;
354     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
355     s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
356     s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
357     s->thread_info.port_latency_offset = s->port_latency_offset;
358 
359     /* FIXME: This should probably be moved to pa_sink_put() */
360     pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
361 
362     if (s->card)
363         pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
364 
365     pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
366     pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
367                 s->index,
368                 s->name,
369                 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
370                 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
371                 pt);
372     pa_xfree(pt);
373 
374     pa_source_new_data_init(&source_data);
375     pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
376     pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
377     pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
378     pa_source_new_data_set_avoid_resampling(&source_data, s->avoid_resampling);
379     source_data.name = pa_sprintf_malloc("%s.monitor", name);
380     source_data.driver = data->driver;
381     source_data.module = data->module;
382     source_data.card = data->card;
383 
384     dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
385     pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
386     pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
387 
388     s->monitor_source = pa_source_new(core, &source_data,
389                                       ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
390                                       ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
391 
392     pa_source_new_data_done(&source_data);
393 
394     if (!s->monitor_source) {
395         pa_sink_unlink(s);
396         pa_sink_unref(s);
397         return NULL;
398     }
399 
400     s->monitor_source->monitor_of = s;
401 
402     pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
403     pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
404     pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
405 
406     return s;
407 }
408 
409 /* Called from main context */
410 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
411     int ret = 0;
412     bool state_changed;
413     bool suspend_cause_changed;
414     bool suspending;
415     bool resuming;
416     pa_sink_state_t old_state;
417     pa_suspend_cause_t old_suspend_cause;
418 
419     pa_assert(s);
420     pa_assert_ctl_context();
421 
422     state_changed = state != s->state;
423     suspend_cause_changed = suspend_cause != s->suspend_cause;
424 
425     if (!state_changed && !suspend_cause_changed)
426         return 0;
427 
428     suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
429     resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
430 
431     /* If we are resuming, suspend_cause must be 0. */
432     pa_assert(!resuming || !suspend_cause);
433 
434     /* Here's something to think about: what to do with the suspend cause if
435      * resuming the sink fails? The old suspend cause will be incorrect, so we
436      * can't use that. On the other hand, if we set no suspend cause (as is the
437      * case currently), then it looks strange to have a sink suspended without
438      * any cause. It might be a good idea to add a new "resume failed" suspend
439      * cause, or it might just add unnecessary complexity, given that the
440      * current approach of not setting any suspend cause works well enough. */
441 
442     if (s->set_state_in_main_thread) {
443         if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
444             /* set_state_in_main_thread() is allowed to fail only when resuming. */
445             pa_assert(resuming);
446 
447             /* If resuming fails, we set the state to SUSPENDED and
448              * suspend_cause to 0. */
449             state = PA_SINK_SUSPENDED;
450             suspend_cause = 0;
451             state_changed = false;
452             suspend_cause_changed = suspend_cause != s->suspend_cause;
453             resuming = false;
454 
455             /* We know the state isn't changing. If the suspend cause isn't
456              * changing either, then there's nothing more to do. */
457             if (!suspend_cause_changed)
458                 return ret;
459         }
460     }
461 
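    /* If an asyncmsgq is set up, forward the new state to the IO thread as well. */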
462     if (s->asyncmsgq) {
463         struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
464 
465         if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
466             /* SET_STATE is allowed to fail only when resuming. */
467             pa_assert(resuming);
468 
469             if (s->set_state_in_main_thread)
470                 s->set_state_in_main_thread(s, PA_SINK_SUSPENDED, 0);
471 
472             /* If resuming fails, we set the state to SUSPENDED and
473              * suspend_cause to 0. */
474             state = PA_SINK_SUSPENDED;
475             suspend_cause = 0;
476             state_changed = false;
477             suspend_cause_changed = suspend_cause != s->suspend_cause;
478             resuming = false;
479 
480             /* We know the state isn't changing. If the suspend cause isn't
481              * changing either, then there's nothing more to do. */
482             if (!suspend_cause_changed)
483                 return ret;
484         }
485     }
486 
487     old_suspend_cause = s->suspend_cause;
488     if (suspend_cause_changed) {
489         char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
490         char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
491 
492         pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
493                      pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
494         s->suspend_cause = suspend_cause;
495     }
496 
497     old_state = s->state;
498     if (state_changed) {
499         pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
500         s->state = state;
501 
502         /* If we enter UNLINKED state, then we don't send change notifications.
503          * pa_sink_unlink() will send unlink notifications instead. */
504         if (state != PA_SINK_UNLINKED) {
505             pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
506             pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
507         }
508     }
509 
510     if (suspending || resuming || suspend_cause_changed) {
511         pa_sink_input *i;
512         uint32_t idx;
513 
514         /* We're suspending or resuming, tell everyone about it */
515 
516         PA_IDXSET_FOREACH(i, s->inputs, idx)
517             if (s->state == PA_SINK_SUSPENDED &&
518                 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
519                 pa_sink_input_kill(i);
520             else if (i->suspend)
521                 i->suspend(i, old_state, old_suspend_cause);
522     }
523 
524     if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
525         pa_source_sync_suspend(s->monitor_source);
526 
527     return ret;
528 }
529 
530 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
531     pa_assert(s);
532 
533     s->get_volume = cb;
534 }
535 
536 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
537     pa_sink_flags_t flags;
538 
539     pa_assert(s);
540     pa_assert(!s->write_volume || cb);
541 
542     s->set_volume = cb;
543 
544     /* Save the current flags so we can tell if they've changed */
545     flags = s->flags;
546 
547     if (cb) {
548         /* The sink implementor is responsible for setting decibel volume support */
549         s->flags |= PA_SINK_HW_VOLUME_CTRL;
550     } else {
551         s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
552         /* See note below in pa_sink_put() about volume sharing and decibel volumes */
553         pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
554     }
555 
556     /* If the flags have changed after init, let any clients know via a change event */
557     if (s->state != PA_SINK_INIT && flags != s->flags)
558         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
559 }
560 
561 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
562     pa_sink_flags_t flags;
563 
564     pa_assert(s);
565     pa_assert(!cb || s->set_volume);
566 
567     s->write_volume = cb;
568 
569     /* Save the current flags so we can tell if they've changed */
570     flags = s->flags;
571 
572     if (cb)
573         s->flags |= PA_SINK_DEFERRED_VOLUME;
574     else
575         s->flags &= ~PA_SINK_DEFERRED_VOLUME;
576 
577     /* If the flags have changed after init, let any clients know via a change event */
578     if (s->state != PA_SINK_INIT && flags != s->flags)
579         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
580 }
581 
582 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
583     pa_assert(s);
584 
585     s->get_mute = cb;
586 }
587 
588 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
589     pa_sink_flags_t flags;
590 
591     pa_assert(s);
592 
593     s->set_mute = cb;
594 
595     /* Save the current flags so we can tell if they've changed */
596     flags = s->flags;
597 
598     if (cb)
599         s->flags |= PA_SINK_HW_MUTE_CTRL;
600     else
601         s->flags &= ~PA_SINK_HW_MUTE_CTRL;
602 
603     /* If the flags have changed after init, let any clients know via a change event */
604     if (s->state != PA_SINK_INIT && flags != s->flags)
605         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
606 }
607 
608 static void enable_flat_volume(pa_sink *s, bool enable) {
609     pa_sink_flags_t flags;
610 
611     pa_assert(s);
612 
613     /* Always follow the overall user preference here */
614     enable = enable && s->core->flat_volumes;
615 
616     /* Save the current flags so we can tell if they've changed */
617     flags = s->flags;
618 
619     if (enable)
620         s->flags |= PA_SINK_FLAT_VOLUME;
621     else
622         s->flags &= ~PA_SINK_FLAT_VOLUME;
623 
624     /* If the flags have changed after init, let any clients know via a change event */
625     if (s->state != PA_SINK_INIT && flags != s->flags)
626         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
627 }
628 
629 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
630     pa_sink_flags_t flags;
631 
632     pa_assert(s);
633 
634     /* Save the current flags so we can tell if they've changed */
635     flags = s->flags;
636 
637     if (enable) {
638         s->flags |= PA_SINK_DECIBEL_VOLUME;
639         enable_flat_volume(s, true);
640     } else {
641         s->flags &= ~PA_SINK_DECIBEL_VOLUME;
642         enable_flat_volume(s, false);
643     }
644 
645     /* If the flags have changed after init, let any clients know via a change event */
646     if (s->state != PA_SINK_INIT && flags != s->flags)
647         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
648 }
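
/* A minimal wiring sketch (not part of this file): a sink driver typically
 * registers its callbacks between pa_sink_new() and pa_sink_put(). The
 * callback names and the userdata struct below are hypothetical:
 *
 *     s->userdata = u;
 *     pa_sink_set_asyncmsgq(s, u->thread_mq.inq);
 *     pa_sink_set_rtpoll(s, u->rtpoll);
 *     pa_sink_set_set_volume_callback(s, sink_set_volume_cb);
 *     pa_sink_set_set_mute_callback(s, sink_set_mute_cb);
 *     pa_sink_put(s);
 */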
649 
650 /* Called from main context */
651 void pa_sink_put(pa_sink* s) {
652     pa_sink_assert_ref(s);
653     pa_assert_ctl_context();
654 
655     pa_assert(s->state == PA_SINK_INIT);
656     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
657 
658     /* The following fields must be initialized properly when calling _put() */
659     pa_assert(s->asyncmsgq);
660     pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
661 
662     /* Generally, flags should be initialized via pa_sink_new(). As a
663      * special exception we allow some volume related flags to be set
664      * between _new() and _put() by the callback setter functions above.
665      *
666      * Thus we implement a couple safeguards here which ensure the above
667      * setters were used (or at least the implementor made manual changes
668      * in a compatible way).
669      *
670      * Note: All of these flags set here can change over the life time
671      * of the sink. */
672     pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
673     pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
674     pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
675 
676     /* XXX: Currently decibel volume is disabled for all sinks that use volume
677      * sharing. When the master sink supports decibel volume, it would be good
678      * to have the flag also in the filter sink, but currently we don't do that
679      * so that the flags of the filter sink never change when it's moved from
680      * a master sink to another. One solution for this problem would be to
681      * remove user-visible volume altogether from filter sinks when volume
682      * sharing is used, but the current approach was easier to implement... */
683     /* We always support decibel volumes in software, otherwise we leave it to
684      * the sink implementor to set this flag as needed.
685      *
686      * Note: This flag can also change over the life time of the sink. */
687     if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
688         pa_sink_enable_decibel_volume(s, true);
689         s->soft_volume = s->reference_volume;
690     }
691 
692     /* If the sink implementor supports dB volumes by itself, we should always
693      * try and enable flat volumes too */
694     if ((s->flags & PA_SINK_DECIBEL_VOLUME))
695         enable_flat_volume(s, true);
696 
697     if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
698         pa_sink *root_sink = pa_sink_get_master(s);
699 
700         pa_assert(root_sink);
701 
702         s->reference_volume = root_sink->reference_volume;
703         pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
704 
705         s->real_volume = root_sink->real_volume;
706         pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
707     } else
708         /* We assume that if the sink implementor changed the default
709          * volume they did so in real_volume, because that is the usual
710          * place where they are supposed to place their changes.  */
711         s->reference_volume = s->real_volume;
712 
713     s->thread_info.soft_volume = s->soft_volume;
714     s->thread_info.soft_muted = s->muted;
715     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
716 
717     pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
718               || (s->base_volume == PA_VOLUME_NORM
719                   && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
720     pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
721     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
722     pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
723     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
724 
725     pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
726     pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
727     pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
728 
729     if (s->suspend_cause)
730         pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
731     else
732         pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
733 
734     pa_source_put(s->monitor_source);
735 
736     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
737     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
738 
739     /* It's good to fire the SINK_PUT hook before updating the default sink,
740      * because module-switch-on-connect will set the new sink as the default
741      * sink, and if we were to call pa_core_update_default_sink() before that,
742      * the default sink might change twice, causing unnecessary stream moving. */
743 
744     pa_core_update_default_sink(s->core);
745 
746     pa_core_move_streams_to_newly_available_preferred_sink(s->core, s);
747 }
748 
749 /* Called from main context */
750 void pa_sink_unlink(pa_sink* s) {
751     bool linked;
752     pa_sink_input *i, PA_UNUSED *j = NULL;
753 
754     pa_sink_assert_ref(s);
755     pa_assert_ctl_context();
756 
757     /* Please note that pa_sink_unlink() does more than simply
758      * reversing pa_sink_put(). It also undoes the registrations
759      * already done in pa_sink_new()! */
760 
761     if (s->unlink_requested)
762         return;
763 
764     s->unlink_requested = true;
765 
766     linked = PA_SINK_IS_LINKED(s->state);
767 
768     if (linked)
769         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
770 
771     if (s->state != PA_SINK_UNLINKED)
772         pa_namereg_unregister(s->core, s->name);
773     pa_idxset_remove_by_data(s->core->sinks, s, NULL);
774 
775     pa_core_update_default_sink(s->core);
776 
777     if (linked && s->core->rescue_streams)
778 	pa_sink_move_streams_to_default_sink(s->core, s, false);
779 
780     if (s->card)
781         pa_idxset_remove_by_data(s->card->sinks, s, NULL);
782 
783     while ((i = pa_idxset_first(s->inputs, NULL))) {
784         pa_assert(i != j);
785         pa_sink_input_kill(i);
786         j = i;
787     }
788 
789     /* Unlink monitor source before unlinking the sink */
790     if (s->monitor_source)
791         pa_source_unlink(s->monitor_source);
792 
793     if (linked)
794         /* It's important to keep the suspend cause unchanged when unlinking,
795          * because if we remove the SESSION suspend cause here, the alsa sink
796          * will sync its volume with the hardware while another user is
797          * active, messing up the volume for that other user. */
798         sink_set_state(s, PA_SINK_UNLINKED, s->suspend_cause);
799     else
800         s->state = PA_SINK_UNLINKED;
801 
802     reset_callbacks(s);
803 
804     if (linked) {
805         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
806         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
807     }
808 }
809 
810 /* Called from main context */
811 static void sink_free(pa_object *o) {
812     pa_sink *s = PA_SINK(o);
813 
814     pa_assert(s);
815     pa_assert_ctl_context();
816     pa_assert(pa_sink_refcnt(s) == 0);
817     pa_assert(!PA_SINK_IS_LINKED(s->state));
818 
819     pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
820 
821     pa_sink_volume_change_flush(s);
822 
823     if (s->monitor_source) {
824         pa_source_unref(s->monitor_source);
825         s->monitor_source = NULL;
826     }
827 
828     pa_idxset_free(s->inputs, NULL);
829     pa_hashmap_free(s->thread_info.inputs);
830 
831     if (s->silence.memblock)
832         pa_memblock_unref(s->silence.memblock);
833 
834     pa_xfree(s->name);
835     pa_xfree(s->driver);
836 
837     if (s->proplist)
838         pa_proplist_free(s->proplist);
839 
840     if (s->ports)
841         pa_hashmap_free(s->ports);
842 
843     pa_xfree(s);
844 }
845 
846 /* Called from main context, and not while the IO thread is active, please */
847 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
848     pa_sink_assert_ref(s);
849     pa_assert_ctl_context();
850 
851     s->asyncmsgq = q;
852 
853     if (s->monitor_source)
854         pa_source_set_asyncmsgq(s->monitor_source, q);
855 }
856 
857 /* Called from main context, and not while the IO thread is active, please */
858 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
859     pa_sink_flags_t old_flags;
860     pa_sink_input *input;
861     uint32_t idx;
862 
863     pa_sink_assert_ref(s);
864     pa_assert_ctl_context();
865 
866     /* For now, allow only a minimal set of flags to be changed. */
867     pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
868 
869     old_flags = s->flags;
870     s->flags = (s->flags & ~mask) | (value & mask);
871 
872     if (s->flags == old_flags)
873         return;
874 
875     if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
876         pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
877 
878     if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
879         pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
880                      s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
881 
882     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
883     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
884 
885     if (s->monitor_source)
886         pa_source_update_flags(s->monitor_source,
887                                ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
888                                ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
889                                ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
890                                ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
891 
892     PA_IDXSET_FOREACH(input, s->inputs, idx) {
893         if (input->origin_sink)
894             pa_sink_update_flags(input->origin_sink, mask, value);
895     }
896 }
897 
898 /* Called from IO context, or before _put() from main context */
899 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
900     pa_sink_assert_ref(s);
901     pa_sink_assert_io_context(s);
902 
903     s->thread_info.rtpoll = p;
904 
905     if (s->monitor_source)
906         pa_source_set_rtpoll(s->monitor_source, p);
907 }
908 
909 /* Called from main context */
910 int pa_sink_update_status(pa_sink*s) {
911     pa_sink_assert_ref(s);
912     pa_assert_ctl_context();
913     pa_assert(PA_SINK_IS_LINKED(s->state));
914 
915     if (s->state == PA_SINK_SUSPENDED)
916         return 0;
917 
918     return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
919 }
920 
921 /* Called from main context */
922 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
923     pa_suspend_cause_t merged_cause;
924 
925     pa_sink_assert_ref(s);
926     pa_assert_ctl_context();
927     pa_assert(PA_SINK_IS_LINKED(s->state));
928     pa_assert(cause != 0);
929 
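    /* Suspend causes form a bitmask; the sink stays suspended as long as at
     * least one cause bit remains set. */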
930     if (suspend)
931         merged_cause = s->suspend_cause | cause;
932     else
933         merged_cause = s->suspend_cause & ~cause;
934 
935     if (merged_cause)
936         return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
937     else
938         return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
939 }
940 
941 /* Called from main context */
942 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
943     pa_sink_input *i, *n;
944     uint32_t idx;
945 
946     pa_sink_assert_ref(s);
947     pa_assert_ctl_context();
948     pa_assert(PA_SINK_IS_LINKED(s->state));
949 
950     if (!q)
951         q = pa_queue_new();
952 
953     for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
954         n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
955 
956         pa_sink_input_ref(i);
957 
958         if (pa_sink_input_start_move(i) >= 0)
959             pa_queue_push(q, i);
960         else
961             pa_sink_input_unref(i);
962     }
963 
964     return q;
965 }
966 
967 /* Called from main context */
968 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
969     pa_sink_input *i;
970 
971     pa_sink_assert_ref(s);
972     pa_assert_ctl_context();
973     pa_assert(PA_SINK_IS_LINKED(s->state));
974     pa_assert(q);
975 
976     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
977         if (PA_SINK_INPUT_IS_LINKED(i->state)) {
978             if (pa_sink_input_finish_move(i, s, save) < 0)
979                 pa_sink_input_fail_move(i);
980 
981         }
982         pa_sink_input_unref(i);
983     }
984 
985     pa_queue_free(q, NULL);
986 }
987 
988 /* Called from main context */
989 void pa_sink_move_all_fail(pa_queue *q) {
990     pa_sink_input *i;
991 
992     pa_assert_ctl_context();
993     pa_assert(q);
994 
995     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
996         pa_sink_input_fail_move(i);
997         pa_sink_input_unref(i);
998     }
999 
1000     pa_queue_free(q, NULL);
1001 }
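
/* A rough usage sketch (not part of this file): callers that need to detach
 * all streams temporarily typically pair these helpers like this, where
 * new_sink stands for whatever sink the streams should end up on:
 *
 *     pa_queue *q = pa_sink_move_all_start(s, NULL);
 *     ... tear down or replace the sink ...
 *     if (new_sink)
 *         pa_sink_move_all_finish(new_sink, q, false);
 *     else
 *         pa_sink_move_all_fail(q);
 */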
1002 
1003  /* Called from IO thread context */
1004 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1005     pa_sink_input *i;
1006     void *state = NULL;
1007     size_t result = 0;
1008 
1009     pa_sink_assert_ref(s);
1010     pa_sink_assert_io_context(s);
1011 
1012     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1013         size_t uf = i->thread_info.underrun_for_sink;
1014 
1015         /* Propagate down the filter tree */
1016         if (i->origin_sink) {
1017             size_t filter_result, left_to_play_origin;
1018 
1019             /* The combine sink sets i->origin_sink but has a different threading model
1020              * than the filter sinks. Therefore the recursion below may not be executed
1021              * because pa_sink_process_input_underruns() was not called in the thread
1022              * context of the origin sink.
1023              * FIXME: It is unclear if some other kind of recursion would be necessary
1024              * for the combine sink. */
1025             if (!i->module || !pa_safe_streq(i->module->name, "module-combine-sink")) {
1026 
1027                 /* The recursive call works in the origin sink domain ... */
1028                 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1029 
1030                 /* .. and returns the time to sleep before waking up. We need the
1031                  * underrun duration for comparisons, so we undo the subtraction on
1032                  * the return value... */
1033                 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1034 
1035                 /* ... and convert it back to the master sink domain */
1036                 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1037 
1038                 /* Remember the longest underrun so far */
1039                 if (filter_result > result)
1040                     result = filter_result;
1041             }
1042         }
1043 
1044         if (uf == 0) {
1045             /* No underrun here, move on */
1046             continue;
1047         } else if (uf >= left_to_play) {
1048             /* The sink has possibly consumed all the data the sink input provided */
1049             pa_sink_input_process_underrun(i);
1050         } else if (uf > result) {
1051             /* Remember the longest underrun so far */
1052             result = uf;
1053         }
1054     }
1055 
1056     if (result > 0)
1057         pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1058                 (long) result, (long) left_to_play - result);
1059     return left_to_play - result;
1060 }
1061 
1062 /* Called from IO thread context */
1063 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1064     pa_sink_input *i;
1065     void *state = NULL;
1066 
1067     pa_sink_assert_ref(s);
1068     pa_sink_assert_io_context(s);
1069     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1070 
1071     /* If nobody requested this and this is actually no real rewind,
1072      * then we can short-cut it. Please note that this means that
1073      * not all rewind requests triggered upstream will always be
1074      * translated into actual requests! */
1075     if (!s->thread_info.rewind_requested && nbytes <= 0)
1076         return;
1077 
1078     s->thread_info.rewind_nbytes = 0;
1079     s->thread_info.rewind_requested = false;
1080 
1081     if (nbytes > 0) {
1082         pa_log_debug("Processing rewind...");
1083         if (s->flags & PA_SINK_DEFERRED_VOLUME)
1084             pa_sink_volume_change_rewind(s, nbytes);
1085     }
1086 
1087     /* Save rewind value */
1088     s->thread_info.last_rewind_nbytes = nbytes;
1089 
1090     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1091         pa_sink_input_assert_ref(i);
1092         pa_sink_input_process_rewind(i, nbytes);
1093     }
1094 
1095     if (nbytes > 0) {
1096         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1097             pa_source_process_rewind(s->monitor_source, nbytes);
1098     }
1099 }
1100 
1101 /* Called from IO thread context */
1102 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1103     pa_sink_input *i;
1104     unsigned n = 0;
1105     void *state = NULL;
1106     size_t mixlength = *length;
1107 
1108     pa_sink_assert_ref(s);
1109     pa_sink_assert_io_context(s);
1110     pa_assert(info);
1111 
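    /* Peek one chunk from every input; *length is shrunk to the shortest
     * chunk seen so that all inputs can be mixed over the same span, and
     * chunks that are pure silence are dropped right away. */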
1112     while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1113         pa_sink_input_assert_ref(i);
1114 
1115         pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1116 
1117         if (mixlength == 0 || info->chunk.length < mixlength)
1118             mixlength = info->chunk.length;
1119 
1120         if (pa_memblock_is_silence(info->chunk.memblock)) {
1121             pa_memblock_unref(info->chunk.memblock);
1122             continue;
1123         }
1124 
1125         info->userdata = pa_sink_input_ref(i);
1126 
1127         pa_assert(info->chunk.memblock);
1128         pa_assert(info->chunk.length > 0);
1129 
1130         info++;
1131         n++;
1132         maxinfo--;
1133     }
1134 
1135     if (mixlength > 0)
1136         *length = mixlength;
1137 
1138     return n;
1139 }
1140 
1141 /* Called from IO thread context */
1142 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1143     pa_sink_input *i;
1144     void *state;
1145     unsigned p = 0;
1146     unsigned n_unreffed = 0;
1147 
1148     pa_sink_assert_ref(s);
1149     pa_sink_assert_io_context(s);
1150     pa_assert(result);
1151     pa_assert(result->memblock);
1152     pa_assert(result->length > 0);
1153 
1154     /* We optimize for the case where the order of the inputs has not changed */
1155 
1156     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1157         unsigned j;
1158         pa_mix_info* m = NULL;
1159 
1160         pa_sink_input_assert_ref(i);
1161 
1162         /* Let's try to find the matching entry in the pa_mix_info array */
1163         for (j = 0; j < n; j ++) {
1164 
1165             if (info[p].userdata == i) {
1166                 m = info + p;
1167                 break;
1168             }
1169 
1170             p++;
1171             if (p >= n)
1172                 p = 0;
1173         }
1174 
1175         /* Drop read data */
1176         pa_sink_input_drop(i, result->length);
1177 
1178         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1179 
1180             if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1181                 void *ostate = NULL;
1182                 pa_source_output *o;
1183                 pa_memchunk c;
1184 
1185                 if (m && m->chunk.memblock) {
1186                     c = m->chunk;
1187                     pa_memblock_ref(c.memblock);
1188                     pa_assert(result->length <= c.length);
1189                     c.length = result->length;
1190 
1191                     pa_memchunk_make_writable(&c, 0);
1192                     pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1193                 } else {
1194                     c = s->silence;
1195                     pa_memblock_ref(c.memblock);
1196                     pa_assert(result->length <= c.length);
1197                     c.length = result->length;
1198                 }
1199 
1200                 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1201                     pa_source_output_assert_ref(o);
1202                     pa_assert(o->direct_on_input == i);
1203                     pa_source_post_direct(s->monitor_source, o, &c);
1204                 }
1205 
1206                 pa_memblock_unref(c.memblock);
1207             }
1208         }
1209 
1210         if (m) {
1211             if (m->chunk.memblock) {
1212                 pa_memblock_unref(m->chunk.memblock);
1213                 pa_memchunk_reset(&m->chunk);
1214             }
1215 
1216             pa_sink_input_unref(m->userdata);
1217             m->userdata = NULL;
1218 
1219             n_unreffed += 1;
1220         }
1221     }
1222 
1223     /* Now drop references to entries that are included in the
1224      * pa_mix_info array but don't exist anymore */
1225 
1226     if (n_unreffed < n) {
1227         for (; n > 0; info++, n--) {
1228             if (info->userdata)
1229                 pa_sink_input_unref(info->userdata);
1230             if (info->chunk.memblock)
1231                 pa_memblock_unref(info->chunk.memblock);
1232         }
1233     }
1234 
1235     if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1236         pa_source_post(s->monitor_source, result);
1237 }
1238 
1239 /* Called from IO thread context */
1240 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1241     pa_mix_info info[MAX_MIX_CHANNELS];
1242     unsigned n;
1243     size_t block_size_max;
1244 
1245     pa_sink_assert_ref(s);
1246     pa_sink_assert_io_context(s);
1247     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1248     pa_assert(pa_frame_aligned(length, &s->sample_spec));
1249     pa_assert(result);
1250 
1251     pa_assert(!s->thread_info.rewind_requested);
1252     pa_assert(s->thread_info.rewind_nbytes == 0);
1253 
1254     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1255         result->memblock = pa_memblock_ref(s->silence.memblock);
1256         result->index = s->silence.index;
1257         result->length = PA_MIN(s->silence.length, length);
1258         return;
1259     }
1260 
1261     pa_sink_ref(s);
1262 
1263     if (length <= 0)
1264         length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1265 
1266     block_size_max = pa_mempool_block_size_max(s->core->mempool);
1267     if (length > block_size_max)
1268         length = pa_frame_align(block_size_max, &s->sample_spec);
1269 
1270     pa_assert(length > 0);
1271 
1272     n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1273 
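    /* Three cases: with no inputs we hand out the cached silence block, a
     * single input can be passed through (applying volume in place if
     * needed), and multiple inputs are mixed into a freshly allocated
     * memblock. */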
1274     if (n == 0) {
1275 
1276         *result = s->silence;
1277         pa_memblock_ref(result->memblock);
1278 
1279         if (result->length > length)
1280             result->length = length;
1281 
1282     } else if (n == 1) {
1283         pa_cvolume volume;
1284 
1285         *result = info[0].chunk;
1286         pa_memblock_ref(result->memblock);
1287 
1288         if (result->length > length)
1289             result->length = length;
1290 
1291         pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1292 
1293         if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1294             pa_memblock_unref(result->memblock);
1295             pa_silence_memchunk_get(&s->core->silence_cache,
1296                                     s->core->mempool,
1297                                     result,
1298                                     &s->sample_spec,
1299                                     result->length);
1300         } else if (!pa_cvolume_is_norm(&volume)) {
1301             pa_memchunk_make_writable(result, 0);
1302             pa_volume_memchunk(result, &s->sample_spec, &volume);
1303         }
1304     } else {
1305         void *ptr;
1306         result->memblock = pa_memblock_new(s->core->mempool, length);
1307 
1308         ptr = pa_memblock_acquire(result->memblock);
1309         result->length = pa_mix(info, n,
1310                                 ptr, length,
1311                                 &s->sample_spec,
1312                                 &s->thread_info.soft_volume,
1313                                 s->thread_info.soft_muted);
1314         pa_memblock_release(result->memblock);
1315 
1316         result->index = 0;
1317     }
1318 
1319     inputs_drop(s, info, n, result);
1320 
1321     pa_sink_unref(s);
1322 }
1323 
1324 /* Called from IO thread context */
1325 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1326     pa_mix_info info[MAX_MIX_CHANNELS];
1327     unsigned n;
1328     size_t length, block_size_max;
1329 
1330     pa_sink_assert_ref(s);
1331     pa_sink_assert_io_context(s);
1332     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1333     pa_assert(target);
1334     pa_assert(target->memblock);
1335     pa_assert(target->length > 0);
1336     pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1337 
1338     pa_assert(!s->thread_info.rewind_requested);
1339     pa_assert(s->thread_info.rewind_nbytes == 0);
1340 
1341     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1342         pa_silence_memchunk(target, &s->sample_spec);
1343         return;
1344     }
1345 
1346     pa_sink_ref(s);
1347 
1348     length = target->length;
1349     block_size_max = pa_mempool_block_size_max(s->core->mempool);
1350     if (length > block_size_max)
1351         length = pa_frame_align(block_size_max, &s->sample_spec);
1352 
1353     pa_assert(length > 0);
1354 
1355     n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1356 
1357     if (n == 0) {
1358         if (target->length > length)
1359             target->length = length;
1360 
1361         pa_silence_memchunk(target, &s->sample_spec);
1362     } else if (n == 1) {
1363         pa_cvolume volume;
1364 
1365         if (target->length > length)
1366             target->length = length;
1367 
1368         pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1369 
1370         if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1371             pa_silence_memchunk(target, &s->sample_spec);
1372         else {
1373             pa_memchunk vchunk;
1374 
1375             vchunk = info[0].chunk;
1376             pa_memblock_ref(vchunk.memblock);
1377 
1378             if (vchunk.length > length)
1379                 vchunk.length = length;
1380 
1381             if (!pa_cvolume_is_norm(&volume)) {
1382                 pa_memchunk_make_writable(&vchunk, 0);
1383                 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1384             }
1385 
1386             pa_memchunk_memcpy(target, &vchunk);
1387             pa_memblock_unref(vchunk.memblock);
1388         }
1389 
1390     } else {
1391         void *ptr;
1392 
1393         ptr = pa_memblock_acquire(target->memblock);
1394 
1395         target->length = pa_mix(info, n,
1396                                 (uint8_t*) ptr + target->index, length,
1397                                 &s->sample_spec,
1398                                 &s->thread_info.soft_volume,
1399                                 s->thread_info.soft_muted);
1400 
1401         pa_memblock_release(target->memblock);
1402     }
1403 
1404     inputs_drop(s, info, n, target);
1405 
1406     pa_sink_unref(s);
1407 }
1408 
1409 /* Called from IO thread context */
1410 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1411     pa_memchunk chunk;
1412     size_t l, d;
1413 
1414     pa_sink_assert_ref(s);
1415     pa_sink_assert_io_context(s);
1416     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1417     pa_assert(target);
1418     pa_assert(target->memblock);
1419     pa_assert(target->length > 0);
1420     pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1421 
1422     pa_assert(!s->thread_info.rewind_requested);
1423     pa_assert(s->thread_info.rewind_nbytes == 0);
1424 
1425     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1426         pa_silence_memchunk(target, &s->sample_spec);
1427         return;
1428     }
1429 
1430     pa_sink_ref(s);
1431 
1432     l = target->length;
1433     d = 0;
1434     while (l > 0) {
1435         chunk = *target;
1436         chunk.index += d;
1437         chunk.length -= d;
1438 
1439         pa_sink_render_into(s, &chunk);
1440 
1441         d += chunk.length;
1442         l -= chunk.length;
1443     }
1444 
1445     pa_sink_unref(s);
1446 }
1447 
1448 /* Called from IO thread context */
1449 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1450     pa_sink_assert_ref(s);
1451     pa_sink_assert_io_context(s);
1452     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1453     pa_assert(length > 0);
1454     pa_assert(pa_frame_aligned(length, &s->sample_spec));
1455     pa_assert(result);
1456 
1457     pa_assert(!s->thread_info.rewind_requested);
1458     pa_assert(s->thread_info.rewind_nbytes == 0);
1459 
1460     pa_sink_ref(s);
1461 
1462     pa_sink_render(s, length, result);
1463 
1464     if (result->length < length) {
1465         pa_memchunk chunk;
1466 
1467         pa_memchunk_make_writable(result, length);
1468 
1469         chunk.memblock = result->memblock;
1470         chunk.index = result->index + result->length;
1471         chunk.length = length - result->length;
1472 
1473         pa_sink_render_into_full(s, &chunk);
1474 
1475         result->length = length;
1476     }
1477 
1478     pa_sink_unref(s);
1479 }
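
/* Illustrative usage sketch (not part of the original file): a sink module's
 * IO thread typically consumes rendered audio roughly like this, where
 * nbytes is the device buffer size and write_to_device() stands in for a
 * hypothetical device-specific helper:
 *
 *     pa_memchunk chunk;
 *     void *p;
 *
 *     pa_sink_render_full(s, nbytes, &chunk);
 *     p = pa_memblock_acquire(chunk.memblock);
 *     write_to_device((const uint8_t *) p + chunk.index, chunk.length);
 *     pa_memblock_release(chunk.memblock);
 *     pa_memblock_unref(chunk.memblock);
 */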
1480 
1481 /* Called from main thread */
1482 void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1483     pa_sample_spec desired_spec;
1484     uint32_t default_rate = s->default_sample_rate;
1485     uint32_t alternate_rate = s->alternate_sample_rate;
1486     uint32_t idx;
1487     pa_sink_input *i;
1488     bool default_rate_is_usable = false;
1489     bool alternate_rate_is_usable = false;
1490     bool avoid_resampling = s->avoid_resampling;
1491 
1492     if (pa_sample_spec_equal(spec, &s->sample_spec))
1493         return;
1494 
1495     if (!s->reconfigure)
1496         return;
1497 
1498     if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1499         pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1500         return;
1501     }
1502 
1503     if (PA_SINK_IS_RUNNING(s->state)) {
1504         pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
1505                     pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1506         return;
1507     }
1508 
1509     if (s->monitor_source) {
1510         if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1511             pa_log_info("Cannot update sample spec, monitor source is RUNNING");
1512             return;
1513         }
1514     }
1515 
1516     if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1517         return;
1518 
1519     desired_spec = s->sample_spec;
1520 
1521     if (passthrough) {
1522         /* We have to try to use the sink input format and rate */
1523         desired_spec.format = spec->format;
1524         desired_spec.rate = spec->rate;
1525 
1526     } else if (avoid_resampling) {
1527         /* We just try to set the sink input's sample rate if it's not too low */
1528         if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1529             desired_spec.rate = spec->rate;
1530         desired_spec.format = spec->format;
1531 
1532     } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1533         /* We can directly try to use this rate */
1534         desired_spec.rate = spec->rate;
1535 
1536     }
1537 
1538     if (desired_spec.rate != spec->rate) {
1539         /* See if we can pick a rate that results in less resampling effort */
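        /* Illustrative example (assumed values, not taken from this file):
         * with default_rate = 48000, alternate_rate = 44100 and a stream at
         * 22050 Hz, only the alternate rate shares the 11025 Hz factor with
         * the stream, so the alternate rate is chosen and resampling stays
         * within the 44.1 kHz family. */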
1540         if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1541             default_rate_is_usable = true;
1542         if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1543             default_rate_is_usable = true;
1544         if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1545             alternate_rate_is_usable = true;
1546         if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1547             alternate_rate_is_usable = true;
1548 
1549         if (alternate_rate_is_usable && !default_rate_is_usable)
1550             desired_spec.rate = alternate_rate;
1551         else
1552             desired_spec.rate = default_rate;
1553     }
1554 
1555     if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1556         return;
1557 
1558     if (!passthrough && pa_sink_used_by(s) > 0)
1559         return;
1560 
1561     pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
1562                  s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1563     pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1564 
1565     s->reconfigure(s, &desired_spec, passthrough);
1566 
1567     /* update monitor source as well */
1568     if (s->monitor_source && !passthrough)
1569         pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
1570     pa_log_info("Reconfigured successfully");
1571 
1572     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1573         if (i->state == PA_SINK_INPUT_CORKED)
1574             pa_sink_input_update_resampler(i, true);
1575     }
1576 
1577     pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1578 }
1579 
1580 /* Called from main thread */
1581 size_t pa_sink_get_last_rewind(pa_sink *s) {
1582     size_t rewind_bytes;
1583 
1584     pa_sink_assert_ref(s);
1585     pa_assert_ctl_context();
1586     pa_assert(PA_SINK_IS_LINKED(s->state));
1587 
1588     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LAST_REWIND, &rewind_bytes, 0, NULL) == 0);
1589 
1590     return rewind_bytes;
1591 }
1592 
1593 /* Called from main thread */
1594 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1595     int64_t usec = 0;
1596 
1597     pa_sink_assert_ref(s);
1598     pa_assert_ctl_context();
1599     pa_assert(PA_SINK_IS_LINKED(s->state));
1600 
1601     /* The returned value is supposed to be in the time domain of the sound card! */
1602 
1603     if (s->state == PA_SINK_SUSPENDED)
1604         return 0;
1605 
1606     if (!(s->flags & PA_SINK_LATENCY))
1607         return 0;
1608 
1609     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1610 
1611     /* the return value is unsigned, so check that the offset can be added to usec without
1612      * underflowing. */
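    /* For example (assumed numbers): with usec = 1000 and
     * port_latency_offset = -1500, the negated offset (1500) exceeds usec,
     * so the result is clamped to 0 instead of wrapping around. */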
1613     if (-s->port_latency_offset <= usec)
1614         usec += s->port_latency_offset;
1615     else
1616         usec = 0;
1617 
1618     return (pa_usec_t)usec;
1619 }
1620 
1621 /* Called from IO thread */
1622 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1623     int64_t usec = 0;
1624     pa_msgobject *o;
1625 
1626     pa_sink_assert_ref(s);
1627     pa_sink_assert_io_context(s);
1628     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1629 
1630     /* The returned value is supposed to be in the time domain of the sound card! */
1631 
1632     if (s->thread_info.state == PA_SINK_SUSPENDED)
1633         return 0;
1634 
1635     if (!(s->flags & PA_SINK_LATENCY))
1636         return 0;
1637 
1638     o = PA_MSGOBJECT(s);
1639 
1640     /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1641 
1642     o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1643 
1644     /* If allow_negative is false, the call should only return non-negative values. */
1645     usec += s->thread_info.port_latency_offset;
1646     if (!allow_negative && usec < 0)
1647         usec = 0;
1648 
1649     return usec;
1650 }
1651 
1652 /* Called from the main thread (and also from the IO thread while the main
1653  * thread is waiting).
1654  *
1655  * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1656  * set. Instead, flat volume mode is detected by checking whether the root sink
1657  * has the flag set. */
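/* For example (illustrative): for a filter sink stacked on top of a hardware
 * sink, this walks the input_to_master chain down to the hardware sink and
 * reports whether that sink has PA_SINK_FLAT_VOLUME set. */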
1658 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1659     pa_sink_assert_ref(s);
1660 
1661     s = pa_sink_get_master(s);
1662 
1663     if (PA_LIKELY(s))
1664         return (s->flags & PA_SINK_FLAT_VOLUME);
1665     else
1666         return false;
1667 }
1668 
1669 /* Check if the sink has a virtual sink attached.
1670  * Called from the IO thread. */
1671 bool pa_sink_has_filter_attached(pa_sink *s) {
1672     bool vsink_attached = false;
1673     void *state = NULL;
1674     pa_sink_input *i;
1675 
1676     pa_assert(s);
1677 
1678     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1679         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1680             if (!i->origin_sink)
1681                 continue;
1682 
1683             vsink_attached = true;
1684             break;
1685         }
1686     }
1687     return vsink_attached;
1688 }
1689 
1690 /* Called from the main thread (and also from the IO thread while the main
1691  * thread is waiting). */
1692 pa_sink *pa_sink_get_master(pa_sink *s) {
1693     pa_sink_assert_ref(s);
1694 
1695     while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1696         if (PA_UNLIKELY(!s->input_to_master))
1697             return NULL;
1698 
1699         s = s->input_to_master->sink;
1700     }
1701 
1702     return s;
1703 }
1704 
1705 /* Called from main context */
1706 bool pa_sink_is_filter(pa_sink *s) {
1707     pa_sink_assert_ref(s);
1708 
1709     return (s->input_to_master != NULL);
1710 }
1711 
1712 /* Called from main context */
1713 bool pa_sink_is_passthrough(pa_sink *s) {
1714     pa_sink_input *alt_i;
1715     uint32_t idx;
1716 
1717     pa_sink_assert_ref(s);
1718 
1719     /* one and only one PASSTHROUGH input can possibly be connected */
1720     if (pa_idxset_size(s->inputs) == 1) {
1721         alt_i = pa_idxset_first(s->inputs, &idx);
1722 
1723         if (pa_sink_input_is_passthrough(alt_i))
1724             return true;
1725     }
1726 
1727     return false;
1728 }
1729 
1730 /* Called from main context */
1731 void pa_sink_enter_passthrough(pa_sink *s) {
1732     pa_cvolume volume;
1733 
1734     /* The sink implementation is reconfigured for passthrough in
1735      * pa_sink_reconfigure(). This function sets the PA core objects to
1736      * passthrough mode. */
1737 
1738     /* disable the monitor in passthrough mode */
1739     if (s->monitor_source) {
1740         pa_log_debug("Suspending monitor source %s, because the sink is entering passthrough mode.", s->monitor_source->name);
1741         pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1742     }
1743 
1744     /* set the volume to NORM */
1745     s->saved_volume = *pa_sink_get_volume(s, true);
1746     s->saved_save_volume = s->save_volume;
1747 
1748     pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1749     pa_sink_set_volume(s, &volume, true, false);
1750 
1751     pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1752 }
1753 
1754 /* Called from main context */
1755 void pa_sink_leave_passthrough(pa_sink *s) {
1756     /* Unsuspend monitor */
1757     if (s->monitor_source) {
1758         pa_log_debug("Resuming monitor source %s, because the sink is leaving passthrough mode.", s->monitor_source->name);
1759         pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1760     }
1761 
1762     /* Restore sink volume to what it was before we entered passthrough mode */
1763     pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1764 
1765     pa_cvolume_init(&s->saved_volume);
1766     s->saved_save_volume = false;
1767 
1768 }
1769 
1770 /* Called from main context. */
1771 static void compute_reference_ratio(pa_sink_input *i) {
1772     unsigned c = 0;
1773     pa_cvolume remapped;
1774     pa_cvolume ratio;
1775 
1776     pa_assert(i);
1777     pa_assert(pa_sink_flat_volume_enabled(i->sink));
1778 
1779     /*
1780      * Calculates the reference ratio from the sink's reference
1781      * volume. This basically calculates:
1782      *
1783      * i->reference_ratio = i->volume / i->sink->reference_volume
1784      */
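    /* Numerical sketch (assumed values): if the remapped sink reference
     * volume on a channel is 0.8 * PA_VOLUME_NORM and the input volume is
     * 0.4 * PA_VOLUME_NORM, the reference ratio for that channel works out
     * to roughly 0.5 * PA_VOLUME_NORM, i.e. the stream sits at about half
     * of the sink's reference volume. */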
1785 
1786     remapped = i->sink->reference_volume;
1787     pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1788 
1789     ratio = i->reference_ratio;
1790 
1791     for (c = 0; c < i->sample_spec.channels; c++) {
1792 
1793         /* We don't update when the sink volume is 0 anyway */
1794         if (remapped.values[c] <= PA_VOLUME_MUTED)
1795             continue;
1796 
1797         /* Don't update the reference ratio unless necessary */
1798         if (pa_sw_volume_multiply(
1799                     ratio.values[c],
1800                     remapped.values[c]) == i->volume.values[c])
1801             continue;
1802 
1803         ratio.values[c] = pa_sw_volume_divide(
1804                 i->volume.values[c],
1805                 remapped.values[c]);
1806     }
1807 
1808     pa_sink_input_set_reference_ratio(i, &ratio);
1809 }
1810 
1811 /* Called from main context. Only called for the root sink in volume sharing
1812  * cases, except for internal recursive calls. */
1813 static void compute_reference_ratios(pa_sink *s) {
1814     uint32_t idx;
1815     pa_sink_input *i;
1816 
1817     pa_sink_assert_ref(s);
1818     pa_assert_ctl_context();
1819     pa_assert(PA_SINK_IS_LINKED(s->state));
1820     pa_assert(pa_sink_flat_volume_enabled(s));
1821 
1822     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1823         compute_reference_ratio(i);
1824 
1825         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1826                 && PA_SINK_IS_LINKED(i->origin_sink->state))
1827             compute_reference_ratios(i->origin_sink);
1828     }
1829 }
1830 
1831 /* Called from main context. Only called for the root sink in volume sharing
1832  * cases, except for internal recursive calls. */
1833 static void compute_real_ratios(pa_sink *s) {
1834     pa_sink_input *i;
1835     uint32_t idx;
1836 
1837     pa_sink_assert_ref(s);
1838     pa_assert_ctl_context();
1839     pa_assert(PA_SINK_IS_LINKED(s->state));
1840     pa_assert(pa_sink_flat_volume_enabled(s));
1841 
1842     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1843         unsigned c;
1844         pa_cvolume remapped;
1845 
1846         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1847             /* The origin sink uses volume sharing, so this input's real ratio
1848              * is handled as a special case - the real ratio must be 0 dB, and
1849              * as a result i->soft_volume must equal i->volume_factor. */
1850             pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1851             i->soft_volume = i->volume_factor;
1852 
1853             if (PA_SINK_IS_LINKED(i->origin_sink->state))
1854                 compute_real_ratios(i->origin_sink);
1855 
1856             continue;
1857         }
1858 
1859         /*
1860          * This basically calculates:
1861          *
1862          * i->real_ratio := i->volume / s->real_volume
1863          * i->soft_volume := i->real_ratio * i->volume_factor
1864          */
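        /* Sketch with assumed values: if the remapped real volume of the
         * sink is 0.5 * PA_VOLUME_NORM on a channel and i->volume is
         * 0.25 * PA_VOLUME_NORM, real_ratio comes out near
         * 0.5 * PA_VOLUME_NORM; with a volume_factor of 0.5 * PA_VOLUME_NORM
         * the resulting soft_volume is then about 0.25 * PA_VOLUME_NORM. */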
1865 
1866         remapped = s->real_volume;
1867         pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1868 
1869         i->real_ratio.channels = i->sample_spec.channels;
1870         i->soft_volume.channels = i->sample_spec.channels;
1871 
1872         for (c = 0; c < i->sample_spec.channels; c++) {
1873 
1874             if (remapped.values[c] <= PA_VOLUME_MUTED) {
1875                 /* We leave i->real_ratio untouched */
1876                 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1877                 continue;
1878             }
1879 
1880             /* Don't lose accuracy unless necessary */
1881             if (pa_sw_volume_multiply(
1882                         i->real_ratio.values[c],
1883                         remapped.values[c]) != i->volume.values[c])
1884 
1885                 i->real_ratio.values[c] = pa_sw_volume_divide(
1886                         i->volume.values[c],
1887                         remapped.values[c]);
1888 
1889             i->soft_volume.values[c] = pa_sw_volume_multiply(
1890                     i->real_ratio.values[c],
1891                     i->volume_factor.values[c]);
1892         }
1893 
1894         /* We don't copy the soft_volume to the thread_info data
1895          * here. That must be done by the caller */
1896     }
1897 }
1898 
1899 static pa_cvolume *cvolume_remap_minimal_impact(
1900         pa_cvolume *v,
1901         const pa_cvolume *template,
1902         const pa_channel_map *from,
1903         const pa_channel_map *to) {
1904 
1905     pa_cvolume t;
1906 
1907     pa_assert(v);
1908     pa_assert(template);
1909     pa_assert(from);
1910     pa_assert(to);
1911     pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1912     pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1913 
1914     /* Much like pa_cvolume_remap(), but tries to minimize impact when
1915      * mapping from sink input to sink volumes:
1916      *
1917      * If template is a possible remapping from v it is used instead
1918      * of remapping anew.
1919      *
1920      * If the channel maps don't match we set an all-channel volume on
1921      * the sink to ensure that changing a volume on one stream has no
1922      * effect that cannot be compensated for in another stream that
1923      * does not have the same channel map as the sink. */
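    /* Example (assumed): when remapping a stereo stream volume onto a 5.1
     * sink, the previous sink volume passed in as "template" is reused if it
     * still maps back to the stream volume; otherwise every sink channel is
     * simply set to the stream's loudest channel volume. */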
1924 
1925     if (pa_channel_map_equal(from, to))
1926         return v;
1927 
1928     t = *template;
1929     if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1930         *v = *template;
1931         return v;
1932     }
1933 
1934     pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1935     return v;
1936 }
1937 
1938 /* Called from main thread. Only called for the root sink in volume sharing
1939  * cases, except for internal recursive calls. */
1940 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1941     pa_sink_input *i;
1942     uint32_t idx;
1943 
1944     pa_sink_assert_ref(s);
1945     pa_assert(max_volume);
1946     pa_assert(channel_map);
1947     pa_assert(pa_sink_flat_volume_enabled(s));
1948 
1949     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1950         pa_cvolume remapped;
1951 
1952         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1953             if (PA_SINK_IS_LINKED(i->origin_sink->state))
1954                 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1955 
1956             /* Ignore this input. The origin sink uses volume sharing, so this
1957              * input's volume will be set to be equal to the root sink's real
1958              * volume. Obviously this input's current volume must not then
1959              * affect what the root sink's real volume will be. */
1960             continue;
1961         }
1962 
1963         remapped = i->volume;
1964         cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1965         pa_cvolume_merge(max_volume, max_volume, &remapped);
1966     }
1967 }
1968 
1969 /* Called from main thread. Only called for the root sink in volume sharing
1970  * cases, except for internal recursive calls. */
1971 static bool has_inputs(pa_sink *s) {
1972     pa_sink_input *i;
1973     uint32_t idx;
1974 
1975     pa_sink_assert_ref(s);
1976 
1977     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1978         if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1979             return true;
1980     }
1981 
1982     return false;
1983 }
1984 
1985 /* Called from main thread. Only called for the root sink in volume sharing
1986  * cases, except for internal recursive calls. */
1987 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1988     pa_sink_input *i;
1989     uint32_t idx;
1990 
1991     pa_sink_assert_ref(s);
1992     pa_assert(new_volume);
1993     pa_assert(channel_map);
1994 
1995     s->real_volume = *new_volume;
1996     pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1997 
1998     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1999         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2000             if (pa_sink_flat_volume_enabled(s)) {
2001                 pa_cvolume new_input_volume;
2002 
2003                 /* Follow the root sink's real volume. */
2004                 new_input_volume = *new_volume;
2005                 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2006                 pa_sink_input_set_volume_direct(i, &new_input_volume);
2007                 compute_reference_ratio(i);
2008             }
2009 
2010             if (PA_SINK_IS_LINKED(i->origin_sink->state))
2011                 update_real_volume(i->origin_sink, new_volume, channel_map);
2012         }
2013     }
2014 }
2015 
2016 /* Called from main thread. Only called for the root sink in shared volume
2017  * cases. */
2018 static void compute_real_volume(pa_sink *s) {
2019     pa_sink_assert_ref(s);
2020     pa_assert_ctl_context();
2021     pa_assert(PA_SINK_IS_LINKED(s->state));
2022     pa_assert(pa_sink_flat_volume_enabled(s));
2023     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2024 
2025     /* This determines the maximum volume of all streams and sets
2026      * s->real_volume accordingly. */
2027 
2028     if (!has_inputs(s)) {
2029         /* In the special case that we have no sink inputs we leave the
2030          * volume unmodified. */
2031         update_real_volume(s, &s->reference_volume, &s->channel_map);
2032         return;
2033     }
2034 
2035     pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2036 
2037     /* First let's determine the new maximum volume of all inputs
2038      * connected to this sink */
2039     get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2040     update_real_volume(s, &s->real_volume, &s->channel_map);
2041 
2042     /* Then, let's update the real ratios/soft volumes of all inputs
2043      * connected to this sink */
2044     compute_real_ratios(s);
2045 }
2046 
2047 /* Called from main thread. Only called for the root sink in shared volume
2048  * cases, except for internal recursive calls. */
2049 static void propagate_reference_volume(pa_sink *s) {
2050     pa_sink_input *i;
2051     uint32_t idx;
2052 
2053     pa_sink_assert_ref(s);
2054     pa_assert_ctl_context();
2055     pa_assert(PA_SINK_IS_LINKED(s->state));
2056     pa_assert(pa_sink_flat_volume_enabled(s));
2057 
2058     /* This is called whenever the sink volume changes for a reason other
2059      * than a sink input volume change. We need to fix up the sink input
2060      * volumes accordingly. */
2061 
2062     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2063         pa_cvolume new_volume;
2064 
2065         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2066             if (PA_SINK_IS_LINKED(i->origin_sink->state))
2067                 propagate_reference_volume(i->origin_sink);
2068 
2069             /* Since the origin sink uses volume sharing, this input's volume
2070              * needs to be updated to match the root sink's real volume, but
2071              * that will be done later in update_real_volume(). */
2072             continue;
2073         }
2074 
2075         /* This basically calculates:
2076          *
2077          * i->volume := s->reference_volume * i->reference_ratio  */
2078 
2079         new_volume = s->reference_volume;
2080         pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2081         pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2082         pa_sink_input_set_volume_direct(i, &new_volume);
2083     }
2084 }
2085 
2086 /* Called from main thread. Only called for the root sink in volume sharing
2087  * cases, except for internal recursive calls. The return value indicates
2088  * whether any reference volume actually changed. */
2089 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2090     pa_cvolume volume;
2091     bool reference_volume_changed;
2092     pa_sink_input *i;
2093     uint32_t idx;
2094 
2095     pa_sink_assert_ref(s);
2096     pa_assert(PA_SINK_IS_LINKED(s->state));
2097     pa_assert(v);
2098     pa_assert(channel_map);
2099     pa_assert(pa_cvolume_valid(v));
2100 
2101     volume = *v;
2102     pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2103 
2104     reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2105     pa_sink_set_reference_volume_direct(s, &volume);
2106 
2107     s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2108 
2109     if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2110         /* If the root sink's volume doesn't change, then there can't be any
2111          * changes in the other sinks in the sink tree either.
2112          *
2113          * It's probably theoretically possible that even if the root sink's
2114          * volume changes slightly, some filter sink doesn't change its volume
2115          * due to rounding errors. If that happens, we still want to propagate
2116          * the changed root sink volume to the sinks connected to the
2117          * intermediate sink that didn't change its volume. This theoretical
2118          * possibility is the reason why we have that !(s->flags &
2119          * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2120          * notice even if we always returned false here when
2121          * reference_volume_changed is false. */
2122         return false;
2123 
2124     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2125         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2126                 && PA_SINK_IS_LINKED(i->origin_sink->state))
2127             update_reference_volume(i->origin_sink, v, channel_map, false);
2128     }
2129 
2130     return true;
2131 }
2132 
2133 /* Called from main thread */
2134 void pa_sink_set_volume(
2135         pa_sink *s,
2136         const pa_cvolume *volume,
2137         bool send_msg,
2138         bool save) {
2139 
2140     pa_cvolume new_reference_volume;
2141     pa_sink *root_sink;
2142 
2143     pa_sink_assert_ref(s);
2144     pa_assert_ctl_context();
2145     pa_assert(PA_SINK_IS_LINKED(s->state));
2146     pa_assert(!volume || pa_cvolume_valid(volume));
2147     pa_assert(volume || pa_sink_flat_volume_enabled(s));
2148     pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2149 
2150     /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2151      * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2152     if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2153         pa_log_warn("Cannot change volume, sink is connected to a PASSTHROUGH input");
2154         return;
2155     }
2156 
2157     /* In case of volume sharing, the volume is set for the root sink first,
2158      * from which it's then propagated to the sharing sinks. */
2159     root_sink = pa_sink_get_master(s);
2160 
2161     if (PA_UNLIKELY(!root_sink))
2162         return;
2163 
2164     /* As a special exception we accept mono volumes on all sinks --
2165      * even on those with more complex channel maps */
2166 
2167     if (volume) {
2168         if (pa_cvolume_compatible(volume, &s->sample_spec))
2169             new_reference_volume = *volume;
2170         else {
2171             new_reference_volume = s->reference_volume;
2172             pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2173         }
2174 
2175         pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2176 
2177         if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2178             if (pa_sink_flat_volume_enabled(root_sink)) {
2179                 /* OK, propagate this volume change back to the inputs */
2180                 propagate_reference_volume(root_sink);
2181 
2182                 /* And now recalculate the real volume */
2183                 compute_real_volume(root_sink);
2184             } else
2185                 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2186         }
2187 
2188     } else {
2189         /* If volume is NULL we synchronize the sink's real and
2190          * reference volumes with the stream volumes. */
2191 
2192         pa_assert(pa_sink_flat_volume_enabled(root_sink));
2193 
2194         /* Ok, let's determine the new real volume */
2195         compute_real_volume(root_sink);
2196 
2197         /* Let's 'push' the reference volume if necessary */
2198         pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2199         /* If the sink and its root don't have the same number of channels, we need to remap */
2200         if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2201             pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2202         update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2203 
2204         /* Now that the reference volume is updated, we can update the streams'
2205          * reference ratios. */
2206         compute_reference_ratios(root_sink);
2207     }
2208 
2209     if (root_sink->set_volume) {
2210         /* If we have a function set_volume(), then we do not apply a
2211          * soft volume by default. However, set_volume() is free to
2212          * apply one to root_sink->soft_volume */
2213 
2214         pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2215         if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2216             root_sink->set_volume(root_sink);
2217 
2218     } else
2219         /* If we have no function set_volume(), then the soft volume
2220          * becomes the real volume */
2221         root_sink->soft_volume = root_sink->real_volume;
2222 
2223     /* This tells the sink that soft volume and/or real volume changed */
2224     if (send_msg)
2225         pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2226 }
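
/* Illustrative caller sketch (assumed, not from this file): a module that
 * wants to set a linked sink to 100% on all channels and have the setting
 * saved might do:
 *
 *     pa_cvolume v;
 *
 *     pa_cvolume_set(&v, s->sample_spec.channels, PA_VOLUME_NORM);
 *     pa_sink_set_volume(s, &v, true, true);
 */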
2227 
2228 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2229  * Only to be called by sink implementor */
2230 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2231 
2232     pa_sink_assert_ref(s);
2233     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2234 
2235     if (s->flags & PA_SINK_DEFERRED_VOLUME)
2236         pa_sink_assert_io_context(s);
2237     else
2238         pa_assert_ctl_context();
2239 
2240     if (!volume)
2241         pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2242     else
2243         s->soft_volume = *volume;
2244 
2245     if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2246         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2247     else
2248         s->thread_info.soft_volume = s->soft_volume;
2249 }
2250 
2251 /* Called from the main thread. Only called for the root sink in volume sharing
2252  * cases, except for internal recursive calls. */
2253 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2254     pa_sink_input *i;
2255     uint32_t idx;
2256 
2257     pa_sink_assert_ref(s);
2258     pa_assert(old_real_volume);
2259     pa_assert_ctl_context();
2260     pa_assert(PA_SINK_IS_LINKED(s->state));
2261 
2262     /* This is called when the hardware's real volume changes due to
2263      * some external event. We copy the real volume into our
2264      * reference volume and then rebuild the stream volumes based on
2265      * i->real_ratio which should stay fixed. */
2266 
2267     if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2268         if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2269             return;
2270 
2271         /* 1. Make the real volume the reference volume */
2272         update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2273     }
2274 
2275     if (pa_sink_flat_volume_enabled(s)) {
2276 
2277         PA_IDXSET_FOREACH(i, s->inputs, idx) {
2278             pa_cvolume new_volume;
2279 
2280             /* 2. Since the sink's reference and real volumes are equal
2281              * now our ratios should be too. */
2282             pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2283 
2284             /* 3. Recalculate the new stream reference volume based on the
2285              * reference ratio and the sink's reference volume.
2286              *
2287              * This basically calculates:
2288              *
2289              * i->volume = s->reference_volume * i->reference_ratio
2290              *
2291              * This is identical to propagate_reference_volume() */
2292             new_volume = s->reference_volume;
2293             pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2294             pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2295             pa_sink_input_set_volume_direct(i, &new_volume);
2296 
2297             if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2298                     && PA_SINK_IS_LINKED(i->origin_sink->state))
2299                 propagate_real_volume(i->origin_sink, old_real_volume);
2300         }
2301     }
2302 
2303     /* Something got changed in the hardware. It probably makes sense
2304      * to save changed hw settings given that hw volume changes not
2305      * triggered by PA are almost certainly done by the user. */
2306     if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2307         s->save_volume = true;
2308 }
2309 
2310 /* Called from io thread */
2311 void pa_sink_update_volume_and_mute(pa_sink *s) {
2312     pa_assert(s);
2313     pa_sink_assert_io_context(s);
2314 
2315     pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2316 }
2317 
2318 /* Called from main thread */
2319 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2320     pa_sink_assert_ref(s);
2321     pa_assert_ctl_context();
2322     pa_assert(PA_SINK_IS_LINKED(s->state));
2323 
2324     if (s->refresh_volume || force_refresh) {
2325         struct pa_cvolume old_real_volume;
2326 
2327         pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2328 
2329         old_real_volume = s->real_volume;
2330 
2331         if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2332             s->get_volume(s);
2333 
2334         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2335 
2336         update_real_volume(s, &s->real_volume, &s->channel_map);
2337         propagate_real_volume(s, &old_real_volume);
2338     }
2339 
2340     return &s->reference_volume;
2341 }
2342 
2343 /* Called from main thread. In volume sharing cases, only the root sink may
2344  * call this. */
2345 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2346     pa_cvolume old_real_volume;
2347 
2348     pa_sink_assert_ref(s);
2349     pa_assert_ctl_context();
2350     pa_assert(PA_SINK_IS_LINKED(s->state));
2351     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2352 
2353     /* The sink implementor may call this when the volume has changed, to make sure everyone is notified */
2354 
2355     old_real_volume = s->real_volume;
2356     update_real_volume(s, new_real_volume, &s->channel_map);
2357     propagate_real_volume(s, &old_real_volume);
2358 }
2359 
2360 /* Called from main thread */
2361 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2362     bool old_muted;
2363 
2364     pa_sink_assert_ref(s);
2365     pa_assert_ctl_context();
2366 
2367     old_muted = s->muted;
2368 
2369     if (mute == old_muted) {
2370         s->save_muted |= save;
2371         return;
2372     }
2373 
2374     s->muted = mute;
2375     s->save_muted = save;
2376 
2377     if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2378         s->set_mute_in_progress = true;
2379         s->set_mute(s);
2380         s->set_mute_in_progress = false;
2381     }
2382 
2383     if (!PA_SINK_IS_LINKED(s->state))
2384         return;
2385 
2386     pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2387     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2388     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2389     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2390 }
2391 
2392 /* Called from main thread */
2393 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2394 
2395     pa_sink_assert_ref(s);
2396     pa_assert_ctl_context();
2397     pa_assert(PA_SINK_IS_LINKED(s->state));
2398 
2399     if ((s->refresh_muted || force_refresh) && s->get_mute) {
2400         bool mute;
2401 
2402         if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2403             if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2404                 pa_sink_mute_changed(s, mute);
2405         } else {
2406             if (s->get_mute(s, &mute) >= 0)
2407                 pa_sink_mute_changed(s, mute);
2408         }
2409     }
2410 
2411     return s->muted;
2412 }
2413 
2414 /* Called from main thread */
2415 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2416     pa_sink_assert_ref(s);
2417     pa_assert_ctl_context();
2418     pa_assert(PA_SINK_IS_LINKED(s->state));
2419 
2420     if (s->set_mute_in_progress)
2421         return;
2422 
2423     /* pa_sink_set_mute() does this same check, so this may appear redundant,
2424      * but we must have this here also, because the save parameter of
2425      * pa_sink_set_mute() would otherwise have unintended side effects (saving
2426      * the mute state when it shouldn't be saved). */
2427     if (new_muted == s->muted)
2428         return;
2429 
2430     pa_sink_set_mute(s, new_muted, true);
2431 }
2432 
2433 /* Called from main thread */
2434 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2435     pa_sink_assert_ref(s);
2436     pa_assert_ctl_context();
2437 
2438     if (p)
2439         pa_proplist_update(s->proplist, mode, p);
2440 
2441     if (PA_SINK_IS_LINKED(s->state)) {
2442         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2443         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2444     }
2445 
2446     return true;
2447 }
2448 
2449 /* Called from main thread */
2450 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2451 void pa_sink_set_description(pa_sink *s, const char *description) {
2452     const char *old;
2453     pa_sink_assert_ref(s);
2454     pa_assert_ctl_context();
2455 
2456     if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2457         return;
2458 
2459     old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2460 
2461     if (old && description && pa_streq(old, description))
2462         return;
2463 
2464     if (description)
2465         pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2466     else
2467         pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2468 
2469     if (s->monitor_source) {
2470         char *n;
2471 
2472         n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2473         pa_source_set_description(s->monitor_source, n);
2474         pa_xfree(n);
2475     }
2476 
2477     if (PA_SINK_IS_LINKED(s->state)) {
2478         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2479         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2480     }
2481 }
2482 
2483 /* Called from main thread */
2484 unsigned pa_sink_linked_by(pa_sink *s) {
2485     unsigned ret;
2486 
2487     pa_sink_assert_ref(s);
2488     pa_assert_ctl_context();
2489     pa_assert(PA_SINK_IS_LINKED(s->state));
2490 
2491     ret = pa_idxset_size(s->inputs);
2492 
2493     /* We add in the number of streams connected to us here. Please
2494      * note the asymmetry to pa_sink_used_by()! */
2495 
2496     if (s->monitor_source)
2497         ret += pa_source_linked_by(s->monitor_source);
2498 
2499     return ret;
2500 }
2501 
2502 /* Called from main thread */
2503 unsigned pa_sink_used_by(pa_sink *s) {
2504     unsigned ret;
2505 
2506     pa_sink_assert_ref(s);
2507     pa_assert_ctl_context();
2508     pa_assert(PA_SINK_IS_LINKED(s->state));
2509 
2510     ret = pa_idxset_size(s->inputs);
2511     pa_assert(ret >= s->n_corked);
2512 
2513     /* Streams connected to our monitor source do not matter for
2514      * pa_sink_used_by()! */
2515 
2516     return ret - s->n_corked;
2517 }
2518 
2519 /* Called from main thread */
2520 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2521     unsigned ret;
2522     pa_sink_input *i;
2523     uint32_t idx;
2524 
2525     pa_sink_assert_ref(s);
2526     pa_assert_ctl_context();
2527 
2528     if (!PA_SINK_IS_LINKED(s->state))
2529         return 0;
2530 
2531     ret = 0;
2532 
2533     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2534         if (i == ignore_input)
2535             continue;
2536 
2537         /* We do not assert here. It is perfectly valid for a sink input to
2538          * be in the INIT state (i.e. created, marked done but not yet put)
2539          * and we should not care if it's unlinked as it won't contribute
2540          * towards our busy status.
2541          */
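        /* For example (illustrative): a corked music stream, or one flagged
         * with PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND, leaves ret untouched
         * below, so a sink carrying only such inputs reports 0 and may be
         * suspended by modules such as module-suspend-on-idle. */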
2542         if (!PA_SINK_INPUT_IS_LINKED(i->state))
2543             continue;
2544 
2545         if (i->state == PA_SINK_INPUT_CORKED)
2546             continue;
2547 
2548         if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2549             continue;
2550 
2551         ret ++;
2552     }
2553 
2554     if (s->monitor_source)
2555         ret += pa_source_check_suspend(s->monitor_source, ignore_output);
2556 
2557     return ret;
2558 }
2559 
2560 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2561     switch (state) {
2562         case PA_SINK_INIT:          return "INIT";
2563         case PA_SINK_IDLE:          return "IDLE";
2564         case PA_SINK_RUNNING:       return "RUNNING";
2565         case PA_SINK_SUSPENDED:     return "SUSPENDED";
2566         case PA_SINK_UNLINKED:      return "UNLINKED";
2567         case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2568     }
2569 
2570     pa_assert_not_reached();
2571 }
2572 
2573 /* Called from the IO thread */
2574 static void sync_input_volumes_within_thread(pa_sink *s) {
2575     pa_sink_input *i;
2576     void *state = NULL;
2577 
2578     pa_sink_assert_ref(s);
2579     pa_sink_assert_io_context(s);
2580 
2581     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2582         if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2583             continue;
2584 
2585         i->thread_info.soft_volume = i->soft_volume;
2586         pa_sink_input_request_rewind(i, 0, true, false, false);
2587     }
2588 }
2589 
2590 /* Called from the IO thread. Only called for the root sink in volume sharing
2591  * cases, except for internal recursive calls. */
2592 static void set_shared_volume_within_thread(pa_sink *s) {
2593     pa_sink_input *i = NULL;
2594     void *state = NULL;
2595 
2596     pa_sink_assert_ref(s);
2597 
2598     PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2599 
2600     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2601         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2602             set_shared_volume_within_thread(i->origin_sink);
2603     }
2604 }
2605 
2606 /* Called from IO thread. Gets max_rewind limit from sink inputs.
2607  * This function is used to communicate the max_rewind value of a
2608  * virtual sink to the master sink. The get_max_rewind_limit()
2609  * callback is implemented by sink inputs connecting a virtual
2610  * sink to its master. */
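/* For example (assumed numbers): if two attached filter-sink inputs report
 * rewind limits of 8192 and 4096 bytes, the smaller value (4096) is used
 * below to cap the requested limit. */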
2611 static size_t get_max_rewind_limit(pa_sink *s, size_t requested_limit) {
2612     pa_sink_input *i;
2613     void *state = NULL;
2614     size_t rewind_limit;
2615 
2616     pa_assert(s);
2617 
2618     /* Get rewind limit in sink sample spec from sink inputs */
2619     rewind_limit = (size_t)(-1);
2620     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2621         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2622 
2623             if (i->get_max_rewind_limit) {
2624                 size_t limit;
2625 
2626                 limit = i->get_max_rewind_limit(i);
2627                 if (rewind_limit == (size_t)(-1) || rewind_limit > limit)
2628                     rewind_limit = limit;
2629             }
2630         }
2631     }
2632 
2633     /* Set max_rewind */
2634     if (rewind_limit != (size_t)(-1))
2635         requested_limit = PA_MIN(rewind_limit, requested_limit);
2636 
2637     return requested_limit;
2638 }
2639 
2640 /* Called from IO thread, except when it is not */
2641 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2642     pa_sink *s = PA_SINK(o);
2643     pa_sink_assert_ref(s);
2644 
2645     switch ((pa_sink_message_t) code) {
2646 
2647         case PA_SINK_MESSAGE_ADD_INPUT: {
2648             pa_sink_input *i = PA_SINK_INPUT(userdata);
2649 
2650             /* If you change anything here, make sure to change the
2651              * sink input handling a few lines down at
2652              * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2653 
2654             pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2655 
2656             /* Since the caller sleeps in pa_sink_input_put(), we can
2657              * safely access data outside of thread_info even though
2658              * it is mutable */
2659 
2660             if ((i->thread_info.sync_prev = i->sync_prev)) {
2661                 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2662                 pa_assert(i->sync_prev->sync_next == i);
2663                 i->thread_info.sync_prev->thread_info.sync_next = i;
2664             }
2665 
2666             if ((i->thread_info.sync_next = i->sync_next)) {
2667                 pa_assert(i->sink == i->thread_info.sync_next->sink);
2668                 pa_assert(i->sync_next->sync_prev == i);
2669                 i->thread_info.sync_next->thread_info.sync_prev = i;
2670             }
2671 
2672             pa_sink_input_attach(i);
2673 
2674             pa_sink_input_set_state_within_thread(i, i->state);
2675 
2676             /* The requested latency of the sink input needs to be fixed up and
2677              * then configured on the sink. If this causes the sink latency to
2678              * go down, the sink implementor is responsible for doing a rewind
2679              * in the update_requested_latency() callback to ensure that the
2680              * sink buffer doesn't contain more data than what the new latency
2681              * allows.
2682              *
2683              * XXX: Does it really make sense to push this responsibility to
2684              * the sink implementors? Wouldn't it be better to do it once in
2685              * the core than many times in the modules? */
2686 
2687             if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2688                 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2689 
2690             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2691             pa_sink_input_update_max_request(i, s->thread_info.max_request);
2692 
2693             /* We don't rewind here automatically. This is left to the
2694              * sink input implementor because some sink inputs need a
2695              * slow start, i.e. need some time to buffer client
2696              * samples before beginning streaming.
2697              *
2698              * XXX: Does it really make sense to push this functionality to
2699              * the sink implementors? Wouldn't it be better to do it once in
2700              * the core than many times in the modules? */
2701 
2702             /* In flat volume mode we need to update the volume as
2703              * well */
2704             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2705         }
2706 
2707         case PA_SINK_MESSAGE_REMOVE_INPUT: {
2708             pa_sink_input *i = PA_SINK_INPUT(userdata);
2709 
2710             /* If you change anything here, make sure to change the
2711              * sink input handling a few lines down at
2712              * PA_SINK_MESSAGE_START_MOVE, too. */
2713 
2714             pa_sink_input_detach(i);
2715 
2716             pa_sink_input_set_state_within_thread(i, i->state);
2717 
2718             /* Since the caller sleeps in pa_sink_input_unlink(),
2719              * we can safely access data outside of thread_info even
2720              * though it is mutable */
2721 
2722             pa_assert(!i->sync_prev);
2723             pa_assert(!i->sync_next);
2724 
2725             if (i->thread_info.sync_prev) {
2726                 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2727                 i->thread_info.sync_prev = NULL;
2728             }
2729 
2730             if (i->thread_info.sync_next) {
2731                 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2732                 i->thread_info.sync_next = NULL;
2733             }
2734 
2735             pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2736             pa_sink_request_rewind(s, (size_t) -1);
2737             pa_sink_invalidate_requested_latency(s, true);
2738 
2739             /* In flat volume mode we need to update the volume as
2740              * well */
2741             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2742         }
2743 
2744         case PA_SINK_MESSAGE_START_MOVE: {
2745             pa_sink_input *i = PA_SINK_INPUT(userdata);
2746 
2747             /* We don't support moving synchronized streams. */
2748             pa_assert(!i->sync_prev);
2749             pa_assert(!i->sync_next);
2750             pa_assert(!i->thread_info.sync_next);
2751             pa_assert(!i->thread_info.sync_prev);
2752 
2753             if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2754 
2755                 /* The old sink probably has some audio from this
2756                  * stream in its buffer. We want to "take it back" as
2757                  * much as possible and play it to the new sink. We
2758                  * don't know at this point how much the old sink can
2759                  * rewind, so we just save some values and reconstruct
2760                  * the render memblockq in finish_move(). */
2761 
2762                 /* Save some current values for restore_render_memblockq() */
2763                 i->thread_info.origin_sink_latency = pa_sink_get_latency_within_thread(s, false);
2764                 i->thread_info.move_start_time = pa_rtclock_now();
2765                 i->thread_info.resampler_delay_frames = 0;
2766                 if (i->thread_info.resampler)
2767                     /* Round down */
2768                     i->thread_info.resampler_delay_frames = pa_resampler_get_delay(i->thread_info.resampler, false);
2769             }
2770 
2771             pa_sink_input_detach(i);
2772 
2773             /* Let's remove the sink input ...*/
2774             pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2775 
2776             /* The rewind must be requested before invalidating the latency, otherwise
2777              * the max_rewind value of the sink may change before the rewind. */
2778             pa_log_debug("Requesting rewind due to started move");
2779             pa_sink_request_rewind(s, (size_t) -1);
2780 
2781             pa_sink_invalidate_requested_latency(s, true);
2782 
2783             /* In flat volume mode we need to update the volume as
2784              * well */
2785             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2786         }
2787 
2788         case PA_SINK_MESSAGE_FINISH_MOVE: {
2789             pa_sink_input *i = PA_SINK_INPUT(userdata);
2790 
2791             /* We don't support moving synchronized streams. */
2792             pa_assert(!i->sync_prev);
2793             pa_assert(!i->sync_next);
2794             pa_assert(!i->thread_info.sync_next);
2795             pa_assert(!i->thread_info.sync_prev);
2796 
2797             pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2798 
2799             pa_sink_input_attach(i);
2800 
2801             if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2802                 pa_usec_t usec = 0;
2803                 size_t nbytes, delay_bytes;
2804 
2805                 /* In the ideal case the new sink would start playing
2806                  * the stream immediately. That requires the sink to
2807                  * be able to rewind all of its latency, which usually
2808                  * isn't possible, so there will probably be some gap
2809                  * before the moved stream becomes audible. We then
2810                  * have two possibilities: 1) start playing the stream
2811                  * from where it is now, or 2) drop the unrewindable
2812                  * latency of the sink from the stream. With option 1
2813                  * we won't lose any audio but the stream will have a
2814                  * pause. With option 2 we may lose some audio but the
2815                  * stream time will be somewhat in sync with the wall
2816                  * clock. Lennart seems to have chosen option 2 (one
2817                  * of the reasons might have been that option 1 is
2818                  * actually much harder to implement), so we drop the
2819                  * latency of the new sink from the moved stream and
2820                  * hope that the sink will undo most of that in the
2821                  * rewind. */
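                /* Rough numbers (assumed): with 40 ms of sink latency at
                 * 44.1 kHz S16 stereo, nbytes comes to about 7056 bytes; if
                 * the move itself took 10 ms, delay_bytes adds roughly
                 * another 1764 bytes, and that total is dropped from the
                 * stream below. */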
2822 
2823                 /* Get the latency of the sink */
2824                 usec = pa_sink_get_latency_within_thread(s, false);
2825                 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2826 
2827                 /* Calculate number of samples that have been played during the move */
2828                 delay_bytes = 0;
2829                 if (i->thread_info.move_start_time > 0) {
2830                     usec = pa_rtclock_now() - i->thread_info.move_start_time;
2831                     pa_log_debug("Move took %llu usec", (unsigned long long) usec);
2832                     delay_bytes = pa_usec_to_bytes(usec, &s->sample_spec);
2833                 }
2834 
2835                 /* max_rewind must be updated for the sink input because otherwise
2836                  * the data in the render memblockq will get lost */
2837                 pa_sink_input_update_max_rewind(i, nbytes);
2838 
2839                 if (nbytes + delay_bytes > 0)
2840                     pa_sink_input_drop(i, nbytes + delay_bytes);
2841 
2842                 pa_log_debug("Requesting rewind due to finished move");
2843                 pa_sink_request_rewind(s, nbytes);
2844             }
2845 
2846             /* Updating the requested sink latency has to be done
2847              * after the sink rewind request, not before, because
2848              * otherwise the sink may limit the rewind amount
2849              * needlessly. */
2850 
2851             if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2852                 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2853 
2854             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2855             pa_sink_input_update_max_request(i, s->thread_info.max_request);
2856 
2857             /* Reset move variables */
2858             i->thread_info.move_start_time = 0;
2859             i->thread_info.resampler_delay_frames = 0;
2860             i->thread_info.origin_sink_latency = 0;
2861 
2862             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2863         }
2864 
2865         case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2866             pa_sink *root_sink = pa_sink_get_master(s);
2867 
2868             if (PA_LIKELY(root_sink))
2869                 set_shared_volume_within_thread(root_sink);
2870 
2871             return 0;
2872         }
2873 
2874         case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2875 
2876             if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2877                 s->set_volume(s);
2878                 pa_sink_volume_change_push(s);
2879             }
2880             /* Fall through ... */
2881 
2882         case PA_SINK_MESSAGE_SET_VOLUME:
2883 
2884             if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2885                 s->thread_info.soft_volume = s->soft_volume;
2886                 pa_sink_request_rewind(s, (size_t) -1);
2887             }
2888 
2889             /* Fall through ... */
2890 
2891         case PA_SINK_MESSAGE_SYNC_VOLUMES:
2892             sync_input_volumes_within_thread(s);
2893             return 0;
2894 
2895         case PA_SINK_MESSAGE_GET_VOLUME:
2896 
2897             if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2898                 s->get_volume(s);
2899                 pa_sink_volume_change_flush(s);
2900                 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2901             }
2902 
2903             /* In case sink implementor reset SW volume. */
2904             if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2905                 s->thread_info.soft_volume = s->soft_volume;
2906                 pa_sink_request_rewind(s, (size_t) -1);
2907             }
2908 
2909             return 0;
2910 
2911         case PA_SINK_MESSAGE_SET_MUTE:
2912 
2913             if (s->thread_info.soft_muted != s->muted) {
2914                 s->thread_info.soft_muted = s->muted;
2915                 pa_sink_request_rewind(s, (size_t) -1);
2916             }
2917 
2918             if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2919                 s->set_mute(s);
2920 
2921             return 0;
2922 
2923         case PA_SINK_MESSAGE_GET_MUTE:
2924 
2925             if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2926                 return s->get_mute(s, userdata);
2927 
2928             return 0;
2929 
2930         case PA_SINK_MESSAGE_SET_STATE: {
2931             struct set_state_data *data = userdata;
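            /* Note whether this transition crosses the suspended/opened boundary,
             * so the sink inputs can be notified about it below. */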
2932             bool suspend_change =
2933                 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(data->state)) ||
2934                 (PA_SINK_IS_OPENED(s->thread_info.state) && data->state == PA_SINK_SUSPENDED);
2935 
2936             if (s->set_state_in_io_thread) {
2937                 int r;
2938 
2939                 if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)
2940                     return r;
2941             }
2942 
2943             s->thread_info.state = data->state;
2944 
2945             if (s->thread_info.state == PA_SINK_SUSPENDED) {
2946                 s->thread_info.rewind_nbytes = 0;
2947                 s->thread_info.rewind_requested = false;
2948             }
2949 
2950             if (suspend_change) {
2951                 pa_sink_input *i;
2952                 void *state = NULL;
2953 
2954                 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2955                     if (i->suspend_within_thread)
2956                         i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2957             }
2958 
2959             return 0;
2960         }
2961 
2962         case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2963 
2964             pa_usec_t *usec = userdata;
2965             *usec = pa_sink_get_requested_latency_within_thread(s);
2966 
2967             /* Yes, that's right: the IO thread will see -1 when no
2968              * explicit requested latency is configured, while the main
2969              * thread will see max_latency. */
2970             if (*usec == (pa_usec_t) -1)
2971                 *usec = s->thread_info.max_latency;
2972 
2973             return 0;
2974         }
2975 
2976         case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2977             pa_usec_t *r = userdata;
2978 
2979             pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2980 
2981             return 0;
2982         }
2983 
2984         case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2985             pa_usec_t *r = userdata;
2986 
2987             r[0] = s->thread_info.min_latency;
2988             r[1] = s->thread_info.max_latency;
2989 
2990             return 0;
2991         }
2992 
2993         case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2994 
2995             *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2996             return 0;
2997 
2998         case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2999 
3000             pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
3001             return 0;
3002 
3003         case PA_SINK_MESSAGE_GET_MAX_REWIND:
3004 
3005             *((size_t*) userdata) = s->thread_info.max_rewind;
3006             return 0;
3007 
3008         case PA_SINK_MESSAGE_GET_LAST_REWIND:
3009 
3010             *((size_t*) userdata) = s->thread_info.last_rewind_nbytes;
3011             return 0;
3012 
3013         case PA_SINK_MESSAGE_GET_MAX_REQUEST:
3014 
3015             *((size_t*) userdata) = s->thread_info.max_request;
3016             return 0;
3017 
3018         case PA_SINK_MESSAGE_SET_MAX_REWIND:
3019 
3020             pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3021             return 0;
3022 
3023         case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3024 
3025             pa_sink_set_max_request_within_thread(s, (size_t) offset);
3026             return 0;
3027 
3028         case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3029             /* This message is sent from the IO thread and handled in the main thread. */
3030             pa_assert_ctl_context();
3031 
3032             /* Make sure we're not messing with main thread when no longer linked */
3033             if (!PA_SINK_IS_LINKED(s->state))
3034                 return 0;
3035 
3036             pa_sink_get_volume(s, true);
3037             pa_sink_get_mute(s, true);
3038             return 0;
3039 
3040         case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
3041             s->thread_info.port_latency_offset = offset;
3042             return 0;
3043 
3044         case PA_SINK_MESSAGE_GET_LATENCY:
3045         case PA_SINK_MESSAGE_MAX:
3046             ;
3047     }
3048 
3049     return -1;
3050 }
3051 
3052 /* Called from main thread */
3053 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
3054     pa_sink *sink;
3055     uint32_t idx;
3056     int ret = 0;
3057 
3058     pa_core_assert_ref(c);
3059     pa_assert_ctl_context();
3060     pa_assert(cause != 0);
3061 
3062     PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3063         int r;
3064 
3065         if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3066             ret = r;
3067     }
3068 
3069     return ret;
3070 }
3071 
3072 /* Called from IO thread */
3073 void pa_sink_detach_within_thread(pa_sink *s) {
3074     pa_sink_input *i;
3075     void *state = NULL;
3076 
3077     pa_sink_assert_ref(s);
3078     pa_sink_assert_io_context(s);
3079     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3080 
3081     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3082         pa_sink_input_detach(i);
3083 
3084     if (s->monitor_source)
3085         pa_source_detach_within_thread(s->monitor_source);
3086 }
3087 
3088 /* Called from IO thread */
3089 void pa_sink_attach_within_thread(pa_sink *s) {
3090     pa_sink_input *i;
3091     void *state = NULL;
3092 
3093     pa_sink_assert_ref(s);
3094     pa_sink_assert_io_context(s);
3095     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3096 
3097     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3098         pa_sink_input_attach(i);
3099 
3100     if (s->monitor_source)
3101         pa_source_attach_within_thread(s->monitor_source);
3102 }
3103 
3104 /* Called from IO thread */
3105 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
3106     pa_sink_assert_ref(s);
3107     pa_sink_assert_io_context(s);
3108     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3109 
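    /* nbytes == (size_t) -1 means "rewind as much as possible". The request is
     * clamped to max_rewind, and if a rewind of at least this size is already
     * pending, there is nothing more to do. */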
3110     if (nbytes == (size_t) -1)
3111         nbytes = s->thread_info.max_rewind;
3112 
3113     nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
3114 
3115     if (s->thread_info.rewind_requested &&
3116         nbytes <= s->thread_info.rewind_nbytes)
3117         return;
3118 
3119     s->thread_info.rewind_nbytes = nbytes;
3120     s->thread_info.rewind_requested = true;
3121 
3122     if (s->request_rewind)
3123         s->request_rewind(s);
3124 }
3125 
3126 /* Called from IO thread */
3127 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3128     pa_usec_t result = (pa_usec_t) -1;
3129     pa_sink_input *i;
3130     void *state = NULL;
3131     pa_usec_t monitor_latency;
3132 
3133     pa_sink_assert_ref(s);
3134     pa_sink_assert_io_context(s);
3135 
3136     if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3137         return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3138 
3139     if (s->thread_info.requested_latency_valid)
3140         return s->thread_info.requested_latency;
3141 
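    /* Otherwise compute it: take the smallest latency requested by any sink
     * input or by the monitor source, and clamp the result to the sink's
     * [min_latency, max_latency] range. */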
3142     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3143         if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3144             (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3145             result = i->thread_info.requested_sink_latency;
3146 
3147     monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3148 
3149     if (monitor_latency != (pa_usec_t) -1 &&
3150         (result == (pa_usec_t) -1 || result > monitor_latency))
3151         result = monitor_latency;
3152 
3153     if (result != (pa_usec_t) -1)
3154         result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3155 
3156     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3157         /* Only cache if properly initialized */
3158         s->thread_info.requested_latency = result;
3159         s->thread_info.requested_latency_valid = true;
3160     }
3161 
3162     return result;
3163 }
3164 
3165 /* Called from main thread */
3166 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3167     pa_usec_t usec = 0;
3168 
3169     pa_sink_assert_ref(s);
3170     pa_assert_ctl_context();
3171     pa_assert(PA_SINK_IS_LINKED(s->state));
3172 
3173     if (s->state == PA_SINK_SUSPENDED)
3174         return 0;
3175 
3176     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3177 
3178     return usec;
3179 }
3180 
3181 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3182 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3183     pa_sink_input *i;
3184     void *state = NULL;
3185 
3186     pa_sink_assert_ref(s);
3187     pa_sink_assert_io_context(s);
3188 
3189     max_rewind = get_max_rewind_limit(s, max_rewind);
3190 
3191     if (max_rewind == s->thread_info.max_rewind)
3192         return;
3193 
3194     s->thread_info.max_rewind = max_rewind;
3195 
3196     if (PA_SINK_IS_LINKED(s->thread_info.state))
3197         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3198             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3199 
3200     if (s->monitor_source)
3201         pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3202 }
3203 
3204 /* Called from main thread */
3205 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3206     pa_sink_assert_ref(s);
3207     pa_assert_ctl_context();
3208 
3209     if (PA_SINK_IS_LINKED(s->state))
3210         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3211     else
3212         pa_sink_set_max_rewind_within_thread(s, max_rewind);
3213 }
3214 
3215 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3216 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3217     void *state = NULL;
3218 
3219     pa_sink_assert_ref(s);
3220     pa_sink_assert_io_context(s);
3221 
3222     if (max_request == s->thread_info.max_request)
3223         return;
3224 
3225     s->thread_info.max_request = max_request;
3226 
3227     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3228         pa_sink_input *i;
3229 
3230         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3231             pa_sink_input_update_max_request(i, s->thread_info.max_request);
3232     }
3233 }
3234 
3235 /* Called from main thread */
3236 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3237     pa_sink_assert_ref(s);
3238     pa_assert_ctl_context();
3239 
3240     if (PA_SINK_IS_LINKED(s->state))
3241         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3242     else
3243         pa_sink_set_max_request_within_thread(s, max_request);
3244 }
3245 
3246 /* Called from IO thread */
3247 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3248     pa_sink_input *i;
3249     void *state = NULL;
3250 
3251     pa_sink_assert_ref(s);
3252     pa_sink_assert_io_context(s);
3253 
3254     if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3255         s->thread_info.requested_latency_valid = false;
3256     else if (dynamic)
3257         return;
3258 
3259     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3260 
3261         if (s->update_requested_latency)
3262             s->update_requested_latency(s);
3263 
3264         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3265             if (i->update_sink_requested_latency)
3266                 i->update_sink_requested_latency(i);
3267     }
3268 }
3269 
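/* Usage sketch (hypothetical caller, values are only illustrative):
 *
 *     pa_sink_set_latency_range(s, 5 * PA_USEC_PER_MSEC, 100 * PA_USEC_PER_MSEC);
 *
 * Out-of-range values are clamped to ABSOLUTE_MIN_LATENCY/ABSOLUTE_MAX_LATENCY
 * below, and anything other than the absolute limits requires the sink to have
 * the PA_SINK_DYNAMIC_LATENCY flag set. */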
3270 /* Called from main thread */
3271 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3272     pa_sink_assert_ref(s);
3273     pa_assert_ctl_context();
3274 
3275     /* min_latency == 0:           no limit
3276      * min_latency anything else:  specified limit
3277      *
3278      * Similar for max_latency */
3279 
3280     if (min_latency < ABSOLUTE_MIN_LATENCY)
3281         min_latency = ABSOLUTE_MIN_LATENCY;
3282 
3283     if (max_latency <= 0 ||
3284         max_latency > ABSOLUTE_MAX_LATENCY)
3285         max_latency = ABSOLUTE_MAX_LATENCY;
3286 
3287     pa_assert(min_latency <= max_latency);
3288 
3289     /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3290     pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3291                max_latency == ABSOLUTE_MAX_LATENCY) ||
3292               (s->flags & PA_SINK_DYNAMIC_LATENCY));
3293 
3294     if (PA_SINK_IS_LINKED(s->state)) {
3295         pa_usec_t r[2];
3296 
3297         r[0] = min_latency;
3298         r[1] = max_latency;
3299 
3300         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3301     } else
3302         pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3303 }
3304 
3305 /* Called from main thread */
3306 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3307     pa_sink_assert_ref(s);
3308     pa_assert_ctl_context();
3309     pa_assert(min_latency);
3310     pa_assert(max_latency);
3311 
3312     if (PA_SINK_IS_LINKED(s->state)) {
3313         pa_usec_t r[2] = { 0, 0 };
3314 
3315         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3316 
3317         *min_latency = r[0];
3318         *max_latency = r[1];
3319     } else {
3320         *min_latency = s->thread_info.min_latency;
3321         *max_latency = s->thread_info.max_latency;
3322     }
3323 }
3324 
3325 /* Called from IO thread */
3326 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3327     pa_sink_assert_ref(s);
3328     pa_sink_assert_io_context(s);
3329 
3330     pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3331     pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3332     pa_assert(min_latency <= max_latency);
3333 
3334     /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3335     pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3336                max_latency == ABSOLUTE_MAX_LATENCY) ||
3337               (s->flags & PA_SINK_DYNAMIC_LATENCY));
3338 
3339     if (s->thread_info.min_latency == min_latency &&
3340         s->thread_info.max_latency == max_latency)
3341         return;
3342 
3343     s->thread_info.min_latency = min_latency;
3344     s->thread_info.max_latency = max_latency;
3345 
3346     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3347         pa_sink_input *i;
3348         void *state = NULL;
3349 
3350         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3351             if (i->update_sink_latency_range)
3352                 i->update_sink_latency_range(i);
3353     }
3354 
3355     pa_sink_invalidate_requested_latency(s, false);
3356 
3357     pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3358 }
3359 
3360 /* Called from main thread */
3361 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3362     pa_sink_assert_ref(s);
3363     pa_assert_ctl_context();
3364 
3365     if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3366         pa_assert(latency == 0);
3367         return;
3368     }
3369 
3370     if (latency < ABSOLUTE_MIN_LATENCY)
3371         latency = ABSOLUTE_MIN_LATENCY;
3372 
3373     if (latency > ABSOLUTE_MAX_LATENCY)
3374         latency = ABSOLUTE_MAX_LATENCY;
3375 
3376     if (PA_SINK_IS_LINKED(s->state))
3377         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3378     else
3379         s->thread_info.fixed_latency = latency;
3380 
3381     pa_source_set_fixed_latency(s->monitor_source, latency);
3382 }
3383 
3384 /* Called from main thread */
3385 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3386     pa_usec_t latency;
3387 
3388     pa_sink_assert_ref(s);
3389     pa_assert_ctl_context();
3390 
3391     if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3392         return 0;
3393 
3394     if (PA_SINK_IS_LINKED(s->state))
3395         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3396     else
3397         latency = s->thread_info.fixed_latency;
3398 
3399     return latency;
3400 }
3401 
3402 /* Called from IO thread */
3403 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3404     pa_sink_assert_ref(s);
3405     pa_sink_assert_io_context(s);
3406 
3407     if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3408         pa_assert(latency == 0);
3409         s->thread_info.fixed_latency = 0;
3410 
3411         if (s->monitor_source)
3412             pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3413 
3414         return;
3415     }
3416 
3417     pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3418     pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3419 
3420     if (s->thread_info.fixed_latency == latency)
3421         return;
3422 
3423     s->thread_info.fixed_latency = latency;
3424 
3425     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3426         pa_sink_input *i;
3427         void *state = NULL;
3428 
3429         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3430             if (i->update_sink_fixed_latency)
3431                 i->update_sink_fixed_latency(i);
3432     }
3433 
3434     pa_sink_invalidate_requested_latency(s, false);
3435 
3436     pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3437 }
3438 
3439 /* Called from main context */
3440 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3441     pa_sink_assert_ref(s);
3442 
3443     s->port_latency_offset = offset;
3444 
3445     if (PA_SINK_IS_LINKED(s->state))
3446         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3447     else
3448         s->thread_info.port_latency_offset = offset;
3449 
3450     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3451 }
3452 
3453 /* Called from main context */
3454 size_t pa_sink_get_max_rewind(pa_sink *s) {
3455     size_t r;
3456     pa_assert_ctl_context();
3457     pa_sink_assert_ref(s);
3458 
3459     if (!PA_SINK_IS_LINKED(s->state))
3460         return s->thread_info.max_rewind;
3461 
3462     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3463 
3464     return r;
3465 }
3466 
3467 /* Called from main context */
3468 size_t pa_sink_get_max_request(pa_sink *s) {
3469     size_t r;
3470     pa_sink_assert_ref(s);
3471     pa_assert_ctl_context();
3472 
3473     if (!PA_SINK_IS_LINKED(s->state))
3474         return s->thread_info.max_request;
3475 
3476     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3477 
3478     return r;
3479 }
3480 
3481 /* Called from main context */
3482 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3483     pa_device_port *port;
3484 
3485     pa_sink_assert_ref(s);
3486     pa_assert_ctl_context();
3487 
3488     if (!s->set_port) {
3489         pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3490         return -PA_ERR_NOTIMPLEMENTED;
3491     }
3492 
3493     if (!name)
3494         return -PA_ERR_NOENTITY;
3495 
3496     if (!(port = pa_hashmap_get(s->ports, name)))
3497         return -PA_ERR_NOENTITY;
3498 
3499     if (s->active_port == port) {
3500         s->save_port = s->save_port || save;
3501         return 0;
3502     }
3503 
3504     s->port_changing = true;
3505 
3506     if (s->set_port(s, port) < 0)
3507         return -PA_ERR_NOENTITY;
3508 
3509     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3510 
3511     pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3512 
3513     s->active_port = port;
3514     s->save_port = save;
3515 
3516     pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3517 
3518     /* The active port affects the default sink selection. */
3519     pa_core_update_default_sink(s->core);
3520 
3521     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3522 
3523     s->port_changing = false;
3524 
3525     return 0;
3526 }
3527 
3528 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3529     const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3530 
3531     pa_assert(p);
3532 
3533     if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3534         return true;
3535 
3536     if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3537 
3538         if (pa_streq(ff, "microphone"))
3539             t = "audio-input-microphone";
3540         else if (pa_streq(ff, "webcam"))
3541             t = "camera-web";
3542         else if (pa_streq(ff, "computer"))
3543             t = "computer";
3544         else if (pa_streq(ff, "handset"))
3545             t = "phone";
3546         else if (pa_streq(ff, "portable"))
3547             t = "multimedia-player";
3548         else if (pa_streq(ff, "tv"))
3549             t = "video-display";
3550 
3551         /*
3552          * The following icons are not part of the icon naming spec,
3553          * because Rodney Dawes sucks as the maintainer of that spec.
3554          *
3555          * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3556          */
3557         else if (pa_streq(ff, "headset"))
3558             t = "audio-headset";
3559         else if (pa_streq(ff, "headphone"))
3560             t = "audio-headphones";
3561         else if (pa_streq(ff, "speaker"))
3562             t = "audio-speakers";
3563         else if (pa_streq(ff, "hands-free"))
3564             t = "audio-handsfree";
3565     }
3566 
3567     if (!t)
3568         if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3569             if (pa_streq(c, "modem"))
3570                 t = "modem";
3571 
3572     if (!t) {
3573         if (is_sink)
3574             t = "audio-card";
3575         else
3576             t = "audio-input-microphone";
3577     }
3578 
3579     if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3580         if (strstr(profile, "analog"))
3581             s = "-analog";
3582         else if (strstr(profile, "iec958"))
3583             s = "-iec958";
3584         else if (strstr(profile, "hdmi"))
3585             s = "-hdmi";
3586     }
3587 
3588     bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3589 
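    /* Assemble the final icon name. For example (illustrative), a "headset"
     * form factor with an "analog" profile on a "usb" bus becomes
     * "audio-headset-analog-usb". */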
3590     pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3591 
3592     return true;
3593 }
3594 
3595 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3596     const char *s, *d = NULL, *k;
3597     pa_assert(p);
3598 
3599     if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3600         return true;
3601 
3602     if (card)
3603         if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3604             d = s;
3605 
3606     if (!d)
3607         if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3608             if (pa_streq(s, "internal"))
3609                 d = _("Built-in Audio");
3610 
3611     if (!d)
3612         if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3613             if (pa_streq(s, "modem"))
3614                 d = _("Modem");
3615 
3616     if (!d)
3617         d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3618 
3619     if (!d)
3620         return false;
3621 
3622     k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3623 
3624     if (d && k)
3625         pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3626     else if (d)
3627         pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3628 
3629     return true;
3630 }
3631 
3632 bool pa_device_init_intended_roles(pa_proplist *p) {
3633     const char *s;
3634     pa_assert(p);
3635 
3636     if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3637         return true;
3638 
3639     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3640         if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3641             || pa_streq(s, "headset")) {
3642             pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3643             return true;
3644         }
3645 
3646     return false;
3647 }
3648 
3649 unsigned pa_device_init_priority(pa_proplist *p) {
3650     const char *s;
3651     unsigned priority = 0;
3652 
3653     pa_assert(p);
3654 
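    /* The priority is a simple additive score: device API, class, form factor,
     * bus and profile each contribute a fixed weight, chosen so that a more
     * significant property always dominates the less significant ones (e.g. any
     * "sound" class device outranks a modem regardless of bus or profile). */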
3655     /* JACK sinks and sources get very high priority so that we'll switch the
3656      * default devices automatically when jackd starts and
3657      * module-jackdbus-detect creates the jack sink and source. */
3658     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_API))) {
3659         if (pa_streq(s, "jack"))
3660             priority += 10000;
3661     }
3662 
3663     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3664 
3665         if (pa_streq(s, "sound"))
3666             priority += 9000;
3667         else if (!pa_streq(s, "modem"))
3668             priority += 1000;
3669     }
3670 
3671     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3672 
3673         if (pa_streq(s, "headphone"))
3674             priority += 900;
3675         else if (pa_streq(s, "hifi"))
3676             priority += 600;
3677         else if (pa_streq(s, "speaker"))
3678             priority += 500;
3679         else if (pa_streq(s, "portable"))
3680             priority += 450;
3681     }
3682 
3683     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3684 
3685         if (pa_streq(s, "bluetooth"))
3686             priority += 50;
3687         else if (pa_streq(s, "usb"))
3688             priority += 40;
3689         else if (pa_streq(s, "pci"))
3690             priority += 30;
3691     }
3692 
3693     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3694 
3695         if (pa_startswith(s, "analog-")) {
3696             priority += 9;
3697 
3698             /* If an analog device has an intended role of "phone", it probably
3699              * co-exists with another device that is meant for everything else,
3700              * and that other device should have higher priority than the phone
3701              * device. */
3702             if (pa_str_in_list_spaces(pa_proplist_gets(p, PA_PROP_DEVICE_INTENDED_ROLES), "phone"))
3703                 priority -= 1;
3704         }
3705         else if (pa_startswith(s, "iec958-"))
3706             priority += 7;
3707     }
3708 
3709     return priority;
3710 }
3711 
3712 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3713 
3714 /* Called from the IO thread. */
3715 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3716     pa_sink_volume_change *c;
3717     if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3718         c = pa_xnew(pa_sink_volume_change, 1);
3719 
3720     PA_LLIST_INIT(pa_sink_volume_change, c);
3721     c->at = 0;
3722     pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3723     return c;
3724 }
3725 
3726 /* Called from the IO thread. */
3727 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3728     pa_assert(c);
3729     if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3730         pa_xfree(c);
3731 }
3732 
3733 /* Called from the IO thread. */
3734 void pa_sink_volume_change_push(pa_sink *s) {
3735     pa_sink_volume_change *c = NULL;
3736     pa_sink_volume_change *nc = NULL;
3737     pa_sink_volume_change *pc = NULL;
3738     uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3739 
3740     const char *direction = NULL;
3741 
3742     pa_assert(s);
3743     nc = pa_sink_volume_change_new(s);
3744 
3745     /* NOTE: There are already more volumes in pa_sink than I can remember.
3746      *       Adding one more volume for HW would let us get rid of this, but
3747      *       I am trying to survive with the ones we already have. */
3748     pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3749 
3750     if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3751         pa_log_debug("Volume not changing");
3752         pa_sink_volume_change_free(nc);
3753         return;
3754     }
3755 
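    /* Schedule the hardware volume change for the moment the audio currently
     * buffered in the sink has played out: now + sink latency (+ an optional
     * configurable extra delay). */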
3756     nc->at = pa_sink_get_latency_within_thread(s, false);
3757     nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3758 
3759     if (s->thread_info.volume_changes_tail) {
3760         for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3761             /* If volume is going up let's do it a bit late. If it is going
3762              * down let's do it a bit early. */
3763             if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3764                 if (nc->at + safety_margin > c->at) {
3765                     nc->at += safety_margin;
3766                     direction = "up";
3767                     break;
3768                 }
3769             }
3770             else if (nc->at - safety_margin > c->at) {
3771                 nc->at -= safety_margin;
3772                 direction = "down";
3773                 break;
3774             }
3775         }
3776     }
3777 
3778     if (c == NULL) {
3779         if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3780             nc->at += safety_margin;
3781             direction = "up";
3782         } else {
3783             nc->at -= safety_margin;
3784             direction = "down";
3785         }
3786         PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3787     }
3788     else {
3789         PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3790     }
3791 
3792     pa_log_debug("Volume going %s to %u at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3793 
3794     /* Drop any volume changes that were queued earlier but are scheduled to take effect after this one. */
3795     PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3796         pa_log_debug("Volume change to %u at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3797         pa_sink_volume_change_free(c);
3798     }
3799     nc->next = NULL;
3800     s->thread_info.volume_changes_tail = nc;
3801 }
3802 
3803 /* Called from the IO thread. */
3804 static void pa_sink_volume_change_flush(pa_sink *s) {
3805     pa_sink_volume_change *c = s->thread_info.volume_changes;
3806     pa_assert(s);
3807     s->thread_info.volume_changes = NULL;
3808     s->thread_info.volume_changes_tail = NULL;
3809     while (c) {
3810         pa_sink_volume_change *next = c->next;
3811         pa_sink_volume_change_free(c);
3812         c = next;
3813     }
3814 }
3815 
3816 /* Called from the IO thread. */
3817 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3818     pa_usec_t now;
3819     bool ret = false;
3820 
3821     pa_assert(s);
3822 
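    /* Apply every queued change whose deadline has passed, write the final
     * hardware volume once via write_volume(), and report the time until the
     * next pending change (0 if none is left). */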
3823     if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3824         if (usec_to_next)
3825             *usec_to_next = 0;
3826         return ret;
3827     }
3828 
3829     pa_assert(s->write_volume);
3830 
3831     now = pa_rtclock_now();
3832 
3833     while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3834         pa_sink_volume_change *c = s->thread_info.volume_changes;
3835         PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3836         pa_log_debug("Volume change to %u at %llu was written %llu usec late",
3837                      pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3838         ret = true;
3839         s->thread_info.current_hw_volume = c->hw_volume;
3840         pa_sink_volume_change_free(c);
3841     }
3842 
3843     if (ret)
3844         s->write_volume(s);
3845 
3846     if (s->thread_info.volume_changes) {
3847         if (usec_to_next)
3848             *usec_to_next = s->thread_info.volume_changes->at - now;
3849         if (pa_log_ratelimit(PA_LOG_DEBUG))
3850             pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3851     }
3852     else {
3853         if (usec_to_next)
3854             *usec_to_next = 0;
3855         s->thread_info.volume_changes_tail = NULL;
3856     }
3857     return ret;
3858 }
3859 
3860 /* Called from the IO thread. */
3861 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3862     /* All queued volume events scheduled beyond the current latency are shifted to happen earlier. */
3863     pa_sink_volume_change *c;
3864     pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3865     pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3866     pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3867 
3868     pa_log_debug("latency = %lld", (long long) limit);
3869     limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3870 
3871     PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3872         pa_usec_t modified_limit = limit;
3873         if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3874             modified_limit -= s->thread_info.volume_change_safety_margin;
3875         else
3876             modified_limit += s->thread_info.volume_change_safety_margin;
3877         if (c->at > modified_limit) {
3878             c->at -= rewound;
3879             if (c->at < modified_limit)
3880                 c->at = modified_limit;
3881         }
3882         prev_vol = pa_cvolume_avg(&c->hw_volume);
3883     }
3884     pa_sink_volume_change_apply(s, NULL);
3885 }
3886 
3887 /* Called from the main thread */
3888 /* Gets the list of formats supported by the sink. The members and idxset must
3889  * be freed by the caller. */
3890 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3891     pa_idxset *ret;
3892 
3893     pa_assert(s);
3894 
3895     if (s->get_formats) {
3896         /* Sink supports format query, all is good */
3897         ret = s->get_formats(s);
3898     } else {
3899         /* Sink doesn't support format query, so assume it does PCM */
3900         pa_format_info *f = pa_format_info_new();
3901         f->encoding = PA_ENCODING_PCM;
3902 
3903         ret = pa_idxset_new(NULL, NULL);
3904         pa_idxset_put(ret, f, NULL);
3905     }
3906 
3907     return ret;
3908 }
3909 
3910 /* Called from the main thread */
3911 /* Allows an external source to set what formats a sink supports if the sink
3912  * permits this. The function makes a copy of the formats on success. */
3913 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3914     pa_assert(s);
3915     pa_assert(formats);
3916 
3917     if (s->set_formats)
3918         /* Sink supports setting formats -- let's give it a shot */
3919         return s->set_formats(s, formats);
3920     else
3921         /* Sink doesn't support setting this -- bail out */
3922         return false;
3923 }
3924 
3925 /* Called from the main thread */
3926 /* Checks if the sink can accept this format */
3927 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3928     pa_idxset *formats = NULL;
3929     bool ret = false;
3930 
3931     pa_assert(s);
3932     pa_assert(f);
3933 
3934     formats = pa_sink_get_formats(s);
3935 
3936     if (formats) {
3937         pa_format_info *finfo_device;
3938         uint32_t i;
3939 
3940         PA_IDXSET_FOREACH(finfo_device, formats, i) {
3941             if (pa_format_info_is_compatible(finfo_device, f)) {
3942                 ret = true;
3943                 break;
3944             }
3945         }
3946 
3947         pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3948     }
3949 
3950     return ret;
3951 }
3952 
3953 /* Called from the main thread */
3954 /* Calculates the intersection between formats supported by the sink and
3955  * in_formats, and returns these, in the order of the sink's formats. */
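/* Usage sketch (hypothetical caller): intersect a client's offered formats with
 * what the sink supports before deciding whether a passthrough stream is viable:
 *
 *     pa_idxset *ok = pa_sink_check_formats(sink, client_formats);
 *     if (pa_idxset_isempty(ok)) {
 *         ... fall back to plain PCM ...
 *     }
 *     pa_idxset_free(ok, (pa_free_cb_t) pa_format_info_free);
 */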
3956 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3957     pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3958     pa_format_info *f_sink, *f_in;
3959     uint32_t i, j;
3960 
3961     pa_assert(s);
3962 
3963     if (!in_formats || pa_idxset_isempty(in_formats))
3964         goto done;
3965 
3966     sink_formats = pa_sink_get_formats(s);
3967 
3968     PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3969         PA_IDXSET_FOREACH(f_in, in_formats, j) {
3970             if (pa_format_info_is_compatible(f_sink, f_in))
3971                 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3972         }
3973     }
3974 
3975 done:
3976     if (sink_formats)
3977         pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3978 
3979     return out_formats;
3980 }
3981 
3982 /* Called from the main thread */
3983 void pa_sink_set_sample_format(pa_sink *s, pa_sample_format_t format) {
3984     pa_sample_format_t old_format;
3985 
3986     pa_assert(s);
3987     pa_assert(pa_sample_format_valid(format));
3988 
3989     old_format = s->sample_spec.format;
3990     if (old_format == format)
3991         return;
3992 
3993     pa_log_info("%s: format: %s -> %s",
3994                 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
3995 
3996     s->sample_spec.format = format;
3997 
3998     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3999 }
4000 
4001 /* Called from the main thread */
4002 void pa_sink_set_sample_rate(pa_sink *s, uint32_t rate) {
4003     uint32_t old_rate;
4004 
4005     pa_assert(s);
4006     pa_assert(pa_sample_rate_valid(rate));
4007 
4008     old_rate = s->sample_spec.rate;
4009     if (old_rate == rate)
4010         return;
4011 
4012     pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
4013 
4014     s->sample_spec.rate = rate;
4015 
4016     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4017 }
4018 
4019 /* Called from the main thread. */
4020 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
4021     pa_cvolume old_volume;
4022     char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4023     char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4024 
4025     pa_assert(s);
4026     pa_assert(volume);
4027 
4028     old_volume = s->reference_volume;
4029 
4030     if (pa_cvolume_equal(volume, &old_volume))
4031         return;
4032 
4033     s->reference_volume = *volume;
4034     pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
4035                  pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
4036                                             s->flags & PA_SINK_DECIBEL_VOLUME),
4037                  pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4038                                             s->flags & PA_SINK_DECIBEL_VOLUME));
4039 
4040     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4041     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);
4042 }
4043 
4044 void pa_sink_move_streams_to_default_sink(pa_core *core, pa_sink *old_sink, bool default_sink_changed) {
4045     pa_sink_input *i;
4046     uint32_t idx;
4047 
4048     pa_assert(core);
4049     pa_assert(old_sink);
4050 
4051     if (core->state == PA_CORE_SHUTDOWN)
4052         return;
4053 
4054     if (core->default_sink == NULL || core->default_sink->unlink_requested)
4055         return;
4056 
4057     if (old_sink == core->default_sink)
4058         return;
4059 
4060     PA_IDXSET_FOREACH(i, old_sink->inputs, idx) {
4061         if (!PA_SINK_INPUT_IS_LINKED(i->state))
4062             continue;
4063 
4064         if (!i->sink)
4065             continue;
4066 
4067         /* Don't move sink-inputs which connect filter sinks to their target sinks */
4068         if (i->origin_sink)
4069             continue;
4070 
4071         /* If default_sink_changed is false, the old sink became unavailable, so all streams must be moved. */
4072         if (pa_safe_streq(old_sink->name, i->preferred_sink) && default_sink_changed)
4073             continue;
4074 
4075         if (!pa_sink_input_may_move_to(i, core->default_sink))
4076             continue;
4077 
4078         if (default_sink_changed)
4079             pa_log_info("The sink input %u \"%s\" is moving to %s due to change of the default sink.",
4080                         i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4081         else
4082             pa_log_info("The sink input %u \"%s\" is moving to %s, because the old sink became unavailable.",
4083                         i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4084 
4085         pa_sink_input_move_to(i, core->default_sink, false);
4086     }
4087 }
4088