• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /***
2   This file is part of PulseAudio.
3 
4   Copyright 2004-2006 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6 
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11 
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16 
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20 
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24 
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 
29 #include <pulse/introspect.h>
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
37 
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/stream-util.h>
44 #include <pulsecore/mix.h>
45 #include <pulsecore/core-subscribe.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/play-memblockq.h>
49 #include <pulsecore/flist.h>
50 
51 #include "sink.h"
52 
53 #define MAX_MIX_CHANNELS 32
54 #define MIX_BUFFER_LENGTH (pa_page_size())
55 #define ABSOLUTE_MIN_LATENCY (500)
56 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
57 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
58 
59 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
60 
61 struct pa_sink_volume_change {
62     pa_usec_t at;
63     pa_cvolume hw_volume;
64 
65     PA_LLIST_FIELDS(pa_sink_volume_change);
66 };
67 
68 struct set_state_data {
69     pa_sink_state_t state;
70     pa_suspend_cause_t suspend_cause;
71 };
72 
73 static void sink_free(pa_object *s);
74 
75 static void pa_sink_volume_change_push(pa_sink *s);
76 static void pa_sink_volume_change_flush(pa_sink *s);
77 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
78 
pa_sink_new_data_init(pa_sink_new_data * data)79 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
80     pa_assert(data);
81 
82     pa_zero(*data);
83     data->proplist = pa_proplist_new();
84     data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
85 
86     return data;
87 }
88 
pa_sink_new_data_set_name(pa_sink_new_data * data,const char * name)89 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
90     pa_assert(data);
91 
92     pa_xfree(data->name);
93     data->name = pa_xstrdup(name);
94 }
95 
pa_sink_new_data_set_sample_spec(pa_sink_new_data * data,const pa_sample_spec * spec)96 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
97     pa_assert(data);
98 
99     if ((data->sample_spec_is_set = !!spec))
100         data->sample_spec = *spec;
101 }
102 
pa_sink_new_data_set_channel_map(pa_sink_new_data * data,const pa_channel_map * map)103 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
104     pa_assert(data);
105 
106     if ((data->channel_map_is_set = !!map))
107         data->channel_map = *map;
108 }
109 
pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data * data,const uint32_t alternate_sample_rate)110 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
111     pa_assert(data);
112 
113     data->alternate_sample_rate_is_set = true;
114     data->alternate_sample_rate = alternate_sample_rate;
115 }
116 
pa_sink_new_data_set_avoid_resampling(pa_sink_new_data * data,bool avoid_resampling)117 void pa_sink_new_data_set_avoid_resampling(pa_sink_new_data *data, bool avoid_resampling) {
118     pa_assert(data);
119 
120     data->avoid_resampling_is_set = true;
121     data->avoid_resampling = avoid_resampling;
122 }
123 
pa_sink_new_data_set_volume(pa_sink_new_data * data,const pa_cvolume * volume)124 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
125     pa_assert(data);
126 
127     if ((data->volume_is_set = !!volume))
128         data->volume = *volume;
129 }
130 
pa_sink_new_data_set_muted(pa_sink_new_data * data,bool mute)131 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
132     pa_assert(data);
133 
134     data->muted_is_set = true;
135     data->muted = mute;
136 }
137 
pa_sink_new_data_set_port(pa_sink_new_data * data,const char * port)138 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
139     pa_assert(data);
140 
141     pa_xfree(data->active_port);
142     data->active_port = pa_xstrdup(port);
143 }
144 
pa_sink_new_data_done(pa_sink_new_data * data)145 void pa_sink_new_data_done(pa_sink_new_data *data) {
146     pa_assert(data);
147 
148     pa_proplist_free(data->proplist);
149 
150     if (data->ports)
151         pa_hashmap_free(data->ports);
152 
153     pa_xfree(data->name);
154     pa_xfree(data->active_port);
155 }
156 
157 /* Called from main context */
reset_callbacks(pa_sink * s)158 static void reset_callbacks(pa_sink *s) {
159     pa_assert(s);
160 
161     s->set_state_in_main_thread = NULL;
162     s->set_state_in_io_thread = NULL;
163     s->get_volume = NULL;
164     s->set_volume = NULL;
165     s->write_volume = NULL;
166     s->get_mute = NULL;
167     s->set_mute = NULL;
168     s->request_rewind = NULL;
169     s->update_requested_latency = NULL;
170     s->set_port = NULL;
171     s->get_formats = NULL;
172     s->set_formats = NULL;
173     s->reconfigure = NULL;
174 }
175 
176 /* Called from main context */
pa_sink_new(pa_core * core,pa_sink_new_data * data,pa_sink_flags_t flags)177 pa_sink* pa_sink_new(
178         pa_core *core,
179         pa_sink_new_data *data,
180         pa_sink_flags_t flags) {
181 
182     pa_sink *s;
183     const char *name;
184     char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
185     pa_source_new_data source_data;
186     const char *dn;
187     char *pt;
188 
189     pa_assert(core);
190     pa_assert(data);
191     pa_assert(data->name);
192     pa_assert_ctl_context();
193 
194     s = pa_msgobject_new(pa_sink);
195 
196     if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
197         pa_log_debug("Failed to register name %s.", data->name);
198         pa_xfree(s);
199         return NULL;
200     }
201 
202     pa_sink_new_data_set_name(data, name);
203 
204     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
205         pa_xfree(s);
206         pa_namereg_unregister(core, name);
207         return NULL;
208     }
209 
210     /* FIXME, need to free s here on failure */
211 
212     pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
213     pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
214 
215     pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
216 
217     if (!data->channel_map_is_set)
218         pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
219 
220     pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
221     pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
222 
223     /* FIXME: There should probably be a general function for checking whether
224      * the sink volume is allowed to be set, like there is for sink inputs. */
225     pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
226 
227     if (!data->volume_is_set) {
228         pa_cvolume_reset(&data->volume, data->sample_spec.channels);
229         data->save_volume = false;
230     }
231 
232     pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
233     pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
234 
235     if (!data->muted_is_set)
236         data->muted = false;
237 
238     if (data->card)
239         pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
240 
241     pa_device_init_description(data->proplist, data->card);
242     pa_device_init_icon(data->proplist, true);
243     pa_device_init_intended_roles(data->proplist);
244 
245     if (!data->active_port) {
246         pa_device_port *p = pa_device_port_find_best(data->ports);
247         if (p)
248             pa_sink_new_data_set_port(data, p->name);
249     }
250 
251     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
252         pa_xfree(s);
253         pa_namereg_unregister(core, name);
254         return NULL;
255     }
256 
257     s->parent.parent.free = sink_free;
258     s->parent.process_msg = pa_sink_process_msg;
259 
260     s->core = core;
261     s->state = PA_SINK_INIT;
262     s->flags = flags;
263     s->priority = 0;
264     s->suspend_cause = data->suspend_cause;
265     s->name = pa_xstrdup(name);
266     s->proplist = pa_proplist_copy(data->proplist);
267     s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
268     s->module = data->module;
269     s->card = data->card;
270 
271     s->priority = pa_device_init_priority(s->proplist);
272 
273     s->sample_spec = data->sample_spec;
274     s->channel_map = data->channel_map;
275     s->default_sample_rate = s->sample_spec.rate;
276 
277     if (data->alternate_sample_rate_is_set)
278         s->alternate_sample_rate = data->alternate_sample_rate;
279     else
280         s->alternate_sample_rate = s->core->alternate_sample_rate;
281 
282     if (data->avoid_resampling_is_set)
283         s->avoid_resampling = data->avoid_resampling;
284     else
285         s->avoid_resampling = s->core->avoid_resampling;
286 
287     s->inputs = pa_idxset_new(NULL, NULL);
288     s->n_corked = 0;
289     s->input_to_master = NULL;
290 
291     s->reference_volume = s->real_volume = data->volume;
292     pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
293     s->base_volume = PA_VOLUME_NORM;
294     s->n_volume_steps = PA_VOLUME_NORM+1;
295     s->muted = data->muted;
296     s->refresh_volume = s->refresh_muted = false;
297 
298     reset_callbacks(s);
299     s->userdata = NULL;
300 
301     s->asyncmsgq = NULL;
302 
303     /* As a minor optimization we just steal the list instead of
304      * copying it here */
305     s->ports = data->ports;
306     data->ports = NULL;
307 
308     s->active_port = NULL;
309     s->save_port = false;
310 
311     if (data->active_port)
312         if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
313             s->save_port = data->save_port;
314 
315     /* Hopefully the active port has already been assigned in the previous call
316        to pa_device_port_find_best, but better safe than sorry */
317     if (!s->active_port)
318         s->active_port = pa_device_port_find_best(s->ports);
319 
320     if (s->active_port)
321         s->port_latency_offset = s->active_port->latency_offset;
322     else
323         s->port_latency_offset = 0;
324 
325     s->save_volume = data->save_volume;
326     s->save_muted = data->save_muted;
327 
328     pa_silence_memchunk_get(
329             &core->silence_cache,
330             core->mempool,
331             &s->silence,
332             &s->sample_spec,
333             0);
334 
335     s->thread_info.rtpoll = NULL;
336     s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
337                                                 (pa_free_cb_t) pa_sink_input_unref);
338     s->thread_info.soft_volume =  s->soft_volume;
339     s->thread_info.soft_muted = s->muted;
340     s->thread_info.state = s->state;
341     s->thread_info.rewind_nbytes = 0;
342     s->thread_info.last_rewind_nbytes = 0;
343     s->thread_info.rewind_requested = false;
344     s->thread_info.max_rewind = 0;
345     s->thread_info.max_request = 0;
346     s->thread_info.requested_latency_valid = false;
347     s->thread_info.requested_latency = 0;
348     s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
349     s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
350     s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
351 
352     PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
353     s->thread_info.volume_changes_tail = NULL;
354     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
355     s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
356     s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
357     s->thread_info.port_latency_offset = s->port_latency_offset;
358 
359     /* FIXME: This should probably be moved to pa_sink_put() */
360     pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
361 
362     if (s->card)
363         pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
364 
365     pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
366     pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
367                 s->index,
368                 s->name,
369                 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
370                 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
371                 pt);
372     pa_xfree(pt);
373 
374     pa_source_new_data_init(&source_data);
375     pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
376     pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
377     pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
378     pa_source_new_data_set_avoid_resampling(&source_data, s->avoid_resampling);
379     source_data.name = pa_sprintf_malloc("%s.monitor", name);
380     source_data.driver = data->driver;
381     source_data.module = data->module;
382     source_data.card = data->card;
383 
384     dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
385     pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
386     pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
387 
388     s->monitor_source = pa_source_new(core, &source_data,
389                                       ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
390                                       ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
391 
392     pa_source_new_data_done(&source_data);
393 
394     if (!s->monitor_source) {
395         pa_sink_unlink(s);
396         pa_sink_unref(s);
397         return NULL;
398     }
399 
400     s->monitor_source->monitor_of = s;
401 
402     pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
403     pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
404     pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
405 
406     return s;
407 }
408 
409 /* Called from main context */
sink_set_state(pa_sink * s,pa_sink_state_t state,pa_suspend_cause_t suspend_cause)410 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
411     int ret = 0;
412     bool state_changed;
413     bool suspend_cause_changed;
414     bool suspending;
415     bool resuming;
416     pa_sink_state_t old_state;
417     pa_suspend_cause_t old_suspend_cause;
418 
419     pa_assert(s);
420     pa_assert_ctl_context();
421 
422     state_changed = state != s->state;
423     suspend_cause_changed = suspend_cause != s->suspend_cause;
424 
425     if (!state_changed && !suspend_cause_changed)
426         return 0;
427 
428     suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
429     resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
430 
431     /* If we are resuming, suspend_cause must be 0. */
432     pa_assert(!resuming || !suspend_cause);
433 
434     /* Here's something to think about: what to do with the suspend cause if
435      * resuming the sink fails? The old suspend cause will be incorrect, so we
436      * can't use that. On the other hand, if we set no suspend cause (as is the
437      * case currently), then it looks strange to have a sink suspended without
438      * any cause. It might be a good idea to add a new "resume failed" suspend
439      * cause, or it might just add unnecessary complexity, given that the
440      * current approach of not setting any suspend cause works well enough. */
441 
442     if (s->set_state_in_main_thread) {
443         if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
444             /* set_state_in_main_thread() is allowed to fail only when resuming. */
445             pa_assert(resuming);
446 
447             /* If resuming fails, we set the state to SUSPENDED and
448              * suspend_cause to 0. */
449             state = PA_SINK_SUSPENDED;
450             suspend_cause = 0;
451             state_changed = false;
452             suspend_cause_changed = suspend_cause != s->suspend_cause;
453             resuming = false;
454 
455             /* We know the state isn't changing. If the suspend cause isn't
456              * changing either, then there's nothing more to do. */
457             if (!suspend_cause_changed)
458                 return ret;
459         }
460     }
461 
462     if (s->asyncmsgq) {
463         struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
464 
465         if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
466             /* SET_STATE is allowed to fail only when resuming. */
467             pa_assert(resuming);
468 
469             if (s->set_state_in_main_thread)
470                 s->set_state_in_main_thread(s, PA_SINK_SUSPENDED, 0);
471 
472             /* If resuming fails, we set the state to SUSPENDED and
473              * suspend_cause to 0. */
474             state = PA_SINK_SUSPENDED;
475             suspend_cause = 0;
476             state_changed = false;
477             suspend_cause_changed = suspend_cause != s->suspend_cause;
478             resuming = false;
479 
480             /* We know the state isn't changing. If the suspend cause isn't
481              * changing either, then there's nothing more to do. */
482             if (!suspend_cause_changed)
483                 return ret;
484         }
485     }
486 
487     old_suspend_cause = s->suspend_cause;
488     if (suspend_cause_changed) {
489         char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
490         char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
491 
492         pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
493                      pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
494         s->suspend_cause = suspend_cause;
495     }
496 
497     old_state = s->state;
498     if (state_changed) {
499         pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
500         s->state = state;
501 
502         /* If we enter UNLINKED state, then we don't send change notifications.
503          * pa_sink_unlink() will send unlink notifications instead. */
504         if (state != PA_SINK_UNLINKED) {
505             pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
506             pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
507         }
508     }
509 
510     if (suspending || resuming || suspend_cause_changed) {
511         pa_sink_input *i;
512         uint32_t idx;
513 
514         /* We're suspending or resuming, tell everyone about it */
515 
516         PA_IDXSET_FOREACH(i, s->inputs, idx)
517             if (s->state == PA_SINK_SUSPENDED &&
518                 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
519                 pa_sink_input_kill(i);
520             else if (i->suspend)
521                 i->suspend(i, old_state, old_suspend_cause);
522     }
523 
524     if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
525         pa_source_sync_suspend(s->monitor_source);
526 
527     return ret;
528 }
529 
pa_sink_set_get_volume_callback(pa_sink * s,pa_sink_cb_t cb)530 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
531     pa_assert(s);
532 
533     s->get_volume = cb;
534 }
535 
pa_sink_set_set_volume_callback(pa_sink * s,pa_sink_cb_t cb)536 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
537     pa_sink_flags_t flags;
538 
539     pa_assert(s);
540     pa_assert(!s->write_volume || cb);
541 
542     s->set_volume = cb;
543 
544     /* Save the current flags so we can tell if they've changed */
545     flags = s->flags;
546 
547     if (cb) {
548         /* The sink implementor is responsible for setting decibel volume support */
549         s->flags |= PA_SINK_HW_VOLUME_CTRL;
550     } else {
551         s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
552         /* See note below in pa_sink_put() about volume sharing and decibel volumes */
553         pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
554     }
555 
556     /* If the flags have changed after init, let any clients know via a change event */
557     if (s->state != PA_SINK_INIT && flags != s->flags)
558         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
559 }
560 
pa_sink_set_write_volume_callback(pa_sink * s,pa_sink_cb_t cb)561 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
562     pa_sink_flags_t flags;
563 
564     pa_assert(s);
565     pa_assert(!cb || s->set_volume);
566 
567     s->write_volume = cb;
568 
569     /* Save the current flags so we can tell if they've changed */
570     flags = s->flags;
571 
572     if (cb)
573         s->flags |= PA_SINK_DEFERRED_VOLUME;
574     else
575         s->flags &= ~PA_SINK_DEFERRED_VOLUME;
576 
577     /* If the flags have changed after init, let any clients know via a change event */
578     if (s->state != PA_SINK_INIT && flags != s->flags)
579         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
580 }
581 
pa_sink_set_get_mute_callback(pa_sink * s,pa_sink_get_mute_cb_t cb)582 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
583     pa_assert(s);
584 
585     s->get_mute = cb;
586 }
587 
pa_sink_set_set_mute_callback(pa_sink * s,pa_sink_cb_t cb)588 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
589     pa_sink_flags_t flags;
590 
591     pa_assert(s);
592 
593     s->set_mute = cb;
594 
595     /* Save the current flags so we can tell if they've changed */
596     flags = s->flags;
597 
598     if (cb)
599         s->flags |= PA_SINK_HW_MUTE_CTRL;
600     else
601         s->flags &= ~PA_SINK_HW_MUTE_CTRL;
602 
603     /* If the flags have changed after init, let any clients know via a change event */
604     if (s->state != PA_SINK_INIT && flags != s->flags)
605         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
606 }
607 
enable_flat_volume(pa_sink * s,bool enable)608 static void enable_flat_volume(pa_sink *s, bool enable) {
609     pa_sink_flags_t flags;
610 
611     pa_assert(s);
612 
613     /* Always follow the overall user preference here */
614     enable = enable && s->core->flat_volumes;
615 
616     /* Save the current flags so we can tell if they've changed */
617     flags = s->flags;
618 
619     if (enable)
620         s->flags |= PA_SINK_FLAT_VOLUME;
621     else
622         s->flags &= ~PA_SINK_FLAT_VOLUME;
623 
624     /* If the flags have changed after init, let any clients know via a change event */
625     if (s->state != PA_SINK_INIT && flags != s->flags)
626         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
627 }
628 
pa_sink_enable_decibel_volume(pa_sink * s,bool enable)629 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
630     pa_sink_flags_t flags;
631 
632     pa_assert(s);
633 
634     /* Save the current flags so we can tell if they've changed */
635     flags = s->flags;
636 
637     if (enable) {
638         s->flags |= PA_SINK_DECIBEL_VOLUME;
639         enable_flat_volume(s, true);
640     } else {
641         s->flags &= ~PA_SINK_DECIBEL_VOLUME;
642         enable_flat_volume(s, false);
643     }
644 
645     /* If the flags have changed after init, let any clients know via a change event */
646     if (s->state != PA_SINK_INIT && flags != s->flags)
647         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
648 }
649 
650 /* Called from main context */
pa_sink_put(pa_sink * s)651 void pa_sink_put(pa_sink* s) {
652     pa_sink_assert_ref(s);
653     pa_assert_ctl_context();
654 
655     pa_assert(s->state == PA_SINK_INIT);
656     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
657 
658     /* The following fields must be initialized properly when calling _put() */
659     pa_assert(s->asyncmsgq);
660     pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
661 
662     /* Generally, flags should be initialized via pa_sink_new(). As a
663      * special exception we allow some volume related flags to be set
664      * between _new() and _put() by the callback setter functions above.
665      *
666      * Thus we implement a couple safeguards here which ensure the above
667      * setters were used (or at least the implementor made manual changes
668      * in a compatible way).
669      *
670      * Note: All of these flags set here can change over the life time
671      * of the sink. */
672     pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
673     pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
674     pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
675 
676     /* XXX: Currently decibel volume is disabled for all sinks that use volume
677      * sharing. When the master sink supports decibel volume, it would be good
678      * to have the flag also in the filter sink, but currently we don't do that
679      * so that the flags of the filter sink never change when it's moved from
680      * a master sink to another. One solution for this problem would be to
681      * remove user-visible volume altogether from filter sinks when volume
682      * sharing is used, but the current approach was easier to implement... */
683     /* We always support decibel volumes in software, otherwise we leave it to
684      * the sink implementor to set this flag as needed.
685      *
686      * Note: This flag can also change over the life time of the sink. */
687     if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
688         pa_sink_enable_decibel_volume(s, true);
689         s->soft_volume = s->reference_volume;
690     }
691 
692     /* If the sink implementor support DB volumes by itself, we should always
693      * try and enable flat volumes too */
694     if ((s->flags & PA_SINK_DECIBEL_VOLUME))
695         enable_flat_volume(s, true);
696 
697     if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
698         pa_sink *root_sink = pa_sink_get_master(s);
699 
700         pa_assert(root_sink);
701 
702         s->reference_volume = root_sink->reference_volume;
703         pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
704 
705         s->real_volume = root_sink->real_volume;
706         pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
707     } else
708         /* We assume that if the sink implementor changed the default
709          * volume they did so in real_volume, because that is the usual
710          * place where they are supposed to place their changes.  */
711         s->reference_volume = s->real_volume;
712 
713     s->thread_info.soft_volume = s->soft_volume;
714     s->thread_info.soft_muted = s->muted;
715     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
716 
717     pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
718               || (s->base_volume == PA_VOLUME_NORM
719                   && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
720     pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
721     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
722     pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
723     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
724 
725     pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
726     pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
727     pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
728 
729     if (s->suspend_cause)
730         pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
731     else
732         pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
733 
734     pa_source_put(s->monitor_source);
735 
736     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
737     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
738 
739     /* It's good to fire the SINK_PUT hook before updating the default sink,
740      * because module-switch-on-connect will set the new sink as the default
741      * sink, and if we were to call pa_core_update_default_sink() before that,
742      * the default sink might change twice, causing unnecessary stream moving. */
743 
744     pa_core_update_default_sink(s->core);
745 
746     pa_core_move_streams_to_newly_available_preferred_sink(s->core, s);
747 }
748 
749 /* Called from main context */
pa_sink_unlink(pa_sink * s)750 void pa_sink_unlink(pa_sink* s) {
751     bool linked;
752     pa_sink_input *i, PA_UNUSED *j = NULL;
753 
754     pa_sink_assert_ref(s);
755     pa_assert_ctl_context();
756 
757     /* Please note that pa_sink_unlink() does more than simply
758      * reversing pa_sink_put(). It also undoes the registrations
759      * already done in pa_sink_new()! */
760 
761     if (s->unlink_requested)
762         return;
763 
764     s->unlink_requested = true;
765 
766     linked = PA_SINK_IS_LINKED(s->state);
767 
768     if (linked)
769         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
770 
771     if (s->state != PA_SINK_UNLINKED)
772         pa_namereg_unregister(s->core, s->name);
773     pa_idxset_remove_by_data(s->core->sinks, s, NULL);
774 
775     pa_core_update_default_sink(s->core);
776 
777     if (linked && s->core->rescue_streams)
778 	pa_sink_move_streams_to_default_sink(s->core, s, false);
779 
780     if (s->card)
781         pa_idxset_remove_by_data(s->card->sinks, s, NULL);
782 
783     while ((i = pa_idxset_first(s->inputs, NULL))) {
784         pa_assert(i != j);
785         pa_sink_input_kill(i);
786         j = i;
787     }
788 
789     /* Unlink monitor source before unlinking the sink */
790     if (s->monitor_source)
791         pa_source_unlink(s->monitor_source);
792 
793     if (linked)
794         /* It's important to keep the suspend cause unchanged when unlinking,
795          * because if we remove the SESSION suspend cause here, the alsa sink
796          * will sync its volume with the hardware while another user is
797          * active, messing up the volume for that other user. */
798         sink_set_state(s, PA_SINK_UNLINKED, s->suspend_cause);
799     else
800         s->state = PA_SINK_UNLINKED;
801 
802     reset_callbacks(s);
803 
804     if (linked) {
805         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
806         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
807     }
808 }
809 
810 /* Called from main context */
sink_free(pa_object * o)811 static void sink_free(pa_object *o) {
812     pa_sink *s = PA_SINK(o);
813 
814     pa_assert(s);
815     pa_assert_ctl_context();
816     pa_assert(pa_sink_refcnt(s) == 0);
817     pa_assert(!PA_SINK_IS_LINKED(s->state));
818 
819     pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
820 
821     pa_sink_volume_change_flush(s);
822 
823     if (s->monitor_source) {
824         pa_source_unref(s->monitor_source);
825         s->monitor_source = NULL;
826     }
827 
828     pa_idxset_free(s->inputs, NULL);
829     pa_hashmap_free(s->thread_info.inputs);
830 
831     if (s->silence.memblock)
832         pa_memblock_unref(s->silence.memblock);
833 
834     pa_xfree(s->name);
835     pa_xfree(s->driver);
836 
837     if (s->proplist)
838         pa_proplist_free(s->proplist);
839 
840     if (s->ports)
841         pa_hashmap_free(s->ports);
842 
843     pa_xfree(s);
844 }
845 
846 /* Called from main context, and not while the IO thread is active, please */
pa_sink_set_asyncmsgq(pa_sink * s,pa_asyncmsgq * q)847 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
848     pa_sink_assert_ref(s);
849     pa_assert_ctl_context();
850 
851     s->asyncmsgq = q;
852 
853     if (s->monitor_source)
854         pa_source_set_asyncmsgq(s->monitor_source, q);
855 }
856 
857 /* Called from main context, and not while the IO thread is active, please */
pa_sink_update_flags(pa_sink * s,pa_sink_flags_t mask,pa_sink_flags_t value)858 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
859     pa_sink_flags_t old_flags;
860     pa_sink_input *input;
861     uint32_t idx;
862 
863     pa_sink_assert_ref(s);
864     pa_assert_ctl_context();
865 
866     /* For now, allow only a minimal set of flags to be changed. */
867     pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
868 
869     old_flags = s->flags;
870     s->flags = (s->flags & ~mask) | (value & mask);
871 
872     if (s->flags == old_flags)
873         return;
874 
875     if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
876         pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
877 
878     if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
879         pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
880                      s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
881 
882     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
883     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
884 
885     if (s->monitor_source)
886         pa_source_update_flags(s->monitor_source,
887                                ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
888                                ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
889                                ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
890                                ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
891 
892     PA_IDXSET_FOREACH(input, s->inputs, idx) {
893         if (input->origin_sink)
894             pa_sink_update_flags(input->origin_sink, mask, value);
895     }
896 }
897 
898 /* Called from IO context, or before _put() from main context */
pa_sink_set_rtpoll(pa_sink * s,pa_rtpoll * p)899 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
900     pa_sink_assert_ref(s);
901     pa_sink_assert_io_context(s);
902 
903     s->thread_info.rtpoll = p;
904 
905     if (s->monitor_source)
906         pa_source_set_rtpoll(s->monitor_source, p);
907 }
908 
909 /* Called from main context */
pa_sink_update_status(pa_sink * s)910 int pa_sink_update_status(pa_sink*s) {
911     pa_sink_assert_ref(s);
912     pa_assert_ctl_context();
913     pa_assert(PA_SINK_IS_LINKED(s->state));
914 
915     if (s->state == PA_SINK_SUSPENDED)
916         return 0;
917 
918     return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
919 }
920 
921 /* Called from main context */
pa_sink_suspend(pa_sink * s,bool suspend,pa_suspend_cause_t cause)922 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
923     pa_suspend_cause_t merged_cause;
924 
925     pa_sink_assert_ref(s);
926     pa_assert_ctl_context();
927     pa_assert(PA_SINK_IS_LINKED(s->state));
928     pa_assert(cause != 0);
929 
930     if (suspend)
931         merged_cause = s->suspend_cause | cause;
932     else
933         merged_cause = s->suspend_cause & ~cause;
934 
935     if (merged_cause)
936         return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
937     else
938         return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
939 }
940 
941 /* Called from main context */
pa_sink_move_all_start(pa_sink * s,pa_queue * q)942 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
943     pa_sink_input *i, *n;
944     uint32_t idx;
945 
946     pa_sink_assert_ref(s);
947     pa_assert_ctl_context();
948     pa_assert(PA_SINK_IS_LINKED(s->state));
949 
950     if (!q)
951         q = pa_queue_new();
952 
953     for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
954         n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
955 
956         pa_sink_input_ref(i);
957 
958         if (pa_sink_input_start_move(i) >= 0)
959             pa_queue_push(q, i);
960         else
961             pa_sink_input_unref(i);
962     }
963 
964     return q;
965 }
966 
967 /* Called from main context */
pa_sink_move_all_finish(pa_sink * s,pa_queue * q,bool save)968 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
969     pa_sink_input *i;
970 
971     pa_sink_assert_ref(s);
972     pa_assert_ctl_context();
973     pa_assert(PA_SINK_IS_LINKED(s->state));
974     pa_assert(q);
975 
976     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
977         if (PA_SINK_INPUT_IS_LINKED(i->state)) {
978             if (pa_sink_input_finish_move(i, s, save) < 0)
979                 pa_sink_input_fail_move(i);
980 
981         }
982         pa_sink_input_unref(i);
983     }
984 
985     pa_queue_free(q, NULL);
986 }
987 
988 /* Called from main context */
pa_sink_move_all_fail(pa_queue * q)989 void pa_sink_move_all_fail(pa_queue *q) {
990     pa_sink_input *i;
991 
992     pa_assert_ctl_context();
993     pa_assert(q);
994 
995     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
996         pa_sink_input_fail_move(i);
997         pa_sink_input_unref(i);
998     }
999 
1000     pa_queue_free(q, NULL);
1001 }
1002 
1003  /* Called from IO thread context */
pa_sink_process_input_underruns(pa_sink * s,size_t left_to_play)1004 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1005     pa_sink_input *i;
1006     void *state = NULL;
1007     size_t result = 0;
1008 
1009     pa_sink_assert_ref(s);
1010     pa_sink_assert_io_context(s);
1011 
1012     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1013         size_t uf = i->thread_info.underrun_for_sink;
1014 
1015         /* Propagate down the filter tree */
1016         if (i->origin_sink) {
1017             size_t filter_result, left_to_play_origin;
1018 
1019             /* The combine sink sets i->origin sink but has a different threading model
1020              * than the filter sinks. Therefore the recursion below may not be executed
1021              * because pa_sink_process_input_underruns() was not called in the thread
1022              * context of the origin sink.
1023              * FIXME: It is unclear if some other kind of recursion would be necessary
1024              * for the combine sink. */
1025             if (!i->module || !pa_safe_streq(i->module->name, "module-combine-sink")) {
1026 
1027                 /* The recursive call works in the origin sink domain ... */
1028                 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1029 
1030                 /* .. and returns the time to sleep before waking up. We need the
1031                  * underrun duration for comparisons, so we undo the subtraction on
1032                  * the return value... */
1033                 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1034 
1035                 /* ... and convert it back to the master sink domain */
1036                 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1037 
1038                 /* Remember the longest underrun so far */
1039                 if (filter_result > result)
1040                     result = filter_result;
1041             }
1042         }
1043 
1044         if (uf == 0) {
1045             /* No underrun here, move on */
1046             continue;
1047         } else if (uf >= left_to_play) {
1048             /* The sink has possibly consumed all the data the sink input provided */
1049             pa_sink_input_process_underrun(i);
1050         } else if (uf > result) {
1051             /* Remember the longest underrun so far */
1052             result = uf;
1053         }
1054     }
1055 
1056     if (result > 0)
1057         pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1058                 (long) result, (long) left_to_play - result);
1059     return left_to_play - result;
1060 }
1061 
1062 /* Called from IO thread context */
pa_sink_process_rewind(pa_sink * s,size_t nbytes)1063 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1064     pa_sink_input *i;
1065     void *state = NULL;
1066 
1067     pa_sink_assert_ref(s);
1068     pa_sink_assert_io_context(s);
1069     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1070 
1071     /* If nobody requested this and this is actually no real rewind
1072      * then we can short cut this. Please note that this means that
1073      * not all rewind requests triggered upstream will always be
1074      * translated in actual requests! */
1075     if (!s->thread_info.rewind_requested && nbytes <= 0)
1076         return;
1077 
1078     s->thread_info.rewind_nbytes = 0;
1079     s->thread_info.rewind_requested = false;
1080 
1081     if (nbytes > 0) {
1082         pa_log_debug("Processing rewind...");
1083         if (s->flags & PA_SINK_DEFERRED_VOLUME)
1084             pa_sink_volume_change_rewind(s, nbytes);
1085     }
1086 
1087     /* Save rewind value */
1088     s->thread_info.last_rewind_nbytes = nbytes;
1089 
1090     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1091         pa_sink_input_assert_ref(i);
1092         pa_sink_input_process_rewind(i, nbytes);
1093     }
1094 
1095     if (nbytes > 0) {
1096         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1097             pa_source_process_rewind(s->monitor_source, nbytes);
1098     }
1099 }
1100 
1101 /* Called from IO thread context */
fill_mix_info(pa_sink * s,size_t * length,pa_mix_info * info,unsigned maxinfo)1102 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1103     pa_sink_input *i;
1104     unsigned n = 0;
1105     void *state = NULL;
1106     size_t mixlength = *length;
1107 
1108     pa_sink_assert_ref(s);
1109     pa_sink_assert_io_context(s);
1110     pa_assert(info);
1111 
1112     while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1113         pa_sink_input_assert_ref(i);
1114 
1115         pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1116 
1117         if (mixlength == 0 || info->chunk.length < mixlength)
1118             mixlength = info->chunk.length;
1119 
1120         if (pa_memblock_is_silence(info->chunk.memblock)) {
1121             pa_memblock_unref(info->chunk.memblock);
1122             continue;
1123         }
1124 
1125         info->userdata = pa_sink_input_ref(i);
1126 
1127         pa_assert(info->chunk.memblock);
1128         pa_assert(info->chunk.length > 0);
1129 
1130         info++;
1131         n++;
1132         maxinfo--;
1133     }
1134 
1135     if (mixlength > 0)
1136         *length = mixlength;
1137 
1138     return n;
1139 }
1140 
1141 /* Called from IO thread context */
inputs_drop(pa_sink * s,pa_mix_info * info,unsigned n,pa_memchunk * result)1142 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1143     pa_sink_input *i;
1144     void *state;
1145     unsigned p = 0;
1146     unsigned n_unreffed = 0;
1147 
1148     pa_sink_assert_ref(s);
1149     pa_sink_assert_io_context(s);
1150     pa_assert(result);
1151     pa_assert(result->memblock);
1152     pa_assert(result->length > 0);
1153 
1154     /* We optimize for the case where the order of the inputs has not changed */
1155 
1156     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1157         unsigned j;
1158         pa_mix_info* m = NULL;
1159 
1160         pa_sink_input_assert_ref(i);
1161 
1162         /* Let's try to find the matching entry info the pa_mix_info array */
1163         for (j = 0; j < n; j ++) {
1164 
1165             if (info[p].userdata == i) {
1166                 m = info + p;
1167                 break;
1168             }
1169 
1170             p++;
1171             if (p >= n)
1172                 p = 0;
1173         }
1174 
1175         /* Drop read data */
1176         pa_sink_input_drop(i, result->length);
1177 
1178         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1179 
1180             if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1181                 void *ostate = NULL;
1182                 pa_source_output *o;
1183                 pa_memchunk c;
1184 
1185                 if (m && m->chunk.memblock) {
1186                     c = m->chunk;
1187                     pa_memblock_ref(c.memblock);
1188                     pa_assert(result->length <= c.length);
1189                     c.length = result->length;
1190 
1191                     pa_memchunk_make_writable(&c, 0);
1192                     pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1193                 } else {
1194                     c = s->silence;
1195                     pa_memblock_ref(c.memblock);
1196                     pa_assert(result->length <= c.length);
1197                     c.length = result->length;
1198                 }
1199 
1200                 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1201                     pa_source_output_assert_ref(o);
1202                     pa_assert(o->direct_on_input == i);
1203                     pa_source_post_direct(s->monitor_source, o, &c);
1204                 }
1205 
1206                 pa_memblock_unref(c.memblock);
1207             }
1208         }
1209 
1210         if (m) {
1211             if (m->chunk.memblock) {
1212                 pa_memblock_unref(m->chunk.memblock);
1213                 pa_memchunk_reset(&m->chunk);
1214             }
1215 
1216             pa_sink_input_unref(m->userdata);
1217             m->userdata = NULL;
1218 
1219             n_unreffed += 1;
1220         }
1221     }
1222 
1223     /* Now drop references to entries that are included in the
1224      * pa_mix_info array but don't exist anymore */
1225 
1226     if (n_unreffed < n) {
1227         for (; n > 0; info++, n--) {
1228             if (info->userdata)
1229                 pa_sink_input_unref(info->userdata);
1230             if (info->chunk.memblock)
1231                 pa_memblock_unref(info->chunk.memblock);
1232         }
1233     }
1234 
1235     if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1236         pa_source_post(s->monitor_source, result);
1237 }
1238 
1239 /* Called from IO thread context */
pa_sink_render(pa_sink * s,size_t length,pa_memchunk * result)1240 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1241     pa_mix_info info[MAX_MIX_CHANNELS];
1242     unsigned n;
1243     size_t block_size_max;
1244 
1245     pa_sink_assert_ref(s);
1246     pa_sink_assert_io_context(s);
1247     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1248     pa_assert(pa_frame_aligned(length, &s->sample_spec));
1249     pa_assert(result);
1250 
1251     pa_assert(!s->thread_info.rewind_requested);
1252     pa_assert(s->thread_info.rewind_nbytes == 0);
1253 
1254     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1255         result->memblock = pa_memblock_ref(s->silence.memblock);
1256         result->index = s->silence.index;
1257         result->length = PA_MIN(s->silence.length, length);
1258         return;
1259     }
1260 
1261     pa_sink_ref(s);
1262 
1263     if (length <= 0)
1264         length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1265 
1266     block_size_max = pa_mempool_block_size_max(s->core->mempool);
1267     if (length > block_size_max)
1268         length = pa_frame_align(block_size_max, &s->sample_spec);
1269 
1270     pa_assert(length > 0);
1271 
1272     n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1273 
1274     if (n == 0) {
1275 
1276         *result = s->silence;
1277         pa_memblock_ref(result->memblock);
1278 
1279         if (result->length > length)
1280             result->length = length;
1281 
1282     } else if (n == 1) {
1283         pa_cvolume volume;
1284 
1285         *result = info[0].chunk;
1286         pa_memblock_ref(result->memblock);
1287 
1288         if (result->length > length)
1289             result->length = length;
1290 
1291         pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1292 
1293         if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1294             pa_memblock_unref(result->memblock);
1295             pa_silence_memchunk_get(&s->core->silence_cache,
1296                                     s->core->mempool,
1297                                     result,
1298                                     &s->sample_spec,
1299                                     result->length);
1300         } else if (!pa_cvolume_is_norm(&volume)) {
1301             pa_memchunk_make_writable(result, 0);
1302             pa_volume_memchunk(result, &s->sample_spec, &volume);
1303         }
1304     } else {
1305         void *ptr;
1306         result->memblock = pa_memblock_new(s->core->mempool, length);
1307 
1308         ptr = pa_memblock_acquire(result->memblock);
1309         result->length = pa_mix(info, n,
1310                                 ptr, length,
1311                                 &s->sample_spec,
1312                                 &s->thread_info.soft_volume,
1313                                 s->thread_info.soft_muted);
1314         pa_memblock_release(result->memblock);
1315 
1316         result->index = 0;
1317     }
1318 
1319     inputs_drop(s, info, n, result);
1320 
1321     pa_sink_unref(s);
1322 }
1323 
1324 /* Called from IO thread context */
pa_sink_render_into(pa_sink * s,pa_memchunk * target)1325 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1326     pa_mix_info info[MAX_MIX_CHANNELS];
1327     unsigned n;
1328     size_t length, block_size_max;
1329 
1330     pa_sink_assert_ref(s);
1331     pa_sink_assert_io_context(s);
1332     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1333     pa_assert(target);
1334     pa_assert(target->memblock);
1335     pa_assert(target->length > 0);
1336     pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1337 
1338     pa_assert(!s->thread_info.rewind_requested);
1339     pa_assert(s->thread_info.rewind_nbytes == 0);
1340 
1341     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1342         pa_silence_memchunk(target, &s->sample_spec);
1343         return;
1344     }
1345 
1346     pa_sink_ref(s);
1347 
1348     length = target->length;
1349     block_size_max = pa_mempool_block_size_max(s->core->mempool);
1350     if (length > block_size_max)
1351         length = pa_frame_align(block_size_max, &s->sample_spec);
1352 
1353     pa_assert(length > 0);
1354 
1355     n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1356 
1357     if (n == 0) {
1358         if (target->length > length)
1359             target->length = length;
1360 
1361         pa_silence_memchunk(target, &s->sample_spec);
1362     } else if (n == 1) {
1363         pa_cvolume volume;
1364 
1365         if (target->length > length)
1366             target->length = length;
1367 
1368         pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1369 
1370         if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1371             pa_silence_memchunk(target, &s->sample_spec);
1372         else {
1373             pa_memchunk vchunk;
1374 
1375             vchunk = info[0].chunk;
1376             pa_memblock_ref(vchunk.memblock);
1377 
1378             if (vchunk.length > length)
1379                 vchunk.length = length;
1380 
1381             if (!pa_cvolume_is_norm(&volume)) {
1382                 pa_memchunk_make_writable(&vchunk, 0);
1383                 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1384             }
1385 
1386             pa_memchunk_memcpy(target, &vchunk);
1387             pa_memblock_unref(vchunk.memblock);
1388         }
1389 
1390     } else {
1391         void *ptr;
1392 
1393         ptr = pa_memblock_acquire(target->memblock);
1394 
1395         target->length = pa_mix(info, n,
1396                                 (uint8_t*) ptr + target->index, length,
1397                                 &s->sample_spec,
1398                                 &s->thread_info.soft_volume,
1399                                 s->thread_info.soft_muted);
1400 
1401         pa_memblock_release(target->memblock);
1402     }
1403 
1404     inputs_drop(s, info, n, target);
1405 
1406     pa_sink_unref(s);
1407 }
1408 
1409 /* Called from IO thread context */
pa_sink_render_into_full(pa_sink * s,pa_memchunk * target)1410 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1411     pa_memchunk chunk;
1412     size_t l, d;
1413 
1414     pa_sink_assert_ref(s);
1415     pa_sink_assert_io_context(s);
1416     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1417     pa_assert(target);
1418     pa_assert(target->memblock);
1419     pa_assert(target->length > 0);
1420     pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1421 
1422     pa_assert(!s->thread_info.rewind_requested);
1423     pa_assert(s->thread_info.rewind_nbytes == 0);
1424 
1425     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1426         pa_silence_memchunk(target, &s->sample_spec);
1427         return;
1428     }
1429 
1430     pa_sink_ref(s);
1431 
1432     l = target->length;
1433     d = 0;
1434     while (l > 0) {
1435         chunk = *target;
1436         chunk.index += d;
1437         chunk.length -= d;
1438 
1439         pa_sink_render_into(s, &chunk);
1440 
1441         d += chunk.length;
1442         l -= chunk.length;
1443     }
1444 
1445     pa_sink_unref(s);
1446 }
1447 
1448 /* Called from IO thread context */
pa_sink_render_full(pa_sink * s,size_t length,pa_memchunk * result)1449 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1450     pa_sink_assert_ref(s);
1451     pa_sink_assert_io_context(s);
1452     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1453     pa_assert(length > 0);
1454     pa_assert(pa_frame_aligned(length, &s->sample_spec));
1455     pa_assert(result);
1456 
1457     pa_assert(!s->thread_info.rewind_requested);
1458     pa_assert(s->thread_info.rewind_nbytes == 0);
1459 
1460     pa_sink_ref(s);
1461 
1462     pa_sink_render(s, length, result);
1463 
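    /* pa_sink_render() may return less data than requested. If so, make the
     * chunk writable at the full size and top up the tail with
     * pa_sink_render_into_full(), so the caller always gets exactly 'length'
     * bytes. */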
1464     if (result->length < length) {
1465         pa_memchunk chunk;
1466 
1467         pa_memchunk_make_writable(result, length);
1468 
1469         chunk.memblock = result->memblock;
1470         chunk.index = result->index + result->length;
1471         chunk.length = length - result->length;
1472 
1473         pa_sink_render_into_full(s, &chunk);
1474 
1475         result->length = length;
1476     }
1477 
1478     pa_sink_unref(s);
1479 }
1480 
1481 /* Called from main thread */
1482 void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1483     pa_sample_spec desired_spec;
1484     uint32_t default_rate = s->default_sample_rate;
1485     uint32_t alternate_rate = s->alternate_sample_rate;
1486     uint32_t idx;
1487     pa_sink_input *i;
1488     bool default_rate_is_usable = false;
1489     bool alternate_rate_is_usable = false;
1490     bool avoid_resampling = s->avoid_resampling;
1491 
1492     if (pa_sample_spec_equal(spec, &s->sample_spec))
1493         return;
1494 
1495     if (!s->reconfigure)
1496         return;
1497 
1498     if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1499         pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1500         return;
1501     }
1502 
1503     if (PA_SINK_IS_RUNNING(s->state)) {
1504         pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
1505                     pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1506         return;
1507     }
1508 
1509     if (s->monitor_source) {
1510         if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == true) {
1511             pa_log_info("Cannot update sample spec, monitor source is RUNNING");
1512             return;
1513         }
1514     }
1515 
1516     if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1517         return;
1518 
1519     desired_spec = s->sample_spec;
1520 
1521     if (passthrough) {
1522         /* We have to try to use the sink input format and rate */
1523         desired_spec.format = spec->format;
1524         desired_spec.rate = spec->rate;
1525 
1526     } else if (avoid_resampling) {
1527         /* We just try to set the sink input's sample rate if it's not too low */
1528         if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1529             desired_spec.rate = spec->rate;
1530         desired_spec.format = spec->format;
1531 
1532     } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1533         /* We can directly try to use this rate */
1534         desired_spec.rate = spec->rate;
1535 
1536     }
1537 
1538     if (desired_spec.rate != spec->rate) {
1539         /* See if we can pick a rate that results in less resampling effort */
1540         if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1541             default_rate_is_usable = true;
1542         if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1543             default_rate_is_usable = true;
1544         if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1545             alternate_rate_is_usable = true;
1546         if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1547             alternate_rate_is_usable = true;
1548 
1549         if (alternate_rate_is_usable && !default_rate_is_usable)
1550             desired_spec.rate = alternate_rate;
1551         else
1552             desired_spec.rate = default_rate;
1553     }
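    /* Illustrative example: a 44100 Hz stream on a sink with default_rate
     * 48000 and alternate_rate 44100. 44100 is a multiple of 11025 while
     * 48000 is not, so only the alternate rate is usable and the sink is
     * switched to 44100 Hz, avoiding resampling between the 44.1 kHz and
     * 48 kHz rate families. */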
1554 
1555     if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1556         return;
1557 
1558     if (!passthrough && pa_sink_used_by(s) > 0)
1559         return;
1560 
1561     pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
1562                  s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1563     pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1564 
1565     s->reconfigure(s, &desired_spec, passthrough);
1566 
1567     /* update monitor source as well */
1568     if (s->monitor_source && !passthrough)
1569         pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
1570     pa_log_info("Reconfigured successfully");
1571 
1572     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1573         if (i->state == PA_SINK_INPUT_CORKED)
1574             pa_sink_input_update_resampler(i, true);
1575     }
1576 
1577     pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1578 }
1579 
1580 /* Called from main thread */
1581 size_t pa_sink_get_last_rewind(pa_sink *s) {
1582     size_t rewind_bytes;
1583 
1584     pa_sink_assert_ref(s);
1585     pa_assert_ctl_context();
1586     pa_assert(PA_SINK_IS_LINKED(s->state));
1587 
1588     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LAST_REWIND, &rewind_bytes, 0, NULL) == 0);
1589 
1590     return rewind_bytes;
1591 }
1592 
1593 /* Called from main thread */
1594 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1595     int64_t usec = 0;
1596 
1597     pa_sink_assert_ref(s);
1598     pa_assert_ctl_context();
1599     pa_assert(PA_SINK_IS_LINKED(s->state));
1600 
1601     /* The returned value is supposed to be in the time domain of the sound card! */
1602 
1603     if (s->state == PA_SINK_SUSPENDED)
1604         return 0;
1605 
1606     if (!(s->flags & PA_SINK_LATENCY))
1607         return 0;
1608 
1609     if (s->asyncmsgq == NULL) {
1610         pa_log_error("pa_asyncmsgq is NULL");
1611         return 0;
1612     }
1613     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1614 
1615     /* the return value is unsigned, so check that the offset can be added to usec without
1616      * underflowing. */
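    /* For example, with a reported device latency of 5 ms and a
     * port_latency_offset of -10 ms the sum would be negative, so 0 is
     * returned instead. */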
1617     if (-s->port_latency_offset <= usec)
1618         usec += s->port_latency_offset;
1619     else
1620         usec = 0;
1621 
1622     return (pa_usec_t)usec;
1623 }
1624 
1625 /* Called from IO thread */
1626 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1627     int64_t usec = 0;
1628     pa_msgobject *o;
1629 
1630     pa_sink_assert_ref(s);
1631     pa_sink_assert_io_context(s);
1632     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1633 
1634     /* The returned value is supposed to be in the time domain of the sound card! */
1635 
1636     if (s->thread_info.state == PA_SINK_SUSPENDED)
1637         return 0;
1638 
1639     if (!(s->flags & PA_SINK_LATENCY))
1640         return 0;
1641 
1642     o = PA_MSGOBJECT(s);
1643 
1644     /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1645 
1646     o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1647 
1648     /* If allow_negative is false, the call should only return non-negative values. */
1649     usec += s->thread_info.port_latency_offset;
1650     if (!allow_negative && usec < 0)
1651         usec = 0;
1652 
1653     return usec;
1654 }
1655 
1656 /* Called from the main thread (and also from the IO thread while the main
1657  * thread is waiting).
1658  *
1659  * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1660  * set. Instead, flat volume mode is detected by checking whether the root sink
1661  * has the flag set. */
1662 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1663     pa_sink_assert_ref(s);
1664 
1665     s = pa_sink_get_master(s);
1666 
1667     if (PA_LIKELY(s))
1668         return (s->flags & PA_SINK_FLAT_VOLUME);
1669     else
1670         return false;
1671 }
1672 
1673 /* Check if the sink has a virtual sink attached.
1674  * Called from the IO thread. */
1675 bool pa_sink_has_filter_attached(pa_sink *s) {
1676     bool vsink_attached = false;
1677     void *state = NULL;
1678     pa_sink_input *i;
1679 
1680     pa_assert(s);
1681 
1682     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1683         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1684             if (!i->origin_sink)
1685                 continue;
1686 
1687             vsink_attached = true;
1688             break;
1689         }
1690     }
1691     return vsink_attached;
1692 }
1693 
1694 /* Called from the main thread (and also from the IO thread while the main
1695  * thread is waiting). */
1696 pa_sink *pa_sink_get_master(pa_sink *s) {
1697     pa_sink_assert_ref(s);
1698 
1699     while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1700         if (PA_UNLIKELY(!s->input_to_master))
1701             return NULL;
1702 
1703         s = s->input_to_master->sink;
1704     }
1705 
1706     return s;
1707 }
1708 
1709 /* Called from main context */
1710 bool pa_sink_is_filter(pa_sink *s) {
1711     pa_sink_assert_ref(s);
1712 
1713     return (s->input_to_master != NULL);
1714 }
1715 
1716 /* Called from main context */
1717 bool pa_sink_is_passthrough(pa_sink *s) {
1718     pa_sink_input *alt_i;
1719     uint32_t idx;
1720 
1721     pa_sink_assert_ref(s);
1722 
1723     /* one and only one PASSTHROUGH input can possibly be connected */
1724     if (pa_idxset_size(s->inputs) == 1) {
1725         alt_i = pa_idxset_first(s->inputs, &idx);
1726 
1727         if (pa_sink_input_is_passthrough(alt_i))
1728             return true;
1729     }
1730 
1731     return false;
1732 }
1733 
1734 /* Called from main context */
1735 void pa_sink_enter_passthrough(pa_sink *s) {
1736     pa_cvolume volume;
1737 
1738     /* The sink implementation is reconfigured for passthrough in
1739      * pa_sink_reconfigure(). This function sets the PA core objects to
1740      * passthrough mode. */
1741 
1742     /* disable the monitor in passthrough mode */
1743     if (s->monitor_source) {
1744         pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1745         pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1746     }
1747 
1748     /* set the volume to NORM */
1749     s->saved_volume = *pa_sink_get_volume(s, true);
1750     s->saved_save_volume = s->save_volume;
1751 
1752     pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1753     pa_sink_set_volume(s, &volume, true, false);
1754 
1755     pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1756 }
1757 
1758 /* Called from main context */
1759 void pa_sink_leave_passthrough(pa_sink *s) {
1760     /* Unsuspend monitor */
1761     if (s->monitor_source) {
1762         pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1763         pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1764     }
1765 
1766     /* Restore sink volume to what it was before we entered passthrough mode */
1767     pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1768 
1769     pa_cvolume_init(&s->saved_volume);
1770     s->saved_save_volume = false;
1771 
1772 }
1773 
1774 /* Called from main context. */
1775 static void compute_reference_ratio(pa_sink_input *i) {
1776     unsigned c = 0;
1777     pa_cvolume remapped;
1778     pa_cvolume ratio;
1779 
1780     pa_assert(i);
1781     pa_assert(pa_sink_flat_volume_enabled(i->sink));
1782 
1783     /*
1784      * Calculates the reference ratio from the sink's reference
1785      * volume. This basically calculates:
1786      *
1787      * i->reference_ratio = i->volume / i->sink->reference_volume
1788      */
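    /*
     * Illustrative numbers (assuming PA_VOLUME_NORM acts as unity in the
     * pa_sw_volume_*() fixed-point arithmetic): a stream volume of
     * PA_VOLUME_NORM/2 on a sink whose remapped reference volume is
     * PA_VOLUME_NORM yields a reference ratio of roughly PA_VOLUME_NORM/2.
     */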
1789 
1790     remapped = i->sink->reference_volume;
1791     pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1792 
1793     ratio = i->reference_ratio;
1794 
1795     for (c = 0; c < i->sample_spec.channels; c++) {
1796 
1797         /* We don't update when the sink volume is 0 anyway */
1798         if (remapped.values[c] <= PA_VOLUME_MUTED)
1799             continue;
1800 
1801         /* Don't update the reference ratio unless necessary */
1802         if (pa_sw_volume_multiply(
1803                     ratio.values[c],
1804                     remapped.values[c]) == i->volume.values[c])
1805             continue;
1806 
1807         ratio.values[c] = pa_sw_volume_divide(
1808                 i->volume.values[c],
1809                 remapped.values[c]);
1810     }
1811 
1812     pa_sink_input_set_reference_ratio(i, &ratio);
1813 }
1814 
1815 /* Called from main context. Only called for the root sink in volume sharing
1816  * cases, except for internal recursive calls. */
1817 static void compute_reference_ratios(pa_sink *s) {
1818     uint32_t idx;
1819     pa_sink_input *i;
1820 
1821     pa_sink_assert_ref(s);
1822     pa_assert_ctl_context();
1823     pa_assert(PA_SINK_IS_LINKED(s->state));
1824     pa_assert(pa_sink_flat_volume_enabled(s));
1825 
1826     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1827         compute_reference_ratio(i);
1828 
1829         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1830                 && PA_SINK_IS_LINKED(i->origin_sink->state))
1831             compute_reference_ratios(i->origin_sink);
1832     }
1833 }
1834 
1835 /* Called from main context. Only called for the root sink in volume sharing
1836  * cases, except for internal recursive calls. */
1837 static void compute_real_ratios(pa_sink *s) {
1838     pa_sink_input *i;
1839     uint32_t idx;
1840 
1841     pa_sink_assert_ref(s);
1842     pa_assert_ctl_context();
1843     pa_assert(PA_SINK_IS_LINKED(s->state));
1844     pa_assert(pa_sink_flat_volume_enabled(s));
1845 
1846     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1847         unsigned c;
1848         pa_cvolume remapped;
1849 
1850         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1851             /* The origin sink uses volume sharing, so this input's real ratio
1852              * is handled as a special case - the real ratio must be 0 dB, and
1853              * as a result i->soft_volume must equal i->volume_factor. */
1854             pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1855             i->soft_volume = i->volume_factor;
1856 
1857             if (PA_SINK_IS_LINKED(i->origin_sink->state))
1858                 compute_real_ratios(i->origin_sink);
1859 
1860             continue;
1861         }
1862 
1863         /*
1864          * This basically calculates:
1865          *
1866          * i->real_ratio := i->volume / s->real_volume
1867          * i->soft_volume := i->real_ratio * i->volume_factor
1868          */
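        /* Put differently (a descriptive note): real_volume is what is meant
         * to end up on the device (or in s->soft_volume if there is no hw
         * volume callback), while each stream's soft_volume carries the
         * remaining per-stream attenuation applied in software mixing. */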
1869 
1870         remapped = s->real_volume;
1871         pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1872 
1873         i->real_ratio.channels = i->sample_spec.channels;
1874         i->soft_volume.channels = i->sample_spec.channels;
1875 
1876         for (c = 0; c < i->sample_spec.channels; c++) {
1877 
1878             if (remapped.values[c] <= PA_VOLUME_MUTED) {
1879                 /* We leave i->real_ratio untouched */
1880                 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1881                 continue;
1882             }
1883 
1884             /* Don't lose accuracy unless necessary */
1885             if (pa_sw_volume_multiply(
1886                         i->real_ratio.values[c],
1887                         remapped.values[c]) != i->volume.values[c])
1888 
1889                 i->real_ratio.values[c] = pa_sw_volume_divide(
1890                         i->volume.values[c],
1891                         remapped.values[c]);
1892 
1893             i->soft_volume.values[c] = pa_sw_volume_multiply(
1894                     i->real_ratio.values[c],
1895                     i->volume_factor.values[c]);
1896         }
1897 
1898         /* We don't copy the soft_volume to the thread_info data
1899          * here. That must be done by the caller */
1900     }
1901 }
1902 
1903 static pa_cvolume *cvolume_remap_minimal_impact(
1904         pa_cvolume *v,
1905         const pa_cvolume *template,
1906         const pa_channel_map *from,
1907         const pa_channel_map *to) {
1908 
1909     pa_cvolume t;
1910 
1911     pa_assert(v);
1912     pa_assert(template);
1913     pa_assert(from);
1914     pa_assert(to);
1915     pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1916     pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1917 
1918     /* Much like pa_cvolume_remap(), but tries to minimize impact when
1919      * mapping from sink input to sink volumes:
1920      *
1921      * If template is a possible remapping from v it is used instead
1922      * of remapping anew.
1923      *
1924      * If the channel maps don't match we set an all-channel volume on
1925      * the sink to ensure that changing a volume on one stream has no
1926      * effect that cannot be compensated for in another stream that
1927      * does not have the same channel map as the sink. */
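    /* For example, if a stereo stream's volume is just the current 5.1 sink
     * volume folded down to stereo, the sink volume (the template) is kept
     * as-is; otherwise all sink channels are set to the stream's loudest
     * channel so that the change can still be undone per stream. */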
1928 
1929     if (pa_channel_map_equal(from, to))
1930         return v;
1931 
1932     t = *template;
1933     if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1934         *v = *template;
1935         return v;
1936     }
1937 
1938     pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1939     return v;
1940 }
1941 
1942 /* Called from main thread. Only called for the root sink in volume sharing
1943  * cases, except for internal recursive calls. */
1944 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1945     pa_sink_input *i;
1946     uint32_t idx;
1947 
1948     pa_sink_assert_ref(s);
1949     pa_assert(max_volume);
1950     pa_assert(channel_map);
1951     pa_assert(pa_sink_flat_volume_enabled(s));
1952 
1953     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1954         pa_cvolume remapped;
1955 
1956         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1957             if (PA_SINK_IS_LINKED(i->origin_sink->state))
1958                 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1959 
1960             /* Ignore this input. The origin sink uses volume sharing, so this
1961              * input's volume will be set to be equal to the root sink's real
1962              * volume. Obviously this input's current volume must not then
1963              * affect what the root sink's real volume will be. */
1964             continue;
1965         }
1966 
1967         remapped = i->volume;
1968         cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1969         pa_cvolume_merge(max_volume, max_volume, &remapped);
1970     }
1971 }
1972 
1973 /* Called from main thread. Only called for the root sink in volume sharing
1974  * cases, except for internal recursive calls. */
1975 static bool has_inputs(pa_sink *s) {
1976     pa_sink_input *i;
1977     uint32_t idx;
1978 
1979     pa_sink_assert_ref(s);
1980 
1981     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1982         if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1983             return true;
1984     }
1985 
1986     return false;
1987 }
1988 
1989 /* Called from main thread. Only called for the root sink in volume sharing
1990  * cases, except for internal recursive calls. */
1991 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1992     pa_sink_input *i;
1993     uint32_t idx;
1994 
1995     pa_sink_assert_ref(s);
1996     pa_assert(new_volume);
1997     pa_assert(channel_map);
1998 
1999     s->real_volume = *new_volume;
2000     pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
2001 
2002     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2003         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2004             if (pa_sink_flat_volume_enabled(s)) {
2005                 pa_cvolume new_input_volume;
2006 
2007                 /* Follow the root sink's real volume. */
2008                 new_input_volume = *new_volume;
2009                 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2010                 pa_sink_input_set_volume_direct(i, &new_input_volume);
2011                 compute_reference_ratio(i);
2012             }
2013 
2014             if (PA_SINK_IS_LINKED(i->origin_sink->state))
2015                 update_real_volume(i->origin_sink, new_volume, channel_map);
2016         }
2017     }
2018 }
2019 
2020 /* Called from main thread. Only called for the root sink in shared volume
2021  * cases. */
2022 static void compute_real_volume(pa_sink *s) {
2023     pa_sink_assert_ref(s);
2024     pa_assert_ctl_context();
2025     pa_assert(PA_SINK_IS_LINKED(s->state));
2026     pa_assert(pa_sink_flat_volume_enabled(s));
2027     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2028 
2029     /* This determines the maximum volume of all streams and sets
2030      * s->real_volume accordingly. */
2031 
2032     if (!has_inputs(s)) {
2033         /* In the special case that we have no sink inputs we leave the
2034          * volume unmodified. */
2035         update_real_volume(s, &s->reference_volume, &s->channel_map);
2036         return;
2037     }
2038 
2039     pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2040 
2041     /* First let's determine the new maximum volume of all inputs
2042      * connected to this sink */
2043     get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2044     update_real_volume(s, &s->real_volume, &s->channel_map);
2045 
2046     /* Then, let's update the real ratios/soft volumes of all inputs
2047      * connected to this sink */
2048     compute_real_ratios(s);
2049 }
2050 
2051 /* Called from main thread. Only called for the root sink in shared volume
2052  * cases, except for internal recursive calls. */
2053 static void propagate_reference_volume(pa_sink *s) {
2054     pa_sink_input *i;
2055     uint32_t idx;
2056 
2057     pa_sink_assert_ref(s);
2058     pa_assert_ctl_context();
2059     pa_assert(PA_SINK_IS_LINKED(s->state));
2060     pa_assert(pa_sink_flat_volume_enabled(s));
2061 
2062     /* This is called whenever the sink volume changes that is not
2063      * caused by a sink input volume change. We need to fix up the
2064      * sink input volumes accordingly */
2065 
2066     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2067         pa_cvolume new_volume;
2068 
2069         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2070             if (PA_SINK_IS_LINKED(i->origin_sink->state))
2071                 propagate_reference_volume(i->origin_sink);
2072 
2073             /* Since the origin sink uses volume sharing, this input's volume
2074              * needs to be updated to match the root sink's real volume, but
2075              * that will be done later in update_real_volume(). */
2076             continue;
2077         }
2078 
2079         /* This basically calculates:
2080          *
2081          * i->volume := s->reference_volume * i->reference_ratio  */
2082 
2083         new_volume = s->reference_volume;
2084         pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2085         pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2086         pa_sink_input_set_volume_direct(i, &new_volume);
2087     }
2088 }
2089 
2090 /* Called from main thread. Only called for the root sink in volume sharing
2091  * cases, except for internal recursive calls. The return value indicates
2092  * whether any reference volume actually changed. */
2093 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2094     pa_cvolume volume;
2095     bool reference_volume_changed;
2096     pa_sink_input *i;
2097     uint32_t idx;
2098 
2099     pa_sink_assert_ref(s);
2100     pa_assert(PA_SINK_IS_LINKED(s->state));
2101     pa_assert(v);
2102     pa_assert(channel_map);
2103     pa_assert(pa_cvolume_valid(v));
2104 
2105     volume = *v;
2106     pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2107 
2108     reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2109     pa_sink_set_reference_volume_direct(s, &volume);
2110 
2111     s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2112 
2113     if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2114         /* If the root sink's volume doesn't change, then there can't be any
2115          * changes in the other sinks in the sink tree either.
2116          *
2117          * It's probably theoretically possible that even if the root sink's
2118          * volume changes slightly, some filter sink doesn't change its volume
2119          * due to rounding errors. If that happens, we still want to propagate
2120          * the changed root sink volume to the sinks connected to the
2121          * intermediate sink that didn't change its volume. This theoretical
2122          * possibility is the reason why we have that !(s->flags &
2123          * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2124          * notice even if we returned here false always if
2125          * reference_volume_changed is false. */
2126         return false;
2127 
2128     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2129         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2130                 && PA_SINK_IS_LINKED(i->origin_sink->state))
2131             update_reference_volume(i->origin_sink, v, channel_map, false);
2132     }
2133 
2134     return true;
2135 }
2136 
2137 /* Called from main thread */
2138 void pa_sink_set_volume(
2139         pa_sink *s,
2140         const pa_cvolume *volume,
2141         bool send_msg,
2142         bool save) {
2143 
2144     pa_cvolume new_reference_volume;
2145     pa_sink *root_sink;
2146 
2147     pa_sink_assert_ref(s);
2148     pa_assert_ctl_context();
2149     pa_assert(PA_SINK_IS_LINKED(s->state));
2150     pa_assert(!volume || pa_cvolume_valid(volume));
2151     pa_assert(volume || pa_sink_flat_volume_enabled(s));
2152     pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2153 
2154     /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2155      * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2156     if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2157         pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2158         return;
2159     }
2160 
2161     /* In case of volume sharing, the volume is set for the root sink first,
2162      * from which it's then propagated to the sharing sinks. */
2163     root_sink = pa_sink_get_master(s);
2164 
2165     if (PA_UNLIKELY(!root_sink))
2166         return;
2167 
2168     /* As a special exception we accept mono volumes on all sinks --
2169      * even on those with more complex channel maps */
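    /* E.g. a single-channel volume passed for a stereo sink is applied by
     * scaling the existing reference volume so that its loudest channel
     * matches the requested value (pa_cvolume_scale() below), which keeps
     * the channel balance intact. */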
2170 
2171     if (volume) {
2172         if (pa_cvolume_compatible(volume, &s->sample_spec))
2173             new_reference_volume = *volume;
2174         else {
2175             new_reference_volume = s->reference_volume;
2176             pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2177         }
2178 
2179         pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2180 
2181         if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2182             if (pa_sink_flat_volume_enabled(root_sink)) {
2183                 /* OK, propagate this volume change back to the inputs */
2184                 propagate_reference_volume(root_sink);
2185 
2186                 /* And now recalculate the real volume */
2187                 compute_real_volume(root_sink);
2188             } else
2189                 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2190         }
2191 
2192     } else {
2193         /* If volume is NULL we synchronize the sink's real and
2194          * reference volumes with the stream volumes. */
2195 
2196         pa_assert(pa_sink_flat_volume_enabled(root_sink));
2197 
2198         /* Ok, let's determine the new real volume */
2199         compute_real_volume(root_sink);
2200 
2201         /* Let's 'push' the reference volume if necessary */
2202         pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2203         /* If the sink and its root don't have the same number of channels, we need to remap */
2204         if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2205             pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2206         update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2207 
2208         /* Now that the reference volume is updated, we can update the streams'
2209          * reference ratios. */
2210         compute_reference_ratios(root_sink);
2211     }
2212 
2213     if (root_sink->set_volume) {
2214         /* If we have a function set_volume(), then we do not apply a
2215          * soft volume by default. However, set_volume() is free to
2216          * apply one to root_sink->soft_volume */
2217 
2218         pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2219         if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2220             root_sink->set_volume(root_sink);
2221 
2222     } else
2223         /* If we have no function set_volume(), then the soft volume
2224          * becomes the real volume */
2225         root_sink->soft_volume = root_sink->real_volume;
2226 
2227     /* This tells the sink that soft volume and/or real volume changed */
2228     if (send_msg)
2229         pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2230 }
2231 
2232 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2233  * Only to be called by sink implementor */
2234 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2235 
2236     pa_sink_assert_ref(s);
2237     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2238 
2239     if (s->flags & PA_SINK_DEFERRED_VOLUME)
2240         pa_sink_assert_io_context(s);
2241     else
2242         pa_assert_ctl_context();
2243 
2244     if (!volume)
2245         pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2246     else
2247         s->soft_volume = *volume;
2248 
2249     if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2250         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2251     else
2252         s->thread_info.soft_volume = s->soft_volume;
2253 }
2254 
2255 /* Called from the main thread. Only called for the root sink in volume sharing
2256  * cases, except for internal recursive calls. */
2257 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2258     pa_sink_input *i;
2259     uint32_t idx;
2260 
2261     pa_sink_assert_ref(s);
2262     pa_assert(old_real_volume);
2263     pa_assert_ctl_context();
2264     pa_assert(PA_SINK_IS_LINKED(s->state));
2265 
2266     /* This is called when the hardware's real volume changes due to
2267      * some external event. We copy the real volume into our
2268      * reference volume and then rebuild the stream volumes based on
2269      * i->real_ratio which should stay fixed. */
2270 
2271     if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2272         if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2273             return;
2274 
2275         /* 1. Make the real volume the reference volume */
2276         update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2277     }
2278 
2279     if (pa_sink_flat_volume_enabled(s)) {
2280 
2281         PA_IDXSET_FOREACH(i, s->inputs, idx) {
2282             pa_cvolume new_volume;
2283 
2284             /* 2. Since the sink's reference and real volumes are equal
2285              * now our ratios should be too. */
2286             pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2287 
2288             /* 3. Recalculate the new stream reference volume based on the
2289              * reference ratio and the sink's reference volume.
2290              *
2291              * This basically calculates:
2292              *
2293              * i->volume = s->reference_volume * i->reference_ratio
2294              *
2295              * This is identical to propagate_reference_volume() */
2296             new_volume = s->reference_volume;
2297             pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2298             pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2299             pa_sink_input_set_volume_direct(i, &new_volume);
2300 
2301             if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2302                     && PA_SINK_IS_LINKED(i->origin_sink->state))
2303                 propagate_real_volume(i->origin_sink, old_real_volume);
2304         }
2305     }
2306 
2307     /* Something got changed in the hardware. It probably makes sense
2308      * to save changed hw settings given that hw volume changes not
2309      * triggered by PA are almost certainly done by the user. */
2310     if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2311         s->save_volume = true;
2312 }
2313 
2314 /* Called from io thread */
2315 void pa_sink_update_volume_and_mute(pa_sink *s) {
2316     pa_assert(s);
2317     pa_sink_assert_io_context(s);
2318 
2319     pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2320 }
2321 
2322 /* Called from main thread */
2323 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2324     pa_sink_assert_ref(s);
2325     pa_assert_ctl_context();
2326     pa_assert(PA_SINK_IS_LINKED(s->state));
2327 
2328     if (s->refresh_volume || force_refresh) {
2329         struct pa_cvolume old_real_volume;
2330 
2331         pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2332 
2333         old_real_volume = s->real_volume;
2334 
2335         if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2336             s->get_volume(s);
2337 
2338         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2339 
2340         update_real_volume(s, &s->real_volume, &s->channel_map);
2341         propagate_real_volume(s, &old_real_volume);
2342     }
2343 
2344     return &s->reference_volume;
2345 }
2346 
2347 /* Called from main thread. In volume sharing cases, only the root sink may
2348  * call this. */
2349 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2350     pa_cvolume old_real_volume;
2351 
2352     pa_sink_assert_ref(s);
2353     pa_assert_ctl_context();
2354     pa_assert(PA_SINK_IS_LINKED(s->state));
2355     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2356 
2357     /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2358 
2359     old_real_volume = s->real_volume;
2360     update_real_volume(s, new_real_volume, &s->channel_map);
2361     propagate_real_volume(s, &old_real_volume);
2362 }
2363 
2364 /* Called from main thread */
2365 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2366     bool old_muted;
2367 
2368     pa_sink_assert_ref(s);
2369     pa_assert_ctl_context();
2370 
2371     old_muted = s->muted;
2372 
2373     if (mute == old_muted) {
2374         s->save_muted |= save;
2375         return;
2376     }
2377 
2378     s->muted = mute;
2379     s->save_muted = save;
2380 
2381     if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2382         s->set_mute_in_progress = true;
2383         s->set_mute(s);
2384         s->set_mute_in_progress = false;
2385     }
2386 
2387     if (!PA_SINK_IS_LINKED(s->state))
2388         return;
2389 
2390     pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2391     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2392     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2393     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2394 }
2395 
2396 /* Called from main thread */
2397 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2398 
2399     pa_sink_assert_ref(s);
2400     pa_assert_ctl_context();
2401     pa_assert(PA_SINK_IS_LINKED(s->state));
2402 
2403     if ((s->refresh_muted || force_refresh) && s->get_mute) {
2404         bool mute;
2405 
2406         if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2407             if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2408                 pa_sink_mute_changed(s, mute);
2409         } else {
2410             if (s->get_mute(s, &mute) >= 0)
2411                 pa_sink_mute_changed(s, mute);
2412         }
2413     }
2414 
2415     return s->muted;
2416 }
2417 
2418 /* Called from main thread */
2419 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2420     pa_sink_assert_ref(s);
2421     pa_assert_ctl_context();
2422     pa_assert(PA_SINK_IS_LINKED(s->state));
2423 
2424     if (s->set_mute_in_progress)
2425         return;
2426 
2427     /* pa_sink_set_mute() does this same check, so this may appear redundant,
2428      * but we must have this here also, because the save parameter of
2429      * pa_sink_set_mute() would otherwise have unintended side effects (saving
2430      * the mute state when it shouldn't be saved). */
2431     if (new_muted == s->muted)
2432         return;
2433 
2434     pa_sink_set_mute(s, new_muted, true);
2435 }
2436 
2437 /* Called from main thread */
2438 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2439     pa_sink_assert_ref(s);
2440     pa_assert_ctl_context();
2441 
2442     if (p)
2443         pa_proplist_update(s->proplist, mode, p);
2444 
2445     if (PA_SINK_IS_LINKED(s->state)) {
2446         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2447         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2448     }
2449 
2450     return true;
2451 }
2452 
2453 /* Called from main thread */
2454 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2455 void pa_sink_set_description(pa_sink *s, const char *description) {
2456     const char *old;
2457     pa_sink_assert_ref(s);
2458     pa_assert_ctl_context();
2459 
2460     if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2461         return;
2462 
2463     old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2464 
2465     if (old && description && pa_streq(old, description))
2466         return;
2467 
2468     if (description)
2469         pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2470     else
2471         pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2472 
2473     if (s->monitor_source) {
2474         char *n;
2475 
2476         n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2477         pa_source_set_description(s->monitor_source, n);
2478         pa_xfree(n);
2479     }
2480 
2481     if (PA_SINK_IS_LINKED(s->state)) {
2482         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2483         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2484     }
2485 }
2486 
2487 /* Called from main thread */
2488 unsigned pa_sink_linked_by(pa_sink *s) {
2489     unsigned ret;
2490 
2491     pa_sink_assert_ref(s);
2492     pa_assert_ctl_context();
2493     pa_assert(PA_SINK_IS_LINKED(s->state));
2494 
2495     ret = pa_idxset_size(s->inputs);
2496 
2497     /* We add in the number of streams connected to us here. Please
2498      * note the asymmetry to pa_sink_used_by()! */
2499 
2500     if (s->monitor_source)
2501         ret += pa_source_linked_by(s->monitor_source);
2502 
2503     return ret;
2504 }
2505 
2506 /* Called from main thread */
2507 unsigned pa_sink_used_by(pa_sink *s) {
2508     unsigned ret;
2509 
2510     pa_sink_assert_ref(s);
2511     pa_assert_ctl_context();
2512     pa_assert(PA_SINK_IS_LINKED(s->state));
2513 
2514     ret = pa_idxset_size(s->inputs);
2515     pa_assert(ret >= s->n_corked);
2516 
2517     /* Streams connected to our monitor source do not matter for
2518      * pa_sink_used_by()! */
2519 
2520     return ret - s->n_corked;
2521 }
2522 
2523 /* Called from main thread */
2524 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2525     unsigned ret;
2526     pa_sink_input *i;
2527     uint32_t idx;
2528 
2529     pa_sink_assert_ref(s);
2530     pa_assert_ctl_context();
2531 
2532     if (!PA_SINK_IS_LINKED(s->state))
2533         return 0;
2534 
2535     ret = 0;
2536 
2537     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2538         if (i == ignore_input)
2539             continue;
2540 
2541         /* We do not assert here. It is perfectly valid for a sink input to
2542          * be in the INIT state (i.e. created, marked done but not yet put)
2543          * and we should not care if it's unlinked as it won't contribute
2544          * towards our busy status.
2545          */
2546         if (!PA_SINK_INPUT_IS_LINKED(i->state))
2547             continue;
2548 
2549         if (i->state == PA_SINK_INPUT_CORKED)
2550             continue;
2551 
2552         if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2553             continue;
2554 
2555         ret ++;
2556     }
2557 
2558     if (s->monitor_source)
2559         ret += pa_source_check_suspend(s->monitor_source, ignore_output);
2560 
2561     return ret;
2562 }
2563 
2564 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2565     switch (state) {
2566         case PA_SINK_INIT:          return "INIT";
2567         case PA_SINK_IDLE:          return "IDLE";
2568         case PA_SINK_RUNNING:       return "RUNNING";
2569         case PA_SINK_SUSPENDED:     return "SUSPENDED";
2570         case PA_SINK_UNLINKED:      return "UNLINKED";
2571         case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2572     }
2573 
2574     pa_assert_not_reached();
2575 }
2576 
2577 /* Called from the IO thread */
2578 static void sync_input_volumes_within_thread(pa_sink *s) {
2579     pa_sink_input *i;
2580     void *state = NULL;
2581 
2582     pa_sink_assert_ref(s);
2583     pa_sink_assert_io_context(s);
2584 
2585     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2586         if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2587             continue;
2588 
2589         i->thread_info.soft_volume = i->soft_volume;
2590         pa_sink_input_request_rewind(i, 0, true, false, false);
2591     }
2592 }
2593 
2594 /* Called from the IO thread. Only called for the root sink in volume sharing
2595  * cases, except for internal recursive calls. */
2596 static void set_shared_volume_within_thread(pa_sink *s) {
2597     pa_sink_input *i = NULL;
2598     void *state = NULL;
2599 
2600     pa_sink_assert_ref(s);
2601 
2602     PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2603 
2604     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2605         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2606             set_shared_volume_within_thread(i->origin_sink);
2607     }
2608 }
2609 
2610 /* Called from IO thread. Gets max_rewind limit from sink inputs.
2611  * This function is used to communicate the max_rewind value of a
2612  * virtual sink to the master sink. The get_max_rewind_limit()
2613  * callback is implemented by sink inputs connecting a virtual
2614  * sink to its master. */
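/* For example, if one attached sink input reports a limit of 8192 bytes and
 * another reports 4096 bytes, the smaller limit wins, and the returned value
 * is also never larger than the limit requested by the caller. */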
2615 static size_t get_max_rewind_limit(pa_sink *s, size_t requested_limit) {
2616     pa_sink_input *i;
2617     void *state = NULL;
2618     size_t rewind_limit;
2619 
2620     pa_assert(s);
2621 
2622     /* Get rewind limit in sink sample spec from sink inputs */
2623     rewind_limit = (size_t)(-1);
2624     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2625         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2626 
2627             if (i->get_max_rewind_limit) {
2628                 size_t limit;
2629 
2630                 limit = i->get_max_rewind_limit(i);
2631                 if (rewind_limit == (size_t)(-1) || rewind_limit > limit)
2632                     rewind_limit = limit;
2633             }
2634         }
2635     }
2636 
2637     /* Set max_rewind */
2638     if (rewind_limit != (size_t)(-1))
2639         requested_limit = PA_MIN(rewind_limit, requested_limit);
2640 
2641     return requested_limit;
2642 }
2643 
2644 /* Called from IO thread, except when it is not */
2645 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2646     pa_sink *s = PA_SINK(o);
2647     pa_sink_assert_ref(s);
2648 
2649     switch ((pa_sink_message_t) code) {
2650 
2651         case PA_SINK_MESSAGE_ADD_INPUT: {
2652             pa_sink_input *i = PA_SINK_INPUT(userdata);
2653 
2654             /* If you change anything here, make sure to change the
2655              * sink input handling a few lines down at
2656              * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2657 
2658             pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2659 
2660             /* Since the caller sleeps in pa_sink_input_put(), we can
2661              * safely access data outside of thread_info even though
2662              * it is mutable */
2663 
2664             if ((i->thread_info.sync_prev = i->sync_prev)) {
2665                 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2666                 pa_assert(i->sync_prev->sync_next == i);
2667                 i->thread_info.sync_prev->thread_info.sync_next = i;
2668             }
2669 
2670             if ((i->thread_info.sync_next = i->sync_next)) {
2671                 pa_assert(i->sink == i->thread_info.sync_next->sink);
2672                 pa_assert(i->sync_next->sync_prev == i);
2673                 i->thread_info.sync_next->thread_info.sync_prev = i;
2674             }
2675 
2676             pa_sink_input_attach(i);
2677 
2678             pa_sink_input_set_state_within_thread(i, i->state);
2679 
2680             /* The requested latency of the sink input needs to be fixed up and
2681              * then configured on the sink. If this causes the sink latency to
2682              * go down, the sink implementor is responsible for doing a rewind
2683              * in the update_requested_latency() callback to ensure that the
2684              * sink buffer doesn't contain more data than what the new latency
2685              * allows.
2686              *
2687              * XXX: Does it really make sense to push this responsibility to
2688              * the sink implementors? Wouldn't it be better to do it once in
2689              * the core than many times in the modules? */
2690 
2691             if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2692                 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2693 
2694             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2695             pa_sink_input_update_max_request(i, s->thread_info.max_request);
2696 
2697             /* We don't rewind here automatically. This is left to the
2698              * sink input implementor because some sink inputs need a
2699              * slow start, i.e. need some time to buffer client
2700              * samples before beginning streaming.
2701              *
2702              * XXX: Does it really make sense to push this functionality to
2703              * the sink implementors? Wouldn't it be better to do it once in
2704              * the core than many times in the modules? */
2705 
2706             /* In flat volume mode we need to update the volume as
2707              * well */
2708             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2709         }
2710 
2711         case PA_SINK_MESSAGE_REMOVE_INPUT: {
2712             pa_sink_input *i = PA_SINK_INPUT(userdata);
2713 
2714             /* If you change anything here, make sure to change the
2715              * sink input handling a few lines down at
2716              * PA_SINK_MESSAGE_START_MOVE, too. */
2717 
2718             pa_sink_input_detach(i);
2719 
2720             pa_sink_input_set_state_within_thread(i, i->state);
2721 
2722             /* Since the caller sleeps in pa_sink_input_unlink(),
2723              * we can safely access data outside of thread_info even
2724              * though it is mutable */
2725 
2726             pa_assert(!i->sync_prev);
2727             pa_assert(!i->sync_next);
2728 
2729             if (i->thread_info.sync_prev) {
2730                 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2731                 i->thread_info.sync_prev = NULL;
2732             }
2733 
2734             if (i->thread_info.sync_next) {
2735                 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2736                 i->thread_info.sync_next = NULL;
2737             }
2738 
2739             pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2740             pa_sink_request_rewind(s, (size_t) -1);
2741             pa_sink_invalidate_requested_latency(s, true);
2742 
2743             /* In flat volume mode we need to update the volume as
2744              * well */
2745             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2746         }
2747 
2748         case PA_SINK_MESSAGE_START_MOVE: {
2749             pa_sink_input *i = PA_SINK_INPUT(userdata);
2750 
2751             /* We don't support moving synchronized streams. */
2752             pa_assert(!i->sync_prev);
2753             pa_assert(!i->sync_next);
2754             pa_assert(!i->thread_info.sync_next);
2755             pa_assert(!i->thread_info.sync_prev);
2756 
2757             if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2758 
2759                 /* The old sink probably has some audio from this
2760                  * stream in its buffer. We want to "take it back" as
2761                  * much as possible and play it to the new sink. We
2762                  * don't know at this point how much the old sink can
2763                  * rewind, so we just save some values and reconstruct
2764                  * the render memblockq in finish_move(). */
2765 
2766                 /* Save some current values for restore_render_memblockq() */
2767                 i->thread_info.origin_sink_latency = pa_sink_get_latency_within_thread(s, false);
2768                 // may cause pop during switch between OFFLOAD and PRIMARY
2769                 // i->thread_info.move_start_time = pa_rtclock_now();
2770                 i->thread_info.resampler_delay_frames = 0;
2771                 if (i->thread_info.resampler)
2772                     /* Round down */
2773                     i->thread_info.resampler_delay_frames = pa_resampler_get_delay(i->thread_info.resampler, false);
2774             }
2775 
2776             pa_sink_input_detach(i);
2777 
2778             /* Let's remove the sink input ...*/
2779             pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2780 
2781             /* The rewind must be requested before invalidating the latency, otherwise
2782              * the max_rewind value of the sink may change before the rewind. */
2783             pa_log_debug("Requesting rewind due to started move");
2784             pa_sink_request_rewind(s, (size_t) -1);
2785 
2786             pa_sink_invalidate_requested_latency(s, true);
2787 
2788             /* In flat volume mode we need to update the volume as
2789              * well */
2790             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2791         }
2792 
2793         case PA_SINK_MESSAGE_FINISH_MOVE: {
2794             pa_sink_input *i = PA_SINK_INPUT(userdata);
2795 
2796             /* We don't support moving synchronized streams. */
2797             pa_assert(!i->sync_prev);
2798             pa_assert(!i->sync_next);
2799             pa_assert(!i->thread_info.sync_next);
2800             pa_assert(!i->thread_info.sync_prev);
2801 
2802             pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2803 
2804             pa_sink_input_attach(i);
2805 
2806             if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2807                 pa_usec_t usec = 0;
2808                 size_t nbytes, delay_bytes;
2809 
2810                 /* In the ideal case the new sink would start playing
2811                  * the stream immediately. That requires the sink to
2812                  * be able to rewind all of its latency, which usually
2813                  * isn't possible, so there will probably be some gap
2814                  * before the moved stream becomes audible. We then
2815                  * have two possibilities: 1) start playing the stream
2816                  * from where it is now, or 2) drop the unrewindable
2817                  * latency of the sink from the stream. With option 1
2818                  * we won't lose any audio but the stream will have a
2819                  * pause. With option 2 we may lose some audio but the
2820                  * stream time will be somewhat in sync with the wall
2821                  * clock. Lennart seems to have chosen option 2 (one
2822                  * of the reasons might have been that option 1 is
2823                  * actually much harder to implement), so we drop the
2824                  * latency of the new sink from the moved stream and
2825                  * hope that the sink will undo most of that in the
2826                  * rewind. */
2827 
2828                 /* Get the latency of the sink */
2829                 usec = pa_sink_get_latency_within_thread(s, false);
2830                 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2831 
2832                 /* Calculate number of samples that have been played during the move */
2833                 delay_bytes = 0;
2834                 if (i->thread_info.move_start_time > 0) {
2835                     usec = pa_rtclock_now() - i->thread_info.move_start_time;
2836                     delay_bytes = pa_usec_to_bytes(usec, &s->sample_spec);
2837                 }
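                 /* At this point nbytes covers the new sink's current latency and delay_bytes
                  * approximates the audio that would have played while the stream was detached;
                  * dropping their sum below keeps the stream roughly in sync with the wall
                  * clock, as the comment above explains. */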
2838 
2839                 /* max_rewind must be updated for the sink input because otherwise
2840                  * the data in the render memblockq will get lost */
2841                 pa_sink_input_update_max_rewind(i, nbytes);
2842 
2843                 if (nbytes + delay_bytes > 0)
2844                     pa_sink_input_drop(i, nbytes + delay_bytes);
2845 
2846                 pa_log_debug("Requesting rewind due to finished move");
2847                 pa_sink_request_rewind(s, nbytes);
2848             }
2849 
2850             /* Updating the requested sink latency has to be done
2851              * after the sink rewind request, not before, because
2852              * otherwise the sink may limit the rewind amount
2853              * needlessly. */
2854 
2855             if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2856                 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2857 
2858             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2859             pa_sink_input_update_max_request(i, s->thread_info.max_request);
2860 
2861             /* Reset move variables */
2862             i->thread_info.move_start_time = 0;
2863             i->thread_info.resampler_delay_frames = 0;
2864             i->thread_info.origin_sink_latency = 0;
2865 
2866             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2867         }
2868 
2869         case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2870             pa_sink *root_sink = pa_sink_get_master(s);
2871 
2872             if (PA_LIKELY(root_sink))
2873                 set_shared_volume_within_thread(root_sink);
2874 
2875             return 0;
2876         }
2877 
2878         case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2879 
2880             if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2881                 s->set_volume(s);
2882                 pa_sink_volume_change_push(s);
2883             }
2884             /* Fall through ... */
2885 
2886         case PA_SINK_MESSAGE_SET_VOLUME:
2887 
2888             if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2889                 s->thread_info.soft_volume = s->soft_volume;
2890                 pa_sink_request_rewind(s, (size_t) -1);
2891             }
2892 
2893             /* Fall through ... */
2894 
2895         case PA_SINK_MESSAGE_SYNC_VOLUMES:
2896             sync_input_volumes_within_thread(s);
2897             return 0;
2898 
2899         case PA_SINK_MESSAGE_GET_VOLUME:
2900 
2901             if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2902                 s->get_volume(s);
2903                 pa_sink_volume_change_flush(s);
2904                 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2905             }
2906 
2907             /* In case the sink implementor reset the SW volume. */
2908             if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2909                 s->thread_info.soft_volume = s->soft_volume;
2910                 pa_sink_request_rewind(s, (size_t) -1);
2911             }
2912 
2913             return 0;
2914 
2915         case PA_SINK_MESSAGE_SET_MUTE:
2916 
2917             if (s->thread_info.soft_muted != s->muted) {
2918                 s->thread_info.soft_muted = s->muted;
2919                 pa_sink_request_rewind(s, (size_t) -1);
2920             }
2921 
2922             if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2923                 s->set_mute(s);
2924 
2925             return 0;
2926 
2927         case PA_SINK_MESSAGE_GET_MUTE:
2928 
2929             if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2930                 return s->get_mute(s, userdata);
2931 
2932             return 0;
2933 
2934         case PA_SINK_MESSAGE_SET_STATE: {
2935             struct set_state_data *data = userdata;
2936             bool suspend_change =
2937                 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(data->state)) ||
2938                 (PA_SINK_IS_OPENED(s->thread_info.state) && data->state == PA_SINK_SUSPENDED);
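             /* suspend_change is true when this transition crosses the suspended boundary
              * in either direction; in that case the inputs are notified below through
              * their suspend_within_thread() callbacks. */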
2939 
2940             if (s->set_state_in_io_thread) {
2941                 int r;
2942 
2943                 if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)
2944                     return r;
2945             }
2946 
2947             s->thread_info.state = data->state;
2948 
2949             if (s->thread_info.state == PA_SINK_SUSPENDED) {
2950                 s->thread_info.rewind_nbytes = 0;
2951                 s->thread_info.rewind_requested = false;
2952             }
2953 
2954             if (suspend_change) {
2955                 pa_sink_input *i;
2956                 void *state = NULL;
2957 
2958                 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2959                     if (i->suspend_within_thread)
2960                         i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2961             }
2962 
2963             return 0;
2964         }
2965 
2966         case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2967 
2968             pa_usec_t *usec = userdata;
2969             *usec = pa_sink_get_requested_latency_within_thread(s);
2970 
2971             /* Yes, that's right, the IO thread will see -1 when no
2972              * explicit requested latency is configured, the main
2973              * thread will see max_latency */
2974             if (*usec == (pa_usec_t) -1)
2975                 *usec = s->thread_info.max_latency;
2976 
2977             return 0;
2978         }
2979 
2980         case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2981             pa_usec_t *r = userdata;
2982 
2983             pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2984 
2985             return 0;
2986         }
2987 
2988         case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2989             pa_usec_t *r = userdata;
2990 
2991             r[0] = s->thread_info.min_latency;
2992             r[1] = s->thread_info.max_latency;
2993 
2994             return 0;
2995         }
2996 
2997         case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2998 
2999             *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
3000             return 0;
3001 
3002         case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
3003 
3004             pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
3005             return 0;
3006 
3007         case PA_SINK_MESSAGE_GET_MAX_REWIND:
3008 
3009             *((size_t*) userdata) = s->thread_info.max_rewind;
3010             return 0;
3011 
3012         case PA_SINK_MESSAGE_GET_LAST_REWIND:
3013 
3014             *((size_t*) userdata) = s->thread_info.last_rewind_nbytes;
3015             return 0;
3016 
3017         case PA_SINK_MESSAGE_GET_MAX_REQUEST:
3018 
3019             *((size_t*) userdata) = s->thread_info.max_request;
3020             return 0;
3021 
3022         case PA_SINK_MESSAGE_SET_MAX_REWIND:
3023 
3024             pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3025             return 0;
3026 
3027         case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3028 
3029             pa_sink_set_max_request_within_thread(s, (size_t) offset);
3030             return 0;
3031 
3032         case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3033             /* This message is sent from the IO thread and handled in the main thread. */
3034             pa_assert_ctl_context();
3035 
3036             /* Make sure we're not messing with main thread when no longer linked */
3037             if (!PA_SINK_IS_LINKED(s->state))
3038                 return 0;
3039 
3040             pa_sink_get_volume(s, true);
3041             pa_sink_get_mute(s, true);
3042             return 0;
3043 
3044         case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
3045             s->thread_info.port_latency_offset = offset;
3046             return 0;
3047 
3048         case PA_SINK_MESSAGE_GET_LATENCY:
3049         case PA_SINK_MESSAGE_MAX:
3050             ;
3051     }
3052 
3053     return -1;
3054 }
3055 
3056 /* Called from main thread */
3057 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
3058     pa_sink *sink;
3059     uint32_t idx;
3060     int ret = 0;
3061 
3062     pa_core_assert_ref(c);
3063     pa_assert_ctl_context();
3064     pa_assert(cause != 0);
3065 
3066     PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3067         int r;
3068 
3069         if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3070             ret = r;
3071     }
3072 
3073     return ret;
3074 }
3075 
3076 /* Called from IO thread */
3077 void pa_sink_detach_within_thread(pa_sink *s) {
3078     pa_sink_input *i;
3079     void *state = NULL;
3080 
3081     pa_sink_assert_ref(s);
3082     pa_sink_assert_io_context(s);
3083     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3084 
3085     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3086         pa_sink_input_detach(i);
3087 
3088     if (s->monitor_source)
3089         pa_source_detach_within_thread(s->monitor_source);
3090 }
3091 
3092 /* Called from IO thread */
3093 void pa_sink_attach_within_thread(pa_sink *s) {
3094     pa_sink_input *i;
3095     void *state = NULL;
3096 
3097     pa_sink_assert_ref(s);
3098     pa_sink_assert_io_context(s);
3099     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3100 
3101     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3102         pa_sink_input_attach(i);
3103 
3104     if (s->monitor_source)
3105         pa_source_attach_within_thread(s->monitor_source);
3106 }
3107 
3108 /* Called from IO thread */
3109 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
3110     pa_sink_assert_ref(s);
3111     pa_sink_assert_io_context(s);
3112     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3113 
3114     if (nbytes == (size_t) -1)
3115         nbytes = s->thread_info.max_rewind;
3116 
3117     nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
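         /* (size_t) -1 means "rewind as much as possible". Requests are coalesced:
          * a new request may only grow the pending rewind amount, never shrink it. */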
3118 
3119     if (s->thread_info.rewind_requested &&
3120         nbytes <= s->thread_info.rewind_nbytes)
3121         return;
3122 
3123     s->thread_info.rewind_nbytes = nbytes;
3124     s->thread_info.rewind_requested = true;
3125 
3126     if (s->request_rewind)
3127         s->request_rewind(s);
3128 }
3129 
3130 /* Called from IO thread */
3131 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3132     pa_usec_t result = (pa_usec_t) -1;
3133     pa_sink_input *i;
3134     void *state = NULL;
3135     pa_usec_t monitor_latency;
3136 
3137     pa_sink_assert_ref(s);
3138     pa_sink_assert_io_context(s);
3139 
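     /* For fixed-latency sinks the answer is simply the fixed latency, clamped to the
      * allowed range. Otherwise the result is the smallest latency requested by any
      * sink input or by the monitor source, clamped to the sink's latency range, and
      * it is cached (while the sink is linked) until it gets invalidated again. */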
3140     if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3141         return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3142 
3143     if (s->thread_info.requested_latency_valid)
3144         return s->thread_info.requested_latency;
3145 
3146     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3147         if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3148             (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3149             result = i->thread_info.requested_sink_latency;
3150 
3151     monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3152 
3153     if (monitor_latency != (pa_usec_t) -1 &&
3154         (result == (pa_usec_t) -1 || result > monitor_latency))
3155         result = monitor_latency;
3156 
3157     if (result != (pa_usec_t) -1)
3158         result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3159 
3160     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3161         /* Only cache if properly initialized */
3162         s->thread_info.requested_latency = result;
3163         s->thread_info.requested_latency_valid = true;
3164     }
3165 
3166     return result;
3167 }
3168 
3169 /* Called from main thread */
3170 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3171     pa_usec_t usec = 0;
3172 
3173     pa_sink_assert_ref(s);
3174     pa_assert_ctl_context();
3175     pa_assert(PA_SINK_IS_LINKED(s->state));
3176 
3177     if (s->state == PA_SINK_SUSPENDED)
3178         return 0;
3179 
3180     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3181 
3182     return usec;
3183 }
3184 
3185 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3186 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3187     pa_sink_input *i;
3188     void *state = NULL;
3189 
3190     pa_sink_assert_ref(s);
3191     pa_sink_assert_io_context(s);
3192 
3193     max_rewind = get_max_rewind_limit(s, max_rewind);
3194 
3195     if (max_rewind == s->thread_info.max_rewind)
3196         return;
3197 
3198     s->thread_info.max_rewind = max_rewind;
3199 
3200     if (PA_SINK_IS_LINKED(s->thread_info.state))
3201         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3202             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3203 
3204     if (s->monitor_source)
3205         pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3206 }
3207 
3208 /* Called from main thread */
3209 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3210     pa_sink_assert_ref(s);
3211     pa_assert_ctl_context();
3212 
3213     if (PA_SINK_IS_LINKED(s->state))
3214         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3215     else
3216         pa_sink_set_max_rewind_within_thread(s, max_rewind);
3217 }
3218 
3219 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3220 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3221     void *state = NULL;
3222 
3223     pa_sink_assert_ref(s);
3224     pa_sink_assert_io_context(s);
3225 
3226     if (max_request == s->thread_info.max_request)
3227         return;
3228 
3229     s->thread_info.max_request = max_request;
3230 
3231     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3232         pa_sink_input *i;
3233 
3234         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3235             pa_sink_input_update_max_request(i, s->thread_info.max_request);
3236     }
3237 }
3238 
3239 /* Called from main thread */
3240 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3241     pa_sink_assert_ref(s);
3242     pa_assert_ctl_context();
3243 
3244     if (PA_SINK_IS_LINKED(s->state))
3245         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3246     else
3247         pa_sink_set_max_request_within_thread(s, max_request);
3248 }
3249 
3250 /* Called from IO thread */
3251 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3252     pa_sink_input *i;
3253     void *state = NULL;
3254 
3255     pa_sink_assert_ref(s);
3256     pa_sink_assert_io_context(s);
3257 
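     /* Only dynamic-latency sinks have a cached requested latency to invalidate.
      * When "dynamic" is true the change being signalled presumably only matters
      * for dynamic-latency sinks, so fixed-latency sinks return early and skip
      * the notifications below; with "dynamic" false they still propagate it. */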
3258     if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3259         s->thread_info.requested_latency_valid = false;
3260     else if (dynamic)
3261         return;
3262 
3263     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3264 
3265         if (s->update_requested_latency)
3266             s->update_requested_latency(s);
3267 
3268         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3269             if (i->update_sink_requested_latency)
3270                 i->update_sink_requested_latency(i);
3271     }
3272 }
3273 
3274 /* Called from main thread */
3275 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3276     pa_sink_assert_ref(s);
3277     pa_assert_ctl_context();
3278 
3279     /* min_latency == 0:           no limit
3280      * min_latency anything else:  specified limit
3281      *
3282      * Similar for max_latency */
3283 
3284     if (min_latency < ABSOLUTE_MIN_LATENCY)
3285         min_latency = ABSOLUTE_MIN_LATENCY;
3286 
3287     if (max_latency <= 0 ||
3288         max_latency > ABSOLUTE_MAX_LATENCY)
3289         max_latency = ABSOLUTE_MAX_LATENCY;
3290 
3291     pa_assert(min_latency <= max_latency);
3292 
3293     /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3294     pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3295                max_latency == ABSOLUTE_MAX_LATENCY) ||
3296               (s->flags & PA_SINK_DYNAMIC_LATENCY));
3297 
3298     if (PA_SINK_IS_LINKED(s->state)) {
3299         pa_usec_t r[2];
3300 
3301         r[0] = min_latency;
3302         r[1] = max_latency;
3303 
3304         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3305     } else
3306         pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3307 }
3308 
3309 /* Called from main thread */
3310 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3311     pa_sink_assert_ref(s);
3312     pa_assert_ctl_context();
3313     pa_assert(min_latency);
3314     pa_assert(max_latency);
3315 
3316     if (PA_SINK_IS_LINKED(s->state)) {
3317         pa_usec_t r[2] = { 0, 0 };
3318 
3319         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3320 
3321         *min_latency = r[0];
3322         *max_latency = r[1];
3323     } else {
3324         *min_latency = s->thread_info.min_latency;
3325         *max_latency = s->thread_info.max_latency;
3326     }
3327 }
3328 
3329 /* Called from IO thread */
3330 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3331     pa_sink_assert_ref(s);
3332     pa_sink_assert_io_context(s);
3333 
3334     pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3335     pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3336     pa_assert(min_latency <= max_latency);
3337 
3338     /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3339     pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3340                max_latency == ABSOLUTE_MAX_LATENCY) ||
3341               (s->flags & PA_SINK_DYNAMIC_LATENCY));
3342 
3343     if (s->thread_info.min_latency == min_latency &&
3344         s->thread_info.max_latency == max_latency)
3345         return;
3346 
3347     s->thread_info.min_latency = min_latency;
3348     s->thread_info.max_latency = max_latency;
3349 
3350     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3351         pa_sink_input *i;
3352         void *state = NULL;
3353 
3354         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3355             if (i->update_sink_latency_range)
3356                 i->update_sink_latency_range(i);
3357     }
3358 
3359     pa_sink_invalidate_requested_latency(s, false);
3360 
3361     pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3362 }
3363 
3364 /* Called from main thread */
3365 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3366     pa_sink_assert_ref(s);
3367     pa_assert_ctl_context();
3368 
3369     if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3370         /* pa_assert(latency == 0); */
3371         return;
3372     }
3373 
3374     if (latency < ABSOLUTE_MIN_LATENCY)
3375         latency = ABSOLUTE_MIN_LATENCY;
3376 
3377     if (latency > ABSOLUTE_MAX_LATENCY)
3378         latency = ABSOLUTE_MAX_LATENCY;
3379 
3380     if (PA_SINK_IS_LINKED(s->state))
3381         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3382     else
3383         s->thread_info.fixed_latency = latency;
3384 
3385     pa_source_set_fixed_latency(s->monitor_source, latency);
3386 }
3387 
3388 /* Called from main thread */
3389 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3390     pa_usec_t latency;
3391 
3392     pa_sink_assert_ref(s);
3393     pa_assert_ctl_context();
3394 
3395     if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3396         return 0;
3397 
3398     if (PA_SINK_IS_LINKED(s->state))
3399         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3400     else
3401         latency = s->thread_info.fixed_latency;
3402 
3403     return latency;
3404 }
3405 
3406 /* Called from IO thread */
3407 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3408     pa_sink_assert_ref(s);
3409     pa_sink_assert_io_context(s);
3410 
3411     if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3412         pa_assert(latency == 0);
3413         s->thread_info.fixed_latency = 0;
3414 
3415         if (s->monitor_source)
3416             pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3417 
3418         return;
3419     }
3420 
3421     pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3422     pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3423 
3424     if (s->thread_info.fixed_latency == latency)
3425         return;
3426 
3427     s->thread_info.fixed_latency = latency;
3428 
3429     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3430         pa_sink_input *i;
3431         void *state = NULL;
3432 
3433         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3434             if (i->update_sink_fixed_latency)
3435                 i->update_sink_fixed_latency(i);
3436     }
3437 
3438     pa_sink_invalidate_requested_latency(s, false);
3439 
3440     pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3441 }
3442 
3443 /* Called from main context */
3444 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3445     pa_sink_assert_ref(s);
3446 
3447     s->port_latency_offset = offset;
3448 
3449     if (PA_SINK_IS_LINKED(s->state))
3450         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3451     else
3452         s->thread_info.port_latency_offset = offset;
3453 
3454     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3455 }
3456 
3457 /* Called from main context */
3458 size_t pa_sink_get_max_rewind(pa_sink *s) {
3459     size_t r;
3460     pa_assert_ctl_context();
3461     pa_sink_assert_ref(s);
3462 
3463     if (!PA_SINK_IS_LINKED(s->state))
3464         return s->thread_info.max_rewind;
3465 
3466     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3467 
3468     return r;
3469 }
3470 
3471 /* Called from main context */
3472 size_t pa_sink_get_max_request(pa_sink *s) {
3473     size_t r;
3474     pa_sink_assert_ref(s);
3475     pa_assert_ctl_context();
3476 
3477     if (!PA_SINK_IS_LINKED(s->state))
3478         return s->thread_info.max_request;
3479 
3480     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3481 
3482     return r;
3483 }
3484 
3485 /* Called from main context */
3486 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3487     pa_device_port *port;
3488 
3489     pa_sink_assert_ref(s);
3490     pa_assert_ctl_context();
3491 
3492     if (!s->set_port) {
3493         pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3494         return -PA_ERR_NOTIMPLEMENTED;
3495     }
3496 
3497     if (!name)
3498         return -PA_ERR_NOENTITY;
3499 
3500     if (!(port = pa_hashmap_get(s->ports, name)))
3501         return -PA_ERR_NOENTITY;
3502 
3503     if (s->active_port == port) {
3504         s->save_port = s->save_port || save;
3505         return 0;
3506     }
3507 
3508     s->port_changing = true;
3509 
3510     if (s->set_port(s, port) < 0)
3511         return -PA_ERR_NOENTITY;
3512 
3513     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3514 
3515     pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3516 
3517     s->active_port = port;
3518     s->save_port = save;
3519 
3520     pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3521 
3522     /* The active port affects the default sink selection. */
3523     pa_core_update_default_sink(s->core);
3524 
3525     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3526 
3527     s->port_changing = false;
3528 
3529     return 0;
3530 }
3531 
3532 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3533     const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3534 
3535     pa_assert(p);
3536 
3537     if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3538         return true;
3539 
3540     if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3541 
3542         if (pa_streq(ff, "microphone"))
3543             t = "audio-input-microphone";
3544         else if (pa_streq(ff, "webcam"))
3545             t = "camera-web";
3546         else if (pa_streq(ff, "computer"))
3547             t = "computer";
3548         else if (pa_streq(ff, "handset"))
3549             t = "phone";
3550         else if (pa_streq(ff, "portable"))
3551             t = "multimedia-player";
3552         else if (pa_streq(ff, "tv"))
3553             t = "video-display";
3554 
3555         /*
3556          * The following icons are not part of the icon naming spec,
3557          * because Rodney Dawes sucks as the maintainer of that spec.
3558          *
3559          * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3560          */
3561         else if (pa_streq(ff, "headset"))
3562             t = "audio-headset";
3563         else if (pa_streq(ff, "headphone"))
3564             t = "audio-headphones";
3565         else if (pa_streq(ff, "speaker"))
3566             t = "audio-speakers";
3567         else if (pa_streq(ff, "hands-free"))
3568             t = "audio-handsfree";
3569     }
3570 
3571     if (!t)
3572         if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3573             if (pa_streq(c, "modem"))
3574                 t = "modem";
3575 
3576     if (!t) {
3577         if (is_sink)
3578             t = "audio-card";
3579         else
3580             t = "audio-input-microphone";
3581     }
3582 
3583     if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3584         if (strstr(profile, "analog"))
3585             s = "-analog";
3586         else if (strstr(profile, "iec958"))
3587             s = "-iec958";
3588         else if (strstr(profile, "hdmi"))
3589             s = "-hdmi";
3590     }
3591 
3592     bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3593 
3594     pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3595 
3596     return true;
3597 }
3598 
3599 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3600     const char *s, *d = NULL, *k;
3601     pa_assert(p);
3602 
3603     if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3604         return true;
3605 
3606     if (card)
3607         if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3608             d = s;
3609 
3610     if (!d)
3611         if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3612             if (pa_streq(s, "internal"))
3613                 d = _("Built-in Audio");
3614 
3615     if (!d)
3616         if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3617             if (pa_streq(s, "modem"))
3618                 d = _("Modem");
3619 
3620     if (!d)
3621         d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3622 
3623     if (!d)
3624         return false;
3625 
3626     k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3627 
3628     if (d && k)
3629         pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3630     else if (d)
3631         pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3632 
3633     return true;
3634 }
3635 
3636 bool pa_device_init_intended_roles(pa_proplist *p) {
3637     const char *s;
3638     pa_assert(p);
3639 
3640     if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3641         return true;
3642 
3643     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3644         if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3645             || pa_streq(s, "headset")) {
3646             pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3647             return true;
3648         }
3649 
3650     return false;
3651 }
3652 
3653 unsigned pa_device_init_priority(pa_proplist *p) {
3654     const char *s;
3655     unsigned priority = 0;
3656 
3657     pa_assert(p);
3658 
3659     /* JACK sinks and sources get very high priority so that we'll switch the
3660      * default devices automatically when jackd starts and
3661      * module-jackdbus-detect creates the jack sink and source. */
3662     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_API))) {
3663         if (pa_streq(s, "jack"))
3664             priority += 10000;
3665     }
3666 
3667     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3668 
3669         if (pa_streq(s, "sound"))
3670             priority += 9000;
3671         else if (!pa_streq(s, "modem"))
3672             priority += 1000;
3673     }
3674 
3675     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3676 
3677         if (pa_streq(s, "headphone"))
3678             priority += 900;
3679         else if (pa_streq(s, "hifi"))
3680             priority += 600;
3681         else if (pa_streq(s, "speaker"))
3682             priority += 500;
3683         else if (pa_streq(s, "portable"))
3684             priority += 450;
3685     }
3686 
3687     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3688 
3689         if (pa_streq(s, "bluetooth"))
3690             priority += 50;
3691         else if (pa_streq(s, "usb"))
3692             priority += 40;
3693         else if (pa_streq(s, "pci"))
3694             priority += 30;
3695     }
3696 
3697     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3698 
3699         if (pa_startswith(s, "analog-")) {
3700             priority += 9;
3701 
3702             /* If an analog device has an intended role of "phone", it probably
3703              * co-exists with another device that is meant for everything else,
3704              * and that other device should have higher priority than the phone
3705              * device. */
3706             if (pa_str_in_list_spaces(pa_proplist_gets(p, PA_PROP_DEVICE_INTENDED_ROLES), "phone"))
3707                 priority -= 1;
3708         }
3709         else if (pa_startswith(s, "iec958-"))
3710             priority += 7;
3711     }
3712 
3713     return priority;
3714 }
3715 
3716 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3717 
3718 /* Called from the IO thread. */
3719 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3720     pa_sink_volume_change *c;
3721     if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3722         c = pa_xnew(pa_sink_volume_change, 1);
3723 
3724     PA_LLIST_INIT(pa_sink_volume_change, c);
3725     c->at = 0;
3726     pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3727     return c;
3728 }
3729 
3730 /* Called from the IO thread. */
3731 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3732     pa_assert(c);
3733     if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3734         pa_xfree(c);
3735 }
3736 
3737 /* Called from the IO thread. */
3738 void pa_sink_volume_change_push(pa_sink *s) {
3739     pa_sink_volume_change *c = NULL;
3740     pa_sink_volume_change *nc = NULL;
3741     pa_sink_volume_change *pc = NULL;
3742     uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3743 
3744     const char *direction = NULL;
3745 
3746     pa_assert(s);
3747     nc = pa_sink_volume_change_new(s);
3748 
3749     /* NOTE: There are already more different volumes in pa_sink than I can remember.
3750      *       Adding one more volume for HW would let us get rid of this, but I am trying
3751      *       to survive with the ones we already have. */
3752     pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3753 
3754     if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3755         pa_log_debug("Volume not changing");
3756         pa_sink_volume_change_free(nc);
3757         return;
3758     }
3759 
3760     nc->at = pa_sink_get_latency_within_thread(s, false);
3761     nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
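     /* nc->at now estimates when audio currently entering the sink becomes audible:
      * the current time plus the sink latency plus the configured extra delay. The
      * change is scheduled for that moment so the hardware volume write roughly
      * lines up with the samples that have already been rendered. */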
3762 
3763     if (s->thread_info.volume_changes_tail) {
3764         for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3765             /* If volume is going up let's do it a bit late. If it is going
3766              * down let's do it a bit early. */
3767             if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3768                 if (nc->at + safety_margin > c->at) {
3769                     nc->at += safety_margin;
3770                     direction = "up";
3771                     break;
3772                 }
3773             }
3774             else if (nc->at - safety_margin > c->at) {
3775                 nc->at -= safety_margin;
3776                 direction = "down";
3777                 break;
3778             }
3779         }
3780     }
3781 
3782     if (c == NULL) {
3783         if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3784             nc->at += safety_margin;
3785             direction = "up";
3786         } else {
3787             nc->at -= safety_margin;
3788             direction = "down";
3789         }
3790         PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3791     }
3792     else {
3793         PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3794     }
3795 
3796     pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3797 
3798     /* We can ignore volume events that came earlier but should happen later than this. */
3799     PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3800         pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3801         pa_sink_volume_change_free(c);
3802     }
3803     nc->next = NULL;
3804     s->thread_info.volume_changes_tail = nc;
3805 }
3806 
3807 /* Called from the IO thread. */
3808 static void pa_sink_volume_change_flush(pa_sink *s) {
3809     pa_sink_volume_change *c = s->thread_info.volume_changes;
3810     pa_assert(s);
3811     s->thread_info.volume_changes = NULL;
3812     s->thread_info.volume_changes_tail = NULL;
3813     while (c) {
3814         pa_sink_volume_change *next = c->next;
3815         pa_sink_volume_change_free(c);
3816         c = next;
3817     }
3818 }
3819 
3820 /* Called from the IO thread. */
3821 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3822     pa_usec_t now;
3823     bool ret = false;
3824 
3825     pa_assert(s);
3826 
3827     if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3828         if (usec_to_next)
3829             *usec_to_next = 0;
3830         return ret;
3831     }
3832 
3833     pa_assert(s->write_volume);
3834 
3835     now = pa_rtclock_now();
3836 
3837     while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3838         pa_sink_volume_change *c = s->thread_info.volume_changes;
3839         PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3840         pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3841                      pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3842         ret = true;
3843         s->thread_info.current_hw_volume = c->hw_volume;
3844         pa_sink_volume_change_free(c);
3845     }
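     /* Only the most recent change that was already due remains in current_hw_volume;
      * a single write_volume() call below then pushes it to the hardware. */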
3846 
3847     if (ret)
3848         s->write_volume(s);
3849 
3850     if (s->thread_info.volume_changes) {
3851         if (usec_to_next)
3852             *usec_to_next = s->thread_info.volume_changes->at - now;
3853         if (pa_log_ratelimit(PA_LOG_DEBUG))
3854             pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3855     }
3856     else {
3857         if (usec_to_next)
3858             *usec_to_next = 0;
3859         s->thread_info.volume_changes_tail = NULL;
3860     }
3861     return ret;
3862 }
3863 
3864 /* Called from the IO thread. */
3865 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3866     /* All the queued volume events later than current latency are shifted to happen earlier. */
3867     pa_sink_volume_change *c;
3868     pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3869     pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3870     pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3871 
3872     pa_log_debug("latency = %lld", (long long) limit);
3873     limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
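     /* "limit" is roughly when audio entering the sink now becomes audible. Changes
      * scheduled before their (safety-margin adjusted) limit are left alone; later
      * ones are pulled earlier by the rewound amount, but never past that limit. */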
3874 
3875     PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3876         pa_usec_t modified_limit = limit;
3877         if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3878             modified_limit -= s->thread_info.volume_change_safety_margin;
3879         else
3880             modified_limit += s->thread_info.volume_change_safety_margin;
3881         if (c->at > modified_limit) {
3882             c->at -= rewound;
3883             if (c->at < modified_limit)
3884                 c->at = modified_limit;
3885         }
3886         prev_vol = pa_cvolume_avg(&c->hw_volume);
3887     }
3888     pa_sink_volume_change_apply(s, NULL);
3889 }
3890 
3891 /* Called from the main thread */
3892 /* Gets the list of formats supported by the sink. The members and idxset must
3893  * be freed by the caller. */
3894 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3895     pa_idxset *ret;
3896 
3897     pa_assert(s);
3898 
3899     if (s->get_formats) {
3900         /* Sink supports format query, all is good */
3901         ret = s->get_formats(s);
3902     } else {
3903         /* Sink doesn't support format query, so assume it does PCM */
3904         pa_format_info *f = pa_format_info_new();
3905         f->encoding = PA_ENCODING_PCM;
3906 
3907         ret = pa_idxset_new(NULL, NULL);
3908         pa_idxset_put(ret, f, NULL);
3909     }
3910 
3911     return ret;
3912 }
3913 
3914 /* Called from the main thread */
3915 /* Allows an external source to set what formats a sink supports if the sink
3916  * permits this. The function makes a copy of the formats on success. */
3917 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3918     pa_assert(s);
3919     pa_assert(formats);
3920 
3921     if (s->set_formats)
3922         /* Sink supports setting formats -- let's give it a shot */
3923         return s->set_formats(s, formats);
3924     else
3925         /* Sink doesn't support setting this -- bail out */
3926         return false;
3927 }
3928 
3929 /* Called from the main thread */
3930 /* Checks if the sink can accept this format */
3931 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3932     pa_idxset *formats = NULL;
3933     bool ret = false;
3934 
3935     pa_assert(s);
3936     pa_assert(f);
3937 
3938     formats = pa_sink_get_formats(s);
3939 
3940     if (formats) {
3941         pa_format_info *finfo_device;
3942         uint32_t i;
3943 
3944         PA_IDXSET_FOREACH(finfo_device, formats, i) {
3945             if (pa_format_info_is_compatible(finfo_device, f)) {
3946                 ret = true;
3947                 break;
3948             }
3949         }
3950 
3951         pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3952     }
3953 
3954     return ret;
3955 }
3956 
3957 /* Called from the main thread */
3958 /* Calculates the intersection between formats supported by the sink and
3959  * in_formats, and returns these, in the order of the sink's formats. */
3960 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3961     pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3962     pa_format_info *f_sink, *f_in;
3963     uint32_t i, j;
3964 
3965     pa_assert(s);
3966 
3967     if (!in_formats || pa_idxset_isempty(in_formats))
3968         goto done;
3969 
3970     sink_formats = pa_sink_get_formats(s);
3971 
3972     PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3973         PA_IDXSET_FOREACH(f_in, in_formats, j) {
3974             if (pa_format_info_is_compatible(f_sink, f_in))
3975                 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3976         }
3977     }
3978 
3979 done:
3980     if (sink_formats)
3981         pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3982 
3983     return out_formats;
3984 }
3985 
3986 /* Called from the main thread */
3987 void pa_sink_set_sample_format(pa_sink *s, pa_sample_format_t format) {
3988     pa_sample_format_t old_format;
3989 
3990     pa_assert(s);
3991     pa_assert(pa_sample_format_valid(format));
3992 
3993     old_format = s->sample_spec.format;
3994     if (old_format == format)
3995         return;
3996 
3997     pa_log_info("%s: format: %s -> %s",
3998                 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
3999 
4000     s->sample_spec.format = format;
4001 
4002     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4003 }
4004 
4005 /* Called from the main thread */
4006 void pa_sink_set_sample_rate(pa_sink *s, uint32_t rate) {
4007     uint32_t old_rate;
4008 
4009     pa_assert(s);
4010     pa_assert(pa_sample_rate_valid(rate));
4011 
4012     old_rate = s->sample_spec.rate;
4013     if (old_rate == rate)
4014         return;
4015 
4016     pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
4017 
4018     s->sample_spec.rate = rate;
4019 
4020     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4021 }
4022 
4023 /* Called from the main thread. */
4024 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
4025     pa_cvolume old_volume;
4026     char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4027     char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4028 
4029     pa_assert(s);
4030     pa_assert(volume);
4031 
4032     old_volume = s->reference_volume;
4033 
4034     if (pa_cvolume_equal(volume, &old_volume))
4035         return;
4036 
4037     s->reference_volume = *volume;
4038     pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
4039                  pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
4040                                             s->flags & PA_SINK_DECIBEL_VOLUME),
4041                  pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4042                                             s->flags & PA_SINK_DECIBEL_VOLUME));
4043 
4044     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4045     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);
4046 }
4047 
4048 void pa_sink_move_streams_to_default_sink(pa_core *core, pa_sink *old_sink, bool default_sink_changed) {
4049     pa_sink_input *i;
4050     uint32_t idx;
4051 
4052     pa_assert(core);
4053     pa_assert(old_sink);
4054 
4055     if (core->state == PA_CORE_SHUTDOWN)
4056         return;
4057 
4058     if (core->default_sink == NULL || core->default_sink->unlink_requested)
4059         return;
4060 
4061     if (old_sink == core->default_sink)
4062         return;
4063 
4064     PA_IDXSET_FOREACH(i, old_sink->inputs, idx) {
4065         if (!PA_SINK_INPUT_IS_LINKED(i->state))
4066             continue;
4067 
4068         if (!i->sink)
4069             continue;
4070 
4071         /* Don't move sink-inputs which connect filter sinks to their target sinks */
4072         if (i->origin_sink)
4073             continue;
4074 
4075         /* If default_sink_changed is false, the old sink became unavailable, so all streams must be moved. */
4076         if (pa_safe_streq(old_sink->name, i->preferred_sink) && default_sink_changed)
4077             continue;
4078 
4079         if (!pa_sink_input_may_move_to(i, core->default_sink))
4080             continue;
4081 
4082         if (default_sink_changed)
4083             pa_log_info("The sink input %u \"%s\" is moving to %s due to change of the default sink.",
4084                         i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4085         else
4086             pa_log_info("The sink input %u \"%s\" is moving to %s, because the old sink became unavailable.",
4087                         i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4088 
4089         pa_sink_input_move_to(i, core->default_sink, false);
4090     }
4091 }
4092