1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <stdlib.h>
27
28 #include <pulse/format.h>
29 #include <pulse/utf8.h>
30 #include <pulse/xmalloc.h>
31 #include <pulse/timeval.h>
32 #include <pulse/util.h>
33 #include <pulse/rtclock.h>
34 #include <pulse/internal.h>
35
36 #include <pulsecore/core-util.h>
37 #include <pulsecore/source-output.h>
38 #include <pulsecore/namereg.h>
39 #include <pulsecore/core-subscribe.h>
40 #include <pulsecore/log.h>
41 #include <pulsecore/mix.h>
42 #include <pulsecore/flist.h>
43
44 #include "source.h"
45
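/* Latency bounds and the default fixed latency used below; all values are
 * expressed in microseconds. */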
46 #define ABSOLUTE_MIN_LATENCY (500)
47 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
48 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
49
50 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
51
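/* One queued hardware volume write, used by the deferred volume machinery
 * (PA_SOURCE_DEFERRED_VOLUME): hw_volume is to be applied at time 'at'. */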
52 struct pa_source_volume_change {
53 pa_usec_t at;
54 pa_cvolume hw_volume;
55
56 PA_LLIST_FIELDS(pa_source_volume_change);
57 };
58
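/* Payload of the PA_SOURCE_MESSAGE_SET_STATE message sent to the IO thread,
 * see source_set_state() below. */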
59 struct set_state_data {
60 pa_source_state_t state;
61 pa_suspend_cause_t suspend_cause;
62 };
63
64 static void source_free(pa_object *o);
65
66 static void pa_source_volume_change_push(pa_source *s);
67 static void pa_source_volume_change_flush(pa_source *s);
68
69 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
70 pa_assert(data);
71
72 pa_zero(*data);
73 data->proplist = pa_proplist_new();
74 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
75
76 return data;
77 }
78
79 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
80 pa_assert(data);
81
82 pa_xfree(data->name);
83 data->name = pa_xstrdup(name);
84 }
85
86 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
87 pa_assert(data);
88
89 if ((data->sample_spec_is_set = !!spec))
90 data->sample_spec = *spec;
91 }
92
93 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
94 pa_assert(data);
95
96 if ((data->channel_map_is_set = !!map))
97 data->channel_map = *map;
98 }
99
100 void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
101 pa_assert(data);
102
103 data->alternate_sample_rate_is_set = true;
104 data->alternate_sample_rate = alternate_sample_rate;
105 }
106
107 void pa_source_new_data_set_avoid_resampling(pa_source_new_data *data, bool avoid_resampling) {
108 pa_assert(data);
109
110 data->avoid_resampling_is_set = true;
111 data->avoid_resampling = avoid_resampling;
112 }
113
114 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
115 pa_assert(data);
116
117 if ((data->volume_is_set = !!volume))
118 data->volume = *volume;
119 }
120
121 void pa_source_new_data_set_muted(pa_source_new_data *data, bool mute) {
122 pa_assert(data);
123
124 data->muted_is_set = true;
125 data->muted = mute;
126 }
127
128 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
129 pa_assert(data);
130
131 pa_xfree(data->active_port);
132 data->active_port = pa_xstrdup(port);
133 }
134
135 void pa_source_new_data_done(pa_source_new_data *data) {
136 pa_assert(data);
137
138 pa_proplist_free(data->proplist);
139
140 if (data->ports)
141 pa_hashmap_free(data->ports);
142
143 pa_xfree(data->name);
144 pa_xfree(data->active_port);
145 }
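/*
 * Illustrative sketch of how a driver module typically uses the
 * pa_source_new_data helpers above together with pa_source_new() below.
 * This is not taken from any particular module; "u", "m", "ss" and "map"
 * are hypothetical locals of the module:
 *
 *     pa_source_new_data data;
 *
 *     pa_source_new_data_init(&data);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     pa_source_new_data_set_name(&data, "my_source");
 *     pa_source_new_data_set_sample_spec(&data, &ss);
 *     pa_source_new_data_set_channel_map(&data, &map);
 *     u->source = pa_source_new(m->core, &data, PA_SOURCE_LATENCY);
 *     pa_source_new_data_done(&data);
 */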
146
147 /* Called from main context */
148 static void reset_callbacks(pa_source *s) {
149 pa_assert(s);
150
151 s->set_state_in_main_thread = NULL;
152 s->set_state_in_io_thread = NULL;
153 s->get_volume = NULL;
154 s->set_volume = NULL;
155 s->write_volume = NULL;
156 s->get_mute = NULL;
157 s->set_mute = NULL;
158 s->update_requested_latency = NULL;
159 s->set_port = NULL;
160 s->get_formats = NULL;
161 s->reconfigure = NULL;
162 }
163
164 /* Called from main context */
165 pa_source* pa_source_new(
166 pa_core *core,
167 pa_source_new_data *data,
168 pa_source_flags_t flags) {
169
170 pa_source *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 char *pt;
174
175 pa_assert(core);
176 pa_assert(data);
177 pa_assert(data->name);
178 pa_assert_ctl_context();
179
180 s = pa_msgobject_new(pa_source);
181
182 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
183 pa_log_debug("Failed to register name %s.", data->name);
184 pa_xfree(s);
185 return NULL;
186 }
187
188 pa_source_new_data_set_name(data, name);
189
190 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
191 pa_xfree(s);
192 pa_namereg_unregister(core, name);
193 return NULL;
194 }
195
196 /* FIXME, need to free s here on failure */
197
198 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
199 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
200
201 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
202
203 if (!data->channel_map_is_set)
204 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
205
206 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
207 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
208
209 /* FIXME: There should probably be a general function for checking whether
210 * the source volume is allowed to be set, like there is for source outputs. */
211 pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
212
213 if (!data->volume_is_set) {
214 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
215 data->save_volume = false;
216 }
217
218 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
219 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
220
221 if (!data->muted_is_set)
222 data->muted = false;
223
224 if (data->card)
225 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
226
227 pa_device_init_description(data->proplist, data->card);
228 pa_device_init_icon(data->proplist, false);
229 pa_device_init_intended_roles(data->proplist);
230
231 if (!data->active_port) {
232 pa_device_port *p = pa_device_port_find_best(data->ports);
233 if (p)
234 pa_source_new_data_set_port(data, p->name);
235 }
236
237 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
238 pa_xfree(s);
239 pa_namereg_unregister(core, name);
240 return NULL;
241 }
242
243 s->parent.parent.free = source_free;
244 s->parent.process_msg = pa_source_process_msg;
245
246 s->core = core;
247 s->state = PA_SOURCE_INIT;
248 s->flags = flags;
249 s->priority = 0;
250 s->suspend_cause = data->suspend_cause;
251 s->name = pa_xstrdup(name);
252 s->proplist = pa_proplist_copy(data->proplist);
253 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
254 s->module = data->module;
255 s->card = data->card;
256
257 s->priority = pa_device_init_priority(s->proplist);
258
259 s->sample_spec = data->sample_spec;
260 s->channel_map = data->channel_map;
261 s->default_sample_rate = s->sample_spec.rate;
262
263 if (data->alternate_sample_rate_is_set)
264 s->alternate_sample_rate = data->alternate_sample_rate;
265 else
266 s->alternate_sample_rate = s->core->alternate_sample_rate;
267
268 if (data->avoid_resampling_is_set)
269 s->avoid_resampling = data->avoid_resampling;
270 else
271 s->avoid_resampling = s->core->avoid_resampling;
272
273 s->outputs = pa_idxset_new(NULL, NULL);
274 s->n_corked = 0;
275 s->monitor_of = NULL;
276 s->output_from_master = NULL;
277
278 s->reference_volume = s->real_volume = data->volume;
279 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
280 s->base_volume = PA_VOLUME_NORM;
281 s->n_volume_steps = PA_VOLUME_NORM+1;
282 s->muted = data->muted;
283 s->refresh_volume = s->refresh_muted = false;
284
285 reset_callbacks(s);
286 s->userdata = NULL;
287
288 s->asyncmsgq = NULL;
289
290 /* As a minor optimization we just steal the list instead of
291 * copying it here */
292 s->ports = data->ports;
293 data->ports = NULL;
294
295 s->active_port = NULL;
296 s->save_port = false;
297
298 if (data->active_port)
299 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
300 s->save_port = data->save_port;
301
302 /* Hopefully the active port has already been assigned in the previous call
303 to pa_device_port_find_best, but better safe than sorry */
304 if (!s->active_port)
305 s->active_port = pa_device_port_find_best(s->ports);
306
307 if (s->active_port)
308 s->port_latency_offset = s->active_port->latency_offset;
309 else
310 s->port_latency_offset = 0;
311
312 s->save_volume = data->save_volume;
313 s->save_muted = data->save_muted;
314
315 pa_silence_memchunk_get(
316 &core->silence_cache,
317 core->mempool,
318 &s->silence,
319 &s->sample_spec,
320 0);
321
322 s->thread_info.rtpoll = NULL;
323 s->thread_info.outputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
324 (pa_free_cb_t) pa_source_output_unref);
325 s->thread_info.soft_volume = s->soft_volume;
326 s->thread_info.soft_muted = s->muted;
327 s->thread_info.state = s->state;
328 s->thread_info.max_rewind = 0;
329 s->thread_info.requested_latency_valid = false;
330 s->thread_info.requested_latency = 0;
331 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
332 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
333 s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
334
335 PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
336 s->thread_info.volume_changes_tail = NULL;
337 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
338 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
339 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
340 s->thread_info.port_latency_offset = s->port_latency_offset;
341
342 /* FIXME: This should probably be moved to pa_source_put() */
343 pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);
344
345 if (s->card)
346 pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);
347
348 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
349 pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
350 s->index,
351 s->name,
352 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
353 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
354 pt);
355 pa_xfree(pt);
356
357 return s;
358 }
359
360 /* Called from main context */
361 static int source_set_state(pa_source *s, pa_source_state_t state, pa_suspend_cause_t suspend_cause) {
362 int ret = 0;
363 bool state_changed;
364 bool suspend_cause_changed;
365 bool suspending;
366 bool resuming;
367 pa_source_state_t old_state;
368 pa_suspend_cause_t old_suspend_cause;
369
370 pa_assert(s);
371 pa_assert_ctl_context();
372
373 state_changed = state != s->state;
374 suspend_cause_changed = suspend_cause != s->suspend_cause;
375
376 if (!state_changed && !suspend_cause_changed)
377 return 0;
378
379 suspending = PA_SOURCE_IS_OPENED(s->state) && state == PA_SOURCE_SUSPENDED;
380 resuming = s->state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state);
381
382 /* If we are resuming, suspend_cause must be 0. */
383 pa_assert(!resuming || !suspend_cause);
384
385 /* Here's something to think about: what to do with the suspend cause if
386 * resuming the source fails? The old suspend cause will be incorrect, so we
387 * can't use that. On the other hand, if we set no suspend cause (as is the
388 * case currently), then it looks strange to have a source suspended without
389 * any cause. It might be a good idea to add a new "resume failed" suspend
390 * cause, or it might just add unnecessary complexity, given that the
391 * current approach of not setting any suspend cause works well enough. */
392
393 if (s->set_state_in_main_thread) {
394 if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
395 /* set_state_in_main_thread() is allowed to fail only when resuming. */
396 pa_assert(resuming);
397
398 /* If resuming fails, we set the state to SUSPENDED and
399 * suspend_cause to 0. */
400 state = PA_SOURCE_SUSPENDED;
401 suspend_cause = 0;
402 state_changed = false;
403 suspend_cause_changed = suspend_cause != s->suspend_cause;
404 resuming = false;
405
406 /* We know the state isn't changing. If the suspend cause isn't
407 * changing either, then there's nothing more to do. */
408 if (!suspend_cause_changed)
409 return ret;
410 }
411 }
412
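/* Forward the new state to the IO thread as well, if one is attached. */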
413 if (s->asyncmsgq) {
414 struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
415
416 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
417 /* SET_STATE is allowed to fail only when resuming. */
418 pa_assert(resuming);
419
420 if (s->set_state_in_main_thread)
421 s->set_state_in_main_thread(s, PA_SOURCE_SUSPENDED, 0);
422
423 /* If resuming fails, we set the state to SUSPENDED and
424 * suspend_cause to 0. */
425 state = PA_SOURCE_SUSPENDED;
426 suspend_cause = 0;
427 state_changed = false;
428 suspend_cause_changed = suspend_cause != s->suspend_cause;
429 resuming = false;
430
431 /* We know the state isn't changing. If the suspend cause isn't
432 * changing either, then there's nothing more to do. */
433 if (!suspend_cause_changed)
434 return ret;
435 }
436 }
437
438 old_suspend_cause = s->suspend_cause;
439 if (suspend_cause_changed) {
440 char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
441 char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
442
443 pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
444 pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
445 s->suspend_cause = suspend_cause;
446 }
447
448 old_state = s->state;
449 if (state_changed) {
450 pa_log_debug("%s: state: %s -> %s", s->name, pa_source_state_to_string(s->state), pa_source_state_to_string(state));
451 s->state = state;
452
453 /* If we enter UNLINKED state, then we don't send change notifications.
454 * pa_source_unlink() will send unlink notifications instead. */
455 if (state != PA_SOURCE_UNLINKED) {
456 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
457 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
458 }
459 }
460
461 if (suspending || resuming || suspend_cause_changed) {
462 pa_source_output *o;
463 uint32_t idx;
464
465 /* We're suspending or resuming, tell everyone about it */
466
467 PA_IDXSET_FOREACH(o, s->outputs, idx)
468 if (s->state == PA_SOURCE_SUSPENDED &&
469 (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
470 pa_source_output_kill(o);
471 else if (o->suspend)
472 o->suspend(o, old_state, old_suspend_cause);
473 }
474
475 return ret;
476 }
477
478 void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
479 pa_assert(s);
480
481 s->get_volume = cb;
482 }
483
484 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
485 pa_source_flags_t flags;
486
487 pa_assert(s);
488 pa_assert(!s->write_volume || cb);
489
490 s->set_volume = cb;
491
492 /* Save the current flags so we can tell if they've changed */
493 flags = s->flags;
494
495 if (cb) {
496 /* The source implementor is responsible for setting decibel volume support */
497 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
498 } else {
499 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
500 /* See note below in pa_source_put() about volume sharing and decibel volumes */
501 pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
502 }
503
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SOURCE_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
507 }
508
509 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
510 pa_source_flags_t flags;
511
512 pa_assert(s);
513 pa_assert(!cb || s->set_volume);
514
515 s->write_volume = cb;
516
517 /* Save the current flags so we can tell if they've changed */
518 flags = s->flags;
519
520 if (cb)
521 s->flags |= PA_SOURCE_DEFERRED_VOLUME;
522 else
523 s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;
524
525 /* If the flags have changed after init, let any clients know via a change event */
526 if (s->state != PA_SOURCE_INIT && flags != s->flags)
527 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
528 }
529
530 void pa_source_set_get_mute_callback(pa_source *s, pa_source_get_mute_cb_t cb) {
531 pa_assert(s);
532
533 s->get_mute = cb;
534 }
535
536 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
537 pa_source_flags_t flags;
538
539 pa_assert(s);
540
541 s->set_mute = cb;
542
543 /* Save the current flags so we can tell if they've changed */
544 flags = s->flags;
545
546 if (cb)
547 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
548 else
549 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
550
551 /* If the flags have changed after init, let any clients know via a change event */
552 if (s->state != PA_SOURCE_INIT && flags != s->flags)
553 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
554 }
555
556 static void enable_flat_volume(pa_source *s, bool enable) {
557 pa_source_flags_t flags;
558
559 pa_assert(s);
560
561 /* Always follow the overall user preference here */
562 enable = enable && s->core->flat_volumes;
563
564 /* Save the current flags so we can tell if they've changed */
565 flags = s->flags;
566
567 if (enable)
568 s->flags |= PA_SOURCE_FLAT_VOLUME;
569 else
570 s->flags &= ~PA_SOURCE_FLAT_VOLUME;
571
572 /* If the flags have changed after init, let any clients know via a change event */
573 if (s->state != PA_SOURCE_INIT && flags != s->flags)
574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
575 }
576
577 void pa_source_enable_decibel_volume(pa_source *s, bool enable) {
578 pa_source_flags_t flags;
579
580 pa_assert(s);
581
582 /* Save the current flags so we can tell if they've changed */
583 flags = s->flags;
584
585 if (enable) {
586 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
587 enable_flat_volume(s, true);
588 } else {
589 s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
590 enable_flat_volume(s, false);
591 }
592
593 /* If the flags have changed after init, let any clients know via a change event */
594 if (s->state != PA_SOURCE_INIT && flags != s->flags)
595 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
596 }
597
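/*
 * Illustrative sketch of the usual sequence between pa_source_new() and
 * pa_source_put() in a driver; everything except the pa_source_* calls
 * (the "u->..." fields and the *_cb callbacks) is hypothetical:
 *
 *     pa_source_set_asyncmsgq(s, u->thread_mq.inq);
 *     pa_source_set_rtpoll(s, u->rtpoll);
 *     pa_source_set_get_volume_callback(s, source_get_volume_cb);
 *     pa_source_set_set_volume_callback(s, source_set_volume_cb);
 *     pa_source_set_set_mute_callback(s, source_set_mute_cb);
 *     pa_source_put(s);
 */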
598 /* Called from main context */
599 void pa_source_put(pa_source *s) {
600 pa_source_assert_ref(s);
601 pa_assert_ctl_context();
602
603 pa_assert(s->state == PA_SOURCE_INIT);
604 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || pa_source_is_filter(s));
605
606 /* The following fields must be initialized properly when calling _put() */
607 pa_assert(s->asyncmsgq);
608 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
609
610 /* Generally, flags should be initialized via pa_source_new(). As a
611 * special exception we allow some volume related flags to be set
612 * between _new() and _put() by the callback setter functions above.
613 *
614 * Thus we implement a couple of safeguards here which ensure the above
615 * setters were used (or at least the implementor made manual changes
616 * in a compatible way).
617 *
618 * Note: All of these flags set here can change over the life time
619 * of the source. */
620 pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
621 pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
622 pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);
623
624 /* XXX: Currently decibel volume is disabled for all sources that use volume
625 * sharing. When the master source supports decibel volume, it would be good
626 * to have the flag also in the filter source, but currently we don't do that
627 * so that the flags of the filter source never change when it's moved from
628 * a master source to another. One solution for this problem would be to
629 * remove user-visible volume altogether from filter sources when volume
630 * sharing is used, but the current approach was easier to implement... */
631 /* We always support decibel volumes in software, otherwise we leave it to
632 * the source implementor to set this flag as needed.
633 *
634 * Note: This flag can also change over the life time of the source. */
635 if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
636 pa_source_enable_decibel_volume(s, true);
637 s->soft_volume = s->reference_volume;
638 }
639
640 /* If the source implementor supports dB volumes by itself, we should always
641 * try to enable flat volumes too */
642 if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
643 enable_flat_volume(s, true);
644
645 if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
646 pa_source *root_source = pa_source_get_master(s);
647
648 pa_assert(PA_LIKELY(root_source));
649
650 s->reference_volume = root_source->reference_volume;
651 pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);
652
653 s->real_volume = root_source->real_volume;
654 pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
655 } else
656 /* We assume that if the source implementor changed the default
657 * volume they did so in real_volume, because that is the usual
658 * place where they are supposed to place their changes. */
659 s->reference_volume = s->real_volume;
660
661 s->thread_info.soft_volume = s->soft_volume;
662 s->thread_info.soft_muted = s->muted;
663 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
664
665 pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
666 || (s->base_volume == PA_VOLUME_NORM
667 && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
668 pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
669 pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
670
671 if (s->suspend_cause)
672 pa_assert_se(source_set_state(s, PA_SOURCE_SUSPENDED, s->suspend_cause) == 0);
673 else
674 pa_assert_se(source_set_state(s, PA_SOURCE_IDLE, 0) == 0);
675
676 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
677 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
678
679 /* It's good to fire the SOURCE_PUT hook before updating the default source,
680 * because module-switch-on-connect will set the new source as the default
681 * source, and if we were to call pa_core_update_default_source() before that,
682 * the default source might change twice, causing unnecessary stream moving. */
683 pa_core_update_default_source(s->core);
684
685 pa_core_move_streams_to_newly_available_preferred_source(s->core, s);
686 }
687
688 /* Called from main context */
689 void pa_source_unlink(pa_source *s) {
690 bool linked;
691 pa_source_output *o, PA_UNUSED *j = NULL;
692
693 pa_source_assert_ref(s);
694 pa_assert_ctl_context();
695
696 /* See pa_sink_unlink() for a couple of comments on how this function
697 * works. */
698
699 if (s->unlink_requested)
700 return;
701
702 s->unlink_requested = true;
703
704 linked = PA_SOURCE_IS_LINKED(s->state);
705
706 if (linked)
707 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);
708
709 if (s->state != PA_SOURCE_UNLINKED)
710 pa_namereg_unregister(s->core, s->name);
711 pa_idxset_remove_by_data(s->core->sources, s, NULL);
712
713 pa_core_update_default_source(s->core);
714
715 if (linked && s->core->rescue_streams)
716 pa_source_move_streams_to_default_source(s->core, s, false);
717
718 if (s->card)
719 pa_idxset_remove_by_data(s->card->sources, s, NULL);
720
721 while ((o = pa_idxset_first(s->outputs, NULL))) {
722 pa_assert(o != j);
723 pa_source_output_kill(o);
724 j = o;
725 }
726
727 if (linked)
728 /* It's important to keep the suspend cause unchanged when unlinking,
729 * because if we remove the SESSION suspend cause here, the alsa
730 * source will sync its volume with the hardware while another user is
731 * active, messing up the volume for that other user. */
732 source_set_state(s, PA_SOURCE_UNLINKED, s->suspend_cause);
733 else
734 s->state = PA_SOURCE_UNLINKED;
735
736 reset_callbacks(s);
737
738 if (linked) {
739 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
740 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
741 }
742 }
743
744 /* Called from main context */
745 static void source_free(pa_object *o) {
746 pa_source *s = PA_SOURCE(o);
747
748 pa_assert(s);
749 pa_assert_ctl_context();
750 pa_assert(pa_source_refcnt(s) == 0);
751 pa_assert(!PA_SOURCE_IS_LINKED(s->state));
752
753 pa_log_info("Freeing source %u \"%s\"", s->index, s->name);
754
755 pa_source_volume_change_flush(s);
756
757 pa_idxset_free(s->outputs, NULL);
758 pa_hashmap_free(s->thread_info.outputs);
759
760 if (s->silence.memblock)
761 pa_memblock_unref(s->silence.memblock);
762
763 pa_xfree(s->name);
764 pa_xfree(s->driver);
765
766 if (s->proplist)
767 pa_proplist_free(s->proplist);
768
769 if (s->ports)
770 pa_hashmap_free(s->ports);
771
772 pa_xfree(s);
773 }
774
775 /* Called from main context, and not while the IO thread is active, please */
776 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
777 pa_source_assert_ref(s);
778 pa_assert_ctl_context();
779
780 s->asyncmsgq = q;
781 }
782
783 /* Called from main context, and not while the IO thread is active, please */
784 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
785 pa_source_flags_t old_flags;
786 pa_source_output *output;
787 uint32_t idx;
788
789 pa_source_assert_ref(s);
790 pa_assert_ctl_context();
791
792 /* For now, allow only a minimal set of flags to be changed. */
793 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
794
795 old_flags = s->flags;
796 s->flags = (s->flags & ~mask) | (value & mask);
797
798 if (s->flags == old_flags)
799 return;
800
801 if ((s->flags & PA_SOURCE_LATENCY) != (old_flags & PA_SOURCE_LATENCY))
802 pa_log_debug("Source %s: LATENCY flag %s.", s->name, (s->flags & PA_SOURCE_LATENCY) ? "enabled" : "disabled");
803
804 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY) != (old_flags & PA_SOURCE_DYNAMIC_LATENCY))
805 pa_log_debug("Source %s: DYNAMIC_LATENCY flag %s.",
806 s->name, (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ? "enabled" : "disabled");
807
808 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
809 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_FLAGS_CHANGED], s);
810
811 PA_IDXSET_FOREACH(output, s->outputs, idx) {
812 if (output->destination_source)
813 pa_source_update_flags(output->destination_source, mask, value);
814 }
815 }
816
817 /* Called from IO context, or before _put() from main context */
818 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
819 pa_source_assert_ref(s);
820 pa_source_assert_io_context(s);
821
822 s->thread_info.rtpoll = p;
823 }
824
825 /* Called from main context */
826 int pa_source_update_status(pa_source*s) {
827 pa_source_assert_ref(s);
828 pa_assert_ctl_context();
829 pa_assert(PA_SOURCE_IS_LINKED(s->state));
830
831 if (s->state == PA_SOURCE_SUSPENDED)
832 return 0;
833
834 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE, 0);
835 }
836
837 /* Called from main context */
838 int pa_source_suspend(pa_source *s, bool suspend, pa_suspend_cause_t cause) {
839 pa_suspend_cause_t merged_cause;
840
841 pa_source_assert_ref(s);
842 pa_assert_ctl_context();
843 pa_assert(PA_SOURCE_IS_LINKED(s->state));
844 pa_assert(cause != 0);
845
846 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
847 return -PA_ERR_NOTSUPPORTED;
848
849 if (suspend)
850 merged_cause = s->suspend_cause | cause;
851 else
852 merged_cause = s->suspend_cause & ~cause;
853
854 if (merged_cause)
855 return source_set_state(s, PA_SOURCE_SUSPENDED, merged_cause);
856 else
857 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE, 0);
858 }
859
860 /* Called from main context */
861 int pa_source_sync_suspend(pa_source *s) {
862 pa_sink_state_t state;
863 pa_suspend_cause_t suspend_cause;
864
865 pa_source_assert_ref(s);
866 pa_assert_ctl_context();
867 pa_assert(PA_SOURCE_IS_LINKED(s->state));
868 pa_assert(s->monitor_of);
869
870 state = s->monitor_of->state;
871 suspend_cause = s->monitor_of->suspend_cause;
872
873 /* The monitor source usually has the same state and suspend cause as the
874 * sink, the only exception is when the monitor source is suspended due to
875 * the sink being in the passthrough mode. If the monitor currently has the
876 * PASSTHROUGH suspend cause, then we have to keep the monitor suspended
877 * even if the sink is running. */
878 if (s->suspend_cause & PA_SUSPEND_PASSTHROUGH)
879 suspend_cause |= PA_SUSPEND_PASSTHROUGH;
880
881 if (state == PA_SINK_SUSPENDED || suspend_cause)
882 return source_set_state(s, PA_SOURCE_SUSPENDED, suspend_cause);
883
884 pa_assert(PA_SINK_IS_OPENED(state));
885
886 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE, 0);
887 }
888
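/*
 * The pa_source_move_all_*() helpers below implement a two-phase move of all
 * outputs away from a source. Illustrative use, with a hypothetical
 * destination source "target":
 *
 *     pa_queue *q = pa_source_move_all_start(s, NULL);
 *     if (target)
 *         pa_source_move_all_finish(target, q, false);
 *     else
 *         pa_source_move_all_fail(q);
 */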
889 /* Called from main context */
890 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
891 pa_source_output *o, *n;
892 uint32_t idx;
893
894 pa_source_assert_ref(s);
895 pa_assert_ctl_context();
896 pa_assert(PA_SOURCE_IS_LINKED(s->state));
897
898 if (!q)
899 q = pa_queue_new();
900
901 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
902 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
903
904 pa_source_output_ref(o);
905
906 if (pa_source_output_start_move(o) >= 0)
907 pa_queue_push(q, o);
908 else
909 pa_source_output_unref(o);
910 }
911
912 return q;
913 }
914
915 /* Called from main context */
916 void pa_source_move_all_finish(pa_source *s, pa_queue *q, bool save) {
917 pa_source_output *o;
918
919 pa_source_assert_ref(s);
920 pa_assert_ctl_context();
921 pa_assert(PA_SOURCE_IS_LINKED(s->state));
922 pa_assert(q);
923
924 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
925 if (PA_SOURCE_OUTPUT_IS_LINKED(o->state)) {
926 if (pa_source_output_finish_move(o, s, save) < 0)
927 pa_source_output_fail_move(o);
928
929 }
930 pa_source_output_unref(o);
931 }
932
933 pa_queue_free(q, NULL);
934 }
935
936 /* Called from main context */
937 void pa_source_move_all_fail(pa_queue *q) {
938 pa_source_output *o;
939
940 pa_assert_ctl_context();
941 pa_assert(q);
942
943 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
944 pa_source_output_fail_move(o);
945 pa_source_output_unref(o);
946 }
947
948 pa_queue_free(q, NULL);
949 }
950
951 /* Called from IO thread context */
952 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
953 pa_source_output *o;
954 void *state = NULL;
955
956 pa_source_assert_ref(s);
957 pa_source_assert_io_context(s);
958 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
959
960 if (nbytes <= 0)
961 return;
962
963 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
964 return;
965
966 pa_log_debug("Processing rewind...");
967
968 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
969 pa_source_output_assert_ref(o);
970 pa_source_output_process_rewind(o, nbytes);
971 }
972 }
973
974 /* Called from IO thread context */
975 void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
976 pa_source_output *o;
977 void *state = NULL;
978
979 pa_source_assert_ref(s);
980 pa_source_assert_io_context(s);
981 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
982 pa_assert(chunk);
983
984 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
985 return;
986
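/* If software volume or mute is in effect, apply it to a writable copy of
 * the chunk before handing the data to the outputs. */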
987 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
988 pa_memchunk vchunk = *chunk;
989
990 pa_memblock_ref(vchunk.memblock);
991 pa_memchunk_make_writable(&vchunk, 0);
992
993 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
994 pa_silence_memchunk(&vchunk, &s->sample_spec);
995 else
996 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
997
998 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
999 pa_source_output_assert_ref(o);
1000
1001 if (!o->thread_info.direct_on_input)
1002 pa_source_output_push(o, &vchunk);
1003 }
1004
1005 pa_memblock_unref(vchunk.memblock);
1006 } else {
1007
1008 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
1009 pa_source_output_assert_ref(o);
1010
1011 if (!o->thread_info.direct_on_input)
1012 pa_source_output_push(o, chunk);
1013 }
1014 }
1015 }
1016
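/* Unlike pa_source_post(), this pushes a chunk to one single output, namely
 * one that has thread_info.direct_on_input set; such outputs are skipped by
 * pa_source_post() above. */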
1017 /* Called from IO thread context */
1018 void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
1019 pa_source_assert_ref(s);
1020 pa_source_assert_io_context(s);
1021 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1022 pa_source_output_assert_ref(o);
1023 pa_assert(o->thread_info.direct_on_input);
1024 pa_assert(chunk);
1025
1026 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
1027 return;
1028
1029 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
1030 pa_memchunk vchunk = *chunk;
1031
1032 pa_memblock_ref(vchunk.memblock);
1033 pa_memchunk_make_writable(&vchunk, 0);
1034
1035 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
1036 pa_silence_memchunk(&vchunk, &s->sample_spec);
1037 else
1038 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
1039
1040 pa_source_output_push(o, &vchunk);
1041
1042 pa_memblock_unref(vchunk.memblock);
1043 } else
1044 pa_source_output_push(o, chunk);
1045 }
1046
1047 /* Called from main thread */
1048 void pa_source_reconfigure(pa_source *s, pa_sample_spec *spec, bool passthrough) {
1049 uint32_t idx;
1050 pa_source_output *o;
1051 pa_sample_spec desired_spec;
1052 uint32_t default_rate = s->default_sample_rate;
1053 uint32_t alternate_rate = s->alternate_sample_rate;
1054 bool default_rate_is_usable = false;
1055 bool alternate_rate_is_usable = false;
1056 bool avoid_resampling = s->avoid_resampling;
1057
1058 if (pa_sample_spec_equal(spec, &s->sample_spec))
1059 return;
1060
1061 if (!s->reconfigure && !s->monitor_of)
1062 return;
1063
1064 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1065 pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1066 return;
1067 }
1068
1069 if (PA_SOURCE_IS_RUNNING(s->state)) {
1070 pa_log_info("Cannot update sample spec, SOURCE_IS_RUNNING, will keep using %s and %u Hz",
1071 pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1072 return;
1073 }
1074
1075 if (s->monitor_of) {
1076 if (PA_SINK_IS_RUNNING(s->monitor_of->state)) {
1077 pa_log_info("Cannot update sample spec, this is a monitor source and the sink is running.");
1078 return;
1079 }
1080 }
1081
1082 if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1083 return;
1084
1085 desired_spec = s->sample_spec;
1086
1087 if (passthrough) {
1088 /* We have to try to use the source output format and rate */
1089 desired_spec.format = spec->format;
1090 desired_spec.rate = spec->rate;
1091
1092 } else if (avoid_resampling) {
1093 /* We just try to set the source output's sample rate if it's not too low */
1094 if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1095 desired_spec.rate = spec->rate;
1096 desired_spec.format = spec->format;
1097
1098 } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1099 /* We can directly try to use this rate */
1100 desired_spec.rate = spec->rate;
1101
1102 }
1103
1104 if (desired_spec.rate != spec->rate) {
1105 /* See if we can pick a rate that results in less resampling effort */
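/* Rates divisible by 11025 belong to the 44.1 kHz family, rates divisible by
 * 4000 to the 48 kHz family; picking a rate from the same family as the
 * stream's rate keeps the resampling ratio simple. */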
1106 if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1107 default_rate_is_usable = true;
1108 if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1109 default_rate_is_usable = true;
1110 if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1111 alternate_rate_is_usable = true;
1112 if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1113 alternate_rate_is_usable = true;
1114
1115 if (alternate_rate_is_usable && !default_rate_is_usable)
1116 desired_spec.rate = alternate_rate;
1117 else
1118 desired_spec.rate = default_rate;
1119 }
1120
1121 if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_source_is_passthrough(s))
1122 return;
1123
1124 if (!passthrough && pa_source_used_by(s) > 0)
1125 return;
1126
1127 pa_log_debug("Suspending source %s due to changing format, desired format = %s rate = %u",
1128 s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1129 pa_source_suspend(s, true, PA_SUSPEND_INTERNAL);
1130
1131 if (s->reconfigure)
1132 s->reconfigure(s, &desired_spec, passthrough);
1133 else {
1134 /* This is a monitor source. */
1135
1136 /* XXX: This code is written with non-passthrough streams in mind. I
1137 * have no idea whether the behaviour with passthrough streams is
1138 * sensible. */
1139 if (!passthrough) {
1140 s->sample_spec = desired_spec;
1141 pa_sink_reconfigure(s->monitor_of, &desired_spec, false);
1142 s->sample_spec = s->monitor_of->sample_spec;
1143 } else
1144 goto unsuspend;
1145 }
1146
1147 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1148 if (o->state == PA_SOURCE_OUTPUT_CORKED)
1149 pa_source_output_update_resampler(o);
1150 }
1151
1152 pa_log_info("Reconfigured successfully");
1153
1154 unsuspend:
1155 pa_source_suspend(s, false, PA_SUSPEND_INTERNAL);
1156 }
1157
1158 /* Called from main thread */
1159 pa_usec_t pa_source_get_latency(pa_source *s) {
1160 int64_t usec;
1161
1162 pa_source_assert_ref(s);
1163 pa_assert_ctl_context();
1164 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1165
1166 if (s->state == PA_SOURCE_SUSPENDED)
1167 return 0;
1168
1169 if (!(s->flags & PA_SOURCE_LATENCY))
1170 return 0;
1171
1172 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1173
1174 /* The return value is unsigned, so check that the offset can be added to usec without
1175 * underflowing. */
1176 if (-s->port_latency_offset <= usec)
1177 usec += s->port_latency_offset;
1178 else
1179 usec = 0;
1180
1181 return (pa_usec_t)usec;
1182 }
1183
1184 /* Called from IO thread */
1185 int64_t pa_source_get_latency_within_thread(pa_source *s, bool allow_negative) {
1186 int64_t usec = 0;
1187 pa_msgobject *o;
1188
1189 pa_source_assert_ref(s);
1190 pa_source_assert_io_context(s);
1191 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1192
1193 /* The returned value is supposed to be in the time domain of the sound card! */
1194
1195 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
1196 return 0;
1197
1198 if (!(s->flags & PA_SOURCE_LATENCY))
1199 return 0;
1200
1201 o = PA_MSGOBJECT(s);
1202
1203 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1204
1205 o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1206
1207 /* If allow_negative is false, the call should only return non-negative values. */
1208 usec += s->thread_info.port_latency_offset;
1209 if (!allow_negative && usec < 0)
1210 usec = 0;
1211
1212 return usec;
1213 }
1214
1215 /* Called from the main thread (and also from the IO thread while the main
1216 * thread is waiting).
1217 *
1218 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
1219 * set. Instead, flat volume mode is detected by checking whether the root source
1220 * has the flag set. */
1221 bool pa_source_flat_volume_enabled(pa_source *s) {
1222 pa_source_assert_ref(s);
1223
1224 s = pa_source_get_master(s);
1225
1226 if (PA_LIKELY(s))
1227 return (s->flags & PA_SOURCE_FLAT_VOLUME);
1228 else
1229 return false;
1230 }
1231
1232 /* Called from the main thread (and also from the IO thread while the main
1233 * thread is waiting). */
1234 pa_source *pa_source_get_master(pa_source *s) {
1235 pa_source_assert_ref(s);
1236
1237 while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1238 if (PA_UNLIKELY(!s->output_from_master))
1239 return NULL;
1240
1241 s = s->output_from_master->source;
1242 }
1243
1244 return s;
1245 }
1246
1247 /* Called from main context */
1248 bool pa_source_is_filter(pa_source *s) {
1249 pa_source_assert_ref(s);
1250
1251 return (s->output_from_master != NULL);
1252 }
1253
1254 /* Called from main context */
1255 bool pa_source_is_passthrough(pa_source *s) {
1256
1257 pa_source_assert_ref(s);
1258
1259 /* NB Currently only monitor sources support passthrough mode */
1260 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
1261 }
1262
1263 /* Called from main context */
1264 void pa_source_enter_passthrough(pa_source *s) {
1265 pa_cvolume volume;
1266
1267 /* set the volume to NORM */
1268 s->saved_volume = *pa_source_get_volume(s, true);
1269 s->saved_save_volume = s->save_volume;
1270
1271 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1272 pa_source_set_volume(s, &volume, true, false);
1273 }
1274
1275 /* Called from main context */
1276 void pa_source_leave_passthrough(pa_source *s) {
1277 /* Restore source volume to what it was before we entered passthrough mode */
1278 pa_source_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1279
1280 pa_cvolume_init(&s->saved_volume);
1281 s->saved_save_volume = false;
1282 }
1283
1284 /* Called from main context. */
1285 static void compute_reference_ratio(pa_source_output *o) {
1286 unsigned c = 0;
1287 pa_cvolume remapped;
1288 pa_cvolume ratio;
1289
1290 pa_assert(o);
1291 pa_assert(pa_source_flat_volume_enabled(o->source));
1292
1293 /*
1294 * Calculates the reference ratio from the source's reference
1295 * volume. This basically calculates:
1296 *
1297 * o->reference_ratio = o->volume / o->source->reference_volume
1298 */
1299
1300 remapped = o->source->reference_volume;
1301 pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);
1302
1303 ratio = o->reference_ratio;
1304
1305 for (c = 0; c < o->sample_spec.channels; c++) {
1306
1307 /* We don't update when the source volume is 0 anyway */
1308 if (remapped.values[c] <= PA_VOLUME_MUTED)
1309 continue;
1310
1311 /* Don't update the reference ratio unless necessary */
1312 if (pa_sw_volume_multiply(
1313 ratio.values[c],
1314 remapped.values[c]) == o->volume.values[c])
1315 continue;
1316
1317 ratio.values[c] = pa_sw_volume_divide(
1318 o->volume.values[c],
1319 remapped.values[c]);
1320 }
1321
1322 pa_source_output_set_reference_ratio(o, &ratio);
1323 }
1324
1325 /* Called from main context. Only called for the root source in volume sharing
1326 * cases, except for internal recursive calls. */
1327 static void compute_reference_ratios(pa_source *s) {
1328 uint32_t idx;
1329 pa_source_output *o;
1330
1331 pa_source_assert_ref(s);
1332 pa_assert_ctl_context();
1333 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1334 pa_assert(pa_source_flat_volume_enabled(s));
1335
1336 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1337 compute_reference_ratio(o);
1338
1339 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1340 && PA_SOURCE_IS_LINKED(o->destination_source->state))
1341 compute_reference_ratios(o->destination_source);
1342 }
1343 }
1344
1345 /* Called from main context. Only called for the root source in volume sharing
1346 * cases, except for internal recursive calls. */
1347 static void compute_real_ratios(pa_source *s) {
1348 pa_source_output *o;
1349 uint32_t idx;
1350
1351 pa_source_assert_ref(s);
1352 pa_assert_ctl_context();
1353 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1354 pa_assert(pa_source_flat_volume_enabled(s));
1355
1356 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1357 unsigned c;
1358 pa_cvolume remapped;
1359
1360 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1361 /* The origin source uses volume sharing, so this output's real ratio
1362 * is handled as a special case - the real ratio must be 0 dB, and
1363 * as a result o->soft_volume must equal o->volume_factor. */
1364 pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
1365 o->soft_volume = o->volume_factor;
1366
1367 if (PA_SOURCE_IS_LINKED(o->destination_source->state))
1368 compute_real_ratios(o->destination_source);
1369
1370 continue;
1371 }
1372
1373 /*
1374 * This basically calculates:
1375 *
1376 * o->real_ratio := o->volume / s->real_volume
1377 * o->soft_volume := o->real_ratio * o->volume_factor
1378 */
1379
1380 remapped = s->real_volume;
1381 pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);
1382
1383 o->real_ratio.channels = o->sample_spec.channels;
1384 o->soft_volume.channels = o->sample_spec.channels;
1385
1386 for (c = 0; c < o->sample_spec.channels; c++) {
1387
1388 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1389 /* We leave o->real_ratio untouched */
1390 o->soft_volume.values[c] = PA_VOLUME_MUTED;
1391 continue;
1392 }
1393
1394 /* Don't lose accuracy unless necessary */
1395 if (pa_sw_volume_multiply(
1396 o->real_ratio.values[c],
1397 remapped.values[c]) != o->volume.values[c])
1398
1399 o->real_ratio.values[c] = pa_sw_volume_divide(
1400 o->volume.values[c],
1401 remapped.values[c]);
1402
1403 o->soft_volume.values[c] = pa_sw_volume_multiply(
1404 o->real_ratio.values[c],
1405 o->volume_factor.values[c]);
1406 }
1407
1408 /* We don't copy the soft_volume to the thread_info data
1409 * here. That must be done by the caller */
1410 }
1411 }
1412
1413 static pa_cvolume *cvolume_remap_minimal_impact(
1414 pa_cvolume *v,
1415 const pa_cvolume *template,
1416 const pa_channel_map *from,
1417 const pa_channel_map *to) {
1418
1419 pa_cvolume t;
1420
1421 pa_assert(v);
1422 pa_assert(template);
1423 pa_assert(from);
1424 pa_assert(to);
1425 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1426 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1427
1428 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1429 * mapping from source output to source volumes:
1430 *
1431 * If template is a possible remapping from v it is used instead
1432 * of remapping anew.
1433 *
1434 * If the channel maps don't match we set an all-channel volume on
1435 * the source to ensure that changing a volume on one stream has no
1436 * effect that cannot be compensated for in another stream that
1437 * does not have the same channel map as the source. */
1438
1439 if (pa_channel_map_equal(from, to))
1440 return v;
1441
1442 t = *template;
1443 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1444 *v = *template;
1445 return v;
1446 }
1447
1448 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1449 return v;
1450 }
1451
1452 /* Called from main thread. Only called for the root source in volume sharing
1453 * cases, except for internal recursive calls. */
1454 static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1455 pa_source_output *o;
1456 uint32_t idx;
1457
1458 pa_source_assert_ref(s);
1459 pa_assert(max_volume);
1460 pa_assert(channel_map);
1461 pa_assert(pa_source_flat_volume_enabled(s));
1462
1463 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1464 pa_cvolume remapped;
1465
1466 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1467 if (PA_SOURCE_IS_LINKED(o->destination_source->state))
1468 get_maximum_output_volume(o->destination_source, max_volume, channel_map);
1469
1470 /* Ignore this output. The origin source uses volume sharing, so this
1471 * output's volume will be set to be equal to the root source's real
1472 * volume. Obviously this output's current volume must not then
1473 * affect what the root source's real volume will be. */
1474 continue;
1475 }
1476
1477 remapped = o->volume;
1478 cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
1479 pa_cvolume_merge(max_volume, max_volume, &remapped);
1480 }
1481 }
1482
1483 /* Called from main thread. Only called for the root source in volume sharing
1484 * cases, except for internal recursive calls. */
1485 static bool has_outputs(pa_source *s) {
1486 pa_source_output *o;
1487 uint32_t idx;
1488
1489 pa_source_assert_ref(s);
1490
1491 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1492 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1493 return true;
1494 }
1495
1496 return false;
1497 }
1498
1499 /* Called from main thread. Only called for the root source in volume sharing
1500 * cases, except for internal recursive calls. */
1501 static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1502 pa_source_output *o;
1503 uint32_t idx;
1504
1505 pa_source_assert_ref(s);
1506 pa_assert(new_volume);
1507 pa_assert(channel_map);
1508
1509 s->real_volume = *new_volume;
1510 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1511
1512 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1513 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1514 if (pa_source_flat_volume_enabled(s)) {
1515 pa_cvolume new_output_volume;
1516
1517 /* Follow the root source's real volume. */
1518 new_output_volume = *new_volume;
1519 pa_cvolume_remap(&new_output_volume, channel_map, &o->channel_map);
1520 pa_source_output_set_volume_direct(o, &new_output_volume);
1521 compute_reference_ratio(o);
1522 }
1523
1524 if (PA_SOURCE_IS_LINKED(o->destination_source->state))
1525 update_real_volume(o->destination_source, new_volume, channel_map);
1526 }
1527 }
1528 }
1529
1530 /* Called from main thread. Only called for the root source in shared volume
1531 * cases. */
1532 static void compute_real_volume(pa_source *s) {
1533 pa_source_assert_ref(s);
1534 pa_assert_ctl_context();
1535 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1536 pa_assert(pa_source_flat_volume_enabled(s));
1537 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1538
1539 /* This determines the maximum volume of all streams and sets
1540 * s->real_volume accordingly. */
1541
1542 if (!has_outputs(s)) {
1543 /* In the special case that we have no source outputs we leave the
1544 * volume unmodified. */
1545 update_real_volume(s, &s->reference_volume, &s->channel_map);
1546 return;
1547 }
1548
1549 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1550
1551 /* First let's determine the new maximum volume of all outputs
1552 * connected to this source */
1553 get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
1554 update_real_volume(s, &s->real_volume, &s->channel_map);
1555
1556 /* Then, let's update the real ratios/soft volumes of all outputs
1557 * connected to this source */
1558 compute_real_ratios(s);
1559 }
1560
1561 /* Called from main thread. Only called for the root source in shared volume
1562 * cases, except for internal recursive calls. */
1563 static void propagate_reference_volume(pa_source *s) {
1564 pa_source_output *o;
1565 uint32_t idx;
1566
1567 pa_source_assert_ref(s);
1568 pa_assert_ctl_context();
1569 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1570 pa_assert(pa_source_flat_volume_enabled(s));
1571
1572 /* This is called whenever the source volume changes that is not
1573 * caused by a source output volume change. We need to fix up the
1574 * source output volumes accordingly */
1575
1576 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1577 pa_cvolume new_volume;
1578
1579 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1580 if (PA_SOURCE_IS_LINKED(o->destination_source->state))
1581 propagate_reference_volume(o->destination_source);
1582
1583 /* Since the origin source uses volume sharing, this output's volume
1584 * needs to be updated to match the root source's real volume, but
1585 * that will be done later in update_real_volume(). */
1586 continue;
1587 }
1588
1589 /* This basically calculates:
1590 *
1591 * o->volume := o->reference_volume * o->reference_ratio */
1592
1593 new_volume = s->reference_volume;
1594 pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
1595 pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
1596 pa_source_output_set_volume_direct(o, &new_volume);
1597 }
1598 }
1599
1600 /* Called from main thread. Only called for the root source in volume sharing
1601 * cases, except for internal recursive calls. The return value indicates
1602 * whether any reference volume actually changed. */
1603 static bool update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
1604 pa_cvolume volume;
1605 bool reference_volume_changed;
1606 pa_source_output *o;
1607 uint32_t idx;
1608
1609 pa_source_assert_ref(s);
1610 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1611 pa_assert(v);
1612 pa_assert(channel_map);
1613 pa_assert(pa_cvolume_valid(v));
1614
1615 volume = *v;
1616 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1617
1618 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1619 pa_source_set_reference_volume_direct(s, &volume);
1620
1621 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1622
1623 if (!reference_volume_changed && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1624 /* If the root source's volume doesn't change, then there can't be any
1625 * changes in the other sources in the source tree either.
1626 *
1627 * It's probably theoretically possible that even if the root source's
1628 * volume changes slightly, some filter source doesn't change its volume
1629 * due to rounding errors. If that happens, we still want to propagate
1630 * the changed root source volume to the sources connected to the
1631 * intermediate source that didn't change its volume. This theoretical
1632 * possibility is the reason why we have that !(s->flags &
1633 * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1634 * notice even if we always returned false here when
1635 * reference_volume_changed is false. */
1636 return false;
1637
1638 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1639 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1640 && PA_SOURCE_IS_LINKED(o->destination_source->state))
1641 update_reference_volume(o->destination_source, v, channel_map, false);
1642 }
1643
1644 return true;
1645 }
1646
1647 /* Called from main thread */
1648 void pa_source_set_volume(
1649 pa_source *s,
1650 const pa_cvolume *volume,
1651 bool send_msg,
1652 bool save) {
1653
1654 pa_cvolume new_reference_volume, root_real_volume;
1655 pa_source *root_source;
1656
1657 pa_source_assert_ref(s);
1658 pa_assert_ctl_context();
1659 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1660 pa_assert(!volume || pa_cvolume_valid(volume));
1661 pa_assert(volume || pa_source_flat_volume_enabled(s));
1662 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1663
1664 /* make sure we don't change the volume in PASSTHROUGH mode ...
1665 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
1666 if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
1667 pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");
1668 return;
1669 }
1670
1671 /* In case of volume sharing, the volume is set for the root source first,
1672 * from which it's then propagated to the sharing sources. */
1673 root_source = pa_source_get_master(s);
1674
1675 if (PA_UNLIKELY(!root_source))
1676 return;
1677
1678 /* As a special exception we accept mono volumes on all sources --
1679 * even on those with more complex channel maps */
1680
1681 if (volume) {
1682 if (pa_cvolume_compatible(volume, &s->sample_spec))
1683 new_reference_volume = *volume;
1684 else {
1685 new_reference_volume = s->reference_volume;
1686 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
1687 }
1688
1689 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1690
1691 if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
1692 if (pa_source_flat_volume_enabled(root_source)) {
1693 /* OK, propagate this volume change back to the outputs */
1694 propagate_reference_volume(root_source);
1695
1696 /* And now recalculate the real volume */
1697 compute_real_volume(root_source);
1698 } else
1699 update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
1700 }
1701
1702 } else {
1703 /* If volume is NULL we synchronize the source's real and
1704 * reference volumes with the stream volumes. */
1705
1706 pa_assert(pa_source_flat_volume_enabled(root_source));
1707
1708 /* Ok, let's determine the new real volume */
1709 compute_real_volume(root_source);
1710
1711 /* To propagate the reference volume from the filter to the root source,
1712 * we first take the real volume from the root source and remap it to
1713 * match the filter. Then, we merge in the reference volume from the
1714 * filter on top of this, and remap it back to the root source channel
1715 * count and map */
1716 root_real_volume = root_source->real_volume;
1717 /* First we remap root's real volume to filter channel count and map if needed */
1718 if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
1719 pa_cvolume_remap(&root_real_volume, &root_source->channel_map, &s->channel_map);
1720 /* Then let's 'push' the reference volume if necessary */
1721 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_real_volume);
1722 /* If the source and its root don't have the same number of channels, we need to remap back */
1723 if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
1724 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1725
1726 update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);
1727
1728 /* Now that the reference volume is updated, we can update the streams'
1729 * reference ratios. */
1730 compute_reference_ratios(root_source);
1731 }
1732
1733 if (root_source->set_volume) {
1734 /* If we have a function set_volume(), then we do not apply a
1735 * soft volume by default. However, set_volume() is free to
1736 * apply one to root_source->soft_volume */
1737
1738 pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
1739 if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
1740 root_source->set_volume(root_source);
1741
1742 } else
1743 /* If we have no function set_volume(), then the soft volume
1744 * becomes the real volume */
1745 root_source->soft_volume = root_source->real_volume;
1746
1747 /* This tells the source that soft volume and/or real volume changed */
1748 if (send_msg)
1749 pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
1750 }
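
/* Example (illustrative sketch, not part of the original file): a typical
 * caller, for instance a policy module running in the main thread, sets an
 * absolute volume by filling a pa_cvolume and letting pa_source_set_volume()
 * handle flat-volume and shared-volume propagation. The function name
 * example_set_source_volume() is hypothetical. */
#if 0
static void example_set_source_volume(pa_source *s) {
    pa_cvolume cv;

    /* 50% of PA_VOLUME_NORM on all channels of the source */
    pa_cvolume_set(&cv, s->sample_spec.channels, PA_VOLUME_NORM / 2);

    /* send_msg=true so the IO thread picks up the new soft/real volume,
     * save=false so the value is not flagged for persistent storage */
    pa_source_set_volume(s, &cv, true, false);
}
#endif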
1751
1752 /* Called from the IO thread if sync volume is used, otherwise from the main thread.
1753 * Only to be called by the source implementor */
1754 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1755
1756 pa_source_assert_ref(s);
1757 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1758
1759 if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
1760 pa_source_assert_io_context(s);
1761 else
1762 pa_assert_ctl_context();
1763
1764 if (!volume)
1765 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1766 else
1767 s->soft_volume = *volume;
1768
1769 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
1770 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1771 else
1772 s->thread_info.soft_volume = s->soft_volume;
1773 }
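
/* Example (illustrative sketch, not part of the original file): one common
 * pattern in a backend's set_volume() callback is to apply what the mixer
 * can realize in hardware and hand the remainder to the software volume.
 * Here "hw_volume" is assumed to be the volume the hardware actually
 * accepted, and the function name is hypothetical. */
#if 0
static void example_set_volume_callback(pa_source *s, const pa_cvolume *hw_volume) {
    pa_cvolume soft;

    /* soft = real_volume / hw_volume: whatever the hardware could not do
     * is applied in software */
    pa_sw_cvolume_divide(&soft, &s->real_volume, hw_volume);
    pa_source_set_soft_volume(s, &soft);
}
#endif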
1774
1775 /* Called from the main thread. Only called for the root source in volume sharing
1776 * cases, except for internal recursive calls. */
1777 static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
1778 pa_source_output *o;
1779 uint32_t idx;
1780
1781 pa_source_assert_ref(s);
1782 pa_assert(old_real_volume);
1783 pa_assert_ctl_context();
1784 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1785
1786 /* This is called when the hardware's real volume changes due to
1787 * some external event. We copy the real volume into our
1788 * reference volume and then rebuild the stream volumes based on
1789 * o->real_ratio which should stay fixed. */
1790
1791 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1792 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1793 return;
1794
1795 /* 1. Make the real volume the reference volume */
1796 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
1797 }
1798
1799 if (pa_source_flat_volume_enabled(s)) {
1800 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1801 pa_cvolume new_volume;
1802
1803 /* 2. Since the source's reference and real volumes are equal
1804 * now our ratios should be too. */
1805 pa_source_output_set_reference_ratio(o, &o->real_ratio);
1806
1807 /* 3. Recalculate the new stream reference volume based on the
1808 * reference ratio and the source's reference volume.
1809 *
1810 * This basically calculates:
1811 *
1812 * o->volume = s->reference_volume * o->reference_ratio
1813 *
1814 * This is identical to propagate_reference_volume() */
1815 new_volume = s->reference_volume;
1816 pa_cvolume_remap(&new_volume, &s->channel_map, &o->channel_map);
1817 pa_sw_cvolume_multiply(&new_volume, &new_volume, &o->reference_ratio);
1818 pa_source_output_set_volume_direct(o, &new_volume);
1819
1820 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1821 && PA_SOURCE_IS_LINKED(o->destination_source->state))
1822 propagate_real_volume(o->destination_source, old_real_volume);
1823 }
1824 }
1825
1826 /* Something got changed in the hardware. It probably makes sense
1827 * to save changed hw settings given that hw volume changes not
1828 * triggered by PA are almost certainly done by the user. */
1829 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1830 s->save_volume = true;
1831 }
1832
1833 /* Called from io thread */
1834 void pa_source_update_volume_and_mute(pa_source *s) {
1835 pa_assert(s);
1836 pa_source_assert_io_context(s);
1837
1838 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1839 }
1840
1841 /* Called from main thread */
1842 const pa_cvolume *pa_source_get_volume(pa_source *s, bool force_refresh) {
1843 pa_source_assert_ref(s);
1844 pa_assert_ctl_context();
1845 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1846
1847 if (s->refresh_volume || force_refresh) {
1848 struct pa_cvolume old_real_volume;
1849
1850 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1851
1852 old_real_volume = s->real_volume;
1853
1854 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)
1855 s->get_volume(s);
1856
1857 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1858
1859 update_real_volume(s, &s->real_volume, &s->channel_map);
1860 propagate_real_volume(s, &old_real_volume);
1861 }
1862
1863 return &s->reference_volume;
1864 }
1865
1866 /* Called from main thread. In volume sharing cases, only the root source may
1867 * call this. */
1868 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1869 pa_cvolume old_real_volume;
1870
1871 pa_source_assert_ref(s);
1872 pa_assert_ctl_context();
1873 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1874 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1875
1876 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1877
1878 old_real_volume = s->real_volume;
1879 update_real_volume(s, new_real_volume, &s->channel_map);
1880 propagate_real_volume(s, &old_real_volume);
1881 }
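
/* Example (illustrative sketch, not part of the original file): a backend
 * that receives an out-of-band notification about an external mixer change
 * would forward the new state to the core from the main thread like this;
 * the function name and parameters are hypothetical. */
#if 0
static void example_on_external_mixer_change(pa_source *s, const pa_cvolume *hw_volume, bool hw_muted) {
    /* Tell the core about the hardware volume the user changed behind
     * our back, then do the same for the mute switch. */
    pa_source_volume_changed(s, hw_volume);
    pa_source_mute_changed(s, hw_muted);
}
#endif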
1882
1883 /* Called from main thread */
1884 void pa_source_set_mute(pa_source *s, bool mute, bool save) {
1885 bool old_muted;
1886
1887 pa_source_assert_ref(s);
1888 pa_assert_ctl_context();
1889
1890 old_muted = s->muted;
1891
1892 if (mute == old_muted) {
1893 s->save_muted |= save;
1894 return;
1895 }
1896
1897 s->muted = mute;
1898 s->save_muted = save;
1899
1900 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute) {
1901 s->set_mute_in_progress = true;
1902 s->set_mute(s);
1903 s->set_mute_in_progress = false;
1904 }
1905
1906 if (!PA_SOURCE_IS_LINKED(s->state))
1907 return;
1908
1909 pa_log_debug("The mute of source %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
1910 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1911 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1912 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_MUTE_CHANGED], s);
1913 }
1914
1915 /* Called from main thread */
1916 bool pa_source_get_mute(pa_source *s, bool force_refresh) {
1917
1918 pa_source_assert_ref(s);
1919 pa_assert_ctl_context();
1920 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1921
1922 if ((s->refresh_muted || force_refresh) && s->get_mute) {
1923 bool mute;
1924
1925 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
1926 if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
1927 pa_source_mute_changed(s, mute);
1928 } else {
1929 if (s->get_mute(s, &mute) >= 0)
1930 pa_source_mute_changed(s, mute);
1931 }
1932 }
1933
1934 return s->muted;
1935 }
1936
1937 /* Called from main thread */
1938 void pa_source_mute_changed(pa_source *s, bool new_muted) {
1939 pa_source_assert_ref(s);
1940 pa_assert_ctl_context();
1941 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1942
1943 if (s->set_mute_in_progress)
1944 return;
1945
1946 /* pa_source_set_mute() does this same check, so this may appear redundant,
1947 * but we must have this here also, because the save parameter of
1948 * pa_source_set_mute() would otherwise have unintended side effects
1949 * (saving the mute state when it shouldn't be saved). */
1950 if (new_muted == s->muted)
1951 return;
1952
1953 pa_source_set_mute(s, new_muted, true);
1954 }
1955
1956 /* Called from main thread */
1957 bool pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1958 pa_source_assert_ref(s);
1959 pa_assert_ctl_context();
1960
1961 if (p)
1962 pa_proplist_update(s->proplist, mode, p);
1963
1964 if (PA_SOURCE_IS_LINKED(s->state)) {
1965 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1966 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1967 }
1968
1969 return true;
1970 }
1971
1972 /* Called from main thread */
1973 /* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
1974 void pa_source_set_description(pa_source *s, const char *description) {
1975 const char *old;
1976 pa_source_assert_ref(s);
1977 pa_assert_ctl_context();
1978
1979 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1980 return;
1981
1982 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1983
1984 if (old && description && pa_streq(old, description))
1985 return;
1986
1987 if (description)
1988 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1989 else
1990 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1991
1992 if (PA_SOURCE_IS_LINKED(s->state)) {
1993 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1994 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1995 }
1996 }
1997
1998 /* Called from main thread */
1999 unsigned pa_source_linked_by(pa_source *s) {
2000 pa_source_assert_ref(s);
2001 pa_assert_ctl_context();
2002 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2003
2004 return pa_idxset_size(s->outputs);
2005 }
2006
2007 /* Called from main thread */
2008 unsigned pa_source_used_by(pa_source *s) {
2009 unsigned ret;
2010
2011 pa_source_assert_ref(s);
2012 pa_assert_ctl_context();
2013 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2014
2015 ret = pa_idxset_size(s->outputs);
2016 pa_assert(ret >= s->n_corked);
2017
2018 return ret - s->n_corked;
2019 }
2020
2021 /* Called from main thread */
2022 unsigned pa_source_check_suspend(pa_source *s, pa_source_output *ignore) {
2023 unsigned ret;
2024 pa_source_output *o;
2025 uint32_t idx;
2026
2027 pa_source_assert_ref(s);
2028 pa_assert_ctl_context();
2029
2030 if (!PA_SOURCE_IS_LINKED(s->state))
2031 return 0;
2032
2033 ret = 0;
2034
2035 PA_IDXSET_FOREACH(o, s->outputs, idx) {
2036 if (o == ignore)
2037 continue;
2038
2039 /* We do not assert here. It is perfectly valid for a source output to
2040 * be in the INIT state (i.e. created, marked done but not yet put)
2041 * and we should not care if it's unlinked as it won't contribute
2042 * towards our busy status.
2043 */
2044 if (!PA_SOURCE_OUTPUT_IS_LINKED(o->state))
2045 continue;
2046
2047 if (o->state == PA_SOURCE_OUTPUT_CORKED)
2048 continue;
2049
2050 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
2051 continue;
2052
2053 ret ++;
2054 }
2055
2056 return ret;
2057 }
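
/* Example (illustrative sketch, not part of the original file): an
 * idle-suspend policy, similar in spirit to module-suspend-on-idle, could
 * use the counter above to decide whether anyone is still listening and
 * suspend the source otherwise. The function name is hypothetical. */
#if 0
static void example_maybe_suspend_idle_source(pa_source *s) {
    /* Zero means no linked, uncorked output inhibits auto-suspend */
    if (pa_source_check_suspend(s, NULL) == 0)
        pa_source_suspend(s, true, PA_SUSPEND_IDLE);
}
#endif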
2058
2059 const char *pa_source_state_to_string(pa_source_state_t state) {
2060 switch (state) {
2061 case PA_SOURCE_INIT: return "INIT";
2062 case PA_SOURCE_IDLE: return "IDLE";
2063 case PA_SOURCE_RUNNING: return "RUNNING";
2064 case PA_SOURCE_SUSPENDED: return "SUSPENDED";
2065 case PA_SOURCE_UNLINKED: return "UNLINKED";
2066 case PA_SOURCE_INVALID_STATE: return "INVALID_STATE";
2067 }
2068
2069 pa_assert_not_reached();
2070 }
2071
2072 /* Called from the IO thread */
2073 static void sync_output_volumes_within_thread(pa_source *s) {
2074 pa_source_output *o;
2075 void *state = NULL;
2076
2077 pa_source_assert_ref(s);
2078 pa_source_assert_io_context(s);
2079
2080 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
2081 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
2082 continue;
2083
2084 o->thread_info.soft_volume = o->soft_volume;
2085 //pa_source_output_request_rewind(o, 0, true, false, false);
2086 }
2087 }
2088
2089 /* Called from the IO thread. Only called for the root source in volume sharing
2090 * cases, except for internal recursive calls. */
2091 static void set_shared_volume_within_thread(pa_source *s) {
2092 pa_source_output *o;
2093 void *state = NULL;
2094
2095 pa_source_assert_ref(s);
2096
2097 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2098
2099 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
2100 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
2101 set_shared_volume_within_thread(o->destination_source);
2102 }
2103 }
2104
2105 /* Called from IO thread, except when it is not */
2106 int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2107 pa_source *s = PA_SOURCE(object);
2108 pa_source_assert_ref(s);
2109
2110 switch ((pa_source_message_t) code) {
2111
2112 case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
2113 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2114
2115 pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));
2116
2117 if (o->direct_on_input) {
2118 o->thread_info.direct_on_input = o->direct_on_input;
2119 pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
2120 }
2121
2122 pa_source_output_attach(o);
2123
2124 pa_source_output_set_state_within_thread(o, o->state);
2125
2126 if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
2127 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2128
2129 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2130
2131 /* We don't just invalidate the requested latency here,
2132 * because if we are in a move we might need to fix up the
2133 * requested latency. */
2134 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2135
2136 /* In flat volume mode we need to update the volume as
2137 * well */
2138 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2139 }
2140
2141 case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
2142 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2143
2144 pa_source_output_set_state_within_thread(o, o->state);
2145
2146 pa_source_output_detach(o);
2147
2148 if (o->thread_info.direct_on_input) {
2149 pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
2150 o->thread_info.direct_on_input = NULL;
2151 }
2152
2153 pa_hashmap_remove_and_free(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index));
2154 pa_source_invalidate_requested_latency(s, true);
2155
2156 /* In flat volume mode we need to update the volume as
2157 * well */
2158 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2159 }
2160
2161 case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
2162 pa_source *root_source = pa_source_get_master(s);
2163
2164 if (PA_LIKELY(root_source))
2165 set_shared_volume_within_thread(root_source);
2166
2167 return 0;
2168 }
2169
2170 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
2171
2172 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2173 s->set_volume(s);
2174 pa_source_volume_change_push(s);
2175 }
2176 /* Fall through ... */
2177
2178 case PA_SOURCE_MESSAGE_SET_VOLUME:
2179
2180 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2181 s->thread_info.soft_volume = s->soft_volume;
2182 }
2183
2184 /* Fall through ... */
2185
2186 case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
2187 sync_output_volumes_within_thread(s);
2188 return 0;
2189
2190 case PA_SOURCE_MESSAGE_GET_VOLUME:
2191
2192 if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
2193 s->get_volume(s);
2194 pa_source_volume_change_flush(s);
2195 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2196 }
2197
2198 /* In case the source implementor reset the SW volume. */
2199 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2200 s->thread_info.soft_volume = s->soft_volume;
2201 }
2202
2203 return 0;
2204
2205 case PA_SOURCE_MESSAGE_SET_MUTE:
2206
2207 if (s->thread_info.soft_muted != s->muted) {
2208 s->thread_info.soft_muted = s->muted;
2209 }
2210
2211 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)
2212 s->set_mute(s);
2213
2214 return 0;
2215
2216 case PA_SOURCE_MESSAGE_GET_MUTE:
2217
2218 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
2219 return s->get_mute(s, userdata);
2220
2221 return 0;
2222
2223 case PA_SOURCE_MESSAGE_SET_STATE: {
2224 struct set_state_data *data = userdata;
2225 bool suspend_change =
2226 (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(data->state)) ||
2227 (PA_SOURCE_IS_OPENED(s->thread_info.state) && data->state == PA_SOURCE_SUSPENDED);
2228
2229 if (s->set_state_in_io_thread) {
2230 int r;
2231
2232 if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)
2233 return r;
2234 }
2235
2236 s->thread_info.state = data->state;
2237
2238 if (suspend_change) {
2239 pa_source_output *o;
2240 void *state = NULL;
2241
2242 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2243 if (o->suspend_within_thread)
2244 o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
2245 }
2246
2247 return 0;
2248 }
2249
2250 case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {
2251
2252 pa_usec_t *usec = userdata;
2253 *usec = pa_source_get_requested_latency_within_thread(s);
2254
2255 /* Yes, that's right, the IO thread will see -1 when no
2256 * explicit requested latency is configured, the main
2257 * thread will see max_latency */
2258 if (*usec == (pa_usec_t) -1)
2259 *usec = s->thread_info.max_latency;
2260
2261 return 0;
2262 }
2263
2264 case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
2265 pa_usec_t *r = userdata;
2266
2267 pa_source_set_latency_range_within_thread(s, r[0], r[1]);
2268
2269 return 0;
2270 }
2271
2272 case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
2273 pa_usec_t *r = userdata;
2274
2275 r[0] = s->thread_info.min_latency;
2276 r[1] = s->thread_info.max_latency;
2277
2278 return 0;
2279 }
2280
2281 case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:
2282
2283 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2284 return 0;
2285
2286 case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:
2287
2288 pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2289 return 0;
2290
2291 case PA_SOURCE_MESSAGE_GET_MAX_REWIND:
2292
2293 *((size_t*) userdata) = s->thread_info.max_rewind;
2294 return 0;
2295
2296 case PA_SOURCE_MESSAGE_SET_MAX_REWIND:
2297
2298 pa_source_set_max_rewind_within_thread(s, (size_t) offset);
2299 return 0;
2300
2301 case PA_SOURCE_MESSAGE_GET_LATENCY:
2302
2303 if (s->monitor_of) {
2304 *((int64_t*) userdata) = -pa_sink_get_latency_within_thread(s->monitor_of, true);
2305 return 0;
2306 }
2307
2308 /* Implementors need to override this implementation! */
2309 return -1;
2310
2311 case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2312 /* This message is sent from the IO thread and handled in the main thread. */
2313 pa_assert_ctl_context();
2314
2315 /* Make sure we're not messing with the main thread when no longer linked */
2316 if (!PA_SOURCE_IS_LINKED(s->state))
2317 return 0;
2318
2319 pa_source_get_volume(s, true);
2320 pa_source_get_mute(s, true);
2321 return 0;
2322
2323 case PA_SOURCE_MESSAGE_SET_PORT_LATENCY_OFFSET:
2324 s->thread_info.port_latency_offset = offset;
2325 return 0;
2326
2327 case PA_SOURCE_MESSAGE_MAX:
2328 ;
2329 }
2330
2331 return -1;
2332 }
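
/* Example (illustrative sketch, not part of the original file): a concrete
 * source usually installs its own process_msg handler, deals with its
 * private messages (conventionally numbered from PA_SOURCE_MESSAGE_MAX
 * upwards) and hands everything else to the generic handler above. The
 * message and function names here are hypothetical. */
#if 0
enum {
    EXAMPLE_SOURCE_MESSAGE_FLUSH = PA_SOURCE_MESSAGE_MAX
};

static int example_source_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_source *s = PA_SOURCE(o);

    switch (code) {
        case EXAMPLE_SOURCE_MESSAGE_FLUSH:
            /* ... driver specific handling for the private message ... */
            return 0;
    }

    /* Everything we do not understand goes to the generic handler. */
    return pa_source_process_msg(o, code, userdata, offset, chunk);
}
#endif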
2333
2334 /* Called from main thread */
2335 int pa_source_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2336 pa_source *source;
2337 uint32_t idx;
2338 int ret = 0;
2339
2340 pa_core_assert_ref(c);
2341 pa_assert_ctl_context();
2342 pa_assert(cause != 0);
2343
2344 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
2345 int r;
2346
2347 if (source->monitor_of)
2348 continue;
2349
2350 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2351 ret = r;
2352 }
2353
2354 return ret;
2355 }
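
/* Example (illustrative sketch, not part of the original file): a module can
 * park all real capture devices and later resume them with the matching
 * suspend cause bit; the wrapper name is hypothetical. */
#if 0
static void example_suspend_all_capture(pa_core *c, bool suspend) {
    /* Monitor sources are skipped by pa_source_suspend_all() itself. */
    pa_source_suspend_all(c, suspend, PA_SUSPEND_USER);
}
#endif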
2356
2357 /* Called from IO thread */
2358 void pa_source_detach_within_thread(pa_source *s) {
2359 pa_source_output *o;
2360 void *state = NULL;
2361
2362 pa_source_assert_ref(s);
2363 pa_source_assert_io_context(s);
2364 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2365
2366 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2367 pa_source_output_detach(o);
2368 }
2369
2370 /* Called from IO thread */
2371 void pa_source_attach_within_thread(pa_source *s) {
2372 pa_source_output *o;
2373 void *state = NULL;
2374
2375 pa_source_assert_ref(s);
2376 pa_source_assert_io_context(s);
2377 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2378
2379 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2380 pa_source_output_attach(o);
2381 }
2382
2383 /* Called from IO thread */
2384 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2385 pa_usec_t result = (pa_usec_t) -1;
2386 pa_source_output *o;
2387 void *state = NULL;
2388
2389 pa_source_assert_ref(s);
2390 pa_source_assert_io_context(s);
2391
2392 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2393 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2394
2395 if (s->thread_info.requested_latency_valid)
2396 return s->thread_info.requested_latency;
2397
2398 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2399 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2400 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2401 result = o->thread_info.requested_source_latency;
2402
2403 if (result != (pa_usec_t) -1)
2404 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2405
2406 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2407 /* Only cache this if we are fully set up */
2408 s->thread_info.requested_latency = result;
2409 s->thread_info.requested_latency_valid = true;
2410 }
2411
2412 return result;
2413 }
2414
2415 /* Called from main thread */
2416 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2417 pa_usec_t usec = 0;
2418
2419 pa_source_assert_ref(s);
2420 pa_assert_ctl_context();
2421 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2422
2423 if (s->state == PA_SOURCE_SUSPENDED)
2424 return 0;
2425
2426 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2427
2428 return usec;
2429 }
2430
2431 /* Called from IO thread */
2432 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2433 pa_source_output *o;
2434 void *state = NULL;
2435
2436 pa_source_assert_ref(s);
2437 pa_source_assert_io_context(s);
2438
2439 if (max_rewind == s->thread_info.max_rewind)
2440 return;
2441
2442 s->thread_info.max_rewind = max_rewind;
2443
2444 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2445 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2446 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2447 }
2448
2449 /* Called from main thread */
2450 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2451 pa_source_assert_ref(s);
2452 pa_assert_ctl_context();
2453
2454 if (PA_SOURCE_IS_LINKED(s->state))
2455 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2456 else
2457 pa_source_set_max_rewind_within_thread(s, max_rewind);
2458 }
2459
2460 /* Called from IO thread */
2461 void pa_source_invalidate_requested_latency(pa_source *s, bool dynamic) {
2462 pa_source_output *o;
2463 void *state = NULL;
2464
2465 pa_source_assert_ref(s);
2466 pa_source_assert_io_context(s);
2467
2468 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2469 s->thread_info.requested_latency_valid = false;
2470 else if (dynamic)
2471 return;
2472
2473 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2474
2475 if (s->update_requested_latency)
2476 s->update_requested_latency(s);
2477
2478 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2479 if (o->update_source_requested_latency)
2480 o->update_source_requested_latency(o);
2481 }
2482
2483 if (s->monitor_of)
2484 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2485 }
2486
2487 /* Called from main thread */
2488 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2489 pa_source_assert_ref(s);
2490 pa_assert_ctl_context();
2491
2492 /* min_latency == 0: no limit
2493 * min_latency anything else: specified limit
2494 *
2495 * Similar for max_latency */
2496
2497 if (min_latency < ABSOLUTE_MIN_LATENCY)
2498 min_latency = ABSOLUTE_MIN_LATENCY;
2499
2500 if (max_latency <= 0 ||
2501 max_latency > ABSOLUTE_MAX_LATENCY)
2502 max_latency = ABSOLUTE_MAX_LATENCY;
2503
2504 pa_assert(min_latency <= max_latency);
2505
2506 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2507 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2508 max_latency == ABSOLUTE_MAX_LATENCY) ||
2509 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2510
2511 if (PA_SOURCE_IS_LINKED(s->state)) {
2512 pa_usec_t r[2];
2513
2514 r[0] = min_latency;
2515 r[1] = max_latency;
2516
2517 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2518 } else
2519 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
2520 }
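
/* Example (illustrative sketch, not part of the original file): a driver
 * that supports dynamic latency would typically declare its range while
 * setting the source up, before pa_source_put(), on a source created with
 * PA_SOURCE_DYNAMIC_LATENCY. The concrete numbers below are made up. */
#if 0
static void example_declare_latency_range(pa_source *s) {
    /* Accept requested latencies between 5 ms and 2 s. */
    pa_source_set_latency_range(s, 5 * PA_USEC_PER_MSEC, 2 * PA_USEC_PER_SEC);
}
#endif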
2521
2522 /* Called from main thread */
2523 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2524 pa_source_assert_ref(s);
2525 pa_assert_ctl_context();
2526 pa_assert(min_latency);
2527 pa_assert(max_latency);
2528
2529 if (PA_SOURCE_IS_LINKED(s->state)) {
2530 pa_usec_t r[2] = { 0, 0 };
2531
2532 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2533
2534 *min_latency = r[0];
2535 *max_latency = r[1];
2536 } else {
2537 *min_latency = s->thread_info.min_latency;
2538 *max_latency = s->thread_info.max_latency;
2539 }
2540 }
2541
2542 /* Called from IO thread, and from main thread before pa_source_put() is called */
2543 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2544 pa_source_assert_ref(s);
2545 pa_source_assert_io_context(s);
2546
2547 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2548 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2549 pa_assert(min_latency <= max_latency);
2550
2551 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2552 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2553 max_latency == ABSOLUTE_MAX_LATENCY) ||
2554 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2555 s->monitor_of);
2556
2557 if (s->thread_info.min_latency == min_latency &&
2558 s->thread_info.max_latency == max_latency)
2559 return;
2560
2561 s->thread_info.min_latency = min_latency;
2562 s->thread_info.max_latency = max_latency;
2563
2564 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2565 pa_source_output *o;
2566 void *state = NULL;
2567
2568 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2569 if (o->update_source_latency_range)
2570 o->update_source_latency_range(o);
2571 }
2572
2573 pa_source_invalidate_requested_latency(s, false);
2574 }
2575
2576 /* Called from main thread, before the source is put */
2577 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2578 pa_source_assert_ref(s);
2579 pa_assert_ctl_context();
2580
2581 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2582 pa_assert(latency == 0);
2583 return;
2584 }
2585
2586 if (latency < ABSOLUTE_MIN_LATENCY)
2587 latency = ABSOLUTE_MIN_LATENCY;
2588
2589 if (latency > ABSOLUTE_MAX_LATENCY)
2590 latency = ABSOLUTE_MAX_LATENCY;
2591
2592 if (PA_SOURCE_IS_LINKED(s->state))
2593 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2594 else
2595 s->thread_info.fixed_latency = latency;
2596 }
2597
2598 /* Called from main thread */
2599 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2600 pa_usec_t latency;
2601
2602 pa_source_assert_ref(s);
2603 pa_assert_ctl_context();
2604
2605 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2606 return 0;
2607
2608 if (PA_SOURCE_IS_LINKED(s->state))
2609 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2610 else
2611 latency = s->thread_info.fixed_latency;
2612
2613 return latency;
2614 }
2615
2616 /* Called from IO thread */
2617 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2618 pa_source_assert_ref(s);
2619 pa_source_assert_io_context(s);
2620
2621 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2622 pa_assert(latency == 0);
2623 s->thread_info.fixed_latency = 0;
2624
2625 return;
2626 }
2627
2628 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2629 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2630
2631 if (s->thread_info.fixed_latency == latency)
2632 return;
2633
2634 s->thread_info.fixed_latency = latency;
2635
2636 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2637 pa_source_output *o;
2638 void *state = NULL;
2639
2640 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2641 if (o->update_source_fixed_latency)
2642 o->update_source_fixed_latency(o);
2643 }
2644
2645 pa_source_invalidate_requested_latency(s, false);
2646 }
2647
2648 /* Called from main thread */
2649 void pa_source_set_port_latency_offset(pa_source *s, int64_t offset) {
2650 pa_source_assert_ref(s);
2651
2652 s->port_latency_offset = offset;
2653
2654 if (PA_SOURCE_IS_LINKED(s->state))
2655 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
2656 else
2657 s->thread_info.port_latency_offset = offset;
2658
2659 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_LATENCY_OFFSET_CHANGED], s);
2660 }
2661
2662 /* Called from main thread */
2663 size_t pa_source_get_max_rewind(pa_source *s) {
2664 size_t r;
2665 pa_assert_ctl_context();
2666 pa_source_assert_ref(s);
2667
2668 if (!PA_SOURCE_IS_LINKED(s->state))
2669 return s->thread_info.max_rewind;
2670
2671 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2672
2673 return r;
2674 }
2675
2676 /* Called from main context */
2677 int pa_source_set_port(pa_source *s, const char *name, bool save) {
2678 pa_device_port *port;
2679
2680 pa_source_assert_ref(s);
2681 pa_assert_ctl_context();
2682
2683 if (!s->set_port) {
2684 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2685 return -PA_ERR_NOTIMPLEMENTED;
2686 }
2687
2688 if (!name)
2689 return -PA_ERR_NOENTITY;
2690
2691 if (!(port = pa_hashmap_get(s->ports, name)))
2692 return -PA_ERR_NOENTITY;
2693
2694 if (s->active_port == port) {
2695 s->save_port = s->save_port || save;
2696 return 0;
2697 }
2698
2699 s->port_changing = true;
2700
2701 if (s->set_port(s, port) < 0)
2702 return -PA_ERR_NOENTITY;
2703
2704 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2705
2706 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2707
2708 s->active_port = port;
2709 s->save_port = save;
2710
2711 /* The active port affects the default source selection. */
2712 pa_core_update_default_source(s->core);
2713
2714 pa_source_set_port_latency_offset(s, s->active_port->latency_offset);
2715
2716 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
2717
2718 s->port_changing = false;
2719
2720 return 0;
2721 }
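
/* Example (illustrative sketch, not part of the original file): switching
 * the active port from the main thread, e.g. when a jack gets plugged in.
 * The port name used here is hypothetical and entirely device dependent. */
#if 0
static void example_activate_headset_mic(pa_source *s) {
    /* save=true so the choice is remembered across restarts */
    if (pa_source_set_port(s, "analog-input-headset-mic", true) < 0)
        pa_log_warn("Failed to switch source %s to the headset mic port", s->name);
}
#endif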
2722
2723 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2724
2725 /* Called from the IO thread. */
2726 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2727 pa_source_volume_change *c;
2728 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2729 c = pa_xnew(pa_source_volume_change, 1);
2730
2731 PA_LLIST_INIT(pa_source_volume_change, c);
2732 c->at = 0;
2733 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2734 return c;
2735 }
2736
2737 /* Called from the IO thread. */
2738 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2739 pa_assert(c);
2740 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2741 pa_xfree(c);
2742 }
2743
2744 /* Called from the IO thread. */
2745 void pa_source_volume_change_push(pa_source *s) {
2746 pa_source_volume_change *c = NULL;
2747 pa_source_volume_change *nc = NULL;
2748 pa_source_volume_change *pc = NULL;
2749 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2750
2751 const char *direction = NULL;
2752
2753 pa_assert(s);
2754 nc = pa_source_volume_change_new(s);
2755
2756 /* NOTE: There are already more different volumes in pa_source than I can remember.
2757 * Adding one more volume for HW would get rid of this, but I am trying
2758 * to survive with the ones we already have. */
2759 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2760
2761 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2762 pa_log_debug("Volume not changing");
2763 pa_source_volume_change_free(nc);
2764 return;
2765 }
2766
2767 nc->at = pa_source_get_latency_within_thread(s, false);
2768 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2769
2770 if (s->thread_info.volume_changes_tail) {
2771 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2772 /* If volume is going up let's do it a bit late. If it is going
2773 * down let's do it a bit early. */
2774 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2775 if (nc->at + safety_margin > c->at) {
2776 nc->at += safety_margin;
2777 direction = "up";
2778 break;
2779 }
2780 }
2781 else if (nc->at - safety_margin > c->at) {
2782 nc->at -= safety_margin;
2783 direction = "down";
2784 break;
2785 }
2786 }
2787 }
2788
2789 if (c == NULL) {
2790 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2791 nc->at += safety_margin;
2792 direction = "up";
2793 } else {
2794 nc->at -= safety_margin;
2795 direction = "down";
2796 }
2797 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2798 }
2799 else {
2800 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2801 }
2802
2803 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2804
2805 /* We can ignore volume events that came earlier but should happen later than this. */
2806 PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
2807 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2808 pa_source_volume_change_free(c);
2809 }
2810 nc->next = NULL;
2811 s->thread_info.volume_changes_tail = nc;
2812 }
2813
2814 /* Called from the IO thread. */
2815 static void pa_source_volume_change_flush(pa_source *s) {
2816 pa_source_volume_change *c = s->thread_info.volume_changes;
2817 pa_assert(s);
2818 s->thread_info.volume_changes = NULL;
2819 s->thread_info.volume_changes_tail = NULL;
2820 while (c) {
2821 pa_source_volume_change *next = c->next;
2822 pa_source_volume_change_free(c);
2823 c = next;
2824 }
2825 }
2826
2827 /* Called from the IO thread. */
2828 bool pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2829 pa_usec_t now;
2830 bool ret = false;
2831
2832 pa_assert(s);
2833
2834 if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
2835 if (usec_to_next)
2836 *usec_to_next = 0;
2837 return ret;
2838 }
2839
2840 pa_assert(s->write_volume);
2841
2842 now = pa_rtclock_now();
2843
2844 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2845 pa_source_volume_change *c = s->thread_info.volume_changes;
2846 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2847 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2848 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2849 ret = true;
2850 s->thread_info.current_hw_volume = c->hw_volume;
2851 pa_source_volume_change_free(c);
2852 }
2853
2854 if (ret)
2855 s->write_volume(s);
2856
2857 if (s->thread_info.volume_changes) {
2858 if (usec_to_next)
2859 *usec_to_next = s->thread_info.volume_changes->at - now;
2860 if (pa_log_ratelimit(PA_LOG_DEBUG))
2861 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2862 }
2863 else {
2864 if (usec_to_next)
2865 *usec_to_next = 0;
2866 s->thread_info.volume_changes_tail = NULL;
2867 }
2868 return ret;
2869 }
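
/* Example (illustrative sketch, not part of the original file): a
 * PA_SOURCE_DEFERRED_VOLUME backend calls this from its IO loop so that
 * queued hardware volume steps are written out on time, and uses
 * usec_to_next to decide when to wake up again. The function name is
 * hypothetical and the wakeup scheduling is left as a comment. */
#if 0
static void example_io_thread_iteration(pa_source *s) {
    pa_usec_t usec_to_next;

    if (pa_source_volume_change_apply(s, &usec_to_next)) {
        /* At least one pending change was written via s->write_volume() */
    }

    if (usec_to_next > 0) {
        /* Arrange for the IO thread to wake up again in usec_to_next
         * microseconds, e.g. by (re)arming the module's rtpoll timer. */
    }
}
#endif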
2870
2871 /* Called from the main thread */
2872 /* Gets the list of formats supported by the source. The members and idxset must
2873 * be freed by the caller. */
2874 pa_idxset* pa_source_get_formats(pa_source *s) {
2875 pa_idxset *ret;
2876
2877 pa_assert(s);
2878
2879 if (s->get_formats) {
2880 /* Source supports format query, all is good */
2881 ret = s->get_formats(s);
2882 } else {
2883 /* Source doesn't support format query, so assume it does PCM */
2884 pa_format_info *f = pa_format_info_new();
2885 f->encoding = PA_ENCODING_PCM;
2886
2887 ret = pa_idxset_new(NULL, NULL);
2888 pa_idxset_put(ret, f, NULL);
2889 }
2890
2891 return ret;
2892 }
2893
2894 /* Called from the main thread */
2895 /* Checks if the source can accept this format */
2896 bool pa_source_check_format(pa_source *s, pa_format_info *f) {
2897 pa_idxset *formats = NULL;
2898 bool ret = false;
2899
2900 pa_assert(s);
2901 pa_assert(f);
2902
2903 formats = pa_source_get_formats(s);
2904
2905 if (formats) {
2906 pa_format_info *finfo_device;
2907 uint32_t i;
2908
2909 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2910 if (pa_format_info_is_compatible(finfo_device, f)) {
2911 ret = true;
2912 break;
2913 }
2914 }
2915
2916 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
2917 }
2918
2919 return ret;
2920 }
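
/* Example (illustrative sketch, not part of the original file): checking
 * whether this source can record a given format before a stream is set up;
 * only the encoding is filled in here, and the function name is
 * hypothetical. */
#if 0
static bool example_source_supports_pcm(pa_source *s) {
    pa_format_info *f;
    bool ok;

    f = pa_format_info_new();
    f->encoding = PA_ENCODING_PCM;

    ok = pa_source_check_format(s, f);

    pa_format_info_free(f);
    return ok;
}
#endif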
2921
2922 /* Called from the main thread */
2923 /* Calculates the intersection between formats supported by the source and
2924 * in_formats, and returns these, in the order of the source's formats. */
2925 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2926 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2927 pa_format_info *f_source, *f_in;
2928 uint32_t i, j;
2929
2930 pa_assert(s);
2931
2932 if (!in_formats || pa_idxset_isempty(in_formats))
2933 goto done;
2934
2935 source_formats = pa_source_get_formats(s);
2936
2937 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2938 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2939 if (pa_format_info_is_compatible(f_source, f_in))
2940 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2941 }
2942 }
2943
2944 done:
2945 if (source_formats)
2946 pa_idxset_free(source_formats, (pa_free_cb_t) pa_format_info_free);
2947
2948 return out_formats;
2949 }
2950
2951 /* Called from the main thread */
2952 void pa_source_set_sample_format(pa_source *s, pa_sample_format_t format) {
2953 pa_sample_format_t old_format;
2954
2955 pa_assert(s);
2956 pa_assert(pa_sample_format_valid(format));
2957
2958 old_format = s->sample_spec.format;
2959 if (old_format == format)
2960 return;
2961
2962 pa_log_info("%s: format: %s -> %s",
2963 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
2964
2965 s->sample_spec.format = format;
2966
2967 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2968 }
2969
2970 /* Called from the main thread */
2971 void pa_source_set_sample_rate(pa_source *s, uint32_t rate) {
2972 uint32_t old_rate;
2973
2974 pa_assert(s);
2975 pa_assert(pa_sample_rate_valid(rate));
2976
2977 old_rate = s->sample_spec.rate;
2978 if (old_rate == rate)
2979 return;
2980
2981 pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
2982
2983 s->sample_spec.rate = rate;
2984
2985 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2986 }
2987
2988 /* Called from the main thread. */
2989 void pa_source_set_reference_volume_direct(pa_source *s, const pa_cvolume *volume) {
2990 pa_cvolume old_volume;
2991 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2992 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2993
2994 pa_assert(s);
2995 pa_assert(volume);
2996
2997 old_volume = s->reference_volume;
2998
2999 if (pa_cvolume_equal(volume, &old_volume))
3000 return;
3001
3002 s->reference_volume = *volume;
3003 pa_log_debug("The reference volume of source %s changed from %s to %s.", s->name,
3004 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
3005 s->flags & PA_SOURCE_DECIBEL_VOLUME),
3006 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
3007 s->flags & PA_SOURCE_DECIBEL_VOLUME));
3008
3009 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3010 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_VOLUME_CHANGED], s);
3011 }
3012
3013 void pa_source_move_streams_to_default_source(pa_core *core, pa_source *old_source, bool default_source_changed) {
3014 pa_source_output *o;
3015 uint32_t idx;
3016
3017 pa_assert(core);
3018 pa_assert(old_source);
3019
3020 if (core->state == PA_CORE_SHUTDOWN)
3021 return;
3022
3023 if (core->default_source == NULL || core->default_source->unlink_requested)
3024 return;
3025
3026 if (old_source == core->default_source)
3027 return;
3028
3029 PA_IDXSET_FOREACH(o, old_source->outputs, idx) {
3030 if (!PA_SOURCE_OUTPUT_IS_LINKED(o->state))
3031 continue;
3032
3033 if (!o->source)
3034 continue;
3035
3036 /* Don't move source-outputs which connect sources to filter sources */
3037 if (o->destination_source)
3038 continue;
3039
3040 /* If default_source_changed is false, the old source became unavailable, so all streams must be moved. */
3041 if (pa_safe_streq(old_source->name, o->preferred_source) && default_source_changed)
3042 continue;
3043
3044 if (!pa_source_output_may_move_to(o, core->default_source))
3045 continue;
3046
3047 if (default_source_changed)
3048 pa_log_info("The source output %u \"%s\" is moving to %s due to change of the default source.",
3049 o->index, pa_strnull(pa_proplist_gets(o->proplist, PA_PROP_APPLICATION_NAME)), core->default_source->name);
3050 else
3051 pa_log_info("The source output %u \"%s\" is moving to %s, because the old source became unavailable.",
3052 o->index, pa_strnull(pa_proplist_gets(o->proplist, PA_PROP_APPLICATION_NAME)), core->default_source->name);
3053
3054 pa_source_output_move_to(o, core->default_source, false);
3055 }
3056 }
3057