/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <signal.h>
#include <stdio.h>

#include <alsa/asoundlib.h>

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/util.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-source.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)              /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)         /* 20ms */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)        /* 10ms */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)         /* 5ms */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)     /* 20s */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)    /* 0ms */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC)  /* 100ms */
#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)            /* 10ms */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                 /* 10ms */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                 /* 4ms */

#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                   /* 10s */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                    /* 1s */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                  /* 2ms */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)                /* 200ms */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
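
/* A worked illustration of the accuracy window (numbers for illustration
 * only): PA_VOLUME_NORM is 0x10000 (65536), so VOLUME_ACCURACY comes out at
 * roughly 655 volume steps, i.e. software-volume corrections within about 1%
 * of unity are treated as "accurate enough" and skipped (see
 * source_set_volume_cb() below). */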

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    pa_hashmap *mixers;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    pa_sample_spec verified_sample_spec;
    pa_sample_format_t *supported_formats;
    unsigned int *supported_rates;
    struct {
        size_t fragment_size;
        size_t nfrags;
        size_t tsched_size;
        size_t tsched_watermark;
    } initial_info;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    snd_pcm_uframes_t frames_per_block;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;
    pa_usec_t tsched_watermark_usec;

    char *device_name;                          /* name of the PCM device */
    char *control_device;                       /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first;

    pa_rtpoll_item *alsa_rtpoll_item;

    pa_smoother *smoother;
    uint64_t read_count;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};

enum {
    SOURCE_MESSAGE_SYNC_MIXER = PA_SOURCE_MESSAGE_MAX
};

static void userdata_free(struct userdata *u);
static int unsuspend(struct userdata *u, bool recovering);

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);

    if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->source || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void *busy, struct userdata *u) {
    pa_assert(w);
    pa_assert(u);

    if (PA_PTR_TO_UINT(busy) && !u->reserve) {
        pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
        pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
    } else {
        pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
        pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
    }

    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* Watch the device, so we get notified when another application wants to use it */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}
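
/* Illustrative sizing (hypothetical 44.1 kHz stereo S16LE stream, 4-byte
 * frames): TSCHED_MIN_SLEEP_USEC (10ms) converts to 441 frames = 1764 bytes,
 * which is then clamped between a single frame and half of the usable part
 * of the hardware buffer. */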

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;

    u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
}

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we reach this point, we're out of options: the watermark is maxed
     * out and the latency range cannot be raised any further, so overruns
     * are now unavoidable. */
}
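
/* Example of the growth rule above (hypothetical numbers): with a 20ms
 * watermark and a 10ms increment step, PA_MIN(2*20ms, 20ms+10ms) yields
 * 30ms, i.e. the watermark at most doubles per adjustment and otherwise
 * grows by the fixed step. */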

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
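
/* Note the asymmetry: increases (on overruns) take effect immediately, while
 * the watermark_dec_not_before timestamp enforces a gap of
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC (20s) between decreases, so the
 * watermark decays back towards its minimum only slowly. */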

/* Called from IO Context on unsuspend or from main thread when creating source */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* Work around the assert in pa_source_set_latency_range_within_thread():
           keep track of min_latency and reuse it when
           this routine is called from the IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t wm, usec;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_source_get_requested_latency_within_thread(u->source);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);

    wm = u->tsched_watermark_usec;

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
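
/* Example of the split computed above (hypothetical numbers): with a
 * requested latency of 50ms and a 20ms watermark we sleep for 30ms and keep
 * 20ms as processing headroom; if the watermark exceeded the requested
 * latency, we would instead sleep for half the latency. */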

/* Reset smoother and counters */
static void reset_vars(struct userdata *u) {

    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->read_count = 0;
    u->first = true;
}

/* Called from IO context */
static void close_pcm(struct userdata *u) {
    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Let's suspend */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }
}

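/* Error recovery helper: returns 0 if snd_pcm_recover() repaired the stream
 * in place, 1 if the PCM had to be closed and reopened via unsuspend(), and
 * -1 if that restart failed as well. */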
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer overrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s, trying to restart PCM", call, pa_alsa_strerror(err));

        /* As a last measure, restart the PCM and inform the caller about it. */
        close_pcm(u);
        if (unsuspend(u, true) < 0)
            return -1;

        return 1;
    }

    reset_vars(u);
    return 0;
}

static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    bool overrun = false;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = true;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        bool reset_not_before = true;

        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = false;
            /* We decrease the watermark only if we have actually
             * been woken up by a timeout. If something else woke
             * us up, it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
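
/* Illustration of the overrun check (hypothetical numbers): with a 64 KiB
 * hardware buffer, no unused tail and 70000 bytes reported as available,
 * n_bytes exceeds rec_space, so samples were lost: we report zero headroom
 * and bump the watermark. */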

static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    bool recovery_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            recovery_done = true;
            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                recovery_done = true;
                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                if (r == 1)
                    break;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            frames = PA_MIN(frames, u->frames_per_block);

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);
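
            /* Example (hypothetical 2ch S16LE stream): step is 32 bits per
             * frame, so step >> 3 == 4 == u->frame_size, and first == 0
             * because channel 0 starts at offset 0 of the interleaved
             * buffer. */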

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                recovery_done = true;
                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                if (r == 1)
                    break;

                return r;
            }

            work_done = true;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        /* If the PCM was recovered, it may need restarting. Reduce the sleep time
         * to 0 to ensure immediate restart. */
        if (recovery_done)
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}

static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    int work_done = false;
    bool recovery_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            recovery_done = true;
            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                recovery_done = true;
                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                if (r == 1)
                    break;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = false;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = true;

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        /* If the PCM was recovered, it may need restarting. Reduce the sleep time
         * to 0 to ensure immediate restart. */
        if (recovery_done)
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
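
/* The update interval therefore doubles 2ms -> 4ms -> 8ms -> ... until it
 * hits SMOOTHER_MAX_INTERVAL (200ms), so a long-running stream settles at
 * roughly five timing updates per second. */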

static int64_t source_get_latency(struct userdata *u) {
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);

    return delay;
}
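
/* In other words, the reported source latency is the estimated capture
 * position minus what we have already posted downstream; e.g. (hypothetical)
 * a smoothed position worth 1000us against 600us worth of read bytes yields
 * 400us of data still sitting in the device. */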

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static void suspend(struct userdata *u) {
    pa_assert(u);

    /* PCM may have been invalidated due to device failure.
     * In that case, there is nothing to do. */
    if (!u->pcm_handle)
        return;

    /* Close PCM device */
    close_pcm(u);

    pa_log_info("Device suspended...");
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
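
/* Example (hypothetical numbers): with timer scheduling and a computed sleep
 * time of 30ms at 44.1 kHz, avail_min becomes 1 + 1323 frames, so ALSA only
 * flags the poll fd once that much data has accumulated, avoiding needless
 * wakeups. */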

/* Called from IO Context on unsuspend */
static void update_size(struct userdata *u, pa_sample_spec *ss) {
    pa_assert(u);
    pa_assert(ss);

    u->frame_size = pa_frame_size(ss);
    u->frames_per_block = pa_mempool_block_size_max(u->core->mempool) / u->frame_size;

    /* use initial values including module arguments */
    u->fragment_size = u->initial_info.fragment_size;
    u->hwbuf_size = u->initial_info.nfrags * u->fragment_size;
    u->tsched_size = u->initial_info.tsched_size;
    u->tsched_watermark = u->initial_info.tsched_watermark;

    u->tsched_watermark_ref = u->tsched_watermark;

    pa_log_info("Updated frame_size %zu, frames_per_block %lu, fragment_size %zu, hwbuf_size %zu, tsched(size %zu, watermark %zu)",
                u->frame_size, (unsigned long) u->frames_per_block, u->fragment_size, u->hwbuf_size, u->tsched_size, u->tsched_watermark);
}

/* Called from IO context */
static int unsuspend(struct userdata *u, bool recovering) {
    pa_sample_spec ss;
    int err, i;
    bool b, d;
    snd_pcm_uframes_t period_frames, buffer_frames;
    snd_pcm_uframes_t tsched_frames = 0;
    bool frame_size_changed = false;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /*
     * On some machines, during system suspend and resume, thread_func may receive
     * POLLERR events before the device nodes in /dev/snd/ become accessible again.
     * If unsuspend() is called to recover the PCM at that point, snd_pcm_open()
     * fails, so we sleep briefly and retry until the nodes are accessible.
     */
    for (i = 0; i < 4; i++) {
        if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                                SND_PCM_NONBLOCK|
                                SND_PCM_NO_AUTO_RESAMPLE|
                                SND_PCM_NO_AUTO_CHANNELS|
                                SND_PCM_NO_AUTO_FORMAT)) < 0 && recovering)
            pa_msleep(25);
        else
            break;
    }

    if (err < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    if (pa_frame_size(&u->source->sample_spec) != u->frame_size) {
        update_size(u, &u->source->sample_spec);
        tsched_frames = u->tsched_size / u->frame_size;
        frame_size_changed = true;
    }

    ss = u->source->sample_spec;
    period_frames = u->fragment_size / u->frame_size;
    buffer_frames = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_frames, &buffer_frames, tsched_frames, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (frame_size_changed) {
        u->fragment_size = (size_t)(period_frames * u->frame_size);
        u->hwbuf_size = (size_t)(buffer_frames * u->frame_size);
        pa_proplist_setf(u->source->proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%zu", u->hwbuf_size);
        pa_proplist_setf(u->source->proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%zu", u->fragment_size);

    } else if (period_frames * u->frame_size != u->fragment_size ||
               buffer_frames * u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %zu/%zu, New %lu/%lu)",
                    u->hwbuf_size, u->fragment_size,
                    (unsigned long) buffer_frames * u->frame_size, (unsigned long) period_frames * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    reset_vars(u);

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched && !recovering)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}

/* Called from the IO thread or the main thread depending on whether deferred
 * volume is enabled or not (with deferred volume all mixer handling is done
 * from the IO thread).
 *
 * Sets the mixer settings to match the current source and port state (the port
 * is given as an argument, because active_port may still point to the old
 * port, if we're switching ports). */
static void sync_mixer(struct userdata *u, pa_device_port *port) {
    pa_alsa_setting *setting = NULL;

    pa_assert(u);

    if (!u->mixer_path)
        return;

    /* port may be NULL, because if we use a synthesized mixer path, then the
     * source has no ports. */
    if (port && !u->ucm_context) {
        pa_alsa_port_data *data;

        data = PA_DEVICE_PORT_DATA(port);
        setting = data->setting;
    }

    pa_alsa_path_select(u->mixer_path, setting, u->mixer_handle, u->source->muted);

    if (u->source->set_mute)
        u->source->set_mute(u->source);
    if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
        if (u->source->write_volume)
            u->source->write_volume(u->source);
    } else {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    }
}

/* Called from IO context */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            int64_t r = 0;

            if (u->pcm_handle)
                r = source_get_latency(u);

            *((int64_t*) data) = r;

            return 0;
        }

        case SOURCE_MESSAGE_SYNC_MIXER: {
            pa_device_port *port = data;

            sync_mixer(u, port);
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int source_set_state_in_main_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
    pa_source_state_t old_state;
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    /* When our session becomes active, we need to sync the mixer, because
     * another user may have changed the mixer settings.
     *
     * If deferred volume is enabled, the syncing is done in the
     * set_state_in_io_thread() callback instead. */
    if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME)
            && (s->suspend_cause & PA_SUSPEND_SESSION)
            && !(new_suspend_cause & PA_SUSPEND_SESSION))
        sync_mixer(u, s->active_port);

    old_state = u->source->state;

    if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

/* Called from the IO thread. */
static int source_set_state_in_io_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
    struct userdata *u;

    pa_assert(s);
    pa_assert_se(u = s->userdata);

    /* When our session becomes active, we need to sync the mixer, because
     * another user may have changed the mixer settings.
     *
     * If deferred volume is disabled, the syncing is done in the
     * set_state_in_main_thread() callback instead. */
    if ((s->flags & PA_SOURCE_DEFERRED_VOLUME)
            && (s->suspend_cause & PA_SUSPEND_SESSION)
            && !(new_suspend_cause & PA_SUSPEND_SESSION))
        sync_mixer(u, s->active_port);

    /* It may be that only the suspend cause is changing, in which case there's
     * nothing more to do. */
    if (new_state == s->thread_info.state)
        return 0;

    switch (new_state) {

        case PA_SOURCE_SUSPENDED: {
            pa_assert(PA_SOURCE_IS_OPENED(s->thread_info.state));

            suspend(u);

            break;
        }

        case PA_SOURCE_IDLE:
        case PA_SOURCE_RUNNING: {
            int r;

            if (s->thread_info.state == PA_SOURCE_INIT) {
                if (build_pollfd(u) < 0)
                    /* FIXME: This will cause an assertion failure, because
                     * with the current design pa_source_put() is not allowed
                     * to fail and pa_source_put() has no fallback code that
                     * would start the source suspended if opening the device
                     * fails. */
                    return -PA_ERR_IO;
            }

            if (s->thread_info.state == PA_SOURCE_SUSPENDED) {
                if ((r = unsuspend(u, false)) < 0)
                    return r;
            }

            break;
        }

        case PA_SOURCE_UNLINKED:
        case PA_SOURCE_INIT:
        case PA_SOURCE_INVALID_STATE:
            ;
    }

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SOURCE_IS_LINKED(u->source->state))
        return 0;

    if (u->source->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_source_get_volume(u->source, true);
        pa_source_get_mute(u->source, true);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->source->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_source_update_volume_and_mute(u->source);

    return 0;
}

static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s",
                 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
}

static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}

static int source_get_mute_cb(pa_source *s, bool *mute) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
        return -1;

    return 0;
}

static void source_set_mute_cb(pa_source *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path || !u->mixer_path->has_volume) {
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, true);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            pa_source_enable_decibel_volume(u->source, false);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path || !u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
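
/* Example of the base-volume mapping above (hypothetical mixer): if the path
 * tops out at max_dB = 20 dB, base_volume becomes pa_sw_volume_from_dB(-20),
 * so a "100%" source volume corresponds to 0 dB and the range above it
 * exposes the hardware's amplification headroom. */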

static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_ucm_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    data = PA_DEVICE_PORT_DATA(p);
    u->mixer_path = data->path;
    mixer_volume_init(u);

    if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
        pa_asyncmsgq_send(u->source->asyncmsgq, PA_MSGOBJECT(u->source), SOURCE_MESSAGE_SYNC_MIXER, p, 0, NULL);
    else
        sync_mixer(u, p);

    return pa_alsa_ucm_set_port(u->ucm_context, p, false);
}

static int source_set_port_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);
    pa_assert(!u->ucm_context);

    data = PA_DEVICE_PORT_DATA(p);
    pa_assert_se(u->mixer_path = data->path);
    mixer_volume_init(u);

    if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
        pa_asyncmsgq_send(u->source->asyncmsgq, PA_MSGOBJECT(u->source), SOURCE_MESSAGE_SYNC_MIXER, p, 0, NULL);
    else
        sync_mixer(u, p);

    return 0;
}

static void source_update_requested_latency_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    update_sw_params(u);
}

static void source_reconfigure_cb(pa_source *s, pa_sample_spec *spec, bool passthrough) {
    struct userdata *u = s->userdata;
    int i;
    bool format_supported = false;
    bool rate_supported = false;

    pa_assert(u);

    for (i = 0; u->supported_formats[i] != PA_SAMPLE_MAX; i++) {
        if (u->supported_formats[i] == spec->format) {
            pa_source_set_sample_format(u->source, spec->format);
            format_supported = true;
            break;
        }
    }

    if (!format_supported) {
        pa_log_info("Source does not support sample format %s, setting it to a verified value",
                    pa_sample_format_to_string(spec->format));
        pa_source_set_sample_format(u->source, u->verified_sample_spec.format);
    }

    for (i = 0; u->supported_rates[i]; i++) {
        if (u->supported_rates[i] == spec->rate) {
            pa_source_set_sample_rate(u->source, spec->rate);
            rate_supported = true;
            break;
        }
    }

    if (!rate_supported) {
        pa_log_info("Source does not support sample rate of %u, setting it to a verified value", spec->rate);
        pa_source_set_sample_rate(u->source, u->verified_sample_spec.rate);
    }
}

static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_thread_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), true);

                u->first = false;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

            /* pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

                /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

                /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            real_sleep = pa_rtclock_now();
        } else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                         (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                         (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                            (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                            (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLIN) {
                if ((err = pa_alsa_recover_from_poll(u->pcm_handle, revents)) < 0)
                    goto fail;

                /* Stream needs to be restarted */
                if (err == 1) {
                    close_pcm(u);
                    if (unsuspend(u, true) < 0)
                        goto fail;
                } else
                    reset_vars(u);

                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1767 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1768 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1769
1770 finish:
1771 pa_log_debug("Thread shutting down");
1772 }
1773
set_source_name(pa_source_new_data * data,pa_modargs * ma,const char * device_id,const char * device_name,pa_alsa_mapping * mapping)1774 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1775 const char *n;
1776 char *t;
1777
1778 pa_assert(data);
1779 pa_assert(ma);
1780 pa_assert(device_name);
1781
1782 if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1783 pa_source_new_data_set_name(data, n);
1784 data->namereg_fail = true;
1785 return;
1786 }
1787
1788 if ((n = pa_modargs_get_value(ma, "name", NULL)))
1789 data->namereg_fail = true;
1790 else {
1791 n = device_id ? device_id : device_name;
1792 data->namereg_fail = false;
1793 }
1794
1795 if (mapping)
1796 t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1797 else
1798 t = pa_sprintf_malloc("alsa_input.%s", n);
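    /* For illustration (hypothetical values): device_id "0" with a mapping
     * named "analog-stereo" yields "alsa_input.0.analog-stereo"; without a
     * mapping it is simply "alsa_input.0". */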
1799
1800 pa_source_new_data_set_name(data, t);
1801 pa_xfree(t);
1802 }
1803
1804 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
1805 const char *mdev;
1806
1807 if (!mapping && !element)
1808 return;
1809
1810 if (!element && mapping && pa_alsa_path_set_is_empty(mapping->input_path_set))
1811 return;
1812
1813 u->mixers = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func,
1814 NULL, (pa_free_cb_t) pa_alsa_mixer_free);
1815
1816     mdev = mapping ? pa_proplist_gets(mapping->proplist, "alsa.mixer_device") : NULL;
1817 if (mdev) {
1818 u->mixer_handle = pa_alsa_open_mixer_by_name(u->mixers, mdev, false);
1819 } else {
1820 u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->mixers, u->pcm_handle, false);
1821 }
1822 if (!u->mixer_handle) {
1823 pa_log_info("Failed to find a working mixer device.");
1824 return;
1825 }
1826
1827 if (element) {
1828
1829 if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1830 goto fail;
1831
1832 if (pa_alsa_path_probe(u->mixer_path, NULL, u->mixer_handle, ignore_dB) < 0)
1833 goto fail;
1834
1835 pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1836 pa_alsa_path_dump(u->mixer_path);
1837 } else {
1838 u->mixer_path_set = mapping->input_path_set;
1839 }
1840
1841 return;
1842
1843 fail:
1844
1845 if (u->mixer_path) {
1846 pa_alsa_path_free(u->mixer_path);
1847 u->mixer_path = NULL;
1848 }
1849
1850 u->mixer_handle = NULL;
1851 pa_hashmap_free(u->mixers);
1852 u->mixers = NULL;
1853 }
1854
1855 static int setup_mixer(struct userdata *u, bool ignore_dB) {
1856 bool need_mixer_callback = false;
1857
1858 pa_assert(u);
1859
1860 /* This code is before the u->mixer_handle check, because if the UCM
1861 * configuration doesn't specify volume or mute controls, u->mixer_handle
1862 * will be NULL, but the UCM device enable sequence will still need to be
1863 * executed. */
1864 if (u->source->active_port && u->ucm_context) {
1865 if (pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
1866 return -1;
1867 }
1868
1869 if (!u->mixer_handle)
1870 return 0;
1871
1872 if (u->source->active_port) {
1873 if (!u->ucm_context) {
1874 pa_alsa_port_data *data;
1875
1876 /* We have a list of supported paths, so let's activate the
1877 * one that has been chosen as active */
1878
1879 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1880 u->mixer_path = data->path;
1881
1882 pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);
1883 } else {
1884 pa_alsa_ucm_port_data *data;
1885
1886 data = PA_DEVICE_PORT_DATA(u->source->active_port);
1887
1888 /* Now activate volume controls, if any */
1889 if (data->path) {
1890 u->mixer_path = data->path;
1891 pa_alsa_path_select(u->mixer_path, NULL, u->mixer_handle, u->source->muted);
1892 }
1893 }
1894 } else {
1895
1896 if (!u->mixer_path && u->mixer_path_set)
1897 u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
1898
1899 if (u->mixer_path) {
1900             /* We have only a single path, so let's activate it */
1901
1902 pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
1903 } else
1904 return 0;
1905 }
1906
1907 mixer_volume_init(u);
1908
1909 /* Will we need to register callbacks? */
1910 if (u->mixer_path_set && u->mixer_path_set->paths) {
1911 pa_alsa_path *p;
1912 void *state;
1913
1914 PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
1915 if (p->has_volume || p->has_mute)
1916 need_mixer_callback = true;
1917 }
1918 }
1919 else if (u->mixer_path)
1920 need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1921
1922 if (need_mixer_callback) {
1923 int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1924 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1925 u->mixer_pd = pa_alsa_mixer_pdata_new();
1926 mixer_callback = io_mixer_callback;
1927
1928 if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1929 pa_log("Failed to initialize file descriptor monitoring");
1930 return -1;
1931 }
1932 } else {
1933 u->mixer_fdl = pa_alsa_fdlist_new();
1934 mixer_callback = ctl_mixer_callback;
1935
1936 if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
1937 pa_log("Failed to initialize file descriptor monitoring");
1938 return -1;
1939 }
1940 }
1941
1942 if (u->mixer_path_set)
1943 pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1944 else
1945 pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1946 }
1947
1948 return 0;
1949 }
1950
1951 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1952
1953 struct userdata *u = NULL;
1954 const char *dev_id = NULL, *key, *mod_name;
1955 pa_sample_spec ss;
1956 char *thread_name = NULL;
1957 uint32_t alternate_sample_rate;
1958 pa_channel_map map;
1959 uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1960 snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1961 size_t frame_size;
1962 bool use_mmap = true;
1963 bool use_tsched = true;
1964 bool ignore_dB = false;
1965 bool namereg_fail = false;
1966 bool deferred_volume = false;
1967 bool fixed_latency_range = false;
1968 bool b;
1969 bool d;
1970 bool avoid_resampling;
1971 pa_source_new_data data;
1972 bool volume_is_set;
1973 bool mute_is_set;
1974 pa_alsa_profile_set *profile_set = NULL;
1975 void *state;
1976
1977 pa_assert(m);
1978 pa_assert(ma);
1979
1980 ss = m->core->default_sample_spec;
1981 map = m->core->default_channel_map;
1982 avoid_resampling = m->core->avoid_resampling;
1983
1984 /* Pick sample spec overrides from the mapping, if any */
1985 if (mapping) {
1986 if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
1987 ss.format = mapping->sample_spec.format;
1988 if (mapping->sample_spec.rate != 0)
1989 ss.rate = mapping->sample_spec.rate;
1990 if (mapping->sample_spec.channels != 0) {
1991 ss.channels = mapping->sample_spec.channels;
1992 if (pa_channel_map_valid(&mapping->channel_map))
1993 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
1994 }
1995 }
1996
1997 /* Override with modargs if provided */
1998 if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1999 pa_log("Failed to parse sample specification and channel map");
2000 goto fail;
2001 }
2002
2003 alternate_sample_rate = m->core->alternate_sample_rate;
2004 if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2005 pa_log("Failed to parse alternate sample rate");
2006 goto fail;
2007 }
2008
2009 frame_size = pa_frame_size(&ss);
2010
2011 nfrags = m->core->default_n_fragments;
2012 frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2013     if (frag_size == 0)
2014 frag_size = (uint32_t) frame_size;
2015 tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2016 tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
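    /* A worked example, assuming 44.1kHz S16 stereo (176400 bytes/s): the 2s
     * tsched buffer becomes 2 * 176400 = 352800 bytes and the 20ms watermark
     * 176400 / 50 = 3528 bytes. */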
2017
2018 if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2019 pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2020 pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2021 pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2022 pa_log("Failed to parse buffer metrics");
2023 goto fail;
2024 }
2025
2026 buffer_size = nfrags * frag_size;
2027
2028 period_frames = frag_size/frame_size;
2029 buffer_frames = buffer_size/frame_size;
2030 tsched_frames = tsched_size/frame_size;
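    /* To get a feel for the magnitudes, with the stock defaults of 4
     * fragments of 25ms at 44.1kHz S16 stereo, frag_size comes out around
     * 4.4kB (~1102 frames) and buffer_size around 17.6kB (~4410 frames). */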
2031
2032 if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2033 pa_log("Failed to parse mmap argument.");
2034 goto fail;
2035 }
2036
2037 if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2038 pa_log("Failed to parse tsched argument.");
2039 goto fail;
2040 }
2041
2042 if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2043 pa_log("Failed to parse ignore_dB argument.");
2044 goto fail;
2045 }
2046
2047 deferred_volume = m->core->deferred_volume;
2048 if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2049 pa_log("Failed to parse deferred_volume argument.");
2050 goto fail;
2051 }
2052
2053 if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2054 pa_log("Failed to parse fixed_latency_range argument.");
2055 goto fail;
2056 }
2057
2058 use_tsched = pa_alsa_may_tsched(use_tsched);
2059
2060 u = pa_xnew0(struct userdata, 1);
2061 u->core = m->core;
2062 u->module = m;
2063 u->use_mmap = use_mmap;
2064 u->use_tsched = use_tsched;
2065 u->tsched_size = tsched_size;
2066 u->initial_info.nfrags = (size_t) nfrags;
2067 u->initial_info.fragment_size = (size_t) frag_size;
2068 u->initial_info.tsched_size = (size_t) tsched_size;
2069 u->initial_info.tsched_watermark = (size_t) tsched_watermark;
2070 u->deferred_volume = deferred_volume;
2071 u->fixed_latency_range = fixed_latency_range;
2072 u->first = true;
2073 u->rtpoll = pa_rtpoll_new();
2074
2075 if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
2076 pa_log("pa_thread_mq_init() failed.");
2077 goto fail;
2078 }
2079
2080 u->smoother = pa_smoother_new(
2081 SMOOTHER_ADJUST_USEC,
2082 SMOOTHER_WINDOW_USEC,
2083 true,
2084 true,
2085 5,
2086 pa_rtclock_now(),
2087 true);
2088 u->smoother_interval = SMOOTHER_MIN_INTERVAL;
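    /* The smoother estimates the mapping from sound card time to system
     * time. Going by the pa_smoother_new() signature, the arguments above
     * are: adjustment time, history window, monotonic, smoothing enabled, a
     * minimum history of 5 data points, the current time as offset, and
     * initially paused until the stream really starts. */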
2089
2090     /* Use the UCM context, if the mapping provides one */
2091 if (mapping && mapping->ucm_context.ucm)
2092 u->ucm_context = &mapping->ucm_context;
2093
2094 dev_id = pa_modargs_get_value(
2095 ma, "device_id",
2096 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
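    /* Note the nested lookup: an explicit device_id= argument wins, then
     * device=, and finally the ALSA "default" device string. */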
2097
2098 u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2099
2100 if (reserve_init(u, dev_id) < 0)
2101 goto fail;
2102
2103 if (reserve_monitor_init(u, dev_id) < 0)
2104 goto fail;
2105
2106 b = use_mmap;
2107 d = use_tsched;
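    /* b and d act as in/out flags: on entry they request mmap and tsched
     * respectively; after the open call below they report whether the device
     * actually supports them (evaluated further down). */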
2108
2109 /* Force ALSA to reread its configuration if module-alsa-card didn't
2110 * do it for us. This matters if our device was hot-plugged after ALSA
2111 * has already read its configuration - see
2112 * https://bugs.freedesktop.org/show_bug.cgi?id=54029
2113 */
2114
2115 if (!card)
2116 snd_config_update_free_global();
2117
2118 if (mapping) {
2119
2120 if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2121 pa_log("device_id= not set");
2122 goto fail;
2123 }
2124
2125 if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2126 if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2127 pa_log("Failed to enable ucm modifier %s", mod_name);
2128 else
2129 pa_log_debug("Enabled ucm modifier %s", mod_name);
2130 }
2131
2132 if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2133 dev_id,
2134 &u->device_name,
2135 &ss, &map,
2136 SND_PCM_STREAM_CAPTURE,
2137 &period_frames, &buffer_frames, tsched_frames,
2138 &b, &d, mapping)))
2139 goto fail;
2140
2141 } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2142
2143 if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2144 goto fail;
2145
2146 if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2147 dev_id,
2148 &u->device_name,
2149 &ss, &map,
2150 SND_PCM_STREAM_CAPTURE,
2151 &period_frames, &buffer_frames, tsched_frames,
2152 &b, &d, profile_set, &mapping)))
2153 goto fail;
2154
2155 } else {
2156
2157 if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2158 pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2159 &u->device_name,
2160 &ss, &map,
2161 SND_PCM_STREAM_CAPTURE,
2162 &period_frames, &buffer_frames, tsched_frames,
2163 &b, &d, false)))
2164 goto fail;
2165 }
2166
2167 pa_assert(u->device_name);
2168 pa_log_info("Successfully opened device %s.", u->device_name);
2169
2170 if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2171         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
2172 goto fail;
2173 }
2174
2175 if (mapping)
2176 pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2177
2178 if (use_mmap && !b) {
2179 pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2180 u->use_mmap = use_mmap = false;
2181 }
2182
2183 if (use_tsched && (!b || !d)) {
2184 pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2185 u->use_tsched = use_tsched = false;
2186 }
2187
2188 if (u->use_mmap)
2189 pa_log_info("Successfully enabled mmap() mode.");
2190
2191 if (u->use_tsched) {
2192 pa_log_info("Successfully enabled timer-based scheduling mode.");
2193 if (u->fixed_latency_range)
2194 pa_log_info("Disabling latency range changes on overrun");
2195 }
2196
2197 u->verified_sample_spec = ss;
2198
2199 u->supported_formats = pa_alsa_get_supported_formats(u->pcm_handle, ss.format);
2200 if (!u->supported_formats) {
2201 pa_log_error("Failed to find any supported sample formats.");
2202 goto fail;
2203 }
2204
2205 u->supported_rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
2206 if (!u->supported_rates) {
2207 pa_log_error("Failed to find any supported sample rates.");
2208 goto fail;
2209 }
2210
2211 /* ALSA might tweak the sample spec, so recalculate the frame size */
2212 frame_size = pa_frame_size(&ss);
2213
2214 pa_source_new_data_init(&data);
2215 data.driver = driver;
2216 data.module = m;
2217 data.card = card;
2218 set_source_name(&data, ma, dev_id, u->device_name, mapping);
2219
2220 /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2221 * variable instead of using &data.namereg_fail directly, because
2222 * data.namereg_fail is a bitfield and taking the address of a bitfield
2223 * variable is impossible. */
2224 namereg_fail = data.namereg_fail;
2225 if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2226 pa_log("Failed to parse namereg_fail argument.");
2227 pa_source_new_data_done(&data);
2228 goto fail;
2229 }
2230 data.namereg_fail = namereg_fail;
2231
2232 if (pa_modargs_get_value_boolean(ma, "avoid_resampling", &avoid_resampling) < 0) {
2233 pa_log("Failed to parse avoid_resampling argument.");
2234 pa_source_new_data_done(&data);
2235 goto fail;
2236 }
2237 pa_source_new_data_set_avoid_resampling(&data, avoid_resampling);
2238
2239 pa_source_new_data_set_sample_spec(&data, &ss);
2240 pa_source_new_data_set_channel_map(&data, &map);
2241 pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2242
2243 pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2244 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2245 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2246 pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2247 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2248
2249 if (mapping) {
2250 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2251 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2252
2253 state = NULL;
2254 while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2255 pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2256 }
2257
2258 pa_alsa_init_description(data.proplist, card);
2259
2260 if (u->control_device)
2261 pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2262
2263 if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2264 pa_log("Invalid properties");
2265 pa_source_new_data_done(&data);
2266 goto fail;
2267 }
2268
2269 if (u->ucm_context) {
2270 pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card, u->pcm_handle, ignore_dB);
2271 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2272 } else {
2273 find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2274 if (u->mixer_path_set)
2275 pa_alsa_add_ports(&data, u->mixer_path_set, card);
2276 }
2277
2278 u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
2279 volume_is_set = data.volume_is_set;
2280 mute_is_set = data.muted_is_set;
2281 pa_source_new_data_done(&data);
2282
2283 if (!u->source) {
2284 pa_log("Failed to create source object");
2285 goto fail;
2286 }
2287
2288 if (u->ucm_context) {
2289 pa_device_port *port;
2290 unsigned h_prio = 0;
2291 PA_HASHMAP_FOREACH(port, u->source->ports, state) {
2292 if (!h_prio || port->priority > h_prio)
2293 h_prio = port->priority;
2294 }
2295         /* UCM port priorities are 100, 200, ..., 900; scale down to the units digit (e.g. 300 -> 3) */
2296 h_prio = h_prio / 100;
2297 u->source->priority += h_prio;
2298 }
2299
2300 if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2301 &u->source->thread_info.volume_change_safety_margin) < 0) {
2302 pa_log("Failed to parse deferred_volume_safety_margin parameter");
2303 goto fail;
2304 }
2305
2306 if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2307 &u->source->thread_info.volume_change_extra_delay) < 0) {
2308 pa_log("Failed to parse deferred_volume_extra_delay parameter");
2309 goto fail;
2310 }
2311
2312 u->source->parent.process_msg = source_process_msg;
2313 if (u->use_tsched)
2314 u->source->update_requested_latency = source_update_requested_latency_cb;
2315 u->source->set_state_in_main_thread = source_set_state_in_main_thread_cb;
2316 u->source->set_state_in_io_thread = source_set_state_in_io_thread_cb;
2317 if (u->ucm_context)
2318 u->source->set_port = source_set_port_ucm_cb;
2319 else
2320 u->source->set_port = source_set_port_cb;
2321 u->source->reconfigure = source_reconfigure_cb;
2322 u->source->userdata = u;
2323
2324 pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
2325 pa_source_set_rtpoll(u->source, u->rtpoll);
2326
2327 u->frame_size = frame_size;
2328 u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
2329 u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2330 u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2331 pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
2332
2333 pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2334 (double) u->hwbuf_size / (double) u->fragment_size,
2335 (long unsigned) u->fragment_size,
2336 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2337 (long unsigned) u->hwbuf_size,
2338 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
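    /* As a rough ballpark, with tsched in use the hardware buffer will have
     * been negotiated towards the 2s tsched size (~352800 bytes at 44.1kHz
     * S16 stereo), while IRQ-driven mode keeps the fragment-based sizing
     * computed earlier. */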
2339
2340 if (u->use_tsched) {
2341 u->tsched_watermark_ref = tsched_watermark;
2342 reset_watermark(u, u->tsched_watermark_ref, &ss, false);
2343 }
2344 else
2345 pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
2346
2347 reserve_update(u);
2348
2349 if (update_sw_params(u) < 0)
2350 goto fail;
2351
2352 if (setup_mixer(u, ignore_dB) < 0)
2353 goto fail;
2354
2355 pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2356
2357 thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
2358 if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2359 pa_log("Failed to create thread.");
2360 goto fail;
2361 }
2362 pa_xfree(thread_name);
2363 thread_name = NULL;
2364
2365 /* Get initial mixer settings */
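    /* If the configuration supplied an initial volume/mute, push it to the
     * hardware; otherwise read the current hardware state into the source so
     * both sides agree from the start. */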
2366 if (volume_is_set) {
2367 if (u->source->set_volume)
2368 u->source->set_volume(u->source);
2369 } else {
2370 if (u->source->get_volume)
2371 u->source->get_volume(u->source);
2372 }
2373
2374 if (mute_is_set) {
2375 if (u->source->set_mute)
2376 u->source->set_mute(u->source);
2377 } else {
2378 if (u->source->get_mute) {
2379 bool mute;
2380
2381 if (u->source->get_mute(u->source, &mute) >= 0)
2382 pa_source_set_mute(u->source, mute, false);
2383 }
2384 }
2385
2386 if ((volume_is_set || mute_is_set) && u->source->write_volume)
2387 u->source->write_volume(u->source);
2388
2389 pa_source_put(u->source);
2390
2391 if (profile_set)
2392 pa_alsa_profile_set_free(profile_set);
2393
2394 return u->source;
2395
2396 fail:
2397 pa_xfree(thread_name);
2398
2399 if (u)
2400 userdata_free(u);
2401
2402 if (profile_set)
2403 pa_alsa_profile_set_free(profile_set);
2404
2405 return NULL;
2406 }
2407
2408 static void userdata_free(struct userdata *u) {
2409 pa_assert(u);
2410
2411 if (u->source)
2412 pa_source_unlink(u->source);
2413
2414 if (u->thread) {
2415 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2416 pa_thread_free(u->thread);
2417 }
2418
2419 pa_thread_mq_done(&u->thread_mq);
2420
2421 if (u->source)
2422 pa_source_unref(u->source);
2423
2424 if (u->mixer_pd)
2425 pa_alsa_mixer_pdata_free(u->mixer_pd);
2426
2427 if (u->alsa_rtpoll_item)
2428 pa_rtpoll_item_free(u->alsa_rtpoll_item);
2429
2430 if (u->rtpoll)
2431 pa_rtpoll_free(u->rtpoll);
2432
2433 if (u->pcm_handle) {
2434 snd_pcm_drop(u->pcm_handle);
2435 snd_pcm_close(u->pcm_handle);
2436 }
2437
2438 if (u->mixer_fdl)
2439 pa_alsa_fdlist_free(u->mixer_fdl);
2440
2441     /* Only free the mixer_path if the source owns it */
2442 if (u->mixer_path && !u->mixer_path_set && !u->ucm_context)
2443 pa_alsa_path_free(u->mixer_path);
2444
2445 if (u->mixers)
2446 pa_hashmap_free(u->mixers);
2447
2448 if (u->smoother)
2449 pa_smoother_free(u->smoother);
2450
2451 if (u->supported_formats)
2452 pa_xfree(u->supported_formats);
2453
2454 if (u->supported_rates)
2455 pa_xfree(u->supported_rates);
2456
2457 reserve_done(u);
2458 monitor_done(u);
2459
2460 pa_xfree(u->device_name);
2461 pa_xfree(u->control_device);
2462 pa_xfree(u->paths_dir);
2463 pa_xfree(u);
2464 }
2465
2466 void pa_alsa_source_free(pa_source *s) {
2467 struct userdata *u;
2468
2469 pa_source_assert_ref(s);
2470 pa_assert_se(u = s->userdata);
2471
2472 userdata_free(u);
2473 }
2474