/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <signal.h>
#include <stdio.h>

#include <alsa/asoundlib.h>

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/util.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>

#ifdef USE_SMOOTHER_2
#include <pulsecore/time-smoother_2.h>
#else
#include <pulsecore/time-smoother.h>
#endif

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-source.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)              /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)         /* 20ms */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)        /* 10ms */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)         /* 5ms */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)     /* 20s */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)    /* 0ms */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC)  /* 100ms */
#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)            /* 10ms */

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                 /* 10ms */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                 /* 4ms */

#ifdef USE_SMOOTHER_2
#define SMOOTHER_WINDOW_USEC (15*PA_USEC_PER_SEC)                   /* 15s */
#else
#define SMOOTHER_WINDOW_USEC (10*PA_USEC_PER_SEC)                   /* 10s */
#define SMOOTHER_ADJUST_USEC (1*PA_USEC_PER_SEC)                    /* 1s */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                  /* 2ms */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)                /* 200ms */
#endif

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    pa_hashmap *mixers;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    pa_sample_spec verified_sample_spec;
    pa_sample_format_t *supported_formats;
    unsigned int *supported_rates;
    struct {
        size_t fragment_size;
        size_t nfrags;
        size_t tsched_size;
        size_t tsched_watermark;
    } initial_info;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    snd_pcm_uframes_t frames_per_block;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;
    pa_usec_t tsched_watermark_usec;

    char *device_name;    /* name of the PCM device */
    char *control_device; /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first;

    pa_rtpoll_item *alsa_rtpoll_item;

#ifdef USE_SMOOTHER_2
    pa_smoother_2 *smoother;
#else
    pa_smoother *smoother;
#endif
    uint64_t read_count;

#ifndef USE_SMOOTHER_2
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;
#endif

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};

enum {
    SOURCE_MESSAGE_SYNC_MIXER = PA_SOURCE_MESSAGE_MAX
};

static void userdata_free(struct userdata *u);
static int unsuspend(struct userdata *u, bool recovering);

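/* Hook callback for the device reservation protocol: another application has
 * asked us to release the device, so suspend the source. */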
static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    pa_log_debug("Suspending source %s, because another application requested us to release the device.", u->source->name);

    if (pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->source || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void *busy, struct userdata *u) {
    pa_assert(w);
    pa_assert(u);

    if (PA_PTR_TO_UINT(busy) && !u->reserve) {
        pa_log_debug("Suspending source %s, because another application is blocking the access to the device.", u->source->name);
        pa_source_suspend(u->source, true, PA_SUSPEND_APPLICATION);
    } else {
        pa_log_debug("Resuming source %s, because other applications aren't blocking access to the device any more.", u->source->name);
        pa_source_suspend(u->source, false, PA_SUSPEND_APPLICATION);
    }

    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

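/* Clamp the minimum sleep and wakeup times to at least one frame and to at
 * most half of the usable hardware buffer. */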
static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

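/* Keep the wakeup watermark within [min_wakeup, max_use - min_sleep] and
 * cache its usec equivalent. */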
static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;

    u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
}

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency, unless doing so was disabled in the
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

/* Called from IO context on unsuspend or from main thread when creating source */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work around an assertion in pa_source_set_latency_range_within_thread():
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}

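/* Split the currently requested latency into the time we may sleep and the
 * time reserved for processing (the watermark). */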
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t wm, usec;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_source_get_requested_latency_within_thread(u->source);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);

    wm = u->tsched_watermark_usec;

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}

/* Reset smoother and counters */
static void reset_vars(struct userdata *u) {

#ifdef USE_SMOOTHER_2
    pa_smoother_2_reset(u->smoother, pa_rtclock_now());
#else
    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;
#endif

    u->read_count = 0;
    u->first = true;
}

/* Called from IO context */
static void close_pcm(struct userdata *u) {

#ifdef USE_SMOOTHER_2
    pa_smoother_2_pause(u->smoother, pa_rtclock_now());
#else
    pa_smoother_pause(u->smoother, pa_rtclock_now());
#endif

    /* Let's suspend */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }
}

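/* Try to recover from an ALSA error (e.g. an overrun or a system suspend).
 * Returns 0 if the error was recovered in place, 1 if the PCM had to be
 * closed and reopened, and a negative value if recovery failed. */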
static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer overrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s, trying to restart PCM", call, pa_alsa_strerror(err));

        /* As a last measure, restart the PCM and inform the caller about it. */
        close_pcm(u);
        if (unsuspend(u, true) < 0)
            return -1;

        return 1;
    }

    reset_vars(u);
    return 0;
}

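/* Given the number of bytes that are ready to be read, compute how much
 * space is left in the hardware buffer before an overrun occurs, and adjust
 * the watermark accordingly. */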
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    bool overrun = false;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = true;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        bool reset_not_before = true;

        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = false;

            /* We decrease the watermark only if we have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}

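/* Read as much data as possible from the device using mmap access and post
 * it to the source. Returns 1 if any data was moved, 0 if there was nothing
 * to do, and a negative value on unrecoverable errors. */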
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    bool recovery_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            recovery_done = true;
            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
            /* pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                recovery_done = true;
                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                if (r == 1)
                    break;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            frames = PA_MIN(frames, u->frames_per_block);

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                recovery_done = true;
                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                if (r == 1)
                    break;

                return r;
            }

            work_done = true;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        /* If the PCM was recovered, it may need restarting. Reduce the sleep time
         * to 0 to ensure immediate restart. */
        if (recovery_done)
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}

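/* Same as mmap_read(), but reading with snd_pcm_readi() into freshly
 * allocated memblocks, for devices that do not support mmap access. */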
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    bool recovery_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            recovery_done = true;
            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = false;

        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read.\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = false;

        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            /* pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                recovery_done = true;
                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                if (r == 1)
                    break;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = false;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = true;

            u->read_count += frames * u->frame_size;

            /* pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        /* If the PCM was recovered, it may need restarting. Reduce the sleep time
         * to 0 to ensure immediate restart. */
        if (recovery_done)
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}

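/* Feed the smoother a new (system time, capture position) sample derived
 * from the current PCM delay. */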
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0;
#ifndef USE_SMOOTHER_2
    pa_usec_t now2;
#endif
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->source->sample_spec, true)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);

#ifdef USE_SMOOTHER_2
    pa_smoother_2_put(u->smoother, now1, position);
#else
    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
#endif
}

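/* Estimate the current source latency from the smoothed capture position. */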
static int64_t source_get_latency(struct userdata *u) {
    int64_t delay;
    pa_usec_t now1;
#ifndef USE_SMOOTHER_2
    pa_usec_t now2;
#endif

    pa_assert(u);

    now1 = pa_rtclock_now();

#ifdef USE_SMOOTHER_2
    delay = - pa_smoother_2_get_delay(u->smoother, now1, u->read_count);
#else
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
#endif

    return delay;
}

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static void suspend(struct userdata *u) {
    pa_assert(u);

    /* PCM may have been invalidated due to device failure.
     * In that case, there is nothing to do. */
    if (!u->pcm_handle)
        return;

    /* Close PCM device */
    close_pcm(u);

    pa_log_info("Device suspended...");
}

/* Called from IO context */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}

/* Called from IO context on unsuspend */
static void update_size(struct userdata *u, pa_sample_spec *ss) {
    pa_assert(u);
    pa_assert(ss);

    u->frame_size = pa_frame_size(ss);
    u->frames_per_block = pa_mempool_block_size_max(u->core->mempool) / u->frame_size;

    /* use initial values including module arguments */
    u->fragment_size = u->initial_info.fragment_size;
    u->hwbuf_size = u->initial_info.nfrags * u->fragment_size;
    u->tsched_size = u->initial_info.tsched_size;
    u->tsched_watermark = u->initial_info.tsched_watermark;

    u->tsched_watermark_ref = u->tsched_watermark;

    pa_log_info("Updated frame_size %zu, frames_per_block %lu, fragment_size %zu, hwbuf_size %zu, tsched(size %zu, watermark %zu)",
                u->frame_size, (unsigned long) u->frames_per_block, u->fragment_size, u->hwbuf_size, u->tsched_size, u->tsched_watermark);
}

/* Called from IO context */
static int unsuspend(struct userdata *u, bool recovering) {
    pa_sample_spec ss;
    int err, i;
    bool b, d;
    snd_pcm_uframes_t period_frames, buffer_frames;
    snd_pcm_uframes_t tsched_frames = 0;
    bool frame_size_changed = false;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /*
     * On some machines, thread_func() may receive POLLERR events during system
     * suspend and resume before the device nodes in /dev/snd/ become accessible
     * again. thread_func() then calls unsuspend() to try to recover the PCM,
     * which would make snd_pcm_open() fail. Sleep briefly and retry to make
     * sure those nodes are accessible.
     */
    for (i = 0; i < 4; i++) {
        if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                                SND_PCM_NONBLOCK|
                                SND_PCM_NO_AUTO_RESAMPLE|
                                SND_PCM_NO_AUTO_CHANNELS|
                                SND_PCM_NO_AUTO_FORMAT)) < 0 && recovering)
            pa_msleep(25);
        else
            break;
    }

    if (err < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    if (pa_frame_size(&u->source->sample_spec) != u->frame_size) {
        update_size(u, &u->source->sample_spec);
        tsched_frames = u->tsched_size / u->frame_size;
        frame_size_changed = true;
    }

    ss = u->source->sample_spec;
    period_frames = u->fragment_size / u->frame_size;
    buffer_frames = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_frames, &buffer_frames, tsched_frames, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (frame_size_changed) {
        u->fragment_size = (size_t)(period_frames * u->frame_size);
        u->hwbuf_size = (size_t)(buffer_frames * u->frame_size);
        pa_proplist_setf(u->source->proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%zu", u->hwbuf_size);
        pa_proplist_setf(u->source->proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%zu", u->fragment_size);

    } else if (period_frames * u->frame_size != u->fragment_size ||
               buffer_frames * u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %zu/%zu, New %lu/%lu)",
                    u->hwbuf_size, u->fragment_size,
                    (unsigned long) buffer_frames * u->frame_size, (unsigned long) period_frames * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    reset_vars(u);

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched && !recovering)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, true);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}

/* Called from the IO thread or the main thread depending on whether deferred
 * volume is enabled or not (with deferred volume all mixer handling is done
 * from the IO thread).
 *
 * Sets the mixer settings to match the current source and port state (the port
 * is given as an argument, because active_port may still point to the old
 * port, if we're switching ports). */
static void sync_mixer(struct userdata *u, pa_device_port *port) {
    pa_alsa_setting *setting = NULL;

    pa_assert(u);

    if (!u->mixer_path)
        return;

    /* port may be NULL, because if we use a synthesized mixer path, then the
     * source has no ports. */
    if (port && !u->ucm_context) {
        pa_alsa_port_data *data;

        data = PA_DEVICE_PORT_DATA(port);
        setting = data->setting;
    }

    pa_alsa_path_select(u->mixer_path, setting, u->mixer_handle, u->source->muted);

    if (u->source->set_mute)
        u->source->set_mute(u->source);
    if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
        if (u->source->write_volume)
            u->source->write_volume(u->source);
    } else {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    }
}

/* Called from IO context */
static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SOURCE(o)->userdata;

    switch (code) {

        case PA_SOURCE_MESSAGE_GET_LATENCY: {
            int64_t r = 0;

            if (u->pcm_handle)
                r = source_get_latency(u);

            *((int64_t*) data) = r;

            return 0;
        }

        case SOURCE_MESSAGE_SYNC_MIXER: {
            pa_device_port *port = data;

            sync_mixer(u, port);
            return 0;
        }
    }

    return pa_source_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int source_set_state_in_main_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
    pa_source_state_t old_state;
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    /* When our session becomes active, we need to sync the mixer, because
     * another user may have changed the mixer settings.
     *
     * If deferred volume is enabled, the syncing is done in the
     * set_state_in_io_thread() callback instead. */
    if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME)
        && (s->suspend_cause & PA_SUSPEND_SESSION)
        && !(new_suspend_cause & PA_SUSPEND_SESSION))
        sync_mixer(u, s->active_port);

    old_state = u->source->state;

    if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

/* Called from the IO thread. */
static int source_set_state_in_io_thread_cb(pa_source *s, pa_source_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
    struct userdata *u;

    pa_assert(s);
    pa_assert_se(u = s->userdata);

    /* When our session becomes active, we need to sync the mixer, because
     * another user may have changed the mixer settings.
     *
     * If deferred volume is disabled, the syncing is done in the
     * set_state_in_main_thread() callback instead. */
    if ((s->flags & PA_SOURCE_DEFERRED_VOLUME)
        && (s->suspend_cause & PA_SUSPEND_SESSION)
        && !(new_suspend_cause & PA_SUSPEND_SESSION))
        sync_mixer(u, s->active_port);

    /* It may be that only the suspend cause is changing, in which case there's
     * nothing more to do. */
    if (new_state == s->thread_info.state)
        return 0;

    switch (new_state) {

        case PA_SOURCE_SUSPENDED: {
            pa_assert(PA_SOURCE_IS_OPENED(s->thread_info.state));

            suspend(u);

            break;
        }

        case PA_SOURCE_IDLE:
        case PA_SOURCE_RUNNING: {
            int r;

            if (s->thread_info.state == PA_SOURCE_INIT) {
                if (build_pollfd(u) < 0)
                    /* FIXME: This will cause an assertion failure, because
                     * with the current design pa_source_put() is not allowed
                     * to fail and pa_source_put() has no fallback code that
                     * would start the source suspended if opening the device
                     * fails. */
                    return -PA_ERR_IO;
            }

            if (s->thread_info.state == PA_SOURCE_SUSPENDED) {
                if ((r = unsuspend(u, false)) < 0)
                    return r;
            }

            break;
        }

        case PA_SOURCE_UNLINKED:
        case PA_SOURCE_INIT:
        case PA_SOURCE_INVALID_STATE:
            ;
    }

    return 0;
}

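/* Mixer event callback used when deferred volume is disabled; driven from
 * the main thread, it re-reads volume and mute from the hardware. */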
static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SOURCE_IS_LINKED(u->source->state))
        return 0;

    if (u->source->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_source_get_volume(u->source, true);
        pa_source_get_mute(u->source, true);
    }

    return 0;
}

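/* Mixer event callback used when deferred volume is enabled; driven from
 * the IO thread, it refreshes volume and mute from there. */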
static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->source->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_source_update_volume_and_mute(u->source);

    return 0;
}

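/* Read the current hardware volume from the mixer into s->real_volume. */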
static void source_get_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s",
                 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_source_set_soft_volume(s, NULL);
}

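/* Write the requested volume to the hardware as closely as possible and,
 * if the mixer has a dB scale, compute the software volume needed to make
 * up the difference. */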
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    bool deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
    bool write_to_hw = !deferred_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* If the set_volume() is called because of ucm active_port changing, the
     * volume should be written to hw immediately, otherwise this volume will be
     * overridden by calling get_volume_cb() which is called by
     * _disdev/_enadev() -> io_mixer_callback() */
    if (u->ucm_context && s->port_changing)
        write_to_hw = true;

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, write_to_hw) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}

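/* Deferred volume: write thread_info.current_hw_volume to the hardware from
 * the IO thread and log if the result does not match the request. */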
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}

static int source_get_mute_cb(pa_source *s, bool *mute) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
        return -1;

    return 0;
}

static void source_set_mute_cb(pa_source *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

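/* Install hardware volume/mute callbacks if the mixer path supports them,
 * otherwise fall back to software volume/mute. */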
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path || !u->mixer_path->has_volume) {
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, true);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            pa_source_enable_decibel_volume(u->source, false);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path || !u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}

static int source_set_port_ucm_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_ucm_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    data = PA_DEVICE_PORT_DATA(p);
    u->mixer_path = data->path;
    mixer_volume_init(u);

    if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
        pa_asyncmsgq_send(u->source->asyncmsgq, PA_MSGOBJECT(u->source), SOURCE_MESSAGE_SYNC_MIXER, p, 0, NULL);
    else
        sync_mixer(u, p);

    return pa_alsa_ucm_set_port(u->ucm_context, p, false);
}

static int source_set_port_cb(pa_source *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);
    pa_assert(!u->ucm_context);

    data = PA_DEVICE_PORT_DATA(p);
    pa_assert_se(u->mixer_path = data->path);
    mixer_volume_init(u);

    if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
        pa_asyncmsgq_send(u->source->asyncmsgq, PA_MSGOBJECT(u->source), SOURCE_MESSAGE_SYNC_MIXER, p, 0, NULL);
    else
        sync_mixer(u, p);

    return 0;
}

static void source_update_requested_latency_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_assert(u);
    pa_assert(u->use_tsched); /* only when timer scheduling is used
                               * can we dynamically adjust the
                               * latency */

    if (!u->pcm_handle)
        return;

    update_sw_params(u);
}

static void source_reconfigure_cb(pa_source *s, pa_sample_spec *spec, bool passthrough) {
    struct userdata *u = s->userdata;
    int i;
    bool format_supported = false;
    bool rate_supported = false;
#ifdef USE_SMOOTHER_2
    pa_sample_spec effective_spec;
#endif

    pa_assert(u);

#ifdef USE_SMOOTHER_2
    effective_spec.channels = s->sample_spec.channels;
#endif

    for (i = 0; u->supported_formats[i] != PA_SAMPLE_MAX; i++) {
        if (u->supported_formats[i] == spec->format) {
            pa_source_set_sample_format(u->source, spec->format);
#ifdef USE_SMOOTHER_2
            effective_spec.format = spec->format;
#endif
            format_supported = true;
            break;
        }
    }

    if (!format_supported) {
        pa_log_info("Source does not support sample format of %s, set it to a verified value",
                    pa_sample_format_to_string(spec->format));
        pa_source_set_sample_format(u->source, u->verified_sample_spec.format);
#ifdef USE_SMOOTHER_2
        effective_spec.format = u->verified_sample_spec.format;
#endif
    }

    for (i = 0; u->supported_rates[i]; i++) {
        if (u->supported_rates[i] == spec->rate) {
            pa_source_set_sample_rate(u->source, spec->rate);
#ifdef USE_SMOOTHER_2
            effective_spec.rate = spec->rate;
#endif
            rate_supported = true;
            break;
        }
    }

    if (!rate_supported) {
        pa_log_info("Source does not support sample rate of %u, set it to a verified value", spec->rate);
        pa_source_set_sample_rate(u->source, u->verified_sample_spec.rate);
#ifdef USE_SMOOTHER_2
        effective_spec.rate = u->verified_sample_spec.rate;
#endif
    }

#ifdef USE_SMOOTHER_2
    pa_smoother_2_set_sample_spec(u->smoother, pa_rtclock_now(), &effective_spec);
#endif
}

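/* The IO thread: read data from the device, keep the timing information up
 * to date and sleep until the next watermark-driven wakeup or ALSA poll
 * event. */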
thread_func(void * userdata)1695 static void thread_func(void *userdata) {
1696 struct userdata *u = userdata;
1697 unsigned short revents = 0;
1698
1699 pa_assert(u);
1700
1701 pa_log_debug("Thread starting up");
1702
1703 if (u->core->realtime_scheduling)
1704 pa_thread_make_realtime(u->core->realtime_priority);
1705
1706 pa_thread_mq_install(&u->thread_mq);
1707
1708 for (;;) {
1709 int ret;
1710 pa_usec_t rtpoll_sleep = 0, real_sleep;
1711
1712 #ifdef DEBUG_TIMING
1713 pa_log_debug("Loop");
1714 #endif
1715
1716 /* Read some data and pass it to the sources */
1717 if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1718 int work_done;
1719 pa_usec_t sleep_usec = 0;
1720 bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1721
1722 if (u->first) {
1723 pa_log_info("Starting capture.");
1724 snd_pcm_start(u->pcm_handle);
1725
1726 #ifdef USE_SMOOTHER_2
1727 pa_smoother_2_resume(u->smoother, pa_rtclock_now());
1728 #else
1729 pa_smoother_resume(u->smoother, pa_rtclock_now(), true);
1730 #endif
1731
1732 u->first = false;
1733 }
1734
1735 if (u->use_mmap)
1736 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1737 else
1738 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1739
1740 if (work_done < 0)
1741 goto fail;
1742
1743 /* pa_log_debug("work_done = %i", work_done); */
1744
1745 if (work_done)
1746 update_smoother(u);
1747
1748 if (u->use_tsched) {
1749 pa_usec_t cusec;
1750
1751 /* OK, the capture buffer is now empty, let's
1752 * calculate when to wake up next */
1753
1754 /* pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1755
1756 /* Convert from the sound card time domain to the
1757 * system time domain */
1758 #ifdef USE_SMOOTHER_2
1759 cusec = pa_smoother_2_translate(u->smoother, sleep_usec);
1760 #else
1761 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1762 #endif
1763
1764 /* pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1765
1766 /* We don't trust the conversion, so we wake up whatever comes first */
1767 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1768 }
1769 }
1770
1771 if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1772 pa_usec_t volume_sleep;
1773 pa_source_volume_change_apply(u->source, &volume_sleep);
1774 if (volume_sleep > 0) {
1775 if (rtpoll_sleep > 0)
1776 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1777 else
1778 rtpoll_sleep = volume_sleep;
1779 }
1780 }
1781
1782 if (rtpoll_sleep > 0) {
1783 pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            real_sleep = pa_rtclock_now();
        } else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                    (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLIN) {
                if ((err = pa_alsa_recover_from_poll(u->pcm_handle, revents)) < 0)
                    goto fail;

                /* Stream needs to be restarted */
                if (err == 1) {
                    close_pcm(u);
                    if (unsuspend(u, true) < 0)
                        goto fail;
                } else
                    reset_vars(u);

                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

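/* Pick the source name: an explicit source_name= or name= module argument
 * wins and makes name registration strict (a collision becomes an error
 * instead of being resolved by mangling); otherwise the name is derived
 * from the device id or device name, plus the mapping name, under the
 * "alsa_input." prefix. */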
static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
        pa_source_new_data_set_name(data, n);
        data->namereg_fail = true;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = true;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = false;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_input.%s", n);

    pa_source_new_data_set_name(data, t);
    pa_xfree(t);
}

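/* Locate a mixer for the PCM device: either synthesize a mixer path from an
 * explicitly requested element, or borrow the input path set of the mapping
 * (which then remains owned by the mapping, see userdata_free()). */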
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
    const char *mdev;

    if (!mapping && !element)
        return;

    if (!element && mapping && pa_alsa_path_set_is_empty(mapping->input_path_set))
        return;

    u->mixers = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func,
                                    NULL, (pa_free_cb_t) pa_alsa_mixer_free);

    mdev = mapping ? pa_proplist_gets(mapping->proplist, "alsa.mixer_device") : NULL;
    if (mdev) {
        u->mixer_handle = pa_alsa_open_mixer_by_name(u->mixers, mdev, false);
    } else {
        u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->mixers, u->pcm_handle, false);
    }
    if (!u->mixer_handle) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, NULL, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {
        u->mixer_path_set = mapping->input_path_set;
    }

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    u->mixer_handle = NULL;
    pa_hashmap_free(u->mixers);
    u->mixers = NULL;
}

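/* Activate the mixer path matching the active port (or the only path we
 * have), initialize volume/mute handling, and register change callbacks if
 * any path exposes volume or mute controls. */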
static int setup_mixer(struct userdata *u, bool ignore_dB) {
    bool need_mixer_callback = false;

    pa_assert(u);

    /* This code is before the u->mixer_handle check, because if the UCM
     * configuration doesn't specify volume or mute controls, u->mixer_handle
     * will be NULL, but the UCM device enable sequence will still need to be
     * executed. */
    if (u->source->active_port && u->ucm_context) {
        if (pa_alsa_ucm_set_port(u->ucm_context, u->source->active_port, false) < 0)
            return -1;
    }

    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        if (!u->ucm_context) {
            pa_alsa_port_data *data;

            /* We have a list of supported paths, so let's activate the
             * one that has been chosen as active */

            data = PA_DEVICE_PORT_DATA(u->source->active_port);
            u->mixer_path = data->path;

            pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->source->muted);
        } else {
            pa_alsa_ucm_port_data *data;

            data = PA_DEVICE_PORT_DATA(u->source->active_port);

            /* Now activate volume controls, if any */
            if (data->path) {
                u->mixer_path = data->path;
                pa_alsa_path_select(u->mixer_path, NULL, u->mixer_handle, u->source->muted);
            }
        }
    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->source->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = true;
        }
    } else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}

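/* Create and set up an ALSA capture source: parse the module arguments, open
 * the PCM device, probe supported formats and rates, create the pa_source,
 * set up the mixer and ports, and finally start the IO thread. */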
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    char *thread_name = NULL;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    bool use_mmap = true;
    bool use_tsched = true;
    bool ignore_dB = false;
    bool namereg_fail = false;
    bool deferred_volume = false;
    bool fixed_latency_range = false;
    bool b;
    bool d;
    bool avoid_resampling;
    pa_source_new_data data;
    bool volume_is_set;
    bool mute_is_set;
    pa_alsa_profile_set *profile_set = NULL;
    void *state;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    avoid_resampling = m->core->avoid_resampling;

    /* Pick sample spec overrides from the mapping, if any */
    if (mapping) {
        if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
            ss.format = mapping->sample_spec.format;
        if (mapping->sample_spec.rate != 0)
            ss.rate = mapping->sample_spec.rate;
        if (mapping->sample_spec.channels != 0) {
            ss.channels = mapping->sample_spec.channels;
            if (pa_channel_map_valid(&mapping->channel_map))
                pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
        }
    }

    /* Override with modargs if provided */
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

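    /* Derive the period and buffer geometry in frames from the byte metrics */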
    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->tsched_size = tsched_size;
    u->initial_info.nfrags = (size_t) nfrags;
    u->initial_info.fragment_size = (size_t) frag_size;
    u->initial_info.tsched_size = (size_t) tsched_size;
    u->initial_info.tsched_watermark = (size_t) tsched_watermark;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = true;
    u->rtpoll = pa_rtpoll_new();

    if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
        pa_log("pa_thread_mq_init() failed.");
        goto fail;
    }

#ifndef USE_SMOOTHER_2
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            true,
            true,
            5,
            pa_rtclock_now(),
            true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
#endif

    /* use ucm */
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

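    /* The open helpers below clear b and d if mmap or timer-based scheduling
     * respectively turn out to be unsupported by the device */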
    b = use_mmap;
    d = use_tsched;

    /* Force ALSA to reread its configuration if module-alsa-card didn't
     * do it for us. This matters if our device was hot-plugged after ALSA
     * has already read its configuration - see
     * https://bugs.freedesktop.org/show_bug.cgi?id=54029
     */

    if (!card)
        snd_config_update_free_global();

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, false)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = false;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = false;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");
        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on overrun");
    }

    u->verified_sample_spec = ss;

    u->supported_formats = pa_alsa_get_supported_formats(u->pcm_handle, ss.format);
    if (!u->supported_formats) {
        pa_log_error("Failed to find any supported sample formats.");
        goto fail;
    }

    u->supported_rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
    if (!u->supported_rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    if (pa_modargs_get_value_boolean(ma, "avoid_resampling", &avoid_resampling) < 0) {
        pa_log("Failed to parse avoid_resampling argument.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    pa_source_new_data_set_avoid_resampling(&data, avoid_resampling);

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);
    pa_source_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        state = NULL;
        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }

    pa_alsa_init_description(data.proplist, card);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context) {
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, false, card, u->pcm_handle, ignore_dB);
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
    } else {
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
        if (u->mixer_path_set)
            pa_alsa_add_ports(&data, u->mixer_path_set, card);
    }

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    volume_is_set = data.volume_is_set;
    mute_is_set = data.muted_is_set;
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

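    /* The smoother is created only now because it needs the frame size and
     * the sample rate the source was actually created with */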
#ifdef USE_SMOOTHER_2
    u->smoother = pa_smoother_2_new(SMOOTHER_WINDOW_USEC, pa_rtclock_now(), frame_size, u->source->sample_spec.rate);
#endif

    if (u->ucm_context) {
        pa_device_port *port;
        unsigned h_prio = 0;
        PA_HASHMAP_FOREACH(port, u->source->ports, state) {
            if (!h_prio || port->priority > h_prio)
                h_prio = port->priority;
        }
        /* UCM port priorities are 100, 200, ..., 900; scale the highest one
         * down to a single digit before adding it to the source priority */
        h_prio = h_prio / 100;
        u->source->priority += h_prio;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->source->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->source->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state_in_main_thread = source_set_state_in_main_thread_cb;
    u->source->set_state_in_io_thread = source_set_state_in_io_thread_cb;
    if (u->ucm_context)
        u->source->set_port = source_set_port_ucm_cb;
    else
        u->source->set_port = source_set_port_cb;
    u->source->reconfigure = source_reconfigure_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    u->frame_size = frame_size;
    u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, false);
    } else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    thread_name = pa_sprintf_malloc("alsa-source-%s", pa_strnull(pa_proplist_gets(u->source->proplist, "alsa.id")));
    if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    pa_xfree(thread_name);
    thread_name = NULL;

    /* Get initial mixer settings */
    if (volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (mute_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute) {
            bool mute;

            if (u->source->get_mute(u->source, &mute) >= 0)
                pa_source_set_mute(u->source, mute, false);
        }
    }

    if ((volume_is_set || mute_is_set) && u->source->write_volume)
        u->source->write_volume(u->source);

    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:
    pa_xfree(thread_name);

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

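/* Tear down everything set up by pa_alsa_source_new(): stop the IO thread
 * first, then release the PCM, mixer and reservation resources. All frees
 * are NULL-guarded, so this is also safe to call on a partially initialized
 * userdata from the failure path above. */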
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->source)
        pa_source_unlink(u->source);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->source)
        pa_source_unref(u->source);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Only free the mixer_path if the source owns it */
    if (u->mixer_path && !u->mixer_path_set && !u->ucm_context)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixers)
        pa_hashmap_free(u->mixers);

    if (u->smoother)
#ifdef USE_SMOOTHER_2
        pa_smoother_2_free(u->smoother);
#else
        pa_smoother_free(u->smoother);
#endif

    if (u->supported_formats)
        pa_xfree(u->supported_formats);

    if (u->supported_rates)
        pa_xfree(u->supported_rates);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}

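/* Public destructor for the source; frees all per-source resources via
 * userdata_free() */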
void pa_alsa_source_free(pa_source *s) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}
