1 /***
2   This file is part of PulseAudio.
3 
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6 
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11 
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   Lesser General Public License for more details.
16 
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20 
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24 
25 #include <signal.h>
26 #include <stdio.h>
27 
28 #include <alsa/asoundlib.h>
29 
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
32 #endif
33 
34 #include <pulse/rtclock.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/volume.h>
38 #include <pulse/xmalloc.h>
39 #include <pulse/internal.h>
40 
41 #include <pulsecore/core.h>
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/module.h>
44 #include <pulsecore/memchunk.h>
45 #include <pulsecore/sink.h>
46 #include <pulsecore/modargs.h>
47 #include <pulsecore/core-rtclock.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/sample-util.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/macro.h>
52 #include <pulsecore/thread.h>
53 #include <pulsecore/thread-mq.h>
54 #include <pulsecore/rtpoll.h>
55 
56 #ifdef USE_SMOOTHER_2
57 #include <pulsecore/time-smoother_2.h>
58 #else
59 #include <pulsecore/time-smoother.h>
60 #endif
61 
62 #include <modules/reserve-wrap.h>
63 
64 #include "alsa-util.h"
65 #include "alsa-sink.h"
66 
67 /* #define DEBUG_TIMING */
68 
69 #define DEFAULT_DEVICE "default"
70 
71 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s    -- Overall buffer size */
72 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms  -- Fill up when only this much is left in the buffer */
73 
74 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  -- On underrun, increase watermark by this */
75 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms   -- When everything's great, decrease watermark by this */
76 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s   -- How long after a drop out recheck if things are good now */
77 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms   -- If the buffer level is ever below this threshold, increase the watermark */
78 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */
79 
80 /* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
81  * will increase the watermark only if we hit a real underrun. */
82 
83 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms  -- Sleep at least 10ms on each iteration */
84 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms   -- Wake up at least this long before the buffer runs empty */
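/* Illustrative arithmetic (assumed figures, not values taken from this file):
 * at 44.1 kHz, 2ch, S16LE a frame is 4 bytes, i.e. 176400 bytes/s, so the
 * 20 ms DEFAULT_TSCHED_WATERMARK_USEC corresponds to roughly 3528 bytes and
 * the 2 s DEFAULT_TSCHED_BUFFER_USEC to roughly 352800 bytes.
 * pa_usec_to_bytes() performs this conversion against the sink's actual
 * sample spec at runtime. */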
85 
86 #ifdef USE_SMOOTHER_2
87 #define SMOOTHER_WINDOW_USEC  (15*PA_USEC_PER_SEC)                 /* 15s   -- smoother window size */
88 #else
89 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s   -- smoother window size */
90 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s    -- smoother adjust time */
91 
92 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms   -- min smoother update interval */
93 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */
94 #endif
95 
96 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
97 
98 #define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
99 #define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms, depending on channels/rate/sample we may rewind more than 256 above */
100 
101 #define DEFAULT_WRITE_ITERATION_THRESHOLD 0.03 /* don't iterate write if < 3% of the buffer is available */
102 
103 struct userdata {
104     pa_core *core;
105     pa_module *module;
106     pa_sink *sink;
107 
108     pa_thread *thread;
109     pa_thread_mq thread_mq;
110     pa_rtpoll *rtpoll;
111 
112     snd_pcm_t *pcm_handle;
113 
114     char *paths_dir;
115     pa_alsa_fdlist *mixer_fdl;
116     pa_alsa_mixer_pdata *mixer_pd;
117     pa_hashmap *mixers;
118     snd_mixer_t *mixer_handle;
119     pa_alsa_path_set *mixer_path_set;
120     pa_alsa_path *mixer_path;
121 
122     pa_cvolume hardware_volume;
123 
124     pa_sample_spec verified_sample_spec;
125     pa_sample_format_t *supported_formats;
126     unsigned int *supported_rates;
127     struct {
128         size_t fragment_size;
129         size_t nfrags;
130         size_t tsched_size;
131         size_t tsched_watermark;
132         size_t rewind_safeguard;
133     } initial_info;
134 
135     size_t
136         frame_size,
137         fragment_size,
138         hwbuf_size,
139         tsched_size,
140         tsched_watermark,
141         tsched_watermark_ref,
142         hwbuf_unused,
143         min_sleep,
144         min_wakeup,
145         watermark_inc_step,
146         watermark_dec_step,
147         watermark_inc_threshold,
148         watermark_dec_threshold,
149         rewind_safeguard;
150 
151     snd_pcm_uframes_t frames_per_block;
152 
153     pa_usec_t watermark_dec_not_before;
154     pa_usec_t min_latency_ref;
155     pa_usec_t tsched_watermark_usec;
156 
157     pa_memchunk memchunk;
158 
159     char *device_name;  /* name of the PCM device */
160     char *control_device; /* name of the control device */
161 
162     bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;
163 
164     bool first, after_rewind;
165 
166     pa_rtpoll_item *alsa_rtpoll_item;
167 
168 #ifdef USE_SMOOTHER_2
169     pa_smoother_2 *smoother;
170 #else
171     pa_smoother *smoother;
172 #endif
173     uint64_t write_count;
174     uint64_t since_start;
175 
176 #ifndef USE_SMOOTHER_2
177     pa_usec_t smoother_interval;
178     pa_usec_t last_smoother_update;
179 #endif
180 
181     pa_idxset *formats;
182 
183     pa_reserve_wrapper *reserve;
184     pa_hook_slot *reserve_slot;
185     pa_reserve_monitor_wrapper *monitor;
186     pa_hook_slot *monitor_slot;
187 
188     /* ucm context */
189     pa_alsa_ucm_mapping_context *ucm_context;
190 };
191 
192 enum {
193     SINK_MESSAGE_SYNC_MIXER = PA_SINK_MESSAGE_MAX
194 };
195 
196 static void userdata_free(struct userdata *u);
197 static int unsuspend(struct userdata *u, bool recovering);
198 
199 /* FIXME: Is there a better way to do this than device names? */
200 static bool is_iec958(struct userdata *u) {
201     return (strncmp("iec958", u->device_name, 6) == 0);
202 }
203 
204 static bool is_hdmi(struct userdata *u) {
205     return (strncmp("hdmi", u->device_name, 4) == 0);
206 }
207 
208 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
209     pa_assert(r);
210     pa_assert(u);
211 
212     pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);
213 
214     if (pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION) < 0)
215         return PA_HOOK_CANCEL;
216 
217     return PA_HOOK_OK;
218 }
219 
220 static void reserve_done(struct userdata *u) {
221     pa_assert(u);
222 
223     if (u->reserve_slot) {
224         pa_hook_slot_free(u->reserve_slot);
225         u->reserve_slot = NULL;
226     }
227 
228     if (u->reserve) {
229         pa_reserve_wrapper_unref(u->reserve);
230         u->reserve = NULL;
231     }
232 }
233 
234 static void reserve_update(struct userdata *u) {
235     const char *description;
236     pa_assert(u);
237 
238     if (!u->sink || !u->reserve)
239         return;
240 
241     if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
242         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
243 }
244 
245 static int reserve_init(struct userdata *u, const char *dname) {
246     char *rname;
247 
248     pa_assert(u);
249     pa_assert(dname);
250 
251     if (u->reserve)
252         return 0;
253 
254     if (pa_in_system_mode())
255         return 0;
256 
257     if (!(rname = pa_alsa_get_reserve_name(dname)))
258         return 0;
259 
260     /* We are resuming, try to lock the device */
261     u->reserve = pa_reserve_wrapper_get(u->core, rname);
262     pa_xfree(rname);
263 
264     if (!(u->reserve))
265         return -1;
266 
267     reserve_update(u);
268 
269     pa_assert(!u->reserve_slot);
270     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
271 
272     return 0;
273 }
274 
275 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void *busy, struct userdata *u) {
276     pa_assert(w);
277     pa_assert(u);
278 
279     if (PA_PTR_TO_UINT(busy) && !u->reserve) {
280         pa_log_debug("Suspending sink %s, because another application is blocking the access to the device.", u->sink->name);
281         pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
282     } else {
283         pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
284         pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
285     }
286 
287     return PA_HOOK_OK;
288 }
289 
290 static void monitor_done(struct userdata *u) {
291     pa_assert(u);
292 
293     if (u->monitor_slot) {
294         pa_hook_slot_free(u->monitor_slot);
295         u->monitor_slot = NULL;
296     }
297 
298     if (u->monitor) {
299         pa_reserve_monitor_wrapper_unref(u->monitor);
300         u->monitor = NULL;
301     }
302 }
303 
304 static int reserve_monitor_init(struct userdata *u, const char *dname) {
305     char *rname;
306 
307     pa_assert(u);
308     pa_assert(dname);
309 
310     if (pa_in_system_mode())
311         return 0;
312 
313     if (!(rname = pa_alsa_get_reserve_name(dname)))
314         return 0;
315 
316     /* We are resuming, try to lock the device */
317     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
318     pa_xfree(rname);
319 
320     if (!(u->monitor))
321         return -1;
322 
323     pa_assert(!u->monitor_slot);
324     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
325 
326     return 0;
327 }
328 
329 static void fix_min_sleep_wakeup(struct userdata *u) {
330     size_t max_use, max_use_2;
331 
332     pa_assert(u);
333     pa_assert(u->use_tsched);
334 
335     max_use = u->hwbuf_size - u->hwbuf_unused;
336     max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
337 
338     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
339     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
340 
341     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
342     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
343 }
344 
345 static void fix_tsched_watermark(struct userdata *u) {
346     size_t max_use;
347     pa_assert(u);
348     pa_assert(u->use_tsched);
349 
350     max_use = u->hwbuf_size - u->hwbuf_unused;
351 
352     if (u->tsched_watermark > max_use - u->min_sleep)
353         u->tsched_watermark = max_use - u->min_sleep;
354 
355     if (u->tsched_watermark < u->min_wakeup)
356         u->tsched_watermark = u->min_wakeup;
357 
358     u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
359 }
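/* Taken together, fix_min_sleep_wakeup() and fix_tsched_watermark() keep the
 * watermark within [min_wakeup, max_use - min_sleep]. E.g. with a 2 s usable
 * buffer and the 10 ms/4 ms minimums above (assumed figures), the watermark
 * can range from about 4 ms up to about 1.99 s worth of audio. */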
360 
361 static void increase_watermark(struct userdata *u) {
362     size_t old_watermark;
363     pa_usec_t old_min_latency, new_min_latency;
364 
365     pa_assert(u);
366     pa_assert(u->use_tsched);
367 
368     /* First, just try to increase the watermark */
369     old_watermark = u->tsched_watermark;
370     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
371     fix_tsched_watermark(u);
372 
373     if (old_watermark != u->tsched_watermark) {
374         pa_log_info("Increasing wakeup watermark to %0.2f ms",
375                     (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
376         return;
377     }
378 
379     /* Hmm, we cannot increase the watermark any further, hence let's
380        raise the latency, unless doing so was disabled in
381        configuration */
382     if (u->fixed_latency_range)
383         return;
384 
385     old_min_latency = u->sink->thread_info.min_latency;
386     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
387     new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
388 
389     if (old_min_latency != new_min_latency) {
390         pa_log_info("Increasing minimal latency to %0.2f ms",
391                     (double) new_min_latency / PA_USEC_PER_MSEC);
392 
393         pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
394     }
395 
396     /* When we reach this we're officially fucked! */
397 }
398 
399 static void decrease_watermark(struct userdata *u) {
400     size_t old_watermark;
401     pa_usec_t now;
402 
403     pa_assert(u);
404     pa_assert(u->use_tsched);
405 
406     now = pa_rtclock_now();
407 
408     if (u->watermark_dec_not_before <= 0)
409         goto restart;
410 
411     if (u->watermark_dec_not_before > now)
412         return;
413 
414     old_watermark = u->tsched_watermark;
415 
416     if (u->tsched_watermark < u->watermark_dec_step)
417         u->tsched_watermark = u->tsched_watermark / 2;
418     else
419         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
420 
421     fix_tsched_watermark(u);
422 
423     if (old_watermark != u->tsched_watermark)
424         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
425                     (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
426 
427     /* We don't change the latency range */
428 
429 restart:
430     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
431 }
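/* Note the hysteresis above: after each decrease (or after the timer is armed
 * for the first time) watermark_dec_not_before is pushed
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC (20 s) into the future, so the watermark
 * is lowered at most once per verification period, while
 * increase_watermark() takes effect immediately. */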
432 
433 /* Called from IO Context on unsuspend or from main thread when creating sink */
434 static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
435                             bool in_thread) {
436     u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->sink->sample_spec);
437 
438     u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
439     u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
440 
441     u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
442     u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
443 
444     fix_min_sleep_wakeup(u);
445     fix_tsched_watermark(u);
446 
447     if (in_thread)
448         pa_sink_set_latency_range_within_thread(u->sink,
449                                                 u->min_latency_ref,
450                                                 pa_bytes_to_usec(u->hwbuf_size, ss));
451     else {
452         pa_sink_set_latency_range(u->sink,
453                                   0,
454                                   pa_bytes_to_usec(u->hwbuf_size, ss));
455 
456         /* Work around an assert in pa_sink_set_latency_range_within_thread():
457            keep track of min_latency and reuse it when
458            this routine is called from IO context */
459         u->min_latency_ref = u->sink->thread_info.min_latency;
460     }
461 
462     pa_log_info("Time scheduling watermark is %0.2fms",
463                 (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
464 }
465 
466 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
467     pa_usec_t usec, wm;
468 
469     pa_assert(sleep_usec);
470     pa_assert(process_usec);
471 
472     pa_assert(u);
473     pa_assert(u->use_tsched);
474 
475     usec = pa_sink_get_requested_latency_within_thread(u->sink);
476 
477     if (usec == (pa_usec_t) -1)
478         usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);
479 
480     wm = u->tsched_watermark_usec;
481 
482     if (wm > usec)
483         wm = usec/2;
484 
485     *sleep_usec = usec - wm;
486     *process_usec = wm;
487 
488 #ifdef DEBUG_TIMING
489     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
490                  (unsigned long) (usec / PA_USEC_PER_MSEC),
491                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
492                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
493 #endif
494 }
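/* Worked example (assumed figures): with a requested latency of 2 s and a
 * 20 ms watermark this yields *sleep_usec = 1.98 s and *process_usec = 20 ms.
 * If the requested latency is smaller than the watermark, the watermark is
 * clipped to half the request (wm = usec/2) so some headroom always remains. */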
495 
496 /* Reset smoother and counters */
497 static void reset_vars(struct userdata *u) {
498 
499 #ifdef USE_SMOOTHER_2
500     pa_smoother_2_reset(u->smoother, pa_rtclock_now());
501 #else
502     pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
503     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
504     u->last_smoother_update = 0;
505 #endif
506 
507     u->first = true;
508     u->since_start = 0;
509     u->write_count = 0;
510 }
511 
512 /* Called from IO context */
513 static void close_pcm(struct userdata *u) {
514     /* Let's suspend -- we don't call snd_pcm_drain() here since that might
515      * take awfully long with our long buffer sizes today. */
516     snd_pcm_close(u->pcm_handle);
517     u->pcm_handle = NULL;
518 
519     if (u->alsa_rtpoll_item) {
520         pa_rtpoll_item_free(u->alsa_rtpoll_item);
521         u->alsa_rtpoll_item = NULL;
522     }
523 }
524 
525 static int try_recover(struct userdata *u, const char *call, int err) {
526     pa_assert(u);
527     pa_assert(call);
528     pa_assert(err < 0);
529 
530     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
531 
532     pa_assert(err != -EAGAIN);
533 
534     if (err == -EPIPE)
535         pa_log_debug("%s: Buffer underrun!", call);
536 
537     if (err == -ESTRPIPE)
538         pa_log_debug("%s: System suspended!", call);
539 
540     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
541         pa_log("%s: %s, trying to restart PCM", call, pa_alsa_strerror(err));
542 
543         /* As a last measure, restart the PCM and inform the caller about it. */
544         close_pcm(u);
545         if (unsuspend(u, true) < 0)
546             return -1;
547 
548         return 1;
549     }
550 
551     reset_vars(u);
552     return 0;
553 }
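/* Return value convention of try_recover(): 0 means snd_pcm_recover()
 * succeeded and the smoother state was reset, 1 means the PCM had to be
 * closed and reopened via unsuspend() (callers must not keep using the old
 * handle state), and a negative value means recovery failed entirely. */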
554 
555 static size_t check_left_to_play(struct userdata *u, size_t n_bytes, bool on_timeout) {
556     size_t left_to_play;
557     bool underrun = false;
558 
559     /* We use <= instead of < for this check here because an underrun
560      * only happens after the last sample was processed, not as soon as
561      * it is removed from the buffer. This is particularly important
562      * when block transfer is used. */
563 
564     if (n_bytes <= u->hwbuf_size)
565         left_to_play = u->hwbuf_size - n_bytes;
566     else {
567 
568         /* We got a dropout. What a mess! */
569         left_to_play = 0;
570         underrun = true;
571 
572 #if 0
573         PA_DEBUG_TRAP;
574 #endif
575 
576         if (!u->first && !u->after_rewind)
577             if (pa_log_ratelimit(PA_LOG_INFO))
578                 pa_log_info("Underrun!");
579     }
580 
581 #ifdef DEBUG_TIMING
582     pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
583                  (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
584                  (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
585                  (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
586 #endif
587 
588     if (u->use_tsched) {
589         bool reset_not_before = true;
590 
591         if (!u->first && !u->after_rewind) {
592             if (underrun || left_to_play < u->watermark_inc_threshold)
593                 increase_watermark(u);
594             else if (left_to_play > u->watermark_dec_threshold) {
595                 reset_not_before = false;
596 
597                 /* We decrease the watermark only if we have actually
598                  * been woken up by a timeout. If something else woke
599                  * us up it's too easy to fulfill the deadlines... */
600 
601                 if (on_timeout)
602                     decrease_watermark(u);
603             }
604         }
605 
606         if (reset_not_before)
607             u->watermark_dec_not_before = 0;
608     }
609 
610     return left_to_play;
611 }
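/* Summary of the adaptive policy above: the watermark is raised whenever we
 * underrun or dip below watermark_inc_threshold, but it is lowered only when
 * a genuine timeout wakeup finds more than watermark_dec_threshold left to
 * play -- wakeups caused by other events would make the deadlines look too
 * easy to meet. */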
612 
613 static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
614     bool work_done = false;
615     pa_usec_t max_sleep_usec = 0, process_usec = 0;
616     size_t left_to_play, input_underrun;
617     unsigned j = 0;
618 
619     pa_assert(u);
620     pa_sink_assert_ref(u->sink);
621 
622     if (u->use_tsched)
623         hw_sleep_time(u, &max_sleep_usec, &process_usec);
624 
625     for (;;) {
626         snd_pcm_sframes_t n;
627         size_t n_bytes;
628         int r;
629         bool after_avail = true;
630 
631         /* First we determine how many samples are missing to fill the
632          * buffer up to 100% */
633 
634         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
635 
636             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
637                 continue;
638 
639             return r;
640         }
641 
642         n_bytes = (size_t) n * u->frame_size;
643 
644 #ifdef DEBUG_TIMING
645         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
646 #endif
647 
648         left_to_play = check_left_to_play(u, n_bytes, on_timeout);
649         on_timeout = false;
650 
651         if (u->use_tsched)
652 
653             /* We won't fill up the playback buffer before at least
654              * half the sleep time is over because otherwise we might
655              * ask for more data from the clients than they expect. We
656              * need to guarantee that clients only have to keep around
657              * a single hw buffer length. */
658 
659             if (!polled &&
660                 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
661 #ifdef DEBUG_TIMING
662                 pa_log_debug("Not filling up, because too early.");
663 #endif
664                 break;
665             }
666 
667         if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
668 
669             if (polled)
670                 PA_ONCE_BEGIN {
671                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
672                     pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write.\n"
673                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
674                              "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
675                            pa_strnull(dn));
676                     pa_xfree(dn);
677                 } PA_ONCE_END;
678 
679 #ifdef DEBUG_TIMING
680             pa_log_debug("Not filling up, because not necessary.");
681 #endif
682             break;
683         }
684 
685         j++;
686 
687         if (j > 10) {
688 #ifdef DEBUG_TIMING
689             pa_log_debug("Not filling up, because already too many iterations.");
690 #endif
691 
692             break;
693         } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
694 #ifdef DEBUG_TIMING
695             pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
696 #endif
697             break;
698         }
699 
700         n_bytes -= u->hwbuf_unused;
701         polled = false;
702 
703 #ifdef DEBUG_TIMING
704         pa_log_debug("Filling up");
705 #endif
706 
707         for (;;) {
708             pa_memchunk chunk;
709             void *p;
710             int err;
711             const snd_pcm_channel_area_t *areas;
712             snd_pcm_uframes_t offset, frames;
713             snd_pcm_sframes_t sframes;
714             size_t written;
715 
716             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
717 /*             pa_log_debug("%lu frames to write", (unsigned long) frames); */
718 
719             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
720 
721                 if (!after_avail && err == -EAGAIN)
722                     break;
723 
724                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
725                     continue;
726 
727                 if (r == 1)
728                     break;
729 
730                 return r;
731             }
732 
733             /* Make sure that if these memblocks need to be copied they will fit into one slot */
734             frames = PA_MIN(frames, u->frames_per_block);
735 
736             if (!after_avail && frames == 0)
737                 break;
738 
739             pa_assert(frames > 0);
740             after_avail = false;
741 
742             /* Check that these are multiples of 8 bits */
743             pa_assert((areas[0].first & 7) == 0);
744             pa_assert((areas[0].step & 7) == 0);
745 
746             /* We assume a single interleaved memory buffer */
747             pa_assert((areas[0].first >> 3) == 0);
748             pa_assert((areas[0].step >> 3) == u->frame_size);
749 
750             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
751 
752             written = frames * u->frame_size;
753             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, true);
754             chunk.length = pa_memblock_get_length(chunk.memblock);
755             chunk.index = 0;
756 
757             pa_sink_render_into_full(u->sink, &chunk);
758             pa_memblock_unref_fixed(chunk.memblock);
759 
760             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
761 
762                 if ((int) sframes == -EAGAIN)
763                     break;
764 
765                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
766                     continue;
767 
768                 if (r == 1)
769                     break;
770 
771                 return r;
772             }
773 
774             work_done = true;
775 
776             u->write_count += written;
777             u->since_start += written;
778 
779 #ifdef DEBUG_TIMING
780             pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
781 #endif
782 
783             if (written >= n_bytes)
784                 break;
785 
786             n_bytes -= written;
787         }
788     }
789 
790     input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
791 
792     if (u->use_tsched) {
793         pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
794 
795         *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
796         process_usec = u->tsched_watermark_usec;
797 
798         if (*sleep_usec > process_usec)
799             *sleep_usec -= process_usec;
800         else
801             *sleep_usec = 0;
802 
803         *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
804     } else
805         *sleep_usec = 0;
806 
807     return work_done ? 1 : 0;
808 }
809 
810 static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
811     bool work_done = false;
812     pa_usec_t max_sleep_usec = 0, process_usec = 0;
813     size_t left_to_play, input_underrun;
814     unsigned j = 0;
815 
816     pa_assert(u);
817     pa_sink_assert_ref(u->sink);
818 
819     if (u->use_tsched)
820         hw_sleep_time(u, &max_sleep_usec, &process_usec);
821 
822     for (;;) {
823         snd_pcm_sframes_t n;
824         size_t n_bytes;
825         int r;
826         bool after_avail = true;
827 
828         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
829 
830             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
831                 continue;
832 
833             return r;
834         }
835 
836         n_bytes = (size_t) n * u->frame_size;
837 
838 #ifdef DEBUG_TIMING
839         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
840 #endif
841 
842         left_to_play = check_left_to_play(u, n_bytes, on_timeout);
843         on_timeout = false;
844 
845         if (u->use_tsched)
846 
847             /* We won't fill up the playback buffer before at least
848              * half the sleep time is over because otherwise we might
849              * ask for more data from the clients than they expect. We
850              * need to guarantee that clients only have to keep around
851              * a single hw buffer length. */
852 
853             if (!polled &&
854                 pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
855                 break;
856 
857         if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {
858 
859             if (polled)
860                 PA_ONCE_BEGIN {
861                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
862                     pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write.\n"
863                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
864                              "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
865                            pa_strnull(dn));
866                     pa_xfree(dn);
867                 } PA_ONCE_END;
868 
869             break;
870         }
871 
872         j++;
873 
874         if (j > 10) {
875 #ifdef DEBUG_TIMING
876             pa_log_debug("Not filling up, because already too many iterations.");
877 #endif
878 
879             break;
880         } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
881 #ifdef DEBUG_TIMING
882             pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
883 #endif
884             break;
885         }
886 
887         n_bytes -= u->hwbuf_unused;
888         polled = false;
889 
890         for (;;) {
891             snd_pcm_sframes_t frames;
892             void *p;
893             size_t written;
894 
895 /*         pa_log_debug("%lu frames to write", (unsigned long) frames); */
896 
897             if (u->memchunk.length <= 0)
898                 pa_sink_render(u->sink, n_bytes, &u->memchunk);
899 
900             pa_assert(u->memchunk.length > 0);
901 
902             frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);
903 
904             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
905                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
906 
907             p = pa_memblock_acquire(u->memchunk.memblock);
908             frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
909             pa_memblock_release(u->memchunk.memblock);
910 
911             if (PA_UNLIKELY(frames < 0)) {
912 
913                 if (!after_avail && (int) frames == -EAGAIN)
914                     break;
915 
916                 if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
917                     continue;
918 
919                 if (r == 1)
920                     break;
921 
922                 return r;
923             }
924 
925             if (!after_avail && frames == 0)
926                 break;
927 
928             pa_assert(frames > 0);
929             after_avail = false;
930 
931             written = frames * u->frame_size;
932             u->memchunk.index += written;
933             u->memchunk.length -= written;
934 
935             if (u->memchunk.length <= 0) {
936                 pa_memblock_unref(u->memchunk.memblock);
937                 pa_memchunk_reset(&u->memchunk);
938             }
939 
940             work_done = true;
941 
942             u->write_count += written;
943             u->since_start += written;
944 
945 /*         pa_log_debug("wrote %lu frames", (unsigned long) frames); */
946 
947             if (written >= n_bytes)
948                 break;
949 
950             n_bytes -= written;
951         }
952     }
953 
954     input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);
955 
956     if (u->use_tsched) {
957         pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);
958 
959         *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
960         process_usec = u->tsched_watermark_usec;
961 
962         if (*sleep_usec > process_usec)
963             *sleep_usec -= process_usec;
964         else
965             *sleep_usec = 0;
966 
967         *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
968     } else
969         *sleep_usec = 0;
970 
971     return work_done ? 1 : 0;
972 }
973 
974 static void update_smoother(struct userdata *u) {
975     snd_pcm_sframes_t delay = 0;
976     int64_t position;
977     int err;
978     pa_usec_t now1 = 0;
979 #ifndef USE_SMOOTHER_2
980     pa_usec_t now2;
981 #endif
982     snd_pcm_status_t *status;
983     snd_htimestamp_t htstamp = { 0, 0 };
984 
985     snd_pcm_status_alloca(&status);
986 
987     pa_assert(u);
988     pa_assert(u->pcm_handle);
989 
990     /* Let's update the time smoother */
991 
992     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, false)) < 0)) {
993         pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
994         return;
995     }
996 
997     snd_pcm_status_get_htstamp(status, &htstamp);
998     now1 = pa_timespec_load(&htstamp);
999 
1000     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
1001     if (now1 <= 0)
1002         now1 = pa_rtclock_now();
1003 
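    /* write_count is the total number of bytes handed to ALSA, and delay is
     * what is still queued in the hw buffer (in frames), so the difference
     * is the number of bytes the device has actually played out so far. */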
1004     position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
1005 
1006 #ifdef USE_SMOOTHER_2
1007     pa_smoother_2_put(u->smoother, now1, position);
1008 #else
1009     /* check if the time since the last update is bigger than the interval */
1010     if (u->last_smoother_update > 0)
1011         if (u->last_smoother_update + u->smoother_interval > now1)
1012             return;
1013 
1014     if (PA_UNLIKELY(position < 0))
1015         position = 0;
1016 
1017     now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
1018 
1019     pa_smoother_put(u->smoother, now1, now2);
1020 
1021     u->last_smoother_update = now1;
1022     /* exponentially increase the update interval up to the MAX limit */
1023     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
1024 #endif
1025 }
1026 
1027 static int64_t sink_get_latency(struct userdata *u) {
1028     int64_t delay;
1029     pa_usec_t now1;
1030 #ifndef USE_SMOOTHER_2
1031     pa_usec_t now2;
1032 #endif
1033 
1034     pa_assert(u);
1035 
1036     now1 = pa_rtclock_now();
1037 #ifdef USE_SMOOTHER_2
1038     delay = pa_smoother_2_get_delay(u->smoother, now1, u->write_count);
1039 #else
1040     now2 = pa_smoother_get(u->smoother, now1);
1041 
1042     delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;
1043 #endif
1044 
1045     if (u->memchunk.memblock)
1046         delay += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
1047 
1048     return delay;
1049 }
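/* The latency reported above is therefore (bytes written - bytes played,
 * expressed in time) plus whatever still sits in the partially written
 * memchunk. It may transiently go negative while the smoother extrapolates,
 * which is why the return type is a signed int64_t. */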
1050 
1051 static int build_pollfd(struct userdata *u) {
1052     pa_assert(u);
1053     pa_assert(u->pcm_handle);
1054 
1055     if (u->alsa_rtpoll_item)
1056         pa_rtpoll_item_free(u->alsa_rtpoll_item);
1057 
1058     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
1059         return -1;
1060 
1061     return 0;
1062 }
1063 
1064 /* Called from IO context */
1065 static void suspend(struct userdata *u) {
1066     pa_assert(u);
1067 
1068     /* Handle may have been invalidated due to a device failure.
1069      * In that case there is nothing to do. */
1070     if (!u->pcm_handle)
1071         return;
1072 
1073 #ifdef USE_SMOOTHER_2
1074     pa_smoother_2_pause(u->smoother, pa_rtclock_now());
1075 #else
1076     pa_smoother_pause(u->smoother, pa_rtclock_now());
1077 #endif
1078 
1079     /* Close PCM device */
1080     close_pcm(u);
1081 
1082     /* We reset max_rewind/max_request here to make sure that, while we
1083      * are suspended, the old max_request/max_rewind values set before
1084      * the suspend cannot influence the per-stream buffers of newly
1085      * created streams, and the new streams' requirements have no
1086      * influence on the suspended device. */
1087     pa_sink_set_max_rewind_within_thread(u->sink, 0);
1088     pa_sink_set_max_request_within_thread(u->sink, 0);
1089 
1090     pa_log_info("Device suspended...");
1091 }
1092 
1093 /* Called from IO context */
1094 static int update_sw_params(struct userdata *u, bool may_need_rewind) {
1095     size_t old_unused;
1096     snd_pcm_uframes_t avail_min;
1097     int err;
1098 
1099     pa_assert(u);
1100 
1101     /* Use the full buffer if no one asked us for anything specific */
1102     old_unused = u->hwbuf_unused;
1103     u->hwbuf_unused = 0;
1104 
1105     if (u->use_tsched) {
1106         pa_usec_t latency;
1107 
1108         if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
1109             size_t b;
1110 
1111             pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
1112 
1113             b = pa_usec_to_bytes(latency, &u->sink->sample_spec);
1114 
1115             /* We need at least one sample in our buffer */
1116 
1117             if (PA_UNLIKELY(b < u->frame_size))
1118                 b = u->frame_size;
1119 
1120             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
1121         }
1122 
1123         fix_min_sleep_wakeup(u);
1124         fix_tsched_watermark(u);
1125     }
1126 
1127     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
1128 
1129     /* We need at least one frame in the used part of the buffer */
1130     avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;
1131 
1132     if (u->use_tsched) {
1133         pa_usec_t sleep_usec, process_usec;
1134 
1135         hw_sleep_time(u, &sleep_usec, &process_usec);
1136         avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
1137     }
1138 
1139     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
1140 
1141     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
1142         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
1143         return err;
1144     }
1145 
1146     /* If we're lowering the latency, we need to do a rewind, because otherwise
1147      * we might end up in a situation where the hw buffer contains more data
1148      * than the new configured latency. The rewind has to be requested before
1149      * updating max_rewind, because the rewind amount is limited to max_rewind.
1150      *
1151      * If may_need_rewind is false, it means that we're just starting playback,
1152      * and rewinding is never needed in that situation. */
1153     if (may_need_rewind && u->hwbuf_unused > old_unused) {
1154         pa_log_debug("Requesting rewind due to latency change.");
1155         pa_sink_request_rewind(u->sink, (size_t) -1);
1156     }
1157 
1158     pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
1159     if (pa_alsa_pcm_is_hw(u->pcm_handle))
1160         pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
1161     else {
1162         pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
1163         pa_sink_set_max_rewind_within_thread(u->sink, 0);
1164     }
1165 
1166     return 0;
1167 }
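/* avail_min (in frames) is what ALSA uses to decide when to wake us up: at
 * least one frame beyond the unused part of the buffer, plus -- in timer
 * scheduling mode -- the whole planned sleep time, so that the poll() fd
 * only fires as a safety net rather than acting as the main clock. */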
1168 
1169 /* Called from IO Context on unsuspend */
1170 static void update_size(struct userdata *u, pa_sample_spec *ss) {
1171     pa_assert(u);
1172     pa_assert(ss);
1173 
1174     u->frame_size = pa_frame_size(ss);
1175     u->frames_per_block = pa_mempool_block_size_max(u->core->mempool) / u->frame_size;
1176 
1177     /* use initial values including module arguments */
1178     u->fragment_size = u->initial_info.fragment_size;
1179     u->hwbuf_size = u->initial_info.nfrags * u->fragment_size;
1180     u->tsched_size = u->initial_info.tsched_size;
1181     u->tsched_watermark = u->initial_info.tsched_watermark;
1182     u->rewind_safeguard = u->initial_info.rewind_safeguard;
1183 
1184     u->tsched_watermark_ref = u->tsched_watermark;
1185 
1186     pa_log_info("Updated frame_size %zu, frames_per_block %lu, fragment_size %zu, hwbuf_size %zu, tsched(size %zu, watermark %zu), rewind_safeguard %zu",
1187                 u->frame_size, (unsigned long) u->frames_per_block, u->fragment_size, u->hwbuf_size, u->tsched_size, u->tsched_watermark, u->rewind_safeguard);
1188 }
1189 
1190 /* Called from IO context */
1191 static int unsuspend(struct userdata *u, bool recovering) {
1192     pa_sample_spec ss;
1193     int err, i;
1194     bool b, d;
1195     snd_pcm_uframes_t period_frames, buffer_frames;
1196     snd_pcm_uframes_t tsched_frames = 0;
1197     char *device_name = NULL;
1198     bool frame_size_changed = false;
1199 
1200     pa_assert(u);
1201     pa_assert(!u->pcm_handle);
1202 
1203     pa_log_info("Trying resume...");
1204 
1205     if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
1206         /* Need to open device in NONAUDIO mode */
1207         int len = strlen(u->device_name) + 8;
1208 
1209         device_name = pa_xmalloc(len);
1210         pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
1211     }
1212 
1213     /*
1214      * On some machines, during system suspend and resume, thread_func can receive
1215      * POLLERR events before the device nodes in /dev/snd/ are accessible again.
1216      * thread_func then calls unsuspend() to try to recover the PCM, which makes
1217      * snd_pcm_open() fail. Sleep and retry here to make sure those nodes are accessible.
1218      */
1219     for (i = 0; i < 4; i++) {
1220         if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
1221                                 SND_PCM_NONBLOCK|
1222                                 SND_PCM_NO_AUTO_RESAMPLE|
1223                                 SND_PCM_NO_AUTO_CHANNELS|
1224                                 SND_PCM_NO_AUTO_FORMAT)) < 0 && recovering)
1225             pa_msleep(25);
1226         else
1227             break;
1228     }
1229 
1230     if (err < 0) {
1231         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
1232         goto fail;
1233     }
1234 
1235     if (pa_frame_size(&u->sink->sample_spec) != u->frame_size) {
1236         update_size(u, &u->sink->sample_spec);
1237         tsched_frames = u->tsched_size / u->frame_size;
1238         frame_size_changed = true;
1239     }
1240 
1241     ss = u->sink->sample_spec;
1242     period_frames = u->fragment_size / u->frame_size;
1243     buffer_frames = u->hwbuf_size / u->frame_size;
1244     b = u->use_mmap;
1245     d = u->use_tsched;
1246 
1247     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_frames, &buffer_frames, tsched_frames, &b, &d, true)) < 0) {
1248         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
1249         goto fail;
1250     }
1251 
1252     if (b != u->use_mmap || d != u->use_tsched) {
1253         pa_log_warn("Resume failed, couldn't get original access mode.");
1254         goto fail;
1255     }
1256 
1257     if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
1258         pa_log_warn("Resume failed, couldn't restore original sample settings.");
1259         goto fail;
1260     }
1261 
1262     if (frame_size_changed) {
1263         u->fragment_size = (size_t)(period_frames * u->frame_size);
1264         u->hwbuf_size = (size_t)(buffer_frames * u->frame_size);
1265         pa_proplist_setf(u->sink->proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%zu", u->hwbuf_size);
1266         pa_proplist_setf(u->sink->proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%zu", u->fragment_size);
1267 
1268     } else if (period_frames * u->frame_size != u->fragment_size ||
1269                 buffer_frames * u->frame_size != u->hwbuf_size) {
1270         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %zu/%zu, New %lu/%lu)",
1271                     u->hwbuf_size, u->fragment_size,
1272                     (unsigned long) buffer_frames * u->frame_size, (unsigned long) period_frames * u->frame_size);
1273         goto fail;
1274     }
1275 
1276     if (update_sw_params(u, false) < 0)
1277         goto fail;
1278 
1279     if (build_pollfd(u) < 0)
1280         goto fail;
1281 
1282     reset_vars(u);
1283 
1284     /* reset the watermark to the value defined when the sink was created */
1285     if (u->use_tsched && !recovering)
1286         reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, true);
1287 
1288     pa_log_info("Resumed successfully...");
1289 
1290     pa_xfree(device_name);
1291     return 0;
1292 
1293 fail:
1294     if (u->pcm_handle) {
1295         snd_pcm_close(u->pcm_handle);
1296         u->pcm_handle = NULL;
1297     }
1298 
1299     pa_xfree(device_name);
1300 
1301     return -PA_ERR_IO;
1302 }
1303 
1304 /* Called from the IO thread or the main thread depending on whether deferred
1305  * volume is enabled or not (with deferred volume all mixer handling is done
1306  * from the IO thread).
1307  *
1308  * Sets the mixer settings to match the current sink and port state (the port
1309  * is given as an argument, because active_port may still point to the old
1310  * port, if we're switching ports). */
1311 static void sync_mixer(struct userdata *u, pa_device_port *port) {
1312     pa_alsa_setting *setting = NULL;
1313 
1314     pa_assert(u);
1315 
1316     if (!u->mixer_path)
1317         return;
1318 
1319     /* port may be NULL, because if we use a synthesized mixer path, then the
1320      * sink has no ports. */
1321     if (port && !u->ucm_context) {
1322         pa_alsa_port_data *data;
1323 
1324         data = PA_DEVICE_PORT_DATA(port);
1325         setting = data->setting;
1326     }
1327 
1328     pa_alsa_path_select(u->mixer_path, setting, u->mixer_handle, u->sink->muted);
1329 
1330     if (u->sink->set_mute)
1331         u->sink->set_mute(u->sink);
1332     if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
1333         if (u->sink->write_volume)
1334             u->sink->write_volume(u->sink);
1335     } else {
1336         if (u->sink->set_volume)
1337             u->sink->set_volume(u->sink);
1338     }
1339 }
1340 
1341 /* Called from IO context */
1342 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1343     struct userdata *u = PA_SINK(o)->userdata;
1344 
1345     switch (code) {
1346 
1347         case PA_SINK_MESSAGE_GET_LATENCY: {
1348             int64_t r = 0;
1349 
1350             if (u->pcm_handle)
1351                 r = sink_get_latency(u);
1352 
1353             *((int64_t*) data) = r;
1354 
1355             return 0;
1356         }
1357 
1358         case SINK_MESSAGE_SYNC_MIXER: {
1359             pa_device_port *port = data;
1360 
1361             sync_mixer(u, port);
1362             return 0;
1363         }
1364     }
1365 
1366     return pa_sink_process_msg(o, code, data, offset, chunk);
1367 }
1368 
1369 /* Called from main context */
1370 static int sink_set_state_in_main_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
1371     pa_sink_state_t old_state;
1372     struct userdata *u;
1373 
1374     pa_sink_assert_ref(s);
1375     pa_assert_se(u = s->userdata);
1376 
1377     /* When our session becomes active, we need to sync the mixer, because
1378      * another user may have changed the mixer settings.
1379      *
1380      * If deferred volume is enabled, the syncing is done in the
1381      * set_state_in_io_thread() callback instead. */
1382     if (!(s->flags & PA_SINK_DEFERRED_VOLUME)
1383             && (s->suspend_cause & PA_SUSPEND_SESSION)
1384             && !(new_suspend_cause & PA_SUSPEND_SESSION))
1385         sync_mixer(u, s->active_port);
1386 
1387     old_state = u->sink->state;
1388 
1389     if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
1390         reserve_done(u);
1391     else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
1392         if (reserve_init(u, u->device_name) < 0)
1393             return -PA_ERR_BUSY;
1394 
1395     return 0;
1396 }
1397 
1398 /* Called from the IO thread. */
1399 static int sink_set_state_in_io_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
1400     struct userdata *u;
1401 
1402     pa_assert(s);
1403     pa_assert_se(u = s->userdata);
1404 
1405     /* When our session becomes active, we need to sync the mixer, because
1406      * another user may have changed the mixer settings.
1407      *
1408      * If deferred volume is disabled, the syncing is done in the
1409      * set_state_in_main_thread() callback instead. */
1410     if ((s->flags & PA_SINK_DEFERRED_VOLUME)
1411             && (s->suspend_cause & PA_SUSPEND_SESSION)
1412             && !(new_suspend_cause & PA_SUSPEND_SESSION))
1413         sync_mixer(u, s->active_port);
1414 
1415     /* It may be that only the suspend cause is changing, in which case there's
1416      * nothing more to do. */
1417     if (new_state == s->thread_info.state)
1418         return 0;
1419 
1420     switch (new_state) {
1421 
1422         case PA_SINK_SUSPENDED: {
1423             pa_assert(PA_SINK_IS_OPENED(s->thread_info.state));
1424 
1425             suspend(u);
1426 
1427             break;
1428         }
1429 
1430         case PA_SINK_IDLE:
1431         case PA_SINK_RUNNING: {
1432             int r;
1433 
1434             if (s->thread_info.state == PA_SINK_INIT) {
1435                 if (build_pollfd(u) < 0)
1436                     /* FIXME: This will cause an assertion failure, because
1437                      * with the current design pa_sink_put() is not allowed
1438                      * to fail and pa_sink_put() has no fallback code that
1439                      * would start the sink suspended if opening the device
1440                      * fails. */
1441                     return -PA_ERR_IO;
1442             }
1443 
1444             if (s->thread_info.state == PA_SINK_SUSPENDED) {
1445                 if ((r = unsuspend(u, false)) < 0)
1446                     return r;
1447             }
1448 
1449             break;
1450         }
1451 
1452         case PA_SINK_UNLINKED:
1453         case PA_SINK_INIT:
1454         case PA_SINK_INVALID_STATE:
1455             break;
1456     }
1457 
1458     return 0;
1459 }
1460 
1461 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1462     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1463 
1464     pa_assert(u);
1465     pa_assert(u->mixer_handle);
1466 
1467     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1468         return 0;
1469 
1470     if (!PA_SINK_IS_LINKED(u->sink->state))
1471         return 0;
1472 
1473     if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1474         return 0;
1475 
1476     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1477         pa_sink_get_volume(u->sink, true);
1478         pa_sink_get_mute(u->sink, true);
1479     }
1480 
1481     return 0;
1482 }
1483 
1484 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1485     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1486 
1487     pa_assert(u);
1488     pa_assert(u->mixer_handle);
1489 
1490     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1491         return 0;
1492 
1493     if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
1494         return 0;
1495 
1496     if (mask & SND_CTL_EVENT_MASK_VALUE)
1497         pa_sink_update_volume_and_mute(u->sink);
1498 
1499     return 0;
1500 }
1501 
1502 static void sink_get_volume_cb(pa_sink *s) {
1503     struct userdata *u = s->userdata;
1504     pa_cvolume r;
1505     char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1506 
1507     pa_assert(u);
1508     pa_assert(u->mixer_path);
1509     pa_assert(u->mixer_handle);
1510 
1511     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1512         return;
1513 
1514     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1515     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1516 
1517     pa_log_debug("Read hardware volume: %s",
1518                  pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));
1519 
1520     if (pa_cvolume_equal(&u->hardware_volume, &r))
1521         return;
1522 
1523     s->real_volume = u->hardware_volume = r;
1524 
1525     /* Hmm, so the hardware volume changed, let's reset our software volume */
1526     if (u->mixer_path->has_dB)
1527         pa_sink_set_soft_volume(s, NULL);
1528 }
1529 
1530 static void sink_set_volume_cb(pa_sink *s) {
1531     struct userdata *u = s->userdata;
1532     pa_cvolume r;
1533     char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1534     bool deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);
1535     bool write_to_hw = !deferred_volume;
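    /* With deferred volume the actual hardware write normally happens later
     * in sink_write_volume_cb() on the IO thread; here we only compute and
     * cache the value (except for the ucm port-change case below). */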
1536 
1537     pa_assert(u);
1538     pa_assert(u->mixer_path);
1539     pa_assert(u->mixer_handle);
1540 
1541     /* Shift up by the base volume */
1542     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1543 
1544     /* If set_volume() is called because the ucm active_port is changing, the
1545      * volume should be written to the hw immediately; otherwise this volume
1546      * will be overridden by get_volume_cb(), which is called via
1547      * _disdev/_enadev() -> io_mixer_callback() */
1548     if (u->ucm_context && s->port_changing)
1549         write_to_hw = true;
1550 
1551     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, write_to_hw) < 0)
1552         return;
1553 
1554     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1555     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1556 
1557     u->hardware_volume = r;
1558 
1559     if (u->mixer_path->has_dB) {
1560         pa_cvolume new_soft_volume;
1561         bool accurate_enough;
1562 
1563         /* Match exactly what the user requested by software */
1564         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1565 
1566         /* If the adjustment to do in software is only minimal we
1567          * can skip it. That saves us CPU at the expense of a bit of
1568          * accuracy */
1569         accurate_enough =
1570             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1571             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1572 
1573         pa_log_debug("Requested volume: %s",
1574                      pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
1575         pa_log_debug("Got hardware volume: %s",
1576                      pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
1577         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1578                      pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
1579                      pa_yes_no(accurate_enough));
1580 
1581         if (!accurate_enough)
1582             s->soft_volume = new_soft_volume;
1583 
1584     } else {
1585         pa_log_debug("Wrote hardware volume: %s",
1586                      pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));
1587 
1588         /* We can't match exactly what the user requested, hence let's
1589          * at least tell the user about it */
1590 
1591         s->real_volume = r;
1592     }
1593 }
1594 
1595 static void sink_write_volume_cb(pa_sink *s) {
1596     struct userdata *u = s->userdata;
1597     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1598 
1599     pa_assert(u);
1600     pa_assert(u->mixer_path);
1601     pa_assert(u->mixer_handle);
1602     pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);
1603 
1604     /* Shift up by the base volume */
1605     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1606 
1607     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
1608         pa_log_error("Writing HW volume failed");
1609     else {
1610         pa_cvolume tmp_vol;
1611         bool accurate_enough;
1612 
1613         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1614         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1615 
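        /* Compare what the hardware actually accepted against what was
         * requested: if the per-channel ratio stays within VOLUME_ACCURACY
         * of unity, the write is considered exact and nothing is logged. */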
1616         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1617         accurate_enough =
1618             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1619             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1620 
1621         if (!accurate_enough) {
1622             char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];
1623 
1624             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1625                          pa_cvolume_snprint_verbose(volume_buf[0],
1626                                                     sizeof(volume_buf[0]),
1627                                                     &s->thread_info.current_hw_volume,
1628                                                     &s->channel_map,
1629                                                     true),
1630                          pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
1631         }
1632     }
1633 }
1634 
1635 static int sink_get_mute_cb(pa_sink *s, bool *mute) {
1636     struct userdata *u = s->userdata;
1637 
1638     pa_assert(u);
1639     pa_assert(u->mixer_path);
1640     pa_assert(u->mixer_handle);
1641 
1642     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
1643         return -1;
1644 
1645     return 0;
1646 }
1647 
1648 static void sink_set_mute_cb(pa_sink *s) {
1649     struct userdata *u = s->userdata;
1650 
1651     pa_assert(u);
1652     pa_assert(u->mixer_path);
1653     pa_assert(u->mixer_handle);
1654 
1655     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1656 }
1657 
1658 static void mixer_volume_init(struct userdata *u) {
1659     pa_assert(u);
1660 
1661     if (!u->mixer_path || !u->mixer_path->has_volume) {
1662         pa_sink_set_write_volume_callback(u->sink, NULL);
1663         pa_sink_set_get_volume_callback(u->sink, NULL);
1664         pa_sink_set_set_volume_callback(u->sink, NULL);
1665 
1666         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1667     } else {
1668         pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
1669         pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1670 
1671         if (u->mixer_path->has_dB && u->deferred_volume) {
1672             pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
1673             pa_log_info("Successfully enabled deferred volume.");
1674         } else
1675             pa_sink_set_write_volume_callback(u->sink, NULL);
1676 
1677         if (u->mixer_path->has_dB) {
1678             pa_sink_enable_decibel_volume(u->sink, true);
1679             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1680 
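            /* The base volume marks the 0 dB point of the hardware scale;
             * with max_dB above 0 dB it lands below PA_VOLUME_NORM, so that
             * PA_VOLUME_NORM itself maps to the path's maximum gain. */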
1681             u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1682             u->sink->n_volume_steps = PA_VOLUME_NORM+1;
1683 
1684             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
1685         } else {
1686             pa_sink_enable_decibel_volume(u->sink, false);
1687             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1688 
1689             u->sink->base_volume = PA_VOLUME_NORM;
1690             u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1691         }
1692 
1693         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1694     }
1695 
1696     if (!u->mixer_path || !u->mixer_path->has_mute) {
1697         pa_sink_set_get_mute_callback(u->sink, NULL);
1698         pa_sink_set_set_mute_callback(u->sink, NULL);
1699         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1700     } else {
1701         pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
1702         pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1703         pa_log_info("Using hardware mute control.");
1704     }
1705 }
1706 
1707 static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
1708     struct userdata *u = s->userdata;
1709     pa_alsa_ucm_port_data *data;
1710 
1711     pa_assert(u);
1712     pa_assert(p);
1713     pa_assert(u->ucm_context);
1714 
1715     data = PA_DEVICE_PORT_DATA(p);
1716     u->mixer_path = data->path;
1717     mixer_volume_init(u);
1718 
1719     if (s->flags & PA_SINK_DEFERRED_VOLUME)
1720         pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_SYNC_MIXER, p, 0, NULL);
1721     else
1722         sync_mixer(u, p);
1723 
1724     return pa_alsa_ucm_set_port(u->ucm_context, p);
1725 }
1726 
1727 static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
1728     struct userdata *u = s->userdata;
1729     pa_alsa_port_data *data;
1730 
1731     pa_assert(u);
1732     pa_assert(p);
1733     pa_assert(u->mixer_handle);
1734     pa_assert(!u->ucm_context);
1735 
1736     data = PA_DEVICE_PORT_DATA(p);
1737     pa_assert_se(u->mixer_path = data->path);
1738     mixer_volume_init(u);
1739 
1740     if (s->flags & PA_SINK_DEFERRED_VOLUME)
1741         pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_SYNC_MIXER, p, 0, NULL);
1742     else
1743         sync_mixer(u, p);
1744 
1745     if (data->suspend_when_unavailable && p->available == PA_AVAILABLE_NO)
1746         pa_sink_suspend(s, true, PA_SUSPEND_UNAVAILABLE);
1747     else
1748         pa_sink_suspend(s, false, PA_SUSPEND_UNAVAILABLE);
1749 
1750     return 0;
1751 }
1752 
1753 static void sink_update_requested_latency_cb(pa_sink *s) {
1754     struct userdata *u = s->userdata;
1755     pa_assert(u);
1756     pa_assert(u->use_tsched); /* the latency can be adjusted
1757                                * dynamically only when timer
1758                                * scheduling is used */
1759 
1760     if (!u->pcm_handle)
1761         return;
1762 
1763     update_sw_params(u, true);
1764 }
1765 
1766 static pa_idxset* sink_get_formats(pa_sink *s) {
1767     struct userdata *u = s->userdata;
1768 
1769     pa_assert(u);
1770 
1771     return pa_idxset_copy(u->formats, (pa_copy_func_t) pa_format_info_copy);
1772 }
1773 
1774 static bool sink_set_formats(pa_sink *s, pa_idxset *formats) {
1775     struct userdata *u = s->userdata;
1776     pa_format_info *f, *g;
1777     uint32_t idx, n;
1778 
1779     pa_assert(u);
1780 
1781     /* FIXME: also validate sample rates against what the device supports */
1782     PA_IDXSET_FOREACH(f, formats, idx) {
1783         if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
1784             /* EAC3 cannot be sent over S/PDIF */
1785             return false;
1786     }
1787 
1788     pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
1789     u->formats = pa_idxset_new(NULL, NULL);
1790 
1791     /* Note: the logic below won't apply if we're using software encoding.
1792      * This is fine for now since we don't support that via the passthrough
1793      * framework, but this must be changed if we do. */
1794 
1795     /* Count how many sample rates we support */
1796     for (idx = 0, n = 0; u->supported_rates[idx]; idx++)
1797         n++;
1798 
1799     /* First insert non-PCM formats since we prefer those. */
1800     PA_IDXSET_FOREACH(f, formats, idx) {
1801         if (!pa_format_info_is_pcm(f)) {
1802             g = pa_format_info_copy(f);
1803             pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->supported_rates, n);
1804             pa_idxset_put(u->formats, g, NULL);
1805         }
1806     }
1807 
1808     /* Now add any PCM formats */
1809     PA_IDXSET_FOREACH(f, formats, idx) {
1810         if (pa_format_info_is_pcm(f)) {
1811             /* We don't set rates here since we'll just tack on a resampler for
1812              * unsupported rates */
1813             pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
1814         }
1815     }
1816 
1817     return true;
1818 }
1819 
1820 static void sink_reconfigure_cb(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1821     struct userdata *u = s->userdata;
1822     int i;
1823     bool format_supported = false;
1824     bool rate_supported = false;
1825 #ifdef USE_SMOOTHER_2
1826     pa_sample_spec effective_spec;
1827 #endif
1828 
1829     pa_assert(u);
1830 
1831 #ifdef USE_SMOOTHER_2
1832     effective_spec.channels = s->sample_spec.channels;
1833 #endif
1834 
1835     for (i = 0; u->supported_formats[i] != PA_SAMPLE_MAX; i++) {
1836         if (u->supported_formats[i] == spec->format) {
1837             pa_sink_set_sample_format(u->sink, spec->format);
1838 #ifdef USE_SMOOTHER_2
1839             effective_spec.format = spec->format;
1840 #endif
1841             format_supported = true;
1842             break;
1843         }
1844     }
1845 
1846     if (!format_supported) {
1847         pa_log_info("Sink does not support sample format of %s, set it to a verified value",
1848                     pa_sample_format_to_string(spec->format));
1849         pa_sink_set_sample_format(u->sink, u->verified_sample_spec.format);
1850 #ifdef USE_SMOOTHER_2
1851         effective_spec.format = u->verified_sample_spec.format;
1852 #endif
1853     }
1854 
1855     for (i = 0; u->supported_rates[i]; i++) {
1856         if (u->supported_rates[i] == spec->rate) {
1857             pa_sink_set_sample_rate(u->sink, spec->rate);
1858 #ifdef USE_SMOOTHER_2
1859             effective_spec.rate = spec->rate;
1860 #endif
1861             rate_supported = true;
1862             break;
1863         }
1864     }
1865 
1866     if (!rate_supported) {
1867         pa_log_info("Sink does not support sample rate of %u, set it to a verified value", spec->rate);
1868         pa_sink_set_sample_rate(u->sink, u->verified_sample_spec.rate);
1869 #ifdef USE_SMOOTHER_2
1870         effective_spec.rate = u->verified_sample_spec.rate;
1871 #endif
1872     }
1873 
1874 #ifdef USE_SMOOTHER_2
1875     pa_smoother_2_set_sample_spec(u->smoother, pa_rtclock_now(), &effective_spec);
1876 #endif
1877 
1878     /* Passthrough status change is handled during unsuspend */
1879 }
1880 
1881 static int process_rewind(struct userdata *u) {
1882     snd_pcm_sframes_t unused;
1883     size_t rewind_nbytes, unused_nbytes, limit_nbytes;
1884     int err;
1885     pa_assert(u);
1886 
1887     if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1888         pa_sink_process_rewind(u->sink, 0);
1889         return 0;
1890     }
1891 
1892     /* Figure out how much we shall rewind and reset the counter */
1893     rewind_nbytes = u->sink->thread_info.rewind_nbytes;
1894 
1895     pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);
1896 
1897     if (rewind_nbytes == 0)
1898         goto rewind_done;
1899 
1900     if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
1901         if ((err = try_recover(u, "snd_pcm_avail", (int) unused)) < 0) {
1902             pa_log_warn("Trying to recover from underrun failed during rewind");
1903             return -1;
1904         }
1905         if (err == 1)
1906             goto rewind_done;
1907     }
1908 
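    /* 'unused' is the free space in the hardware buffer, in frames;
     * everything beyond it is queued audio that a rewind could reclaim. */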
1909     unused_nbytes = (size_t) unused * u->frame_size;
1910 
1911     /* make sure the rewind doesn't go too far, since that can cause issues with DMAs */
1912     unused_nbytes += u->rewind_safeguard;
1913 
1914     if (u->hwbuf_size > unused_nbytes)
1915         limit_nbytes = u->hwbuf_size - unused_nbytes;
1916     else
1917         limit_nbytes = 0;
1918 
1919     if (rewind_nbytes > limit_nbytes)
1920         rewind_nbytes = limit_nbytes;
1921 
1922     if (rewind_nbytes > 0) {
1923         snd_pcm_sframes_t in_frames, out_frames;
1924 
1925         pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);
1926 
1927         in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
1928         pa_log_debug("before: %lu", (unsigned long) in_frames);
1929         if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
1930             pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
1931             if ((err = try_recover(u, "process_rewind", out_frames)) < 0)
1932                 return -1;
1933             if (err == 1)
1934                 goto rewind_done;
1935             out_frames = 0;
1936         }
1937 
1938         pa_log_debug("after: %lu", (unsigned long) out_frames);
1939 
1940         rewind_nbytes = (size_t) out_frames * u->frame_size;
1941 
1942         if (rewind_nbytes <= 0)
1943             pa_log_info("Tried rewind, but was apparently not possible.");
1944         else {
1945             u->write_count -= rewind_nbytes;
1946             pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
1947             pa_sink_process_rewind(u->sink, rewind_nbytes);
1948 
1949             u->after_rewind = true;
1950             return 0;
1951         }
1952     } else {
1953         pa_log_debug("Mhmm, actually there is nothing to rewind.");
1954         if (u->use_tsched)
1955             increase_watermark(u);
1956     }
1957 
1958 rewind_done:
1959     pa_sink_process_rewind(u->sink, 0);
1960     return 0;
1961 }
1962 
1963 static void thread_func(void *userdata) {
1964     struct userdata *u = userdata;
1965     unsigned short revents = 0;
1966 
1967     pa_assert(u);
1968 
1969     pa_log_debug("Thread starting up");
1970 
1971     if (u->core->realtime_scheduling)
1972         pa_thread_make_realtime(u->core->realtime_priority);
1973 
1974     pa_thread_mq_install(&u->thread_mq);
1975 
1976     for (;;) {
1977         int ret;
1978         pa_usec_t rtpoll_sleep = 0, real_sleep;
1979 
1980 #ifdef DEBUG_TIMING
1981         pa_log_debug("Loop");
1982 #endif
1983 
1984         if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
1985             if (process_rewind(u) < 0)
1986                 goto fail;
1987         }
1988 
1989         /* Render some data and write it to the dsp */
1990         if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
1991             int work_done;
1992             pa_usec_t sleep_usec = 0;
1993             bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1994 
1995             if (u->use_mmap)
1996                 work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1997             else
1998                 work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
1999 
2000             if (work_done < 0)
2001                 goto fail;
2002 
2003 /*             pa_log_debug("work_done = %i", work_done); */
2004 
2005             if (work_done) {
2006 
2007                 if (u->first) {
2008                     pa_log_info("Starting playback.");
2009                     snd_pcm_start(u->pcm_handle);
2010 
2011 #ifdef USE_SMOOTHER_2
2012                     pa_smoother_2_resume(u->smoother, pa_rtclock_now());
2013 #else
2014                     pa_smoother_resume(u->smoother, pa_rtclock_now(), true);
2015 #endif
2016 
2017                     u->first = false;
2018                 }
2019 
2020                 update_smoother(u);
2021             }
2022 
2023             if (u->use_tsched) {
2024                 pa_usec_t cusec;
2025 
2026                 if (u->since_start <= u->hwbuf_size) {
2027 
2028                     /* USB devices on ALSA seem to hit a buffer
2029                      * underrun during the first iterations much
2030                      * quicker than we calculate here, probably due to
2031                      * the transport latency. To compensate for that
2032                      * we artificially decrease the sleep time until
2033                      * we have filled the buffer at least once
2034                      * completely. */
2035 
2036                     if (pa_log_ratelimit(PA_LOG_DEBUG))
2037                         pa_log_debug("Cutting sleep time for the initial iterations by half.");
2038                     sleep_usec /= 2;
2039                 }
2040 
2041                 /* OK, the playback buffer is now full, let's
2042                  * calculate when to wake up next */
2043 #ifdef DEBUG_TIMING
2044                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
2045 #endif
2046 
2047                 /* Convert from the sound card time domain to the
2048                  * system time domain */
2049 #ifdef USE_SMOOTHER_2
2050                 cusec = pa_smoother_2_translate(u->smoother, sleep_usec);
2051 #else
2052                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
2053 #endif
2054 
2055 #ifdef DEBUG_TIMING
2056                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
2057 #endif
2058 
2059                 /* We don't trust the conversion, so we wake up whatever comes first */
2060                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
2061             }
2062 
2063             u->after_rewind = false;
2064 
2065         }
2066 
2067         if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
2068             pa_usec_t volume_sleep;
2069             pa_sink_volume_change_apply(u->sink, &volume_sleep);
2070             if (volume_sleep > 0) {
2071                 if (rtpoll_sleep > 0)
2072                     rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
2073                 else
2074                     rtpoll_sleep = volume_sleep;
2075             }
2076         }
2077 
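        /* Arm the timer and remember when we went to sleep, so that the
         * actual scheduling delay can be measured (and complained about)
         * once pa_rtpoll_run() returns. */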
2078         if (rtpoll_sleep > 0) {
2079             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
2080             real_sleep = pa_rtclock_now();
2081         }
2082         else
2083             pa_rtpoll_set_timer_disabled(u->rtpoll);
2084 
2085         /* Hmm, nothing to do. Let's sleep */
2086         if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
2087             goto fail;
2088 
2089         if (rtpoll_sleep > 0) {
2090             real_sleep = pa_rtclock_now() - real_sleep;
2091 #ifdef DEBUG_TIMING
2092             pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
2093                 (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
2094                 (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
2095 #endif
2096             if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
2097                 pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
2098                     (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
2099                     (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
2100         }
2101 
2102         if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
2103             pa_sink_volume_change_apply(u->sink, NULL);
2104 
2105         if (ret == 0)
2106             goto finish;
2107 
2108         /* Tell ALSA about this and process its response */
2109         if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
2110             struct pollfd *pollfd;
2111             int err;
2112             unsigned n;
2113 
2114             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
2115 
2116             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
2117                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
2118                 goto fail;
2119             }
2120 
2121             if (revents & ~POLLOUT) {
2122                 if ((err = pa_alsa_recover_from_poll(u->pcm_handle, revents)) < 0)
2123                     goto fail;
2124 
2125                 /* Stream needs to be restarted */
2126                 if (err == 1) {
2127                     close_pcm(u);
2128                     if (unsuspend(u, true) < 0)
2129                         goto fail;
2130                 } else
2131                     reset_vars(u);
2132 
2133                 revents = 0;
2134             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
2135                 pa_log_debug("Wakeup from ALSA!");
2136 
2137         } else
2138             revents = 0;
2139     }
2140 
2141 fail:
2142     /* If this was not a regular exit from the loop we have to continue
2143      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
2144     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
2145     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
2146 
2147 finish:
2148     pa_log_debug("Thread shutting down");
2149 }
2150 
2151 static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
2152     const char *n;
2153     char *t;
2154 
2155     pa_assert(data);
2156     pa_assert(ma);
2157     pa_assert(device_name);
2158 
2159     if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
2160         pa_sink_new_data_set_name(data, n);
2161         data->namereg_fail = true;
2162         return;
2163     }
2164 
2165     if ((n = pa_modargs_get_value(ma, "name", NULL)))
2166         data->namereg_fail = true;
2167     else {
2168         n = device_id ? device_id : device_name;
2169         data->namereg_fail = false;
2170     }
2171 
2172     if (mapping)
2173         t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
2174     else
2175         t = pa_sprintf_malloc("alsa_output.%s", n);
2176 
2177     pa_sink_new_data_set_name(data, t);
2178     pa_xfree(t);
2179 }
2180 
2181 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
2182     const char *mdev;
2183 
2184     if (!mapping && !element)
2185         return;
2186 
2187     if (!element && mapping && pa_alsa_path_set_is_empty(mapping->output_path_set))
2188         return;
2189 
2190     u->mixers = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func,
2191                                     NULL, (pa_free_cb_t) pa_alsa_mixer_free);
2192 
2193     mdev = mapping ? pa_proplist_gets(mapping->proplist, "alsa.mixer_device") : NULL;
2194     if (mdev) {
2195         u->mixer_handle = pa_alsa_open_mixer_by_name(u->mixers, mdev, true);
2196     } else {
2197         u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->mixers, u->pcm_handle, true);
2198     }
2199     if (!u->mixer_handle) {
2200         pa_log_info("Failed to find a working mixer device.");
2201         return;
2202     }
2203 
2204     if (element) {
2205 
2206         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
2207             goto fail;
2208 
2209         if (pa_alsa_path_probe(u->mixer_path, NULL, u->mixer_handle, ignore_dB) < 0)
2210             goto fail;
2211 
2212         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
2213         pa_alsa_path_dump(u->mixer_path);
2214     } else {
2215         u->mixer_path_set = mapping->output_path_set;
2216     }
2217 
2218     return;
2219 
2220 fail:
2221 
2222     if (u->mixer_path) {
2223         pa_alsa_path_free(u->mixer_path);
2224         u->mixer_path = NULL;
2225     }
2226 
2227     u->mixer_handle = NULL;
2228     pa_hashmap_free(u->mixers);
2229     u->mixers = NULL;
2230 }
2231 
2232 static int setup_mixer(struct userdata *u, bool ignore_dB) {
2233     bool need_mixer_callback = false;
2234 
2235     pa_assert(u);
2236 
2237     /* This code is before the u->mixer_handle check, because if the UCM
2238      * configuration doesn't specify volume or mute controls, u->mixer_handle
2239      * will be NULL, but the UCM device enable sequence will still need to be
2240      * executed. */
2241     if (u->sink->active_port && u->ucm_context) {
2242         if (pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port) < 0)
2243             return -1;
2244     }
2245 
2246     if (!u->mixer_handle)
2247         return 0;
2248 
2249     if (u->sink->active_port) {
2250         if (!u->ucm_context) {
2251             pa_alsa_port_data *data;
2252 
2253             /* We have a list of supported paths, so let's activate the
2254              * one that has been chosen as active */
2255 
2256             data = PA_DEVICE_PORT_DATA(u->sink->active_port);
2257             u->mixer_path = data->path;
2258 
2259             pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
2260         } else {
2261             pa_alsa_ucm_port_data *data;
2262 
2263             data = PA_DEVICE_PORT_DATA(u->sink->active_port);
2264 
2265             /* Now activate volume controls, if any */
2266             if (data->path) {
2267                 u->mixer_path = data->path;
2268                 pa_alsa_path_select(u->mixer_path, NULL, u->mixer_handle, u->sink->muted);
2269             }
2270         }
2271     } else {
2272 
2273         if (!u->mixer_path && u->mixer_path_set)
2274             u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);
2275 
2276         if (u->mixer_path) {
2277             /* Hmm, we have only a single path, then let's activate it */
2278 
2279             pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
2280         } else
2281             return 0;
2282     }
2283 
2284     mixer_volume_init(u);
2285 
2286     /* Will we need to register callbacks? */
2287     if (u->mixer_path_set && u->mixer_path_set->paths) {
2288         pa_alsa_path *p;
2289         void *state;
2290 
2291         PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
2292             if (p->has_volume || p->has_mute)
2293                 need_mixer_callback = true;
2294         }
2295     }
2296     else if (u->mixer_path)
2297         need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
2298 
2299     if (need_mixer_callback) {
2300         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
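        /* With deferred volume the mixer is polled from the IO thread via
         * rtpoll, so io_mixer_callback() is used; otherwise mixer events
         * are dispatched from the main loop through an fd list and
         * ctl_mixer_callback() applies. */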
2301         if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
2302             u->mixer_pd = pa_alsa_mixer_pdata_new();
2303             mixer_callback = io_mixer_callback;
2304 
2305             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
2306                 pa_log("Failed to initialize file descriptor monitoring");
2307                 return -1;
2308             }
2309         } else {
2310             u->mixer_fdl = pa_alsa_fdlist_new();
2311             mixer_callback = ctl_mixer_callback;
2312 
2313             if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
2314                 pa_log("Failed to initialize file descriptor monitoring");
2315                 return -1;
2316             }
2317         }
2318 
2319         if (u->mixer_path_set)
2320             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
2321         else
2322             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
2323     }
2324 
2325     return 0;
2326 }
2327 
2328 pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
2329 
2330     struct userdata *u = NULL;
2331     const char *dev_id = NULL, *key, *mod_name;
2332     pa_sample_spec ss;
2333     char *thread_name = NULL;
2334     uint32_t alternate_sample_rate;
2335     pa_channel_map map;
2336     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
2337     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
2338     size_t frame_size;
2339     bool use_mmap = true;
2340     bool use_tsched = true;
2341     bool ignore_dB = false;
2342     bool namereg_fail = false;
2343     bool deferred_volume = false;
2344     bool set_formats = false;
2345     bool fixed_latency_range = false;
2346     bool b;
2347     bool d;
2348     bool avoid_resampling;
2349     pa_sink_new_data data;
2350     bool volume_is_set;
2351     bool mute_is_set;
2352     pa_alsa_profile_set *profile_set = NULL;
2353     void *state = NULL;
2354 #ifdef USE_SMOOTHER_2
2355     snd_pcm_info_t* pcm_info;
2356     const char *id;
2357 #endif
2358 
2359     pa_assert(m);
2360     pa_assert(ma);
2361 
2362     ss = m->core->default_sample_spec;
2363     map = m->core->default_channel_map;
2364     avoid_resampling = m->core->avoid_resampling;
2365 
2366     /* Pick sample spec overrides from the mapping, if any */
2367     if (mapping) {
2368         if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
2369             ss.format = mapping->sample_spec.format;
2370         if (mapping->sample_spec.rate != 0)
2371             ss.rate = mapping->sample_spec.rate;
2372         if (mapping->sample_spec.channels != 0) {
2373             ss.channels = mapping->sample_spec.channels;
2374             if (pa_channel_map_valid(&mapping->channel_map))
2375                 pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
2376         }
2377     }
2378 
2379     /* Override with modargs if provided */
2380     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
2381         pa_log("Failed to parse sample specification and channel map");
2382         goto fail;
2383     }
2384 
2385     alternate_sample_rate = m->core->alternate_sample_rate;
2386     if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
2387         pa_log("Failed to parse alternate sample rate");
2388         goto fail;
2389     }
2390 
2391     frame_size = pa_frame_size(&ss);
2392 
2393     nfrags = m->core->default_n_fragments;
2394     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
2395     if (frag_size <= 0)
2396         frag_size = (uint32_t) frame_size;
2397     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
2398     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
2399 
2400     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
2401         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
2402         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
2403         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
2404         pa_log("Failed to parse buffer metrics");
2405         goto fail;
2406     }
2407 
2408     buffer_size = nfrags * frag_size;
2409 
2410     period_frames = frag_size/frame_size;
2411     buffer_frames = buffer_size/frame_size;
2412     tsched_frames = tsched_size/frame_size;
2413 
2414     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
2415         pa_log("Failed to parse mmap argument.");
2416         goto fail;
2417     }
2418 
2419     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
2420         pa_log("Failed to parse tsched argument.");
2421         goto fail;
2422     }
2423 
2424     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
2425         pa_log("Failed to parse ignore_dB argument.");
2426         goto fail;
2427     }
2428 
2429     rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
2430     if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
2431         pa_log("Failed to parse rewind_safeguard argument");
2432         goto fail;
2433     }
2434 
2435     deferred_volume = m->core->deferred_volume;
2436     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
2437         pa_log("Failed to parse deferred_volume argument.");
2438         goto fail;
2439     }
2440 
2441     if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
2442         pa_log("Failed to parse fixed_latency_range argument.");
2443         goto fail;
2444     }
2445 
2446     use_tsched = pa_alsa_may_tsched(use_tsched);
2447 
2448     u = pa_xnew0(struct userdata, 1);
2449     u->core = m->core;
2450     u->module = m;
2451     u->use_mmap = use_mmap;
2452     u->use_tsched = use_tsched;
2453     u->tsched_size = tsched_size;
2454     u->initial_info.nfrags = (size_t) nfrags;
2455     u->initial_info.fragment_size = (size_t) frag_size;
2456     u->initial_info.tsched_size = (size_t) tsched_size;
2457     u->initial_info.tsched_watermark = (size_t) tsched_watermark;
2458     u->initial_info.rewind_safeguard = (size_t) rewind_safeguard;
2459     u->deferred_volume = deferred_volume;
2460     u->fixed_latency_range = fixed_latency_range;
2461     u->first = true;
2462     u->rewind_safeguard = rewind_safeguard;
2463     u->rtpoll = pa_rtpoll_new();
2464 
2465     if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
2466         pa_log("pa_thread_mq_init() failed.");
2467         goto fail;
2468     }
2469 
2470 #ifndef USE_SMOOTHER_2
2471     u->smoother = pa_smoother_new(
2472             SMOOTHER_ADJUST_USEC,
2473             SMOOTHER_WINDOW_USEC,
2474             true,
2475             true,
2476             5,
2477             pa_rtclock_now(),
2478             true);
2479     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
2480 #endif
2481 
2482     /* use ucm */
2483     if (mapping && mapping->ucm_context.ucm)
2484         u->ucm_context = &mapping->ucm_context;
2485 
2486     dev_id = pa_modargs_get_value(
2487             ma, "device_id",
2488             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
2489 
2490     u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));
2491 
2492     if (reserve_init(u, dev_id) < 0)
2493         goto fail;
2494 
2495     if (reserve_monitor_init(u, dev_id) < 0)
2496         goto fail;
2497 
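    /* b and d are passed by reference to the open helpers below, which clear
     * them if the device turns out not to support mmap or timer-based
     * scheduling; the checks after opening compare them against the
     * requested settings. */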
2498     b = use_mmap;
2499     d = use_tsched;
2500 
2501     /* Force ALSA to reread its configuration if module-alsa-card didn't
2502      * do it for us. This matters if our device was hot-plugged after ALSA
2503      * has already read its configuration - see
2504      * https://bugs.freedesktop.org/show_bug.cgi?id=54029
2505      */
2506 
2507     if (!card)
2508         snd_config_update_free_global();
2509 
2510     if (mapping) {
2511 
2512         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2513             pa_log("device_id= not set");
2514             goto fail;
2515         }
2516 
2517         if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
2518             if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
2519                 pa_log("Failed to enable ucm modifier %s", mod_name);
2520             else
2521                 pa_log_debug("Enabled ucm modifier %s", mod_name);
2522         }
2523 
2524         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
2525                       dev_id,
2526                       &u->device_name,
2527                       &ss, &map,
2528                       SND_PCM_STREAM_PLAYBACK,
2529                       &period_frames, &buffer_frames, tsched_frames,
2530                       &b, &d,
2531                       &u->supported_formats, &u->supported_rates,
2532                       mapping)))
2533             goto fail;
2534 
2535     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
2536 
2537         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
2538             goto fail;
2539 
2540         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
2541                       dev_id,
2542                       &u->device_name,
2543                       &ss, &map,
2544                       SND_PCM_STREAM_PLAYBACK,
2545                       &period_frames, &buffer_frames, tsched_frames,
2546                       &b, &d,
2547                       &u->supported_formats, &u->supported_rates,
2548                       profile_set, &mapping)))
2549             goto fail;
2550 
2551     } else {
2552 
2553         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
2554                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
2555                       &u->device_name,
2556                       &ss, &map,
2557                       SND_PCM_STREAM_PLAYBACK,
2558                       &period_frames, &buffer_frames, tsched_frames,
2559                       &b, &d,
2560                       &u->supported_formats, &u->supported_rates,
2561                       false)))
2562             goto fail;
2563     }
2564 
2565     pa_assert(u->device_name);
2566     pa_log_info("Successfully opened device %s.", u->device_name);
2567 
2568     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
2569         pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
2570         goto fail;
2571     }
2572 
2573     if (mapping)
2574         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
2575 
2576     if (use_mmap && !b) {
2577         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
2578         u->use_mmap = use_mmap = false;
2579     }
2580 
2581     if (use_tsched && (!b || !d)) {
2582         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
2583         u->use_tsched = use_tsched = false;
2584     }
2585 
2586     if (u->use_mmap)
2587         pa_log_info("Successfully enabled mmap() mode.");
2588 
2589     if (u->use_tsched) {
2590         pa_log_info("Successfully enabled timer-based scheduling mode.");
2591 
2592         if (u->fixed_latency_range)
2593             pa_log_info("Disabling latency range changes on underrun");
2594     }
2595 
2596     /* All passthrough formats supported by PulseAudio require
2597      * IEC61937 framing with two fake channels. So, passthrough
2598      * clients will always send two channels. Multichannel sinks
2599      * cannot accept that, because nobody has implemented sink channel
2600      * count switching so far. So just don't show known non-working
2601      * settings to the user. */
2602     if ((is_iec958(u) || is_hdmi(u)) && ss.channels == 2)
2603         set_formats = true;
2604 
2605     u->verified_sample_spec = ss;
2606 
2607     if (!u->supported_formats) {
2608         pa_log_error("Failed to find any supported sample formats.");
2609         goto fail;
2610     }
2611 
2612     if (!u->supported_rates) {
2613         pa_log_error("Failed to find any supported sample rates.");
2614         goto fail;
2615     }
2616 
2617     /* ALSA might tweak the sample spec, so recalculate the frame size */
2618     frame_size = pa_frame_size(&ss);
2619 
2620     pa_sink_new_data_init(&data);
2621     data.driver = driver;
2622     data.module = m;
2623     data.card = card;
2624     set_sink_name(&data, ma, dev_id, u->device_name, mapping);
2625 
2626     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
2627      * variable instead of using &data.namereg_fail directly, because
2628      * data.namereg_fail is a bitfield and taking the address of a bitfield
2629      * variable is impossible. */
2630     namereg_fail = data.namereg_fail;
2631     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
2632         pa_log("Failed to parse namereg_fail argument.");
2633         pa_sink_new_data_done(&data);
2634         goto fail;
2635     }
2636     data.namereg_fail = namereg_fail;
2637 
2638     if (pa_modargs_get_value_boolean(ma, "avoid_resampling", &avoid_resampling) < 0) {
2639         pa_log("Failed to parse avoid_resampling argument.");
2640         pa_sink_new_data_done(&data);
2641         goto fail;
2642     }
2643     pa_sink_new_data_set_avoid_resampling(&data, avoid_resampling);
2644 
2645     pa_sink_new_data_set_sample_spec(&data, &ss);
2646     pa_sink_new_data_set_channel_map(&data, &map);
2647     pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);
2648 
2649     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
2650     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
2651     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
2652     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
2653     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
2654 
2655     if (mapping) {
2656         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
2657         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
2658 
2659         state = NULL;
2660         while ((key = pa_proplist_iterate(mapping->proplist, &state)))
2661             pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
2662     }
2663 
2664     pa_alsa_init_description(data.proplist, card);
2665 
2666     if (u->control_device)
2667         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
2668 
2669     if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
2670         pa_log("Invalid properties");
2671         pa_sink_new_data_done(&data);
2672         goto fail;
2673     }
2674 
2675     if (u->ucm_context) {
2676         pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, true, card, u->pcm_handle, ignore_dB);
2677         find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2678     } else {
2679         find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
2680         if (u->mixer_path_set)
2681             pa_alsa_add_ports(&data, u->mixer_path_set, card);
2682     }
2683 
2684     u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
2685                           (set_formats ? PA_SINK_SET_FORMATS : 0));
2686     volume_is_set = data.volume_is_set;
2687     mute_is_set = data.muted_is_set;
2688     pa_sink_new_data_done(&data);
2689 
2690     if (!u->sink) {
2691         pa_log("Failed to create sink object");
2692         goto fail;
2693     }
2694 
2695 #ifdef USE_SMOOTHER_2
2696     u->smoother = pa_smoother_2_new(SMOOTHER_WINDOW_USEC, pa_rtclock_now(), frame_size, u->sink->sample_spec.rate);
2697 
2698     /* Check if this is a USB device, see alsa-util.c.
2699      * USB devices unfortunately need some special handling */
2700     snd_pcm_info_alloca(&pcm_info);
2701     if (snd_pcm_info(u->pcm_handle, pcm_info) == 0 &&
2702         (id = snd_pcm_info_get_id(pcm_info))) {
2703         if (pa_streq(id, "USB Audio")) {
2704             uint32_t hack_threshold;
2705             /* USB device, set hack parameter */
2706             hack_threshold = 2000;
2707             if (!u->use_tsched)
2708                 hack_threshold = 1000;
2709             pa_smoother_2_usb_hack_enable(u->smoother, true, hack_threshold);
2710         }
2711     }
2712 #endif
2713 
2714     if (u->ucm_context) {
2715         pa_device_port *port;
2716         unsigned h_prio = 0;
2717         PA_HASHMAP_FOREACH(port, u->sink->ports, state) {
2718             if (!h_prio || port->priority > h_prio)
2719                 h_prio = port->priority;
2720         }
2721         /* ucm port priorities are 100, 200, ..., 900; scale down to a single digit */
2722         h_prio = h_prio / 100;
2723         u->sink->priority += h_prio;
2724     }
2725 
2726     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
2727                                  &u->sink->thread_info.volume_change_safety_margin) < 0) {
2728         pa_log("Failed to parse deferred_volume_safety_margin parameter");
2729         goto fail;
2730     }
2731 
2732     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
2733                                  &u->sink->thread_info.volume_change_extra_delay) < 0) {
2734         pa_log("Failed to parse deferred_volume_extra_delay parameter");
2735         goto fail;
2736     }
2737 
2738     u->sink->parent.process_msg = sink_process_msg;
2739     if (u->use_tsched)
2740         u->sink->update_requested_latency = sink_update_requested_latency_cb;
2741     u->sink->set_state_in_main_thread = sink_set_state_in_main_thread_cb;
2742     u->sink->set_state_in_io_thread = sink_set_state_in_io_thread_cb;
2743     if (u->ucm_context)
2744         u->sink->set_port = sink_set_port_ucm_cb;
2745     else
2746         u->sink->set_port = sink_set_port_cb;
2747     u->sink->reconfigure = sink_reconfigure_cb;
2748     u->sink->userdata = u;
2749 
2750     pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
2751     pa_sink_set_rtpoll(u->sink, u->rtpoll);
2752 
2753     u->frame_size = frame_size;
2754     u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
2755     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
2756     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
2757     pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
2758 
2759     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
2760                 (double) u->hwbuf_size / (double) u->fragment_size,
2761                 (long unsigned) u->fragment_size,
2762                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
2763                 (long unsigned) u->hwbuf_size,
2764                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
2765 
2766     pa_sink_set_max_request(u->sink, u->hwbuf_size);
2767     if (pa_alsa_pcm_is_hw(u->pcm_handle))
2768         pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
2769     else {
2770         pa_log_info("Disabling rewind for device %s", u->device_name);
2771         pa_sink_set_max_rewind(u->sink, 0);
2772     }
2773 
2774     if (u->use_tsched) {
2775         u->tsched_watermark_ref = tsched_watermark;
2776         reset_watermark(u, u->tsched_watermark_ref, &ss, false);
2777     } else
2778         pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
2779 
2780     reserve_update(u);
2781 
2782     if (update_sw_params(u, false) < 0)
2783         goto fail;
2784 
2785     if (setup_mixer(u, ignore_dB) < 0)
2786         goto fail;
2787 
2788     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
2789 
2790     thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
2791     if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
2792         pa_log("Failed to create thread.");
2793         goto fail;
2794     }
2795     pa_xfree(thread_name);
2796     thread_name = NULL;
2797 
2798     /* Get initial mixer settings */
2799     if (volume_is_set) {
2800         if (u->sink->set_volume)
2801             u->sink->set_volume(u->sink);
2802     } else {
2803         if (u->sink->get_volume)
2804             u->sink->get_volume(u->sink);
2805     }
2806 
2807     if (mute_is_set) {
2808         if (u->sink->set_mute)
2809             u->sink->set_mute(u->sink);
2810     } else {
2811         if (u->sink->get_mute) {
2812             bool mute;
2813 
2814             if (u->sink->get_mute(u->sink, &mute) >= 0)
2815                 pa_sink_set_mute(u->sink, mute, false);
2816         }
2817     }
2818 
2819     if ((volume_is_set || mute_is_set) && u->sink->write_volume)
2820         u->sink->write_volume(u->sink);
2821 
2822     if (set_formats) {
2823         /* For S/PDIF and HDMI, allow getting/setting custom formats */
2824         pa_format_info *format;
2825 
2826         /* To start with, we only support PCM formats. Other formats may be added
2827          * with pa_sink_set_formats().*/
2828         format = pa_format_info_new();
2829         format->encoding = PA_ENCODING_PCM;
2830         u->formats = pa_idxset_new(NULL, NULL);
2831         pa_idxset_put(u->formats, format, NULL);
2832 
2833         u->sink->get_formats = sink_get_formats;
2834         u->sink->set_formats = sink_set_formats;
2835     }
2836 
2837     pa_sink_put(u->sink);
2838 
2839     if (profile_set)
2840         pa_alsa_profile_set_free(profile_set);
2841 
2842     /* Suspend if necessary. FIXME: It would be better to start suspended, but
2843      * that would require some core changes. It's possible to set
2844      * pa_sink_new_data.suspend_cause, but that has to be done before the
2845      * pa_sink_new() call, and we know if we need to suspend only after the
2846      * pa_sink_new() call when the initial port has been chosen. Calling
2847      * pa_sink_suspend() between pa_sink_new() and pa_sink_put() would
2848      * otherwise work, but currently pa_sink_suspend() will crash if
2849      * pa_sink_put() hasn't been called. */
2850     if (u->sink->active_port && !u->ucm_context) {
2851         pa_alsa_port_data *port_data;
2852 
2853         port_data = PA_DEVICE_PORT_DATA(u->sink->active_port);
2854 
2855         if (port_data->suspend_when_unavailable && u->sink->active_port->available == PA_AVAILABLE_NO)
2856             pa_sink_suspend(u->sink, true, PA_SUSPEND_UNAVAILABLE);
2857     }
2858 
2859     return u->sink;
2860 
2861 fail:
2862     pa_xfree(thread_name);
2863 
2864     if (u)
2865         userdata_free(u);
2866 
2867     if (profile_set)
2868         pa_alsa_profile_set_free(profile_set);
2869 
2870     return NULL;
2871 }
2872 
2873 static void userdata_free(struct userdata *u) {
2874     pa_assert(u);
2875 
2876     if (u->sink)
2877         pa_sink_unlink(u->sink);
2878 
2879     if (u->thread) {
2880         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
2881         pa_thread_free(u->thread);
2882     }
2883 
2884     pa_thread_mq_done(&u->thread_mq);
2885 
2886     if (u->sink)
2887         pa_sink_unref(u->sink);
2888 
2889     if (u->memchunk.memblock)
2890         pa_memblock_unref(u->memchunk.memblock);
2891 
2892     if (u->mixer_pd)
2893         pa_alsa_mixer_pdata_free(u->mixer_pd);
2894 
2895     if (u->alsa_rtpoll_item)
2896         pa_rtpoll_item_free(u->alsa_rtpoll_item);
2897 
2898     if (u->rtpoll)
2899         pa_rtpoll_free(u->rtpoll);
2900 
2901     if (u->pcm_handle) {
2902         snd_pcm_drop(u->pcm_handle);
2903         snd_pcm_close(u->pcm_handle);
2904     }
2905 
2906     if (u->mixer_fdl)
2907         pa_alsa_fdlist_free(u->mixer_fdl);
2908 
2909     /* Only free the mixer_path if the sink owns it */
2910     if (u->mixer_path && !u->mixer_path_set && !u->ucm_context)
2911         pa_alsa_path_free(u->mixer_path);
2912 
2913     if (u->mixers)
2914         pa_hashmap_free(u->mixers);
2915 
2916     if (u->smoother)
2917 #ifdef USE_SMOOTHER_2
2918         pa_smoother_2_free(u->smoother);
2919 #else
2920         pa_smoother_free(u->smoother);
2921 #endif
2922 
2923     if (u->formats)
2924         pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
2925 
2926     if (u->supported_formats)
2927         pa_xfree(u->supported_formats);
2928 
2929     if (u->supported_rates)
2930         pa_xfree(u->supported_rates);
2931 
2932     reserve_done(u);
2933     monitor_done(u);
2934 
2935     pa_xfree(u->device_name);
2936     pa_xfree(u->control_device);
2937     pa_xfree(u->paths_dir);
2938     pa_xfree(u);
2939 }
2940 
2941 void pa_alsa_sink_free(pa_sink *s) {
2942     struct userdata *u;
2943 
2944     pa_sink_assert_ref(s);
2945     pa_assert_se(u = s->userdata);
2946 
2947     userdata_free(u);
2948 }
2949