/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <signal.h>
#include <stdio.h>

#include <alsa/asoundlib.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/rtclock.h>
#include <pulse/timeval.h>
#include <pulse/util.h>
#include <pulse/volume.h>
#include <pulse/xmalloc.h>
#include <pulse/internal.h>

#include <pulsecore/core.h>
#include <pulsecore/i18n.h>
#include <pulsecore/module.h>
#include <pulsecore/memchunk.h>
#include <pulsecore/sink.h>
#include <pulsecore/modargs.h>
#include <pulsecore/core-rtclock.h>
#include <pulsecore/core-util.h>
#include <pulsecore/sample-util.h>
#include <pulsecore/log.h>
#include <pulsecore/macro.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/time-smoother.h>

#include <modules/reserve-wrap.h>

#include "alsa-util.h"
#include "alsa-sink.h"

/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s    -- Overall buffer size */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms  -- Fill up when only this much is left in the buffer */

#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  -- On underrun, increase watermark by this */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms   -- When everything's great, decrease watermark by this */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s   -- How long after a dropout to recheck whether things are good now */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms   -- If the buffer level ever falls below this threshold, increase the watermark */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms -- If the buffer level didn't drop below this threshold in the verification time, decrease the watermark */

/* Note that TSCHED_WATERMARK_INC_THRESHOLD_USEC == 0 means that we
 * will increase the watermark only if we hit a real underrun. */
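
/* For example, with the default 20 ms watermark and S16LE stereo at 48 kHz
 * (4 bytes per frame, i.e. 192 bytes per ms), the watermark corresponds to
 * roughly 3840 bytes, and each underrun grows it by a further ~1920 bytes
 * (10 ms). */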

#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms  -- Sleep at least 10ms on each iteration */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms   -- Wake up at least this long before the buffer runs empty */

#define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s   -- smoother window size */
#define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s    -- smoother adjust time */

#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms   -- min smoother update interval */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms -- max smoother update interval */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* Don't require volume adjustments to be perfectly correct. Don't necessarily extend granularity in software unless the difference exceeds this level */
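
/* In other words: if the hardware lands within ±1% of the requested volume
 * (PA_VOLUME_NORM is 0x10000, so within about 655 volume units), we skip the
 * extra software attenuation stage entirely. */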

#define DEFAULT_REWIND_SAFEGUARD_BYTES (256U) /* 1.33ms @48kHz, we'll never rewind less than this */
#define DEFAULT_REWIND_SAFEGUARD_USEC (1330) /* 1.33ms; depending on channels/rate/sample format we may rewind more than the 256 bytes above */
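
/* The two safeguards agree for S16LE stereo at 48 kHz: 256 bytes / 4 bytes
 * per frame = 64 frames, and 64 / 48000 s ≈ 1.33 ms. With more channels or
 * higher rates the usec-based safeguard becomes the larger of the two. */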

#define DEFAULT_WRITE_ITERATION_THRESHOLD 0.03 /* don't iterate write if < 3% of the buffer is available */

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;

    char *paths_dir;
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    pa_hashmap *mixers;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    pa_sample_spec verified_sample_spec;
    pa_sample_format_t *supported_formats;
    unsigned int *supported_rates;
    struct {
        size_t fragment_size;
        size_t nfrags;
        size_t tsched_size;
        size_t tsched_watermark;
        size_t rewind_safeguard;
    } initial_info;

    size_t
        frame_size,
        fragment_size,
        hwbuf_size,
        tsched_size,
        tsched_watermark,
        tsched_watermark_ref,
        hwbuf_unused,
        min_sleep,
        min_wakeup,
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold,
        rewind_safeguard;

    snd_pcm_uframes_t frames_per_block;

    pa_usec_t watermark_dec_not_before;
    pa_usec_t min_latency_ref;
    pa_usec_t tsched_watermark_usec;

    pa_memchunk memchunk;

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    bool use_mmap:1, use_tsched:1, deferred_volume:1, fixed_latency_range:1;

    bool first, after_rewind;

    pa_rtpoll_item *alsa_rtpoll_item;

    pa_smoother *smoother;
    uint64_t write_count;
    uint64_t since_start;
    pa_usec_t smoother_interval;
    pa_usec_t last_smoother_update;

    pa_idxset *formats;

    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;

    /* ucm context */
    pa_alsa_ucm_mapping_context *ucm_context;
};

enum {
    SINK_MESSAGE_SYNC_MIXER = PA_SINK_MESSAGE_MAX
};

static void userdata_free(struct userdata *u);
static int unsuspend(struct userdata *u, bool recovering);

/* FIXME: Is there a better way to do this than device names? */
static bool is_iec958(struct userdata *u) {
    return (strncmp("iec958", u->device_name, 6) == 0);
}

static bool is_hdmi(struct userdata *u) {
    return (strncmp("hdmi", u->device_name, 4) == 0);
}

static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
    pa_assert(r);
    pa_assert(u);

    pa_log_debug("Suspending sink %s, because another application requested us to release the device.", u->sink->name);

    if (pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION) < 0)
        return PA_HOOK_CANCEL;

    return PA_HOOK_OK;
}

static void reserve_done(struct userdata *u) {
    pa_assert(u);

    if (u->reserve_slot) {
        pa_hook_slot_free(u->reserve_slot);
        u->reserve_slot = NULL;
    }

    if (u->reserve) {
        pa_reserve_wrapper_unref(u->reserve);
        u->reserve = NULL;
    }
}

static void reserve_update(struct userdata *u) {
    const char *description;
    pa_assert(u);

    if (!u->sink || !u->reserve)
        return;

    if ((description = pa_proplist_gets(u->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)))
        pa_reserve_wrapper_set_application_device_name(u->reserve, description);
}

static int reserve_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (u->reserve)
        return 0;

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, try to lock the device */
    u->reserve = pa_reserve_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->reserve))
        return -1;

    reserve_update(u);

    pa_assert(!u->reserve_slot);
    u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);

    return 0;
}

static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
    pa_assert(w);
    pa_assert(u);

    if (PA_PTR_TO_UINT(busy) && !u->reserve) {
        pa_log_debug("Suspending sink %s, because another application is blocking access to the device.", u->sink->name);
        pa_sink_suspend(u->sink, true, PA_SUSPEND_APPLICATION);
    } else {
        pa_log_debug("Resuming sink %s, because other applications aren't blocking access to the device any more.", u->sink->name);
        pa_sink_suspend(u->sink, false, PA_SUSPEND_APPLICATION);
    }

    return PA_HOOK_OK;
}

static void monitor_done(struct userdata *u) {
    pa_assert(u);

    if (u->monitor_slot) {
        pa_hook_slot_free(u->monitor_slot);
        u->monitor_slot = NULL;
    }

    if (u->monitor) {
        pa_reserve_monitor_wrapper_unref(u->monitor);
        u->monitor = NULL;
    }
}

static int reserve_monitor_init(struct userdata *u, const char *dname) {
    char *rname;

    pa_assert(u);
    pa_assert(dname);

    if (pa_in_system_mode())
        return 0;

    if (!(rname = pa_alsa_get_reserve_name(dname)))
        return 0;

    /* We are resuming, start watching the device */
    u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
    pa_xfree(rname);

    if (!(u->monitor))
        return -1;

    pa_assert(!u->monitor_slot);
    u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);

    return 0;
}

static void fix_min_sleep_wakeup(struct userdata *u) {
    size_t max_use, max_use_2;

    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;
    max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);

    u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->sink->sample_spec);
    u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);

    u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->sink->sample_spec);
    u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
}

static void fix_tsched_watermark(struct userdata *u) {
    size_t max_use;
    pa_assert(u);
    pa_assert(u->use_tsched);

    max_use = u->hwbuf_size - u->hwbuf_unused;

    if (u->tsched_watermark > max_use - u->min_sleep)
        u->tsched_watermark = max_use - u->min_sleep;

    if (u->tsched_watermark < u->min_wakeup)
        u->tsched_watermark = u->min_wakeup;

    u->tsched_watermark_usec = pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec);
}

static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's
       raise the latency, unless doing so was disabled in
       configuration */
    if (u->fixed_latency_range)
        return;

    old_min_latency = u->sink->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
    }

    /* When we reach this we're officially fucked! */
}

static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    if (u->watermark_dec_not_before <= 0)
        goto restart;

    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);

    /* We don't change the latency range */

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}

/* Called from IO Context on unsuspend or from main thread when creating sink */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            bool in_thread) {
    u->tsched_watermark = pa_convert_size(tsched_watermark, ss, &u->sink->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_sink_set_latency_range_within_thread(u->sink,
                                                u->min_latency_ref,
                                                pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_sink_set_latency_range(u->sink,
                                  0,
                                  pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work around an assert in pa_sink_set_latency_range_within_thread():
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->sink->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) u->tsched_watermark_usec / PA_USEC_PER_MSEC);
}

static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
    pa_usec_t usec, wm;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_sink_get_requested_latency_within_thread(u->sink);

    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->sink->sample_spec);

    wm = u->tsched_watermark_usec;

    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
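
/* For instance, with a requested latency of 25 ms and the default 20 ms
 * watermark this yields sleep_usec = 5 ms and process_usec = 20 ms. If the
 * requested latency drops below the watermark, the effective watermark is
 * clamped to usec/2 so that both halves stay within the request. */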

/* Reset smoother and counters */
static void reset_vars(struct userdata *u) {

    pa_smoother_reset(u->smoother, pa_rtclock_now(), true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    u->first = true;
    u->since_start = 0;
    u->write_count = 0;
}

/* Called from IO context */
static void close_pcm(struct userdata *u) {
    /* Let's suspend -- we don't call snd_pcm_drain() here since that might
     * take awfully long with our long buffer sizes today. */
    snd_pcm_close(u->pcm_handle);
    u->pcm_handle = NULL;

    if (u->alsa_rtpoll_item) {
        pa_rtpoll_item_free(u->alsa_rtpoll_item);
        u->alsa_rtpoll_item = NULL;
    }
}

static int try_recover(struct userdata *u, const char *call, int err) {
    pa_assert(u);
    pa_assert(call);
    pa_assert(err < 0);

    pa_log_debug("%s: %s", call, pa_alsa_strerror(err));

    pa_assert(err != -EAGAIN);

    if (err == -EPIPE)
        pa_log_debug("%s: Buffer underrun!", call);

    if (err == -ESTRPIPE)
        pa_log_debug("%s: System suspended!", call);

    if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
        pa_log("%s: %s, trying to restart PCM", call, pa_alsa_strerror(err));

        /* As a last measure, restart the PCM and inform the caller about it. */
        close_pcm(u);
        if (unsuspend(u, true) < 0)
            return -1;

        return 1;
    }

    reset_vars(u);
    return 0;
}

static size_t check_left_to_play(struct userdata *u, size_t n_bytes, bool on_timeout) {
    size_t left_to_play;
    bool underrun = false;

    /* We use <= instead of < for this check here because an underrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= u->hwbuf_size)
        left_to_play = u->hwbuf_size - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_play = 0;
        underrun = true;

#if 0
        PA_DEBUG_TRAP;
#endif

        if (!u->first && !u->after_rewind)
            if (pa_log_ratelimit(PA_LOG_INFO))
                pa_log_info("Underrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        bool reset_not_before = true;

        if (!u->first && !u->after_rewind) {
            if (underrun || left_to_play < u->watermark_inc_threshold)
                increase_watermark(u);
            else if (left_to_play > u->watermark_dec_threshold) {
                reset_not_before = false;

                /* We decrease the watermark only if we have actually
                 * been woken up by a timeout. If something else woke
                 * us up it's too easy to fulfill the deadlines... */

                if (on_timeout)
                    decrease_watermark(u);
            }
        }

        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_play;
}
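
/* Taken together, the watermark policy is asymmetric: grow the watermark
 * aggressively (doubling, bounded by watermark_inc_step) whenever we hit an
 * underrun or drop below watermark_inc_threshold, but shrink it cautiously,
 * only after the level has stayed above watermark_dec_threshold for
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC and only on timeout-driven wakeups. */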

static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        /* First we determine how many samples are missing to fill the
         * buffer up to 100% */

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = false;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not filling up, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write.\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because not necessary.");
#endif
            break;
        }

        j++;

        if (j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
#endif
            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = false;

#ifdef DEBUG_TIMING
        pa_log_debug("Filling up");
#endif

        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;
            size_t written;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
/*             pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                if (r == 1)
                    break;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            frames = PA_MIN(frames, u->frames_per_block);

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7) == 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            written = frames * u->frame_size;
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, written, true);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_sink_render_into_full(u->sink, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((int) sframes == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                if (r == 1)
                    break;

                return r;
            }

            work_done = true;

            u->write_count += written;
            u->since_start += written;

#ifdef DEBUG_TIMING
            pa_log_debug("Wrote %lu bytes (of possible %lu bytes)", (unsigned long) written, (unsigned long) n_bytes);
#endif

            if (written >= n_bytes)
                break;

            n_bytes -= written;
        }
    }

    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, bool polled, bool on_timeout) {
    bool work_done = false;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_play, input_underrun;
    unsigned j = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        bool after_avail = true;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) >= 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
        on_timeout = false;

        if (u->use_tsched)

            /* We won't fill up the playback buffer before at least
             * half the sleep time is over because otherwise we might
             * ask for more data from the clients than they expect. We
             * need to guarantee that clients only have to keep around
             * a single hw buffer length. */

            if (!polled &&
                pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= u->hwbuf_unused)) {

            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to write new data to the device, but there was actually nothing to write.\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLOUT set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        j++;

        if (j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        } else if (j >= 2 && (n_bytes < (DEFAULT_WRITE_ITERATION_THRESHOLD * (u->hwbuf_size - u->hwbuf_unused)))) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because <%g%% available.", DEFAULT_WRITE_ITERATION_THRESHOLD * 100);
#endif
            break;
        }

        n_bytes -= u->hwbuf_unused;
        polled = false;

        for (;;) {
            snd_pcm_sframes_t frames;
            void *p;
            size_t written;

/*         pa_log_debug("%lu frames to write", (unsigned long) frames); */

            if (u->memchunk.length <= 0)
                pa_sink_render(u->sink, n_bytes, &u->memchunk);

            pa_assert(u->memchunk.length > 0);

            frames = (snd_pcm_sframes_t) (u->memchunk.length / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

            p = pa_memblock_acquire(u->memchunk.memblock);
            frames = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, (snd_pcm_uframes_t) frames);
            pa_memblock_release(u->memchunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_writei", (int) frames)) == 0)
                    continue;

                if (r == 1)
                    break;

                return r;
            }

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = false;

            written = frames * u->frame_size;
            u->memchunk.index += written;
            u->memchunk.length -= written;

            if (u->memchunk.length <= 0) {
                pa_memblock_unref(u->memchunk.memblock);
                pa_memchunk_reset(&u->memchunk);
            }

            work_done = true;

            u->write_count += written;
            u->since_start += written;

/*         pa_log_debug("wrote %lu frames", (unsigned long) frames); */

            if (written >= n_bytes)
                break;

            n_bytes -= written;
        }
    }

    input_underrun = pa_sink_process_input_underruns(u->sink, left_to_play);

    if (u->use_tsched) {
        pa_usec_t underrun_sleep = pa_bytes_to_usec_round_up(input_underrun, &u->sink->sample_spec);

        *sleep_usec = pa_bytes_to_usec(left_to_play, &u->sink->sample_spec);
        process_usec = u->tsched_watermark_usec;

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;

        *sleep_usec = PA_MIN(*sleep_usec, underrun_sleep);
    } else
        *sleep_usec = 0;

    return work_done ? 1 : 0;
}

static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    int64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;
    snd_htimestamp_t htstamp = { 0, 0 };

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, status, &delay, u->hwbuf_size, &u->sink->sample_spec, false)) < 0)) {
        pa_log_warn("Failed to query DSP status data: %s", pa_alsa_strerror(err));
        return;
    }

    snd_pcm_status_get_htstamp(status, &htstamp);
    now1 = pa_timespec_load(&htstamp);

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);

    if (PA_UNLIKELY(position < 0))
        position = 0;

    now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN(u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
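
/* With SMOOTHER_MIN_INTERVAL = 2 ms and SMOOTHER_MAX_INTERVAL = 200 ms the
 * update interval doubles 2, 4, 8, ... ms after each accepted update, so the
 * smoother settles at one update every 200 ms after about seven updates. */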

static int64_t sink_get_latency(struct userdata *u) {
    int64_t delay;
    pa_usec_t now1, now2;

    pa_assert(u);

    now1 = pa_rtclock_now();
    now2 = pa_smoother_get(u->smoother, now1);

    delay = (int64_t) pa_bytes_to_usec(u->write_count, &u->sink->sample_spec) - (int64_t) now2;

    if (u->memchunk.memblock)
        delay += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);

    return delay;
}
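
/* That is: latency = (time worth of audio handed to ALSA so far) minus (the
 * smoothed estimate of how much has actually been played), plus whatever is
 * still queued in our partial memchunk and not yet written to the device. */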

static int build_pollfd(struct userdata *u) {
    pa_assert(u);
    pa_assert(u->pcm_handle);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
        return -1;

    return 0;
}

/* Called from IO context */
static void suspend(struct userdata *u) {
    pa_assert(u);

    /* Handle may have been invalidated due to a device failure.
     * In that case there is nothing to do. */
    if (!u->pcm_handle)
        return;

    pa_smoother_pause(u->smoother, pa_rtclock_now());

    /* Close PCM device */
    close_pcm(u);

    /* We reset max_rewind/max_request here to make sure that the old
     * max_request/max_rewind values set before the suspend don't
     * influence the per-stream buffers of streams created while we
     * are suspended, and that those streams' requirements don't have
     * any influence on the values either. */
    pa_sink_set_max_rewind_within_thread(u->sink, 0);
    pa_sink_set_max_request_within_thread(u->sink, 0);

    pa_log_info("Device suspended...");
}

/* Called from IO context */
static int update_sw_params(struct userdata *u, bool may_need_rewind) {
    size_t old_unused;
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    old_unused = u->hwbuf_unused;
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_sink_get_requested_latency_within_thread(u->sink)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("Latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->sink->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* We need at least one frame in the used part of the buffer */
    avail_min = (snd_pcm_uframes_t) u->hwbuf_unused / u->frame_size + 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->sink->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    /* If we're lowering the latency, we need to do a rewind, because otherwise
     * we might end up in a situation where the hw buffer contains more data
     * than the new configured latency. The rewind has to be requested before
     * updating max_rewind, because the rewind amount is limited to max_rewind.
     *
     * If may_need_rewind is false, it means that we're just starting playback,
     * and rewinding is never needed in that situation. */
    if (may_need_rewind && u->hwbuf_unused > old_unused) {
        pa_log_debug("Requesting rewind due to latency change.");
        pa_sink_request_rewind(u->sink, (size_t) -1);
    }

    pa_sink_set_max_request_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind_within_thread(u->sink, u->hwbuf_size - u->hwbuf_unused);
    else {
        pa_log_info("Disabling rewind_within_thread for device %s", u->device_name);
        pa_sink_set_max_rewind_within_thread(u->sink, 0);
    }

    return 0;
}
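
/* Example: with hwbuf_unused == 0, a 48 kHz stream and hw_sleep_time()
 * returning 5 ms of sleep, avail_min becomes 1 + 5 * 48 = 241 frames, i.e.
 * ALSA only marks the fd writable once at least that much space is free. */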

/* Called from IO Context on unsuspend */
static void update_size(struct userdata *u, pa_sample_spec *ss) {
    pa_assert(u);
    pa_assert(ss);

    u->frame_size = pa_frame_size(ss);
    u->frames_per_block = pa_mempool_block_size_max(u->core->mempool) / u->frame_size;

    /* use initial values including module arguments */
    u->fragment_size = u->initial_info.fragment_size;
    u->hwbuf_size = u->initial_info.nfrags * u->fragment_size;
    u->tsched_size = u->initial_info.tsched_size;
    u->tsched_watermark = u->initial_info.tsched_watermark;
    u->rewind_safeguard = u->initial_info.rewind_safeguard;

    u->tsched_watermark_ref = u->tsched_watermark;

    pa_log_info("Updated frame_size %zu, frames_per_block %lu, fragment_size %zu, hwbuf_size %zu, tsched(size %zu, watermark %zu), rewind_safeguard %zu",
                u->frame_size, (unsigned long) u->frames_per_block, u->fragment_size, u->hwbuf_size, u->tsched_size, u->tsched_watermark, u->rewind_safeguard);
}

/* Called from IO context */
static int unsuspend(struct userdata *u, bool recovering) {
    pa_sample_spec ss;
    int err, i;
    bool b, d;
    snd_pcm_uframes_t period_frames, buffer_frames;
    snd_pcm_uframes_t tsched_frames = 0;
    char *device_name = NULL;
    bool frame_size_changed = false;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    if ((is_iec958(u) || is_hdmi(u)) && pa_sink_is_passthrough(u->sink)) {
        /* Need to open device in NONAUDIO mode */
        int len = strlen(u->device_name) + 8;

        device_name = pa_xmalloc(len);
        pa_snprintf(device_name, len, "%s,AES0=6", u->device_name);
    }

    /*
     * On some machines, during system suspend and resume, thread_func can
     * receive POLLERR events before the device nodes in /dev/snd/ are
     * accessible again. thread_func then calls unsuspend() to recover the
     * PCM, which makes snd_pcm_open() fail, so we sleep and retry here to
     * give those nodes time to become accessible.
     */
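    /* Four attempts 25 ms apart give the device roughly 100 ms to reappear. */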
    for (i = 0; i < 4; i++) {
        if ((err = snd_pcm_open(&u->pcm_handle, device_name ? device_name : u->device_name, SND_PCM_STREAM_PLAYBACK,
                                SND_PCM_NONBLOCK|
                                SND_PCM_NO_AUTO_RESAMPLE|
                                SND_PCM_NO_AUTO_CHANNELS|
                                SND_PCM_NO_AUTO_FORMAT)) < 0 && recovering)
            pa_msleep(25);
        else
            break;
    }

    if (err < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    if (pa_frame_size(&u->sink->sample_spec) != u->frame_size) {
        update_size(u, &u->sink->sample_spec);
        tsched_frames = u->tsched_size / u->frame_size;
        frame_size_changed = true;
    }

    ss = u->sink->sample_spec;
    period_frames = u->fragment_size / u->frame_size;
    buffer_frames = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_frames, &buffer_frames, tsched_frames, &b, &d, true)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (frame_size_changed) {
        u->fragment_size = (size_t)(period_frames * u->frame_size);
        u->hwbuf_size = (size_t)(buffer_frames * u->frame_size);
        pa_proplist_setf(u->sink->proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%zu", u->hwbuf_size);
        pa_proplist_setf(u->sink->proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%zu", u->fragment_size);

    } else if (period_frames * u->frame_size != u->fragment_size ||
                buffer_frames * u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %zu/%zu, New %lu/%lu)",
                    u->hwbuf_size, u->fragment_size,
                    (unsigned long) buffer_frames * u->frame_size, (unsigned long) period_frames * u->frame_size);
        goto fail;
    }

    if (update_sw_params(u, false) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    reset_vars(u);

    /* reset the watermark to the value defined when the sink was created */
    if (u->use_tsched && !recovering)
        reset_watermark(u, u->tsched_watermark_ref, &u->sink->sample_spec, true);

    pa_log_info("Resumed successfully...");

    pa_xfree(device_name);
    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    pa_xfree(device_name);

    return -PA_ERR_IO;
}

/* Called from the IO thread or the main thread depending on whether deferred
 * volume is enabled or not (with deferred volume all mixer handling is done
 * from the IO thread).
 *
 * Sets the mixer settings to match the current sink and port state (the port
 * is given as an argument, because active_port may still point to the old
 * port, if we're switching ports). */
static void sync_mixer(struct userdata *u, pa_device_port *port) {
    pa_alsa_setting *setting = NULL;

    pa_assert(u);

    if (!u->mixer_path)
        return;

    /* port may be NULL, because if we use a synthesized mixer path, then the
     * sink has no ports. */
    if (port && !u->ucm_context) {
        pa_alsa_port_data *data;

        data = PA_DEVICE_PORT_DATA(port);
        setting = data->setting;
    }

    pa_alsa_path_select(u->mixer_path, setting, u->mixer_handle, u->sink->muted);

    if (u->sink->set_mute)
        u->sink->set_mute(u->sink);
    if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
        if (u->sink->write_volume)
            u->sink->write_volume(u->sink);
    } else {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    }
}
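
/* In the deferred-volume case this sync is requested with the
 * SINK_MESSAGE_SYNC_MIXER message, handled in sink_process_msg() below, so
 * that all mixer access happens from the IO thread. */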

/* Called from IO context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_GET_LATENCY: {
            int64_t r = 0;

            if (u->pcm_handle)
                r = sink_get_latency(u);

            *((int64_t*) data) = r;

            return 0;
        }

        case SINK_MESSAGE_SYNC_MIXER: {
            pa_device_port *port = data;

            sync_mixer(u, port);
            return 0;
        }
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

/* Called from main context */
static int sink_set_state_in_main_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
    pa_sink_state_t old_state;
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    /* When our session becomes active, we need to sync the mixer, because
     * another user may have changed the mixer settings.
     *
     * If deferred volume is enabled, the syncing is done in the
     * set_state_in_io_thread() callback instead. */
    if (!(s->flags & PA_SINK_DEFERRED_VOLUME)
            && (s->suspend_cause & PA_SUSPEND_SESSION)
            && !(new_suspend_cause & PA_SUSPEND_SESSION))
        sync_mixer(u, s->active_port);

    old_state = u->sink->state;

    if (PA_SINK_IS_OPENED(old_state) && new_state == PA_SINK_SUSPENDED)
        reserve_done(u);
    else if (old_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(new_state))
        if (reserve_init(u, u->device_name) < 0)
            return -PA_ERR_BUSY;

    return 0;
}

/* Called from the IO thread. */
static int sink_set_state_in_io_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
    struct userdata *u;

    pa_assert(s);
    pa_assert_se(u = s->userdata);

    /* When our session becomes active, we need to sync the mixer, because
     * another user may have changed the mixer settings.
     *
     * If deferred volume is disabled, the syncing is done in the
     * set_state_in_main_thread() callback instead. */
    if ((s->flags & PA_SINK_DEFERRED_VOLUME)
            && (s->suspend_cause & PA_SUSPEND_SESSION)
            && !(new_suspend_cause & PA_SUSPEND_SESSION))
        sync_mixer(u, s->active_port);

    /* It may be that only the suspend cause is changing, in which case there's
     * nothing more to do. */
    if (new_state == s->thread_info.state)
        return 0;

    switch (new_state) {

        case PA_SINK_SUSPENDED: {
            pa_assert(PA_SINK_IS_OPENED(s->thread_info.state));

            suspend(u);

            break;
        }

        case PA_SINK_IDLE:
        case PA_SINK_RUNNING: {
            int r;

            if (s->thread_info.state == PA_SINK_INIT) {
                if (build_pollfd(u) < 0)
                    /* FIXME: This will cause an assertion failure, because
                     * with the current design pa_sink_put() is not allowed
                     * to fail and pa_sink_put() has no fallback code that
                     * would start the sink suspended if opening the device
                     * fails. */
                    return -PA_ERR_IO;
            }

            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                if ((r = unsuspend(u, false)) < 0)
                    return r;
            }

            break;
        }

        case PA_SINK_UNLINKED:
        case PA_SINK_INIT:
        case PA_SINK_INVALID_STATE:
            break;
    }

    return 0;
}

static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (!PA_SINK_IS_LINKED(u->sink->state))
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE) {
        pa_sink_get_volume(u->sink, true);
        pa_sink_get_mute(u->sink, true);
    }

    return 0;
}

static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
    struct userdata *u = snd_mixer_elem_get_callback_private(elem);

    pa_assert(u);
    pa_assert(u->mixer_handle);

    if (mask == SND_CTL_EVENT_MASK_REMOVE)
        return 0;

    if (u->sink->suspend_cause & PA_SUSPEND_SESSION)
        return 0;

    if (mask & SND_CTL_EVENT_MASK_VALUE)
        pa_sink_update_volume_and_mute(u->sink);

    return 0;
}

static void sink_get_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    pa_log_debug("Read hardware volume: %s",
                 pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, u->mixer_path->has_dB));

    if (pa_cvolume_equal(&u->hardware_volume, &r))
        return;

    s->real_volume = u->hardware_volume = r;

    /* Hmm, so the hardware volume changed, let's reset our software volume */
    if (u->mixer_path->has_dB)
        pa_sink_set_soft_volume(s, NULL);
}

static void sink_set_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char volume_buf[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
    bool deferred_volume = !!(s->flags & PA_SINK_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        bool accurate_enough;

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &s->real_volume, &s->channel_map, true));
        pa_log_debug("Got hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &u->hardware_volume, &s->channel_map, true));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &new_soft_volume, &s->channel_map, true),
                     pa_yes_no(accurate_enough));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s",
                     pa_cvolume_snprint_verbose(volume_buf, sizeof(volume_buf), &r, &s->channel_map, false));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1548 
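/* Deferred-volume writer, called from the IO thread: writes the hardware
 * volume scheduled for this moment (thread_info.current_hw_volume) to the
 * mixer and logs if the result deviates from the request by more than
 * VOLUME_ACCURACY. */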
static void sink_write_volume_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SINK_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, true, true) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        bool accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            char volume_buf[2][PA_CVOLUME_SNPRINT_VERBOSE_MAX];

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint_verbose(volume_buf[0],
                                                    sizeof(volume_buf[0]),
                                                    &s->thread_info.current_hw_volume,
                                                    &s->channel_map,
                                                    true),
                         pa_cvolume_snprint_verbose(volume_buf[1], sizeof(volume_buf[1]), &hw_vol, &s->channel_map, true));
        }
    }
}

static int sink_get_mute_cb(pa_sink *s, bool *mute) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, mute) < 0)
        return -1;

    return 0;
}

static void sink_set_mute_cb(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}

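/* Wires up the volume/mute callbacks for the current mixer path, using
 * hardware control where the path supports it and falling back to software
 * control otherwise. Also derives base_volume and the number of volume steps
 * from the path's dB range or raw volume range. */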
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path || !u->mixer_path->has_volume) {
        pa_sink_set_write_volume_callback(u->sink, NULL);
        pa_sink_set_get_volume_callback(u->sink, NULL);
        pa_sink_set_set_volume_callback(u->sink, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_sink_set_get_volume_callback(u->sink, sink_get_volume_cb);
        pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);

        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_sink_set_write_volume_callback(u->sink, sink_write_volume_cb);
            pa_log_info("Successfully enabled deferred volume.");
        } else
            pa_sink_set_write_volume_callback(u->sink, NULL);

        if (u->mixer_path->has_dB) {
            pa_sink_enable_decibel_volume(u->sink, true);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            u->sink->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->sink->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->sink->base_volume));
        } else {
            pa_sink_enable_decibel_volume(u->sink, false);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            u->sink->base_volume = PA_VOLUME_NORM;
            u->sink->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path || !u->mixer_path->has_mute) {
        pa_sink_set_get_mute_callback(u->sink, NULL);
        pa_sink_set_set_mute_callback(u->sink, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_sink_set_get_mute_callback(u->sink, sink_get_mute_cb);
        pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}

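/* Port switch for UCM-managed sinks: adopt the port's mixer path,
 * re-initialize the volume callbacks, sync the mixer (via the IO thread if
 * deferred volume is active) and let UCM run the device enable sequence. */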
static int sink_set_port_ucm_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_ucm_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->ucm_context);

    data = PA_DEVICE_PORT_DATA(p);
    u->mixer_path = data->path;
    mixer_volume_init(u);

    if (s->flags & PA_SINK_DEFERRED_VOLUME)
        pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_SYNC_MIXER, p, 0, NULL);
    else
        sync_mixer(u, p);

    return pa_alsa_ucm_set_port(u->ucm_context, p, true);
}

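/* Port switch for mixer-path based sinks. With deferred volume the mixer
 * sync has to happen in the IO thread, hence the round trip through the
 * asyncmsgq; otherwise the mixer can be synced directly. */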
static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
    struct userdata *u = s->userdata;
    pa_alsa_port_data *data;

    pa_assert(u);
    pa_assert(p);
    pa_assert(u->mixer_handle);
    pa_assert(!u->ucm_context);

    data = PA_DEVICE_PORT_DATA(p);
    pa_assert_se(u->mixer_path = data->path);
    mixer_volume_init(u);

    if (s->flags & PA_SINK_DEFERRED_VOLUME)
        pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_SYNC_MIXER, p, 0, NULL);
    else
        sync_mixer(u, p);

    if (data->suspend_when_unavailable && p->available == PA_AVAILABLE_NO)
        pa_sink_suspend(s, true, PA_SUSPEND_UNAVAILABLE);
    else
        pa_sink_suspend(s, false, PA_SUSPEND_UNAVAILABLE);

    return 0;
}

static void sink_update_requested_latency_cb(pa_sink *s) {
    struct userdata *u = s->userdata;
    pa_assert(u);
    pa_assert(u->use_tsched); /* the latency can be adjusted
                               * dynamically only when timer-based
                               * scheduling is used */

    if (!u->pcm_handle)
        return;

    update_sw_params(u, true);
}

static pa_idxset* sink_get_formats(pa_sink *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);

    return pa_idxset_copy(u->formats, (pa_copy_func_t) pa_format_info_copy);
}

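/* Replaces the set of formats the sink advertises. Non-PCM formats are
 * inserted first (restricted to the rates the device supports) since we
 * prefer passthrough; PCM formats are accepted at any rate because a
 * resampler handles unsupported rates. */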
static bool sink_set_formats(pa_sink *s, pa_idxset *formats) {
    struct userdata *u = s->userdata;
    pa_format_info *f, *g;
    uint32_t idx, n;

    pa_assert(u);

    /* FIXME: also validate sample rates against what the device supports */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (is_iec958(u) && f->encoding == PA_ENCODING_EAC3_IEC61937)
            /* EAC3 cannot be sent over S/PDIF */
            return false;
    }

    pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);
    u->formats = pa_idxset_new(NULL, NULL);

    /* Note: the logic below won't apply if we're using software encoding.
     * This is fine for now since we don't support that via the passthrough
     * framework, but this must be changed if we do. */

    /* Count how many sample rates we support */
    for (idx = 0, n = 0; u->supported_rates[idx]; idx++)
        n++;

    /* First insert non-PCM formats since we prefer those. */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (!pa_format_info_is_pcm(f)) {
            g = pa_format_info_copy(f);
            pa_format_info_set_prop_int_array(g, PA_PROP_FORMAT_RATE, (int *) u->supported_rates, n);
            pa_idxset_put(u->formats, g, NULL);
        }
    }

    /* Now add any PCM formats */
    PA_IDXSET_FOREACH(f, formats, idx) {
        if (pa_format_info_is_pcm(f)) {
            /* We don't set rates here since we'll just tack on a resampler for
             * unsupported rates */
            pa_idxset_put(u->formats, pa_format_info_copy(f), NULL);
        }
    }

    return true;
}

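/* Validates a requested sample spec against the formats and rates the device
 * was probed to support, falling back to the verified spec for anything
 * unsupported. */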
static void sink_reconfigure_cb(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
    struct userdata *u = s->userdata;
    int i;
    bool format_supported = false;
    bool rate_supported = false;

    pa_assert(u);

    for (i = 0; u->supported_formats[i] != PA_SAMPLE_MAX; i++) {
        if (u->supported_formats[i] == spec->format) {
            pa_sink_set_sample_format(u->sink, spec->format);
            format_supported = true;
            break;
        }
    }

    if (!format_supported) {
        pa_log_info("Sink does not support sample format of %s, set it to a verified value",
                    pa_sample_format_to_string(spec->format));
        pa_sink_set_sample_format(u->sink, u->verified_sample_spec.format);
    }

    for (i = 0; u->supported_rates[i]; i++) {
        if (u->supported_rates[i] == spec->rate) {
            pa_sink_set_sample_rate(u->sink, spec->rate);
            rate_supported = true;
            break;
        }
    }

    if (!rate_supported) {
        pa_log_info("Sink does not support sample rate of %u, set it to a verified value", spec->rate);
        pa_sink_set_sample_rate(u->sink, u->verified_sample_spec.rate);
    }

    /* Passthrough status change is handled during unsuspend */
}

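/* Executes a rewind request from the core: clamps the requested byte count
 * so that we never rewind into data the hardware may already be fetching
 * (hence the rewind_safeguard margin), then asks ALSA to move the write
 * pointer back by that amount. */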
static int process_rewind(struct userdata *u) {
    snd_pcm_sframes_t unused;
    size_t rewind_nbytes, unused_nbytes, limit_nbytes;
    int err;
    pa_assert(u);

    if (!PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
        pa_sink_process_rewind(u->sink, 0);
        return 0;
    }

    /* Figure out how much we shall rewind and reset the counter */
    rewind_nbytes = u->sink->thread_info.rewind_nbytes;

    pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes);

    if (PA_UNLIKELY((unused = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->sink->sample_spec)) < 0)) {
        if ((err = try_recover(u, "snd_pcm_avail", (int) unused)) < 0) {
            pa_log_warn("Trying to recover from underrun failed during rewind");
            return -1;
        }
        if (err == 1)
            goto rewind_done;
    }

    unused_nbytes = (size_t) unused * u->frame_size;

    /* make sure rewind doesn't go too far, can cause issues with DMAs */
    unused_nbytes += u->rewind_safeguard;

    if (u->hwbuf_size > unused_nbytes)
        limit_nbytes = u->hwbuf_size - unused_nbytes;
    else
        limit_nbytes = 0;

    if (rewind_nbytes > limit_nbytes)
        rewind_nbytes = limit_nbytes;

    if (rewind_nbytes > 0) {
        snd_pcm_sframes_t in_frames, out_frames;

        pa_log_debug("Limited to %lu bytes.", (unsigned long) rewind_nbytes);

        in_frames = (snd_pcm_sframes_t) (rewind_nbytes / u->frame_size);
        pa_log_debug("before: %lu", (unsigned long) in_frames);
        if ((out_frames = snd_pcm_rewind(u->pcm_handle, (snd_pcm_uframes_t) in_frames)) < 0) {
            pa_log("snd_pcm_rewind() failed: %s", pa_alsa_strerror((int) out_frames));
            if ((err = try_recover(u, "process_rewind", out_frames)) < 0)
                return -1;
            if (err == 1)
                goto rewind_done;
            out_frames = 0;
        }

        pa_log_debug("after: %lu", (unsigned long) out_frames);

        rewind_nbytes = (size_t) out_frames * u->frame_size;

        if (rewind_nbytes <= 0)
            pa_log_info("Tried rewind, but was apparently not possible.");
        else {
            u->write_count -= rewind_nbytes;
            pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes);
            pa_sink_process_rewind(u->sink, rewind_nbytes);

            u->after_rewind = true;
            return 0;
        }
    } else
        pa_log_debug("Mhmm, actually there is nothing to rewind.");

rewind_done:
    pa_sink_process_rewind(u->sink, 0);
    return 0;
}

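/* The IO thread's main loop: processes rewind requests, renders audio into
 * the ALSA buffer, computes the next wakeup from the smoother's estimate of
 * the sound card clock, and dispatches poll events, including recovery when
 * ALSA reports an error condition on the PCM file descriptors. */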
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_thread_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0, real_sleep;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) {
            if (process_rewind(u) < 0)
                goto fail;
        }

        /* Render some data and write it to the dsp */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            bool on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            if (u->use_mmap)
                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
            else
                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done) {

                if (u->first) {
                    pa_log_info("Starting playback.");
                    snd_pcm_start(u->pcm_handle);

                    pa_smoother_resume(u->smoother, pa_rtclock_now(), true);

                    u->first = false;
                }

                update_smoother(u);
            }

            if (u->use_tsched) {
                pa_usec_t cusec;

                if (u->since_start <= u->hwbuf_size) {

                    /* USB devices on ALSA seem to hit a buffer
                     * underrun during the first iterations much
                     * quicker than we calculate here, probably due to
                     * the transport latency. To accommodate for that
                     * we artificially decrease the sleep time until
                     * we have filled the buffer at least once
                     * completely. */

                    if (pa_log_ratelimit(PA_LOG_DEBUG))
                        pa_log_debug("Cutting sleep time for the initial iterations by half.");
                    sleep_usec /= 2;
                }

                /* OK, the playback buffer is now full, let's
                 * calculate when to wake up next */
#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC);
#endif

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

#ifdef DEBUG_TIMING
                pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC);
#endif

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }

            u->after_rewind = false;

        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_sink_volume_change_apply(u->sink, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0) {
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
            real_sleep = pa_rtclock_now();
        } else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll)) < 0)
            goto fail;

        if (rtpoll_sleep > 0) {
            real_sleep = pa_rtclock_now() - real_sleep;
#ifdef DEBUG_TIMING
            pa_log_debug("Expected sleep: %0.2fms, real sleep: %0.2fms (diff %0.2f ms)",
                (double) rtpoll_sleep / PA_USEC_PER_MSEC, (double) real_sleep / PA_USEC_PER_MSEC,
                (double) ((int64_t) real_sleep - (int64_t) rtpoll_sleep) / PA_USEC_PER_MSEC);
#endif
            if (u->use_tsched && real_sleep > rtpoll_sleep + u->tsched_watermark_usec)
                pa_log_info("Scheduling delay of %0.2f ms > %0.2f ms, you might want to investigate this to improve latency...",
                    (double) (real_sleep - rtpoll_sleep) / PA_USEC_PER_MSEC,
                    (double) (u->tsched_watermark_usec) / PA_USEC_PER_MSEC);
        }

        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_apply(u->sink, NULL);

        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            if (revents & ~POLLOUT) {
                if ((err = pa_alsa_recover_from_poll(u->pcm_handle, revents)) < 0)
                    goto fail;

                /* Stream needs to be restarted */
                if (err == 1) {
                    close_pcm(u);
                    if (unsuspend(u, true) < 0)
                        goto fail;
                } else
                    reset_vars(u);

                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

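/* Derives the sink name: an explicit sink_name= or name= module argument
 * wins (and must be registerable as-is, hence namereg_fail = true);
 * otherwise a name is composed from the device id or device name, plus the
 * mapping name if present. */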
static void set_sink_name(pa_sink_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
    const char *n;
    char *t;

    pa_assert(data);
    pa_assert(ma);
    pa_assert(device_name);

    if ((n = pa_modargs_get_value(ma, "sink_name", NULL))) {
        pa_sink_new_data_set_name(data, n);
        data->namereg_fail = true;
        return;
    }

    if ((n = pa_modargs_get_value(ma, "name", NULL)))
        data->namereg_fail = true;
    else {
        n = device_id ? device_id : device_name;
        data->namereg_fail = false;
    }

    if (mapping)
        t = pa_sprintf_malloc("alsa_output.%s.%s", n, mapping->name);
    else
        t = pa_sprintf_malloc("alsa_output.%s", n);

    pa_sink_new_data_set_name(data, t);
    pa_xfree(t);
}

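/* Locates a usable mixer for the PCM: either the mixer device named in the
 * mapping's "alsa.mixer_device" property or the one belonging to the PCM
 * itself. With an explicit element= argument a single mixer path is
 * synthesized and probed; otherwise the mapping's output path set is used. */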
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, bool ignore_dB) {
    const char *mdev;

    if (!mapping && !element)
        return;

    if (!element && mapping && pa_alsa_path_set_is_empty(mapping->output_path_set))
        return;

    u->mixers = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func,
                                    NULL, (pa_free_cb_t) pa_alsa_mixer_free);

    mdev = pa_proplist_gets(mapping->proplist, "alsa.mixer_device");
    if (mdev) {
        u->mixer_handle = pa_alsa_open_mixer_by_name(u->mixers, mdev, true);
    } else {
        u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->mixers, u->pcm_handle, true);
    }
    if (!u->mixer_handle) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_OUTPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, NULL, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {
        u->mixer_path_set = mapping->output_path_set;
    }

    return;

fail:

    if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    u->mixer_handle = NULL;
    pa_hashmap_free(u->mixers);
    u->mixers = NULL;
}

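/* Activates the mixer path belonging to the active port (or the only
 * available path), initializes the volume/mute callbacks and, if any path
 * exposes volume or mute controls, registers mixer event monitoring:
 * rtpoll-based for deferred volume, fdlist-based otherwise. */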
static int setup_mixer(struct userdata *u, bool ignore_dB) {
    bool need_mixer_callback = false;

    pa_assert(u);

    /* This code is before the u->mixer_handle check, because if the UCM
     * configuration doesn't specify volume or mute controls, u->mixer_handle
     * will be NULL, but the UCM device enable sequence will still need to be
     * executed. */
    if (u->sink->active_port && u->ucm_context) {
        if (pa_alsa_ucm_set_port(u->ucm_context, u->sink->active_port, true) < 0)
            return -1;
    }

    if (!u->mixer_handle)
        return 0;

    if (u->sink->active_port) {
        if (!u->ucm_context) {
            pa_alsa_port_data *data;

            /* We have a list of supported paths, so let's activate the
             * one that has been chosen as active */

            data = PA_DEVICE_PORT_DATA(u->sink->active_port);
            u->mixer_path = data->path;

            pa_alsa_path_select(data->path, data->setting, u->mixer_handle, u->sink->muted);
        } else {
            pa_alsa_ucm_port_data *data;

            data = PA_DEVICE_PORT_DATA(u->sink->active_port);

            /* Now activate volume controls, if any */
            if (data->path) {
                u->mixer_path = data->path;
                pa_alsa_path_select(u->mixer_path, NULL, u->mixer_handle, u->sink->muted);
            }
        }
    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = pa_hashmap_first(u->mixer_path_set->paths);

        if (u->mixer_path) {
            /* Hmm, we have only a single path, so let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_path->settings, u->mixer_handle, u->sink->muted);
        } else
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;
        void *state;

        PA_HASHMAP_FOREACH(p, u->mixer_path_set->paths, state) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = true;
        }
    } else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        if (u->sink->flags & PA_SINK_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_handle(u->mixer_fdl, u->mixer_handle, NULL, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}

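/* Sink factory, called by the ALSA modules (module-alsa-sink/-card): parses
 * the module arguments, opens and configures the PCM device (mmap and timer
 * scheduling as requested and supported), sets up mixer and ports, creates
 * the pa_sink and spawns the IO thread. Returns NULL on any failure. */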
pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL, *key, *mod_name;
    pa_sample_spec ss;
    char *thread_name = NULL;
    uint32_t alternate_sample_rate;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark, rewind_safeguard;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    bool use_mmap = true;
    bool use_tsched = true;
    bool ignore_dB = false;
    bool namereg_fail = false;
    bool deferred_volume = false;
    bool set_formats = false;
    bool fixed_latency_range = false;
    bool b;
    bool d;
    bool avoid_resampling;
    pa_sink_new_data data;
    bool volume_is_set;
    bool mute_is_set;
    pa_alsa_profile_set *profile_set = NULL;
    void *state;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    avoid_resampling = m->core->avoid_resampling;

    /* Pick sample spec overrides from the mapping, if any */
    if (mapping) {
        if (mapping->sample_spec.format != PA_SAMPLE_INVALID)
            ss.format = mapping->sample_spec.format;
        if (mapping->sample_spec.rate != 0)
            ss.rate = mapping->sample_spec.rate;
        if (mapping->sample_spec.channels != 0) {
            ss.channels = mapping->sample_spec.channels;
            if (pa_channel_map_valid(&mapping->channel_map))
                pa_assert(pa_channel_map_compatible(&mapping->channel_map, &ss));
        }
    }

    /* Override with modargs if provided */
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    alternate_sample_rate = m->core->alternate_sample_rate;
    if (pa_modargs_get_alternate_sample_rate(ma, &alternate_sample_rate) < 0) {
        pa_log("Failed to parse alternate sample rate");
        goto fail;
    }

    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    rewind_safeguard = PA_MAX(DEFAULT_REWIND_SAFEGUARD_BYTES, pa_usec_to_bytes(DEFAULT_REWIND_SAFEGUARD_USEC, &ss));
    if (pa_modargs_get_value_u32(ma, "rewind_safeguard", &rewind_safeguard) < 0) {
        pa_log("Failed to parse rewind_safeguard argument");
        goto fail;
    }

    deferred_volume = m->core->deferred_volume;
    if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
        pa_log("Failed to parse deferred_volume argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "fixed_latency_range", &fixed_latency_range) < 0) {
        pa_log("Failed to parse fixed_latency_range argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->tsched_size = tsched_size;
    u->initial_info.nfrags = (size_t) nfrags;
    u->initial_info.fragment_size = (size_t) frag_size;
    u->initial_info.tsched_size = (size_t) tsched_size;
    u->initial_info.tsched_watermark = (size_t) tsched_watermark;
    u->initial_info.rewind_safeguard = (size_t) rewind_safeguard;
    u->deferred_volume = deferred_volume;
    u->fixed_latency_range = fixed_latency_range;
    u->first = true;
    u->rewind_safeguard = rewind_safeguard;
    u->rtpoll = pa_rtpoll_new();

    if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) {
        pa_log("pa_thread_mq_init() failed.");
        goto fail;
    }

    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            true,
            true,
            5,
            pa_rtclock_now(),
            true);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    /* use ucm */
    if (mapping && mapping->ucm_context.ucm)
        u->ucm_context = &mapping->ucm_context;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    u->paths_dir = pa_xstrdup(pa_modargs_get_value(ma, "paths_dir", NULL));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

    /* Force ALSA to reread its configuration if module-alsa-card didn't
     * do it for us. This matters if our device was hot-plugged after ALSA
     * has already read its configuration - see
     * https://bugs.freedesktop.org/show_bug.cgi?id=54029
     */

    if (!card)
        snd_config_update_free_global();

    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if ((mod_name = pa_proplist_gets(mapping->proplist, PA_ALSA_PROP_UCM_MODIFIER))) {
            if (snd_use_case_set(u->ucm_context->ucm->ucm_mgr, "_enamod", mod_name) < 0)
                pa_log("Failed to enable ucm modifier %s", mod_name);
            else
                pa_log_debug("Enabled ucm modifier %s", mod_name);
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_PLAYBACK,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, false)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = false;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = false;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched) {
        pa_log_info("Successfully enabled timer-based scheduling mode.");

        if (u->fixed_latency_range)
            pa_log_info("Disabling latency range changes on underrun");
    }

    /* All passthrough formats supported by PulseAudio require
     * IEC61937 framing with two fake channels. So, passthrough
     * clients will always send two channels. Multichannel sinks
     * cannot accept that, because nobody implemented sink channel count
     * switching so far. So just don't show known non-working settings
     * to the user. */
    if ((is_iec958(u) || is_hdmi(u)) && ss.channels == 2)
        set_formats = true;

    u->verified_sample_spec = ss;

    u->supported_formats = pa_alsa_get_supported_formats(u->pcm_handle, ss.format);
    if (!u->supported_formats) {
        pa_log_error("Failed to find any supported sample formats.");
        goto fail;
    }

    u->supported_rates = pa_alsa_get_supported_rates(u->pcm_handle, ss.rate);
    if (!u->supported_rates) {
        pa_log_error("Failed to find any supported sample rates.");
        goto fail;
    }

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    pa_sink_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_sink_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse namereg_fail argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    if (pa_modargs_get_value_boolean(ma, "avoid_resampling", &avoid_resampling) < 0) {
        pa_log("Failed to parse avoid_resampling argument.");
        pa_sink_new_data_done(&data);
        goto fail;
    }
    pa_sink_new_data_set_avoid_resampling(&data, avoid_resampling);

    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);
    pa_sink_new_data_set_alternate_sample_rate(&data, alternate_sample_rate);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);

        state = NULL;
        while ((key = pa_proplist_iterate(mapping->proplist, &state)))
            pa_proplist_sets(data.proplist, key, pa_proplist_gets(mapping->proplist, key));
    }

    pa_alsa_init_description(data.proplist, card);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    if (u->ucm_context) {
        pa_alsa_ucm_add_ports(&data.ports, data.proplist, u->ucm_context, true, card, u->pcm_handle, ignore_dB);
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
    } else {
        find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
        if (u->mixer_path_set)
            pa_alsa_add_ports(&data, u->mixer_path_set, card);
    }

    u->sink = pa_sink_new(m->core, &data, PA_SINK_HARDWARE | PA_SINK_LATENCY | (u->use_tsched ? PA_SINK_DYNAMIC_LATENCY : 0) |
                          (set_formats ? PA_SINK_SET_FORMATS : 0));
    volume_is_set = data.volume_is_set;
    mute_is_set = data.muted_is_set;
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink object");
        goto fail;
    }

    if (u->ucm_context) {
        pa_device_port *port;
        unsigned h_prio = 0;
        PA_HASHMAP_FOREACH(port, u->sink->ports, state) {
            if (!h_prio || port->priority > h_prio)
                h_prio = port->priority;
        }
        /* UCM port priorities are 100, 200, ..., 900; scale down to the units digit */
        h_prio = h_prio / 100;
        u->sink->priority += h_prio;
    }

    if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
                                 &u->sink->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse deferred_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
                                 &u->sink->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse deferred_volume_extra_delay parameter");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    if (u->use_tsched)
        u->sink->update_requested_latency = sink_update_requested_latency_cb;
    u->sink->set_state_in_main_thread = sink_set_state_in_main_thread_cb;
    u->sink->set_state_in_io_thread = sink_set_state_in_io_thread_cb;
    if (u->ucm_context)
        u->sink->set_port = sink_set_port_ucm_cb;
    else
        u->sink->set_port = sink_set_port_cb;
    u->sink->reconfigure = sink_reconfigure_cb;
    u->sink->userdata = u;

    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
    pa_sink_set_rtpoll(u->sink, u->rtpoll);

    u->frame_size = frame_size;
    u->frames_per_block = pa_mempool_block_size_max(m->core->mempool) / frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

    pa_sink_set_max_request(u->sink, u->hwbuf_size);
    if (pa_alsa_pcm_is_hw(u->pcm_handle))
        pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
    else {
        pa_log_info("Disabling rewind for device %s", u->device_name);
        pa_sink_set_max_rewind(u->sink, 0);
    }

    if (u->use_tsched) {
        u->tsched_watermark_ref = tsched_watermark;
        reset_watermark(u, u->tsched_watermark_ref, &ss, false);
    } else
        pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u, false) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    thread_name = pa_sprintf_malloc("alsa-sink-%s", pa_strnull(pa_proplist_gets(u->sink->proplist, "alsa.id")));
    if (!(u->thread = pa_thread_new(thread_name, thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }
    pa_xfree(thread_name);
    thread_name = NULL;

    /* Get initial mixer settings */
    if (volume_is_set) {
        if (u->sink->set_volume)
            u->sink->set_volume(u->sink);
    } else {
        if (u->sink->get_volume)
            u->sink->get_volume(u->sink);
    }

    if (mute_is_set) {
        if (u->sink->set_mute)
            u->sink->set_mute(u->sink);
    } else {
        if (u->sink->get_mute) {
            bool mute;

            if (u->sink->get_mute(u->sink, &mute) >= 0)
                pa_sink_set_mute(u->sink, mute, false);
        }
    }

    if ((volume_is_set || mute_is_set) && u->sink->write_volume)
        u->sink->write_volume(u->sink);

    if (set_formats) {
        /* For S/PDIF and HDMI, allow getting/setting custom formats */
        pa_format_info *format;

        /* To start with, we only support PCM formats. Other formats may be added
         * with pa_sink_set_formats(). */
        format = pa_format_info_new();
        format->encoding = PA_ENCODING_PCM;
        u->formats = pa_idxset_new(NULL, NULL);
        pa_idxset_put(u->formats, format, NULL);

        u->sink->get_formats = sink_get_formats;
        u->sink->set_formats = sink_set_formats;
    }

    pa_sink_put(u->sink);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    /* Suspend if necessary. FIXME: It would be better to start suspended, but
     * that would require some core changes. It's possible to set
     * pa_sink_new_data.suspend_cause, but that has to be done before the
     * pa_sink_new() call, and we know if we need to suspend only after the
     * pa_sink_new() call when the initial port has been chosen. Calling
     * pa_sink_suspend() between pa_sink_new() and pa_sink_put() would
     * otherwise work, but currently pa_sink_suspend() will crash if
     * pa_sink_put() hasn't been called. */
    if (u->sink->active_port && !u->ucm_context) {
        pa_alsa_port_data *port_data;

        port_data = PA_DEVICE_PORT_DATA(u->sink->active_port);

        if (port_data->suspend_when_unavailable && u->sink->active_port->available == PA_AVAILABLE_NO)
            pa_sink_suspend(u->sink, true, PA_SUSPEND_UNAVAILABLE);
    }

    return u->sink;

fail:
    pa_xfree(thread_name);

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

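/* Tears down everything pa_alsa_sink_new() set up: unlink the sink, stop the
 * IO thread, then release the PCM, mixer, rtpoll and reservation resources.
 * Every member is NULL-checked, so this is safe on partially initialized
 * userdata, which is why the fail path of pa_alsa_sink_new() uses it. */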
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->memchunk.memblock)
        pa_memblock_unref(u->memchunk.memblock);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Only free the mixer_path if the sink owns it */
    if (u->mixer_path && !u->mixer_path_set && !u->ucm_context)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixers)
        pa_hashmap_free(u->mixers);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    if (u->formats)
        pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free);

    if (u->supported_formats)
        pa_xfree(u->supported_formats);

    if (u->supported_rates)
        pa_xfree(u->supported_rates);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u->paths_dir);
    pa_xfree(u);
}

void pa_alsa_sink_free(pa_sink *s) {
    struct userdata *u;

    pa_sink_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}