/*
 * Pulseaudio input
 * Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
 * Copyright 2004-2006 Lennart Poettering
 * Copyright (c) 2014 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <pulse/rtclock.h>
#include <pulse/error.h>

#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"

#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "pulse_audio_common.h"
#include "timefilter.h"

#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)

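/* Private data of the PulseAudio input device: user-settable options followed
 * by the connection state (threaded mainloop, context, recording stream) and
 * the time filter used to smooth wallclock-based timestamps. */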
typedef struct PulseData {
    AVClass *class;
    char *server;
    char *name;
    char *stream_name;
    int  sample_rate;
    int  channels;
    int  frame_size;
    int  fragment_size;

    pa_threaded_mainloop *mainloop;
    pa_context *context;
    pa_stream *stream;
    size_t pa_frame_size;

    TimeFilter *timefilter;
    int last_period;
    int wallclock;
} PulseData;


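/* Error-handling helpers for code that runs while holding the threaded
 * mainloop lock: bail out to the given label with AVERROR_EXTERNAL if a
 * PulseAudio call failed or the context/stream is no longer in a good state. */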
#define CHECK_SUCCESS_GOTO(rerror, expression, label)                   \
    do {                                                                \
        if (!(expression)) {                                            \
            rerror = AVERROR_EXTERNAL;                                  \
            goto label;                                                 \
        }                                                               \
    } while (0)

#define CHECK_DEAD_GOTO(p, rerror, label)                               \
    do {                                                                \
        if (!(p)->context || !PA_CONTEXT_IS_GOOD(pa_context_get_state((p)->context)) || \
            !(p)->stream || !PA_STREAM_IS_GOOD(pa_stream_get_state((p)->stream))) { \
            rerror = AVERROR_EXTERNAL;                                  \
            goto label;                                                 \
        }                                                               \
    } while (0)

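/* The callbacks below run on the PulseAudio mainloop thread; they only wake
 * up code blocked in pa_threaded_mainloop_wait() so it can re-check the
 * context/stream state or read newly available data. */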
static void context_state_cb(pa_context *c, void *userdata) {
    PulseData *p = userdata;

    switch (pa_context_get_state(c)) {
    case PA_CONTEXT_READY:
    case PA_CONTEXT_TERMINATED:
    case PA_CONTEXT_FAILED:
        pa_threaded_mainloop_signal(p->mainloop, 0);
        break;
    }
}

static void stream_state_cb(pa_stream *s, void *userdata) {
    PulseData *p = userdata;

    switch (pa_stream_get_state(s)) {
    case PA_STREAM_READY:
    case PA_STREAM_FAILED:
    case PA_STREAM_TERMINATED:
        pa_threaded_mainloop_signal(p->mainloop, 0);
        break;
    }
}

static void stream_request_cb(pa_stream *s, size_t length, void *userdata) {
    PulseData *p = userdata;

    pa_threaded_mainloop_signal(p->mainloop, 0);
}

static void stream_latency_update_cb(pa_stream *s, void *userdata) {
    PulseData *p = userdata;

    pa_threaded_mainloop_signal(p->mainloop, 0);
}

static av_cold int pulse_close(AVFormatContext *s)
{
    PulseData *pd = s->priv_data;

    if (pd->mainloop)
        pa_threaded_mainloop_stop(pd->mainloop);

    if (pd->stream)
        pa_stream_unref(pd->stream);
    pd->stream = NULL;

    if (pd->context) {
        pa_context_disconnect(pd->context);
        pa_context_unref(pd->context);
    }
    pd->context = NULL;

    if (pd->mainloop)
        pa_threaded_mainloop_free(pd->mainloop);
    pd->mainloop = NULL;

    ff_timefilter_destroy(pd->timefilter);
    pd->timefilter = NULL;

    return 0;
}

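/* Set up the capture: create a threaded mainloop, connect a context and a
 * recording stream, wait for both to reach the READY state, then take the
 * negotiated fragment size and sample parameters for the AVStream. */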
static av_cold int pulse_read_header(AVFormatContext *s)
{
    PulseData *pd = s->priv_data;
    AVStream *st;
    char *device = NULL;
    int ret;
    enum AVCodecID codec_id =
        s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
    const pa_sample_spec ss = { ff_codec_id_to_pulse_format(codec_id),
                                pd->sample_rate,
                                pd->channels };

    pa_buffer_attr attr = { -1 };
    pa_channel_map cmap;
    const pa_buffer_attr *queried_attr;

    pa_channel_map_init_extend(&cmap, pd->channels, PA_CHANNEL_MAP_WAVEEX);

    st = avformat_new_stream(s, NULL);

    if (!st) {
        av_log(s, AV_LOG_ERROR, "Cannot add stream\n");
        return AVERROR(ENOMEM);
    }

    attr.fragsize = pd->fragment_size;

    if (s->url[0] != '\0' && strcmp(s->url, "default"))
        device = s->url;

    if (!(pd->mainloop = pa_threaded_mainloop_new())) {
        pulse_close(s);
        return AVERROR_EXTERNAL;
    }

    if (!(pd->context = pa_context_new(pa_threaded_mainloop_get_api(pd->mainloop), pd->name))) {
        pulse_close(s);
        return AVERROR_EXTERNAL;
    }

    pa_context_set_state_callback(pd->context, context_state_cb, pd);

    if (pa_context_connect(pd->context, pd->server, 0, NULL) < 0) {
        pulse_close(s);
        return AVERROR(pa_context_errno(pd->context));
    }

    pa_threaded_mainloop_lock(pd->mainloop);

    if (pa_threaded_mainloop_start(pd->mainloop) < 0) {
        ret = -1;
        goto unlock_and_fail;
    }

    for (;;) {
        pa_context_state_t state;

        state = pa_context_get_state(pd->context);

        if (state == PA_CONTEXT_READY)
            break;

        if (!PA_CONTEXT_IS_GOOD(state)) {
            ret = AVERROR(pa_context_errno(pd->context));
            goto unlock_and_fail;
        }

        /* Wait until the context is ready */
        pa_threaded_mainloop_wait(pd->mainloop);
    }

    if (!(pd->stream = pa_stream_new(pd->context, pd->stream_name, &ss, &cmap))) {
        ret = AVERROR(pa_context_errno(pd->context));
        goto unlock_and_fail;
    }

    pa_stream_set_state_callback(pd->stream, stream_state_cb, pd);
    pa_stream_set_read_callback(pd->stream, stream_request_cb, pd);
    pa_stream_set_write_callback(pd->stream, stream_request_cb, pd);
    pa_stream_set_latency_update_callback(pd->stream, stream_latency_update_cb, pd);

    ret = pa_stream_connect_record(pd->stream, device, &attr,
                                   PA_STREAM_INTERPOLATE_TIMING
                                   | (pd->fragment_size == -1 ? PA_STREAM_ADJUST_LATENCY : 0)
                                   | PA_STREAM_AUTO_TIMING_UPDATE);

    if (ret < 0) {
        ret = AVERROR(pa_context_errno(pd->context));
        goto unlock_and_fail;
    }

    for (;;) {
        pa_stream_state_t state;

        state = pa_stream_get_state(pd->stream);

        if (state == PA_STREAM_READY)
            break;

        if (!PA_STREAM_IS_GOOD(state)) {
            ret = AVERROR(pa_context_errno(pd->context));
            goto unlock_and_fail;
        }

        /* Wait until the stream is ready */
        pa_threaded_mainloop_wait(pd->mainloop);
    }

    /* Query actual fragment size */
    queried_attr = pa_stream_get_buffer_attr(pd->stream);
    if (!queried_attr || queried_attr->fragsize > INT_MAX/100) {
        ret = AVERROR_EXTERNAL;
        goto unlock_and_fail;
    }
    pd->fragment_size = queried_attr->fragsize;
    pd->pa_frame_size = pa_frame_size(&ss);

    pa_threaded_mainloop_unlock(pd->mainloop);

    /* take real parameters */
    st->codecpar->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id    = codec_id;
    st->codecpar->sample_rate = pd->sample_rate;
    st->codecpar->channels    = pd->channels;
    avpriv_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */

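    /* The time filter removes jitter from the wallclock timestamps taken in
     * pulse_read_packet(); its update period is one fragment expressed in
     * audio frames. */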
    pd->timefilter = ff_timefilter_new(1000000.0 / pd->sample_rate,
                                       pd->fragment_size / pd->pa_frame_size, 1.5E-6);

    if (!pd->timefilter) {
        pulse_close(s);
        return AVERROR(ENOMEM);
    }

    return 0;

unlock_and_fail:
    pa_threaded_mainloop_unlock(pd->mainloop);

    pulse_close(s);
    return ret;
}

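/* Read one packet: peek data from the record stream under the mainloop lock
 * until roughly one fragment has been copied, dropping each consumed chunk,
 * and timestamp the packet with a latency-corrected wallclock time. */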
static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    PulseData *pd = s->priv_data;
    int ret;
    size_t read_length;
    const void *read_data = NULL;
    int64_t dts;
    pa_usec_t latency;
    int negative;
    ptrdiff_t pos = 0;

    pa_threaded_mainloop_lock(pd->mainloop);

    CHECK_DEAD_GOTO(pd, ret, unlock_and_fail);

    while (pos < pd->fragment_size) {
        int r;

        r = pa_stream_peek(pd->stream, &read_data, &read_length);
        CHECK_SUCCESS_GOTO(ret, r == 0, unlock_and_fail);

        if (read_length <= 0) {
            pa_threaded_mainloop_wait(pd->mainloop);
            CHECK_DEAD_GOTO(pd, ret, unlock_and_fail);
        } else if (!read_data) {
            /* There's a hole in the stream, skip it. We could generate
             * silence, but that wouldn't work for compressed streams. */
            r = pa_stream_drop(pd->stream);
            CHECK_SUCCESS_GOTO(ret, r == 0, unlock_and_fail);
        } else {
            if (!pos) {
                if (av_new_packet(pkt, pd->fragment_size) < 0) {
                    ret = AVERROR(ENOMEM);
                    goto unlock_and_fail;
                }

                dts = av_gettime();
                pa_operation_unref(pa_stream_update_timing_info(pd->stream, NULL, NULL));

                if (pa_stream_get_latency(pd->stream, &latency, &negative) >= 0) {
                    if (negative) {
                        dts += latency;
                    } else
                        dts -= latency;
                } else {
                    av_log(s, AV_LOG_WARNING, "pa_stream_get_latency() failed\n");
                }
            }
            if (pkt->size - pos < read_length) {
                if (pos)
                    break;
                pa_stream_drop(pd->stream);
                /* Oversized fragment??? */
                ret = AVERROR_EXTERNAL;
                goto unlock_and_fail;
            }
            memcpy(pkt->data + pos, read_data, read_length);
            pos += read_length;
            pa_stream_drop(pd->stream);
        }
    }

    pa_threaded_mainloop_unlock(pd->mainloop);

    av_shrink_packet(pkt, pos);

    if (pd->wallclock)
        pkt->pts = ff_timefilter_update(pd->timefilter, dts, pd->last_period);
    pd->last_period = pkt->size / pd->pa_frame_size;

    return 0;

unlock_and_fail:
    av_packet_unref(pkt);
    pa_threaded_mainloop_unlock(pd->mainloop);
    return ret;
}

static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
    PulseData *s = h->priv_data;
    return ff_pulse_audio_get_devices(device_list, s->server, 0);
}

#define OFFSET(a) offsetof(PulseData, a)
#define D AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "server", "set PulseAudio server", OFFSET(server), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, D },
    { "name", "set application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, D },
    { "stream_name", "set stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = "record"}, 0, 0, D },
    { "sample_rate", "set sample rate in Hz", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, D },
    { "channels", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, D },
    { "frame_size", "set number of bytes per frame", OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, D },
    { "fragment_size", "set buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D },
    { "wallclock", "set the initial pts using the current time", OFFSET(wallclock), AV_OPT_TYPE_INT, {.i64 = 1}, -1, 1, D },
    { NULL },
};

static const AVClass pulse_demuxer_class = {
    .class_name = "Pulse indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};

AVInputFormat ff_pulse_demuxer = {
    .name            = "pulse",
    .long_name       = NULL_IF_CONFIG_SMALL("Pulse audio input"),
    .priv_data_size  = sizeof(PulseData),
    .read_header     = pulse_read_header,
    .read_packet     = pulse_read_packet,
    .read_close      = pulse_close,
    .get_device_list = pulse_get_device_list,
    .flags           = AVFMT_NOFILE,
    .priv_class      = &pulse_demuxer_class,
};