// SPDX-License-Identifier: GPL-2.0
//
// xfer-libasound-irq-rw.c - IRQ-based scheduling model for read/write operation.
//
// Copyright (c) 2018 Takashi Sakamoto <o-takashi@sakamocchi.jp>
//
// Licensed under the terms of the GNU General Public License, version 2.

#include "xfer-libasound.h"
#include "misc.h"
#include "frame-cache.h"

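// Private data for the IRQ-based read/write model. The 'process_frames'
// callback is selected in the pre-process step according to the stream
// direction and blocking mode, while 'cache' keeps data frames between the
// PCM substream and the mapper.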
struct rw_closure {
	snd_pcm_access_t access;
	int (*process_frames)(struct libasound_state *state,
			      snd_pcm_state_t status, unsigned int *frame_count,
			      struct mapper_context *mapper,
			      struct container_context *cntrs);
	struct frame_cache cache;
};

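// Wait for an I/O event on the PCM substream, up to the time equivalent to
// one whole buffer, and check the returned poll events.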
static int wait_for_avail(struct libasound_state *state)
{
	unsigned int msec_per_buffer;
	unsigned short revents;
	unsigned short event;
	int err;

	// To be safe, wait up to the time equivalent to all audio data frames
	// in the buffer instead of one period.
	err = snd_pcm_hw_params_get_buffer_time(state->hw_params,
						&msec_per_buffer, NULL);
	if (err < 0)
		return err;
	// The buffer time is reported in microseconds; convert to msec.
	msec_per_buffer /= 1000;

	// Wait for a hardware IRQ when no frames are available.
	err = xfer_libasound_wait_event(state, msec_per_buffer, &revents);
	if (err < 0)
		return err;

	// TODO: error reporting.
	if (revents & POLLERR)
		return -EIO;

	if (snd_pcm_stream(state->handle) == SND_PCM_STREAM_CAPTURE)
		event = POLLIN;
	else
		event = POLLOUT;

	if (!(revents & event))
		return -EAGAIN;

	return 0;
}

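// Read data frames from the PCM substream into the cache, then hand the
// cached frames to the mapper so that they are written out to the containers.
// On success, '*frame_count' is updated to the number of handled frames.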
static int read_frames(struct libasound_state *state, unsigned int *frame_count,
		       unsigned int avail_count, struct mapper_context *mapper,
		       struct container_context *cntrs)
{
	struct rw_closure *closure = state->private_data;
	snd_pcm_sframes_t handled_frame_count;
	unsigned int consumed_count;
	int err;

	// Trim up to the expected frame count.
	if (*frame_count < avail_count)
		avail_count = *frame_count;

	// Cache the required amount of frames.
	if (avail_count > frame_cache_get_count(&closure->cache)) {
		avail_count -= frame_cache_get_count(&closure->cache);

		// Execute the read operation according to the shape of buffer.
		// These operations automatically start the substream.
		if (closure->access == SND_PCM_ACCESS_RW_INTERLEAVED) {
			handled_frame_count = snd_pcm_readi(state->handle,
							closure->cache.buf_ptr,
							avail_count);
		} else {
			handled_frame_count = snd_pcm_readn(state->handle,
							closure->cache.buf_ptr,
							avail_count);
		}
		if (handled_frame_count < 0) {
			err = handled_frame_count;
			return err;
		}
		frame_cache_increase_count(&closure->cache, handled_frame_count);
		avail_count = frame_cache_get_count(&closure->cache);
	}

	// Write out to file descriptors.
	consumed_count = avail_count;
	err = mapper_context_process_frames(mapper, closure->cache.buf,
					    &consumed_count, cntrs);
	if (err < 0)
		return err;

	frame_cache_reduce(&closure->cache, consumed_count);

	*frame_count = consumed_count;

	return 0;
}

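// Capture with blocking I/O. When no frame is available yet, request just
// enough frames (avail_min, or the start threshold before the substream runs)
// so that the blocking read returns as soon as possible.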
static int r_process_frames_blocking(struct libasound_state *state,
				     snd_pcm_state_t status,
				     unsigned int *frame_count,
				     struct mapper_context *mapper,
				     struct container_context *cntrs)
{
	snd_pcm_sframes_t avail;
	snd_pcm_uframes_t avail_count;
	int err = 0;

	if (status == SND_PCM_STATE_RUNNING) {
		// Check the number of available data frames in the buffer.
		avail = snd_pcm_avail(state->handle);
		if (avail < 0) {
			err = avail;
			goto error;
		}
		avail_count = (snd_pcm_uframes_t)avail;

		if (avail_count == 0) {
			// Request the amount of data frames at which blocking
			// is released.
			err = snd_pcm_sw_params_get_avail_min(state->sw_params,
							      &avail_count);
			if (err < 0)
				goto error;
		}
	} else {
		// Request enough data frames for the PCM substream to start.
		snd_pcm_uframes_t frames_for_start_threshold;
		err = snd_pcm_sw_params_get_start_threshold(state->sw_params,
						&frames_for_start_threshold);
		if (err < 0)
			goto error;

		avail_count = (unsigned int)frames_for_start_threshold;
	}

	err = read_frames(state, frame_count, avail_count, mapper, cntrs);
	if (err < 0)
		goto error;

	return 0;
error:
	*frame_count = 0;
	return err;
}

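// Capture with non-blocking I/O. Start the substream explicitly, optionally
// wait for an I/O event, then read whatever frames are currently available.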
static int r_process_frames_nonblocking(struct libasound_state *state,
					snd_pcm_state_t status,
					unsigned int *frame_count,
					struct mapper_context *mapper,
					struct container_context *cntrs)
{
	snd_pcm_sframes_t avail;
	snd_pcm_uframes_t avail_count;
	int err = 0;

	if (status != SND_PCM_STATE_RUNNING) {
		err = snd_pcm_start(state->handle);
		if (err < 0)
			goto error;
	}

	if (state->use_waiter) {
		err = wait_for_avail(state);
		if (err < 0)
			goto error;
	}

	// Check the number of available data frames in the buffer.
	avail = snd_pcm_avail(state->handle);
	if (avail < 0) {
		err = avail;
		goto error;
	}
	avail_count = (snd_pcm_uframes_t)avail;

	if (avail_count == 0) {
		// Nothing to do; go on to the next iteration.
		err = 0;
		goto error;
	}

	err = read_frames(state, frame_count, avail_count, mapper, cntrs);
	if (err < 0)
		goto error;

	return 0;
error:
	*frame_count = 0;
	return err;
}

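// Pull data frames from the containers via the mapper into the cache, then
// write the cached frames to the PCM substream. On success, '*frame_count' is
// updated to the number of frames actually queued.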
static int write_frames(struct libasound_state *state,
			unsigned int *frame_count, unsigned int avail_count,
			struct mapper_context *mapper,
			struct container_context *cntrs)
{
	struct rw_closure *closure = state->private_data;
	snd_pcm_uframes_t consumed_count;
	snd_pcm_sframes_t handled_frame_count;
	int err;

	// Trim up to the expected frame count.
	if (*frame_count < avail_count)
		avail_count = *frame_count;

	// Cache the required amount of frames.
	if (avail_count > frame_cache_get_count(&closure->cache)) {
		avail_count -= frame_cache_get_count(&closure->cache);

		// Read frames to transfer.
		err = mapper_context_process_frames(mapper,
				closure->cache.buf_ptr, &avail_count, cntrs);
		if (err < 0)
			return err;
		frame_cache_increase_count(&closure->cache, avail_count);
		avail_count = frame_cache_get_count(&closure->cache);
	}

	// Execute the write operation according to the shape of buffer. These
	// operations automatically start the substream.
	consumed_count = avail_count;
	if (closure->access == SND_PCM_ACCESS_RW_INTERLEAVED) {
		handled_frame_count = snd_pcm_writei(state->handle,
					closure->cache.buf, consumed_count);
	} else {
		handled_frame_count = snd_pcm_writen(state->handle,
					closure->cache.buf, consumed_count);
	}
	if (handled_frame_count < 0) {
		err = handled_frame_count;
		return err;
	}

	consumed_count = handled_frame_count;
	frame_cache_reduce(&closure->cache, consumed_count);

	*frame_count = consumed_count;

	return 0;
}

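// Playback with blocking I/O. Before the substream runs, queue at least one
// period (or the start threshold, whichever is larger) so that it starts
// without an immediate XRUN.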
static int w_process_frames_blocking(struct libasound_state *state,
				     snd_pcm_state_t status,
				     unsigned int *frame_count,
				     struct mapper_context *mapper,
				     struct container_context *cntrs)
{
	snd_pcm_sframes_t avail;
	unsigned int avail_count;
	int err;

	if (status == SND_PCM_STATE_RUNNING) {
		// Check the available space in the buffer.
		avail = snd_pcm_avail(state->handle);
		if (avail < 0) {
			err = avail;
			goto error;
		}
		avail_count = (unsigned int)avail;

		if (avail_count == 0) {
			// Fill with the amount of data frames at which
			// blocking is released.
			snd_pcm_uframes_t avail_min;
			err = snd_pcm_sw_params_get_avail_min(state->sw_params,
							      &avail_min);
			if (err < 0)
				goto error;
			avail_count = (unsigned int)avail_min;
		}
	} else {
		snd_pcm_uframes_t frames_for_start_threshold;
		snd_pcm_uframes_t frames_per_period;

		// Fill with enough data frames for the PCM substream to start.
		err = snd_pcm_sw_params_get_start_threshold(state->sw_params,
						&frames_for_start_threshold);
		if (err < 0)
			goto error;

		// The above number can be too small and cause an XRUN because
		// the I/O operation is done per period.
		err = snd_pcm_hw_params_get_period_size(state->hw_params,
							&frames_per_period,
							NULL);
		if (err < 0)
			goto error;

		// Use the larger of the two to prevent both an XRUN and
		// successive blocking.
		if (frames_for_start_threshold > frames_per_period)
			avail_count = (unsigned int)frames_for_start_threshold;
		else
			avail_count = (unsigned int)frames_per_period;
	}

	err = write_frames(state, frame_count, avail_count, mapper, cntrs);
	if (err < 0)
		goto error;

	return 0;
error:
	*frame_count = 0;
	return err;
}

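// Playback with non-blocking I/O. Optionally wait for an I/O event, then
// queue as many frames as the buffer currently has room for.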
static int w_process_frames_nonblocking(struct libasound_state *state,
					snd_pcm_state_t status,
					unsigned int *frame_count,
					struct mapper_context *mapper,
					struct container_context *cntrs)
{
	snd_pcm_sframes_t avail;
	unsigned int avail_count;
	int err;

	if (state->use_waiter) {
		err = wait_for_avail(state);
		if (err < 0)
			goto error;
	}

	// Check the available space in the buffer.
	avail = snd_pcm_avail(state->handle);
	if (avail < 0) {
		err = avail;
		goto error;
	}
	avail_count = (unsigned int)avail;

	if (avail_count == 0) {
		// Nothing to do; go on to the next iteration.
		err = 0;
		goto error;
	}

	err = write_frames(state, frame_count, avail_count, mapper, cntrs);
	if (err < 0)
		goto error;

	// NOTE: The substream starts automatically once the accumulated number
	// of queued data frames exceeds the start threshold.

	return 0;
error:
	*frame_count = 0;
	return err;
}

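// Allocate the frame cache according to the negotiated hardware parameters
// and select the proper 'process_frames' handler for the stream direction and
// blocking mode.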
static int irq_rw_pre_process(struct libasound_state *state)
{
	struct rw_closure *closure = state->private_data;
	snd_pcm_format_t format;
	snd_pcm_uframes_t frames_per_buffer;
	int bytes_per_sample;
	unsigned int samples_per_frame;
	int err;

	err = snd_pcm_hw_params_get_format(state->hw_params, &format);
	if (err < 0)
		return err;
	bytes_per_sample = snd_pcm_format_physical_width(format) / 8;
	if (bytes_per_sample <= 0)
		return -ENXIO;

	err = snd_pcm_hw_params_get_channels(state->hw_params,
					     &samples_per_frame);
	if (err < 0)
		return err;

	err = snd_pcm_hw_params_get_buffer_size(state->hw_params,
						&frames_per_buffer);
	if (err < 0)
		return err;

	err = snd_pcm_hw_params_get_access(state->hw_params, &closure->access);
	if (err < 0)
		return err;

	err = frame_cache_init(&closure->cache, closure->access,
			       bytes_per_sample, samples_per_frame,
			       frames_per_buffer);
	if (err < 0)
		return err;

	if (snd_pcm_stream(state->handle) == SND_PCM_STREAM_CAPTURE) {
		if (state->nonblock)
			closure->process_frames = r_process_frames_nonblocking;
		else
			closure->process_frames = r_process_frames_blocking;
	} else {
		if (state->nonblock)
			closure->process_frames = w_process_frames_nonblocking;
		else
			closure->process_frames = w_process_frames_blocking;
	}

	return 0;
}

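// Entry point for one iteration of the transfer loop.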
static int irq_rw_process_frames(struct libasound_state *state,
				 unsigned int *frame_count,
				 struct mapper_context *mapper,
				 struct container_context *cntrs)
{
	struct rw_closure *closure = state->private_data;
	snd_pcm_state_t status;

	// Any state other than RUNNING or PREPARED needs recovery by the
	// caller.
	status = snd_pcm_state(state->handle);
	if (status != SND_PCM_STATE_RUNNING && status != SND_PCM_STATE_PREPARED)
		return -EPIPE;

	// NOTE: The status can still shift at any time after the above check.
	return closure->process_frames(state, status, frame_count, mapper, cntrs);
}

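// Release the resources allocated in the pre-process step.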
static void irq_rw_post_process(struct libasound_state *state)
{
	struct rw_closure *closure = state->private_data;

	frame_cache_destroy(&closure->cache);
}

const struct xfer_libasound_ops xfer_libasound_irq_rw_ops = {
	.pre_process = irq_rw_pre_process,
	.process_frames = irq_rw_process_frames,
	.post_process = irq_rw_post_process,
	.private_size = sizeof(struct rw_closure),
};