/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <syslog.h>

#include "audio_thread_log.h"
#include "byte_buffer.h"
#include "cras_fmt_conv.h"
#include "dev_stream.h"
#include "cras_audio_area.h"
#include "cras_mix.h"
#include "cras_server_metrics.h"
#include "cras_shm.h"

/* Adjust the device's sample rate by this step, faster or slower. Used
 * to make sure multiple active devices have stable buffer levels.
 */
static const int coarse_rate_adjust_step = 3;

/*
 * Allow the capture callback to fire this much earlier than the scheduled
 * next_cb_ts to avoid an extra wake of the audio thread.
 */
static const struct timespec capture_callback_fuzz_ts = {
	.tv_sec = 0,
	.tv_nsec = 1000000, /* 1 ms. */
};

/*
 * Returns the size in frames that a format converter must allocate for its
 * temporary buffers to be able to convert the specified number of stream
 * frames to or from the corresponding number of device frames, at the
 * specified device rate.
 */
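/*
 * For example (illustrative numbers): 1024 frames of a 44.1 kHz stream
 * correspond to roughly 1115 frames at a 48 kHz device rate, so the
 * converter buffers must hold MAX(1024, ~1115) + 1 frames.
 */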
unsigned int max_frames_for_conversion(unsigned int stream_frames,
				       unsigned int stream_rate,
				       unsigned int device_rate)
{
	/*
	 * There are multiple temp buffers in the format converter,
	 * which are all the same size. Some of these contain audio
	 * in the source sample rate, and others in the converted
	 * sample rate. We need to make sure the converter is large
	 * enough to hold either.
	 */
	return MAX(
		       // Number of stream frames does not require conversion.
		       stream_frames,
		       // Calculate corresponding number of frames at device rate.
		       cras_frames_at_rate(stream_rate, stream_frames,
					   device_rate))
	       /*
		* Add 1 because the linear resampler's frame rate
		* conversion does this, and is used to calculate
		* how many frames to read from the device.
		* See linear_resampler_{in,out}_frames_to_{out,in}(..)
		*/
	       + 1;
}

struct dev_stream *dev_stream_create(struct cras_rstream *stream,
				     unsigned int dev_id,
				     const struct cras_audio_format *dev_fmt,
				     void *dev_ptr, struct timespec *cb_ts)
{
	struct dev_stream *out;
	struct cras_audio_format *stream_fmt = &stream->format;
	int rc = 0;
	unsigned int max_frames, dev_frames, buf_bytes;
	const struct cras_audio_format *ofmt;

	out = calloc(1, sizeof(*out));
	out->dev_id = dev_id;
	out->stream = stream;
	out->dev_rate = dev_fmt->frame_rate;
	out->is_running = 0;

	max_frames = max_frames_for_conversion(stream->buffer_frames,
					       stream_fmt->frame_rate,
					       dev_fmt->frame_rate);

	if (stream->direction == CRAS_STREAM_OUTPUT) {
		rc = config_format_converter(&out->conv, stream->direction,
					     stream_fmt, dev_fmt, max_frames);
	} else {
		/*
		 * For input, take into account the stream-specific processing
		 * like AEC. Use the post-processing format to configure the
		 * format converter.
		 */
		ofmt = cras_rstream_post_processing_format(stream, dev_ptr) ?:
			       dev_fmt;
		rc = config_format_converter(&out->conv, stream->direction,
					     ofmt, stream_fmt, max_frames);
	}
	if (rc) {
		free(out);
		return NULL;
	}

	ofmt = cras_fmt_conv_out_format(out->conv);

	dev_frames =
		(stream->direction == CRAS_STREAM_OUTPUT) ?
			cras_fmt_conv_in_frames_to_out(out->conv,
						       stream->buffer_frames) :
			cras_fmt_conv_out_frames_to_in(out->conv,
						       stream->buffer_frames);

	out->conv_buffer_size_frames =
		2 * MAX(dev_frames, stream->buffer_frames);
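	/*
	 * For example (illustrative numbers): a stream with 2048
	 * buffer_frames at 48 kHz on a 44.1 kHz device maps to about 1882
	 * device frames, so the conversion buffer holds
	 * 2 * MAX(~1882, 2048) = 4096 frames.
	 */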

	/* Create conversion buffer and area using the output format
	 * of the format converter. Note that this format might not be
	 * identical to stream_fmt for capture. */
	buf_bytes = out->conv_buffer_size_frames * cras_get_format_bytes(ofmt);
	out->conv_buffer = byte_buffer_create(buf_bytes);
	out->conv_area = cras_audio_area_create(ofmt->num_channels);

	cras_frames_to_time(cras_rstream_get_cb_threshold(stream),
			    stream_fmt->frame_rate, &stream->sleep_interval_ts);
	stream->next_cb_ts = *cb_ts;

	cras_rstream_dev_attach(stream, dev_id, dev_ptr);

	return out;
}

void dev_stream_destroy(struct dev_stream *dev_stream)
{
	cras_rstream_dev_detach(dev_stream->stream, dev_stream->dev_id);
	if (dev_stream->conv) {
		cras_audio_area_destroy(dev_stream->conv_area);
		cras_fmt_conv_destroy(&dev_stream->conv);
		byte_buffer_destroy(&dev_stream->conv_buffer);
	}
	free(dev_stream);
}

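/*
 * For example (illustrative numbers): with dev_rate 48000, dev_rate_ratio
 * 1.01, master_rate_ratio 1.00 and coarse_rate_adjust +1, a non-master
 * device is resampled toward 48000 * 1.01 / 1.00 + 3 = 48483 frames per
 * second to keep its buffer level in line with the master device.
 */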
void dev_stream_set_dev_rate(struct dev_stream *dev_stream,
			     unsigned int dev_rate, double dev_rate_ratio,
			     double master_rate_ratio, int coarse_rate_adjust)
{
	if (dev_stream->dev_id == dev_stream->stream->master_dev.dev_id) {
		cras_fmt_conv_set_linear_resample_rates(dev_stream->conv,
							dev_rate, dev_rate);
		cras_frames_to_time_precise(
			cras_rstream_get_cb_threshold(dev_stream->stream),
			dev_stream->stream->format.frame_rate * dev_rate_ratio,
			&dev_stream->stream->sleep_interval_ts);
	} else {
		double new_rate =
			dev_rate * dev_rate_ratio / master_rate_ratio +
			coarse_rate_adjust_step * coarse_rate_adjust;
		cras_fmt_conv_set_linear_resample_rates(dev_stream->conv,
							dev_rate, new_rate);
	}
}

int dev_stream_mix(struct dev_stream *dev_stream,
		   const struct cras_audio_format *fmt, uint8_t *dst,
		   unsigned int num_to_write)
{
	struct cras_rstream *rstream = dev_stream->stream;
	uint8_t *src;
	uint8_t *target = dst;
	unsigned int fr_written, fr_read;
	unsigned int buffer_offset;
	int fr_in_buf;
	unsigned int num_samples;
	size_t frames = 0;
	unsigned int dev_frames;
	float mix_vol;

	fr_in_buf = dev_stream_playback_frames(dev_stream);
	if (fr_in_buf <= 0)
		return fr_in_buf;
	if (fr_in_buf < num_to_write)
		num_to_write = fr_in_buf;

	buffer_offset = cras_rstream_dev_offset(rstream, dev_stream->dev_id);

	/* Stream volume scaler. */
	mix_vol = cras_rstream_get_volume_scaler(dev_stream->stream);

	fr_written = 0;
	fr_read = 0;
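	/*
	 * Mix one contiguous chunk of the stream's readable frames into dst
	 * per iteration, converting to the device format first when needed,
	 * until num_to_write device frames are written or the stream runs
	 * out of readable data.
	 */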
	while (fr_written < num_to_write) {
		unsigned int read_frames;
		src = cras_rstream_get_readable_frames(
			rstream, buffer_offset + fr_read, &frames);
		if (frames == 0)
			break;
		if (cras_fmt_conversion_needed(dev_stream->conv)) {
			read_frames = frames;
			dev_frames = cras_fmt_conv_convert_frames(
				dev_stream->conv, src,
				dev_stream->conv_buffer->bytes, &read_frames,
				num_to_write - fr_written);
			src = dev_stream->conv_buffer->bytes;
		} else {
			dev_frames = MIN(frames, num_to_write - fr_written);
			read_frames = dev_frames;
		}
		num_samples = dev_frames * fmt->num_channels;
		cras_mix_add(fmt->format, target, src, num_samples, 1,
			     cras_rstream_get_mute(rstream), mix_vol);
		target += dev_frames * cras_get_format_bytes(fmt);
		fr_written += dev_frames;
		fr_read += read_frames;
	}

	cras_rstream_dev_offset_update(rstream, fr_read, dev_stream->dev_id);
	ATLOG(atlog, AUDIO_THREAD_DEV_STREAM_MIX, fr_written, fr_read, 0);

	return fr_written;
}

/* Copy from the captured buffer to the temporary format converted buffer. */
static unsigned int capture_with_fmt_conv(struct dev_stream *dev_stream,
					  const uint8_t *source_samples,
					  unsigned int num_frames)
{
	const struct cras_audio_format *source_format;
	const struct cras_audio_format *dst_format;
	uint8_t *buffer;
	unsigned int total_read = 0;
	unsigned int write_frames;
	unsigned int read_frames;
	unsigned int source_frame_bytes;
	unsigned int dst_frame_bytes;

	source_format = cras_fmt_conv_in_format(dev_stream->conv);
	source_frame_bytes = cras_get_format_bytes(source_format);
	dst_format = cras_fmt_conv_out_format(dev_stream->conv);
	dst_frame_bytes = cras_get_format_bytes(dst_format);

	dev_stream->conv_area->num_channels = dst_format->num_channels;

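	/*
	 * Convert source frames into the ring buffer one chunk at a time,
	 * stopping when all captured frames are consumed or the conversion
	 * buffer has no more writable space.
	 */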
	while (total_read < num_frames) {
		buffer = buf_write_pointer_size(dev_stream->conv_buffer,
						&write_frames);
		write_frames /= dst_frame_bytes;
		if (write_frames == 0)
			break;

		read_frames = num_frames - total_read;
		write_frames = cras_fmt_conv_convert_frames(
			dev_stream->conv, source_samples, buffer, &read_frames,
			write_frames);
		total_read += read_frames;
		source_samples += read_frames * source_frame_bytes;
		buf_increment_write(dev_stream->conv_buffer,
				    write_frames * dst_frame_bytes);
	}

	return total_read;
}

/* Copy from the converted buffer to the stream shm. These have the same format
 * at this point. */
static unsigned int
capture_copy_converted_to_stream(struct dev_stream *dev_stream,
				 struct cras_rstream *rstream,
				 float software_gain_scaler)
{
	struct cras_audio_shm *shm;
	uint8_t *stream_samples;
	uint8_t *converted_samples;
	unsigned int num_frames;
	unsigned int total_written = 0;
	unsigned int write_frames;
	unsigned int frame_bytes;
	unsigned int offset;
	const struct cras_audio_format *fmt;

	shm = cras_rstream_shm(rstream);

	fmt = cras_fmt_conv_out_format(dev_stream->conv);
	frame_bytes = cras_get_format_bytes(fmt);

	offset = cras_rstream_dev_offset(rstream, dev_stream->dev_id);

	stream_samples = cras_shm_get_writeable_frames(
		shm, cras_rstream_get_cb_threshold(rstream),
		&rstream->audio_area->frames);
	num_frames = MIN(rstream->audio_area->frames - offset,
			 buf_queued(dev_stream->conv_buffer) / frame_bytes);

	ATLOG(atlog, AUDIO_THREAD_CONV_COPY, shm->header->write_buf_idx,
	      rstream->audio_area->frames, offset);

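	/*
	 * Copy one contiguous chunk of converted samples into the stream's
	 * shm per iteration, applying the software gain and advancing this
	 * device's offset into the stream buffer as we go.
	 */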
	while (total_written < num_frames) {
		converted_samples = buf_read_pointer_size(
			dev_stream->conv_buffer, &write_frames);
		write_frames /= frame_bytes;
		write_frames = MIN(write_frames, num_frames - total_written);

		cras_audio_area_config_buf_pointers(dev_stream->conv_area, fmt,
						    converted_samples);
		cras_audio_area_config_channels(dev_stream->conv_area, fmt);
		dev_stream->conv_area->frames = write_frames;

		cras_audio_area_config_buf_pointers(
			rstream->audio_area, &rstream->format, stream_samples);

		cras_audio_area_copy(rstream->audio_area, offset,
				     &rstream->format, dev_stream->conv_area, 0,
				     software_gain_scaler);

		buf_increment_read(dev_stream->conv_buffer,
				   write_frames * frame_bytes);
		total_written += write_frames;
		cras_rstream_dev_offset_update(rstream, write_frames,
					       dev_stream->dev_id);
		offset = cras_rstream_dev_offset(rstream, dev_stream->dev_id);
	}

	ATLOG(atlog, AUDIO_THREAD_CAPTURE_WRITE, rstream->stream_id,
	      total_written, cras_shm_frames_written(shm));
	return total_written;
}

unsigned int dev_stream_capture(struct dev_stream *dev_stream,
				const struct cras_audio_area *area,
				unsigned int area_offset,
				float software_gain_scaler)
{
	struct cras_rstream *rstream = dev_stream->stream;
	struct cras_audio_shm *shm;
	uint8_t *stream_samples;
	unsigned int nread;

	/* Check if format conversion is needed. */
	if (cras_fmt_conversion_needed(dev_stream->conv)) {
		unsigned int format_bytes, fr_to_capture;

		fr_to_capture = dev_stream_capture_avail(dev_stream);
		fr_to_capture = MIN(fr_to_capture, area->frames - area_offset);

		format_bytes = cras_get_format_bytes(
			cras_fmt_conv_in_format(dev_stream->conv));
		nread = capture_with_fmt_conv(
			dev_stream,
			area->channels[0].buf + area_offset * format_bytes,
			fr_to_capture);

		capture_copy_converted_to_stream(dev_stream, rstream,
						 software_gain_scaler);
	} else {
		unsigned int offset =
			cras_rstream_dev_offset(rstream, dev_stream->dev_id);

		/* Set up the shm area and copy to it. */
		shm = cras_rstream_shm(rstream);
		stream_samples = cras_shm_get_writeable_frames(
			shm, cras_rstream_get_cb_threshold(rstream),
			&rstream->audio_area->frames);
		cras_audio_area_config_buf_pointers(
			rstream->audio_area, &rstream->format, stream_samples);

		nread = cras_audio_area_copy(rstream->audio_area, offset,
					     &rstream->format, area,
					     area_offset, software_gain_scaler);

		ATLOG(atlog, AUDIO_THREAD_CAPTURE_WRITE, rstream->stream_id,
		      nread, cras_shm_frames_written(shm));
		cras_rstream_dev_offset_update(rstream, nread,
					       dev_stream->dev_id);
	}

	return nread;
}

int dev_stream_attached_devs(const struct dev_stream *dev_stream)
{
	return dev_stream->stream->num_attached_devs;
}

void dev_stream_update_frames(const struct dev_stream *dev_stream)
{
	cras_rstream_update_queued_frames(dev_stream->stream);
}

int dev_stream_playback_frames(const struct dev_stream *dev_stream)
{
	int frames;

	frames = cras_rstream_playable_frames(dev_stream->stream,
					      dev_stream->dev_id);
	if (frames < 0)
		return frames;

	if (!dev_stream->conv)
		return frames;

	return cras_fmt_conv_in_frames_to_out(dev_stream->conv, frames);
}

unsigned int dev_stream_cb_threshold(const struct dev_stream *dev_stream)
{
	const struct cras_rstream *rstream = dev_stream->stream;
	unsigned int cb_threshold = cras_rstream_get_cb_threshold(rstream);

	if (rstream->direction == CRAS_STREAM_OUTPUT)
		return cras_fmt_conv_in_frames_to_out(dev_stream->conv,
						      cb_threshold);
	else
		return cras_fmt_conv_out_frames_to_in(dev_stream->conv,
						      cb_threshold);
}

unsigned int dev_stream_capture_avail(const struct dev_stream *dev_stream)
{
	struct cras_audio_shm *shm;
	struct cras_rstream *rstream = dev_stream->stream;
	unsigned int frames_avail;
	unsigned int conv_buf_level;
	unsigned int format_bytes;
	unsigned int wlimit;
	unsigned int dev_offset =
		cras_rstream_dev_offset(rstream, dev_stream->dev_id);

	shm = cras_rstream_shm(rstream);

	wlimit = cras_rstream_get_max_write_frames(rstream);
	wlimit -= dev_offset;
	cras_shm_get_writeable_frames(shm, wlimit, &frames_avail);

	if (!dev_stream->conv)
		return frames_avail;

	format_bytes = cras_get_format_bytes(
		cras_fmt_conv_out_format(dev_stream->conv));

	/* Sample rate conversion may leave some samples in conv_buffer;
	 * take this buffer into account. */
	conv_buf_level = buf_queued(dev_stream->conv_buffer) / format_bytes;
	if (frames_avail <= conv_buf_level)
		return 0;
	else
		frames_avail -= conv_buf_level;

	frames_avail =
		MIN(frames_avail,
		    buf_available(dev_stream->conv_buffer) / format_bytes);

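	/*
	 * Convert the stream-side frame count back to device frames. For
	 * example (illustrative numbers), 160 available frames of a 16 kHz
	 * stream correspond to 480 frames to read from a 48 kHz device.
	 */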
	return cras_fmt_conv_out_frames_to_in(dev_stream->conv, frames_avail);
}

/* TODO(dgreid) remove this hack to reset the time if needed. */
static void check_next_wake_time(struct dev_stream *dev_stream)
{
	struct cras_rstream *rstream = dev_stream->stream;
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC_RAW, &now);
	if (timespec_after(&now, &rstream->next_cb_ts)) {
		rstream->next_cb_ts = now;
		add_timespecs(&rstream->next_cb_ts,
			      &rstream->sleep_interval_ts);
		ATLOG(atlog, AUDIO_THREAD_STREAM_RESCHEDULE, rstream->stream_id,
		      rstream->next_cb_ts.tv_sec, rstream->next_cb_ts.tv_nsec);
		cras_server_metrics_missed_cb_event(rstream);
	}
}

void dev_stream_update_next_wake_time(struct dev_stream *dev_stream)
{
	struct cras_rstream *rstream = dev_stream->stream;

	/*
	 * An empty next_cb_ts means this is the first update for this input
	 * stream. Initialize next_cb_ts without recording a missed callback.
	 */
	if (rstream->direction == CRAS_STREAM_INPUT &&
	    !timespec_is_nonzero(&rstream->next_cb_ts)) {
		clock_gettime(CLOCK_MONOTONIC_RAW, &rstream->next_cb_ts);
		add_timespecs(&rstream->next_cb_ts,
			      &rstream->sleep_interval_ts);
		return;
	}
	/* Update next callback time according to the perfect schedule. */
	add_timespecs(&rstream->next_cb_ts, &rstream->sleep_interval_ts);
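	/*
	 * For example, a 480-frame callback threshold at 48 kHz gives a
	 * 10 ms sleep_interval_ts, so next_cb_ts advances in 10 ms steps.
	 */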
	/* Reset the schedule if it has been missed. */
	check_next_wake_time(dev_stream);
}

int dev_stream_playback_update_rstream(struct dev_stream *dev_stream)
{
	cras_rstream_update_output_read_pointer(dev_stream->stream);
	return 0;
}

static int late_enough_for_capture_callback(struct dev_stream *dev_stream)
{
	struct timespec now;
	struct cras_rstream *rstream = dev_stream->stream;
	clock_gettime(CLOCK_MONOTONIC_RAW, &now);
	add_timespecs(&now, &capture_callback_fuzz_ts);
	return timespec_after(&now, &rstream->next_cb_ts);
}

int dev_stream_capture_update_rstream(struct dev_stream *dev_stream)
{
	struct cras_rstream *rstream = dev_stream->stream;
	unsigned int frames_ready = cras_rstream_get_cb_threshold(rstream);
	int rc;

	if ((rstream->flags & TRIGGER_ONLY) && rstream->triggered)
		return 0;

	cras_rstream_update_input_write_pointer(rstream);

	/*
	 * For a stream without the BULK_AUDIO_OK flag, skip it if it isn't
	 * time for this stream yet.
	 */
	if (!(rstream->flags & BULK_AUDIO_OK) &&
	    !late_enough_for_capture_callback(dev_stream))
		return 0;

	/* If there is not enough data for one callback, skip it. */
	if (!cras_rstream_input_level_met(rstream))
		return 0;

	/* Enough data for this stream. */
	if (rstream->flags & BULK_AUDIO_OK)
		frames_ready = cras_rstream_level(rstream);

	ATLOG(atlog, AUDIO_THREAD_CAPTURE_POST, rstream->stream_id,
	      frames_ready, rstream->shm->header->read_buf_idx);

	rc = cras_rstream_audio_ready(rstream, frames_ready);

	if (rc < 0)
		return rc;

	if (rstream->flags & TRIGGER_ONLY)
		rstream->triggered = 1;

	dev_stream_update_next_wake_time(dev_stream);

	return 0;
}

void cras_set_playback_timestamp(size_t frame_rate, size_t frames,
				 struct cras_timespec *ts)
{
	cras_clock_gettime(CLOCK_MONOTONIC_RAW, ts);

	/* For playback, we want now + samples left to be played.
	 * ts = time the next written sample will be played to the DAC.
	 */
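	/* For example, 480 frames still queued at 48 kHz place ts 10 ms
	 * after the current time. */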
	ts->tv_nsec += frames * 1000000000ULL / frame_rate;
	while (ts->tv_nsec > 1000000000ULL) {
		ts->tv_sec++;
		ts->tv_nsec -= 1000000000ULL;
	}
}

void cras_set_capture_timestamp(size_t frame_rate, size_t frames,
				struct cras_timespec *ts)
{
	long tmp;

	cras_clock_gettime(CLOCK_MONOTONIC_RAW, ts);

	/* For capture, we want now - samples left to be read.
	 * ts = time the next sample to be read was captured at the ADC.
	 */
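	/* For example, 480 frames already buffered at 48 kHz place ts 10 ms
	 * before the current time. */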
	tmp = frames * (1000000000L / frame_rate);
	while (tmp > 1000000000L) {
		tmp -= 1000000000L;
		ts->tv_sec--;
	}
	if (ts->tv_nsec >= tmp)
		ts->tv_nsec -= tmp;
	else {
		tmp -= ts->tv_nsec;
		ts->tv_nsec = 1000000000L - tmp;
		ts->tv_sec--;
	}
}

void dev_stream_set_delay(const struct dev_stream *dev_stream,
			  unsigned int delay_frames)
{
	struct cras_rstream *rstream = dev_stream->stream;
	struct cras_audio_shm *shm;
	unsigned int stream_frames;

	if (rstream->direction == CRAS_STREAM_OUTPUT) {
		shm = cras_rstream_shm(rstream);
		stream_frames = cras_fmt_conv_out_frames_to_in(dev_stream->conv,
							       delay_frames);
		cras_set_playback_timestamp(rstream->format.frame_rate,
					    stream_frames +
						    cras_shm_get_frames(shm),
					    &shm->header->ts);
	} else {
		shm = cras_rstream_shm(rstream);
		stream_frames = cras_fmt_conv_in_frames_to_out(dev_stream->conv,
							       delay_frames);
		if (cras_shm_frames_written(shm) == 0)
			cras_set_capture_timestamp(rstream->format.frame_rate,
						   stream_frames,
						   &shm->header->ts);
	}
}

int dev_stream_request_playback_samples(struct dev_stream *dev_stream,
					const struct timespec *now)
{
	int rc;

	rc = cras_rstream_request_audio(dev_stream->stream, now);
	if (rc < 0)
		return rc;

	dev_stream_update_next_wake_time(dev_stream);

	return 0;
}

int dev_stream_poll_stream_fd(const struct dev_stream *dev_stream)
{
	const struct cras_rstream *stream = dev_stream->stream;

	/* For streams which rely on dev-level timing, let the client's
	 * reply wake the audio thread up. */
	if (stream_uses_input(stream) && (stream->flags & USE_DEV_TIMING) &&
	    cras_rstream_is_pending_reply(stream))
		return stream->fd;

	if (!stream_uses_output(stream) ||
	    !cras_rstream_is_pending_reply(stream) ||
	    cras_rstream_get_is_draining(stream))
		return -1;

	return stream->fd;
}

/*
 * Gets the proper wake-up time for an input stream. It considers both the
 * time for samples to reach one callback level and the time of the next
 * callback.
 * Returns:
 *     0 on success; a negative error code on failure. A positive value if
 *     there is no need to set a wake-up time for this stream.
 */
static int get_input_wake_time(struct dev_stream *dev_stream,
			       unsigned int curr_level,
			       struct timespec *level_tstamp,
			       unsigned int cap_limit, int is_cap_limit_stream,
			       struct timespec *wake_time_out)
{
	struct cras_rstream *rstream = dev_stream->stream;
	struct timespec time_for_sample;
	int needed_frames_from_device;

	needed_frames_from_device = dev_stream_capture_avail(dev_stream);

	/*
	 * If this stream is not the cap_limit stream and it needs more
	 * frames than the capture limit from the audio thread, don't bother
	 * re-calculating the wake time for it, because
	 * |needed_frames_from_device| cannot all be copied to shm until
	 * the cap_limit stream gets its samples in shm read by its client,
	 * relieving the cap_limit.
	 *
	 * Note that we need to know whether this stream is the cap_limit
	 * stream here because the client of the cap_limit stream may read
	 * the data from shm during this time window, causing
	 * needed_frames_from_device to be greater than the cap_limit that
	 * was calculated before.
	 */
	if (!is_cap_limit_stream && needed_frames_from_device > cap_limit)
		return 1;

	/*
	 * For a capture stream using device timing, the flow would be:
	 * 1. Device has less than one cb_threshold of data.
	 * 2. Device has a large chunk of data that the client needs to
	 *    consume in multiple cycles.
	 * 3. Audio thread sends one block to the client and goes to sleep.
	 * 4. Client sends a reply to wake up the audio thread.
	 * 5. Repeat 3 and 4 until there is less than one cb_threshold of data.
	 * 6. Go to 1.
	 *
	 * In 1, we schedule the next wake-up time based on the needed frames.
	 * This is needed to poll the samples from the device.
	 *
	 * In 3, we do not schedule a wake-up time for this stream.
	 * We let the reply from the client wake up the audio thread to send
	 * the next cb_threshold of data.
	 *
	 * TODO(cychiang) Do we want to actually block sending data to the
	 * client until the client replies? Or is controlling the scheduling
	 * of the wake-up time enough?
	 */
	if ((rstream->flags & USE_DEV_TIMING) &&
	    cras_rstream_is_pending_reply(rstream))
		return 1;

	*wake_time_out = rstream->next_cb_ts;

	/*
	 * If the current frames in the device can provide the needed amount
	 * for the stream, there is no need to wait.
	 */
	if (curr_level >= needed_frames_from_device)
		needed_frames_from_device = 0;
	else
		needed_frames_from_device -= curr_level;

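	/*
	 * For example (illustrative numbers): if the stream still needs 512
	 * device frames and curr_level is 200, the remaining 312 frames at a
	 * 48 kHz device rate add roughly 6.5 ms to level_tstamp below.
	 */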
	cras_frames_to_time(needed_frames_from_device, dev_stream->dev_rate,
			    &time_for_sample);

	add_timespecs(&time_for_sample, level_tstamp);

	/* Select the time that is later so both the sample and time
	 * conditions are met. */
	if (timespec_after(&time_for_sample, &rstream->next_cb_ts))
		*wake_time_out = time_for_sample;
	/* Using device timing means the stream ignores the next callback
	 * time. */
	if (rstream->flags & USE_DEV_TIMING)
		*wake_time_out = time_for_sample;

	ATLOG(atlog, AUDIO_THREAD_STREAM_SLEEP_TIME,
	      dev_stream->stream->stream_id, wake_time_out->tv_sec,
	      wake_time_out->tv_nsec);

	return 0;
}

int dev_stream_wake_time(struct dev_stream *dev_stream, unsigned int curr_level,
			 struct timespec *level_tstamp, unsigned int cap_limit,
			 int is_cap_limit_stream,
			 struct timespec *wake_time_out)
{
	if (dev_stream->stream->direction == CRAS_STREAM_OUTPUT) {
		/*
		 * TODO(cychiang) Implement the method for output streams.
		 * The logic should be similar to what
		 * get_next_stream_wake_from_list in audio_thread.c is doing.
		 */
		return -EINVAL;
	}

	return get_input_wake_time(dev_stream, curr_level, level_tstamp,
				   cap_limit, is_cap_limit_stream,
				   wake_time_out);
}

int dev_stream_is_pending_reply(const struct dev_stream *dev_stream)
{
	return cras_rstream_is_pending_reply(dev_stream->stream);
}

int dev_stream_flush_old_audio_messages(struct dev_stream *dev_stream)
{
	return cras_rstream_flush_old_audio_messages(dev_stream->stream);
}