/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <syslog.h>

#include "audio_thread_log.h"
#include "byte_buffer.h"
#include "cras_fmt_conv.h"
#include "dev_stream.h"
#include "cras_audio_area.h"
#include "cras_mix.h"
#include "cras_server_metrics.h"
#include "cras_shm.h"

/* Adjust the device's sample rate by this step, faster or slower. Used
 * to make sure multiple active devices have stable buffer levels.
 */
static const int coarse_rate_adjust_step = 3;

/*
 * Allow capture callback to fire this much earlier than the scheduled
 * next_cb_ts to avoid an extra wake of audio thread.
 */
static const struct timespec capture_callback_fuzz_ts = {
        .tv_sec = 0,
        .tv_nsec = 1000000, /* 1 ms. */
};

/*
 * Returns the size in frames that a format converter must allocate for its
 * temporary buffers to be able to convert the specified number of stream
 * frames to or from the corresponding number of device frames, at the
 * specified device rate.
 */
unsigned int max_frames_for_conversion(unsigned int stream_frames,
                                       unsigned int stream_rate,
                                       unsigned int device_rate)
{
        /*
         * There are multiple temp buffers in the format converter,
         * which are all the same size. Some of these contain audio
         * in the source sample rate, and others in the converted
         * sample rate. We need to make sure the converter is large
         * enough to hold either.
         */
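        /*
         * Illustrative example (editor's note, not from the original source):
         * a stream asking for 480 frames at 48000 Hz on a 44100 Hz device
         * needs MAX(480, 480 * 44100 / 48000) + 1 = MAX(480, 441) + 1 = 481
         * frames of temp buffer. Going the other way (a 44100 Hz stream on a
         * 48000 Hz device) the device-rate term dominates instead.
         */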
        return MAX(
                // Number of stream frames does not require conversion.
                stream_frames,
                // Calculate corresponding number of frames at device rate.
                cras_frames_at_rate(stream_rate, stream_frames,
                                    device_rate))
               /*
                * Add 1 because the linear resampler's frame rate
                * conversion does this, and is used to calculate
                * how many frames to read from the device.
                * See linear_resampler_{in,out}_frames_to_{out,in}(..)
                */
               + 1;
}

struct dev_stream *dev_stream_create(struct cras_rstream *stream,
                                     unsigned int dev_id,
                                     const struct cras_audio_format *dev_fmt,
                                     void *dev_ptr, struct timespec *cb_ts,
                                     const struct timespec *sleep_interval_ts)
{
        struct dev_stream *out;
        struct cras_audio_format *stream_fmt = &stream->format;
        int rc = 0;
        unsigned int max_frames, dev_frames, buf_bytes;
        const struct cras_audio_format *ofmt;

        out = calloc(1, sizeof(*out));
        if (!out)
                return NULL;
        out->dev_id = dev_id;
        out->stream = stream;
        out->dev_rate = dev_fmt->frame_rate;
        out->is_running = 0;

        max_frames = max_frames_for_conversion(stream->buffer_frames,
                                               stream_fmt->frame_rate,
                                               dev_fmt->frame_rate);
        if (stream->direction == CRAS_STREAM_OUTPUT) {
                rc = config_format_converter(&out->conv, stream->direction,
                                             stream_fmt, dev_fmt, max_frames);
        } else {
                /*
                 * For input, take into account stream-specific processing
                 * like AEC. The APM exists only in the input path and has no
                 * dependency on dev_stream. Start the APM in dev_stream's
                 * constructor just to align with its life cycle, then use the
                 * post-processing format to configure the format converter.
                 */
                cras_apm_list_start_apm(stream->apm_list, dev_ptr);
                ofmt = cras_rstream_post_processing_format(stream, dev_ptr) ?:
                               dev_fmt;
                rc = config_format_converter(&out->conv, stream->direction,
                                             ofmt, stream_fmt, max_frames);
        }
        if (rc) {
                free(out);
                return NULL;
        }

        ofmt = cras_fmt_conv_out_format(out->conv);

        dev_frames =
                (stream->direction == CRAS_STREAM_OUTPUT) ?
                        cras_fmt_conv_in_frames_to_out(out->conv,
                                                       stream->buffer_frames) :
                        cras_fmt_conv_out_frames_to_in(out->conv,
                                                       stream->buffer_frames);

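        /*
         * Editor's note (assumption, not from the original source): the
         * conversion buffer below is sized at twice the larger of the device
         * and stream frame counts, presumably to leave headroom for rate
         * drift and partially consumed conversion blocks.
         */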
        out->conv_buffer_size_frames =
                2 * MAX(dev_frames, stream->buffer_frames);

        /* Create conversion buffer and area using the output format
         * of the format converter. Note that this format might not be
         * identical to stream_fmt for capture. */
        buf_bytes = out->conv_buffer_size_frames * cras_get_format_bytes(ofmt);
        out->conv_buffer = byte_buffer_create(buf_bytes);
        out->conv_area = cras_audio_area_create(ofmt->num_channels);

        /* Use sleep interval hint from argument if it is provided. */
        if (sleep_interval_ts) {
                stream->sleep_interval_ts = *sleep_interval_ts;
        } else {
                cras_frames_to_time(cras_rstream_get_cb_threshold(stream),
                                    stream_fmt->frame_rate,
                                    &stream->sleep_interval_ts);
        }

        stream->next_cb_ts = *cb_ts;

        /* Sets up the stream & dev pair. */
        cras_rstream_dev_attach(stream, dev_id, dev_ptr);

        return out;
}

void dev_stream_destroy(struct dev_stream *dev_stream)
{
        void *dev_ptr =
                cras_rstream_dev_ptr(dev_stream->stream, dev_stream->dev_id);
        /* Stop the APM and then unlink the dev stream pair. */
        cras_apm_list_stop_apm(dev_stream->stream->apm_list, dev_ptr);
        cras_rstream_dev_detach(dev_stream->stream, dev_stream->dev_id);
        if (dev_stream->conv) {
                cras_audio_area_destroy(dev_stream->conv_area);
                cras_fmt_conv_destroy(&dev_stream->conv);
                byte_buffer_destroy(&dev_stream->conv_buffer);
        }
        free(dev_stream);
}

void dev_stream_set_dev_rate(struct dev_stream *dev_stream,
                             unsigned int dev_rate, double dev_rate_ratio,
                             double main_rate_ratio, int coarse_rate_adjust)
{
        if (dev_stream->dev_id == dev_stream->stream->main_dev.dev_id) {
                cras_fmt_conv_set_linear_resample_rates(dev_stream->conv,
                                                        dev_rate, dev_rate);
                cras_frames_to_time_precise(
                        cras_rstream_get_cb_threshold(dev_stream->stream),
                        dev_stream->stream->format.frame_rate * dev_rate_ratio,
                        &dev_stream->stream->sleep_interval_ts);
        } else {
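                /*
                 * Illustrative example (editor's note, not from the original
                 * source): with dev_rate = 48000, dev_rate_ratio = 1.002,
                 * main_rate_ratio = 1.000 and coarse_rate_adjust = 0, the
                 * linear resampler is asked to track 48000 -> 48096, i.e.
                 * this device runs 0.2% fast relative to the main device.
                 * A coarse_rate_adjust of +1 or -1 additionally nudges the
                 * target by coarse_rate_adjust_step (3) frames to pull the
                 * buffer level back toward a stable point.
                 */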
                double new_rate = dev_rate * dev_rate_ratio / main_rate_ratio +
                                  coarse_rate_adjust_step * coarse_rate_adjust;
                cras_fmt_conv_set_linear_resample_rates(dev_stream->conv,
                                                        dev_rate, new_rate);
        }
}

int dev_stream_mix(struct dev_stream *dev_stream,
                   const struct cras_audio_format *fmt, uint8_t *dst,
                   unsigned int num_to_write)
{
        struct cras_rstream *rstream = dev_stream->stream;
        uint8_t *src;
        uint8_t *target = dst;
        unsigned int fr_written, fr_read;
        unsigned int buffer_offset;
        int fr_in_buf;
        unsigned int num_samples;
        size_t frames = 0;
        unsigned int dev_frames;
        float mix_vol;

        fr_in_buf = dev_stream_playback_frames(dev_stream);
        if (fr_in_buf <= 0)
                return fr_in_buf;
        if (fr_in_buf < num_to_write)
                num_to_write = fr_in_buf;

        buffer_offset = cras_rstream_dev_offset(rstream, dev_stream->dev_id);

        /* Stream volume scaler. */
        mix_vol = cras_rstream_get_volume_scaler(dev_stream->stream);

        fr_written = 0;
        fr_read = 0;
        while (fr_written < num_to_write) {
                unsigned int read_frames;
                src = cras_rstream_get_readable_frames(
                        rstream, buffer_offset + fr_read, &frames);
                if (frames == 0)
                        break;
                if (cras_fmt_conversion_needed(dev_stream->conv)) {
                        read_frames = frames;
                        dev_frames = cras_fmt_conv_convert_frames(
                                dev_stream->conv, src,
                                dev_stream->conv_buffer->bytes, &read_frames,
                                num_to_write - fr_written);
                        src = dev_stream->conv_buffer->bytes;
                } else {
                        dev_frames = MIN(frames, num_to_write - fr_written);
                        read_frames = dev_frames;
                }
                num_samples = dev_frames * fmt->num_channels;
                cras_mix_add(fmt->format, target, src, num_samples, 1,
                             cras_rstream_get_mute(rstream), mix_vol);
                target += dev_frames * cras_get_format_bytes(fmt);
                fr_written += dev_frames;
                fr_read += read_frames;
        }

        cras_rstream_dev_offset_update(rstream, fr_read, dev_stream->dev_id);
        ATLOG(atlog, AUDIO_THREAD_DEV_STREAM_MIX, fr_written, fr_read, 0);

        return fr_written;
}

/* Copy from the captured buffer to the temporary format converted buffer. */
static unsigned int capture_with_fmt_conv(struct dev_stream *dev_stream,
                                          const uint8_t *source_samples,
                                          unsigned int num_frames)
{
        const struct cras_audio_format *source_format;
        const struct cras_audio_format *dst_format;
        uint8_t *buffer;
        unsigned int total_read = 0;
        unsigned int write_frames;
        unsigned int read_frames;
        unsigned int source_frame_bytes;
        unsigned int dst_frame_bytes;

        source_format = cras_fmt_conv_in_format(dev_stream->conv);
        source_frame_bytes = cras_get_format_bytes(source_format);
        dst_format = cras_fmt_conv_out_format(dev_stream->conv);
        dst_frame_bytes = cras_get_format_bytes(dst_format);

        dev_stream->conv_area->num_channels = dst_format->num_channels;

        while (total_read < num_frames) {
                buffer = buf_write_pointer_size(dev_stream->conv_buffer,
                                                &write_frames);
                write_frames /= dst_frame_bytes;
                if (write_frames == 0)
                        break;

                read_frames = num_frames - total_read;
                write_frames = cras_fmt_conv_convert_frames(
                        dev_stream->conv, source_samples, buffer, &read_frames,
                        write_frames);
                total_read += read_frames;
                source_samples += read_frames * source_frame_bytes;
                buf_increment_write(dev_stream->conv_buffer,
                                    (size_t)write_frames *
                                            (size_t)dst_frame_bytes);
        }

        return total_read;
}

/* Copy from the converted buffer to the stream shm. These have the same format
 * at this point. */
static unsigned int
capture_copy_converted_to_stream(struct dev_stream *dev_stream,
                                 struct cras_rstream *rstream,
                                 float software_gain_scaler)
{
        struct cras_audio_shm *shm;
        uint8_t *stream_samples;
        uint8_t *converted_samples;
        unsigned int num_frames;
        unsigned int total_written = 0;
        unsigned int write_frames;
        unsigned int frame_bytes;
        unsigned int offset;
        const struct cras_audio_format *fmt;

        shm = cras_rstream_shm(rstream);

        fmt = cras_fmt_conv_out_format(dev_stream->conv);
        frame_bytes = cras_get_format_bytes(fmt);

        offset = cras_rstream_dev_offset(rstream, dev_stream->dev_id);

        stream_samples = cras_shm_get_writeable_frames(
                shm, cras_rstream_get_cb_threshold(rstream),
                &rstream->audio_area->frames);
        num_frames = MIN(rstream->audio_area->frames - offset,
                         buf_queued(dev_stream->conv_buffer) / frame_bytes);

        ATLOG(atlog, AUDIO_THREAD_CONV_COPY, shm->header->write_buf_idx,
              rstream->audio_area->frames, offset);

        while (total_written < num_frames) {
                converted_samples = buf_read_pointer_size(
                        dev_stream->conv_buffer, &write_frames);
                write_frames /= frame_bytes;
                write_frames = MIN(write_frames, num_frames - total_written);

                cras_audio_area_config_buf_pointers(dev_stream->conv_area, fmt,
                                                    converted_samples);
                cras_audio_area_config_channels(dev_stream->conv_area, fmt);
                dev_stream->conv_area->frames = write_frames;

                cras_audio_area_config_buf_pointers(
                        rstream->audio_area, &rstream->format, stream_samples);

                cras_audio_area_copy(rstream->audio_area, offset,
                                     &rstream->format, dev_stream->conv_area,
                                     0, software_gain_scaler);

                buf_increment_read(dev_stream->conv_buffer,
                                   (size_t)write_frames * (size_t)frame_bytes);
                total_written += write_frames;
                cras_rstream_dev_offset_update(rstream, write_frames,
                                               dev_stream->dev_id);
                offset = cras_rstream_dev_offset(rstream, dev_stream->dev_id);
        }

        ATLOG(atlog, AUDIO_THREAD_CAPTURE_WRITE, rstream->stream_id,
              total_written, cras_shm_frames_written(shm));
        return total_written;
}

unsigned int dev_stream_capture(struct dev_stream *dev_stream,
                                const struct cras_audio_area *area,
                                unsigned int area_offset,
                                float software_gain_scaler)
{
        struct cras_rstream *rstream = dev_stream->stream;
        struct cras_audio_shm *shm;
        uint8_t *stream_samples;
        unsigned int nread;

        /* Check if format conversion is needed. */
        if (cras_fmt_conversion_needed(dev_stream->conv)) {
                unsigned int format_bytes, fr_to_capture;

                fr_to_capture = dev_stream_capture_avail(dev_stream);
                fr_to_capture = MIN(fr_to_capture, area->frames - area_offset);

                format_bytes = cras_get_format_bytes(
                        cras_fmt_conv_in_format(dev_stream->conv));
                nread = capture_with_fmt_conv(
                        dev_stream,
                        area->channels[0].buf + area_offset * format_bytes,
                        fr_to_capture);

                capture_copy_converted_to_stream(dev_stream, rstream,
                                                 software_gain_scaler);
        } else {
                unsigned int offset =
                        cras_rstream_dev_offset(rstream, dev_stream->dev_id);

                /* Set up the shm area and copy to it. */
                shm = cras_rstream_shm(rstream);
                stream_samples = cras_shm_get_writeable_frames(
                        shm, cras_rstream_get_cb_threshold(rstream),
                        &rstream->audio_area->frames);
                cras_audio_area_config_buf_pointers(
                        rstream->audio_area, &rstream->format, stream_samples);

                nread = cras_audio_area_copy(rstream->audio_area, offset,
                                             &rstream->format, area,
                                             area_offset,
                                             software_gain_scaler);

                ATLOG(atlog, AUDIO_THREAD_CAPTURE_WRITE, rstream->stream_id,
                      nread, cras_shm_frames_written(shm));
                cras_rstream_dev_offset_update(rstream, nread,
                                               dev_stream->dev_id);
        }

        return nread;
}

int dev_stream_attached_devs(const struct dev_stream *dev_stream)
{
        return dev_stream->stream->num_attached_devs;
}

void dev_stream_update_frames(const struct dev_stream *dev_stream)
{
        cras_rstream_update_queued_frames(dev_stream->stream);
}

int dev_stream_playback_frames(const struct dev_stream *dev_stream)
{
        int frames;

        frames = cras_rstream_playable_frames(dev_stream->stream,
                                              dev_stream->dev_id);
        if (frames < 0)
                return frames;

        if (!dev_stream->conv)
                return frames;

        return cras_fmt_conv_in_frames_to_out(dev_stream->conv, frames);
}

unsigned int dev_stream_cb_threshold(const struct dev_stream *dev_stream)
{
        const struct cras_rstream *rstream = dev_stream->stream;
        unsigned int cb_threshold = cras_rstream_get_cb_threshold(rstream);

        if (rstream->direction == CRAS_STREAM_OUTPUT)
                return cras_fmt_conv_in_frames_to_out(dev_stream->conv,
                                                      cb_threshold);
        else
                return cras_fmt_conv_out_frames_to_in(dev_stream->conv,
                                                      cb_threshold);
}

unsigned int dev_stream_capture_avail(const struct dev_stream *dev_stream)
{
        struct cras_audio_shm *shm;
        struct cras_rstream *rstream = dev_stream->stream;
        unsigned int frames_avail;
        unsigned int conv_buf_level;
        unsigned int format_bytes;
        unsigned int wlimit;
        unsigned int dev_offset =
                cras_rstream_dev_offset(rstream, dev_stream->dev_id);

        shm = cras_rstream_shm(rstream);

        wlimit = cras_rstream_get_max_write_frames(rstream);
        wlimit -= dev_offset;
        cras_shm_get_writeable_frames(shm, wlimit, &frames_avail);

        if (!dev_stream->conv)
                return frames_avail;

        format_bytes = cras_get_format_bytes(
                cras_fmt_conv_out_format(dev_stream->conv));

        /* Sample rate conversion may leave some samples in conv_buffer;
         * take this buffer into account. */
        conv_buf_level = buf_queued(dev_stream->conv_buffer) / format_bytes;
        if (frames_avail <= conv_buf_level)
                return 0;
        else
                frames_avail -= conv_buf_level;

        frames_avail =
                MIN(frames_avail,
                    buf_available(dev_stream->conv_buffer) / format_bytes);

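        /*
         * Illustrative example (editor's note, not from the original source):
         * frames_avail is counted here in the converted (stream) format. If a
         * 48000 Hz stream can still take 480 frames and the device runs at
         * 44100 Hz, the conversion below reports roughly 441 device frames
         * that may be captured.
         */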
        return cras_fmt_conv_out_frames_to_in(dev_stream->conv, frames_avail);
}

/* TODO(dgreid) remove this hack to reset the time if needed. */
static void check_next_wake_time(struct dev_stream *dev_stream)
{
        struct cras_rstream *rstream = dev_stream->stream;
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC_RAW, &now);
        if (timespec_after(&now, &rstream->next_cb_ts)) {
                rstream->next_cb_ts = now;
                add_timespecs(&rstream->next_cb_ts,
                              &rstream->sleep_interval_ts);
                ATLOG(atlog, AUDIO_THREAD_STREAM_RESCHEDULE,
                      rstream->stream_id, rstream->next_cb_ts.tv_sec,
                      rstream->next_cb_ts.tv_nsec);
                cras_server_metrics_missed_cb_event(rstream);
        }
}

void dev_stream_update_next_wake_time(struct dev_stream *dev_stream)
{
        struct cras_rstream *rstream = dev_stream->stream;

        /*
         * An empty next_cb_ts means this is the first update for an input
         * stream. Initialize next_cb_ts without recording a missed callback.
         */
        if (rstream->direction == CRAS_STREAM_INPUT &&
            !timespec_is_nonzero(&rstream->next_cb_ts)) {
                clock_gettime(CLOCK_MONOTONIC_RAW, &rstream->next_cb_ts);
                add_timespecs(&rstream->next_cb_ts,
                              &rstream->sleep_interval_ts);
                return;
        }
        /* Update the next callback time according to the perfect schedule. */
        add_timespecs(&rstream->next_cb_ts, &rstream->sleep_interval_ts);
        /* Reset the schedule if it was missed. */
        check_next_wake_time(dev_stream);
}

int dev_stream_playback_update_rstream(struct dev_stream *dev_stream)
{
        cras_rstream_update_output_read_pointer(dev_stream->stream);
        return 0;
}

static int late_enough_for_capture_callback(struct dev_stream *dev_stream)
{
        struct timespec now;
        struct cras_rstream *rstream = dev_stream->stream;
        clock_gettime(CLOCK_MONOTONIC_RAW, &now);
        add_timespecs(&now, &capture_callback_fuzz_ts);
        return timespec_after(&now, &rstream->next_cb_ts);
}

int dev_stream_capture_update_rstream(struct dev_stream *dev_stream)
{
        struct cras_rstream *rstream = dev_stream->stream;
        unsigned int frames_ready = cras_rstream_get_cb_threshold(rstream);
        int rc;

        if ((rstream->flags & TRIGGER_ONLY) && rstream->triggered)
                return 0;

        cras_rstream_update_input_write_pointer(rstream);

        /*
         * For streams without the BULK_AUDIO_OK flag, skip this stream if it
         * isn't time for it yet.
         */
        if (!(rstream->flags & BULK_AUDIO_OK) &&
            !late_enough_for_capture_callback(dev_stream))
                return 0;

        /* If there is not enough data for one callback, skip it. */
        if (!cras_rstream_input_level_met(rstream))
                return 0;

        /* Enough data for this stream. */
        if (rstream->flags & BULK_AUDIO_OK)
                frames_ready = cras_rstream_level(rstream);

        ATLOG(atlog, AUDIO_THREAD_CAPTURE_POST, rstream->stream_id,
              frames_ready, rstream->shm->header->read_buf_idx);

        rc = cras_rstream_audio_ready(rstream, frames_ready);

        if (rc < 0)
                return rc;

        if (rstream->flags & TRIGGER_ONLY)
                rstream->triggered = 1;

        dev_stream_update_next_wake_time(dev_stream);

        return 0;
}

void cras_set_playback_timestamp(size_t frame_rate, size_t frames,
                                 struct cras_timespec *ts)
{
        cras_clock_gettime(CLOCK_MONOTONIC_RAW, ts);

        /* For playback, want now + samples left to be played.
         * ts = time next written sample will be played to DAC.
         */
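        /*
         * Illustrative example (editor's note, not from the original source):
         * with 480 frames still queued at 48000 Hz, 480 * 1e9 / 48000 =
         * 10,000,000 ns (10 ms) is added to the current time, and the excess
         * nanoseconds are then folded into tv_sec below.
         */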
        ts->tv_nsec += frames * 1000000000ULL / frame_rate;
        while (ts->tv_nsec > 1000000000ULL) {
                ts->tv_sec++;
                ts->tv_nsec -= 1000000000ULL;
        }
}

void cras_set_capture_timestamp(size_t frame_rate, size_t frames,
                                struct cras_timespec *ts)
{
        long tmp;

        cras_clock_gettime(CLOCK_MONOTONIC_RAW, ts);

        /* For capture, now - samples left to be read.
         * ts = time next sample to be read was captured at ADC.
         */
        tmp = frames * (1000000000L / frame_rate);
        while (tmp > 1000000000L) {
                tmp -= 1000000000L;
                ts->tv_sec--;
        }
        if (ts->tv_nsec >= tmp)
                ts->tv_nsec -= tmp;
        else {
                tmp -= ts->tv_nsec;
                ts->tv_nsec = 1000000000L - tmp;
                ts->tv_sec--;
        }
}

void dev_stream_set_delay(const struct dev_stream *dev_stream,
                          unsigned int delay_frames)
{
        struct cras_rstream *rstream = dev_stream->stream;
        struct cras_audio_shm *shm;
        unsigned int stream_frames;

        if (rstream->direction == CRAS_STREAM_OUTPUT) {
                shm = cras_rstream_shm(rstream);
                stream_frames = cras_fmt_conv_out_frames_to_in(
                        dev_stream->conv, delay_frames);
                cras_set_playback_timestamp(rstream->format.frame_rate,
                                            stream_frames +
                                                    cras_shm_get_frames(shm),
                                            &shm->header->ts);
        } else {
                shm = cras_rstream_shm(rstream);
                stream_frames = cras_fmt_conv_in_frames_to_out(
                        dev_stream->conv, delay_frames);
                if (cras_shm_frames_written(shm) == 0)
                        cras_set_capture_timestamp(rstream->format.frame_rate,
                                                   stream_frames,
                                                   &shm->header->ts);
        }
}

int dev_stream_request_playback_samples(struct dev_stream *dev_stream,
                                        const struct timespec *now)
{
        int rc;

        rc = cras_rstream_request_audio(dev_stream->stream, now);
        if (rc < 0)
                return rc;

        dev_stream_update_next_wake_time(dev_stream);

        return 0;
}

int dev_stream_poll_stream_fd(const struct dev_stream *dev_stream)
{
        const struct cras_rstream *stream = dev_stream->stream;

        /* For streams which rely on dev level timing, we should let the
         * client's response wake the audio thread up. */
        if (stream_uses_input(stream) && (stream->flags & USE_DEV_TIMING) &&
            cras_rstream_is_pending_reply(stream))
                return stream->fd;

        if (!stream_uses_output(stream) ||
            !cras_rstream_is_pending_reply(stream) ||
            cras_rstream_get_is_draining(stream))
                return -1;

        return stream->fd;
}

/*
 * Gets the proper wake up time for an input stream. It considers both the
 * time for samples to reach one callback level, and the time for the next
 * callback.
 * Returns:
 *    0 on success; negative error code on failure. A positive value if
 *    there is no need to set a wake up time for this stream.
 */
static int get_input_wake_time(struct dev_stream *dev_stream,
                               unsigned int curr_level,
                               struct timespec *level_tstamp,
                               unsigned int cap_limit, int is_cap_limit_stream,
                               struct timespec *wake_time_out)
{
        struct cras_rstream *rstream = dev_stream->stream;
        struct timespec time_for_sample;
        int needed_frames_from_device;

        needed_frames_from_device = dev_stream_capture_avail(dev_stream);

        /*
         * If this stream is not the cap_limit stream, and it needs more
         * frames than the capture limit from the audio thread, don't bother
         * re-calculating the wake time for it, because
         * |needed_frames_from_device| cannot all be copied to shm until
         * the cap_limit stream's samples in shm are read by its client,
         * which relieves the cap_limit.
         *
         * Note that we need to know whether this stream is the cap_limit
         * stream here because the client of the cap_limit stream may read
         * the data from shm during this time window, and cause
         * needed_frames_from_device to be greater than the cap_limit that
         * was calculated before.
         */
        if (!is_cap_limit_stream && needed_frames_from_device > cap_limit)
                return 1;

        /*
         * For a capture stream using device timing, the flow is:
         * 1. The device has less than one cb_threshold of data.
         * 2. The device has a large chunk of data that the client needs to
         *    consume in multiple cycles.
         * 3. The audio thread sends one block to the client and goes to
         *    sleep.
         * 4. The client sends a reply to wake up the audio thread.
         * 5. Repeat 3 and 4 until there is less than one cb_threshold of
         *    data.
         * 6. Go to 1.
         *
         * In 1, we schedule the next wake up time based on the needed frames.
         * This is needed to poll the samples from the device.
         *
         * In 3, we do not schedule a wake up time for this stream.
         * We let the reply from the client wake up the audio thread to send
         * the next cb_threshold of data.
         *
         * TODO(cychiang) Do we want to actually block sending data to the
         * client until the client replies? Or is controlling the scheduling
         * of the wake up time enough?
         */
        if ((rstream->flags & USE_DEV_TIMING) &&
            cras_rstream_is_pending_reply(rstream))
                return 1;

        *wake_time_out = rstream->next_cb_ts;

        /*
         * If the current frames in the device can provide the amount the
         * stream needs, there is no need to wait.
         */
        if (curr_level >= needed_frames_from_device)
                needed_frames_from_device = 0;
        else
                needed_frames_from_device -= curr_level;

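        /*
         * Illustrative example (editor's note, not from the original source):
         * if the stream still needs 240 frames from a 48000 Hz device, the
         * wait below evaluates to 5 ms past level_tstamp, and the later of
         * that and next_cb_ts becomes the wake time (except under
         * USE_DEV_TIMING, which always uses the sample time).
         */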
        cras_frames_to_time(needed_frames_from_device, dev_stream->dev_rate,
                            &time_for_sample);

        add_timespecs(&time_for_sample, level_tstamp);

        /* Select the time that is later so both sample and time conditions
         * are met. */
        if (timespec_after(&time_for_sample, &rstream->next_cb_ts))
                *wake_time_out = time_for_sample;
        /* Using device timing means the stream neglects next callback time. */
        if (rstream->flags & USE_DEV_TIMING)
                *wake_time_out = time_for_sample;

        ATLOG(atlog, AUDIO_THREAD_STREAM_SLEEP_TIME,
              dev_stream->stream->stream_id, wake_time_out->tv_sec,
              wake_time_out->tv_nsec);

        return 0;
}

int dev_stream_wake_time(struct dev_stream *dev_stream,
                         unsigned int curr_level,
                         struct timespec *level_tstamp, unsigned int cap_limit,
                         int is_cap_limit_stream,
                         struct timespec *wake_time_out)
{
        if (dev_stream->stream->direction == CRAS_STREAM_OUTPUT) {
                /*
                 * TODO(cychiang) Implement the method for output streams.
                 * The logic should be similar to what
                 * get_next_stream_wake_from_list in audio_thread.c is doing.
                 */
                return -EINVAL;
        }

        return get_input_wake_time(dev_stream, curr_level, level_tstamp,
                                   cap_limit, is_cap_limit_stream,
                                   wake_time_out);
}

int dev_stream_is_pending_reply(const struct dev_stream *dev_stream)
{
        return cras_rstream_is_pending_reply(dev_stream->stream);
}

int dev_stream_flush_old_audio_messages(struct dev_stream *dev_stream)
{
        return cras_rstream_flush_old_audio_messages(dev_stream->stream);
}