1 /* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
2 * Use of this source code is governed by a BSD-style license that can be
3 * found in the LICENSE file.
4 */
5
6 #include <syslog.h>
7
8 #include "audio_thread.h"
9 #include "cras_empty_iodev.h"
10 #include "cras_iodev.h"
11 #include "cras_iodev_info.h"
12 #include "cras_iodev_list.h"
13 #include "cras_loopback_iodev.h"
14 #include "cras_main_thread_log.h"
15 #include "cras_observer.h"
16 #include "cras_rstream.h"
17 #include "cras_server.h"
18 #include "cras_tm.h"
19 #include "cras_types.h"
20 #include "cras_system_state.h"
21 #include "server_stream.h"
22 #include "softvol_curve.h"
23 #include "stream_list.h"
24 #include "test_iodev.h"
25 #include "utlist.h"
26
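/* After the last normal (non-pinned) stream of a direction is removed, output
 * devices with no pinned stream are given this long to drain before
 * idle_dev_check() closes them. */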
27 const struct timespec idle_timeout_interval = { .tv_sec = 10, .tv_nsec = 0 };
28
29 /* Linked list of available devices. */
30 struct iodev_list {
31 struct cras_iodev *iodevs;
32 size_t size;
33 };
34
/* List of enabled input/output devices.
 * dev - The device.
 */
39 struct enabled_dev {
40 struct cras_iodev *dev;
41 struct enabled_dev *prev, *next;
42 };
43
44 struct dev_init_retry {
45 int dev_idx;
46 struct cras_timer *init_timer;
47 struct dev_init_retry *next, *prev;
48 };
49
50 struct device_enabled_cb {
51 device_enabled_callback_t enabled_cb;
52 device_disabled_callback_t disabled_cb;
53 void *cb_data;
54 struct device_enabled_cb *next, *prev;
55 };
56
57 struct main_thread_event_log *main_log;
58
59 /* Lists for devs[CRAS_STREAM_INPUT] and devs[CRAS_STREAM_OUTPUT]. */
60 static struct iodev_list devs[CRAS_NUM_DIRECTIONS];
61 /* The observer client iodev_list used to listen on various events. */
62 static struct cras_observer_client *list_observer;
63 /* Keep a list of enabled inputs and outputs. */
64 static struct enabled_dev *enabled_devs[CRAS_NUM_DIRECTIONS];
65 /* Keep an empty device per direction. */
66 static struct cras_iodev *fallback_devs[CRAS_NUM_DIRECTIONS];
67 /* Special empty device for hotword streams. */
68 static struct cras_iodev *empty_hotword_dev;
69 /* Loopback devices. */
70 static struct cras_iodev *loopdev_post_mix;
71 static struct cras_iodev *loopdev_post_dsp;
72 /* List of pending device init retries. */
73 static struct dev_init_retry *init_retries;
74
75 /* Keep a constantly increasing index for iodevs. Index 0 is reserved
76 * to mean "no device". */
77 static uint32_t next_iodev_idx = MAX_SPECIAL_DEVICE_IDX;
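/* For example, the first device registered gets index MAX_SPECIAL_DEVICE_IDX
 * and each later one gets the next value of this counter; if the 32-bit
 * counter ever wraps, add_dev_to_list() below skips back over the reserved
 * special indices and over any index still present in that direction's list.
 */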
78
/* Callbacks invoked when a device is enabled or disabled. */
80 struct device_enabled_cb *device_enable_cbs;
81
82 /* Thread that handles audio input and output. */
83 static struct audio_thread *audio_thread;
84 /* List of all streams. */
85 static struct stream_list *stream_list;
86 /* Idle device timer. */
87 static struct cras_timer *idle_timer;
88 /* Flag to indicate that the stream list is disconnected from audio thread. */
89 static int stream_list_suspended = 0;
/* If device init fails, retry after 1 second. */
91 static const unsigned int INIT_DEV_DELAY_MS = 1000;
92 /* Flag to indicate that hotword streams are suspended. */
93 static int hotword_suspended = 0;
94 /* Flag to indicate that suspended hotword streams should be auto-resumed at
95 * system resume. */
96 static int hotword_auto_resume = 0;
97
98 static void idle_dev_check(struct cras_timer *timer, void *data);
99
static struct cras_iodev *find_dev(size_t dev_index)
101 {
102 struct cras_iodev *dev;
103
104 DL_FOREACH (devs[CRAS_STREAM_OUTPUT].iodevs, dev)
105 if (dev->info.idx == dev_index)
106 return dev;
107
108 DL_FOREACH (devs[CRAS_STREAM_INPUT].iodevs, dev)
109 if (dev->info.idx == dev_index)
110 return dev;
111
112 return NULL;
113 }
114
static struct cras_ionode *find_node(struct cras_iodev *iodev,
				     unsigned int node_idx)
117 {
118 struct cras_ionode *node;
119 DL_SEARCH_SCALAR(iodev->nodes, node, idx, node_idx);
120 return node;
121 }
122
123 /* Adds a device to the list. Used from add_input and add_output. */
static int add_dev_to_list(struct cras_iodev *dev)
125 {
126 struct cras_iodev *tmp;
127 uint32_t new_idx;
128 struct iodev_list *list = &devs[dev->direction];
129
130 DL_FOREACH (list->iodevs, tmp)
131 if (tmp == dev)
132 return -EEXIST;
133
	dev->format = NULL;
136 dev->prev = dev->next = NULL;
137
138 /* Move to the next index and make sure it isn't taken. */
139 new_idx = next_iodev_idx;
140 while (1) {
141 if (new_idx < MAX_SPECIAL_DEVICE_IDX)
142 new_idx = MAX_SPECIAL_DEVICE_IDX;
143 DL_SEARCH_SCALAR(list->iodevs, tmp, info.idx, new_idx);
144 if (tmp == NULL)
145 break;
146 new_idx++;
147 }
148 dev->info.idx = new_idx;
149 next_iodev_idx = new_idx + 1;
150 list->size++;
151
152 syslog(LOG_INFO, "Adding %s dev at index %u.",
153 dev->direction == CRAS_STREAM_OUTPUT ? "output" : "input",
154 dev->info.idx);
155 DL_PREPEND(list->iodevs, dev);
156
157 cras_iodev_list_update_device_list();
158 return 0;
159 }
160
/* Removes a device from the list. Used from rm_input and rm_output. */
static int rm_dev_from_list(struct cras_iodev *dev)
163 {
164 struct cras_iodev *tmp;
165
166 DL_FOREACH (devs[dev->direction].iodevs, tmp)
167 if (tmp == dev) {
168 if (cras_iodev_is_open(dev))
169 return -EBUSY;
170 DL_DELETE(devs[dev->direction].iodevs, dev);
171 devs[dev->direction].size--;
172 return 0;
173 }
174
175 /* Device not found. */
176 return -EINVAL;
177 }
178
179 /* Fills a dev_info array from the iodev_list. */
static void fill_dev_list(struct iodev_list *list,
			  struct cras_iodev_info *dev_info, size_t out_size)
182 {
183 int i = 0;
184 struct cras_iodev *tmp;
185 DL_FOREACH (list->iodevs, tmp) {
186 memcpy(&dev_info[i], &tmp->info, sizeof(dev_info[0]));
187 i++;
188 if (i == out_size)
189 return;
190 }
191 }
192
static const char *node_type_to_str(struct cras_ionode *node)
194 {
195 switch (node->type) {
196 case CRAS_NODE_TYPE_INTERNAL_SPEAKER:
197 return "INTERNAL_SPEAKER";
198 case CRAS_NODE_TYPE_HEADPHONE:
199 return "HEADPHONE";
200 case CRAS_NODE_TYPE_HDMI:
201 return "HDMI";
202 case CRAS_NODE_TYPE_HAPTIC:
203 return "HAPTIC";
204 case CRAS_NODE_TYPE_MIC:
205 switch (node->position) {
206 case NODE_POSITION_INTERNAL:
207 return "INTERNAL_MIC";
208 case NODE_POSITION_FRONT:
209 return "FRONT_MIC";
210 case NODE_POSITION_REAR:
211 return "REAR_MIC";
212 case NODE_POSITION_KEYBOARD:
213 return "KEYBOARD_MIC";
214 case NODE_POSITION_EXTERNAL:
215 default:
216 return "MIC";
217 }
218 case CRAS_NODE_TYPE_HOTWORD:
219 return "HOTWORD";
220 case CRAS_NODE_TYPE_LINEOUT:
221 return "LINEOUT";
222 case CRAS_NODE_TYPE_POST_MIX_PRE_DSP:
223 return "POST_MIX_LOOPBACK";
224 case CRAS_NODE_TYPE_POST_DSP:
225 return "POST_DSP_LOOPBACK";
226 case CRAS_NODE_TYPE_USB:
227 return "USB";
228 case CRAS_NODE_TYPE_BLUETOOTH:
229 return "BLUETOOTH";
230 case CRAS_NODE_TYPE_BLUETOOTH_NB_MIC:
231 return "BLUETOOTH_NB_MIC";
232 case CRAS_NODE_TYPE_FALLBACK_NORMAL:
233 return "FALLBACK_NORMAL";
234 case CRAS_NODE_TYPE_FALLBACK_ABNORMAL:
235 return "FALLBACK_ABNORMAL";
236 case CRAS_NODE_TYPE_ECHO_REFERENCE:
237 return "ECHO_REFERENCE";
238 case CRAS_NODE_TYPE_ALSA_LOOPBACK:
239 return "ALSA_LOOPBACK";
240 case CRAS_NODE_TYPE_UNKNOWN:
241 default:
242 return "UNKNOWN";
243 }
244 }
245
246 /* Fills an ionode_info array from the iodev_list. */
static int fill_node_list(struct iodev_list *list,
			  struct cras_ionode_info *node_info, size_t out_size)
249 {
250 int i = 0;
251 struct cras_iodev *dev;
252 struct cras_ionode *node;
253 DL_FOREACH (list->iodevs, dev) {
254 DL_FOREACH (dev->nodes, node) {
255 node_info->iodev_idx = dev->info.idx;
256 node_info->ionode_idx = node->idx;
257 node_info->plugged = node->plugged;
258 node_info->plugged_time.tv_sec =
259 node->plugged_time.tv_sec;
260 node_info->plugged_time.tv_usec =
261 node->plugged_time.tv_usec;
262 node_info->active =
263 dev->is_enabled && (dev->active_node == node);
264 node_info->volume = node->volume;
265 node_info->capture_gain = node->capture_gain;
266 node_info->ui_gain_scaler = node->ui_gain_scaler;
267 node_info->left_right_swapped =
268 node->left_right_swapped;
269 node_info->stable_id = node->stable_id;
270 strcpy(node_info->name, node->name);
271 strcpy(node_info->active_hotword_model,
272 node->active_hotword_model);
273 snprintf(node_info->type, sizeof(node_info->type), "%s",
274 node_type_to_str(node));
275 node_info->type_enum = node->type;
276 node_info++;
277 i++;
278 if (i == out_size)
279 return i;
280 }
281 }
282 return i;
283 }
284
285 /* Copies the info for each device in the list to "list_out". */
static int get_dev_list(struct iodev_list *list,
			struct cras_iodev_info **list_out)
288 {
289 struct cras_iodev_info *dev_info;
290
291 if (!list_out)
292 return list->size;
293
294 *list_out = NULL;
295 if (list->size == 0)
296 return 0;
297
298 dev_info = malloc(sizeof(*list_out[0]) * list->size);
299 if (dev_info == NULL)
300 return -ENOMEM;
301
302 fill_dev_list(list, dev_info, list->size);
303
304 *list_out = dev_info;
305 return list->size;
306 }
307
/* Called when the system volume changes. Pass the current volume setting to
 * each open output device. */
static void sys_vol_change(void *context, int32_t volume)
311 {
312 struct cras_iodev *dev;
313
314 DL_FOREACH (devs[CRAS_STREAM_OUTPUT].iodevs, dev) {
315 if (dev->set_volume && cras_iodev_is_open(dev))
316 dev->set_volume(dev);
317 }
318 }
319
/* Called when the system mute state changes. Pass the current mute setting
 * to all output devices. */
static void sys_mute_change(void *context, int muted, int user_muted,
			    int mute_locked)
324 {
325 struct cras_iodev *dev;
326 int should_mute = muted || user_muted;
327
328 DL_FOREACH (devs[CRAS_STREAM_OUTPUT].iodevs, dev) {
329 if (!cras_iodev_is_open(dev)) {
330 /* For closed devices, just set its mute state. */
331 cras_iodev_set_mute(dev);
332 } else {
333 audio_thread_dev_start_ramp(
334 audio_thread, dev->info.idx,
335 (should_mute ?
336 CRAS_IODEV_RAMP_REQUEST_DOWN_MUTE :
337 CRAS_IODEV_RAMP_REQUEST_UP_UNMUTE));
338 }
339 }
340 }
341
static void remove_all_streams_from_dev(struct cras_iodev *dev)
343 {
344 struct cras_rstream *rstream;
345
346 audio_thread_rm_open_dev(audio_thread, dev->direction, dev->info.idx);
347
348 DL_FOREACH (stream_list_get(stream_list), rstream) {
349 if (rstream->apm_list == NULL)
350 continue;
351 cras_apm_list_remove_apm(rstream->apm_list, dev);
352 }
353 }
354
/*
 * If the output dev has an echo reference dev associated, add a server
 * stream to read audio data from it so the APM can analyze it.
 */
static void possibly_enable_echo_reference(struct cras_iodev *dev)
360 {
361 if (dev->direction != CRAS_STREAM_OUTPUT)
362 return;
363
364 if (dev->echo_reference_dev == NULL)
365 return;
366
367 server_stream_create(stream_list, dev->echo_reference_dev->info.idx,
368 dev->format);
369 }
370
/*
 * If the output dev has an echo reference dev associated, check if there
 * is a server stream opened for it and remove it.
 */
static void possibly_disable_echo_reference(struct cras_iodev *dev)
376 {
377 if (dev->echo_reference_dev == NULL)
378 return;
379
380 server_stream_destroy(stream_list, dev->echo_reference_dev->info.idx);
381 }
382
/*
 * Removes all attached streams and closes the dev if it is open.
 */
static void close_dev(struct cras_iodev *dev)
387 {
388 if (!cras_iodev_is_open(dev))
389 return;
390
391 MAINLOG(main_log, MAIN_THREAD_DEV_CLOSE, dev->info.idx, 0, 0);
392 remove_all_streams_from_dev(dev);
393 dev->idle_timeout.tv_sec = 0;
394 /* close echo ref first to avoid underrun in hardware */
395 possibly_disable_echo_reference(dev);
396 cras_iodev_close(dev);
397 }
398
static void idle_dev_check(struct cras_timer *timer, void *data)
400 {
401 struct enabled_dev *edev;
402 struct timespec now;
403 struct timespec min_idle_expiration;
404 unsigned int num_idle_devs = 0;
405 unsigned int min_idle_timeout_ms;
406
407 clock_gettime(CLOCK_MONOTONIC_RAW, &now);
408 min_idle_expiration.tv_sec = 0;
409 min_idle_expiration.tv_nsec = 0;
410
411 DL_FOREACH (enabled_devs[CRAS_STREAM_OUTPUT], edev) {
412 if (edev->dev->idle_timeout.tv_sec == 0)
413 continue;
414 if (timespec_after(&now, &edev->dev->idle_timeout)) {
415 close_dev(edev->dev);
416 continue;
417 }
418 num_idle_devs++;
419 if (min_idle_expiration.tv_sec == 0 ||
420 timespec_after(&min_idle_expiration,
421 &edev->dev->idle_timeout))
422 min_idle_expiration = edev->dev->idle_timeout;
423 }
424
425 idle_timer = NULL;
426 if (!num_idle_devs)
427 return;
428 if (timespec_after(&now, &min_idle_expiration)) {
429 min_idle_timeout_ms = 0;
430 } else {
431 struct timespec timeout;
432 subtract_timespecs(&min_idle_expiration, &now, &timeout);
433 min_idle_timeout_ms = timespec_to_ms(&timeout);
434 }
435 /* Wake up when it is time to close the next idle device. Sleep for a
436 * minimum of 10 milliseconds. */
437 idle_timer = cras_tm_create_timer(cras_system_state_get_tm(),
438 MAX(min_idle_timeout_ms, 10),
439 idle_dev_check, NULL);
440 }
441
/*
 * Cancels pending init retries. Called at device initialization or when a
 * device is disabled.
 */
static void cancel_pending_init_retries(unsigned int dev_idx)
447 {
448 struct dev_init_retry *retry;
449
450 DL_FOREACH (init_retries, retry) {
451 if (retry->dev_idx != dev_idx)
452 continue;
453 cras_tm_cancel_timer(cras_system_state_get_tm(),
454 retry->init_timer);
455 DL_DELETE(init_retries, retry);
456 free(retry);
457 }
458 }
459
/* Opens the device, potentially filling the output with a pre buffer. */
static int init_device(struct cras_iodev *dev, struct cras_rstream *rstream)
462 {
463 int rc;
464
465 cras_iodev_exit_idle(dev);
466
467 if (cras_iodev_is_open(dev))
468 return 0;
469 cancel_pending_init_retries(dev->info.idx);
470 MAINLOG(main_log, MAIN_THREAD_DEV_INIT, dev->info.idx,
471 rstream->format.num_channels, rstream->format.frame_rate);
472
473 rc = cras_iodev_open(dev, rstream->cb_threshold, &rstream->format);
474 if (rc)
475 return rc;
476
477 rc = audio_thread_add_open_dev(audio_thread, dev);
478 if (rc)
479 cras_iodev_close(dev);
480
481 possibly_enable_echo_reference(dev);
482
483 return rc;
484 }
485
static void suspend_devs()
487 {
488 struct enabled_dev *edev;
489 struct cras_rstream *rstream;
490
491 MAINLOG(main_log, MAIN_THREAD_SUSPEND_DEVS, 0, 0, 0);
492
493 DL_FOREACH (stream_list_get(stream_list), rstream) {
494 if (rstream->is_pinned) {
495 struct cras_iodev *dev;
496
497 /* Skip closing hotword stream in the first pass.
498 * Closing an input device may resume hotword stream
499 * with its post_close_iodev_hook so we should deal
500 * with hotword stream in the second pass.
501 */
502 if ((rstream->flags & HOTWORD_STREAM) == HOTWORD_STREAM)
503 continue;
504
505 dev = find_dev(rstream->pinned_dev_idx);
506 if (dev) {
507 audio_thread_disconnect_stream(audio_thread,
508 rstream, dev);
509 if (!cras_iodev_list_dev_is_enabled(dev))
510 close_dev(dev);
511 }
512 } else {
513 audio_thread_disconnect_stream(audio_thread, rstream,
514 NULL);
515 }
516 }
517 stream_list_suspended = 1;
518
519 DL_FOREACH (enabled_devs[CRAS_STREAM_OUTPUT], edev) {
520 close_dev(edev->dev);
521 }
522 DL_FOREACH (enabled_devs[CRAS_STREAM_INPUT], edev) {
523 close_dev(edev->dev);
524 }
525
526 /* Doing this check after all the other enabled iodevs are closed to
527 * ensure preempted hotword streams obey the pause_at_suspend flag.
528 */
529 if (cras_system_get_hotword_pause_at_suspend()) {
530 cras_iodev_list_suspend_hotword_streams();
531 hotword_auto_resume = 1;
532 }
533 }
534
535 static int stream_added_cb(struct cras_rstream *rstream);
536
static void resume_devs()
538 {
539 struct enabled_dev *edev;
540 struct cras_rstream *rstream;
541
542 int has_output_stream = 0;
543 stream_list_suspended = 0;
544
545 MAINLOG(main_log, MAIN_THREAD_RESUME_DEVS, 0, 0, 0);
546
547 /* Auto-resume based on the local flag in case the system state flag has
548 * changed.
549 */
550 if (hotword_auto_resume) {
551 cras_iodev_list_resume_hotword_stream();
552 hotword_auto_resume = 0;
553 }
554
555 /*
	 * To avoid the short pop noise caused by applications that cannot
	 * stop playback right away after resume, we mute all output devices
	 * for a short time if there is any output stream.
559 */
560 DL_FOREACH (stream_list_get(stream_list), rstream) {
561 if (rstream->direction == CRAS_STREAM_OUTPUT)
562 has_output_stream++;
563 }
564 if (has_output_stream) {
565 DL_FOREACH (enabled_devs[CRAS_STREAM_OUTPUT], edev) {
566 edev->dev->initial_ramp_request =
567 CRAS_IODEV_RAMP_REQUEST_RESUME_MUTE;
568 }
569 }
570
571 DL_FOREACH (stream_list_get(stream_list), rstream) {
572 if ((rstream->flags & HOTWORD_STREAM) == HOTWORD_STREAM)
573 continue;
574 stream_added_cb(rstream);
575 }
576 }
577
578 /* Called when the system audio is suspended or resumed. */
void sys_suspend_change(void *arg, int suspended)
580 {
581 if (suspended)
582 suspend_devs();
583 else
584 resume_devs();
585 }
586
/* Called when the system capture mute state changes. Pass the current capture
 * mute setting to all open input devices. */
static void sys_cap_mute_change(void *context, int muted, int mute_locked)
590 {
591 struct cras_iodev *dev;
592
593 DL_FOREACH (devs[CRAS_STREAM_INPUT].iodevs, dev) {
594 if (dev->set_capture_mute && cras_iodev_is_open(dev))
595 dev->set_capture_mute(dev);
596 }
597 }
598
599 static int disable_device(struct enabled_dev *edev, bool force);
600 static int enable_device(struct cras_iodev *dev);
601
static void possibly_disable_fallback(enum CRAS_STREAM_DIRECTION dir)
603 {
604 struct enabled_dev *edev;
605
606 DL_FOREACH (enabled_devs[dir], edev) {
607 if (edev->dev == fallback_devs[dir])
608 disable_device(edev, false);
609 }
610 }
611
/*
 * Possibly enables the fallback device to handle streams.
 * dir - output or input.
 * error - true if the fallback device is enabled because no other iodevs
 *         could be initialized successfully.
 */
static void possibly_enable_fallback(enum CRAS_STREAM_DIRECTION dir, bool error)
619 {
620 if (fallback_devs[dir] == NULL)
621 return;
622
623 /*
624 * The fallback device is a special device. It doesn't have a real
625 * device to get a correct node type. Therefore, we need to set it by
626 * ourselves, which indicates the reason to use this device.
627 * NORMAL - Use it because of nodes changed.
628 * ABNORMAL - Use it because there are no other usable devices.
629 */
630 if (error)
631 syslog(LOG_ERR,
632 "Enable fallback device because there are no other usable devices.");
633
634 fallback_devs[dir]->active_node->type =
635 error ? CRAS_NODE_TYPE_FALLBACK_ABNORMAL :
636 CRAS_NODE_TYPE_FALLBACK_NORMAL;
637 if (!cras_iodev_list_dev_is_enabled(fallback_devs[dir]))
638 enable_device(fallback_devs[dir]);
639 }
640
/*
 * Adds a stream to one or more open iodevs. If the stream has a processing
 * effect turned on, create a new APM instance and add it to the list. This
 * makes sure the time-consuming APM creation happens in the main thread.
 */
static int add_stream_to_open_devs(struct cras_rstream *stream,
				   struct cras_iodev **iodevs,
				   unsigned int num_iodevs)
649 {
650 int i;
651 if (stream->apm_list) {
652 for (i = 0; i < num_iodevs; i++)
653 cras_apm_list_add_apm(stream->apm_list, iodevs[i],
654 iodevs[i]->format,
655 cras_iodev_is_aec_use_case(
656 iodevs[i]->active_node));
657 }
658 return audio_thread_add_stream(audio_thread, stream, iodevs,
659 num_iodevs);
660 }
661
static int init_and_attach_streams(struct cras_iodev *dev)
663 {
664 int rc;
665 enum CRAS_STREAM_DIRECTION dir = dev->direction;
666 struct cras_rstream *stream;
667 int dev_enabled = cras_iodev_list_dev_is_enabled(dev);
668
669 /* If called after suspend, for example bluetooth
670 * profile switching, don't add back the stream list. */
671 if (stream_list_suspended)
672 return 0;
673
674 /* If there are active streams to attach to this device,
675 * open it. */
676 DL_FOREACH (stream_list_get(stream_list), stream) {
677 bool can_attach = 0;
678
679 if (stream->direction != dir)
680 continue;
681 /*
682 * For normal stream, if device is enabled by UI then it can
683 * attach to this dev.
684 */
685 if (!stream->is_pinned) {
686 can_attach = dev_enabled;
687 }
688 /*
689 * If this is a pinned stream, attach it if its pinned dev id
690 * matches this device or any fallback dev. Note that attaching
691 * a pinned stream to fallback device is temporary. When the
692 * fallback dev gets disabled in possibly_disable_fallback()
693 * the check stream_list_has_pinned_stream() is key to allow
694 * all streams to be removed from fallback and close it.
695 */
696 else if ((stream->pinned_dev_idx == dev->info.idx) ||
697 (SILENT_PLAYBACK_DEVICE == dev->info.idx) ||
698 (SILENT_RECORD_DEVICE == dev->info.idx)) {
699 can_attach = 1;
700 }
701
702 if (!can_attach)
703 continue;
704
705 /*
706 * Note that the stream list is descending ordered by channel
707 * count, which guarantees the first attachable stream will have
708 * the highest channel count.
709 */
710 rc = init_device(dev, stream);
711 if (rc) {
712 syslog(LOG_ERR, "Enable %s failed, rc = %d",
713 dev->info.name, rc);
714 return rc;
715 }
716 add_stream_to_open_devs(stream, &dev, 1);
717 }
718 return 0;
719 }
720
static void init_device_cb(struct cras_timer *timer, void *arg)
722 {
723 int rc;
724 struct dev_init_retry *retry = (struct dev_init_retry *)arg;
725 struct cras_iodev *dev = find_dev(retry->dev_idx);
726
727 /*
	 * First of all, remove the retry record to avoid confusing the
	 * actual device init work.
730 */
731 DL_DELETE(init_retries, retry);
732 free(retry);
733
734 if (!dev || cras_iodev_is_open(dev))
735 return;
736
737 rc = init_and_attach_streams(dev);
738 if (rc < 0)
739 syslog(LOG_ERR, "Init device retry failed");
740 else
741 possibly_disable_fallback(dev->direction);
742 }
743
static int schedule_init_device_retry(struct cras_iodev *dev)
745 {
746 struct dev_init_retry *retry;
747 struct cras_tm *tm = cras_system_state_get_tm();
748
749 retry = (struct dev_init_retry *)calloc(1, sizeof(*retry));
750 if (!retry)
751 return -ENOMEM;
752
753 retry->dev_idx = dev->info.idx;
754 retry->init_timer = cras_tm_create_timer(tm, INIT_DEV_DELAY_MS,
755 init_device_cb, retry);
756 DL_APPEND(init_retries, retry);
757 return 0;
758 }
759
static int init_pinned_device(struct cras_iodev *dev,
			      struct cras_rstream *rstream)
762 {
763 int rc;
764
765 cras_iodev_exit_idle(dev);
766
767 if (audio_thread_is_dev_open(audio_thread, dev))
768 return 0;
769
	/* Make sure the active node is configured properly; it could have
	 * been disabled when the last normal stream was removed. */
772 dev->update_active_node(dev, dev->active_node->idx, 1);
773
774 /* Negative EAGAIN code indicates dev will be opened later. */
775 rc = init_device(dev, rstream);
776 if (rc)
777 return rc;
778 return 0;
779 }
780
/*
 * Closes a device enabled by a pinned stream. Since it's NOT in the enabled
 * dev list, make sure update_active_node() is called to correctly
 * configure the ALSA UCM or BT profile state.
 */
static int close_pinned_device(struct cras_iodev *dev)
787 {
788 close_dev(dev);
789 dev->update_active_node(dev, dev->active_node->idx, 0);
790 return 0;
791 }
792
static struct cras_iodev *find_pinned_device(struct cras_rstream *rstream)
794 {
795 struct cras_iodev *dev;
796 if (!rstream->is_pinned)
797 return NULL;
798
799 dev = find_dev(rstream->pinned_dev_idx);
800
801 if ((rstream->flags & HOTWORD_STREAM) != HOTWORD_STREAM)
802 return dev;
803
804 /* Double check node type for hotword stream */
805 if (dev && dev->active_node->type != CRAS_NODE_TYPE_HOTWORD) {
806 syslog(LOG_ERR, "Hotword stream pinned to invalid dev %u",
807 dev->info.idx);
808 return NULL;
809 }
810
811 return hotword_suspended ? empty_hotword_dev : dev;
812 }
813
static int pinned_stream_added(struct cras_rstream *rstream)
815 {
816 struct cras_iodev *dev;
817 int rc;
818
819 /* Check that the target device is valid for pinned streams. */
820 dev = find_pinned_device(rstream);
821 if (!dev)
822 return -EINVAL;
823
824 rc = init_pinned_device(dev, rstream);
825 if (rc) {
826 syslog(LOG_INFO, "init_pinned_device failed, rc %d", rc);
827 return schedule_init_device_retry(dev);
828 }
829
830 return add_stream_to_open_devs(rstream, &dev, 1);
831 }
832
static int stream_added_cb(struct cras_rstream *rstream)
834 {
835 struct enabled_dev *edev;
836 struct cras_iodev *iodevs[10];
837 unsigned int num_iodevs;
838 int rc;
839 bool iodev_reopened;
840
841 if (stream_list_suspended)
842 return 0;
843
844 MAINLOG(main_log, MAIN_THREAD_STREAM_ADDED, rstream->stream_id,
845 rstream->direction, rstream->buffer_frames);
846
847 if (rstream->is_pinned)
848 return pinned_stream_added(rstream);
849
	/* Add the new stream to all enabled iodevs at once to avoid offsets
	 * in shm level between different output iodevs. */
852 num_iodevs = 0;
853 iodev_reopened = false;
854 DL_FOREACH (enabled_devs[rstream->direction], edev) {
855 if (num_iodevs >= ARRAY_SIZE(iodevs)) {
856 syslog(LOG_ERR, "too many enabled devices");
857 break;
858 }
859
860 if (cras_iodev_is_open(edev->dev) &&
861 (rstream->format.num_channels >
862 edev->dev->format->num_channels) &&
863 (rstream->format.num_channels <=
864 edev->dev->info.max_supported_channels)) {
865 /* Re-open the device with the format of the attached
866 * stream if it has higher channel count than the
867 * current format of the device, and doesn't exceed the
868 * max_supported_channels of the device.
			 * Fallback device will be transiently enabled during
870 * the device re-opening.
871 */
872 MAINLOG(main_log, MAIN_THREAD_DEV_REOPEN,
873 rstream->format.num_channels,
874 edev->dev->format->num_channels,
875 edev->dev->format->frame_rate);
876 syslog(LOG_INFO, "re-open %s for higher channel count",
877 edev->dev->info.name);
878 possibly_enable_fallback(rstream->direction, false);
879 cras_iodev_list_suspend_dev(edev->dev->info.idx);
880 cras_iodev_list_resume_dev(edev->dev->info.idx);
881 possibly_disable_fallback(rstream->direction);
882 iodev_reopened = true;
883 } else {
884 rc = init_device(edev->dev, rstream);
885 if (rc) {
				/* Log the error but don't return it here,
				 * because stopping audio could block video
				 * playback.
				 */
889 syslog(LOG_ERR, "Init %s failed, rc = %d",
890 edev->dev->info.name, rc);
891 schedule_init_device_retry(edev->dev);
892 continue;
893 }
894
895 iodevs[num_iodevs++] = edev->dev;
896 }
897 }
898 if (num_iodevs) {
899 rc = add_stream_to_open_devs(rstream, iodevs, num_iodevs);
900 if (rc) {
901 syslog(LOG_ERR, "adding stream to thread fail");
902 return rc;
903 }
904 } else if (!iodev_reopened) {
905 /* Enable fallback device if no other iodevs can be initialized
906 * or re-opened successfully.
907 * For error codes like EAGAIN and ENOENT, a new iodev will be
908 * enabled soon so streams are going to route there. As for the
909 * rest of the error cases, silence will be played or recorded
910 * so client won't be blocked.
911 * The enabled fallback device will be disabled when
912 * cras_iodev_list_select_node() is called to re-select the
913 * active node.
914 */
915 possibly_enable_fallback(rstream->direction, true);
916 }
917 return 0;
918 }
919
static int possibly_close_enabled_devs(enum CRAS_STREAM_DIRECTION dir)
921 {
922 struct enabled_dev *edev;
923 const struct cras_rstream *s;
924
925 /* Check if there are still default streams attached. */
926 DL_FOREACH (stream_list_get(stream_list), s) {
927 if (s->direction == dir && !s->is_pinned)
928 return 0;
929 }
930
931 /* No more default streams, close any device that doesn't have a stream
932 * pinned to it. */
933 DL_FOREACH (enabled_devs[dir], edev) {
934 if (stream_list_has_pinned_stream(stream_list,
935 edev->dev->info.idx))
936 continue;
937 if (dir == CRAS_STREAM_INPUT) {
938 close_dev(edev->dev);
939 continue;
940 }
941 /* Allow output devs to drain before closing. */
942 clock_gettime(CLOCK_MONOTONIC_RAW, &edev->dev->idle_timeout);
943 add_timespecs(&edev->dev->idle_timeout, &idle_timeout_interval);
944 idle_dev_check(NULL, NULL);
945 }
946
947 return 0;
948 }
949
static void pinned_stream_removed(struct cras_rstream *rstream)
951 {
952 struct cras_iodev *dev;
953
954 dev = find_pinned_device(rstream);
955 if (!dev)
956 return;
957 if (!cras_iodev_list_dev_is_enabled(dev) &&
958 !stream_list_has_pinned_stream(stream_list, dev->info.idx))
959 close_pinned_device(dev);
960 }
961
962 /* Returns the number of milliseconds left to drain this stream. This is passed
963 * directly from the audio thread. */
static int stream_removed_cb(struct cras_rstream *rstream)
965 {
966 enum CRAS_STREAM_DIRECTION direction = rstream->direction;
967 int rc;
968
969 rc = audio_thread_drain_stream(audio_thread, rstream);
970 if (rc)
971 return rc;
972
973 MAINLOG(main_log, MAIN_THREAD_STREAM_REMOVED, rstream->stream_id, 0, 0);
974
975 if (rstream->is_pinned)
976 pinned_stream_removed(rstream);
977
978 possibly_close_enabled_devs(direction);
979
980 return 0;
981 }
982
static int enable_device(struct cras_iodev *dev)
984 {
985 int rc;
986 struct enabled_dev *edev;
987 enum CRAS_STREAM_DIRECTION dir = dev->direction;
988 struct device_enabled_cb *callback;
989
990 DL_FOREACH (enabled_devs[dir], edev) {
991 if (edev->dev == dev)
992 return -EEXIST;
993 }
994
995 edev = calloc(1, sizeof(*edev));
996 edev->dev = dev;
997 DL_APPEND(enabled_devs[dir], edev);
998 dev->is_enabled = 1;
999
1000 rc = init_and_attach_streams(dev);
1001 if (rc < 0) {
1002 syslog(LOG_INFO, "Enable device fail, rc %d", rc);
1003 schedule_init_device_retry(dev);
1004 return rc;
1005 }
1006
1007 DL_FOREACH (device_enable_cbs, callback)
1008 callback->enabled_cb(dev, callback->cb_data);
1009
1010 return 0;
1011 }
1012
/* Set `force` to true to flush any pinned streams before closing the device. */
static int disable_device(struct enabled_dev *edev, bool force)
1015 {
1016 struct cras_iodev *dev = edev->dev;
1017 enum CRAS_STREAM_DIRECTION dir = dev->direction;
1018 struct cras_rstream *stream;
1019 struct device_enabled_cb *callback;
1020
1021 MAINLOG(main_log, MAIN_THREAD_DEV_DISABLE, dev->info.idx, force, 0);
1022 /*
	 * Remove from the enabled dev list. However, this dev could have a
	 * stream pinned to it, so only cancel pending init timers when the
	 * force flag is set.
1025 */
1026 DL_DELETE(enabled_devs[dir], edev);
1027 free(edev);
1028 dev->is_enabled = 0;
1029 if (force) {
1030 cancel_pending_init_retries(dev->info.idx);
1031 }
	/* If a pinned stream exists, simply disconnect all the normal
	 * streams from this device and return. */
1034 else if (stream_list_has_pinned_stream(stream_list, dev->info.idx)) {
1035 DL_FOREACH (stream_list_get(stream_list), stream) {
1036 if (stream->direction != dev->direction)
1037 continue;
1038 if (stream->is_pinned)
1039 continue;
1040 audio_thread_disconnect_stream(audio_thread, stream,
1041 dev);
1042 }
1043 return 0;
1044 }
1045
1046 DL_FOREACH (device_enable_cbs, callback)
1047 callback->disabled_cb(dev, callback->cb_data);
1048 close_dev(dev);
1049 dev->update_active_node(dev, dev->active_node->idx, 0);
1050
1051 return 0;
1052 }
1053
1054 /*
1055 * Exported Interface.
1056 */
1057
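/*
 * Illustrative startup sketch (not taken from cras_server itself; the device
 * pointers and node id below are placeholders created by the platform iodev
 * backends):
 *
 *   cras_iodev_list_init();
 *   cras_iodev_list_add_output(speaker_dev);
 *   cras_iodev_list_add_input(mic_dev);
 *   cras_iodev_list_select_node(CRAS_STREAM_OUTPUT, node_id);
 *   ...
 *   cras_iodev_list_deinit();
 */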
void cras_iodev_list_init()
1059 {
1060 struct cras_observer_ops observer_ops;
1061
1062 memset(&observer_ops, 0, sizeof(observer_ops));
1063 observer_ops.output_volume_changed = sys_vol_change;
1064 observer_ops.output_mute_changed = sys_mute_change;
1065 observer_ops.capture_mute_changed = sys_cap_mute_change;
1066 observer_ops.suspend_changed = sys_suspend_change;
1067 list_observer = cras_observer_add(&observer_ops, NULL);
1068 idle_timer = NULL;
1069
1070 main_log = main_thread_event_log_init();
1071
1072 /* Create the audio stream list for the system. */
1073 stream_list =
1074 stream_list_create(stream_added_cb, stream_removed_cb,
1075 cras_rstream_create, cras_rstream_destroy,
1076 cras_system_state_get_tm());
1077
1078 /* Add an empty device so there is always something to play to or
1079 * capture from. */
1080 fallback_devs[CRAS_STREAM_OUTPUT] = empty_iodev_create(
1081 CRAS_STREAM_OUTPUT, CRAS_NODE_TYPE_FALLBACK_NORMAL);
1082 fallback_devs[CRAS_STREAM_INPUT] = empty_iodev_create(
1083 CRAS_STREAM_INPUT, CRAS_NODE_TYPE_FALLBACK_NORMAL);
1084 enable_device(fallback_devs[CRAS_STREAM_OUTPUT]);
1085 enable_device(fallback_devs[CRAS_STREAM_INPUT]);
1086
1087 empty_hotword_dev =
1088 empty_iodev_create(CRAS_STREAM_INPUT, CRAS_NODE_TYPE_HOTWORD);
1089
1090 /* Create loopback devices. */
1091 loopdev_post_mix = loopback_iodev_create(LOOPBACK_POST_MIX_PRE_DSP);
1092 loopdev_post_dsp = loopback_iodev_create(LOOPBACK_POST_DSP);
1093
1094 audio_thread = audio_thread_create();
1095 if (!audio_thread) {
1096 syslog(LOG_ERR, "Fatal: audio thread init");
1097 exit(-ENOMEM);
1098 }
1099 audio_thread_start(audio_thread);
1100
1101 cras_iodev_list_update_device_list();
1102 }
1103
void cras_iodev_list_deinit()
1105 {
1106 audio_thread_destroy(audio_thread);
1107 loopback_iodev_destroy(loopdev_post_dsp);
1108 loopback_iodev_destroy(loopdev_post_mix);
1109 empty_iodev_destroy(empty_hotword_dev);
1110 empty_iodev_destroy(fallback_devs[CRAS_STREAM_INPUT]);
1111 empty_iodev_destroy(fallback_devs[CRAS_STREAM_OUTPUT]);
1112 stream_list_destroy(stream_list);
1113 main_thread_event_log_deinit(main_log);
1114 if (list_observer) {
1115 cras_observer_remove(list_observer);
1116 list_observer = NULL;
1117 }
1118 }
1119
int cras_iodev_list_dev_is_enabled(const struct cras_iodev *dev)
1121 {
1122 struct enabled_dev *edev;
1123
1124 DL_FOREACH (enabled_devs[dev->direction], edev) {
1125 if (edev->dev == dev)
1126 return 1;
1127 }
1128
1129 return 0;
1130 }
1131
void cras_iodev_list_enable_dev(struct cras_iodev *dev)
1133 {
1134 possibly_disable_fallback(dev->direction);
1135 /* Enable ucm setting of active node. */
1136 dev->update_active_node(dev, dev->active_node->idx, 1);
1137 enable_device(dev);
1138 cras_iodev_list_notify_active_node_changed(dev->direction);
1139 }
1140
void cras_iodev_list_add_active_node(enum CRAS_STREAM_DIRECTION dir,
				     cras_node_id_t node_id)
1143 {
1144 struct cras_iodev *new_dev;
1145 new_dev = find_dev(dev_index_of(node_id));
1146 if (!new_dev || new_dev->direction != dir)
1147 return;
1148
1149 MAINLOG(main_log, MAIN_THREAD_ADD_ACTIVE_NODE, new_dev->info.idx, 0, 0);
1150
	/* If the new dev is already enabled but its active node needs to be
	 * changed, disable the new dev first, update the active node, and
	 * then re-enable it.
	 */
1155 if (cras_iodev_list_dev_is_enabled(new_dev)) {
1156 if (node_index_of(node_id) == new_dev->active_node->idx)
1157 return;
1158 else
1159 cras_iodev_list_disable_dev(new_dev, true);
1160 }
1161
1162 new_dev->update_active_node(new_dev, node_index_of(node_id), 1);
1163 cras_iodev_list_enable_dev(new_dev);
1164 }
1165
1166 /*
1167 * Disables device which may or may not be in enabled_devs list.
1168 */
void cras_iodev_list_disable_dev(struct cras_iodev *dev, bool force_close)
1170 {
1171 struct enabled_dev *edev, *edev_to_disable = NULL;
1172
1173 int is_the_only_enabled_device = 1;
1174
1175 DL_FOREACH (enabled_devs[dev->direction], edev) {
1176 if (edev->dev == dev)
1177 edev_to_disable = edev;
1178 else
1179 is_the_only_enabled_device = 0;
1180 }
1181
1182 /*
1183 * Disables the device for these two cases:
1184 * 1. Disable a device in the enabled_devs list.
1185 * 2. Force close a device that is not in the enabled_devs list,
1186 * but it is running a pinned stream.
1187 */
1188 if (!edev_to_disable) {
1189 if (force_close)
1190 close_pinned_device(dev);
1191 return;
1192 }
1193
1194 /* If the device to be closed is the only enabled device, we should
1195 * enable the fallback device first then disable the target
1196 * device. */
1197 if (is_the_only_enabled_device && fallback_devs[dev->direction])
1198 enable_device(fallback_devs[dev->direction]);
1199
1200 disable_device(edev_to_disable, force_close);
1201
1202 cras_iodev_list_notify_active_node_changed(dev->direction);
1203 return;
1204 }
1205
void cras_iodev_list_suspend_dev(unsigned int dev_idx)
1207 {
1208 struct cras_iodev *dev = find_dev(dev_idx);
1209
1210 if (!dev)
1211 return;
1212
1213 /* Remove all streams including the pinned streams, and close
1214 * this iodev. */
1215 close_dev(dev);
1216 dev->update_active_node(dev, dev->active_node->idx, 0);
1217 }
1218
void cras_iodev_list_resume_dev(unsigned int dev_idx)
1220 {
1221 struct cras_iodev *dev = find_dev(dev_idx);
1222 int rc;
1223
1224 if (!dev)
1225 return;
1226
1227 dev->update_active_node(dev, dev->active_node->idx, 1);
1228 rc = init_and_attach_streams(dev);
1229 if (rc == 0) {
1230 /* If dev initialize succeeded and this is not a pinned device,
1231 * disable the silent fallback device because it's just
1232 * unnecessary. */
1233 if (!stream_list_has_pinned_stream(stream_list, dev_idx))
1234 possibly_disable_fallback(dev->direction);
1235 } else {
1236 syslog(LOG_INFO, "Enable dev fail at resume, rc %d", rc);
1237 schedule_init_device_retry(dev);
1238 }
1239 }
1240
void cras_iodev_list_set_dev_mute(unsigned int dev_idx)
1242 {
1243 struct cras_iodev *dev;
1244
1245 dev = find_dev(dev_idx);
1246 if (!dev)
1247 return;
1248
1249 cras_iodev_set_mute(dev);
1250 }
1251
void cras_iodev_list_rm_active_node(enum CRAS_STREAM_DIRECTION dir,
				    cras_node_id_t node_id)
1254 {
1255 struct cras_iodev *dev;
1256
1257 dev = find_dev(dev_index_of(node_id));
1258 if (!dev)
1259 return;
1260
1261 cras_iodev_list_disable_dev(dev, false);
1262 }
1263
int cras_iodev_list_add_output(struct cras_iodev *output)
1265 {
1266 int rc;
1267
1268 if (output->direction != CRAS_STREAM_OUTPUT)
1269 return -EINVAL;
1270
1271 rc = add_dev_to_list(output);
1272 if (rc)
1273 return rc;
1274
1275 MAINLOG(main_log, MAIN_THREAD_ADD_TO_DEV_LIST, output->info.idx,
1276 CRAS_STREAM_OUTPUT, 0);
1277 return 0;
1278 }
1279
int cras_iodev_list_add_input(struct cras_iodev *input)
1281 {
1282 int rc;
1283
1284 if (input->direction != CRAS_STREAM_INPUT)
1285 return -EINVAL;
1286
1287 rc = add_dev_to_list(input);
1288 if (rc)
1289 return rc;
1290
1291 MAINLOG(main_log, MAIN_THREAD_ADD_TO_DEV_LIST, input->info.idx,
1292 CRAS_STREAM_INPUT, 0);
1293 return 0;
1294 }
1295
int cras_iodev_list_rm_output(struct cras_iodev *dev)
1297 {
1298 int res;
1299
1300 /* Retire the current active output device before removing it from
1301 * list, otherwise it could be busy and remain in the list.
1302 */
1303 cras_iodev_list_disable_dev(dev, true);
1304 res = rm_dev_from_list(dev);
1305 if (res == 0)
1306 cras_iodev_list_update_device_list();
1307 return res;
1308 }
1309
int cras_iodev_list_rm_input(struct cras_iodev *dev)
1311 {
1312 int res;
1313
1314 /* Retire the current active input device before removing it from
1315 * list, otherwise it could be busy and remain in the list.
1316 */
1317 cras_iodev_list_disable_dev(dev, true);
1318 res = rm_dev_from_list(dev);
1319 if (res == 0)
1320 cras_iodev_list_update_device_list();
1321 return res;
1322 }
1323
int cras_iodev_list_get_outputs(struct cras_iodev_info **list_out)
1325 {
1326 return get_dev_list(&devs[CRAS_STREAM_OUTPUT], list_out);
1327 }
1328
int cras_iodev_list_get_inputs(struct cras_iodev_info **list_out)
1330 {
1331 return get_dev_list(&devs[CRAS_STREAM_INPUT], list_out);
1332 }
1333
1334 struct cras_iodev *
cras_iodev_list_get_first_enabled_iodev(enum CRAS_STREAM_DIRECTION direction)
1336 {
1337 struct enabled_dev *edev = enabled_devs[direction];
1338
1339 return edev ? edev->dev : NULL;
1340 }
1341
1342 struct cras_iodev *
cras_iodev_list_get_sco_pcm_iodev(enum CRAS_STREAM_DIRECTION direction)
1344 {
1345 struct cras_iodev *dev;
1346 struct cras_ionode *node;
1347
1348 DL_FOREACH (devs[direction].iodevs, dev) {
1349 DL_FOREACH (dev->nodes, node) {
1350 if (node->is_sco_pcm)
1351 return dev;
1352 }
1353 }
1354
1355 return NULL;
1356 }
1357
1358 cras_node_id_t
cras_iodev_list_get_active_node_id(enum CRAS_STREAM_DIRECTION direction)
1360 {
1361 struct enabled_dev *edev = enabled_devs[direction];
1362
1363 if (!edev || !edev->dev || !edev->dev->active_node)
1364 return 0;
1365
1366 return cras_make_node_id(edev->dev->info.idx,
1367 edev->dev->active_node->idx);
1368 }
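/*
 * Note: cras_make_node_id() (from cras_types.h) packs a device index and a
 * node index into a single 64-bit cras_node_id_t; the layout is assumed here
 * to be device index in the upper 32 bits and node index in the lower 32
 * bits, which is why dev_index_of() and node_index_of() are used throughout
 * this file to split an id back apart.
 */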
1369
void cras_iodev_list_update_device_list()
1371 {
1372 struct cras_server_state *state;
1373
1374 state = cras_system_state_update_begin();
1375 if (!state)
1376 return;
1377
1378 state->num_output_devs = devs[CRAS_STREAM_OUTPUT].size;
1379 state->num_input_devs = devs[CRAS_STREAM_INPUT].size;
1380 fill_dev_list(&devs[CRAS_STREAM_OUTPUT], &state->output_devs[0],
1381 CRAS_MAX_IODEVS);
1382 fill_dev_list(&devs[CRAS_STREAM_INPUT], &state->input_devs[0],
1383 CRAS_MAX_IODEVS);
1384
1385 state->num_output_nodes =
1386 fill_node_list(&devs[CRAS_STREAM_OUTPUT],
1387 &state->output_nodes[0], CRAS_MAX_IONODES);
1388 state->num_input_nodes =
1389 fill_node_list(&devs[CRAS_STREAM_INPUT], &state->input_nodes[0],
1390 CRAS_MAX_IONODES);
1391
1392 cras_system_state_update_complete();
1393 }
1394
1395 /* Look up the first hotword stream and the device it pins to. */
int find_hotword_stream_dev(struct cras_iodev **dev,
			    struct cras_rstream **stream)
1398 {
1399 DL_FOREACH (stream_list_get(stream_list), *stream) {
1400 if (((*stream)->flags & HOTWORD_STREAM) != HOTWORD_STREAM)
1401 continue;
1402
1403 *dev = find_dev((*stream)->pinned_dev_idx);
1404 if (*dev == NULL)
1405 return -ENOENT;
1406 break;
1407 }
1408 return 0;
1409 }
1410
1411 /* Suspend/resume hotword streams functions are used to provide seamless
1412 * experience to cras clients when there's hardware limitation about concurrent
1413 * DSP and normal recording. The empty hotword iodev is used to hold all
1414 * hotword streams during suspend, so client side will not know about the
1415 * transition, and can still remove or add streams. At resume, the real hotword
1416 * device will be initialized and opened again to re-arm the DSP.
1417 */
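/*
 * Illustrative sequence of the mechanism described above:
 *   cras_iodev_list_suspend_hotword_streams()
 *     - hotword streams are moved onto empty_hotword_dev, the real hotword
 *       iodev is closed, and hotword_suspended is set.
 *   ... capture that conflicts with the hotword DSP runs ...
 *   cras_iodev_list_resume_hotword_stream()
 *     - streams are moved back to the real hotword iodev (re-arming the DSP),
 *       empty_hotword_dev is closed, and hotword_suspended is cleared.
 */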
int cras_iodev_list_suspend_hotword_streams()
1419 {
1420 struct cras_iodev *hotword_dev;
1421 struct cras_rstream *stream = NULL;
1422 int rc;
1423
1424 rc = find_hotword_stream_dev(&hotword_dev, &stream);
1425 if (rc)
1426 return rc;
1427
1428 if (stream == NULL) {
1429 hotword_suspended = 1;
1430 return 0;
1431 }
1432 /* Move all existing hotword streams to the empty hotword iodev. */
1433 init_pinned_device(empty_hotword_dev, stream);
1434 DL_FOREACH (stream_list_get(stream_list), stream) {
1435 if ((stream->flags & HOTWORD_STREAM) != HOTWORD_STREAM)
1436 continue;
1437 if (stream->pinned_dev_idx != hotword_dev->info.idx) {
1438 syslog(LOG_ERR,
1439 "Failed to suspend hotword stream on dev %u",
1440 stream->pinned_dev_idx);
1441 continue;
1442 }
1443
1444 audio_thread_disconnect_stream(audio_thread, stream,
1445 hotword_dev);
1446 audio_thread_add_stream(audio_thread, stream,
1447 &empty_hotword_dev, 1);
1448 }
1449 close_pinned_device(hotword_dev);
1450 hotword_suspended = 1;
1451 return 0;
1452 }
1453
int cras_iodev_list_resume_hotword_stream()
1455 {
1456 struct cras_iodev *hotword_dev;
1457 struct cras_rstream *stream = NULL;
1458 int rc;
1459
1460 rc = find_hotword_stream_dev(&hotword_dev, &stream);
1461 if (rc)
1462 return rc;
1463
1464 if (stream == NULL) {
1465 hotword_suspended = 0;
1466 return 0;
1467 }
1468 /* Move all existing hotword streams to the real hotword iodev. */
1469 init_pinned_device(hotword_dev, stream);
1470 DL_FOREACH (stream_list_get(stream_list), stream) {
1471 if ((stream->flags & HOTWORD_STREAM) != HOTWORD_STREAM)
1472 continue;
1473 if (stream->pinned_dev_idx != hotword_dev->info.idx) {
1474 syslog(LOG_ERR,
			       "Failed to resume hotword stream on dev %u",
1476 stream->pinned_dev_idx);
1477 continue;
1478 }
1479
1480 audio_thread_disconnect_stream(audio_thread, stream,
1481 empty_hotword_dev);
1482 audio_thread_add_stream(audio_thread, stream, &hotword_dev, 1);
1483 }
1484 close_pinned_device(empty_hotword_dev);
1485 hotword_suspended = 0;
1486 return 0;
1487 }
1488
char *cras_iodev_list_get_hotword_models(cras_node_id_t node_id)
1490 {
1491 struct cras_iodev *dev = NULL;
1492
1493 dev = find_dev(dev_index_of(node_id));
1494 if (!dev || !dev->get_hotword_models ||
1495 (dev->active_node->type != CRAS_NODE_TYPE_HOTWORD))
1496 return NULL;
1497
1498 return dev->get_hotword_models(dev);
1499 }
1500
int cras_iodev_list_set_hotword_model(cras_node_id_t node_id,
				      const char *model_name)
1503 {
1504 int ret;
1505 struct cras_iodev *dev = find_dev(dev_index_of(node_id));
1506 if (!dev || !dev->get_hotword_models ||
1507 (dev->active_node->type != CRAS_NODE_TYPE_HOTWORD))
1508 return -EINVAL;
1509
1510 ret = dev->set_hotword_model(dev, model_name);
1511 if (!ret)
1512 strncpy(dev->active_node->active_hotword_model, model_name,
1513 sizeof(dev->active_node->active_hotword_model) - 1);
1514 return ret;
1515 }
1516
void cras_iodev_list_notify_nodes_changed()
1518 {
1519 cras_observer_notify_nodes();
1520 }
1521
void cras_iodev_list_notify_active_node_changed(
	enum CRAS_STREAM_DIRECTION direction)
1524 {
1525 cras_observer_notify_active_node(
1526 direction, cras_iodev_list_get_active_node_id(direction));
1527 }
1528
void cras_iodev_list_select_node(enum CRAS_STREAM_DIRECTION direction,
				 cras_node_id_t node_id)
1531 {
1532 struct cras_iodev *new_dev = NULL;
1533 struct enabled_dev *edev;
1534 int new_node_already_enabled = 0;
1535 struct cras_rstream *rstream;
1536 int has_output_stream = 0;
1537 int rc;
1538
1539 /* find the devices for the id. */
1540 new_dev = find_dev(dev_index_of(node_id));
1541
1542 MAINLOG(main_log, MAIN_THREAD_SELECT_NODE, dev_index_of(node_id), 0, 0);
1543
1544 /* Do nothing if the direction is mismatched. The new_dev == NULL case
1545 could happen if node_id is 0 (no selection), or the client tries
1546 to select a non-existing node (maybe it's unplugged just before
1547 the client selects it). We will just behave like there is no selected
1548 node. */
1549 if (new_dev && new_dev->direction != direction)
1550 return;
1551
1552 /* Determine whether the new device and node are already enabled - if
1553 * they are, the selection algorithm should avoid disabling the new
1554 * device. */
1555 DL_FOREACH (enabled_devs[direction], edev) {
1556 if (edev->dev == new_dev &&
1557 edev->dev->active_node->idx == node_index_of(node_id)) {
1558 new_node_already_enabled = 1;
1559 break;
1560 }
1561 }
1562
1563 /* Enable fallback device during the transition so client will not be
1564 * blocked in this duration, which is as long as 300 ms on some boards
1565 * before new device is opened.
1566 * Note that the fallback node is not needed if the new node is already
1567 * enabled - the new node will remain enabled. */
1568 if (!new_node_already_enabled)
1569 possibly_enable_fallback(direction, false);
1570
1571 DL_FOREACH (enabled_devs[direction], edev) {
1572 /* Don't disable fallback devices. */
1573 if (edev->dev == fallback_devs[direction])
1574 continue;
1575 /*
1576 * Disable enabled device if it's not the new one, use non-force
1577 * disable call so we don't interrupt existing pinned streams on
1578 * it.
1579 */
1580 if (edev->dev != new_dev) {
1581 disable_device(edev, false);
1582 }
		/*
		 * Otherwise, if this happens to be the new device but we are
		 * about to select a different node on the same dev, force
		 * disable this device so that no pinned stream keeps
		 * occupying it in the audio thread and causes problems in
		 * the later update_active_node call.
		 */
1589 else if (!new_node_already_enabled) {
1590 disable_device(edev, true);
1591 }
1592 }
1593
1594 if (new_dev && !new_node_already_enabled) {
1595 new_dev->update_active_node(new_dev, node_index_of(node_id), 1);
1596
		/* To reduce the pop noise of an active device change, mute
		 * new_dev for RAMP_SWITCH_MUTE_DURATION_SECS seconds.
		 */
1600 DL_FOREACH (stream_list_get(stream_list), rstream) {
1601 if (rstream->direction == CRAS_STREAM_OUTPUT)
1602 has_output_stream++;
1603 }
1604 if (direction == CRAS_STREAM_OUTPUT && has_output_stream) {
1605 new_dev->initial_ramp_request =
1606 CRAS_IODEV_RAMP_REQUEST_SWITCH_MUTE;
1607 }
1608
1609 rc = enable_device(new_dev);
1610 if (rc == 0) {
1611 /* Disable fallback device after new device is enabled.
1612 * Leave the fallback device enabled if new_dev failed
1613 * to open, or the new_dev == NULL case. */
1614 possibly_disable_fallback(direction);
1615 }
1616 }
1617
1618 cras_iodev_list_notify_active_node_changed(direction);
1619 }
1620
static int set_node_plugged(struct cras_iodev *iodev, unsigned int node_idx,
			    int plugged)
1623 {
1624 struct cras_ionode *node;
1625
1626 node = find_node(iodev, node_idx);
1627 if (!node)
1628 return -EINVAL;
1629 cras_iodev_set_node_plugged(node, plugged);
1630 return 0;
1631 }
1632
static int set_node_volume(struct cras_iodev *iodev, unsigned int node_idx,
			   int volume)
1635 {
1636 struct cras_ionode *node;
1637
1638 node = find_node(iodev, node_idx);
1639 if (!node)
1640 return -EINVAL;
1641
1642 if (iodev->ramp && cras_iodev_software_volume_needed(iodev) &&
1643 !cras_system_get_mute())
1644 cras_iodev_start_volume_ramp(iodev, node->volume, volume);
1645
1646 node->volume = volume;
1647 if (iodev->set_volume)
1648 iodev->set_volume(iodev);
1649 cras_iodev_list_notify_node_volume(node);
1650 MAINLOG(main_log, MAIN_THREAD_OUTPUT_NODE_VOLUME, iodev->info.idx,
1651 volume, 0);
1652 return 0;
1653 }
1654
static int set_node_capture_gain(struct cras_iodev *iodev,
				 unsigned int node_idx, int value)
1657 {
1658 struct cras_ionode *node;
1659 int db_scale;
1660
1661 node = find_node(iodev, node_idx);
1662 if (!node)
1663 return -EINVAL;
1664
	/* Clamp the value to the range 0 - 100. */
1666 if (value < 0)
1667 value = 0;
1668 if (value > 100)
1669 value = 100;
1670
	/* Linearly maps (0, 50) to (-4000, 0) and (50, 100) to (0, 2000) dBFS.
	 * Calculate and store the corresponding scaler in ui_gain_scaler. */
1673 db_scale = (value > 50) ? 40 : 80;
1674 node->ui_gain_scaler =
1675 convert_softvol_scaler_from_dB((value - 50) * db_scale);
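	/*
	 * Worked examples of the mapping above (assuming, as the -4000..2000
	 * range suggests, that convert_softvol_scaler_from_dB() takes its
	 * argument in hundredths of a dB):
	 *   value = 0   -> (0 - 50) * 80   = -4000  (-40 dB)
	 *   value = 50  -> (50 - 50) * 40  = 0      (unity gain)
	 *   value = 75  -> (75 - 50) * 40  = 1000   (+10 dB)
	 *   value = 100 -> (100 - 50) * 40 = 2000   (+20 dB)
	 */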
1676
1677 if (iodev->set_capture_gain)
1678 iodev->set_capture_gain(iodev);
1679 cras_iodev_list_notify_node_capture_gain(node);
1680 MAINLOG(main_log, MAIN_THREAD_INPUT_NODE_GAIN, iodev->info.idx, value,
1681 0);
1682 return 0;
1683 }
1684
static int set_node_left_right_swapped(struct cras_iodev *iodev,
				       unsigned int node_idx,
				       int left_right_swapped)
1688 {
1689 struct cras_ionode *node;
1690 int rc;
1691
1692 if (!iodev->set_swap_mode_for_node)
1693 return -EINVAL;
1694 node = find_node(iodev, node_idx);
1695 if (!node)
1696 return -EINVAL;
1697
1698 rc = iodev->set_swap_mode_for_node(iodev, node, left_right_swapped);
1699 if (rc) {
1700 syslog(LOG_ERR, "Failed to set swap mode on node %s to %d",
1701 node->name, left_right_swapped);
1702 return rc;
1703 }
1704 node->left_right_swapped = left_right_swapped;
1705 cras_iodev_list_notify_node_left_right_swapped(node);
1706 return 0;
1707 }
1708
int cras_iodev_list_set_node_attr(cras_node_id_t node_id, enum ionode_attr attr,
				  int value)
1711 {
1712 struct cras_iodev *iodev;
1713 int rc = 0;
1714
1715 iodev = find_dev(dev_index_of(node_id));
1716 if (!iodev)
1717 return -EINVAL;
1718
1719 switch (attr) {
1720 case IONODE_ATTR_PLUGGED:
1721 rc = set_node_plugged(iodev, node_index_of(node_id), value);
1722 break;
1723 case IONODE_ATTR_VOLUME:
1724 rc = set_node_volume(iodev, node_index_of(node_id), value);
1725 break;
1726 case IONODE_ATTR_CAPTURE_GAIN:
1727 rc = set_node_capture_gain(iodev, node_index_of(node_id),
1728 value);
1729 break;
1730 case IONODE_ATTR_SWAP_LEFT_RIGHT:
1731 rc = set_node_left_right_swapped(iodev, node_index_of(node_id),
1732 value);
1733 break;
1734 default:
1735 return -EINVAL;
1736 }
1737
1738 return rc;
1739 }
1740
void cras_iodev_list_notify_node_volume(struct cras_ionode *node)
1742 {
1743 cras_node_id_t id = cras_make_node_id(node->dev->info.idx, node->idx);
1744 cras_iodev_list_update_device_list();
1745 cras_observer_notify_output_node_volume(id, node->volume);
1746 }
1747
void cras_iodev_list_notify_node_left_right_swapped(struct cras_ionode *node)
1749 {
1750 cras_node_id_t id = cras_make_node_id(node->dev->info.idx, node->idx);
1751 cras_iodev_list_update_device_list();
1752 cras_observer_notify_node_left_right_swapped(id,
1753 node->left_right_swapped);
1754 }
1755
void cras_iodev_list_notify_node_capture_gain(struct cras_ionode *node)
1757 {
1758 cras_node_id_t id = cras_make_node_id(node->dev->info.idx, node->idx);
1759 cras_iodev_list_update_device_list();
1760 cras_observer_notify_input_node_gain(id, node->capture_gain);
1761 }
1762
void cras_iodev_list_add_test_dev(enum TEST_IODEV_TYPE type)
1764 {
1765 if (type != TEST_IODEV_HOTWORD)
1766 return;
1767 test_iodev_create(CRAS_STREAM_INPUT, type);
1768 }
1769
void cras_iodev_list_test_dev_command(unsigned int iodev_idx,
				      enum CRAS_TEST_IODEV_CMD command,
				      unsigned int data_len,
				      const uint8_t *data)
1774 {
1775 struct cras_iodev *dev = find_dev(iodev_idx);
1776
1777 if (!dev)
1778 return;
1779
1780 test_iodev_command(dev, command, data_len, data);
1781 }
1782
struct audio_thread *cras_iodev_list_get_audio_thread()
1784 {
1785 return audio_thread;
1786 }
1787
struct stream_list *cras_iodev_list_get_stream_list()
1789 {
1790 return stream_list;
1791 }
1792
int cras_iodev_list_set_device_enabled_callback(
	device_enabled_callback_t enabled_cb,
	device_disabled_callback_t disabled_cb, void *cb_data)
1796 {
1797 struct device_enabled_cb *callback;
1798
1799 DL_FOREACH (device_enable_cbs, callback) {
1800 if (callback->cb_data != cb_data)
1801 continue;
1802
1803 DL_DELETE(device_enable_cbs, callback);
1804 free(callback);
1805 }
1806
1807 if (enabled_cb && disabled_cb) {
1808 callback = (struct device_enabled_cb *)calloc(
1809 1, sizeof(*callback));
1810 callback->enabled_cb = enabled_cb;
1811 callback->disabled_cb = disabled_cb;
1812 callback->cb_data = cb_data;
1813 DL_APPEND(device_enable_cbs, callback);
1814 }
1815
1816 return 0;
1817 }
1818
void cras_iodev_list_register_loopback(enum CRAS_LOOPBACK_TYPE loopback_type,
				       unsigned int output_dev_idx,
				       loopback_hook_data_t hook_data,
				       loopback_hook_control_t hook_control,
				       unsigned int loopback_dev_idx)
1824 {
1825 struct cras_iodev *iodev = find_dev(output_dev_idx);
1826 struct cras_iodev *loopback_dev;
1827 struct cras_loopback *loopback;
1828 bool dev_open;
1829
1830 if (iodev == NULL) {
1831 syslog(LOG_ERR, "Output dev %u not found for loopback",
1832 output_dev_idx);
1833 return;
1834 }
1835
1836 loopback_dev = find_dev(loopback_dev_idx);
1837 if (loopback_dev == NULL) {
1838 syslog(LOG_ERR, "Loopback dev %u not found", loopback_dev_idx);
1839 return;
1840 }
1841
1842 dev_open = cras_iodev_is_open(iodev);
1843
1844 loopback = (struct cras_loopback *)calloc(1, sizeof(*loopback));
1845 if (NULL == loopback) {
1846 syslog(LOG_ERR, "Not enough memory for loopback");
1847 return;
1848 }
1849
1850 loopback->type = loopback_type;
1851 loopback->hook_data = hook_data;
1852 loopback->hook_control = hook_control;
1853 loopback->cb_data = loopback_dev;
1854 if (loopback->hook_control && dev_open)
1855 loopback->hook_control(true, loopback->cb_data);
1856
1857 DL_APPEND(iodev->loopbacks, loopback);
1858 }
1859
void cras_iodev_list_unregister_loopback(enum CRAS_LOOPBACK_TYPE type,
					 unsigned int output_dev_idx,
					 unsigned int loopback_dev_idx)
1863 {
1864 struct cras_iodev *iodev = find_dev(output_dev_idx);
1865 struct cras_iodev *loopback_dev;
1866 struct cras_loopback *loopback;
1867
1868 if (iodev == NULL)
1869 return;
1870
1871 loopback_dev = find_dev(loopback_dev_idx);
1872 if (loopback_dev == NULL)
1873 return;
1874
1875 DL_FOREACH (iodev->loopbacks, loopback) {
1876 if ((loopback->cb_data == loopback_dev) &&
1877 (loopback->type == type)) {
1878 DL_DELETE(iodev->loopbacks, loopback);
1879 free(loopback);
1880 }
1881 }
1882 }
1883
void cras_iodev_list_reset_for_noise_cancellation()
1885 {
1886 struct cras_iodev *dev;
1887 bool enabled = cras_system_get_noise_cancellation_enabled();
1888
1889 DL_FOREACH (devs[CRAS_STREAM_INPUT].iodevs, dev) {
1890 if (!cras_iodev_is_open(dev) ||
1891 !cras_iodev_support_noise_cancellation(dev))
1892 continue;
1893 syslog(LOG_INFO, "Re-open %s for %s noise cancellation",
1894 dev->info.name, enabled ? "enabling" : "disabling");
1895 possibly_enable_fallback(CRAS_STREAM_INPUT, false);
1896 cras_iodev_list_suspend_dev(dev->info.idx);
1897 cras_iodev_list_resume_dev(dev->info.idx);
1898 possibly_disable_fallback(CRAS_STREAM_INPUT);
1899 }
1900 }
1901
void cras_iodev_list_reset()
1903 {
1904 struct enabled_dev *edev;
1905
1906 DL_FOREACH (enabled_devs[CRAS_STREAM_OUTPUT], edev) {
1907 DL_DELETE(enabled_devs[CRAS_STREAM_OUTPUT], edev);
1908 free(edev);
1909 }
1910 enabled_devs[CRAS_STREAM_OUTPUT] = NULL;
1911 DL_FOREACH (enabled_devs[CRAS_STREAM_INPUT], edev) {
1912 DL_DELETE(enabled_devs[CRAS_STREAM_INPUT], edev);
1913 free(edev);
1914 }
1915 enabled_devs[CRAS_STREAM_INPUT] = NULL;
1916 devs[CRAS_STREAM_OUTPUT].iodevs = NULL;
1917 devs[CRAS_STREAM_INPUT].iodevs = NULL;
1918 devs[CRAS_STREAM_OUTPUT].size = 0;
1919 devs[CRAS_STREAM_INPUT].size = 0;
1920 }
1921