1 /* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
2 * Use of this source code is governed by a BSD-style license that can be
3 * found in the LICENSE file.
4 */
5
6 #include <syslog.h>
7
8 #include "audio_thread.h"
9 #include "cras_empty_iodev.h"
10 #include "cras_iodev.h"
11 #include "cras_iodev_info.h"
12 #include "cras_iodev_list.h"
13 #include "cras_loopback_iodev.h"
14 #include "cras_observer.h"
15 #include "cras_rstream.h"
16 #include "cras_server.h"
17 #include "cras_tm.h"
18 #include "cras_types.h"
19 #include "cras_system_state.h"
20 #include "server_stream.h"
21 #include "stream_list.h"
22 #include "test_iodev.h"
23 #include "utlist.h"
24
25 const struct timespec idle_timeout_interval = { .tv_sec = 10, .tv_nsec = 0 };
26
27 /* Linked list of available devices. */
28 struct iodev_list {
29 struct cras_iodev *iodevs;
30 size_t size;
31 };
32
33 /* List of enabled input/output devices.
34  * dev - The device.
35  * prev, next - Links in the list of enabled devices.
36  */
37 struct enabled_dev {
38 struct cras_iodev *dev;
39 struct enabled_dev *prev, *next;
40 };
41
42 struct dev_init_retry {
43 int dev_idx;
44 struct cras_timer *init_timer;
45 struct dev_init_retry *next, *prev;
46 };
47
48 struct device_enabled_cb {
49 device_enabled_callback_t enabled_cb;
50 device_disabled_callback_t disabled_cb;
51 void *cb_data;
52 struct device_enabled_cb *next, *prev;
53 };
54
55 /* Lists for devs[CRAS_STREAM_INPUT] and devs[CRAS_STREAM_OUTPUT]. */
56 static struct iodev_list devs[CRAS_NUM_DIRECTIONS];
57 /* The observer client iodev_list used to listen on various events. */
58 static struct cras_observer_client *list_observer;
59 /* Keep a list of enabled inputs and outputs. */
60 static struct enabled_dev *enabled_devs[CRAS_NUM_DIRECTIONS];
61 /* Keep an empty device per direction. */
62 static struct cras_iodev *fallback_devs[CRAS_NUM_DIRECTIONS];
63 /* Special empty device for hotword streams. */
64 static struct cras_iodev *empty_hotword_dev;
65 /* Loopback devices. */
66 static struct cras_iodev *loopdev_post_mix;
67 static struct cras_iodev *loopdev_post_dsp;
68 /* List of pending device init retries. */
69 static struct dev_init_retry *init_retries;
70
71 /* Keep a constantly increasing index for iodevs. Index 0 is reserved
72 * to mean "no device". */
73 static uint32_t next_iodev_idx = MAX_SPECIAL_DEVICE_IDX;
74
75 /* Callbacks invoked when a device is enabled or disabled. */
76 struct device_enabled_cb *device_enable_cbs;
77
78 /* Thread that handles audio input and output. */
79 static struct audio_thread *audio_thread;
80 /* List of all streams. */
81 static struct stream_list *stream_list;
82 /* Idle device timer. */
83 static struct cras_timer *idle_timer;
84 /* Flag to indicate that the stream list is disconnected from audio thread. */
85 static int stream_list_suspended = 0;
86 /* If device initialization fails, retry after 1 second. */
87 static const unsigned int INIT_DEV_DELAY_MS = 1000;
88 /* Flag to indicate that hotword streams are suspended. */
89 static int hotword_suspended = 0;
90
91 static void idle_dev_check(struct cras_timer *timer, void *data);
92
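/* Finds a device by index in either the input or output list. Returns NULL
 * if no device matches. */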
93 static struct cras_iodev *find_dev(size_t dev_index)
94 {
95 struct cras_iodev *dev;
96
97 DL_FOREACH (devs[CRAS_STREAM_OUTPUT].iodevs, dev)
98 if (dev->info.idx == dev_index)
99 return dev;
100
101 DL_FOREACH (devs[CRAS_STREAM_INPUT].iodevs, dev)
102 if (dev->info.idx == dev_index)
103 return dev;
104
105 return NULL;
106 }
107
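/* Finds the node with the given index on an iodev, or NULL if none matches. */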
108 static struct cras_ionode *find_node(struct cras_iodev *iodev,
109 unsigned int node_idx)
110 {
111 struct cras_ionode *node;
112 DL_SEARCH_SCALAR(iodev->nodes, node, idx, node_idx);
113 return node;
114 }
115
116 /* Adds a device to the list. Used from add_input and add_output. */
117 static int add_dev_to_list(struct cras_iodev *dev)
118 {
119 struct cras_iodev *tmp;
120 uint32_t new_idx;
121 struct iodev_list *list = &devs[dev->direction];
122
123 DL_FOREACH (list->iodevs, tmp)
124 if (tmp == dev)
125 return -EEXIST;
126
127 dev->format = NULL;
129 dev->prev = dev->next = NULL;
130
131 /* Move to the next index and make sure it isn't taken. */
132 new_idx = next_iodev_idx;
133 while (1) {
134 if (new_idx < MAX_SPECIAL_DEVICE_IDX)
135 new_idx = MAX_SPECIAL_DEVICE_IDX;
136 DL_SEARCH_SCALAR(list->iodevs, tmp, info.idx, new_idx);
137 if (tmp == NULL)
138 break;
139 new_idx++;
140 }
141 dev->info.idx = new_idx;
142 next_iodev_idx = new_idx + 1;
143 list->size++;
144
145 syslog(LOG_INFO, "Adding %s dev at index %u.",
146 dev->direction == CRAS_STREAM_OUTPUT ? "output" : "input",
147 dev->info.idx);
148 DL_PREPEND(list->iodevs, dev);
149
150 cras_iodev_list_update_device_list();
151 return 0;
152 }
153
154 /* Removes a device from the list. Used from rm_input and rm_output. */
155 static int rm_dev_from_list(struct cras_iodev *dev)
156 {
157 struct cras_iodev *tmp;
158
159 DL_FOREACH (devs[dev->direction].iodevs, tmp)
160 if (tmp == dev) {
161 if (cras_iodev_is_open(dev))
162 return -EBUSY;
163 DL_DELETE(devs[dev->direction].iodevs, dev);
164 devs[dev->direction].size--;
165 return 0;
166 }
167
168 /* Device not found. */
169 return -EINVAL;
170 }
171
172 /* Fills a dev_info array from the iodev_list. */
173 static void fill_dev_list(struct iodev_list *list,
174 struct cras_iodev_info *dev_info, size_t out_size)
175 {
176 int i = 0;
177 struct cras_iodev *tmp;
178 DL_FOREACH (list->iodevs, tmp) {
179 memcpy(&dev_info[i], &tmp->info, sizeof(dev_info[0]));
180 i++;
181 if (i == out_size)
182 return;
183 }
184 }
185
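/* Maps a node's type (and mic position) to the human-readable string reported
 * in the node info. */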
186 static const char *node_type_to_str(struct cras_ionode *node)
187 {
188 switch (node->type) {
189 case CRAS_NODE_TYPE_INTERNAL_SPEAKER:
190 return "INTERNAL_SPEAKER";
191 case CRAS_NODE_TYPE_HEADPHONE:
192 return "HEADPHONE";
193 case CRAS_NODE_TYPE_HDMI:
194 return "HDMI";
195 case CRAS_NODE_TYPE_HAPTIC:
196 return "HAPTIC";
197 case CRAS_NODE_TYPE_MIC:
198 switch (node->position) {
199 case NODE_POSITION_INTERNAL:
200 return "INTERNAL_MIC";
201 case NODE_POSITION_FRONT:
202 return "FRONT_MIC";
203 case NODE_POSITION_REAR:
204 return "REAR_MIC";
205 case NODE_POSITION_KEYBOARD:
206 return "KEYBOARD_MIC";
207 case NODE_POSITION_EXTERNAL:
208 default:
209 return "MIC";
210 }
211 case CRAS_NODE_TYPE_HOTWORD:
212 return "HOTWORD";
213 case CRAS_NODE_TYPE_LINEOUT:
214 return "LINEOUT";
215 case CRAS_NODE_TYPE_POST_MIX_PRE_DSP:
216 return "POST_MIX_LOOPBACK";
217 case CRAS_NODE_TYPE_POST_DSP:
218 return "POST_DSP_LOOPBACK";
219 case CRAS_NODE_TYPE_USB:
220 return "USB";
221 case CRAS_NODE_TYPE_BLUETOOTH:
222 return "BLUETOOTH";
223 case CRAS_NODE_TYPE_FALLBACK_NORMAL:
224 return "FALLBACK_NORMAL";
225 case CRAS_NODE_TYPE_FALLBACK_ABNORMAL:
226 return "FALLBACK_ABNORMAL";
227 case CRAS_NODE_TYPE_UNKNOWN:
228 default:
229 return "UNKNOWN";
230 }
231 }
232
233 /* Fills an ionode_info array from the iodev_list. */
234 static int fill_node_list(struct iodev_list *list,
235 struct cras_ionode_info *node_info, size_t out_size)
236 {
237 int i = 0;
238 struct cras_iodev *dev;
239 struct cras_ionode *node;
240 DL_FOREACH (list->iodevs, dev) {
241 DL_FOREACH (dev->nodes, node) {
242 node_info->iodev_idx = dev->info.idx;
243 node_info->ionode_idx = node->idx;
244 node_info->plugged = node->plugged;
245 node_info->plugged_time.tv_sec =
246 node->plugged_time.tv_sec;
247 node_info->plugged_time.tv_usec =
248 node->plugged_time.tv_usec;
249 node_info->active =
250 dev->is_enabled && (dev->active_node == node);
251 node_info->volume = node->volume;
252 node_info->capture_gain = node->capture_gain;
253 node_info->left_right_swapped =
254 node->left_right_swapped;
255 node_info->stable_id = node->stable_id;
256 strcpy(node_info->mic_positions, node->mic_positions);
257 strcpy(node_info->name, node->name);
258 strcpy(node_info->active_hotword_model,
259 node->active_hotword_model);
260 snprintf(node_info->type, sizeof(node_info->type), "%s",
261 node_type_to_str(node));
262 node_info->type_enum = node->type;
263 node_info++;
264 i++;
265 if (i == out_size)
266 return i;
267 }
268 }
269 return i;
270 }
271
272 /* Copies the info for each device in the list to "list_out". */
273 static int get_dev_list(struct iodev_list *list,
274 struct cras_iodev_info **list_out)
275 {
276 struct cras_iodev_info *dev_info;
277
278 if (!list_out)
279 return list->size;
280
281 *list_out = NULL;
282 if (list->size == 0)
283 return 0;
284
285 dev_info = malloc(sizeof(*list_out[0]) * list->size);
286 if (dev_info == NULL)
287 return -ENOMEM;
288
289 fill_dev_list(list, dev_info, list->size);
290
291 *list_out = dev_info;
292 return list->size;
293 }
294
295 /* Called when the system volume changes. Pass the current volume setting to
296 * all open output devices. */
297 static void sys_vol_change(void *context, int32_t volume)
298 {
299 struct cras_iodev *dev;
300
301 DL_FOREACH (devs[CRAS_STREAM_OUTPUT].iodevs, dev) {
302 if (dev->set_volume && cras_iodev_is_open(dev))
303 dev->set_volume(dev);
304 }
305 }
306
307 /* Called when the system mute state changes. Pass the current mute setting
308 * to all output devices. */
309 static void sys_mute_change(void *context, int muted, int user_muted,
310 int mute_locked)
311 {
312 struct cras_iodev *dev;
313 int should_mute = muted || user_muted;
314
315 DL_FOREACH (devs[CRAS_STREAM_OUTPUT].iodevs, dev) {
316 if (!cras_iodev_is_open(dev)) {
317 /* For closed devices, just set its mute state. */
318 cras_iodev_set_mute(dev);
319 } else {
320 audio_thread_dev_start_ramp(
321 audio_thread, dev->info.idx,
322 (should_mute ?
323 CRAS_IODEV_RAMP_REQUEST_DOWN_MUTE :
324 CRAS_IODEV_RAMP_REQUEST_UP_UNMUTE));
325 }
326 }
327 }
328
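/* Removes the open device from the audio thread and detaches it from the APM
 * instances of any streams that reference it. */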
329 static void remove_all_streams_from_dev(struct cras_iodev *dev)
330 {
331 struct cras_rstream *rstream;
332
333 audio_thread_rm_open_dev(audio_thread, dev->direction, dev->info.idx);
334
335 DL_FOREACH (stream_list_get(stream_list), rstream) {
336 if (rstream->apm_list == NULL)
337 continue;
338 cras_apm_list_remove(rstream->apm_list, dev);
339 }
340 }
341
342 /*
343 * If output dev has an echo reference dev associated, add a server
344 * stream to read audio data from it so APM can analyze.
345 */
346 static void possibly_enable_echo_reference(struct cras_iodev *dev)
347 {
348 if (dev->direction != CRAS_STREAM_OUTPUT)
349 return;
350
351 if (dev->echo_reference_dev == NULL)
352 return;
353
354 server_stream_create(stream_list, dev->echo_reference_dev->info.idx);
355 }
356
357 /*
358 * If output dev has an echo reference dev associated, check if there
359 * is server stream opened for it and remove it.
360 */
361 static void possibly_disable_echo_reference(struct cras_iodev *dev)
362 {
363 if (dev->echo_reference_dev == NULL)
364 return;
365
366 server_stream_destroy(stream_list, dev->echo_reference_dev->info.idx);
367 }
368
369 /*
370 * Close dev if it's opened, without the extra call to idle_dev_check.
371 * This is useful for closing a dev inside idle_dev_check function to
372 * avoid infinite recursive call.
373 *
374 * Returns:
375 * -EINVAL if device was not opened, otherwise return 0.
376 */
377 static int close_dev_without_idle_check(struct cras_iodev *dev)
378 {
379 if (!cras_iodev_is_open(dev))
380 return -EINVAL;
381
382 remove_all_streams_from_dev(dev);
383 dev->idle_timeout.tv_sec = 0;
384 cras_iodev_close(dev);
385 possibly_disable_echo_reference(dev);
386 return 0;
387 }
388
389 static void close_dev(struct cras_iodev *dev)
390 {
391 if (close_dev_without_idle_check(dev))
392 return;
393
394 if (idle_timer)
395 cras_tm_cancel_timer(cras_system_state_get_tm(), idle_timer);
396 idle_dev_check(NULL, NULL);
397 }
398
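/* Timer callback that closes enabled output devices whose idle timeout has
 * expired and re-arms the timer for the next pending expiration. */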
399 static void idle_dev_check(struct cras_timer *timer, void *data)
400 {
401 struct enabled_dev *edev;
402 struct timespec now;
403 struct timespec min_idle_expiration;
404 unsigned int num_idle_devs = 0;
405 unsigned int min_idle_timeout_ms;
406
407 clock_gettime(CLOCK_MONOTONIC_RAW, &now);
408 min_idle_expiration.tv_sec = 0;
409 min_idle_expiration.tv_nsec = 0;
410
411 DL_FOREACH (enabled_devs[CRAS_STREAM_OUTPUT], edev) {
412 if (edev->dev->idle_timeout.tv_sec == 0)
413 continue;
414 if (timespec_after(&now, &edev->dev->idle_timeout)) {
415 close_dev_without_idle_check(edev->dev);
416 continue;
417 }
418 num_idle_devs++;
419 if (min_idle_expiration.tv_sec == 0 ||
420 timespec_after(&min_idle_expiration,
421 &edev->dev->idle_timeout))
422 min_idle_expiration = edev->dev->idle_timeout;
423 }
424
425 idle_timer = NULL;
426 if (!num_idle_devs)
427 return;
428 if (timespec_after(&now, &min_idle_expiration)) {
429 min_idle_timeout_ms = 0;
430 } else {
431 struct timespec timeout;
432 subtract_timespecs(&min_idle_expiration, &now, &timeout);
433 min_idle_timeout_ms = timespec_to_ms(&timeout);
434 }
435 /* Wake up when it is time to close the next idle device. Sleep for a
436 * minimum of 10 milliseconds. */
437 idle_timer = cras_tm_create_timer(cras_system_state_get_tm(),
438 MAX(min_idle_timeout_ms, 10),
439 idle_dev_check, NULL);
440 }
441
442 /*
443 * Cancel pending init retries. Called at device initialization or when a device
444 * is disabled.
445 */
446 static void cancel_pending_init_retries(unsigned int dev_idx)
447 {
448 struct dev_init_retry *retry;
449
450 DL_FOREACH (init_retries, retry) {
451 if (retry->dev_idx != dev_idx)
452 continue;
453 cras_tm_cancel_timer(cras_system_state_get_tm(),
454 retry->init_timer);
455 DL_DELETE(init_retries, retry);
456 free(retry);
457 }
458 }
459
460 /* Opens the device, potentially pre-filling the output buffer. */
461 static int init_device(struct cras_iodev *dev, struct cras_rstream *rstream)
462 {
463 int rc;
464
465 cras_iodev_exit_idle(dev);
466
467 if (cras_iodev_is_open(dev))
468 return 0;
469 cancel_pending_init_retries(dev->info.idx);
470
471 rc = cras_iodev_open(dev, rstream->cb_threshold, &rstream->format);
472 if (rc)
473 return rc;
474
475 rc = audio_thread_add_open_dev(audio_thread, dev);
476 if (rc)
477 cras_iodev_close(dev);
478
479 possibly_enable_echo_reference(dev);
480
481 return rc;
482 }
483
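/* Disconnects streams from the audio thread and closes all enabled devices.
 * Pinned hotword streams are left untouched. */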
484 static void suspend_devs()
485 {
486 struct enabled_dev *edev;
487 struct cras_rstream *rstream;
488
489 DL_FOREACH (stream_list_get(stream_list), rstream) {
490 if (rstream->is_pinned) {
491 struct cras_iodev *dev;
492
493 if ((rstream->flags & HOTWORD_STREAM) == HOTWORD_STREAM)
494 continue;
495
496 dev = find_dev(rstream->pinned_dev_idx);
497 if (dev) {
498 audio_thread_disconnect_stream(audio_thread,
499 rstream, dev);
500 if (!cras_iodev_list_dev_is_enabled(dev))
501 close_dev(dev);
502 }
503 } else {
504 audio_thread_disconnect_stream(audio_thread, rstream,
505 NULL);
506 }
507 }
508 stream_list_suspended = 1;
509
510 DL_FOREACH (enabled_devs[CRAS_STREAM_OUTPUT], edev) {
511 close_dev(edev->dev);
512 }
513 DL_FOREACH (enabled_devs[CRAS_STREAM_INPUT], edev) {
514 close_dev(edev->dev);
515 }
516 }
517
518 static int stream_added_cb(struct cras_rstream *rstream);
519
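/* Re-attaches all non-hotword streams after the stream list resumes. */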
520 static void resume_devs()
521 {
522 struct cras_rstream *rstream;
523
524 stream_list_suspended = 0;
525 DL_FOREACH (stream_list_get(stream_list), rstream) {
526 if ((rstream->flags & HOTWORD_STREAM) == HOTWORD_STREAM)
527 continue;
528 stream_added_cb(rstream);
529 }
530 }
531
532 /* Called when the system audio is suspended or resumed. */
533 void sys_suspend_change(void *arg, int suspended)
534 {
535 if (suspended)
536 suspend_devs();
537 else
538 resume_devs();
539 }
540
541 /* Called when the system capture gain changes. Pass the current capture_gain
542 * setting to all open input devices. */
543 void sys_cap_gain_change(void *context, int32_t gain)
544 {
545 struct cras_iodev *dev;
546
547 DL_FOREACH (devs[CRAS_STREAM_INPUT].iodevs, dev) {
548 if (dev->set_capture_gain && cras_iodev_is_open(dev))
549 dev->set_capture_gain(dev);
550 }
551 }
552
553 /* Called when the system capture mute state changes. Pass the current capture
554 * mute setting to all open input devices. */
555 static void sys_cap_mute_change(void *context, int muted, int mute_locked)
556 {
557 struct cras_iodev *dev;
558
559 DL_FOREACH (devs[CRAS_STREAM_INPUT].iodevs, dev) {
560 if (dev->set_capture_mute && cras_iodev_is_open(dev))
561 dev->set_capture_mute(dev);
562 }
563 }
564
565 static int disable_device(struct enabled_dev *edev, bool force);
566 static int enable_device(struct cras_iodev *dev);
567
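/* Disables the fallback device for this direction if it is currently enabled. */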
568 static void possibly_disable_fallback(enum CRAS_STREAM_DIRECTION dir)
569 {
570 struct enabled_dev *edev;
571
572 DL_FOREACH (enabled_devs[dir], edev) {
573 if (edev->dev == fallback_devs[dir])
574 disable_device(edev, false);
575 }
576 }
577
578 /*
579 * Possibly enables fallback device to handle streams.
580 * dir - output or input.
581  * error - true if the fallback device is enabled because no other iodevs
582  * could be initialized successfully.
583 */
584 static void possibly_enable_fallback(enum CRAS_STREAM_DIRECTION dir, bool error)
585 {
586 if (fallback_devs[dir] == NULL)
587 return;
588
589 /*
590 * The fallback device is a special device. It doesn't have a real
591 * device to get a correct node type. Therefore, we need to set it by
592 * ourselves, which indicates the reason to use this device.
593 * NORMAL - Use it because of nodes changed.
594 * ABNORMAL - Use it because there are no other usable devices.
595 */
596 if (error)
597 syslog(LOG_ERR,
598 "Enable fallback device because there are no other usable devices.");
599
600 fallback_devs[dir]->active_node->type =
601 error ? CRAS_NODE_TYPE_FALLBACK_ABNORMAL :
602 CRAS_NODE_TYPE_FALLBACK_NORMAL;
603 if (!cras_iodev_list_dev_is_enabled(fallback_devs[dir]))
604 enable_device(fallback_devs[dir]);
605 }
606
607 /*
608 * Adds stream to one or more open iodevs. If the stream has processing effect
609 * turned on, create new APM instance and add to the list. This makes sure the
610 * time consuming APM creation happens in main thread.
611 */
612 static int add_stream_to_open_devs(struct cras_rstream *stream,
613 struct cras_iodev **iodevs,
614 unsigned int num_iodevs)
615 {
616 int i;
617 if (stream->apm_list) {
618 for (i = 0; i < num_iodevs; i++)
619 cras_apm_list_add(stream->apm_list, iodevs[i],
620 iodevs[i]->format);
621 }
622 return audio_thread_add_stream(audio_thread, stream, iodevs,
623 num_iodevs);
624 }
625
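/* Opens dev if any existing stream should be attached to it and adds those
 * streams to the open device. */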
626 static int init_and_attach_streams(struct cras_iodev *dev)
627 {
628 int rc;
629 enum CRAS_STREAM_DIRECTION dir = dev->direction;
630 struct cras_rstream *stream;
631 int dev_enabled = cras_iodev_list_dev_is_enabled(dev);
632
633 /* If called while the stream list is suspended, for example during Bluetooth
634  * profile switching, don't attach the streams yet. */
635 if (stream_list_suspended)
636 return 0;
637
638 /* If there are active streams to attach to this device,
639 * open it. */
640 DL_FOREACH (stream_list_get(stream_list), stream) {
641 if (stream->direction != dir)
642 continue;
643 /*
644 * Don't attach this stream if (1) this stream pins to a
645 * different device, or (2) this is a normal stream, but
646 * device is not enabled.
647 */
648 if (stream->is_pinned) {
649 if (stream->pinned_dev_idx != dev->info.idx)
650 continue;
651 } else if (!dev_enabled) {
652 continue;
653 }
654
655 rc = init_device(dev, stream);
656 if (rc) {
657 syslog(LOG_ERR, "Enable %s failed, rc = %d",
658 dev->info.name, rc);
659 return rc;
660 }
661 add_stream_to_open_devs(stream, &dev, 1);
662 }
663 return 0;
664 }
665
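/* Timer callback for a delayed retry of device initialization. */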
666 static void init_device_cb(struct cras_timer *timer, void *arg)
667 {
668 int rc;
669 struct dev_init_retry *retry = (struct dev_init_retry *)arg;
670 struct cras_iodev *dev = find_dev(retry->dev_idx);
671
672 /*
673 * First of all, remove retry record to avoid confusion to the
674 * actual device init work.
675 */
676 DL_DELETE(init_retries, retry);
677 free(retry);
678
679 if (!dev || cras_iodev_is_open(dev))
680 return;
681
682 rc = init_and_attach_streams(dev);
683 if (rc < 0)
684 syslog(LOG_ERR, "Init device retry failed");
685 else
686 possibly_disable_fallback(dev->direction);
687 }
688
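/* Schedules a retry of device init after INIT_DEV_DELAY_MS milliseconds. */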
689 static int schedule_init_device_retry(struct cras_iodev *dev)
690 {
691 struct dev_init_retry *retry;
692 struct cras_tm *tm = cras_system_state_get_tm();
693
694 retry = (struct dev_init_retry *)calloc(1, sizeof(*retry));
695 if (!retry)
696 return -ENOMEM;
697
698 retry->dev_idx = dev->info.idx;
699 retry->init_timer = cras_tm_create_timer(tm, INIT_DEV_DELAY_MS,
700 init_device_cb, retry);
701 DL_APPEND(init_retries, retry);
702 return 0;
703 }
704
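/* Opens a device for a pinned stream, making sure its active node is
 * configured first. */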
705 static int init_pinned_device(struct cras_iodev *dev,
706 struct cras_rstream *rstream)
707 {
708 int rc;
709
710 cras_iodev_exit_idle(dev);
711
712 if (audio_thread_is_dev_open(audio_thread, dev))
713 return 0;
714
715 /* Make sure the active node is configured properly; it could have been
716  * disabled when the last normal stream was removed. */
717 dev->update_active_node(dev, dev->active_node->idx, 1);
718
719 /* Negative EAGAIN code indicates dev will be opened later. */
720 rc = init_device(dev, rstream);
721 if (rc)
722 return rc;
723 return 0;
724 }
725
726 static int close_pinned_device(struct cras_iodev *dev)
727 {
728 close_dev(dev);
729 dev->update_active_node(dev, dev->active_node->idx, 0);
730 return 0;
731 }
732
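/* Returns the device that a pinned stream should attach to, or NULL if the
 * target is invalid. Hotword streams are redirected to the empty hotword
 * device while hotword streams are suspended. */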
733 static struct cras_iodev *find_pinned_device(struct cras_rstream *rstream)
734 {
735 struct cras_iodev *dev;
736 if (!rstream->is_pinned)
737 return NULL;
738
739 dev = find_dev(rstream->pinned_dev_idx);
740
741 if ((rstream->flags & HOTWORD_STREAM) != HOTWORD_STREAM)
742 return dev;
743
744 /* Double check node type for hotword stream */
745 if (dev && dev->active_node->type != CRAS_NODE_TYPE_HOTWORD) {
746 syslog(LOG_ERR, "Hotword stream pinned to invalid dev %u",
747 dev->info.idx);
748 return NULL;
749 }
750
751 return hotword_suspended ? empty_hotword_dev : dev;
752 }
753
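/* Attaches a newly added pinned stream to its target device, scheduling an
 * init retry if the device cannot be opened yet. */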
754 static int pinned_stream_added(struct cras_rstream *rstream)
755 {
756 struct cras_iodev *dev;
757 int rc;
758
759 /* Check that the target device is valid for pinned streams. */
760 dev = find_pinned_device(rstream);
761 if (!dev)
762 return -EINVAL;
763
764 rc = init_pinned_device(dev, rstream);
765 if (rc) {
766 syslog(LOG_INFO, "init_pinned_device failed, rc %d", rc);
767 return schedule_init_device_retry(dev);
768 }
769
770 return add_stream_to_open_devs(rstream, &dev, 1);
771 }
772
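/* Stream list callback invoked when a new stream is added. */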
773 static int stream_added_cb(struct cras_rstream *rstream)
774 {
775 struct enabled_dev *edev;
776 struct cras_iodev *iodevs[10];
777 unsigned int num_iodevs;
778 int rc;
779
780 if (stream_list_suspended)
781 return 0;
782
783 if (rstream->is_pinned)
784 return pinned_stream_added(rstream);
785
786 /* Add the new stream to all enabled iodevs at once to avoid offset
787 * in shm level between different output iodevs. */
788 num_iodevs = 0;
789 DL_FOREACH (enabled_devs[rstream->direction], edev) {
790 if (num_iodevs >= ARRAY_SIZE(iodevs)) {
791 syslog(LOG_ERR, "too many enabled devices");
792 break;
793 }
794
795 rc = init_device(edev->dev, rstream);
796 if (rc) {
797 /* Log the error but don't return it here, because
798 * stopping audio could block video playback.
799 */
800 syslog(LOG_ERR, "Init %s failed, rc = %d",
801 edev->dev->info.name, rc);
802 schedule_init_device_retry(edev->dev);
803 continue;
804 }
805
806 iodevs[num_iodevs++] = edev->dev;
807 }
808 if (num_iodevs) {
809 rc = add_stream_to_open_devs(rstream, iodevs, num_iodevs);
810 if (rc) {
811 syslog(LOG_ERR, "adding stream to thread fail");
812 return rc;
813 }
814 } else {
815 /* Enable fallback device if no other iodevs can be initialized
816 * successfully.
817 * For error codes like EAGAIN and ENOENT, a new iodev will be
818 * enabled soon so streams are going to route there. As for the
819 * rest of the error cases, silence will be played or recorded
820 * so client won't be blocked.
821 * The enabled fallback device will be disabled when
822 * cras_iodev_list_select_node() is called to re-select the
823 * active node.
824 */
825 possibly_enable_fallback(rstream->direction, true);
826 }
827 return 0;
828 }
829
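/* Closes enabled devices in the given direction once no default (non-pinned)
 * streams remain; output devices are given an idle timeout so they can drain
 * before closing. */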
830 static int possibly_close_enabled_devs(enum CRAS_STREAM_DIRECTION dir)
831 {
832 struct enabled_dev *edev;
833 const struct cras_rstream *s;
834
835 /* Check if there are still default streams attached. */
836 DL_FOREACH (stream_list_get(stream_list), s) {
837 if (s->direction == dir && !s->is_pinned)
838 return 0;
839 }
840
841 /* No more default streams, close any device that doesn't have a stream
842 * pinned to it. */
843 DL_FOREACH (enabled_devs[dir], edev) {
844 if (stream_list_has_pinned_stream(stream_list,
845 edev->dev->info.idx))
846 continue;
847 if (dir == CRAS_STREAM_INPUT) {
848 close_dev(edev->dev);
849 continue;
850 }
851 /* Allow output devs to drain before closing. */
852 clock_gettime(CLOCK_MONOTONIC_RAW, &edev->dev->idle_timeout);
853 add_timespecs(&edev->dev->idle_timeout, &idle_timeout_interval);
854 idle_dev_check(NULL, NULL);
855 }
856
857 return 0;
858 }
859
860 static void pinned_stream_removed(struct cras_rstream *rstream)
861 {
862 struct cras_iodev *dev;
863
864 dev = find_pinned_device(rstream);
865 if (!dev)
866 return;
867 if (!cras_iodev_list_dev_is_enabled(dev) &&
868 !stream_list_has_pinned_stream(stream_list, dev->info.idx))
869 close_pinned_device(dev);
870 }
871
872 /* Returns the number of milliseconds left to drain this stream. This is passed
873 * directly from the audio thread. */
874 static int stream_removed_cb(struct cras_rstream *rstream)
875 {
876 enum CRAS_STREAM_DIRECTION direction = rstream->direction;
877 int rc;
878
879 rc = audio_thread_drain_stream(audio_thread, rstream);
880 if (rc)
881 return rc;
882
883 if (rstream->is_pinned)
884 pinned_stream_removed(rstream);
885
886 possibly_close_enabled_devs(direction);
887
888 return 0;
889 }
890
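/* Adds dev to the enabled list, attaches existing streams to it, and notifies
 * registered device-enabled callbacks. */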
891 static int enable_device(struct cras_iodev *dev)
892 {
893 int rc;
894 struct enabled_dev *edev;
895 enum CRAS_STREAM_DIRECTION dir = dev->direction;
896 struct device_enabled_cb *callback;
897
898 DL_FOREACH (enabled_devs[dir], edev) {
899 if (edev->dev == dev)
900 return -EEXIST;
901 }
902
903 edev = calloc(1, sizeof(*edev));
904 edev->dev = dev;
905 DL_APPEND(enabled_devs[dir], edev);
906 dev->is_enabled = 1;
907
908 rc = init_and_attach_streams(dev);
909 if (rc < 0) {
910 syslog(LOG_INFO, "Enable device fail, rc %d", rc);
911 schedule_init_device_retry(dev);
912 return rc;
913 }
914
915 DL_FOREACH (device_enable_cbs, callback)
916 callback->enabled_cb(dev, callback->cb_data);
917
918 return 0;
919 }
920
921 /* Set `force` to true to flush any pinned streams before closing the device. */
922 static int disable_device(struct enabled_dev *edev, bool force)
923 {
924 struct cras_iodev *dev = edev->dev;
925 enum CRAS_STREAM_DIRECTION dir = dev->direction;
926 struct cras_rstream *stream;
927 struct device_enabled_cb *callback;
928
929 /*
930 * Remove from the enabled dev list. However, this dev could still have a
931 * stream pinned to it, so only cancel pending init timers when the force flag is set.
932 */
933 DL_DELETE(enabled_devs[dir], edev);
934 free(edev);
935 dev->is_enabled = 0;
936 if (force)
937 cancel_pending_init_retries(dev->info.idx);
938
939 /*
940 * Pull all default streams off this device.
941 * Pull all pinned streams off as well if force is true.
942 */
943 DL_FOREACH (stream_list_get(stream_list), stream) {
944 if (stream->direction != dev->direction)
945 continue;
946 if (stream->is_pinned && !force)
947 continue;
948 audio_thread_disconnect_stream(audio_thread, stream, dev);
949 }
950 /* If this is a force disable call, that guarantees pinned streams have
951 * all been detached. Otherwise check with stream_list to see if
952 * there's still a pinned stream using this device.
953 */
954 if (!force && stream_list_has_pinned_stream(stream_list, dev->info.idx))
955 return 0;
956 DL_FOREACH (device_enable_cbs, callback)
957 callback->disabled_cb(dev, callback->cb_data);
958 close_dev(dev);
959 dev->update_active_node(dev, dev->active_node->idx, 0);
960
961 return 0;
962 }
963
964 /*
965 * Assume the device is not in enabled_devs list.
966 * Assume there is no default stream on the device.
967 * An example is that this device is unplugged while it is playing
968 * a pinned stream. The device and stream may have been removed in
969 * audio thread due to I/O error handling.
970 */
971 static int force_close_pinned_only_device(struct cras_iodev *dev)
972 {
973 struct cras_rstream *rstream;
974
975 /* Pull pinned streams off this device. Note that this is initiated
976 * from the server side, so the pinned stream still exists in stream_list,
977 * pending the client side to actually remove it.
978 */
979 DL_FOREACH (stream_list_get(stream_list), rstream) {
980 if (rstream->direction != dev->direction)
981 continue;
982 if (!rstream->is_pinned)
983 continue;
984 if (dev->info.idx != rstream->pinned_dev_idx)
985 continue;
986 audio_thread_disconnect_stream(audio_thread, rstream, dev);
987 }
988
989 close_dev(dev);
990 dev->update_active_node(dev, dev->active_node->idx, 0);
991 return 0;
992 }
993
994 /*
995 * Exported Interface.
996 */
997
998 void cras_iodev_list_init()
999 {
1000 struct cras_observer_ops observer_ops;
1001
1002 memset(&observer_ops, 0, sizeof(observer_ops));
1003 observer_ops.output_volume_changed = sys_vol_change;
1004 observer_ops.output_mute_changed = sys_mute_change;
1005 observer_ops.capture_gain_changed = sys_cap_gain_change;
1006 observer_ops.capture_mute_changed = sys_cap_mute_change;
1007 observer_ops.suspend_changed = sys_suspend_change;
1008 list_observer = cras_observer_add(&observer_ops, NULL);
1009 idle_timer = NULL;
1010
1011 /* Create the audio stream list for the system. */
1012 stream_list =
1013 stream_list_create(stream_added_cb, stream_removed_cb,
1014 cras_rstream_create, cras_rstream_destroy,
1015 cras_system_state_get_tm());
1016
1017 /* Add an empty device so there is always something to play to or
1018 * capture from. */
1019 fallback_devs[CRAS_STREAM_OUTPUT] = empty_iodev_create(
1020 CRAS_STREAM_OUTPUT, CRAS_NODE_TYPE_FALLBACK_NORMAL);
1021 fallback_devs[CRAS_STREAM_INPUT] = empty_iodev_create(
1022 CRAS_STREAM_INPUT, CRAS_NODE_TYPE_FALLBACK_NORMAL);
1023 enable_device(fallback_devs[CRAS_STREAM_OUTPUT]);
1024 enable_device(fallback_devs[CRAS_STREAM_INPUT]);
1025
1026 empty_hotword_dev =
1027 empty_iodev_create(CRAS_STREAM_INPUT, CRAS_NODE_TYPE_HOTWORD);
1028
1029 /* Create loopback devices. */
1030 loopdev_post_mix = loopback_iodev_create(LOOPBACK_POST_MIX_PRE_DSP);
1031 loopdev_post_dsp = loopback_iodev_create(LOOPBACK_POST_DSP);
1032
1033 audio_thread = audio_thread_create();
1034 if (!audio_thread) {
1035 syslog(LOG_ERR, "Fatal: audio thread init");
1036 exit(-ENOMEM);
1037 }
1038 audio_thread_start(audio_thread);
1039
1040 cras_iodev_list_update_device_list();
1041 }
1042
1043 void cras_iodev_list_deinit()
1044 {
1045 audio_thread_destroy(audio_thread);
1046 loopback_iodev_destroy(loopdev_post_dsp);
1047 loopback_iodev_destroy(loopdev_post_mix);
1048 empty_iodev_destroy(empty_hotword_dev);
1049 empty_iodev_destroy(fallback_devs[CRAS_STREAM_INPUT]);
1050 empty_iodev_destroy(fallback_devs[CRAS_STREAM_OUTPUT]);
1051 stream_list_destroy(stream_list);
1052 if (list_observer) {
1053 cras_observer_remove(list_observer);
1054 list_observer = NULL;
1055 }
1056 }
1057
1058 int cras_iodev_list_dev_is_enabled(const struct cras_iodev *dev)
1059 {
1060 struct enabled_dev *edev;
1061
1062 DL_FOREACH (enabled_devs[dev->direction], edev) {
1063 if (edev->dev == dev)
1064 return 1;
1065 }
1066
1067 return 0;
1068 }
1069
1070 void cras_iodev_list_enable_dev(struct cras_iodev *dev)
1071 {
1072 possibly_disable_fallback(dev->direction);
1073 /* Enable ucm setting of active node. */
1074 dev->update_active_node(dev, dev->active_node->idx, 1);
1075 enable_device(dev);
1076 cras_iodev_list_notify_active_node_changed(dev->direction);
1077 }
1078
1079 void cras_iodev_list_add_active_node(enum CRAS_STREAM_DIRECTION dir,
1080 cras_node_id_t node_id)
1081 {
1082 struct cras_iodev *new_dev;
1083 new_dev = find_dev(dev_index_of(node_id));
1084 if (!new_dev || new_dev->direction != dir)
1085 return;
1086
1087 /* If the new dev is already enabled but its active node needs to be
1088 * changed, disable the new dev first, update the active node, and then
1089 * re-enable it.
1090 */
1091 if (cras_iodev_list_dev_is_enabled(new_dev)) {
1092 if (node_index_of(node_id) == new_dev->active_node->idx)
1093 return;
1094 else
1095 cras_iodev_list_disable_dev(new_dev, true);
1096 }
1097
1098 new_dev->update_active_node(new_dev, node_index_of(node_id), 1);
1099 cras_iodev_list_enable_dev(new_dev);
1100 }
1101
1102 /*
1103 * Disables device which may or may not be in enabled_devs list.
1104 */
1105 void cras_iodev_list_disable_dev(struct cras_iodev *dev, bool force_close)
1106 {
1107 struct enabled_dev *edev, *edev_to_disable = NULL;
1108
1109 int is_the_only_enabled_device = 1;
1110
1111 DL_FOREACH (enabled_devs[dev->direction], edev) {
1112 if (edev->dev == dev)
1113 edev_to_disable = edev;
1114 else
1115 is_the_only_enabled_device = 0;
1116 }
1117
1118 /*
1119 * Disables the device for these two cases:
1120 * 1. Disable a device in the enabled_devs list.
1121 * 2. Force close a device that is not in the enabled_devs list,
1122 * but it is running a pinned stream.
1123 */
1124 if (!edev_to_disable) {
1125 if (force_close)
1126 force_close_pinned_only_device(dev);
1127 return;
1128 }
1129
1130 /* If the device to be closed is the only enabled device, we should
1131 * enable the fallback device first then disable the target
1132 * device. */
1133 if (is_the_only_enabled_device && fallback_devs[dev->direction])
1134 enable_device(fallback_devs[dev->direction]);
1135
1136 disable_device(edev_to_disable, force_close);
1137
1138 cras_iodev_list_notify_active_node_changed(dev->direction);
1139 return;
1140 }
1141
1142 void cras_iodev_list_suspend_dev(unsigned int dev_idx)
1143 {
1144 struct cras_rstream *rstream;
1145 struct cras_iodev *dev = find_dev(dev_idx);
1146
1147 if (!dev)
1148 return;
1149
1150 DL_FOREACH (stream_list_get(stream_list), rstream) {
1151 if (rstream->direction != dev->direction)
1152 continue;
1153 /* Disconnect all streams that are either:
1154 * (1) normal stream while dev is enabled by UI, or
1155 * (2) stream specifically pins to this dev.
1156 */
1157 if ((dev->is_enabled && !rstream->is_pinned) ||
1158 (rstream->is_pinned &&
1159 (dev->info.idx != rstream->pinned_dev_idx)))
1160 audio_thread_disconnect_stream(audio_thread, rstream,
1161 dev);
1162 }
1163 close_dev(dev);
1164 dev->update_active_node(dev, dev->active_node->idx, 0);
1165 }
1166
1167 void cras_iodev_list_resume_dev(unsigned int dev_idx)
1168 {
1169 struct cras_iodev *dev = find_dev(dev_idx);
1170 int rc;
1171
1172 if (!dev)
1173 return;
1174
1175 dev->update_active_node(dev, dev->active_node->idx, 1);
1176 rc = init_and_attach_streams(dev);
1177 if (rc == 0) {
1178 /* If dev initialization succeeded and no stream is pinned to this device,
1179  * disable the silent fallback device because it is no longer
1180  * necessary. */
1181 if (!stream_list_has_pinned_stream(stream_list, dev_idx))
1182 possibly_disable_fallback(dev->direction);
1183 } else {
1184 syslog(LOG_INFO, "Enable dev fail at resume, rc %d", rc);
1185 schedule_init_device_retry(dev);
1186 }
1187 }
1188
1189 void cras_iodev_list_set_dev_mute(unsigned int dev_idx)
1190 {
1191 struct cras_iodev *dev;
1192
1193 dev = find_dev(dev_idx);
1194 if (!dev)
1195 return;
1196
1197 cras_iodev_set_mute(dev);
1198 }
1199
1200 void cras_iodev_list_rm_active_node(enum CRAS_STREAM_DIRECTION dir,
1201 cras_node_id_t node_id)
1202 {
1203 struct cras_iodev *dev;
1204
1205 dev = find_dev(dev_index_of(node_id));
1206 if (!dev)
1207 return;
1208
1209 cras_iodev_list_disable_dev(dev, false);
1210 }
1211
1212 int cras_iodev_list_add_output(struct cras_iodev *output)
1213 {
1214 int rc;
1215
1216 if (output->direction != CRAS_STREAM_OUTPUT)
1217 return -EINVAL;
1218
1219 rc = add_dev_to_list(output);
1220 if (rc)
1221 return rc;
1222
1223 return 0;
1224 }
1225
1226 int cras_iodev_list_add_input(struct cras_iodev *input)
1227 {
1228 int rc;
1229
1230 if (input->direction != CRAS_STREAM_INPUT)
1231 return -EINVAL;
1232
1233 rc = add_dev_to_list(input);
1234 if (rc)
1235 return rc;
1236
1237 return 0;
1238 }
1239
1240 int cras_iodev_list_rm_output(struct cras_iodev *dev)
1241 {
1242 int res;
1243
1244 /* Retire the current active output device before removing it from
1245 * list, otherwise it could be busy and remain in the list.
1246 */
1247 cras_iodev_list_disable_dev(dev, true);
1248 res = rm_dev_from_list(dev);
1249 if (res == 0)
1250 cras_iodev_list_update_device_list();
1251 return res;
1252 }
1253
1254 int cras_iodev_list_rm_input(struct cras_iodev *dev)
1255 {
1256 int res;
1257
1258 /* Retire the current active input device before removing it from
1259 * list, otherwise it could be busy and remain in the list.
1260 */
1261 cras_iodev_list_disable_dev(dev, true);
1262 res = rm_dev_from_list(dev);
1263 if (res == 0)
1264 cras_iodev_list_update_device_list();
1265 return res;
1266 }
1267
1268 int cras_iodev_list_get_outputs(struct cras_iodev_info **list_out)
1269 {
1270 return get_dev_list(&devs[CRAS_STREAM_OUTPUT], list_out);
1271 }
1272
1273 int cras_iodev_list_get_inputs(struct cras_iodev_info **list_out)
1274 {
1275 return get_dev_list(&devs[CRAS_STREAM_INPUT], list_out);
1276 }
1277
1278 struct cras_iodev *
1279 cras_iodev_list_get_first_enabled_iodev(enum CRAS_STREAM_DIRECTION direction)
1280 {
1281 struct enabled_dev *edev = enabled_devs[direction];
1282
1283 return edev ? edev->dev : NULL;
1284 }
1285
1286 struct cras_iodev *
1287 cras_iodev_list_get_sco_pcm_iodev(enum CRAS_STREAM_DIRECTION direction)
1288 {
1289 struct cras_iodev *dev;
1290 struct cras_ionode *node;
1291
1292 DL_FOREACH (devs[direction].iodevs, dev) {
1293 DL_FOREACH (dev->nodes, node) {
1294 if (node->is_sco_pcm)
1295 return dev;
1296 }
1297 }
1298
1299 return NULL;
1300 }
1301
1302 cras_node_id_t
1303 cras_iodev_list_get_active_node_id(enum CRAS_STREAM_DIRECTION direction)
1304 {
1305 struct enabled_dev *edev = enabled_devs[direction];
1306
1307 if (!edev || !edev->dev || !edev->dev->active_node)
1308 return 0;
1309
1310 return cras_make_node_id(edev->dev->info.idx,
1311 edev->dev->active_node->idx);
1312 }
1313
1314 void cras_iodev_list_update_device_list()
1315 {
1316 struct cras_server_state *state;
1317
1318 state = cras_system_state_update_begin();
1319 if (!state)
1320 return;
1321
1322 state->num_output_devs = devs[CRAS_STREAM_OUTPUT].size;
1323 state->num_input_devs = devs[CRAS_STREAM_INPUT].size;
1324 fill_dev_list(&devs[CRAS_STREAM_OUTPUT], &state->output_devs[0],
1325 CRAS_MAX_IODEVS);
1326 fill_dev_list(&devs[CRAS_STREAM_INPUT], &state->input_devs[0],
1327 CRAS_MAX_IODEVS);
1328
1329 state->num_output_nodes =
1330 fill_node_list(&devs[CRAS_STREAM_OUTPUT],
1331 &state->output_nodes[0], CRAS_MAX_IONODES);
1332 state->num_input_nodes =
1333 fill_node_list(&devs[CRAS_STREAM_INPUT], &state->input_nodes[0],
1334 CRAS_MAX_IONODES);
1335
1336 cras_system_state_update_complete();
1337 }
1338
1339 /* Look up the first hotword stream and the device it is pinned to. */
1340 int find_hotword_stream_dev(struct cras_iodev **dev,
1341 struct cras_rstream **stream)
1342 {
1343 DL_FOREACH (stream_list_get(stream_list), *stream) {
1344 if (((*stream)->flags & HOTWORD_STREAM) != HOTWORD_STREAM)
1345 continue;
1346
1347 *dev = find_dev((*stream)->pinned_dev_idx);
1348 if (*dev == NULL)
1349 return -ENOENT;
1350 break;
1351 }
1352 return 0;
1353 }
1354
1355 /* Suspend/resume hotword streams functions are used to provide seamless
1356 * experience to cras clients when there's hardware limitation about concurrent
1357 * DSP and normal recording. The empty hotword iodev is used to hold all
1358 * hotword streams during suspend, so client side will not know about the
1359 * transition, and can still remove or add streams. At resume, the real hotword
1360 * device will be initialized and opened again to re-arm the DSP.
1361 */
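/*
 * For example, a hypothetical caller that needs the DSP for a conflicting
 * capture could bracket that work with the pair of calls below:
 *   cras_iodev_list_suspend_hotword_streams();
 *   ... run the conflicting recording or DSP reconfiguration ...
 *   cras_iodev_list_resume_hotword_stream();
 */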
1362 int cras_iodev_list_suspend_hotword_streams()
1363 {
1364 struct cras_iodev *hotword_dev;
1365 struct cras_rstream *stream = NULL;
1366 int rc;
1367
1368 rc = find_hotword_stream_dev(&hotword_dev, &stream);
1369 if (rc)
1370 return rc;
1371
1372 if (stream == NULL) {
1373 hotword_suspended = 1;
1374 return 0;
1375 }
1376 /* Move all existing hotword streams to the empty hotword iodev. */
1377 init_pinned_device(empty_hotword_dev, stream);
1378 DL_FOREACH (stream_list_get(stream_list), stream) {
1379 if ((stream->flags & HOTWORD_STREAM) != HOTWORD_STREAM)
1380 continue;
1381 if (stream->pinned_dev_idx != hotword_dev->info.idx) {
1382 syslog(LOG_ERR,
1383 "Failed to suspend hotword stream on dev %u",
1384 stream->pinned_dev_idx);
1385 continue;
1386 }
1387
1388 audio_thread_disconnect_stream(audio_thread, stream,
1389 hotword_dev);
1390 audio_thread_add_stream(audio_thread, stream,
1391 &empty_hotword_dev, 1);
1392 }
1393 close_pinned_device(hotword_dev);
1394 hotword_suspended = 1;
1395 return 0;
1396 }
1397
1398 int cras_iodev_list_resume_hotword_stream()
1399 {
1400 struct cras_iodev *hotword_dev;
1401 struct cras_rstream *stream = NULL;
1402 int rc;
1403
1404 rc = find_hotword_stream_dev(&hotword_dev, &stream);
1405 if (rc)
1406 return rc;
1407
1408 if (stream == NULL) {
1409 hotword_suspended = 0;
1410 return 0;
1411 }
1412 /* Move all existing hotword streams to the real hotword iodev. */
1413 init_pinned_device(hotword_dev, stream);
1414 DL_FOREACH (stream_list_get(stream_list), stream) {
1415 if ((stream->flags & HOTWORD_STREAM) != HOTWORD_STREAM)
1416 continue;
1417 if (stream->pinned_dev_idx != hotword_dev->info.idx) {
1418 syslog(LOG_ERR,
1419 "Fail to resume hotword stream on dev %u",
1420 stream->pinned_dev_idx);
1421 continue;
1422 }
1423
1424 audio_thread_disconnect_stream(audio_thread, stream,
1425 empty_hotword_dev);
1426 audio_thread_add_stream(audio_thread, stream, &hotword_dev, 1);
1427 }
1428 close_pinned_device(empty_hotword_dev);
1429 hotword_suspended = 0;
1430 return 0;
1431 }
1432
1433 char *cras_iodev_list_get_hotword_models(cras_node_id_t node_id)
1434 {
1435 struct cras_iodev *dev = NULL;
1436
1437 dev = find_dev(dev_index_of(node_id));
1438 if (!dev || !dev->get_hotword_models ||
1439 (dev->active_node->type != CRAS_NODE_TYPE_HOTWORD))
1440 return NULL;
1441
1442 return dev->get_hotword_models(dev);
1443 }
1444
1445 int cras_iodev_list_set_hotword_model(cras_node_id_t node_id,
1446 const char *model_name)
1447 {
1448 int ret;
1449 struct cras_iodev *dev = find_dev(dev_index_of(node_id));
1450 if (!dev || !dev->get_hotword_models ||
1451 (dev->active_node->type != CRAS_NODE_TYPE_HOTWORD))
1452 return -EINVAL;
1453
1454 ret = dev->set_hotword_model(dev, model_name);
1455 if (!ret)
1456 strncpy(dev->active_node->active_hotword_model, model_name,
1457 sizeof(dev->active_node->active_hotword_model) - 1);
1458 return ret;
1459 }
1460
1461 void cras_iodev_list_notify_nodes_changed()
1462 {
1463 cras_observer_notify_nodes();
1464 }
1465
1466 void cras_iodev_list_notify_active_node_changed(
1467 enum CRAS_STREAM_DIRECTION direction)
1468 {
1469 cras_observer_notify_active_node(
1470 direction, cras_iodev_list_get_active_node_id(direction));
1471 }
1472
1473 void cras_iodev_list_select_node(enum CRAS_STREAM_DIRECTION direction,
1474 cras_node_id_t node_id)
1475 {
1476 struct cras_iodev *new_dev = NULL;
1477 struct enabled_dev *edev;
1478 int new_node_already_enabled = 0;
1479 int rc;
1480
1481 /* find the devices for the id. */
1482 new_dev = find_dev(dev_index_of(node_id));
1483
1484 /* Do nothing if the direction is mismatched. The new_dev == NULL case
1485 could happen if node_id is 0 (no selection), or the client tries
1486 to select a non-existing node (maybe it's unplugged just before
1487 the client selects it). We will just behave like there is no selected
1488 node. */
1489 if (new_dev && new_dev->direction != direction)
1490 return;
1491
1492 /* Determine whether the new device and node are already enabled - if
1493 * they are, the selection algorithm should avoid disabling the new
1494 * device. */
1495 DL_FOREACH (enabled_devs[direction], edev) {
1496 if (edev->dev == new_dev &&
1497 edev->dev->active_node->idx == node_index_of(node_id)) {
1498 new_node_already_enabled = 1;
1499 break;
1500 }
1501 }
1502
1503 /* Enable the fallback device during the transition so the client will not
1504  * be blocked while the new device is opened, which can take as long as
1505  * 300 ms on some boards.
1506 * Note that the fallback node is not needed if the new node is already
1507 * enabled - the new node will remain enabled. */
1508 if (!new_node_already_enabled)
1509 possibly_enable_fallback(direction, false);
1510
1511 /* Disable all devices except for fallback device, and the new device,
1512 * provided it is already enabled. */
1513 DL_FOREACH (enabled_devs[direction], edev) {
1514 if (edev->dev != fallback_devs[direction] &&
1515 !(new_node_already_enabled && edev->dev == new_dev)) {
1516 disable_device(edev, false);
1517 }
1518 }
1519
1520 if (new_dev && !new_node_already_enabled) {
1521 new_dev->update_active_node(new_dev, node_index_of(node_id), 1);
1522 rc = enable_device(new_dev);
1523 if (rc == 0) {
1524 /* Disable fallback device after new device is enabled.
1525 * Leave the fallback device enabled if new_dev failed
1526 * to open, or the new_dev == NULL case. */
1527 possibly_disable_fallback(direction);
1528 }
1529 }
1530
1531 cras_iodev_list_notify_active_node_changed(direction);
1532 }
1533
1534 static int set_node_plugged(struct cras_iodev *iodev, unsigned int node_idx,
1535 int plugged)
1536 {
1537 struct cras_ionode *node;
1538
1539 node = find_node(iodev, node_idx);
1540 if (!node)
1541 return -EINVAL;
1542 cras_iodev_set_node_plugged(node, plugged);
1543 return 0;
1544 }
1545
1546 static int set_node_volume(struct cras_iodev *iodev, unsigned int node_idx,
1547 int volume)
1548 {
1549 struct cras_ionode *node;
1550
1551 node = find_node(iodev, node_idx);
1552 if (!node)
1553 return -EINVAL;
1554
1555 if (iodev->ramp && cras_iodev_software_volume_needed(iodev) &&
1556 !cras_system_get_mute())
1557 cras_iodev_start_volume_ramp(iodev, node->volume, volume);
1558
1559 node->volume = volume;
1560 if (iodev->set_volume)
1561 iodev->set_volume(iodev);
1562 cras_iodev_list_notify_node_volume(node);
1563 return 0;
1564 }
1565
1566 static int set_node_capture_gain(struct cras_iodev *iodev,
1567 unsigned int node_idx, int capture_gain)
1568 {
1569 struct cras_ionode *node;
1570
1571 node = find_node(iodev, node_idx);
1572 if (!node)
1573 return -EINVAL;
1574
1575 node->capture_gain = capture_gain;
1576 if (iodev->set_capture_gain)
1577 iodev->set_capture_gain(iodev);
1578 cras_iodev_list_notify_node_capture_gain(node);
1579 return 0;
1580 }
1581
1582 static int set_node_left_right_swapped(struct cras_iodev *iodev,
1583 unsigned int node_idx,
1584 int left_right_swapped)
1585 {
1586 struct cras_ionode *node;
1587 int rc;
1588
1589 if (!iodev->set_swap_mode_for_node)
1590 return -EINVAL;
1591 node = find_node(iodev, node_idx);
1592 if (!node)
1593 return -EINVAL;
1594
1595 rc = iodev->set_swap_mode_for_node(iodev, node, left_right_swapped);
1596 if (rc) {
1597 syslog(LOG_ERR, "Failed to set swap mode on node %s to %d",
1598 node->name, left_right_swapped);
1599 return rc;
1600 }
1601 node->left_right_swapped = left_right_swapped;
1602 cras_iodev_list_notify_node_left_right_swapped(node);
1603 return 0;
1604 }
1605
1606 int cras_iodev_list_set_node_attr(cras_node_id_t node_id, enum ionode_attr attr,
1607 int value)
1608 {
1609 struct cras_iodev *iodev;
1610 int rc = 0;
1611
1612 iodev = find_dev(dev_index_of(node_id));
1613 if (!iodev)
1614 return -EINVAL;
1615
1616 switch (attr) {
1617 case IONODE_ATTR_PLUGGED:
1618 rc = set_node_plugged(iodev, node_index_of(node_id), value);
1619 break;
1620 case IONODE_ATTR_VOLUME:
1621 rc = set_node_volume(iodev, node_index_of(node_id), value);
1622 break;
1623 case IONODE_ATTR_CAPTURE_GAIN:
1624 rc = set_node_capture_gain(iodev, node_index_of(node_id),
1625 value);
1626 break;
1627 case IONODE_ATTR_SWAP_LEFT_RIGHT:
1628 rc = set_node_left_right_swapped(iodev, node_index_of(node_id),
1629 value);
1630 break;
1631 default:
1632 return -EINVAL;
1633 }
1634
1635 return rc;
1636 }
1637
1638 void cras_iodev_list_notify_node_volume(struct cras_ionode *node)
1639 {
1640 cras_node_id_t id = cras_make_node_id(node->dev->info.idx, node->idx);
1641 cras_iodev_list_update_device_list();
1642 cras_observer_notify_output_node_volume(id, node->volume);
1643 }
1644
1645 void cras_iodev_list_notify_node_left_right_swapped(struct cras_ionode *node)
1646 {
1647 cras_node_id_t id = cras_make_node_id(node->dev->info.idx, node->idx);
1648 cras_iodev_list_update_device_list();
1649 cras_observer_notify_node_left_right_swapped(id,
1650 node->left_right_swapped);
1651 }
1652
1653 void cras_iodev_list_notify_node_capture_gain(struct cras_ionode *node)
1654 {
1655 cras_node_id_t id = cras_make_node_id(node->dev->info.idx, node->idx);
1656 cras_iodev_list_update_device_list();
1657 cras_observer_notify_input_node_gain(id, node->capture_gain);
1658 }
1659
1660 void cras_iodev_list_add_test_dev(enum TEST_IODEV_TYPE type)
1661 {
1662 if (type != TEST_IODEV_HOTWORD)
1663 return;
1664 test_iodev_create(CRAS_STREAM_INPUT, type);
1665 }
1666
1667 void cras_iodev_list_test_dev_command(unsigned int iodev_idx,
1668 enum CRAS_TEST_IODEV_CMD command,
1669 unsigned int data_len,
1670 const uint8_t *data)
1671 {
1672 struct cras_iodev *dev = find_dev(iodev_idx);
1673
1674 if (!dev)
1675 return;
1676
1677 test_iodev_command(dev, command, data_len, data);
1678 }
1679
1680 struct audio_thread *cras_iodev_list_get_audio_thread()
1681 {
1682 return audio_thread;
1683 }
1684
1685 struct stream_list *cras_iodev_list_get_stream_list()
1686 {
1687 return stream_list;
1688 }
1689
1690 int cras_iodev_list_set_device_enabled_callback(
1691 device_enabled_callback_t enabled_cb,
1692 device_disabled_callback_t disabled_cb, void *cb_data)
1693 {
1694 struct device_enabled_cb *callback;
1695
1696 DL_FOREACH (device_enable_cbs, callback) {
1697 if (callback->cb_data != cb_data)
1698 continue;
1699
1700 DL_DELETE(device_enable_cbs, callback);
1701 free(callback);
1702 }
1703
1704 if (enabled_cb && disabled_cb) {
1705 callback = (struct device_enabled_cb *)calloc(
1706 1, sizeof(*callback));
1707 callback->enabled_cb = enabled_cb;
1708 callback->disabled_cb = disabled_cb;
1709 callback->cb_data = cb_data;
1710 DL_APPEND(device_enable_cbs, callback);
1711 }
1712
1713 return 0;
1714 }
1715
1716 void cras_iodev_list_register_loopback(enum CRAS_LOOPBACK_TYPE loopback_type,
1717 unsigned int output_dev_idx,
1718 loopback_hook_data_t hook_data,
1719 loopback_hook_control_t hook_control,
1720 unsigned int loopback_dev_idx)
1721 {
1722 struct cras_iodev *iodev = find_dev(output_dev_idx);
1723 struct cras_iodev *loopback_dev;
1724 struct cras_loopback *loopback;
1725 bool dev_open;
1726
1727 if (iodev == NULL) {
1728 syslog(LOG_ERR, "Output dev %u not found for loopback",
1729 output_dev_idx);
1730 return;
1731 }
1732
1733 loopback_dev = find_dev(loopback_dev_idx);
1734 if (loopback_dev == NULL) {
1735 syslog(LOG_ERR, "Loopback dev %u not found", loopback_dev_idx);
1736 return;
1737 }
1738
1739 dev_open = cras_iodev_is_open(iodev);
1740
1741 loopback = (struct cras_loopback *)calloc(1, sizeof(*loopback));
1742 if (NULL == loopback) {
1743 syslog(LOG_ERR, "Not enough memory for loopback");
1744 return;
1745 }
1746
1747 loopback->type = loopback_type;
1748 loopback->hook_data = hook_data;
1749 loopback->hook_control = hook_control;
1750 loopback->cb_data = loopback_dev;
1751 if (loopback->hook_control && dev_open)
1752 loopback->hook_control(true, loopback->cb_data);
1753
1754 DL_APPEND(iodev->loopbacks, loopback);
1755 }
1756
1757 void cras_iodev_list_unregister_loopback(enum CRAS_LOOPBACK_TYPE type,
1758 unsigned int output_dev_idx,
1759 unsigned int loopback_dev_idx)
1760 {
1761 struct cras_iodev *iodev = find_dev(output_dev_idx);
1762 struct cras_iodev *loopback_dev;
1763 struct cras_loopback *loopback;
1764
1765 if (iodev == NULL)
1766 return;
1767
1768 loopback_dev = find_dev(loopback_dev_idx);
1769 if (loopback_dev == NULL)
1770 return;
1771
1772 DL_FOREACH (iodev->loopbacks, loopback) {
1773 if ((loopback->cb_data == loopback_dev) &&
1774 (loopback->type == type)) {
1775 DL_DELETE(iodev->loopbacks, loopback);
1776 free(loopback);
1777 }
1778 }
1779 }
1780
1781 void cras_iodev_list_reset()
1782 {
1783 struct enabled_dev *edev;
1784
1785 DL_FOREACH (enabled_devs[CRAS_STREAM_OUTPUT], edev) {
1786 DL_DELETE(enabled_devs[CRAS_STREAM_OUTPUT], edev);
1787 free(edev);
1788 }
1789 enabled_devs[CRAS_STREAM_OUTPUT] = NULL;
1790 DL_FOREACH (enabled_devs[CRAS_STREAM_INPUT], edev) {
1791 DL_DELETE(enabled_devs[CRAS_STREAM_INPUT], edev);
1792 free(edev);
1793 }
1794 enabled_devs[CRAS_STREAM_INPUT] = NULL;
1795 devs[CRAS_STREAM_OUTPUT].iodevs = NULL;
1796 devs[CRAS_STREAM_INPUT].iodevs = NULL;
1797 devs[CRAS_STREAM_OUTPUT].size = 0;
1798 devs[CRAS_STREAM_INPUT].size = 0;
1799 }
1800