/******************************************************************************
 *
 *  Copyright (C) 2014 Google, Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at:
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 ******************************************************************************/

#define LOG_TAG "bt_hci"

#include <assert.h>
#include <cutils/properties.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h> // getpid() and usleep(), used in command_timed_out()

#include "buffer_allocator.h"
#include "btsnoop.h"
#include "osi/include/fixed_queue.h"
#include "osi/include/future.h"
#include "hcidefs.h"
#include "hcimsgs.h"
#include "hci_hal.h"
#include "hci_internals.h"
#include "hci_inject.h"
#include "hci_layer.h"
#include "osi/include/list.h"
#include "low_power_manager.h"
#include "btcore/include/module.h"
#include "osi/include/non_repeating_timer.h"
#include "osi/include/osi.h"
#include "osi/include/log.h"
#include "packet_fragmenter.h"
#include "osi/include/reactor.h"
#include "vendor.h"

// TODO(zachoverflow): remove this hack extern
#include <hardware/bluetooth.h>
bt_bdaddr_t btif_local_bd_addr;

#define INBOUND_PACKET_TYPE_COUNT 3
#define PACKET_TYPE_TO_INBOUND_INDEX(type) ((type) - 2)
#define PACKET_TYPE_TO_INDEX(type) ((type) - 1)

#define PREAMBLE_BUFFER_SIZE 4 // max preamble size, ACL
#define RETRIEVE_ACL_LENGTH(preamble) ((((preamble)[3]) << 8) | (preamble)[2])
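// Note: the serial data types are expected to follow the HCI UART (H4)
// numbering: 1 = command, 2 = ACL, 3 = SCO, 4 = event. Commands are never
// inbound, so inbound indices start at ACL; e.g.
// PACKET_TYPE_TO_INBOUND_INDEX(DATA_TYPE_EVENT) == 2.
//
// The ACL preamble is a 2-byte handle/flags field followed by a little-endian
// 16-bit data length. For an illustrative preamble of 0x01 0x20 0x1b 0x00,
// RETRIEVE_ACL_LENGTH yields (0x00 << 8) | 0x1b == 27 bytes of payload.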

static const uint8_t preamble_sizes[] = {
  HCI_COMMAND_PREAMBLE_SIZE,
  HCI_ACL_PREAMBLE_SIZE,
  HCI_SCO_PREAMBLE_SIZE,
  HCI_EVENT_PREAMBLE_SIZE
};

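// Indexed with PACKET_TYPE_TO_INDEX, so the slot order mirrors preamble_sizes
// above (command, ACL, SCO, event). Slot 0 corresponds to commands, which are
// never received, so it effectively holds MSG_HC_TO_STACK_HCI_ERR as a
// placeholder.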
static const uint16_t outbound_event_types[] =
{
  MSG_HC_TO_STACK_HCI_ERR,
  MSG_HC_TO_STACK_HCI_ACL,
  MSG_HC_TO_STACK_HCI_SCO,
  MSG_HC_TO_STACK_HCI_EVT
};

typedef enum {
  BRAND_NEW,
  PREAMBLE,
  BODY,
  IGNORE,
  FINISHED
} receive_state_t;

typedef struct {
  receive_state_t state;
  uint16_t bytes_remaining;
  uint8_t preamble[PREAMBLE_BUFFER_SIZE];
  uint16_t index;
  BT_HDR *buffer;
} packet_receive_data_t;

typedef struct {
  uint16_t opcode;
  future_t *complete_future;
  command_complete_cb complete_callback;
  command_status_cb status_callback;
  void *context;
  BT_HDR *command;
} waiting_command_t;

// Using a define here, because it can be stringified for the property lookup
#define DEFAULT_STARTUP_TIMEOUT_MS 8000
// Stringify through a helper so the macro argument is expanded first, giving
// "8000" rather than the literal text "DEFAULT_STARTUP_TIMEOUT_MS".
#define STRING_VALUE_OF_(x) #x
#define STRING_VALUE_OF(x) STRING_VALUE_OF_(x)

static const uint32_t EPILOG_TIMEOUT_MS = 3000;
static const uint32_t COMMAND_PENDING_TIMEOUT = 8000;

// Our interface
static bool interface_created;
static hci_t interface;

// Modules we import and callbacks we export
static const allocator_t *buffer_allocator;
static const btsnoop_t *btsnoop;
static const hci_hal_t *hal;
static const hci_hal_callbacks_t hal_callbacks;
static const hci_inject_t *hci_inject;
static const low_power_manager_t *low_power_manager;
static const packet_fragmenter_t *packet_fragmenter;
static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks;
static const vendor_t *vendor;

static future_t *startup_future;
static thread_t *thread; // We own this

static volatile bool firmware_is_configured = false;
static non_repeating_timer_t *epilog_timer;
static non_repeating_timer_t *startup_timer;

// Outbound-related
static int command_credits = 1;
static fixed_queue_t *command_queue;
static fixed_queue_t *packet_queue;

// Inbound-related
static non_repeating_timer_t *command_response_timer;
static list_t *commands_pending_response;
static pthread_mutex_t commands_pending_response_lock;
static packet_receive_data_t incoming_packets[INBOUND_PACKET_TYPE_COUNT];

// The hand-off point for data going to a higher layer, set by the higher layer
static fixed_queue_t *upwards_data_queue;

static future_t *shut_down();

static void event_finish_startup(void *context);
static void firmware_config_callback(bool success);
static void startup_timer_expired(void *context);

static void event_postload(void *context);
static void sco_config_callback(bool success);

static void event_epilog(void *context);
static void epilog_finished_callback(bool success);
static void epilog_timer_expired(void *context);

static void event_command_ready(fixed_queue_t *queue, void *context);
static void event_packet_ready(fixed_queue_t *queue, void *context);
static void command_timed_out(void *context);

static void hal_says_data_ready(serial_data_type_t type);
static bool filter_incoming_event(BT_HDR *packet);

static serial_data_type_t event_to_data_type(uint16_t event);
static waiting_command_t *get_waiting_command(command_opcode_t opcode);

// Module lifecycle functions

static future_t *start_up(void) {
  LOG_INFO("%s", __func__);

  // The host is only allowed to send at most one command initially,
  // as per the Bluetooth spec, Volume 2, Part E, 4.4 (Command Flow Control)
  // This value can change when you get a command complete or command status event.
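  // (Command Complete and Command Status events both carry a
  // Num_HCI_Command_Packets field; filter_incoming_event() copies that value
  // into command_credits, which is what lets further commands flow.)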
  command_credits = 1;
  firmware_is_configured = false;

  pthread_mutex_init(&commands_pending_response_lock, NULL);

  // Grab the override startup timeout ms, if present.
  period_ms_t startup_timeout_ms;
  char timeout_prop[PROPERTY_VALUE_MAX];
  if (!property_get("bluetooth.enable_timeout_ms", timeout_prop, STRING_VALUE_OF(DEFAULT_STARTUP_TIMEOUT_MS))
      || (startup_timeout_ms = atoi(timeout_prop)) < 100)
    startup_timeout_ms = DEFAULT_STARTUP_TIMEOUT_MS;

  startup_timer = non_repeating_timer_new(startup_timeout_ms, startup_timer_expired, NULL);
  if (!startup_timer) {
    LOG_ERROR("%s unable to create startup timer.", __func__);
    goto error;
  }

  // Make sure we run in a bounded amount of time
  non_repeating_timer_restart(startup_timer);

  epilog_timer = non_repeating_timer_new(EPILOG_TIMEOUT_MS, epilog_timer_expired, NULL);
  if (!epilog_timer) {
    LOG_ERROR("%s unable to create epilog timer.", __func__);
    goto error;
  }

  command_response_timer = non_repeating_timer_new(COMMAND_PENDING_TIMEOUT, command_timed_out, NULL);
  if (!command_response_timer) {
    LOG_ERROR("%s unable to create command response timer.", __func__);
    goto error;
  }

  command_queue = fixed_queue_new(SIZE_MAX);
  if (!command_queue) {
    LOG_ERROR("%s unable to create pending command queue.", __func__);
    goto error;
  }

  packet_queue = fixed_queue_new(SIZE_MAX);
  if (!packet_queue) {
    LOG_ERROR("%s unable to create pending packet queue.", __func__);
    goto error;
  }

  thread = thread_new("hci_thread");
  if (!thread) {
    LOG_ERROR("%s unable to create thread.", __func__);
    goto error;
  }

  commands_pending_response = list_new(NULL);
  if (!commands_pending_response) {
    LOG_ERROR("%s unable to create list for commands pending response.", __func__);
    goto error;
  }

  memset(incoming_packets, 0, sizeof(incoming_packets));

  packet_fragmenter->init(&packet_fragmenter_callbacks);

  fixed_queue_register_dequeue(command_queue, thread_get_reactor(thread), event_command_ready, NULL);
  fixed_queue_register_dequeue(packet_queue, thread_get_reactor(thread), event_packet_ready, NULL);

  vendor->open(btif_local_bd_addr.address, &interface);
  hal->init(&hal_callbacks, thread);
  low_power_manager->init(thread);

  vendor->set_callback(VENDOR_CONFIGURE_FIRMWARE, firmware_config_callback);
  vendor->set_callback(VENDOR_CONFIGURE_SCO, sco_config_callback);
  vendor->set_callback(VENDOR_DO_EPILOG, epilog_finished_callback);

  if (!hci_inject->open(&interface)) {
    // TODO(sharvil): gracefully propagate failures from this layer.
  }

  int power_state = BT_VND_PWR_OFF;
#if (defined (BT_CLEAN_TURN_ON_DISABLED) && BT_CLEAN_TURN_ON_DISABLED == TRUE)
  LOG_WARN("%s not turning off the chip before turning on.", __func__);
  // So apparently this hack was needed in the past because a Wingray kernel driver
  // didn't handle power off commands in a powered off state correctly.

  // The comment in the old code said the workaround should be removed when the
  // problem was fixed. Sadly, I have no idea if said bug was fixed or if said
  // kernel is still in use, so we must leave this here for posterity. #sadpanda
#else
  // cycle power on the chip to ensure it has been reset
  vendor->send_command(VENDOR_CHIP_POWER_CONTROL, &power_state);
#endif
  power_state = BT_VND_PWR_ON;
  vendor->send_command(VENDOR_CHIP_POWER_CONTROL, &power_state);

  startup_future = future_new();
  LOG_DEBUG("%s starting async portion", __func__);
  thread_post(thread, event_finish_startup, NULL);
  return startup_future;
error:;
  shut_down(); // returns NULL so no need to wait for it
  return future_new_immediate(FUTURE_FAIL);
}

static future_t *shut_down() {
  LOG_INFO("%s", __func__);

  hci_inject->close();

  if (thread) {
    if (firmware_is_configured) {
      non_repeating_timer_restart(epilog_timer);
      thread_post(thread, event_epilog, NULL);
    } else {
      thread_stop(thread);
    }

    thread_join(thread);
  }

  fixed_queue_free(command_queue, osi_free);
  fixed_queue_free(packet_queue, buffer_allocator->free);
  list_free(commands_pending_response);

  pthread_mutex_destroy(&commands_pending_response_lock);

  packet_fragmenter->cleanup();

  non_repeating_timer_free(epilog_timer);
  non_repeating_timer_free(command_response_timer);
  non_repeating_timer_free(startup_timer);

  epilog_timer = NULL;
  command_response_timer = NULL;

  low_power_manager->cleanup();
  hal->close();

  // Turn off the chip
  int power_state = BT_VND_PWR_OFF;
  vendor->send_command(VENDOR_CHIP_POWER_CONTROL, &power_state);
  vendor->close();

  thread_free(thread);
  thread = NULL;
  firmware_is_configured = false;

  return NULL;
}

const module_t hci_module = {
  .name = HCI_MODULE,
  .init = NULL,
  .start_up = start_up,
  .shut_down = shut_down,
  .clean_up = NULL,
  .dependencies = {
    BTSNOOP_MODULE,
    NULL
  }
};

// Interface functions

static void do_postload() {
  LOG_DEBUG("%s posting postload work item", __func__);
  thread_post(thread, event_postload, NULL);
}

static void set_data_queue(fixed_queue_t *queue) {
  upwards_data_queue = queue;
}

static void transmit_command(
    BT_HDR *command,
    command_complete_cb complete_callback,
    command_status_cb status_callback,
    void *context) {
  waiting_command_t *wait_entry = osi_calloc(sizeof(waiting_command_t));
  if (!wait_entry) {
    LOG_ERROR("%s couldn't allocate space for wait entry.", __func__);
    return;
  }

  uint8_t *stream = command->data + command->offset;
  STREAM_TO_UINT16(wait_entry->opcode, stream);
  wait_entry->complete_callback = complete_callback;
  wait_entry->status_callback = status_callback;
  wait_entry->command = command;
  wait_entry->context = context;

  // Store the command message type in the event field
  // in case the upper layer didn't already
  command->event = MSG_STACK_TO_HC_HCI_CMD;

  fixed_queue_enqueue(command_queue, wait_entry);
}

static future_t *transmit_command_futured(BT_HDR *command) {
  waiting_command_t *wait_entry = osi_calloc(sizeof(waiting_command_t));
  assert(wait_entry != NULL);

  future_t *future = future_new();

  uint8_t *stream = command->data + command->offset;
  STREAM_TO_UINT16(wait_entry->opcode, stream);
  wait_entry->complete_future = future;
  wait_entry->command = command;

  // Store the command message type in the event field
  // in case the upper layer didn't already
  command->event = MSG_STACK_TO_HC_HCI_CMD;

  fixed_queue_enqueue(command_queue, wait_entry);
  return future;
}

static void transmit_downward(data_dispatcher_type_t type, void *data) {
  if (type == MSG_STACK_TO_HC_HCI_CMD) {
    // TODO(zachoverflow): eliminate this call
    transmit_command((BT_HDR *)data, NULL, NULL, NULL);
    LOG_WARN("%s legacy transmit of command. Use transmit_command instead.", __func__);
  } else {
    fixed_queue_enqueue(packet_queue, data);
  }
}

// Start up functions

static void event_finish_startup(UNUSED_ATTR void *context) {
  LOG_INFO("%s", __func__);
  hal->open();
  vendor->send_async_command(VENDOR_CONFIGURE_FIRMWARE, NULL);
}

static void firmware_config_callback(UNUSED_ATTR bool success) {
  LOG_INFO("%s", __func__);
  firmware_is_configured = true;
  non_repeating_timer_cancel(startup_timer);

  future_ready(startup_future, FUTURE_SUCCESS);
  startup_future = NULL;
}

static void startup_timer_expired(UNUSED_ATTR void *context) {
  LOG_ERROR("%s", __func__);
  future_ready(startup_future, FUTURE_FAIL);
  startup_future = NULL;
}

// Postload functions

static void event_postload(UNUSED_ATTR void *context) {
  LOG_INFO("%s", __func__);
  if (vendor->send_async_command(VENDOR_CONFIGURE_SCO, NULL) == -1) {
    // If couldn't configure sco, we won't get the sco configuration callback
    // so go pretend to do it now
    sco_config_callback(false);
  }
}

static void sco_config_callback(UNUSED_ATTR bool success) {
  LOG_INFO("%s postload finished.", __func__);
}

// Epilog functions

static void event_epilog(UNUSED_ATTR void *context) {
  vendor->send_async_command(VENDOR_DO_EPILOG, NULL);
}

static void epilog_finished_callback(UNUSED_ATTR bool success) {
  LOG_INFO("%s", __func__);
  thread_stop(thread);
}

static void epilog_timer_expired(UNUSED_ATTR void *context) {
  LOG_INFO("%s", __func__);
  thread_stop(thread);
}

// Command/packet transmitting functions

static void event_command_ready(fixed_queue_t *queue, UNUSED_ATTR void *context) {
  if (command_credits > 0) {
    waiting_command_t *wait_entry = fixed_queue_dequeue(queue);
    command_credits--;

    // Move it to the list of commands awaiting response
    pthread_mutex_lock(&commands_pending_response_lock);
    list_append(commands_pending_response, wait_entry);
    pthread_mutex_unlock(&commands_pending_response_lock);

    // Send it off
    low_power_manager->wake_assert();
    packet_fragmenter->fragment_and_dispatch(wait_entry->command);
    low_power_manager->transmit_done();

    non_repeating_timer_restart_if(command_response_timer, !list_is_empty(commands_pending_response));
  }
}

static void event_packet_ready(fixed_queue_t *queue, UNUSED_ATTR void *context) {
  // The queue may be the command queue or the packet queue, we don't care
  BT_HDR *packet = (BT_HDR *)fixed_queue_dequeue(queue);

  low_power_manager->wake_assert();
  packet_fragmenter->fragment_and_dispatch(packet);
  low_power_manager->transmit_done();
}

// Callback for the fragmenter to send a fragment
static void transmit_fragment(BT_HDR *packet, bool send_transmit_finished) {
  uint16_t event = packet->event & MSG_EVT_MASK;
  serial_data_type_t type = event_to_data_type(event);

  btsnoop->capture(packet, false);
  hal->transmit_data(type, packet->data + packet->offset, packet->len);

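  // Note: command packets are deliberately not freed here; each one is still
  // referenced by its waiting_command_t entry and is freed later by
  // filter_incoming_event() (or handed to the status callback) once the
  // matching Command Complete/Status event arrives.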
  if (event != MSG_STACK_TO_HC_HCI_CMD && send_transmit_finished)
    buffer_allocator->free(packet);
}

static void fragmenter_transmit_finished(BT_HDR *packet, bool all_fragments_sent) {
  if (all_fragments_sent) {
    buffer_allocator->free(packet);
  } else {
    // This is kind of a weird case, since we're dispatching a partially sent packet
    // up to a higher layer.
    // TODO(zachoverflow): rework upper layer so this isn't necessary.
    data_dispatcher_dispatch(interface.event_dispatcher, packet->event & MSG_EVT_MASK, packet);
  }
}

static void command_timed_out(UNUSED_ATTR void *context) {
  pthread_mutex_lock(&commands_pending_response_lock);

  if (list_is_empty(commands_pending_response)) {
    LOG_ERROR("%s with no commands pending response", __func__);
  } else {
    waiting_command_t *wait_entry = list_front(commands_pending_response);
    pthread_mutex_unlock(&commands_pending_response_lock);

    // We shouldn't try to recover the stack from this command timeout.
    // If it's caused by a software bug, fix it. If it's a hardware bug, fix it.
    LOG_ERROR("%s hci layer timeout waiting for response to a command. opcode: 0x%x", __func__, wait_entry->opcode);
  }

  LOG_ERROR("%s restarting the bluetooth process.", __func__);
  TEMP_FAILURE_RETRY(usleep(10000));
  kill(getpid(), SIGKILL);
}

// Event/packet receiving functions

// This function is not required to read all of a packet in one go, so
// be wary of reentry. But this function must return after finishing a packet.
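//
// Illustrative example (byte values are an assumption for illustration only):
// a Command Complete event for HCI_Reset arrives as 0e 04 01 03 0c 00. The
// state machine reads the 2-byte event preamble (0e 04), takes the last
// preamble byte (0x04) as the body length, allocates BT_HDR_SIZE + 2 + 4
// bytes, copies the preamble in, then consumes 01 03 0c 00 in the BODY state
// before marking the packet FINISHED.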
static void hal_says_data_ready(serial_data_type_t type) {
  packet_receive_data_t *incoming = &incoming_packets[PACKET_TYPE_TO_INBOUND_INDEX(type)];

  uint8_t byte;
  while (hal->read_data(type, &byte, 1, false) != 0) {
    switch (incoming->state) {
      case BRAND_NEW:
        // Initialize and prepare to jump to the preamble reading state
        incoming->bytes_remaining = preamble_sizes[PACKET_TYPE_TO_INDEX(type)];
        memset(incoming->preamble, 0, PREAMBLE_BUFFER_SIZE);
        incoming->index = 0;
        incoming->state = PREAMBLE;
        // INTENTIONAL FALLTHROUGH
      case PREAMBLE:
        incoming->preamble[incoming->index] = byte;
        incoming->index++;
        incoming->bytes_remaining--;

        if (incoming->bytes_remaining == 0) {
          // For event and sco preambles, the last byte we read is the length
          incoming->bytes_remaining = (type == DATA_TYPE_ACL) ? RETRIEVE_ACL_LENGTH(incoming->preamble) : byte;

          size_t buffer_size = BT_HDR_SIZE + incoming->index + incoming->bytes_remaining;
          incoming->buffer = (BT_HDR *)buffer_allocator->alloc(buffer_size);

          if (!incoming->buffer) {
            LOG_ERROR("%s error getting buffer for incoming packet of type %d and size %zu", __func__, type, buffer_size);
            // Can't read any more of this current packet, so jump out
            incoming->state = incoming->bytes_remaining == 0 ? BRAND_NEW : IGNORE;
            break;
          }

          // Initialize the buffer
          incoming->buffer->offset = 0;
          incoming->buffer->layer_specific = 0;
          incoming->buffer->event = outbound_event_types[PACKET_TYPE_TO_INDEX(type)];
          memcpy(incoming->buffer->data, incoming->preamble, incoming->index);

          incoming->state = incoming->bytes_remaining > 0 ? BODY : FINISHED;
        }

        break;
      case BODY:
        incoming->buffer->data[incoming->index] = byte;
        incoming->index++;
        incoming->bytes_remaining--;

        size_t bytes_read = hal->read_data(type, (incoming->buffer->data + incoming->index), incoming->bytes_remaining, false);
        incoming->index += bytes_read;
        incoming->bytes_remaining -= bytes_read;

        incoming->state = incoming->bytes_remaining == 0 ? FINISHED : incoming->state;
        break;
      case IGNORE:
        incoming->bytes_remaining--;
        if (incoming->bytes_remaining == 0) {
          incoming->state = BRAND_NEW;
          // Don't forget to let the hal know we finished the packet we were ignoring.
          // Otherwise we'll get out of sync with hals that embed extra information
          // in the uart stream (like H4). #badnewsbears
          hal->packet_finished(type);
          return;
        }

        break;
      case FINISHED:
        LOG_ERROR("%s the state machine should not have been left in the finished state.", __func__);
        break;
    }

    if (incoming->state == FINISHED) {
      incoming->buffer->len = incoming->index;
      btsnoop->capture(incoming->buffer, true);

      if (type != DATA_TYPE_EVENT) {
        packet_fragmenter->reassemble_and_dispatch(incoming->buffer);
      } else if (!filter_incoming_event(incoming->buffer)) {
        // Dispatch the event by event code
        uint8_t *stream = incoming->buffer->data;
        uint8_t event_code;
        STREAM_TO_UINT8(event_code, stream);

        data_dispatcher_dispatch(
          interface.event_dispatcher,
          event_code,
          incoming->buffer
        );
      }

      // We don't control the buffer anymore
      incoming->buffer = NULL;
      incoming->state = BRAND_NEW;
      hal->packet_finished(type);

      // We return after a packet is finished for two reasons:
      // 1. The type of the next packet could be different.
      // 2. We don't want to hog cpu time.
      return;
    }
  }
}

// Returns true if the event was intercepted and should not proceed to
// higher layers. Also inspects an incoming event for interesting
// information, like how many commands are now able to be sent.
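//
// Command Complete parameters begin with Num_HCI_Command_Packets (1 byte)
// followed by the 2-byte opcode; Command Status adds a leading status byte
// before those two fields. That ordering is what the STREAM_TO_* reads below
// rely on.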
static bool filter_incoming_event(BT_HDR *packet) {
  waiting_command_t *wait_entry = NULL;
  uint8_t *stream = packet->data;
  uint8_t event_code;
  command_opcode_t opcode;

  STREAM_TO_UINT8(event_code, stream);
  STREAM_SKIP_UINT8(stream); // Skip the parameter total length field

  if (event_code == HCI_COMMAND_COMPLETE_EVT) {
    STREAM_TO_UINT8(command_credits, stream);
    STREAM_TO_UINT16(opcode, stream);

    wait_entry = get_waiting_command(opcode);
    if (!wait_entry)
      LOG_WARN("%s command complete event with no matching command. opcode: 0x%x.", __func__, opcode);
    else if (wait_entry->complete_callback)
      wait_entry->complete_callback(packet, wait_entry->context);
    else if (wait_entry->complete_future)
      future_ready(wait_entry->complete_future, packet);

    goto intercepted;
  } else if (event_code == HCI_COMMAND_STATUS_EVT) {
    uint8_t status;
    STREAM_TO_UINT8(status, stream);
    STREAM_TO_UINT8(command_credits, stream);
    STREAM_TO_UINT16(opcode, stream);

    // If a command generates a command status event, it won't be getting a command complete event

    wait_entry = get_waiting_command(opcode);
    if (!wait_entry)
      LOG_WARN("%s command status event with no matching command. opcode: 0x%x", __func__, opcode);
    else if (wait_entry->status_callback)
      wait_entry->status_callback(status, wait_entry->command, wait_entry->context);

    goto intercepted;
  }

  return false;
intercepted:;
  non_repeating_timer_restart_if(command_response_timer, !list_is_empty(commands_pending_response));

  if (wait_entry) {
    // If it has a callback, it's responsible for freeing the packet
    if (event_code == HCI_COMMAND_STATUS_EVT || (!wait_entry->complete_callback && !wait_entry->complete_future))
      buffer_allocator->free(packet);

    // If it has a callback, it's responsible for freeing the command
    if (event_code == HCI_COMMAND_COMPLETE_EVT || !wait_entry->status_callback)
      buffer_allocator->free(wait_entry->command);

    osi_free(wait_entry);
  } else {
    buffer_allocator->free(packet);
  }

  return true;
}

// Callback for the fragmenter to dispatch up a completely reassembled packet
static void dispatch_reassembled(BT_HDR *packet) {
  // Events should already have been dispatched before this point
  assert((packet->event & MSG_EVT_MASK) != MSG_HC_TO_STACK_HCI_EVT);
  assert(upwards_data_queue != NULL);

  if (upwards_data_queue) {
    fixed_queue_enqueue(upwards_data_queue, packet);
  } else {
    LOG_ERROR("%s had no queue to place upwards data packet in. Dropping it on the floor.", __func__);
    buffer_allocator->free(packet);
  }
}

// Misc internal functions

// TODO(zachoverflow): we seem to do this a couple places, like the HCI inject module. #centralize
static serial_data_type_t event_to_data_type(uint16_t event) {
  if (event == MSG_STACK_TO_HC_HCI_ACL)
    return DATA_TYPE_ACL;
  else if (event == MSG_STACK_TO_HC_HCI_SCO)
    return DATA_TYPE_SCO;
  else if (event == MSG_STACK_TO_HC_HCI_CMD)
    return DATA_TYPE_COMMAND;
  else
    LOG_ERROR("%s invalid event type, could not translate 0x%x", __func__, event);

  return 0;
}

static waiting_command_t *get_waiting_command(command_opcode_t opcode) {
  pthread_mutex_lock(&commands_pending_response_lock);

  for (const list_node_t *node = list_begin(commands_pending_response);
      node != list_end(commands_pending_response);
      node = list_next(node)) {
    waiting_command_t *wait_entry = list_node(node);

    if (!wait_entry || wait_entry->opcode != opcode)
      continue;

    list_remove(commands_pending_response, wait_entry);

    pthread_mutex_unlock(&commands_pending_response_lock);
    return wait_entry;
  }

  pthread_mutex_unlock(&commands_pending_response_lock);
  return NULL;
}

static void init_layer_interface() {
  if (!interface_created) {
    interface.send_low_power_command = low_power_manager->post_command;
    interface.do_postload = do_postload;

    // It's probably ok for this to live forever. It's small and
    // there's only one instance of the hci interface.
    interface.event_dispatcher = data_dispatcher_new("hci_layer");
    if (!interface.event_dispatcher) {
      LOG_ERROR("%s could not create upward dispatcher.", __func__);
      return;
    }

    interface.set_data_queue = set_data_queue;
    interface.transmit_command = transmit_command;
    interface.transmit_command_futured = transmit_command_futured;
    interface.transmit_downward = transmit_downward;
    interface_created = true;
  }
}

static const hci_hal_callbacks_t hal_callbacks = {
  hal_says_data_ready
};

static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks = {
  transmit_fragment,
  dispatch_reassembled,
  fragmenter_transmit_finished
};

const hci_t *hci_layer_get_interface() {
  buffer_allocator = buffer_allocator_get_interface();
  hal = hci_hal_get_interface();
  btsnoop = btsnoop_get_interface();
  hci_inject = hci_inject_get_interface();
  packet_fragmenter = packet_fragmenter_get_interface();
  vendor = vendor_get_interface();
  low_power_manager = low_power_manager_get_interface();

  init_layer_interface();
  return &interface;
}

const hci_t *hci_layer_get_test_interface(
    const allocator_t *buffer_allocator_interface,
    const hci_hal_t *hal_interface,
    const btsnoop_t *btsnoop_interface,
    const hci_inject_t *hci_inject_interface,
    const packet_fragmenter_t *packet_fragmenter_interface,
    const vendor_t *vendor_interface,
    const low_power_manager_t *low_power_manager_interface) {

  buffer_allocator = buffer_allocator_interface;
  hal = hal_interface;
  btsnoop = btsnoop_interface;
  hci_inject = hci_inject_interface;
  packet_fragmenter = packet_fragmenter_interface;
  vendor = vendor_interface;
  low_power_manager = low_power_manager_interface;

  init_layer_interface();
  return &interface;
}