/******************************************************************************
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 ******************************************************************************/

/*****************************************************************************
 *
 * Filename:      uipc.cc
 *
 * Description:   UIPC implementation for fluoride
 *
 *****************************************************************************/

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <unistd.h>

#include <memory>
#include <mutex>
#include <set>

#include "audio_a2dp_hw/include/audio_a2dp_hw.h"
#include "bt_common.h"
#include "bt_types.h"
#include "bt_utils.h"
#include "osi/include/osi.h"
#include "osi/include/socket_utils/sockets.h"
#include "uipc.h"

/*****************************************************************************
 *  Constants & Macros
 *****************************************************************************/

#define PCM_FILENAME "/data/test.pcm"

#define MAX(a, b) ((a) > (b) ? (a) : (b))

#define CASE_RETURN_STR(const) \
  case const:                  \
    return #const;

#define UIPC_DISCONNECTED (-1)

#define SAFE_FD_ISSET(fd, set) (((fd) == -1) ? false : FD_ISSET((fd), (set)))

#define UIPC_FLUSH_BUFFER_SIZE 1024

/*****************************************************************************
 *  Local type definitions
 *****************************************************************************/

typedef enum {
  UIPC_TASK_FLAG_DISCONNECT_CHAN = 0x1,
} tUIPC_TASK_FLAGS;

/*****************************************************************************
 *  Static functions
 *****************************************************************************/
static int uipc_close_ch_locked(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id);

/*****************************************************************************
 *  Externs
 *****************************************************************************/

/*****************************************************************************
 *  Helper functions
 *****************************************************************************/

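/* Return a printable name for a UIPC event, for use in trace output. */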
const char* dump_uipc_event(tUIPC_EVENT event) {
  switch (event) {
    CASE_RETURN_STR(UIPC_OPEN_EVT)
    CASE_RETURN_STR(UIPC_CLOSE_EVT)
    CASE_RETURN_STR(UIPC_RX_DATA_EVT)
    CASE_RETURN_STR(UIPC_RX_DATA_READY_EVT)
    CASE_RETURN_STR(UIPC_TX_DATA_READY_EVT)
    default:
      return "UNKNOWN MSG ID";
  }
}

/*****************************************************************************
 *   socket helper functions
 ****************************************************************************/

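/* Create a listening AF_LOCAL stream socket bound to |name| and start
 * listening on it. Returns the server socket fd, or -1 on failure. */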
static inline int create_server_socket(const char* name) {
  int s = socket(AF_LOCAL, SOCK_STREAM, 0);
  if (s < 0) return -1;

  BTIF_TRACE_EVENT("create_server_socket %s", name);

  if (osi_socket_local_server_bind(s, name,
#if defined(OS_GENERIC)
                                   ANDROID_SOCKET_NAMESPACE_FILESYSTEM
#else   // !defined(OS_GENERIC)
                                   ANDROID_SOCKET_NAMESPACE_ABSTRACT
#endif  // defined(OS_GENERIC)
                                   ) < 0) {
    BTIF_TRACE_EVENT("socket failed to create (%s)", strerror(errno));
    close(s);
    return -1;
  }

  if (listen(s, 5) < 0) {
    BTIF_TRACE_EVENT("listen failed (%s)", strerror(errno));
    close(s);
    return -1;
  }

  BTIF_TRACE_EVENT("created socket fd %d", s);
  return s;
}

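/* Accept a pending connection on server socket |sfd|. The socket is polled
 * first with a zero timeout so the call never blocks, and the accepted
 * socket's receive buffer is sized to match the A2DP audio client. Returns
 * the new connection fd, or -1 on failure. */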
static int accept_server_socket(int sfd) {
  struct sockaddr_un remote;
  struct pollfd pfd;
  int fd;
  socklen_t len = sizeof(struct sockaddr_un);

  BTIF_TRACE_EVENT("accept fd %d", sfd);

  /* make sure there is data to process */
  pfd.fd = sfd;
  pfd.events = POLLIN;

  int poll_ret;
  OSI_NO_INTR(poll_ret = poll(&pfd, 1, 0));
  if (poll_ret == 0) {
    BTIF_TRACE_WARNING("accept poll timeout");
    return -1;
  }

  // BTIF_TRACE_EVENT("poll revents 0x%x", pfd.revents);

  OSI_NO_INTR(fd = accept(sfd, (struct sockaddr*)&remote, &len));
  if (fd == -1) {
    BTIF_TRACE_ERROR("sock accept failed (%s)", strerror(errno));
    return -1;
  }

  // match socket buffer size option with client
  const int size = AUDIO_STREAM_OUTPUT_BUFFER_SZ;
  int ret =
      setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (char*)&size, (int)sizeof(size));
  if (ret < 0) {
    BTIF_TRACE_ERROR("setsockopt failed (%s)", strerror(errno));
  }

  // BTIF_TRACE_EVENT("new fd %d", fd);

  return fd;
}

/*****************************************************************************
 *
 *  uipc helper functions
 *
 ****************************************************************************/

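/* Reset the UIPC state: clear all fd sets and channel slots, and create the
 * socketpair used to wake the select() loop in the read task. Returns 0 on
 * success, -1 if the socketpair cannot be created. */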
static int uipc_main_init(tUIPC_STATE& uipc) {
  int i;

  BTIF_TRACE_EVENT("### uipc_main_init ###");

  uipc.tid = 0;
  uipc.running = 0;
  memset(&uipc.active_set, 0, sizeof(uipc.active_set));
  memset(&uipc.read_set, 0, sizeof(uipc.read_set));
  uipc.max_fd = 0;
  memset(&uipc.signal_fds, 0, sizeof(uipc.signal_fds));
  memset(&uipc.ch, 0, sizeof(uipc.ch));

  /* setup interrupt socket pair */
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, uipc.signal_fds) < 0) {
    return -1;
  }

  FD_SET(uipc.signal_fds[0], &uipc.active_set);
  uipc.max_fd = MAX(uipc.max_fd, uipc.signal_fds[0]);

  for (i = 0; i < UIPC_CH_NUM; i++) {
    tUIPC_CHAN* p = &uipc.ch[i];
    p->srvfd = UIPC_DISCONNECTED;
    p->fd = UIPC_DISCONNECTED;
    p->task_evt_flags = 0;
    p->cback = NULL;
  }

  return 0;
}

void uipc_main_cleanup(tUIPC_STATE& uipc) {
  int i;

  BTIF_TRACE_EVENT("uipc_main_cleanup");

  close(uipc.signal_fds[0]);
  close(uipc.signal_fds[1]);

  /* close any open channels */
  for (i = 0; i < UIPC_CH_NUM; i++) uipc_close_ch_locked(uipc, i);
}

/* check pending events in read task */
static void uipc_check_task_flags_locked(tUIPC_STATE& uipc) {
  int i;

  for (i = 0; i < UIPC_CH_NUM; i++) {
    if (uipc.ch[i].task_evt_flags & UIPC_TASK_FLAG_DISCONNECT_CHAN) {
      uipc.ch[i].task_evt_flags &= ~UIPC_TASK_FLAG_DISCONNECT_CHAN;
      uipc_close_ch_locked(uipc, i);
    }

    /* add here */
  }
}

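/* Service channel |ch_id| after select() returns: accept a new client on the
 * server socket if one is pending (closing any previous connection first),
 * and report UIPC_OPEN_EVT / UIPC_RX_DATA_READY_EVT through the channel
 * callback. Returns 0 on success, -1 on an invalid channel or failed accept. */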
static int uipc_check_fd_locked(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id) {
  if (ch_id >= UIPC_CH_NUM) return -1;

  // BTIF_TRACE_EVENT("CHECK SRVFD %d (ch %d)", uipc.ch[ch_id].srvfd, ch_id);

  if (SAFE_FD_ISSET(uipc.ch[ch_id].srvfd, &uipc.read_set)) {
    BTIF_TRACE_EVENT("INCOMING CONNECTION ON CH %d", ch_id);

    // Close the previous connection
    if (uipc.ch[ch_id].fd != UIPC_DISCONNECTED) {
      BTIF_TRACE_EVENT("CLOSE CONNECTION (FD %d)", uipc.ch[ch_id].fd);
      close(uipc.ch[ch_id].fd);
      FD_CLR(uipc.ch[ch_id].fd, &uipc.active_set);
      uipc.ch[ch_id].fd = UIPC_DISCONNECTED;
    }

    uipc.ch[ch_id].fd = accept_server_socket(uipc.ch[ch_id].srvfd);

    BTIF_TRACE_EVENT("NEW FD %d", uipc.ch[ch_id].fd);

    if ((uipc.ch[ch_id].fd >= 0) && uipc.ch[ch_id].cback) {
      /* if we have a callback we should add this fd to the active set
         and notify user with callback event */
      BTIF_TRACE_EVENT("ADD FD %d TO ACTIVE SET", uipc.ch[ch_id].fd);
      FD_SET(uipc.ch[ch_id].fd, &uipc.active_set);
      uipc.max_fd = MAX(uipc.max_fd, uipc.ch[ch_id].fd);
    }

    if (uipc.ch[ch_id].fd < 0) {
      BTIF_TRACE_ERROR("FAILED TO ACCEPT CH %d", ch_id);
      return -1;
    }

    if (uipc.ch[ch_id].cback) uipc.ch[ch_id].cback(ch_id, UIPC_OPEN_EVT);
  }

  // BTIF_TRACE_EVENT("CHECK FD %d (ch %d)", uipc.ch[ch_id].fd, ch_id);

  if (SAFE_FD_ISSET(uipc.ch[ch_id].fd, &uipc.read_set)) {
    // BTIF_TRACE_EVENT("INCOMING DATA ON CH %d", ch_id);

    if (uipc.ch[ch_id].cback)
      uipc.ch[ch_id].cback(ch_id, UIPC_RX_DATA_READY_EVT);
  }
  return 0;
}

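/* Consume a pending wakeup byte from the signal socketpair, if any, so the
 * select() loop does not keep reporting a stale wakeup. */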
static void uipc_check_interrupt_locked(tUIPC_STATE& uipc) {
  if (SAFE_FD_ISSET(uipc.signal_fds[0], &uipc.read_set)) {
    char sig_recv = 0;
    OSI_NO_INTR(
        recv(uipc.signal_fds[0], &sig_recv, sizeof(sig_recv), MSG_WAITALL));
  }
}

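/* Write one byte to the signal socketpair to wake the read task out of
 * select() so it can pick up updated fd sets or task flags. */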
static inline void uipc_wakeup_locked(tUIPC_STATE& uipc) {
  char sig_on = 1;
  BTIF_TRACE_EVENT("UIPC SEND WAKE UP");

  OSI_NO_INTR(send(uipc.signal_fds[1], &sig_on, sizeof(sig_on), 0));
}

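/* Create the listening socket for channel |ch_id| at |name|, register the
 * receive callback, add the server fd to the active set and wake the read
 * task so it starts watching it. Returns 0 on success, -1 on failure. */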
static int uipc_setup_server_locked(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id,
                                    const char* name, tUIPC_RCV_CBACK* cback) {
  int fd;

  BTIF_TRACE_EVENT("SETUP CHANNEL SERVER %d", ch_id);

  if (ch_id >= UIPC_CH_NUM) return -1;

  std::lock_guard<std::recursive_mutex> guard(uipc.mutex);

  fd = create_server_socket(name);

  if (fd < 0) {
    BTIF_TRACE_ERROR("failed to setup %s (%s)", name, strerror(errno));
    return -1;
  }

  BTIF_TRACE_EVENT("ADD SERVER FD TO ACTIVE SET %d", fd);
  FD_SET(fd, &uipc.active_set);
  uipc.max_fd = MAX(uipc.max_fd, fd);

  uipc.ch[ch_id].srvfd = fd;
  uipc.ch[ch_id].cback = cback;
  uipc.ch[ch_id].read_poll_tmo_ms = DEFAULT_READ_POLL_TMO_MS;

  /* trigger main thread to update read set */
  uipc_wakeup_locked(uipc);

  return 0;
}

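/* Drain and discard any data pending on the data fd of channel |ch_id|,
 * polling with a short timeout until the socket is empty, hung up, or in
 * error. */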
static void uipc_flush_ch_locked(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id) {
  char buf[UIPC_FLUSH_BUFFER_SIZE];
  struct pollfd pfd;

  pfd.events = POLLIN;
  pfd.fd = uipc.ch[ch_id].fd;

  if (uipc.ch[ch_id].fd == UIPC_DISCONNECTED) {
    BTIF_TRACE_EVENT("%s() - fd disconnected. Exiting", __func__);
    return;
  }

  while (1) {
    int ret;
    OSI_NO_INTR(ret = poll(&pfd, 1, 1));
    if (ret == 0) {
      BTIF_TRACE_VERBOSE("%s(): poll() timeout - nothing to do. Exiting",
                         __func__);
      return;
    }
    if (ret < 0) {
      BTIF_TRACE_WARNING(
          "%s() - poll() failed: return %d errno %d (%s). Exiting", __func__,
          ret, errno, strerror(errno));
      return;
    }
    BTIF_TRACE_VERBOSE("%s() - polling fd %d, revents: 0x%x, ret %d", __func__,
                       pfd.fd, pfd.revents, ret);
    if (pfd.revents & (POLLERR | POLLHUP)) {
      BTIF_TRACE_WARNING("%s() - POLLERR or POLLHUP. Exiting", __func__);
      return;
    }

    /* read sufficiently large buffer to ensure flush empties socket faster
       than it is getting refilled */
    read(pfd.fd, &buf, UIPC_FLUSH_BUFFER_SIZE);
  }
}

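/* Flush any pending incoming data on the given channel. Only the AV control
 * and AV audio channels are flushed; other channel ids are ignored. */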
static void uipc_flush_locked(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id) {
  if (ch_id >= UIPC_CH_NUM) return;

  switch (ch_id) {
    case UIPC_CH_ID_AV_CTRL:
      uipc_flush_ch_locked(uipc, UIPC_CH_ID_AV_CTRL);
      break;

    case UIPC_CH_ID_AV_AUDIO:
      uipc_flush_ch_locked(uipc, UIPC_CH_ID_AV_AUDIO);
      break;
  }
}

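/* Close both the server socket and the data connection of channel |ch_id|,
 * remove them from the active set, notify the channel callback with
 * UIPC_CLOSE_EVT and wake the read task if anything changed. Returns 0 on
 * success, -1 on an invalid channel id. */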
static int uipc_close_ch_locked(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id) {
  int wakeup = 0;

  BTIF_TRACE_EVENT("CLOSE CHANNEL %d", ch_id);

  if (ch_id >= UIPC_CH_NUM) return -1;

  if (uipc.ch[ch_id].srvfd != UIPC_DISCONNECTED) {
    BTIF_TRACE_EVENT("CLOSE SERVER (FD %d)", uipc.ch[ch_id].srvfd);
    close(uipc.ch[ch_id].srvfd);
    FD_CLR(uipc.ch[ch_id].srvfd, &uipc.active_set);
    uipc.ch[ch_id].srvfd = UIPC_DISCONNECTED;
    wakeup = 1;
  }

  if (uipc.ch[ch_id].fd != UIPC_DISCONNECTED) {
    BTIF_TRACE_EVENT("CLOSE CONNECTION (FD %d)", uipc.ch[ch_id].fd);
    close(uipc.ch[ch_id].fd);
    FD_CLR(uipc.ch[ch_id].fd, &uipc.active_set);
    uipc.ch[ch_id].fd = UIPC_DISCONNECTED;
    wakeup = 1;
  }

  /* notify this connection is closed */
  if (uipc.ch[ch_id].cback) uipc.ch[ch_id].cback(ch_id, UIPC_CLOSE_EVT);

  /* trigger main thread update if something was updated */
  if (wakeup) uipc_wakeup_locked(uipc);

  return 0;
}

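/* Request an asynchronous close of channel |ch_id|: set the disconnect task
 * flag and wake the read task, which performs the actual close. */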
void uipc_close_locked(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id) {
  if (uipc.ch[ch_id].srvfd == UIPC_DISCONNECTED) {
    BTIF_TRACE_EVENT("CHANNEL %d ALREADY CLOSED", ch_id);
    return;
  }

  /* schedule close on this channel */
  uipc.ch[ch_id].task_evt_flags |= UIPC_TASK_FLAG_DISCONNECT_CHAN;
  uipc_wakeup_locked(uipc);
}

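/* Main loop of the UIPC read thread: select() on all active fds, handle
 * wakeup interrupts and pending task flags, then service the AV audio
 * channel first followed by the remaining channels. Runs until uipc.running
 * is cleared, then cleans up all channels. */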
static void* uipc_read_task(void* arg) {
  tUIPC_STATE& uipc = *((tUIPC_STATE*)arg);
  int ch_id;
  int result;

  prctl(PR_SET_NAME, (unsigned long)"uipc-main", 0, 0, 0);

  raise_priority_a2dp(TASK_UIPC_READ);

  while (uipc.running) {
    uipc.read_set = uipc.active_set;

    result = select(uipc.max_fd + 1, &uipc.read_set, NULL, NULL, NULL);

    if (result == 0) {
      BTIF_TRACE_EVENT("select timeout");
      continue;
    }
    if (result < 0) {
      if (errno != EINTR) {
        BTIF_TRACE_EVENT("select failed %s", strerror(errno));
      }
      continue;
    }

    {
      std::lock_guard<std::recursive_mutex> guard(uipc.mutex);

      /* clear any wakeup interrupt */
      uipc_check_interrupt_locked(uipc);

      /* check pending task events */
      uipc_check_task_flags_locked(uipc);

      /* make sure we service audio channel first */
      uipc_check_fd_locked(uipc, UIPC_CH_ID_AV_AUDIO);

      /* check for other connections */
      for (ch_id = 0; ch_id < UIPC_CH_NUM; ch_id++) {
        if (ch_id != UIPC_CH_ID_AV_AUDIO) uipc_check_fd_locked(uipc, ch_id);
      }
    }
  }

  BTIF_TRACE_EVENT("UIPC READ THREAD EXITING");

  uipc_main_cleanup(uipc);

  uipc.tid = 0;

  BTIF_TRACE_EVENT("UIPC READ THREAD DONE");

  return nullptr;
}

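/* Mark UIPC as running and spawn the uipc_read_task thread. Returns 0 on
 * success, -1 if the thread cannot be created. */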
int uipc_start_main_server_thread(tUIPC_STATE& uipc) {
  uipc.running = 1;

  int ret = pthread_create(&uipc.tid, (const pthread_attr_t*)NULL,
                           uipc_read_task, &uipc);
  if (ret != 0) {
    BTIF_TRACE_ERROR("uipc_thread_create pthread_create failed:%d", ret);
    return -1;
  }

  return 0;
}

/* blocking call */
void uipc_stop_main_server_thread(tUIPC_STATE& uipc) {
  /* request shutdown of read thread */
  {
    std::lock_guard<std::recursive_mutex> lock(uipc.mutex);
    uipc.running = 0;
    uipc_wakeup_locked(uipc);
  }

  /* wait until read thread is fully terminated */
  /* tid might hold a pointer value whose sign bit is set, making the value
     negative, so check for zero vs. non-zero rather than a positive value */
  if (uipc.tid) pthread_join(uipc.tid, NULL);
}

/*******************************************************************************
 **
 ** Function         UIPC_Init
 **
 ** Description      Initialize UIPC module
 **
 ** Returns          A newly allocated and initialized UIPC state object
 **
 ******************************************************************************/
std::unique_ptr<tUIPC_STATE> UIPC_Init() {
  std::unique_ptr<tUIPC_STATE> uipc = std::make_unique<tUIPC_STATE>();
  BTIF_TRACE_DEBUG("UIPC_Init");

  std::lock_guard<std::recursive_mutex> lock(uipc->mutex);

  uipc_main_init(*uipc);
  uipc_start_main_server_thread(*uipc);

  return uipc;
}

/*******************************************************************************
 **
 ** Function         UIPC_Open
 **
 ** Description      Open UIPC interface
 **
 ** Returns          true in case of success, false in case of failure.
 **
 ******************************************************************************/
bool UIPC_Open(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id, tUIPC_RCV_CBACK* p_cback,
               const char* socket_path) {
  BTIF_TRACE_DEBUG("UIPC_Open : ch_id %d, p_cback %p", ch_id, p_cback);

  std::lock_guard<std::recursive_mutex> lock(uipc.mutex);

  if (ch_id >= UIPC_CH_NUM) {
    return false;
  }

  if (uipc.ch[ch_id].srvfd != UIPC_DISCONNECTED) {
    BTIF_TRACE_EVENT("CHANNEL %d ALREADY OPEN", ch_id);
    return false;
  }

  uipc_setup_server_locked(uipc, ch_id, socket_path, p_cback);

  return true;
}

/*******************************************************************************
 **
 ** Function         UIPC_Close
 **
 ** Description      Close UIPC interface
 **
 ** Returns          void
 **
 ******************************************************************************/
void UIPC_Close(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id) {
  BTIF_TRACE_DEBUG("UIPC_Close : ch_id %d", ch_id);

  /* special case handling uipc shutdown */
  if (ch_id != UIPC_CH_ID_ALL) {
    std::lock_guard<std::recursive_mutex> lock(uipc.mutex);
    uipc_close_locked(uipc, ch_id);
    return;
  }

  BTIF_TRACE_DEBUG("UIPC_Close : waiting for shutdown to complete");
  uipc_stop_main_server_thread(uipc);
  BTIF_TRACE_DEBUG("UIPC_Close : shutdown complete");
}

/*******************************************************************************
 **
 ** Function         UIPC_Send
 **
 ** Description      Called to transmit a message over UIPC.
 **
 ** Returns          true in case of success, false in case of failure.
 **
 ******************************************************************************/
bool UIPC_Send(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id,
               UNUSED_ATTR uint16_t msg_evt, const uint8_t* p_buf,
               uint16_t msglen) {
  BTIF_TRACE_DEBUG("UIPC_Send : ch_id:%d %d bytes", ch_id, msglen);

  std::lock_guard<std::recursive_mutex> lock(uipc.mutex);

  ssize_t ret;
  OSI_NO_INTR(ret = write(uipc.ch[ch_id].fd, p_buf, msglen));
  if (ret < 0) {
    BTIF_TRACE_ERROR("failed to write (%s)", strerror(errno));
    return false;
  }

  return true;
}

/*******************************************************************************
 **
 ** Function         UIPC_Read
 **
 ** Description      Called to read a message from UIPC.
 **
 ** Returns          the number of bytes read.
 **
 ******************************************************************************/

uint32_t UIPC_Read(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id,
                   UNUSED_ATTR uint16_t* p_msg_evt, uint8_t* p_buf,
                   uint32_t len) {
  if (ch_id >= UIPC_CH_NUM) {
    BTIF_TRACE_ERROR("UIPC_Read : invalid ch id %d", ch_id);
    return 0;
  }

  int n_read = 0;
  int fd = uipc.ch[ch_id].fd;
  struct pollfd pfd;

  if (fd == UIPC_DISCONNECTED) {
    BTIF_TRACE_ERROR("UIPC_Read : channel %d closed", ch_id);
    return 0;
  }

  while (n_read < (int)len) {
    pfd.fd = fd;
    pfd.events = POLLIN | POLLHUP;

    /* make sure there is data prior to attempting read to avoid blocking
       a read for more than poll timeout */

    int poll_ret;
    OSI_NO_INTR(poll_ret = poll(&pfd, 1, uipc.ch[ch_id].read_poll_tmo_ms));
    if (poll_ret == 0) {
      BTIF_TRACE_WARNING("poll timeout (%d ms)",
                         uipc.ch[ch_id].read_poll_tmo_ms);
      break;
    }
    if (poll_ret < 0) {
      BTIF_TRACE_ERROR("%s(): poll() failed: return %d errno %d (%s)",
                       __func__, poll_ret, errno, strerror(errno));
      break;
    }

    // BTIF_TRACE_EVENT("poll revents %x", pfd.revents);

    if (pfd.revents & (POLLHUP | POLLNVAL)) {
      BTIF_TRACE_WARNING("poll : channel detached remotely");
      std::lock_guard<std::recursive_mutex> lock(uipc.mutex);
      uipc_close_locked(uipc, ch_id);
      return 0;
    }

    ssize_t n;
    OSI_NO_INTR(n = recv(fd, p_buf + n_read, len - n_read, 0));

    // BTIF_TRACE_EVENT("read %d bytes", n);

    if (n == 0) {
      BTIF_TRACE_WARNING("UIPC_Read : channel detached remotely");
      std::lock_guard<std::recursive_mutex> lock(uipc.mutex);
      uipc_close_locked(uipc, ch_id);
      return 0;
    }

    if (n < 0) {
      BTIF_TRACE_WARNING("UIPC_Read : read failed (%s)", strerror(errno));
      return 0;
    }

    n_read += n;
  }

  return n_read;
}

/*******************************************************************************
 *
 *  Function         UIPC_Ioctl
 *
 *  Description      Called to control UIPC.
 *
 *  Returns          false
 *
 ******************************************************************************/

extern bool UIPC_Ioctl(tUIPC_STATE& uipc, tUIPC_CH_ID ch_id, uint32_t request,
                       void* param) {
  BTIF_TRACE_DEBUG("#### UIPC_Ioctl : ch_id %d, request %d ####", ch_id,
                   request);
  std::lock_guard<std::recursive_mutex> lock(uipc.mutex);

  switch (request) {
    case UIPC_REQ_RX_FLUSH:
      uipc_flush_locked(uipc, ch_id);
      break;

    case UIPC_REG_CBACK:
      // BTIF_TRACE_EVENT("register callback ch %d srvfd %d, fd %d", ch_id,
      //                  uipc.ch[ch_id].srvfd, uipc.ch[ch_id].fd);
      uipc.ch[ch_id].cback = (tUIPC_RCV_CBACK*)param;
      break;

    case UIPC_REG_REMOVE_ACTIVE_READSET:
      /* user will read data directly and not use select loop */
      if (uipc.ch[ch_id].fd != UIPC_DISCONNECTED) {
        /* remove this channel from active set */
        FD_CLR(uipc.ch[ch_id].fd, &uipc.active_set);

        /* refresh active set */
        uipc_wakeup_locked(uipc);
      }
      break;

    case UIPC_SET_READ_POLL_TMO:
      uipc.ch[ch_id].read_poll_tmo_ms = (intptr_t)param;
      BTIF_TRACE_EVENT("UIPC_SET_READ_POLL_TMO : CH %d, TMO %d ms", ch_id,
                       uipc.ch[ch_id].read_poll_tmo_ms);
      break;

    default:
      BTIF_TRACE_EVENT("UIPC_Ioctl : request not handled (%d)", request);
      break;
  }

  return false;
}