/* Copyright 2008 The Android Open Source Project
 */

#define LOG_TAG "Binder"

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <log/log.h>

#include "binder.h"

#define MAX_BIO_SIZE (1 << 30)

#define TRACE 0

void bio_init_from_txn(struct binder_io *io, struct binder_transaction_data *txn);

#if TRACE
void hexdump(void *_data, size_t len)
{
    unsigned char *data = _data;
    size_t count;

    for (count = 0; count < len; count++) {
        if ((count & 15) == 0)
            fprintf(stderr,"%04zu:", count);
        fprintf(stderr," %02x %c", *data,
                (*data < 32) || (*data > 126) ? '.' : *data);
        data++;
        if ((count & 15) == 15)
            fprintf(stderr,"\n");
    }
    if ((count & 15) != 0)
        fprintf(stderr,"\n");
}

void binder_dump_txn(struct binder_transaction_data *txn)
{
    struct flat_binder_object *obj;
    binder_size_t *offs = (binder_size_t *)(uintptr_t)txn->data.ptr.offsets;
    size_t count = txn->offsets_size / sizeof(binder_size_t);

    fprintf(stderr," target %016"PRIx64" cookie %016"PRIx64" code %08x flags %08x\n",
            (uint64_t)txn->target.ptr, (uint64_t)txn->cookie, txn->code, txn->flags);
    fprintf(stderr," pid %8d uid %8d data %"PRIu64" offs %"PRIu64"\n",
            txn->sender_pid, txn->sender_euid, (uint64_t)txn->data_size, (uint64_t)txn->offsets_size);
    hexdump((void *)(uintptr_t)txn->data.ptr.buffer, txn->data_size);
    while (count--) {
        obj = (struct flat_binder_object *) (((char*)(uintptr_t)txn->data.ptr.buffer) + *offs++);
        fprintf(stderr," - type %08x flags %08x ptr %016"PRIx64" cookie %016"PRIx64"\n",
                obj->type, obj->flags, (uint64_t)obj->binder, (uint64_t)obj->cookie);
    }
}

#define NAME(n) case n: return #n
const char *cmd_name(uint32_t cmd)
{
    switch(cmd) {
        NAME(BR_NOOP);
        NAME(BR_TRANSACTION_COMPLETE);
        NAME(BR_INCREFS);
        NAME(BR_ACQUIRE);
        NAME(BR_RELEASE);
        NAME(BR_DECREFS);
        NAME(BR_TRANSACTION);
        NAME(BR_REPLY);
        NAME(BR_FAILED_REPLY);
        NAME(BR_DEAD_REPLY);
        NAME(BR_DEAD_BINDER);
        default: return "???";
    }
}
#else
#define hexdump(a,b) do{} while (0)
#define binder_dump_txn(txn) do{} while (0)
#endif

#define BIO_F_SHARED    0x01  /* the buffer needs to be freed */
#define BIO_F_OVERFLOW  0x02  /* ran out of space */
#define BIO_F_IOERROR   0x04
#define BIO_F_MALLOCED  0x08  /* needs to be free()'d */

struct binder_state
{
    int fd;
    void *mapped;
    size_t mapsize;
};

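/* Opens the binder driver node, verifies that the kernel and userspace
 * protocol versions match, and maps 'mapsize' bytes of read-only buffer
 * space that the driver uses to deliver incoming transaction data. */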
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    bs->fd = open(driver, O_RDWR | O_CLOEXEC);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open %s (%s)\n",
                driver, strerror(errno));
        goto fail_open;
    }

    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }

    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}

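/* Releases everything acquired by binder_open(): the mapping, the driver fd,
 * and the state structure itself. */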
void binder_close(struct binder_state *bs)
{
    munmap(bs->mapped, bs->mapsize);
    close(bs->fd);
    free(bs);
}

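/* Registers this process as the binder context manager (handle 0). It first
 * tries BINDER_SET_CONTEXT_MGR_EXT, which requests security contexts on
 * incoming transactions, and falls back to the legacy BINDER_SET_CONTEXT_MGR
 * ioctl on older kernels. */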
int binder_become_context_manager(struct binder_state *bs)
{
    struct flat_binder_object obj;
    memset(&obj, 0, sizeof(obj));
    obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;

    int result = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR_EXT, &obj);

    // fallback to original method
    if (result != 0) {
        android_errorWriteLog(0x534e4554, "121035042");

        result = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    }
    return result;
}

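/* Issues a write-only BINDER_WRITE_READ ioctl: 'data' holds one or more
 * driver commands (BC_*) to send; nothing is read back. */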
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}

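/* Tells the driver that the transaction buffer it handed to us can be
 * reclaimed (BC_FREE_BUFFER). */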
void binder_free_buffer(struct binder_state *bs,
                        binder_uintptr_t buffer_to_free)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
    } __attribute__((packed)) data;
    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    binder_write(bs, &data, sizeof(data));
}

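/* Frees the received transaction buffer and sends a BC_REPLY in a single
 * write. A non-zero 'status' turns the reply into a TF_STATUS_CODE error
 * reply carrying that status; otherwise the contents of 'reply' are sent. */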
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}

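/* Walks the buffer of driver return commands (BR_*) produced by a
 * BINDER_WRITE_READ read. Incoming transactions are dispatched to 'func';
 * a BR_REPLY initializes 'bio' for the caller. Returns 1 to keep looping,
 * 0 once a reply has been consumed, and -1 on error. */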
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr," %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION_SEC_CTX:
        case BR_TRANSACTION: {
            struct binder_transaction_data_secctx txn;
            if (cmd == BR_TRANSACTION_SEC_CTX) {
                if ((end - ptr) < sizeof(struct binder_transaction_data_secctx)) {
                    ALOGE("parse: txn too small (binder_transaction_data_secctx)!\n");
                    return -1;
                }
                memcpy(&txn, (void*) ptr, sizeof(struct binder_transaction_data_secctx));
                ptr += sizeof(struct binder_transaction_data_secctx);
            } else /* BR_TRANSACTION */ {
                if ((end - ptr) < sizeof(struct binder_transaction_data)) {
                    ALOGE("parse: txn too small (binder_transaction_data)!\n");
                    return -1;
                }
                memcpy(&txn.transaction_data, (void*) ptr, sizeof(struct binder_transaction_data));
                ptr += sizeof(struct binder_transaction_data);

                txn.secctx = 0;
            }

            binder_dump_txn(&txn.transaction_data);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, &txn.transaction_data);
                res = func(bs, &txn, &msg, &reply);
                if (txn.transaction_data.flags & TF_ONE_WAY) {
                    binder_free_buffer(bs, txn.transaction_data.data.ptr.buffer);
                } else {
                    binder_send_reply(bs, &reply, txn.transaction_data.data.ptr.buffer, res);
                }
            }
            break;
        }
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}

void binder_acquire(struct binder_state *bs, uint32_t target)
{
    uint32_t cmd[2];
    cmd[0] = BC_ACQUIRE;
    cmd[1] = target;
    binder_write(bs, cmd, sizeof(cmd));
}

void binder_release(struct binder_state *bs, uint32_t target)
{
    uint32_t cmd[2];
    cmd[0] = BC_RELEASE;
    cmd[1] = target;
    binder_write(bs, cmd, sizeof(cmd));
}

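/* Asks the driver for a death notification on 'target'; when the remote
 * process dies, binder_parse() receives BR_DEAD_BINDER and invokes
 * death->func with the cookie registered here. */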
void binder_link_to_death(struct binder_state *bs, uint32_t target, struct binder_death *death)
{
    struct {
        uint32_t cmd;
        struct binder_handle_cookie payload;
    } __attribute__((packed)) data;

    data.cmd = BC_REQUEST_DEATH_NOTIFICATION;
    data.payload.handle = target;
    data.payload.cookie = (uintptr_t) death;
    binder_write(bs, &data, sizeof(data));
}

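/* Performs a synchronous transaction: sends BC_TRANSACTION for 'target' with
 * the data accumulated in 'msg', then reads and parses driver commands until
 * the matching BR_REPLY arrives and 'reply' has been initialized. Returns 0
 * on success, -1 on failure (with BIO_F_IOERROR set on 'reply'). */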
int binder_call(struct binder_state *bs,
                struct binder_io *msg, struct binder_io *reply,
                uint32_t target, uint32_t code)
{
    int res;
    struct binder_write_read bwr;
    struct {
        uint32_t cmd;
        struct binder_transaction_data txn;
    } __attribute__((packed)) writebuf;
    unsigned readbuf[32];

    if (msg->flags & BIO_F_OVERFLOW) {
        fprintf(stderr,"binder: txn buffer overflow\n");
        goto fail;
    }

    writebuf.cmd = BC_TRANSACTION;
    writebuf.txn.target.handle = target;
    writebuf.txn.code = code;
    writebuf.txn.flags = 0;
    writebuf.txn.data_size = msg->data - msg->data0;
    writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0);
    writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;
    writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;

    bwr.write_size = sizeof(writebuf);
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) &writebuf;

    hexdump(msg->data0, msg->data - msg->data0);
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            fprintf(stderr,"binder: ioctl failed (%s)\n", strerror(errno));
            goto fail;
        }

        res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
        if (res == 0) return 0;
        if (res < 0) goto fail;
    }

fail:
    memset(reply, 0, sizeof(*reply));
    reply->flags |= BIO_F_IOERROR;
    return -1;
}

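/* Main service loop: registers the calling thread as a looper
 * (BC_ENTER_LOOPER), then blocks in BINDER_WRITE_READ and hands each batch
 * of driver commands to binder_parse(), which dispatches transactions to
 * 'func'. Only returns on error. */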
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

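/* Points a binder_io at the data and offsets buffers of a received
 * transaction. The buffers belong to the driver mapping, so BIO_F_SHARED is
 * set and the buffer must eventually be returned with BC_FREE_BUFFER
 * (see binder_done()). */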
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;
}

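/* Prepares a binder_io over a caller-supplied buffer: the first
 * maxoffs * sizeof(size_t) bytes are reserved for object offsets, the rest
 * is used for payload data. */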
void bio_init(struct binder_io *bio, void *data,
              size_t maxdata, size_t maxoffs)
{
    size_t n = maxoffs * sizeof(size_t);

    if (n > maxdata) {
        bio->flags = BIO_F_OVERFLOW;
        bio->data_avail = 0;
        bio->offs_avail = 0;
        return;
    }

    bio->data = bio->data0 = (char *) data + n;
    bio->offs = bio->offs0 = data;
    bio->data_avail = maxdata - n;
    bio->offs_avail = maxoffs;
    bio->flags = 0;
}

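/* Reserves 'size' bytes (rounded up to 4-byte alignment) in the data area,
 * or returns NULL and sets BIO_F_OVERFLOW if the buffer is exhausted. */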
static void *bio_alloc(struct binder_io *bio, size_t size)
{
    size = (size + 3) & (~3);
    if (size > bio->data_avail) {
        bio->flags |= BIO_F_OVERFLOW;
        return NULL;
    } else {
        void *ptr = bio->data;
        bio->data += size;
        bio->data_avail -= size;
        return ptr;
    }
}

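/* Releases a reply buffer obtained through binder_call() once the caller has
 * finished reading it, by sending BC_FREE_BUFFER for the shared buffer. */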
void binder_done(struct binder_state *bs,
                 __unused struct binder_io *msg,
                 struct binder_io *reply)
{
    struct {
        uint32_t cmd;
        uintptr_t buffer;
    } __attribute__((packed)) data;

    if (reply->flags & BIO_F_SHARED) {
        data.cmd = BC_FREE_BUFFER;
        data.buffer = (uintptr_t) reply->data0;
        binder_write(bs, &data, sizeof(data));
        reply->flags = 0;
    }
}

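/* Allocates space for a flat_binder_object in the data area and records its
 * offset in the offsets array so the driver knows where the object lives. */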
static struct flat_binder_object *bio_alloc_obj(struct binder_io *bio)
{
    struct flat_binder_object *obj;

    obj = bio_alloc(bio, sizeof(*obj));

    if (obj && bio->offs_avail) {
        bio->offs_avail--;
        *bio->offs++ = ((char*) obj) - ((char*) bio->data0);
        return obj;
    }

    bio->flags |= BIO_F_OVERFLOW;
    return NULL;
}

void bio_put_uint32(struct binder_io *bio, uint32_t n)
{
    uint32_t *ptr = bio_alloc(bio, sizeof(n));
    if (ptr)
        *ptr = n;
}

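/* Appends a strong binder object (BINDER_TYPE_BINDER) referring to the local
 * pointer 'ptr'. */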
void bio_put_obj(struct binder_io *bio, void *ptr)
{
    struct flat_binder_object *obj;

    obj = bio_alloc_obj(bio);
    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->hdr.type = BINDER_TYPE_BINDER;
    obj->binder = (uintptr_t)ptr;
    obj->cookie = 0;
}

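/* Appends a handle reference (BINDER_TYPE_HANDLE). A zero handle is written
 * as plain data with no offsets entry, so the driver does not try to
 * translate it. */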
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *obj;

    if (handle)
        obj = bio_alloc_obj(bio);
    else
        obj = bio_alloc(bio, sizeof(*obj));

    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->hdr.type = BINDER_TYPE_HANDLE;
    obj->handle = handle;
    obj->cookie = 0;
}

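/* Writes a UTF-16 string as a 32-bit character count followed by the
 * characters and a terminating zero. A NULL string (or one that would exceed
 * MAX_BIO_SIZE) is encoded as the length 0xffffffff. */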
void bio_put_string16(struct binder_io *bio, const uint16_t *str)
{
    size_t len;
    uint16_t *ptr;

    if (!str) {
        bio_put_uint32(bio, 0xffffffff);
        return;
    }

    len = 0;
    while (str[len]) len++;

    if (len >= (MAX_BIO_SIZE / sizeof(uint16_t))) {
        bio_put_uint32(bio, 0xffffffff);
        return;
    }

    /* Note: The payload will carry 32bit size instead of size_t */
    bio_put_uint32(bio, (uint32_t) len);
    len = (len + 1) * sizeof(uint16_t);
    ptr = bio_alloc(bio, len);
    if (ptr)
        memcpy(ptr, str, len);
}

void bio_put_string16_x(struct binder_io *bio, const char *_str)
{
    unsigned char *str = (unsigned char*) _str;
    size_t len;
    uint16_t *ptr;

    if (!str) {
        bio_put_uint32(bio, 0xffffffff);
        return;
    }

    len = strlen(_str);

    if (len >= (MAX_BIO_SIZE / sizeof(uint16_t))) {
        bio_put_uint32(bio, 0xffffffff);
        return;
    }

    /* Note: The payload will carry 32bit size instead of size_t */
    bio_put_uint32(bio, len);
    ptr = bio_alloc(bio, (len + 1) * sizeof(uint16_t));
    if (!ptr)
        return;

    while (*str)
        *ptr++ = *str++;
    *ptr++ = 0;
}

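/* Consumes 'size' bytes (rounded up to 4-byte alignment) from the data area,
 * returning a pointer to them, or NULL with BIO_F_OVERFLOW set if there is
 * not enough data left. */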
static void *bio_get(struct binder_io *bio, size_t size)
{
    size = (size + 3) & (~3);

    if (bio->data_avail < size){
        bio->data_avail = 0;
        bio->flags |= BIO_F_OVERFLOW;
        return NULL;
    } else {
        void *ptr = bio->data;
        bio->data += size;
        bio->data_avail -= size;
        return ptr;
    }
}

uint32_t bio_get_uint32(struct binder_io *bio)
{
    uint32_t *ptr = bio_get(bio, sizeof(*ptr));
    return ptr ? *ptr : 0;
}

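/* Reads a UTF-16 string written by bio_put_string16(): a 32-bit length
 * followed by the characters and a terminating zero. Stores the character
 * count in *sz if 'sz' is non-NULL. */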
uint16_t *bio_get_string16(struct binder_io *bio, size_t *sz)
{
    size_t len;

    /* Note: The payload will carry 32bit size instead of size_t */
    len = (size_t) bio_get_uint32(bio);
    if (sz)
        *sz = len;
    return bio_get(bio, (len + 1) * sizeof(uint16_t));
}

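/* Returns the flat_binder_object at the current read position, but only if
 * the offsets array confirms that an object really starts there; otherwise
 * flags an overflow and returns NULL. */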
static struct flat_binder_object *_bio_get_obj(struct binder_io *bio)
{
    size_t n;
    size_t off = bio->data - bio->data0;

    /* TODO: be smarter about this? */
    for (n = 0; n < bio->offs_avail; n++) {
        if (bio->offs[n] == off)
            return bio_get(bio, sizeof(struct flat_binder_object));
    }

    bio->data_avail = 0;
    bio->flags |= BIO_F_OVERFLOW;
    return NULL;
}

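/* Reads the next object and returns its handle if it is a BINDER_TYPE_HANDLE
 * reference, or 0 otherwise. */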
uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *obj;

    obj = _bio_get_obj(bio);
    if (!obj)
        return 0;

    if (obj->hdr.type == BINDER_TYPE_HANDLE)
        return obj->handle;

    return 0;
}
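/*
 * Usage sketch (illustrative only, not part of this file's API): a minimal
 * service process would open the driver and service requests in
 * binder_loop(). The handler name and map size below are hypothetical.
 *
 *     int example_handler(struct binder_state *bs,
 *                         struct binder_transaction_data_secctx *txn,
 *                         struct binder_io *msg, struct binder_io *reply)
 *     {
 *         bio_put_uint32(reply, 0);   // trivial "OK" reply
 *         return 0;
 *     }
 *
 *     int main(void)
 *     {
 *         struct binder_state *bs = binder_open("/dev/binder", 128 * 1024);
 *         if (!bs)
 *             return -1;
 *         binder_loop(bs, example_handler);   // returns only on error
 *         binder_close(bs);
 *         return 0;
 *     }
 */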