1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <errno.h>
18 #include <fcntl.h>
19 #include <fstream>
20 #include <poll.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24
25 #include <gtest/gtest.h>
26
27 #include <binder/Binder.h>
28 #include <binder/IBinder.h>
29 #include <binder/IPCThreadState.h>
30 #include <binder/IServiceManager.h>
31
32 #include <private/binder/binder_module.h>
33 #include <sys/epoll.h>
34 #include <sys/prctl.h>
35
36 #include "binderAbiHelper.h"
37
38 #define ARRAY_SIZE(array) (sizeof array / sizeof array[0])
39
40 using namespace android;
41
IsPageAligned(void * buf)42 static ::testing::AssertionResult IsPageAligned(void *buf) {
43 if (((unsigned long)buf & ((unsigned long)PAGE_SIZE - 1)) == 0)
44 return ::testing::AssertionSuccess();
45 else
46 return ::testing::AssertionFailure() << buf << " is not page aligned";
47 }
48
49 static testing::Environment* binder_env;
50 static char *binderservername;
51 static char *binderserversuffix;
52 static char binderserverarg[] = "--binderserver";
53
54 static constexpr int kSchedPolicy = SCHED_RR;
55 static constexpr int kSchedPriority = 7;
56
57 static String16 binderLibTestServiceName = String16("test.binderLib");
58
59 enum BinderLibTestTranscationCode {
60 BINDER_LIB_TEST_NOP_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
61 BINDER_LIB_TEST_REGISTER_SERVER,
62 BINDER_LIB_TEST_ADD_SERVER,
63 BINDER_LIB_TEST_ADD_POLL_SERVER,
64 BINDER_LIB_TEST_CALL_BACK,
65 BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF,
66 BINDER_LIB_TEST_DELAYED_CALL_BACK,
67 BINDER_LIB_TEST_NOP_CALL_BACK,
68 BINDER_LIB_TEST_GET_SELF_TRANSACTION,
69 BINDER_LIB_TEST_GET_ID_TRANSACTION,
70 BINDER_LIB_TEST_INDIRECT_TRANSACTION,
71 BINDER_LIB_TEST_SET_ERROR_TRANSACTION,
72 BINDER_LIB_TEST_GET_STATUS_TRANSACTION,
73 BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION,
74 BINDER_LIB_TEST_LINK_DEATH_TRANSACTION,
75 BINDER_LIB_TEST_WRITE_FILE_TRANSACTION,
76 BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION,
77 BINDER_LIB_TEST_EXIT_TRANSACTION,
78 BINDER_LIB_TEST_DELAYED_EXIT_TRANSACTION,
79 BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION,
80 BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION,
81 BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION,
82 BINDER_LIB_TEST_GET_SCHEDULING_POLICY,
83 BINDER_LIB_TEST_NOP_TRANSACTION_WAIT,
84 BINDER_LIB_TEST_GETPID,
85 BINDER_LIB_TEST_ECHO_VECTOR,
86 BINDER_LIB_TEST_REJECT_BUF,
87 };
88
start_server_process(int arg2,bool usePoll=false)89 pid_t start_server_process(int arg2, bool usePoll = false)
90 {
91 int ret;
92 pid_t pid;
93 status_t status;
94 int pipefd[2];
95 char stri[16];
96 char strpipefd1[16];
97 char usepoll[2];
98 char *childargv[] = {
99 binderservername,
100 binderserverarg,
101 stri,
102 strpipefd1,
103 usepoll,
104 binderserversuffix,
105 nullptr
106 };
107
108 ret = pipe(pipefd);
109 if (ret < 0)
110 return ret;
111
112 snprintf(stri, sizeof(stri), "%d", arg2);
113 snprintf(strpipefd1, sizeof(strpipefd1), "%d", pipefd[1]);
114 snprintf(usepoll, sizeof(usepoll), "%d", usePoll ? 1 : 0);
115
116 pid = fork();
117 if (pid == -1)
118 return pid;
119 if (pid == 0) {
120 prctl(PR_SET_PDEATHSIG, SIGHUP);
121 close(pipefd[0]);
122 execv(binderservername, childargv);
123 status = -errno;
124 write(pipefd[1], &status, sizeof(status));
125 fprintf(stderr, "execv failed, %s\n", strerror(errno));
126 _exit(EXIT_FAILURE);
127 }
128 close(pipefd[1]);
129 ret = read(pipefd[0], &status, sizeof(status));
130 //printf("pipe read returned %d, status %d\n", ret, status);
131 close(pipefd[0]);
132 if (ret == sizeof(status)) {
133 ret = status;
134 } else {
135 kill(pid, SIGKILL);
136 if (ret >= 0) {
137 ret = NO_INIT;
138 }
139 }
140 if (ret < 0) {
141 wait(nullptr);
142 return ret;
143 }
144 return pid;
145 }
146
147 class BinderLibTestEnv : public ::testing::Environment {
148 public:
BinderLibTestEnv()149 BinderLibTestEnv() {}
getServer(void)150 sp<IBinder> getServer(void) {
151 return m_server;
152 }
153
154 private:
SetUp()155 virtual void SetUp() {
156 m_serverpid = start_server_process(0);
157 //printf("m_serverpid %d\n", m_serverpid);
158 ASSERT_GT(m_serverpid, 0);
159
160 sp<IServiceManager> sm = defaultServiceManager();
161 //printf("%s: pid %d, get service\n", __func__, m_pid);
162 m_server = sm->getService(binderLibTestServiceName);
163 ASSERT_TRUE(m_server != nullptr);
164 //printf("%s: pid %d, get service done\n", __func__, m_pid);
165 }
TearDown()166 virtual void TearDown() {
167 status_t ret;
168 Parcel data, reply;
169 int exitStatus;
170 pid_t pid;
171
172 //printf("%s: pid %d\n", __func__, m_pid);
173 if (m_server != nullptr) {
174 ret = m_server->transact(BINDER_LIB_TEST_GET_STATUS_TRANSACTION, data, &reply);
175 EXPECT_EQ(0, ret);
176 ret = m_server->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
177 EXPECT_EQ(0, ret);
178 }
179 if (m_serverpid > 0) {
180 //printf("wait for %d\n", m_pids[i]);
181 pid = wait(&exitStatus);
182 EXPECT_EQ(m_serverpid, pid);
183 EXPECT_TRUE(WIFEXITED(exitStatus));
184 EXPECT_EQ(0, WEXITSTATUS(exitStatus));
185 }
186 }
187
188 pid_t m_serverpid;
189 sp<IBinder> m_server;
190 };
191
192 class BinderLibTest : public ::testing::Test {
193 public:
SetUp()194 virtual void SetUp() {
195 m_server = static_cast<BinderLibTestEnv *>(binder_env)->getServer();
196 IPCThreadState::self()->restoreCallingWorkSource(0);
197 }
TearDown()198 virtual void TearDown() {
199 }
200 protected:
addServerEtc(int32_t * idPtr,int code)201 sp<IBinder> addServerEtc(int32_t *idPtr, int code)
202 {
203 int ret;
204 int32_t id;
205 Parcel data, reply;
206 sp<IBinder> binder;
207
208 ret = m_server->transact(code, data, &reply);
209 EXPECT_EQ(NO_ERROR, ret);
210
211 EXPECT_FALSE(binder != nullptr);
212 binder = reply.readStrongBinder();
213 EXPECT_TRUE(binder != nullptr);
214 ret = reply.readInt32(&id);
215 EXPECT_EQ(NO_ERROR, ret);
216 if (idPtr)
217 *idPtr = id;
218 return binder;
219 }
220
addServer(int32_t * idPtr=nullptr)221 sp<IBinder> addServer(int32_t *idPtr = nullptr)
222 {
223 return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_SERVER);
224 }
225
addPollServer(int32_t * idPtr=nullptr)226 sp<IBinder> addPollServer(int32_t *idPtr = nullptr)
227 {
228 return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_POLL_SERVER);
229 }
230
waitForReadData(int fd,int timeout_ms)231 void waitForReadData(int fd, int timeout_ms) {
232 int ret;
233 pollfd pfd = pollfd();
234
235 pfd.fd = fd;
236 pfd.events = POLLIN;
237 ret = poll(&pfd, 1, timeout_ms);
238 EXPECT_EQ(1, ret);
239 }
240
241 sp<IBinder> m_server;
242 };
243
244 class BinderLibTestBundle : public Parcel
245 {
246 public:
BinderLibTestBundle(void)247 BinderLibTestBundle(void) {}
BinderLibTestBundle(const Parcel * source)248 explicit BinderLibTestBundle(const Parcel *source) : m_isValid(false) {
249 int32_t mark;
250 int32_t bundleLen;
251 size_t pos;
252
253 if (source->readInt32(&mark))
254 return;
255 if (mark != MARK_START)
256 return;
257 if (source->readInt32(&bundleLen))
258 return;
259 pos = source->dataPosition();
260 if (Parcel::appendFrom(source, pos, bundleLen))
261 return;
262 source->setDataPosition(pos + bundleLen);
263 if (source->readInt32(&mark))
264 return;
265 if (mark != MARK_END)
266 return;
267 m_isValid = true;
268 setDataPosition(0);
269 }
appendTo(Parcel * dest)270 void appendTo(Parcel *dest) {
271 dest->writeInt32(MARK_START);
272 dest->writeInt32(dataSize());
273 dest->appendFrom(this, 0, dataSize());
274 dest->writeInt32(MARK_END);
275 };
isValid(void)276 bool isValid(void) {
277 return m_isValid;
278 }
279 private:
280 enum {
281 MARK_START = B_PACK_CHARS('B','T','B','S'),
282 MARK_END = B_PACK_CHARS('B','T','B','E'),
283 };
284 bool m_isValid;
285 };
286
287 class BinderLibTestEvent
288 {
289 public:
BinderLibTestEvent(void)290 BinderLibTestEvent(void)
291 : m_eventTriggered(false)
292 {
293 pthread_mutex_init(&m_waitMutex, nullptr);
294 pthread_cond_init(&m_waitCond, nullptr);
295 }
waitEvent(int timeout_s)296 int waitEvent(int timeout_s)
297 {
298 int ret;
299 pthread_mutex_lock(&m_waitMutex);
300 if (!m_eventTriggered) {
301 struct timespec ts;
302 clock_gettime(CLOCK_REALTIME, &ts);
303 ts.tv_sec += timeout_s;
304 pthread_cond_timedwait(&m_waitCond, &m_waitMutex, &ts);
305 }
306 ret = m_eventTriggered ? NO_ERROR : TIMED_OUT;
307 pthread_mutex_unlock(&m_waitMutex);
308 return ret;
309 }
getTriggeringThread()310 pthread_t getTriggeringThread()
311 {
312 return m_triggeringThread;
313 }
314 protected:
triggerEvent(void)315 void triggerEvent(void) {
316 pthread_mutex_lock(&m_waitMutex);
317 pthread_cond_signal(&m_waitCond);
318 m_eventTriggered = true;
319 m_triggeringThread = pthread_self();
320 pthread_mutex_unlock(&m_waitMutex);
321 };
322 private:
323 pthread_mutex_t m_waitMutex;
324 pthread_cond_t m_waitCond;
325 bool m_eventTriggered;
326 pthread_t m_triggeringThread;
327 };
328
329 class BinderLibTestCallBack : public BBinder, public BinderLibTestEvent
330 {
331 public:
BinderLibTestCallBack()332 BinderLibTestCallBack()
333 : m_result(NOT_ENOUGH_DATA)
334 , m_prev_end(nullptr)
335 {
336 }
getResult(void)337 status_t getResult(void)
338 {
339 return m_result;
340 }
341
342 private:
onTransact(uint32_t code,const Parcel & data,Parcel * reply,uint32_t flags=0)343 virtual status_t onTransact(uint32_t code,
344 const Parcel& data, Parcel* reply,
345 uint32_t flags = 0)
346 {
347 (void)reply;
348 (void)flags;
349 switch(code) {
350 case BINDER_LIB_TEST_CALL_BACK: {
351 status_t status = data.readInt32(&m_result);
352 if (status != NO_ERROR) {
353 m_result = status;
354 }
355 triggerEvent();
356 return NO_ERROR;
357 }
358 case BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF: {
359 sp<IBinder> server;
360 int ret;
361 const uint8_t *buf = data.data();
362 size_t size = data.dataSize();
363 if (m_prev_end) {
364 /* 64-bit kernel needs at most 8 bytes to align buffer end */
365 EXPECT_LE((size_t)(buf - m_prev_end), (size_t)8);
366 } else {
367 EXPECT_TRUE(IsPageAligned((void *)buf));
368 }
369
370 m_prev_end = buf + size + data.objectsCount() * sizeof(binder_size_t);
371
372 if (size > 0) {
373 server = static_cast<BinderLibTestEnv *>(binder_env)->getServer();
374 ret = server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION,
375 data, reply);
376 EXPECT_EQ(NO_ERROR, ret);
377 }
378 return NO_ERROR;
379 }
380 default:
381 return UNKNOWN_TRANSACTION;
382 }
383 }
384
385 status_t m_result;
386 const uint8_t *m_prev_end;
387 };
388
389 class TestDeathRecipient : public IBinder::DeathRecipient, public BinderLibTestEvent
390 {
391 private:
binderDied(const wp<IBinder> & who)392 virtual void binderDied(const wp<IBinder>& who) {
393 (void)who;
394 triggerEvent();
395 };
396 };
397
TEST_F(BinderLibTest,NopTransaction)398 TEST_F(BinderLibTest, NopTransaction) {
399 status_t ret;
400 Parcel data, reply;
401 ret = m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply);
402 EXPECT_EQ(NO_ERROR, ret);
403 }
404
TEST_F(BinderLibTest,Freeze)405 TEST_F(BinderLibTest, Freeze) {
406 status_t ret;
407 Parcel data, reply, replypid;
408 std::ifstream freezer_file("/sys/fs/cgroup/freezer/cgroup.freeze");
409
410 //Pass test on devices where the freezer is not supported
411 if (freezer_file.fail()) {
412 GTEST_SKIP();
413 return;
414 }
415
416 std::string freezer_enabled;
417 std::getline(freezer_file, freezer_enabled);
418
419 //Pass test on devices where the freezer is disabled
420 if (freezer_enabled != "1") {
421 GTEST_SKIP();
422 return;
423 }
424
425 ret = m_server->transact(BINDER_LIB_TEST_GETPID, data, &replypid);
426 int32_t pid = replypid.readInt32();
427 EXPECT_EQ(NO_ERROR, ret);
428 for (int i = 0; i < 10; i++) {
429 EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION_WAIT, data, &reply, TF_ONE_WAY));
430 }
431 EXPECT_EQ(-EAGAIN, IPCThreadState::self()->freeze(pid, 1, 0));
432 EXPECT_EQ(-EAGAIN, IPCThreadState::self()->freeze(pid, 1, 0));
433 EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, 1, 1000));
434 EXPECT_EQ(FAILED_TRANSACTION, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));
435
436 bool sync_received, async_received;
437
438 EXPECT_EQ(NO_ERROR, IPCThreadState::self()->getProcessFreezeInfo(pid, &sync_received,
439 &async_received));
440
441 EXPECT_EQ(sync_received, 1);
442 EXPECT_EQ(async_received, 0);
443
444 EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, 0, 0));
445 EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));
446 }
447
TEST_F(BinderLibTest,SetError)448 TEST_F(BinderLibTest, SetError) {
449 int32_t testValue[] = { 0, -123, 123 };
450 for (size_t i = 0; i < ARRAY_SIZE(testValue); i++) {
451 status_t ret;
452 Parcel data, reply;
453 data.writeInt32(testValue[i]);
454 ret = m_server->transact(BINDER_LIB_TEST_SET_ERROR_TRANSACTION, data, &reply);
455 EXPECT_EQ(testValue[i], ret);
456 }
457 }
458
TEST_F(BinderLibTest,GetId)459 TEST_F(BinderLibTest, GetId) {
460 status_t ret;
461 int32_t id;
462 Parcel data, reply;
463 ret = m_server->transact(BINDER_LIB_TEST_GET_ID_TRANSACTION, data, &reply);
464 EXPECT_EQ(NO_ERROR, ret);
465 ret = reply.readInt32(&id);
466 EXPECT_EQ(NO_ERROR, ret);
467 EXPECT_EQ(0, id);
468 }
469
TEST_F(BinderLibTest,PtrSize)470 TEST_F(BinderLibTest, PtrSize) {
471 status_t ret;
472 int32_t ptrsize;
473 Parcel data, reply;
474 sp<IBinder> server = addServer();
475 ASSERT_TRUE(server != nullptr);
476 ret = server->transact(BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION, data, &reply);
477 EXPECT_EQ(NO_ERROR, ret);
478 ret = reply.readInt32(&ptrsize);
479 EXPECT_EQ(NO_ERROR, ret);
480 RecordProperty("TestPtrSize", sizeof(void *));
481 RecordProperty("ServerPtrSize", sizeof(void *));
482 }
483
TEST_F(BinderLibTest,IndirectGetId2)484 TEST_F(BinderLibTest, IndirectGetId2)
485 {
486 status_t ret;
487 int32_t id;
488 int32_t count;
489 Parcel data, reply;
490 int32_t serverId[3];
491
492 data.writeInt32(ARRAY_SIZE(serverId));
493 for (size_t i = 0; i < ARRAY_SIZE(serverId); i++) {
494 sp<IBinder> server;
495 BinderLibTestBundle datai;
496
497 server = addServer(&serverId[i]);
498 ASSERT_TRUE(server != nullptr);
499 data.writeStrongBinder(server);
500 data.writeInt32(BINDER_LIB_TEST_GET_ID_TRANSACTION);
501 datai.appendTo(&data);
502 }
503
504 ret = m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply);
505 ASSERT_EQ(NO_ERROR, ret);
506
507 ret = reply.readInt32(&id);
508 ASSERT_EQ(NO_ERROR, ret);
509 EXPECT_EQ(0, id);
510
511 ret = reply.readInt32(&count);
512 ASSERT_EQ(NO_ERROR, ret);
513 EXPECT_EQ(ARRAY_SIZE(serverId), (size_t)count);
514
515 for (size_t i = 0; i < (size_t)count; i++) {
516 BinderLibTestBundle replyi(&reply);
517 EXPECT_TRUE(replyi.isValid());
518 ret = replyi.readInt32(&id);
519 EXPECT_EQ(NO_ERROR, ret);
520 EXPECT_EQ(serverId[i], id);
521 EXPECT_EQ(replyi.dataSize(), replyi.dataPosition());
522 }
523
524 EXPECT_EQ(reply.dataSize(), reply.dataPosition());
525 }
526
TEST_F(BinderLibTest,IndirectGetId3)527 TEST_F(BinderLibTest, IndirectGetId3)
528 {
529 status_t ret;
530 int32_t id;
531 int32_t count;
532 Parcel data, reply;
533 int32_t serverId[3];
534
535 data.writeInt32(ARRAY_SIZE(serverId));
536 for (size_t i = 0; i < ARRAY_SIZE(serverId); i++) {
537 sp<IBinder> server;
538 BinderLibTestBundle datai;
539 BinderLibTestBundle datai2;
540
541 server = addServer(&serverId[i]);
542 ASSERT_TRUE(server != nullptr);
543 data.writeStrongBinder(server);
544 data.writeInt32(BINDER_LIB_TEST_INDIRECT_TRANSACTION);
545
546 datai.writeInt32(1);
547 datai.writeStrongBinder(m_server);
548 datai.writeInt32(BINDER_LIB_TEST_GET_ID_TRANSACTION);
549 datai2.appendTo(&datai);
550
551 datai.appendTo(&data);
552 }
553
554 ret = m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply);
555 ASSERT_EQ(NO_ERROR, ret);
556
557 ret = reply.readInt32(&id);
558 ASSERT_EQ(NO_ERROR, ret);
559 EXPECT_EQ(0, id);
560
561 ret = reply.readInt32(&count);
562 ASSERT_EQ(NO_ERROR, ret);
563 EXPECT_EQ(ARRAY_SIZE(serverId), (size_t)count);
564
565 for (size_t i = 0; i < (size_t)count; i++) {
566 int32_t counti;
567
568 BinderLibTestBundle replyi(&reply);
569 EXPECT_TRUE(replyi.isValid());
570 ret = replyi.readInt32(&id);
571 EXPECT_EQ(NO_ERROR, ret);
572 EXPECT_EQ(serverId[i], id);
573
574 ret = replyi.readInt32(&counti);
575 ASSERT_EQ(NO_ERROR, ret);
576 EXPECT_EQ(1, counti);
577
578 BinderLibTestBundle replyi2(&replyi);
579 EXPECT_TRUE(replyi2.isValid());
580 ret = replyi2.readInt32(&id);
581 EXPECT_EQ(NO_ERROR, ret);
582 EXPECT_EQ(0, id);
583 EXPECT_EQ(replyi2.dataSize(), replyi2.dataPosition());
584
585 EXPECT_EQ(replyi.dataSize(), replyi.dataPosition());
586 }
587
588 EXPECT_EQ(reply.dataSize(), reply.dataPosition());
589 }
590
TEST_F(BinderLibTest,CallBack)591 TEST_F(BinderLibTest, CallBack)
592 {
593 status_t ret;
594 Parcel data, reply;
595 sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
596 data.writeStrongBinder(callBack);
597 ret = m_server->transact(BINDER_LIB_TEST_NOP_CALL_BACK, data, &reply, TF_ONE_WAY);
598 EXPECT_EQ(NO_ERROR, ret);
599 ret = callBack->waitEvent(5);
600 EXPECT_EQ(NO_ERROR, ret);
601 ret = callBack->getResult();
602 EXPECT_EQ(NO_ERROR, ret);
603 }
604
TEST_F(BinderLibTest,AddServer)605 TEST_F(BinderLibTest, AddServer)
606 {
607 sp<IBinder> server = addServer();
608 ASSERT_TRUE(server != nullptr);
609 }
610
TEST_F(BinderLibTest,DeathNotificationStrongRef)611 TEST_F(BinderLibTest, DeathNotificationStrongRef)
612 {
613 status_t ret;
614 sp<IBinder> sbinder;
615
616 sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();
617
618 {
619 sp<IBinder> binder = addServer();
620 ASSERT_TRUE(binder != nullptr);
621 ret = binder->linkToDeath(testDeathRecipient);
622 EXPECT_EQ(NO_ERROR, ret);
623 sbinder = binder;
624 }
625 {
626 Parcel data, reply;
627 ret = sbinder->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
628 EXPECT_EQ(0, ret);
629 }
630 IPCThreadState::self()->flushCommands();
631 ret = testDeathRecipient->waitEvent(5);
632 EXPECT_EQ(NO_ERROR, ret);
633 ret = sbinder->unlinkToDeath(testDeathRecipient);
634 EXPECT_EQ(DEAD_OBJECT, ret);
635 }
636
TEST_F(BinderLibTest,DeathNotificationMultiple)637 TEST_F(BinderLibTest, DeathNotificationMultiple)
638 {
639 status_t ret;
640 const int clientcount = 2;
641 sp<IBinder> target;
642 sp<IBinder> linkedclient[clientcount];
643 sp<BinderLibTestCallBack> callBack[clientcount];
644 sp<IBinder> passiveclient[clientcount];
645
646 target = addServer();
647 ASSERT_TRUE(target != nullptr);
648 for (int i = 0; i < clientcount; i++) {
649 {
650 Parcel data, reply;
651
652 linkedclient[i] = addServer();
653 ASSERT_TRUE(linkedclient[i] != nullptr);
654 callBack[i] = new BinderLibTestCallBack();
655 data.writeStrongBinder(target);
656 data.writeStrongBinder(callBack[i]);
657 ret = linkedclient[i]->transact(BINDER_LIB_TEST_LINK_DEATH_TRANSACTION, data, &reply, TF_ONE_WAY);
658 EXPECT_EQ(NO_ERROR, ret);
659 }
660 {
661 Parcel data, reply;
662
663 passiveclient[i] = addServer();
664 ASSERT_TRUE(passiveclient[i] != nullptr);
665 data.writeStrongBinder(target);
666 ret = passiveclient[i]->transact(BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION, data, &reply, TF_ONE_WAY);
667 EXPECT_EQ(NO_ERROR, ret);
668 }
669 }
670 {
671 Parcel data, reply;
672 ret = target->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
673 EXPECT_EQ(0, ret);
674 }
675
676 for (int i = 0; i < clientcount; i++) {
677 ret = callBack[i]->waitEvent(5);
678 EXPECT_EQ(NO_ERROR, ret);
679 ret = callBack[i]->getResult();
680 EXPECT_EQ(NO_ERROR, ret);
681 }
682 }
683
TEST_F(BinderLibTest,DeathNotificationThread)684 TEST_F(BinderLibTest, DeathNotificationThread)
685 {
686 status_t ret;
687 sp<BinderLibTestCallBack> callback;
688 sp<IBinder> target = addServer();
689 ASSERT_TRUE(target != nullptr);
690 sp<IBinder> client = addServer();
691 ASSERT_TRUE(client != nullptr);
692
693 sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();
694
695 ret = target->linkToDeath(testDeathRecipient);
696 EXPECT_EQ(NO_ERROR, ret);
697
698 {
699 Parcel data, reply;
700 ret = target->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
701 EXPECT_EQ(0, ret);
702 }
703
704 /* Make sure it's dead */
705 testDeathRecipient->waitEvent(5);
706
707 /* Now, pass the ref to another process and ask that process to
708 * call linkToDeath() on it, and wait for a response. This tests
709 * two things:
710 * 1) You still get death notifications when calling linkToDeath()
711 * on a ref that is already dead when it was passed to you.
712 * 2) That death notifications are not directly pushed to the thread
713 * registering them, but to the threadpool (proc workqueue) instead.
714 *
715 * 2) is tested because the thread handling BINDER_LIB_TEST_DEATH_TRANSACTION
716 * is blocked on a condition variable waiting for the death notification to be
717 * called; therefore, that thread is not available for handling proc work.
718 * So, if the death notification was pushed to the thread workqueue, the callback
719 * would never be called, and the test would timeout and fail.
720 *
721 * Note that we can't do this part of the test from this thread itself, because
722 * the binder driver would only push death notifications to the thread if
723 * it is a looper thread, which this thread is not.
724 *
725 * See b/23525545 for details.
726 */
727 {
728 Parcel data, reply;
729
730 callback = new BinderLibTestCallBack();
731 data.writeStrongBinder(target);
732 data.writeStrongBinder(callback);
733 ret = client->transact(BINDER_LIB_TEST_LINK_DEATH_TRANSACTION, data, &reply, TF_ONE_WAY);
734 EXPECT_EQ(NO_ERROR, ret);
735 }
736
737 ret = callback->waitEvent(5);
738 EXPECT_EQ(NO_ERROR, ret);
739 ret = callback->getResult();
740 EXPECT_EQ(NO_ERROR, ret);
741 }
742
TEST_F(BinderLibTest,PassFile)743 TEST_F(BinderLibTest, PassFile) {
744 int ret;
745 int pipefd[2];
746 uint8_t buf[1] = { 0 };
747 uint8_t write_value = 123;
748
749 ret = pipe2(pipefd, O_NONBLOCK);
750 ASSERT_EQ(0, ret);
751
752 {
753 Parcel data, reply;
754 uint8_t writebuf[1] = { write_value };
755
756 ret = data.writeFileDescriptor(pipefd[1], true);
757 EXPECT_EQ(NO_ERROR, ret);
758
759 ret = data.writeInt32(sizeof(writebuf));
760 EXPECT_EQ(NO_ERROR, ret);
761
762 ret = data.write(writebuf, sizeof(writebuf));
763 EXPECT_EQ(NO_ERROR, ret);
764
765 ret = m_server->transact(BINDER_LIB_TEST_WRITE_FILE_TRANSACTION, data, &reply);
766 EXPECT_EQ(NO_ERROR, ret);
767 }
768
769 ret = read(pipefd[0], buf, sizeof(buf));
770 EXPECT_EQ(sizeof(buf), (size_t)ret);
771 EXPECT_EQ(write_value, buf[0]);
772
773 waitForReadData(pipefd[0], 5000); /* wait for other proccess to close pipe */
774
775 ret = read(pipefd[0], buf, sizeof(buf));
776 EXPECT_EQ(0, ret);
777
778 close(pipefd[0]);
779 }
780
TEST_F(BinderLibTest,PassParcelFileDescriptor)781 TEST_F(BinderLibTest, PassParcelFileDescriptor) {
782 const int datasize = 123;
783 std::vector<uint8_t> writebuf(datasize);
784 for (size_t i = 0; i < writebuf.size(); ++i) {
785 writebuf[i] = i;
786 }
787
788 android::base::unique_fd read_end, write_end;
789 {
790 int pipefd[2];
791 ASSERT_EQ(0, pipe2(pipefd, O_NONBLOCK));
792 read_end.reset(pipefd[0]);
793 write_end.reset(pipefd[1]);
794 }
795 {
796 Parcel data;
797 EXPECT_EQ(NO_ERROR, data.writeDupParcelFileDescriptor(write_end.get()));
798 write_end.reset();
799 EXPECT_EQ(NO_ERROR, data.writeInt32(datasize));
800 EXPECT_EQ(NO_ERROR, data.write(writebuf.data(), datasize));
801
802 Parcel reply;
803 EXPECT_EQ(NO_ERROR,
804 m_server->transact(BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION, data,
805 &reply));
806 }
807 std::vector<uint8_t> readbuf(datasize);
808 EXPECT_EQ(datasize, read(read_end.get(), readbuf.data(), datasize));
809 EXPECT_EQ(writebuf, readbuf);
810
811 waitForReadData(read_end.get(), 5000); /* wait for other proccess to close pipe */
812
813 EXPECT_EQ(0, read(read_end.get(), readbuf.data(), datasize));
814 }
815
TEST_F(BinderLibTest,PromoteLocal)816 TEST_F(BinderLibTest, PromoteLocal) {
817 sp<IBinder> strong = new BBinder();
818 wp<IBinder> weak = strong;
819 sp<IBinder> strong_from_weak = weak.promote();
820 EXPECT_TRUE(strong != nullptr);
821 EXPECT_EQ(strong, strong_from_weak);
822 strong = nullptr;
823 strong_from_weak = nullptr;
824 strong_from_weak = weak.promote();
825 EXPECT_TRUE(strong_from_weak == nullptr);
826 }
827
TEST_F(BinderLibTest,LocalGetExtension)828 TEST_F(BinderLibTest, LocalGetExtension) {
829 sp<BBinder> binder = new BBinder();
830 sp<IBinder> ext = new BBinder();
831 binder->setExtension(ext);
832 EXPECT_EQ(ext, binder->getExtension());
833 }
834
TEST_F(BinderLibTest,RemoteGetExtension)835 TEST_F(BinderLibTest, RemoteGetExtension) {
836 sp<IBinder> server = addServer();
837 ASSERT_TRUE(server != nullptr);
838
839 sp<IBinder> extension;
840 EXPECT_EQ(NO_ERROR, server->getExtension(&extension));
841 ASSERT_NE(nullptr, extension.get());
842
843 EXPECT_EQ(NO_ERROR, extension->pingBinder());
844 }
845
TEST_F(BinderLibTest,CheckHandleZeroBinderHighBitsZeroCookie)846 TEST_F(BinderLibTest, CheckHandleZeroBinderHighBitsZeroCookie) {
847 status_t ret;
848 Parcel data, reply;
849
850 ret = m_server->transact(BINDER_LIB_TEST_GET_SELF_TRANSACTION, data, &reply);
851 EXPECT_EQ(NO_ERROR, ret);
852
853 const flat_binder_object *fb = reply.readObject(false);
854 ASSERT_TRUE(fb != nullptr);
855 EXPECT_EQ(BINDER_TYPE_HANDLE, fb->hdr.type);
856 EXPECT_EQ(m_server, ProcessState::self()->getStrongProxyForHandle(fb->handle));
857 EXPECT_EQ((binder_uintptr_t)0, fb->cookie);
858 EXPECT_EQ((uint64_t)0, (uint64_t)fb->binder >> 32);
859 }
860
TEST_F(BinderLibTest,FreedBinder)861 TEST_F(BinderLibTest, FreedBinder) {
862 status_t ret;
863
864 sp<IBinder> server = addServer();
865 ASSERT_TRUE(server != nullptr);
866
867 __u32 freedHandle;
868 wp<IBinder> keepFreedBinder;
869 {
870 Parcel data, reply;
871 ret = server->transact(BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION, data, &reply);
872 ASSERT_EQ(NO_ERROR, ret);
873 struct flat_binder_object *freed = (struct flat_binder_object *)(reply.data());
874 freedHandle = freed->handle;
875 /* Add a weak ref to the freed binder so the driver does not
876 * delete its reference to it - otherwise the transaction
877 * fails regardless of whether the driver is fixed.
878 */
879 keepFreedBinder = reply.readStrongBinder();
880 }
881 IPCThreadState::self()->flushCommands();
882 {
883 Parcel data, reply;
884 data.writeStrongBinder(server);
885 /* Replace original handle with handle to the freed binder */
886 struct flat_binder_object *strong = (struct flat_binder_object *)(data.data());
887 __u32 oldHandle = strong->handle;
888 strong->handle = freedHandle;
889 ret = server->transact(BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION, data, &reply);
890 /* Returns DEAD_OBJECT (-32) if target crashes and
891 * FAILED_TRANSACTION if the driver rejects the invalid
892 * object.
893 */
894 EXPECT_EQ((status_t)FAILED_TRANSACTION, ret);
895 /* Restore original handle so parcel destructor does not use
896 * the wrong handle.
897 */
898 strong->handle = oldHandle;
899 }
900 }
901
TEST_F(BinderLibTest,CheckNoHeaderMappedInUser)902 TEST_F(BinderLibTest, CheckNoHeaderMappedInUser) {
903 status_t ret;
904 Parcel data, reply;
905 sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
906 for (int i = 0; i < 2; i++) {
907 BinderLibTestBundle datai;
908 datai.appendFrom(&data, 0, data.dataSize());
909
910 data.freeData();
911 data.writeInt32(1);
912 data.writeStrongBinder(callBack);
913 data.writeInt32(BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF);
914
915 datai.appendTo(&data);
916 }
917 ret = m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply);
918 EXPECT_EQ(NO_ERROR, ret);
919 }
920
TEST_F(BinderLibTest,OnewayQueueing)921 TEST_F(BinderLibTest, OnewayQueueing)
922 {
923 status_t ret;
924 Parcel data, data2;
925
926 sp<IBinder> pollServer = addPollServer();
927
928 sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
929 data.writeStrongBinder(callBack);
930 data.writeInt32(500000); // delay in us before calling back
931
932 sp<BinderLibTestCallBack> callBack2 = new BinderLibTestCallBack();
933 data2.writeStrongBinder(callBack2);
934 data2.writeInt32(0); // delay in us
935
936 ret = pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data, nullptr, TF_ONE_WAY);
937 EXPECT_EQ(NO_ERROR, ret);
938
939 // The delay ensures that this second transaction will end up on the async_todo list
940 // (for a single-threaded server)
941 ret = pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data2, nullptr, TF_ONE_WAY);
942 EXPECT_EQ(NO_ERROR, ret);
943
944 // The server will ensure that the two transactions are handled in the expected order;
945 // If the ordering is not as expected, an error will be returned through the callbacks.
946 ret = callBack->waitEvent(2);
947 EXPECT_EQ(NO_ERROR, ret);
948 ret = callBack->getResult();
949 EXPECT_EQ(NO_ERROR, ret);
950
951 ret = callBack2->waitEvent(2);
952 EXPECT_EQ(NO_ERROR, ret);
953 ret = callBack2->getResult();
954 EXPECT_EQ(NO_ERROR, ret);
955 }
956
TEST_F(BinderLibTest,WorkSourceUnsetByDefault)957 TEST_F(BinderLibTest, WorkSourceUnsetByDefault)
958 {
959 status_t ret;
960 Parcel data, reply;
961 data.writeInterfaceToken(binderLibTestServiceName);
962 ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
963 EXPECT_EQ(-1, reply.readInt32());
964 EXPECT_EQ(NO_ERROR, ret);
965 }
966
TEST_F(BinderLibTest,WorkSourceSet)967 TEST_F(BinderLibTest, WorkSourceSet)
968 {
969 status_t ret;
970 Parcel data, reply;
971 IPCThreadState::self()->clearCallingWorkSource();
972 int64_t previousWorkSource = IPCThreadState::self()->setCallingWorkSourceUid(100);
973 data.writeInterfaceToken(binderLibTestServiceName);
974 ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
975 EXPECT_EQ(100, reply.readInt32());
976 EXPECT_EQ(-1, previousWorkSource);
977 EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
978 EXPECT_EQ(NO_ERROR, ret);
979 }
980
TEST_F(BinderLibTest,WorkSourceSetWithoutPropagation)981 TEST_F(BinderLibTest, WorkSourceSetWithoutPropagation)
982 {
983 status_t ret;
984 Parcel data, reply;
985
986 IPCThreadState::self()->setCallingWorkSourceUidWithoutPropagation(100);
987 EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
988
989 data.writeInterfaceToken(binderLibTestServiceName);
990 ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
991 EXPECT_EQ(-1, reply.readInt32());
992 EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
993 EXPECT_EQ(NO_ERROR, ret);
994 }
995
TEST_F(BinderLibTest,WorkSourceCleared)996 TEST_F(BinderLibTest, WorkSourceCleared)
997 {
998 status_t ret;
999 Parcel data, reply;
1000
1001 IPCThreadState::self()->setCallingWorkSourceUid(100);
1002 int64_t token = IPCThreadState::self()->clearCallingWorkSource();
1003 int32_t previousWorkSource = (int32_t)token;
1004 data.writeInterfaceToken(binderLibTestServiceName);
1005 ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
1006
1007 EXPECT_EQ(-1, reply.readInt32());
1008 EXPECT_EQ(100, previousWorkSource);
1009 EXPECT_EQ(NO_ERROR, ret);
1010 }
1011
TEST_F(BinderLibTest,WorkSourceRestored)1012 TEST_F(BinderLibTest, WorkSourceRestored)
1013 {
1014 status_t ret;
1015 Parcel data, reply;
1016
1017 IPCThreadState::self()->setCallingWorkSourceUid(100);
1018 int64_t token = IPCThreadState::self()->clearCallingWorkSource();
1019 IPCThreadState::self()->restoreCallingWorkSource(token);
1020
1021 data.writeInterfaceToken(binderLibTestServiceName);
1022 ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
1023
1024 EXPECT_EQ(100, reply.readInt32());
1025 EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
1026 EXPECT_EQ(NO_ERROR, ret);
1027 }
1028
TEST_F(BinderLibTest,PropagateFlagSet)1029 TEST_F(BinderLibTest, PropagateFlagSet)
1030 {
1031 IPCThreadState::self()->clearPropagateWorkSource();
1032 IPCThreadState::self()->setCallingWorkSourceUid(100);
1033 EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
1034 }
1035
TEST_F(BinderLibTest,PropagateFlagCleared)1036 TEST_F(BinderLibTest, PropagateFlagCleared)
1037 {
1038 IPCThreadState::self()->setCallingWorkSourceUid(100);
1039 IPCThreadState::self()->clearPropagateWorkSource();
1040 EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
1041 }
1042
TEST_F(BinderLibTest,PropagateFlagRestored)1043 TEST_F(BinderLibTest, PropagateFlagRestored)
1044 {
1045 int token = IPCThreadState::self()->setCallingWorkSourceUid(100);
1046 IPCThreadState::self()->restoreCallingWorkSource(token);
1047
1048 EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
1049 }
1050
TEST_F(BinderLibTest,WorkSourcePropagatedForAllFollowingBinderCalls)1051 TEST_F(BinderLibTest, WorkSourcePropagatedForAllFollowingBinderCalls)
1052 {
1053 IPCThreadState::self()->setCallingWorkSourceUid(100);
1054
1055 Parcel data, reply;
1056 status_t ret;
1057 data.writeInterfaceToken(binderLibTestServiceName);
1058 ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
1059
1060 Parcel data2, reply2;
1061 status_t ret2;
1062 data2.writeInterfaceToken(binderLibTestServiceName);
1063 ret2 = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data2, &reply2);
1064 EXPECT_EQ(100, reply2.readInt32());
1065 EXPECT_EQ(NO_ERROR, ret2);
1066 }
1067
TEST_F(BinderLibTest,SchedPolicySet)1068 TEST_F(BinderLibTest, SchedPolicySet) {
1069 sp<IBinder> server = addServer();
1070 ASSERT_TRUE(server != nullptr);
1071
1072 Parcel data, reply;
1073 status_t ret = server->transact(BINDER_LIB_TEST_GET_SCHEDULING_POLICY, data, &reply);
1074 EXPECT_EQ(NO_ERROR, ret);
1075
1076 int policy = reply.readInt32();
1077 int priority = reply.readInt32();
1078
1079 EXPECT_EQ(kSchedPolicy, policy & (~SCHED_RESET_ON_FORK));
1080 EXPECT_EQ(kSchedPriority, priority);
1081 }
1082
1083
TEST_F(BinderLibTest,VectorSent)1084 TEST_F(BinderLibTest, VectorSent) {
1085 Parcel data, reply;
1086 sp<IBinder> server = addServer();
1087 ASSERT_TRUE(server != nullptr);
1088
1089 std::vector<uint64_t> const testValue = { std::numeric_limits<uint64_t>::max(), 0, 200 };
1090 data.writeUint64Vector(testValue);
1091
1092 status_t ret = server->transact(BINDER_LIB_TEST_ECHO_VECTOR, data, &reply);
1093 EXPECT_EQ(NO_ERROR, ret);
1094 std::vector<uint64_t> readValue;
1095 ret = reply.readUint64Vector(&readValue);
1096 EXPECT_EQ(readValue, testValue);
1097 }
1098
TEST_F(BinderLibTest,BufRejected)1099 TEST_F(BinderLibTest, BufRejected) {
1100 Parcel data, reply;
1101 uint32_t buf;
1102 sp<IBinder> server = addServer();
1103 ASSERT_TRUE(server != nullptr);
1104
1105 binder_buffer_object obj {
1106 .hdr = { .type = BINDER_TYPE_PTR },
1107 .flags = 0,
1108 .buffer = reinterpret_cast<binder_uintptr_t>((void*)&buf),
1109 .length = 4,
1110 };
1111 data.setDataCapacity(1024);
1112 // Write a bogus object at offset 0 to get an entry in the offset table
1113 data.writeFileDescriptor(0);
1114 EXPECT_EQ(data.objectsCount(), 1);
1115 uint8_t *parcelData = const_cast<uint8_t*>(data.data());
1116 // And now, overwrite it with the buffer object
1117 memcpy(parcelData, &obj, sizeof(obj));
1118 data.setDataSize(sizeof(obj));
1119
1120 status_t ret = server->transact(BINDER_LIB_TEST_REJECT_BUF, data, &reply);
1121 // Either the kernel should reject this transaction (if it's correct), but
1122 // if it's not, the server implementation should return an error if it
1123 // finds an object in the received Parcel.
1124 EXPECT_NE(NO_ERROR, ret);
1125 }
1126
1127 class BinderLibTestService : public BBinder
1128 {
1129 public:
BinderLibTestService(int32_t id)1130 explicit BinderLibTestService(int32_t id)
1131 : m_id(id)
1132 , m_nextServerId(id + 1)
1133 , m_serverStartRequested(false)
1134 , m_callback(nullptr)
1135 {
1136 pthread_mutex_init(&m_serverWaitMutex, nullptr);
1137 pthread_cond_init(&m_serverWaitCond, nullptr);
1138 }
~BinderLibTestService()1139 ~BinderLibTestService()
1140 {
1141 exit(EXIT_SUCCESS);
1142 }
1143
processPendingCall()1144 void processPendingCall() {
1145 if (m_callback != nullptr) {
1146 Parcel data;
1147 data.writeInt32(NO_ERROR);
1148 m_callback->transact(BINDER_LIB_TEST_CALL_BACK, data, nullptr, TF_ONE_WAY);
1149 m_callback = nullptr;
1150 }
1151 }
1152
onTransact(uint32_t code,const Parcel & data,Parcel * reply,uint32_t flags=0)1153 virtual status_t onTransact(uint32_t code,
1154 const Parcel& data, Parcel* reply,
1155 uint32_t flags = 0) {
1156 //printf("%s: code %d\n", __func__, code);
1157 (void)flags;
1158
1159 if (getuid() != (uid_t)IPCThreadState::self()->getCallingUid()) {
1160 return PERMISSION_DENIED;
1161 }
1162 switch (code) {
1163 case BINDER_LIB_TEST_REGISTER_SERVER: {
1164 int32_t id;
1165 sp<IBinder> binder;
1166 id = data.readInt32();
1167 binder = data.readStrongBinder();
1168 if (binder == nullptr) {
1169 return BAD_VALUE;
1170 }
1171
1172 if (m_id != 0)
1173 return INVALID_OPERATION;
1174
1175 pthread_mutex_lock(&m_serverWaitMutex);
1176 if (m_serverStartRequested) {
1177 m_serverStartRequested = false;
1178 m_serverStarted = binder;
1179 pthread_cond_signal(&m_serverWaitCond);
1180 }
1181 pthread_mutex_unlock(&m_serverWaitMutex);
1182 return NO_ERROR;
1183 }
1184 case BINDER_LIB_TEST_ADD_POLL_SERVER:
1185 case BINDER_LIB_TEST_ADD_SERVER: {
1186 int ret;
1187 int serverid;
1188
1189 if (m_id != 0) {
1190 return INVALID_OPERATION;
1191 }
1192 pthread_mutex_lock(&m_serverWaitMutex);
1193 if (m_serverStartRequested) {
1194 ret = -EBUSY;
1195 } else {
1196 serverid = m_nextServerId++;
1197 m_serverStartRequested = true;
1198 bool usePoll = code == BINDER_LIB_TEST_ADD_POLL_SERVER;
1199
1200 pthread_mutex_unlock(&m_serverWaitMutex);
1201 ret = start_server_process(serverid, usePoll);
1202 pthread_mutex_lock(&m_serverWaitMutex);
1203 }
1204 if (ret > 0) {
1205 if (m_serverStartRequested) {
1206 struct timespec ts;
1207 clock_gettime(CLOCK_REALTIME, &ts);
1208 ts.tv_sec += 5;
1209 ret = pthread_cond_timedwait(&m_serverWaitCond, &m_serverWaitMutex, &ts);
1210 }
1211 if (m_serverStartRequested) {
1212 m_serverStartRequested = false;
1213 ret = -ETIMEDOUT;
1214 } else {
1215 reply->writeStrongBinder(m_serverStarted);
1216 reply->writeInt32(serverid);
1217 m_serverStarted = nullptr;
1218 ret = NO_ERROR;
1219 }
1220 } else if (ret >= 0) {
1221 m_serverStartRequested = false;
1222 ret = UNKNOWN_ERROR;
1223 }
1224 pthread_mutex_unlock(&m_serverWaitMutex);
1225 return ret;
1226 }
1227 case BINDER_LIB_TEST_GETPID:
1228 reply->writeInt32(getpid());
1229 return NO_ERROR;
1230 case BINDER_LIB_TEST_NOP_TRANSACTION_WAIT:
1231 usleep(5000);
1232 return NO_ERROR;
1233 case BINDER_LIB_TEST_NOP_TRANSACTION:
1234 return NO_ERROR;
1235 case BINDER_LIB_TEST_DELAYED_CALL_BACK: {
1236 // Note: this transaction is only designed for use with a
1237 // poll() server. See comments around epoll_wait().
1238 if (m_callback != nullptr) {
1239 // A callback was already pending; this means that
1240 // we received a second call while still processing
1241 // the first one. Fail the test.
1242 sp<IBinder> callback = data.readStrongBinder();
1243 Parcel data2;
1244 data2.writeInt32(UNKNOWN_ERROR);
1245
1246 callback->transact(BINDER_LIB_TEST_CALL_BACK, data2, nullptr, TF_ONE_WAY);
1247 } else {
1248 m_callback = data.readStrongBinder();
1249 int32_t delayUs = data.readInt32();
1250 /*
1251 * It's necessary that we sleep here, so the next
1252 * transaction the caller makes will be queued to
1253 * the async queue.
1254 */
1255 usleep(delayUs);
1256
1257 /*
1258 * Now when we return, libbinder will tell the kernel
1259 * we are done with this transaction, and the kernel
1260 * can move the queued transaction to either the
1261 * thread todo worklist (for kernels without the fix),
1262 * or the proc todo worklist. In case of the former,
1263 * the next outbound call will pick up the pending
1264 * transaction, which leads to undesired reentrant
1265 * behavior. This is caught in the if() branch above.
1266 */
1267 }
1268
1269 return NO_ERROR;
1270 }
1271 case BINDER_LIB_TEST_NOP_CALL_BACK: {
1272 Parcel data2, reply2;
1273 sp<IBinder> binder;
1274 binder = data.readStrongBinder();
1275 if (binder == nullptr) {
1276 return BAD_VALUE;
1277 }
1278 data2.writeInt32(NO_ERROR);
1279 binder->transact(BINDER_LIB_TEST_CALL_BACK, data2, &reply2);
1280 return NO_ERROR;
1281 }
1282 case BINDER_LIB_TEST_GET_SELF_TRANSACTION:
1283 reply->writeStrongBinder(this);
1284 return NO_ERROR;
1285 case BINDER_LIB_TEST_GET_ID_TRANSACTION:
1286 reply->writeInt32(m_id);
1287 return NO_ERROR;
1288 case BINDER_LIB_TEST_INDIRECT_TRANSACTION: {
1289 int32_t count;
1290 uint32_t indirect_code;
1291 sp<IBinder> binder;
1292
1293 count = data.readInt32();
1294 reply->writeInt32(m_id);
1295 reply->writeInt32(count);
1296 for (int i = 0; i < count; i++) {
1297 binder = data.readStrongBinder();
1298 if (binder == nullptr) {
1299 return BAD_VALUE;
1300 }
1301 indirect_code = data.readInt32();
1302 BinderLibTestBundle data2(&data);
1303 if (!data2.isValid()) {
1304 return BAD_VALUE;
1305 }
1306 BinderLibTestBundle reply2;
1307 binder->transact(indirect_code, data2, &reply2);
1308 reply2.appendTo(reply);
1309 }
1310 return NO_ERROR;
1311 }
1312 case BINDER_LIB_TEST_SET_ERROR_TRANSACTION:
1313 reply->setError(data.readInt32());
1314 return NO_ERROR;
1315 case BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION:
1316 reply->writeInt32(sizeof(void *));
1317 return NO_ERROR;
1318 case BINDER_LIB_TEST_GET_STATUS_TRANSACTION:
1319 return NO_ERROR;
1320 case BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION:
1321 m_strongRef = data.readStrongBinder();
1322 return NO_ERROR;
1323 case BINDER_LIB_TEST_LINK_DEATH_TRANSACTION: {
1324 int ret;
1325 Parcel data2, reply2;
1326 sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();
1327 sp<IBinder> target;
1328 sp<IBinder> callback;
1329
1330 target = data.readStrongBinder();
1331 if (target == nullptr) {
1332 return BAD_VALUE;
1333 }
1334 callback = data.readStrongBinder();
1335 if (callback == nullptr) {
1336 return BAD_VALUE;
1337 }
1338 ret = target->linkToDeath(testDeathRecipient);
1339 if (ret == NO_ERROR)
1340 ret = testDeathRecipient->waitEvent(5);
1341 data2.writeInt32(ret);
1342 callback->transact(BINDER_LIB_TEST_CALL_BACK, data2, &reply2);
1343 return NO_ERROR;
1344 }
1345 case BINDER_LIB_TEST_WRITE_FILE_TRANSACTION: {
1346 int ret;
1347 int32_t size;
1348 const void *buf;
1349 int fd;
1350
1351 fd = data.readFileDescriptor();
1352 if (fd < 0) {
1353 return BAD_VALUE;
1354 }
1355 ret = data.readInt32(&size);
1356 if (ret != NO_ERROR) {
1357 return ret;
1358 }
1359 buf = data.readInplace(size);
1360 if (buf == nullptr) {
1361 return BAD_VALUE;
1362 }
1363 ret = write(fd, buf, size);
1364 if (ret != size)
1365 return UNKNOWN_ERROR;
1366 return NO_ERROR;
1367 }
1368 case BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION: {
1369 int ret;
1370 int32_t size;
1371 const void *buf;
1372 android::base::unique_fd fd;
1373
1374 ret = data.readUniqueParcelFileDescriptor(&fd);
1375 if (ret != NO_ERROR) {
1376 return ret;
1377 }
1378 ret = data.readInt32(&size);
1379 if (ret != NO_ERROR) {
1380 return ret;
1381 }
1382 buf = data.readInplace(size);
1383 if (buf == nullptr) {
1384 return BAD_VALUE;
1385 }
1386 ret = write(fd.get(), buf, size);
1387 if (ret != size) return UNKNOWN_ERROR;
1388 return NO_ERROR;
1389 }
1390 case BINDER_LIB_TEST_DELAYED_EXIT_TRANSACTION:
1391 alarm(10);
1392 return NO_ERROR;
1393 case BINDER_LIB_TEST_EXIT_TRANSACTION:
1394 while (wait(nullptr) != -1 || errno != ECHILD)
1395 ;
1396 exit(EXIT_SUCCESS);
1397 case BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION: {
1398 sp<IBinder> binder = new BBinder();
1399 reply->writeStrongBinder(binder);
1400 return NO_ERROR;
1401 }
1402 case BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION: {
1403 data.enforceInterface(binderLibTestServiceName);
1404 reply->writeInt32(IPCThreadState::self()->getCallingWorkSourceUid());
1405 return NO_ERROR;
1406 }
1407 case BINDER_LIB_TEST_GET_SCHEDULING_POLICY: {
1408 int policy = 0;
1409 sched_param param;
1410 if (0 != pthread_getschedparam(pthread_self(), &policy, ¶m)) {
1411 return UNKNOWN_ERROR;
1412 }
1413 reply->writeInt32(policy);
1414 reply->writeInt32(param.sched_priority);
1415 return NO_ERROR;
1416 }
1417 case BINDER_LIB_TEST_ECHO_VECTOR: {
1418 std::vector<uint64_t> vector;
1419 auto err = data.readUint64Vector(&vector);
1420 if (err != NO_ERROR)
1421 return err;
1422 reply->writeUint64Vector(vector);
1423 return NO_ERROR;
1424 }
1425 case BINDER_LIB_TEST_REJECT_BUF: {
1426 return data.objectsCount() == 0 ? BAD_VALUE : NO_ERROR;
1427 }
1428 default:
1429 return UNKNOWN_TRANSACTION;
1430 };
1431 }
1432 private:
1433 int32_t m_id;
1434 int32_t m_nextServerId;
1435 pthread_mutex_t m_serverWaitMutex;
1436 pthread_cond_t m_serverWaitCond;
1437 bool m_serverStartRequested;
1438 sp<IBinder> m_serverStarted;
1439 sp<IBinder> m_strongRef;
1440 sp<IBinder> m_callback;
1441 };
1442
run_server(int index,int readypipefd,bool usePoll)1443 int run_server(int index, int readypipefd, bool usePoll)
1444 {
1445 binderLibTestServiceName += String16(binderserversuffix);
1446
1447 status_t ret;
1448 sp<IServiceManager> sm = defaultServiceManager();
1449 BinderLibTestService* testServicePtr;
1450 {
1451 sp<BinderLibTestService> testService = new BinderLibTestService(index);
1452
1453 testService->setMinSchedulerPolicy(kSchedPolicy, kSchedPriority);
1454
1455 /*
1456 * Normally would also contain functionality as well, but we are only
1457 * testing the extension mechanism.
1458 */
1459 testService->setExtension(new BBinder());
1460
1461 // Required for test "BufRejected'
1462 testService->setRequestingSid(true);
1463
1464 /*
1465 * We need this below, but can't hold a sp<> because it prevents the
1466 * node from being cleaned up automatically. It's safe in this case
1467 * because of how the tests are written.
1468 */
1469 testServicePtr = testService.get();
1470
1471 if (index == 0) {
1472 ret = sm->addService(binderLibTestServiceName, testService);
1473 } else {
1474 sp<IBinder> server = sm->getService(binderLibTestServiceName);
1475 Parcel data, reply;
1476 data.writeInt32(index);
1477 data.writeStrongBinder(testService);
1478
1479 ret = server->transact(BINDER_LIB_TEST_REGISTER_SERVER, data, &reply);
1480 }
1481 }
1482 write(readypipefd, &ret, sizeof(ret));
1483 close(readypipefd);
1484 //printf("%s: ret %d\n", __func__, ret);
1485 if (ret)
1486 return 1;
1487 //printf("%s: joinThreadPool\n", __func__);
1488 if (usePoll) {
1489 int fd;
1490 struct epoll_event ev;
1491 int epoll_fd;
1492 IPCThreadState::self()->setupPolling(&fd);
1493 if (fd < 0) {
1494 return 1;
1495 }
1496 IPCThreadState::self()->flushCommands(); // flush BC_ENTER_LOOPER
1497
1498 epoll_fd = epoll_create1(EPOLL_CLOEXEC);
1499 if (epoll_fd == -1) {
1500 return 1;
1501 }
1502
1503 ev.events = EPOLLIN;
1504 if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev) == -1) {
1505 return 1;
1506 }
1507
1508 while (1) {
1509 /*
1510 * We simulate a single-threaded process using the binder poll
1511 * interface; besides handling binder commands, it can also
1512 * issue outgoing transactions, by storing a callback in
1513 * m_callback.
1514 *
1515 * processPendingCall() will then issue that transaction.
1516 */
1517 struct epoll_event events[1];
1518 int numEvents = epoll_wait(epoll_fd, events, 1, 1000);
1519 if (numEvents < 0) {
1520 if (errno == EINTR) {
1521 continue;
1522 }
1523 return 1;
1524 }
1525 if (numEvents > 0) {
1526 IPCThreadState::self()->handlePolledCommands();
1527 IPCThreadState::self()->flushCommands(); // flush BC_FREE_BUFFER
1528 testServicePtr->processPendingCall();
1529 }
1530 }
1531 } else {
1532 ProcessState::self()->startThreadPool();
1533 IPCThreadState::self()->joinThreadPool();
1534 }
1535 //printf("%s: joinThreadPool returned\n", __func__);
1536 return 1; /* joinThreadPool should not return */
1537 }
1538
main(int argc,char ** argv)1539 int main(int argc, char **argv) {
1540 ExitIfWrongAbi();
1541
1542 if (argc == 4 && !strcmp(argv[1], "--servername")) {
1543 binderservername = argv[2];
1544 } else {
1545 binderservername = argv[0];
1546 }
1547
1548 if (argc == 6 && !strcmp(argv[1], binderserverarg)) {
1549 binderserversuffix = argv[5];
1550 return run_server(atoi(argv[2]), atoi(argv[3]), atoi(argv[4]) == 1);
1551 }
1552 binderserversuffix = new char[16];
1553 snprintf(binderserversuffix, 16, "%d", getpid());
1554 binderLibTestServiceName += String16(binderserversuffix);
1555
1556 ::testing::InitGoogleTest(&argc, argv);
1557 binder_env = AddGlobalTestEnvironment(new BinderLibTestEnv());
1558 ProcessState::self()->startThreadPool();
1559 return RUN_ALL_TESTS();
1560 }
1561