• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <chrono>
#include <fstream>
#include <thread>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <android-base/properties.h>
#include <android-base/result-gmock.h>
#include <android-base/result.h>
#include <android-base/scopeguard.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>
#include <binder/Binder.h>
#include <binder/BpBinder.h>
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/RpcServer.h>
#include <binder/RpcSession.h>

#include <linux/sched.h>
#include <sys/epoll.h>
#include <sys/prctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/wait.h>

#include "../binder_module.h"
#include "binderAbiHelper.h"
52 
// Number of elements in a C array (argument must be a true array, not a pointer).
#define ARRAY_SIZE(array) (sizeof array / sizeof array[0])

using namespace android;
using namespace std::string_literals;
using namespace std::chrono_literals;
using android::base::testing::HasValue;
using android::base::testing::Ok;
using testing::ExplainMatchResult;
using testing::Matcher;
using testing::Not;
using testing::WithParamInterface;
64 
// gmock matcher comparing a status_t against an expected value; failure
// messages print both sides via statusToString().
// e.g. EXPECT_THAT(expr, StatusEq(OK)) << "additional message";
MATCHER_P(StatusEq, expected, (negation ? "not " : "") + statusToString(expected)) {
    *result_listener << statusToString(arg);
    return expected == arg;
}
70 
IsPageAligned(void * buf)71 static ::testing::AssertionResult IsPageAligned(void *buf) {
72     if (((unsigned long)buf & ((unsigned long)PAGE_SIZE - 1)) == 0)
73         return ::testing::AssertionSuccess();
74     else
75         return ::testing::AssertionFailure() << buf << " is not page aligned";
76 }
77 
// Shared test-global state, populated at startup (outside this chunk).
static testing::Environment* binder_env; // global env owning the remote server handle
static char *binderservername;           // path of this binary; re-exec'd to spawn servers
static char *binderserversuffix;         // extra argv entry forwarded to server children
static char binderserverarg[] = "--binderserver"; // flag selecting server mode in the child

// Scheduling parameters used by scheduling-policy tests.
static constexpr int kSchedPolicy = SCHED_RR;
static constexpr int kSchedPriority = 7;
static constexpr int kSchedPriorityMore = 8;
static constexpr int kKernelThreads = 15;

// Name under which the spawned server registers with servicemanager.
static String16 binderLibTestServiceName = String16("test.binderLib");
89 
// Transaction codes understood by the test server. (The historical typo
// "Transcation" in the enum name is kept for source compatibility.)
// Order matters: values are assigned sequentially from FIRST_CALL_TRANSACTION.
enum BinderLibTestTranscationCode {
    BINDER_LIB_TEST_NOP_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
    BINDER_LIB_TEST_REGISTER_SERVER,
    BINDER_LIB_TEST_ADD_SERVER,
    BINDER_LIB_TEST_ADD_POLL_SERVER,
    BINDER_LIB_TEST_USE_CALLING_GUARD_TRANSACTION,
    BINDER_LIB_TEST_CALL_BACK,
    BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF,
    BINDER_LIB_TEST_DELAYED_CALL_BACK,
    BINDER_LIB_TEST_NOP_CALL_BACK,
    BINDER_LIB_TEST_GET_SELF_TRANSACTION,
    BINDER_LIB_TEST_GET_ID_TRANSACTION,
    BINDER_LIB_TEST_INDIRECT_TRANSACTION,
    BINDER_LIB_TEST_SET_ERROR_TRANSACTION,
    BINDER_LIB_TEST_GET_STATUS_TRANSACTION,
    BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION,
    BINDER_LIB_TEST_LINK_DEATH_TRANSACTION,
    BINDER_LIB_TEST_WRITE_FILE_TRANSACTION,
    BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION,
    BINDER_LIB_TEST_EXIT_TRANSACTION,
    BINDER_LIB_TEST_DELAYED_EXIT_TRANSACTION,
    BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION,
    BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION,
    BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION,
    BINDER_LIB_TEST_GET_SCHEDULING_POLICY,
    BINDER_LIB_TEST_NOP_TRANSACTION_WAIT,
    BINDER_LIB_TEST_GETPID,
    BINDER_LIB_TEST_ECHO_VECTOR,
    BINDER_LIB_TEST_GET_NON_BLOCKING_FD,
    BINDER_LIB_TEST_REJECT_OBJECTS,
    BINDER_LIB_TEST_CAN_GET_SID,
    BINDER_LIB_TEST_GET_MAX_THREAD_COUNT,
    BINDER_LIB_TEST_SET_MAX_THREAD_COUNT,
    BINDER_LIB_TEST_IS_THREADPOOL_STARTED,
    BINDER_LIB_TEST_LOCK_UNLOCK,
    BINDER_LIB_TEST_PROCESS_LOCK,
    BINDER_LIB_TEST_UNLOCK_AFTER_MS,
    BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK
};
129 
start_server_process(int arg2,bool usePoll=false)130 pid_t start_server_process(int arg2, bool usePoll = false)
131 {
132     int ret;
133     pid_t pid;
134     status_t status;
135     int pipefd[2];
136     char stri[16];
137     char strpipefd1[16];
138     char usepoll[2];
139     char *childargv[] = {
140         binderservername,
141         binderserverarg,
142         stri,
143         strpipefd1,
144         usepoll,
145         binderserversuffix,
146         nullptr
147     };
148 
149     ret = pipe(pipefd);
150     if (ret < 0)
151         return ret;
152 
153     snprintf(stri, sizeof(stri), "%d", arg2);
154     snprintf(strpipefd1, sizeof(strpipefd1), "%d", pipefd[1]);
155     snprintf(usepoll, sizeof(usepoll), "%d", usePoll ? 1 : 0);
156 
157     pid = fork();
158     if (pid == -1)
159         return pid;
160     if (pid == 0) {
161         prctl(PR_SET_PDEATHSIG, SIGHUP);
162         close(pipefd[0]);
163         execv(binderservername, childargv);
164         status = -errno;
165         write(pipefd[1], &status, sizeof(status));
166         fprintf(stderr, "execv failed, %s\n", strerror(errno));
167         _exit(EXIT_FAILURE);
168     }
169     close(pipefd[1]);
170     ret = read(pipefd[0], &status, sizeof(status));
171     //printf("pipe read returned %d, status %d\n", ret, status);
172     close(pipefd[0]);
173     if (ret == sizeof(status)) {
174         ret = status;
175     } else {
176         kill(pid, SIGKILL);
177         if (ret >= 0) {
178             ret = NO_INIT;
179         }
180     }
181     if (ret < 0) {
182         wait(nullptr);
183         return ret;
184     }
185     return pid;
186 }
187 
GetId(sp<IBinder> service)188 android::base::Result<int32_t> GetId(sp<IBinder> service) {
189     using android::base::Error;
190     Parcel data, reply;
191     data.markForBinder(service);
192     const char *prefix = data.isForRpc() ? "On RPC server, " : "On binder server, ";
193     status_t status = service->transact(BINDER_LIB_TEST_GET_ID_TRANSACTION, data, &reply);
194     if (status != OK)
195         return Error(status) << prefix << "transact(GET_ID): " << statusToString(status);
196     int32_t result = 0;
197     status = reply.readInt32(&result);
198     if (status != OK) return Error(status) << prefix << "readInt32: " << statusToString(status);
199     return result;
200 }
201 
202 class BinderLibTestEnv : public ::testing::Environment {
203     public:
BinderLibTestEnv()204         BinderLibTestEnv() {}
getServer(void)205         sp<IBinder> getServer(void) {
206             return m_server;
207         }
208 
209     private:
SetUp()210         virtual void SetUp() {
211             m_serverpid = start_server_process(0);
212             //printf("m_serverpid %d\n", m_serverpid);
213             ASSERT_GT(m_serverpid, 0);
214 
215             sp<IServiceManager> sm = defaultServiceManager();
216             //printf("%s: pid %d, get service\n", __func__, m_pid);
217             m_server = sm->getService(binderLibTestServiceName);
218             ASSERT_TRUE(m_server != nullptr);
219             //printf("%s: pid %d, get service done\n", __func__, m_pid);
220         }
TearDown()221         virtual void TearDown() {
222             status_t ret;
223             Parcel data, reply;
224             int exitStatus;
225             pid_t pid;
226 
227             //printf("%s: pid %d\n", __func__, m_pid);
228             if (m_server != nullptr) {
229                 ret = m_server->transact(BINDER_LIB_TEST_GET_STATUS_TRANSACTION, data, &reply);
230                 EXPECT_EQ(0, ret);
231                 ret = m_server->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
232                 EXPECT_EQ(0, ret);
233             }
234             if (m_serverpid > 0) {
235                 //printf("wait for %d\n", m_pids[i]);
236                 pid = wait(&exitStatus);
237                 EXPECT_EQ(m_serverpid, pid);
238                 EXPECT_TRUE(WIFEXITED(exitStatus));
239                 EXPECT_EQ(0, WEXITSTATUS(exitStatus));
240             }
241         }
242 
243         pid_t m_serverpid;
244         sp<IBinder> m_server;
245 };
246 
247 class BinderLibTest : public ::testing::Test {
248     public:
SetUp()249         virtual void SetUp() {
250             m_server = static_cast<BinderLibTestEnv *>(binder_env)->getServer();
251             IPCThreadState::self()->restoreCallingWorkSource(0);
252         }
TearDown()253         virtual void TearDown() {
254         }
255     protected:
addServerEtc(int32_t * idPtr,int code)256         sp<IBinder> addServerEtc(int32_t *idPtr, int code)
257         {
258             int32_t id;
259             Parcel data, reply;
260 
261             EXPECT_THAT(m_server->transact(code, data, &reply), StatusEq(NO_ERROR));
262 
263             sp<IBinder> binder = reply.readStrongBinder();
264             EXPECT_NE(nullptr, binder);
265             EXPECT_THAT(reply.readInt32(&id), StatusEq(NO_ERROR));
266             if (idPtr)
267                 *idPtr = id;
268             return binder;
269         }
270 
addServer(int32_t * idPtr=nullptr)271         sp<IBinder> addServer(int32_t *idPtr = nullptr)
272         {
273             return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_SERVER);
274         }
275 
addPollServer(int32_t * idPtr=nullptr)276         sp<IBinder> addPollServer(int32_t *idPtr = nullptr)
277         {
278             return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_POLL_SERVER);
279         }
280 
waitForReadData(int fd,int timeout_ms)281         void waitForReadData(int fd, int timeout_ms) {
282             int ret;
283             pollfd pfd = pollfd();
284 
285             pfd.fd = fd;
286             pfd.events = POLLIN;
287             ret = poll(&pfd, 1, timeout_ms);
288             EXPECT_EQ(1, ret);
289         }
290 
291         sp<IBinder> m_server;
292 };
293 
// A Parcel sub-range framed by start/end marker words, used to nest a parcel
// inside another for indirect transactions.
class BinderLibTestBundle : public Parcel
{
    public:
        BinderLibTestBundle(void) {}
        // Extracts a bundle from |source| at its current read position.
        // On any framing error the bundle stays empty and m_isValid false.
        // Note: readInt32()/appendFrom() return a non-zero status_t on failure.
        explicit BinderLibTestBundle(const Parcel *source) : m_isValid(false) {
            int32_t mark;
            int32_t bundleLen;
            size_t pos;

            if (source->readInt32(&mark))
                return;
            if (mark != MARK_START)
                return;
            if (source->readInt32(&bundleLen))
                return;
            pos = source->dataPosition();
            if (Parcel::appendFrom(source, pos, bundleLen))
                return;
            // Skip past the payload we just copied, then expect the end marker.
            source->setDataPosition(pos + bundleLen);
            if (source->readInt32(&mark))
                return;
            if (mark != MARK_END)
                return;
            m_isValid = true;
            setDataPosition(0);
        }
        // Serializes this bundle into |dest| with framing markers and length.
        void appendTo(Parcel *dest) {
            dest->writeInt32(MARK_START);
            dest->writeInt32(dataSize());
            dest->appendFrom(this, 0, dataSize());
            dest->writeInt32(MARK_END);
        };
        // True only when the parsing constructor saw well-formed framing.
        bool isValid(void) {
            return m_isValid;
        }
    private:
        enum {
            MARK_START  = B_PACK_CHARS('B','T','B','S'),
            MARK_END    = B_PACK_CHARS('B','T','B','E'),
        };
        bool m_isValid;
};
336 
337 class BinderLibTestEvent
338 {
339     public:
BinderLibTestEvent(void)340         BinderLibTestEvent(void)
341             : m_eventTriggered(false)
342         {
343             pthread_mutex_init(&m_waitMutex, nullptr);
344             pthread_cond_init(&m_waitCond, nullptr);
345         }
waitEvent(int timeout_s)346         int waitEvent(int timeout_s)
347         {
348             int ret;
349             pthread_mutex_lock(&m_waitMutex);
350             if (!m_eventTriggered) {
351                 struct timespec ts;
352                 clock_gettime(CLOCK_REALTIME, &ts);
353                 ts.tv_sec += timeout_s;
354                 pthread_cond_timedwait(&m_waitCond, &m_waitMutex, &ts);
355             }
356             ret = m_eventTriggered ? NO_ERROR : TIMED_OUT;
357             pthread_mutex_unlock(&m_waitMutex);
358             return ret;
359         }
getTriggeringThread()360         pthread_t getTriggeringThread()
361         {
362             return m_triggeringThread;
363         }
364     protected:
triggerEvent(void)365         void triggerEvent(void) {
366             pthread_mutex_lock(&m_waitMutex);
367             pthread_cond_signal(&m_waitCond);
368             m_eventTriggered = true;
369             m_triggeringThread = pthread_self();
370             pthread_mutex_unlock(&m_waitMutex);
371         };
372     private:
373         pthread_mutex_t m_waitMutex;
374         pthread_cond_t m_waitCond;
375         bool m_eventTriggered;
376         pthread_t m_triggeringThread;
377 };
378 
// Callback binder handed to the server: records the status the server
// reports (CALL_BACK) and, for CALL_BACK_VERIFY_BUF, checks transaction
// buffer placement before forwarding the payload for another round trip.
class BinderLibTestCallBack : public BBinder, public BinderLibTestEvent
{
    public:
        BinderLibTestCallBack()
            : m_result(NOT_ENOUGH_DATA)
            , m_prev_end(nullptr)
        {
        }
        // Status delivered by the last CALL_BACK transaction (NOT_ENOUGH_DATA
        // until one arrives).
        status_t getResult(void)
        {
            return m_result;
        }

    private:
        virtual status_t onTransact(uint32_t code,
                                    const Parcel& data, Parcel* reply,
                                    uint32_t flags = 0)
        {
            (void)reply;
            (void)flags;
            switch(code) {
            case BINDER_LIB_TEST_CALL_BACK: {
                // Store the caller-reported status (or the read error itself)
                // and wake anyone blocked in waitEvent().
                status_t status = data.readInt32(&m_result);
                if (status != NO_ERROR) {
                    m_result = status;
                }
                triggerEvent();
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF: {
                sp<IBinder> server;
                int ret;
                const uint8_t *buf = data.data();
                size_t size = data.dataSize();
                if (m_prev_end) {
                    /* 64-bit kernel needs at most 8 bytes to align buffer end */
                    EXPECT_LE((size_t)(buf - m_prev_end), (size_t)8);
                } else {
                    // First callback: buffer is expected to start page-aligned.
                    EXPECT_TRUE(IsPageAligned((void *)buf));
                }

                // Remember where this transaction's buffer (payload plus the
                // object-offset array) ended, for the adjacency check above.
                m_prev_end = buf + size + data.objectsCount() * sizeof(binder_size_t);

                if (size > 0) {
                    // Non-empty payload: forward it to the main server.
                    server = static_cast<BinderLibTestEnv *>(binder_env)->getServer();
                    ret = server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION,
                                           data, reply);
                    EXPECT_EQ(NO_ERROR, ret);
                }
                return NO_ERROR;
            }
            default:
                return UNKNOWN_TRANSACTION;
            }
        }

        status_t m_result;          // last reported status
        const uint8_t *m_prev_end;  // end of the previous buffer, or null
};
438 
439 class TestDeathRecipient : public IBinder::DeathRecipient, public BinderLibTestEvent
440 {
441     private:
binderDied(const wp<IBinder> & who)442         virtual void binderDied(const wp<IBinder>& who) {
443             (void)who;
444             triggerEvent();
445         };
446 };
447 
// ProcessState is per-process kernel state; verify libbinder aborts with a
// clear message if it is touched in a forked child.
TEST_F(BinderLibTest, CannotUseBinderAfterFork) {
    // EXPECT_DEATH works by forking the process
    EXPECT_DEATH({ ProcessState::self(); }, "libbinder ProcessState can not be used after fork");
}
452 
// Registering the service manager's own binder with itself should succeed.
TEST_F(BinderLibTest, AddManagerToManager) {
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder = IInterface::asBinder(sm);
    // Consistency: use the file's StatusEq matcher (instead of a bare
    // EXPECT_EQ) so a failure prints the status name, matching other tests.
    EXPECT_THAT(sm->addService(String16("binderLibTest-manager"), binder), StatusEq(NO_ERROR));
}
458 
// wasParceled() flips to true the first time a binder is written to a Parcel.
TEST_F(BinderLibTest, WasParceled) {
    auto binder = sp<BBinder>::make();
    EXPECT_FALSE(binder->wasParceled());
    Parcel data;
    data.writeStrongBinder(binder);
    EXPECT_TRUE(binder->wasParceled());
}
466 
// A plain synchronous no-op transaction round-trips with NO_ERROR.
TEST_F(BinderLibTest, NopTransaction) {
    Parcel data, reply;
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));
}
472 
// The same no-op succeeds when sent oneway (TF_ONE_WAY).
TEST_F(BinderLibTest, NopTransactionOneway) {
    Parcel data, reply;
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply, TF_ONE_WAY),
                StatusEq(NO_ERROR));
}
478 
// The no-op also succeeds with the TF_CLEAR_BUF flag set.
TEST_F(BinderLibTest, NopTransactionClear) {
    Parcel data, reply;
    // make sure it accepts the transaction flag
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply, TF_CLEAR_BUF),
                StatusEq(NO_ERROR));
}
485 
// Exercises the BINDER_FREEZE path: a frozen process must reject synchronous
// transactions, and getProcessFreezeInfo() must report the blocked call.
// Skips on devices lacking the cgroup v2 freezer or the BINDER_FREEZE ioctl.
TEST_F(BinderLibTest, Freeze) {
    Parcel data, reply, replypid;
    std::ifstream freezer_file("/sys/fs/cgroup/uid_0/cgroup.freeze");

    // Pass test on devices where the cgroup v2 freezer is not supported
    if (freezer_file.fail()) {
        GTEST_SKIP();
        return;
    }

    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_GETPID, data, &replypid), StatusEq(NO_ERROR));
    int32_t pid = replypid.readInt32();
    // Queue several oneway transactions so work is pending while freezing.
    for (int i = 0; i < 10; i++) {
        EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION_WAIT, data, &reply, TF_ONE_WAY));
    }

    // Pass test on devices where BINDER_FREEZE ioctl is not supported
    int ret = IPCThreadState::self()->freeze(pid, false, 0);
    if (ret != 0) {
        GTEST_SKIP();
        return;
    }

    // Freezing with no timeout while transactions are pending must fail.
    EXPECT_EQ(-EAGAIN, IPCThreadState::self()->freeze(pid, true, 0));

    // b/268232063 - succeeds ~0.08% of the time
    {
        auto ret = IPCThreadState::self()->freeze(pid, true, 0);
        EXPECT_TRUE(ret == -EAGAIN || ret == OK);
    }

    // With a 1s timeout the freeze completes; a sync transaction must then
    // be rejected with FAILED_TRANSACTION.
    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, true, 1000));
    EXPECT_EQ(FAILED_TRANSACTION, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));

    uint32_t sync_received, async_received;

    // Exactly one blocked sync transaction and no async ones recorded.
    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->getProcessFreezeInfo(pid, &sync_received,
                &async_received));

    EXPECT_EQ(sync_received, 1);
    EXPECT_EQ(async_received, 0);

    // Unfreeze and verify normal operation resumes.
    EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, false, 0));
    EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));
}
531 
// The server echoes the requested value back as the transaction status, for
// zero, negative, and positive values alike.
TEST_F(BinderLibTest, SetError) {
    const int32_t testValues[] = {0, -123, 123};
    for (int32_t value : testValues) {
        Parcel data, reply;
        data.writeInt32(value);
        EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_SET_ERROR_TRANSACTION, data, &reply),
                    StatusEq(value));
    }
}
541 
// The main server reports id 0 (see the GetId helper above).
TEST_F(BinderLibTest, GetId) {
    EXPECT_THAT(GetId(m_server), HasValue(0));
}
545 
// Records the pointer size of this test process and of the server process as
// test properties (they may differ, e.g. 32-bit client vs 64-bit server).
TEST_F(BinderLibTest, PtrSize) {
    int32_t ptrsize;
    Parcel data, reply;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));
    EXPECT_THAT(reply.readInt32(&ptrsize), StatusEq(NO_ERROR));
    RecordProperty("TestPtrSize", sizeof(void *));
    // Fix: record the size reported by the server, not the local sizeof
    // again — previously |ptrsize| was read from the reply but never used.
    RecordProperty("ServerPtrSize", ptrsize);
}
557 
// One indirect transaction fans GET_ID out to three freshly added servers;
// each nested reply bundle must carry the matching server id.
TEST_F(BinderLibTest, IndirectGetId2)
{
    int32_t id;
    int32_t count;
    Parcel data, reply;
    int32_t serverId[3];

    data.writeInt32(ARRAY_SIZE(serverId));
    for (size_t i = 0; i < ARRAY_SIZE(serverId); i++) {
        sp<IBinder> server;
        BinderLibTestBundle datai;  // empty payload for the nested GET_ID call

        server = addServer(&serverId[i]);
        ASSERT_TRUE(server != nullptr);
        data.writeStrongBinder(server);
        data.writeInt32(BINDER_LIB_TEST_GET_ID_TRANSACTION);
        datai.appendTo(&data);
    }

    ASSERT_THAT(m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));

    // The top-level server reports its own id (0) first.
    ASSERT_THAT(reply.readInt32(&id), StatusEq(NO_ERROR));
    EXPECT_EQ(0, id);

    ASSERT_THAT(reply.readInt32(&count), StatusEq(NO_ERROR));
    EXPECT_EQ(ARRAY_SIZE(serverId), (size_t)count);

    for (size_t i = 0; i < (size_t)count; i++) {
        BinderLibTestBundle replyi(&reply);
        EXPECT_TRUE(replyi.isValid());
        EXPECT_THAT(replyi.readInt32(&id), StatusEq(NO_ERROR));
        EXPECT_EQ(serverId[i], id);
        // Each nested reply must be fully consumed.
        EXPECT_EQ(replyi.dataSize(), replyi.dataPosition());
    }

    EXPECT_EQ(reply.dataSize(), reply.dataPosition());
}
596 
// Two levels of indirection: each helper server is told to send an indirect
// transaction back to the main server, which runs GET_ID. Validates framing
// and ids at both nesting levels.
TEST_F(BinderLibTest, IndirectGetId3)
{
    int32_t id;
    int32_t count;
    Parcel data, reply;
    int32_t serverId[3];

    data.writeInt32(ARRAY_SIZE(serverId));
    for (size_t i = 0; i < ARRAY_SIZE(serverId); i++) {
        sp<IBinder> server;
        BinderLibTestBundle datai;   // payload for the helper's INDIRECT call
        BinderLibTestBundle datai2;  // empty payload for the inner GET_ID

        server = addServer(&serverId[i]);
        ASSERT_TRUE(server != nullptr);
        data.writeStrongBinder(server);
        data.writeInt32(BINDER_LIB_TEST_INDIRECT_TRANSACTION);

        // Inner level: helper forwards one GET_ID to the main server.
        datai.writeInt32(1);
        datai.writeStrongBinder(m_server);
        datai.writeInt32(BINDER_LIB_TEST_GET_ID_TRANSACTION);
        datai2.appendTo(&datai);

        datai.appendTo(&data);
    }

    ASSERT_THAT(m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));

    // The top-level server reports its own id (0) first.
    ASSERT_THAT(reply.readInt32(&id), StatusEq(NO_ERROR));
    EXPECT_EQ(0, id);

    ASSERT_THAT(reply.readInt32(&count), StatusEq(NO_ERROR));
    EXPECT_EQ(ARRAY_SIZE(serverId), (size_t)count);

    for (size_t i = 0; i < (size_t)count; i++) {
        int32_t counti;

        BinderLibTestBundle replyi(&reply);
        EXPECT_TRUE(replyi.isValid());
        EXPECT_THAT(replyi.readInt32(&id), StatusEq(NO_ERROR));
        EXPECT_EQ(serverId[i], id);

        ASSERT_THAT(replyi.readInt32(&counti), StatusEq(NO_ERROR));
        EXPECT_EQ(1, counti);

        // Inner reply: main server (id 0) answered the forwarded GET_ID.
        BinderLibTestBundle replyi2(&replyi);
        EXPECT_TRUE(replyi2.isValid());
        EXPECT_THAT(replyi2.readInt32(&id), StatusEq(NO_ERROR));
        EXPECT_EQ(0, id);
        EXPECT_EQ(replyi2.dataSize(), replyi2.dataPosition());

        EXPECT_EQ(replyi.dataSize(), replyi.dataPosition());
    }

    EXPECT_EQ(reply.dataSize(), reply.dataPosition());
}
654 
// A oneway NOP_CALL_BACK must cause the server to invoke our callback binder,
// which reports NO_ERROR back.
TEST_F(BinderLibTest, CallBack)
{
    Parcel data, reply;
    sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
    data.writeStrongBinder(callBack);
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_NOP_CALL_BACK, data, &reply, TF_ONE_WAY),
                StatusEq(NO_ERROR));
    EXPECT_THAT(callBack->waitEvent(5), StatusEq(NO_ERROR));
    EXPECT_THAT(callBack->getResult(), StatusEq(NO_ERROR));
}
665 
// The calling-guard transaction on a helper server is expected to return
// DEAD_OBJECT; presumably the server-side guard rejects the calling context —
// see the server implementation (not in this file) for the exact semantics.
TEST_F(BinderLibTest, BinderCallContextGuard) {
    sp<IBinder> binder = addServer();
    Parcel data, reply;
    EXPECT_THAT(binder->transact(BINDER_LIB_TEST_USE_CALLING_GUARD_TRANSACTION, data, &reply),
                StatusEq(DEAD_OBJECT));
}
672 
// Spawning a helper server through the main server succeeds.
TEST_F(BinderLibTest, AddServer)
{
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);
}
678 
// linkToDeath() fires when the remote process exits even though we still
// hold a strong ref to the proxy; unlinking afterwards reports DEAD_OBJECT.
TEST_F(BinderLibTest, DeathNotificationStrongRef)
{
    sp<IBinder> sbinder;

    sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();

    {
        sp<IBinder> binder = addServer();
        ASSERT_TRUE(binder != nullptr);
        EXPECT_THAT(binder->linkToDeath(testDeathRecipient), StatusEq(NO_ERROR));
        sbinder = binder;
    }
    {
        Parcel data, reply;
        // Ask the helper server to exit (oneway, returns immediately).
        EXPECT_THAT(sbinder->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY),
                    StatusEq(OK));
    }
    // Push the queued oneway command out to the driver.
    IPCThreadState::self()->flushCommands();
    EXPECT_THAT(testDeathRecipient->waitEvent(5), StatusEq(NO_ERROR));
    EXPECT_THAT(sbinder->unlinkToDeath(testDeathRecipient), StatusEq(DEAD_OBJECT));
}
700 
// Several remote processes link to the same target's death; all linked
// callbacks must fire when it exits, with additional passive strong-ref
// holders in the mix.
TEST_F(BinderLibTest, DeathNotificationMultiple)
{
    status_t ret;
    const int clientcount = 2;
    sp<IBinder> target;
    sp<IBinder> linkedclient[clientcount];
    sp<BinderLibTestCallBack> callBack[clientcount];
    sp<IBinder> passiveclient[clientcount];

    target = addServer();
    ASSERT_TRUE(target != nullptr);
    for (int i = 0; i < clientcount; i++) {
        {
            Parcel data, reply;

            // Helper server i links to target's death and reports back
            // through callBack[i] when the notification arrives.
            linkedclient[i] = addServer();
            ASSERT_TRUE(linkedclient[i] != nullptr);
            callBack[i] = new BinderLibTestCallBack();
            data.writeStrongBinder(target);
            data.writeStrongBinder(callBack[i]);
            EXPECT_THAT(linkedclient[i]->transact(BINDER_LIB_TEST_LINK_DEATH_TRANSACTION, data,
                                                  &reply, TF_ONE_WAY),
                        StatusEq(NO_ERROR));
        }
        {
            Parcel data, reply;

            // A second set of helpers only holds strong refs to target.
            passiveclient[i] = addServer();
            ASSERT_TRUE(passiveclient[i] != nullptr);
            data.writeStrongBinder(target);
            EXPECT_THAT(passiveclient[i]->transact(BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION, data,
                                                   &reply, TF_ONE_WAY),
                        StatusEq(NO_ERROR));
        }
    }
    {
        Parcel data, reply;
        ret = target->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
        EXPECT_EQ(0, ret);
    }

    // Every linked client must observe the death.
    for (int i = 0; i < clientcount; i++) {
        EXPECT_THAT(callBack[i]->waitEvent(5), StatusEq(NO_ERROR));
        EXPECT_THAT(callBack[i]->getResult(), StatusEq(NO_ERROR));
    }
}
747 
// Verifies linkToDeath() on an already-dead binder still delivers the
// notification, and that notifications go to the process workqueue rather
// than the registering thread (detailed rationale in the inline comment).
TEST_F(BinderLibTest, DeathNotificationThread)
{
    status_t ret;
    sp<BinderLibTestCallBack> callback;
    sp<IBinder> target = addServer();
    ASSERT_TRUE(target != nullptr);
    sp<IBinder> client = addServer();
    ASSERT_TRUE(client != nullptr);

    sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();

    EXPECT_THAT(target->linkToDeath(testDeathRecipient), StatusEq(NO_ERROR));

    {
        Parcel data, reply;
        ret = target->transact(BINDER_LIB_TEST_EXIT_TRANSACTION, data, &reply, TF_ONE_WAY);
        EXPECT_EQ(0, ret);
    }

    /* Make sure it's dead */
    testDeathRecipient->waitEvent(5);

    /* Now, pass the ref to another process and ask that process to
     * call linkToDeath() on it, and wait for a response. This tests
     * two things:
     * 1) You still get death notifications when calling linkToDeath()
     *    on a ref that is already dead when it was passed to you.
     * 2) That death notifications are not directly pushed to the thread
     *    registering them, but to the threadpool (proc workqueue) instead.
     *
     * 2) is tested because the thread handling BINDER_LIB_TEST_DEATH_TRANSACTION
     * is blocked on a condition variable waiting for the death notification to be
     * called; therefore, that thread is not available for handling proc work.
     * So, if the death notification was pushed to the thread workqueue, the callback
     * would never be called, and the test would timeout and fail.
     *
     * Note that we can't do this part of the test from this thread itself, because
     * the binder driver would only push death notifications to the thread if
     * it is a looper thread, which this thread is not.
     *
     * See b/23525545 for details.
     */
    {
        Parcel data, reply;

        callback = new BinderLibTestCallBack();
        data.writeStrongBinder(target);
        data.writeStrongBinder(callback);
        EXPECT_THAT(client->transact(BINDER_LIB_TEST_LINK_DEATH_TRANSACTION, data, &reply,
                                     TF_ONE_WAY),
                    StatusEq(NO_ERROR));
    }

    EXPECT_THAT(callback->waitEvent(5), StatusEq(NO_ERROR));
    EXPECT_THAT(callback->getResult(), StatusEq(NO_ERROR));
}
804 
// Passes the write end of a pipe to the server, which writes one byte to it;
// verifies the byte arrives and that the server's copy of the fd is closed
// afterwards (read() returning 0 = EOF).
TEST_F(BinderLibTest, PassFile) {
    int ret;
    int pipefd[2];
    uint8_t buf[1] = { 0 };
    uint8_t write_value = 123;

    ret = pipe2(pipefd, O_NONBLOCK);
    ASSERT_EQ(0, ret);

    {
        Parcel data, reply;
        uint8_t writebuf[1] = { write_value };

        // takeOwnership=true: the Parcel closes our copy of pipefd[1].
        EXPECT_THAT(data.writeFileDescriptor(pipefd[1], true), StatusEq(NO_ERROR));

        EXPECT_THAT(data.writeInt32(sizeof(writebuf)), StatusEq(NO_ERROR));

        EXPECT_THAT(data.write(writebuf, sizeof(writebuf)), StatusEq(NO_ERROR));

        EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_WRITE_FILE_TRANSACTION, data, &reply),
                    StatusEq(NO_ERROR));
    }

    ret = read(pipefd[0], buf, sizeof(buf));
    EXPECT_EQ(sizeof(buf), (size_t)ret);
    EXPECT_EQ(write_value, buf[0]);

    waitForReadData(pipefd[0], 5000); /* wait for other process to close pipe */

    // EOF proves every write end (including the server's) has been closed.
    ret = read(pipefd[0], buf, sizeof(buf));
    EXPECT_EQ(0, ret);

    close(pipefd[0]);
}
839 
// Same as PassFile but with writeDupParcelFileDescriptor() and a multi-byte
// payload: the server writes the data back through the pipe and closes its fd.
TEST_F(BinderLibTest, PassParcelFileDescriptor) {
    const int datasize = 123;
    std::vector<uint8_t> writebuf(datasize);
    for (size_t i = 0; i < writebuf.size(); ++i) {
        writebuf[i] = i;
    }

    android::base::unique_fd read_end, write_end;
    {
        int pipefd[2];
        ASSERT_EQ(0, pipe2(pipefd, O_NONBLOCK));
        read_end.reset(pipefd[0]);
        write_end.reset(pipefd[1]);
    }
    {
        Parcel data;
        // Dup'd into the parcel, so we can (and do) drop our copy right away.
        EXPECT_EQ(NO_ERROR, data.writeDupParcelFileDescriptor(write_end.get()));
        write_end.reset();
        EXPECT_EQ(NO_ERROR, data.writeInt32(datasize));
        EXPECT_EQ(NO_ERROR, data.write(writebuf.data(), datasize));

        Parcel reply;
        EXPECT_EQ(NO_ERROR,
                  m_server->transact(BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION, data,
                                     &reply));
    }
    std::vector<uint8_t> readbuf(datasize);
    EXPECT_EQ(datasize, read(read_end.get(), readbuf.data(), datasize));
    EXPECT_EQ(writebuf, readbuf);

    waitForReadData(read_end.get(), 5000); /* wait for other process to close pipe */

    // EOF: the server closed its duplicate of the write end.
    EXPECT_EQ(0, read(read_end.get(), readbuf.data(), datasize));
}
874 
TEST_F(BinderLibTest, PromoteLocal) {
    sp<IBinder> strongRef = new BBinder();
    wp<IBinder> weakRef = strongRef;

    // While a strong reference is alive, promotion yields the same object.
    sp<IBinder> promoted = weakRef.promote();
    EXPECT_TRUE(strongRef != nullptr);
    EXPECT_EQ(strongRef, promoted);

    // Once every strong reference is gone, promotion must fail.
    strongRef = nullptr;
    promoted = nullptr;
    promoted = weakRef.promote();
    EXPECT_TRUE(promoted == nullptr);
}
886 
TEST_F(BinderLibTest, LocalGetExtension) {
    // setExtension()/getExtension() should round-trip on a purely local binder.
    sp<BBinder> host = new BBinder();
    sp<IBinder> attached = new BBinder();
    host->setExtension(attached);
    EXPECT_EQ(attached, host->getExtension());
}
893 
TEST_F(BinderLibTest, RemoteGetExtension) {
    sp<IBinder> service = addServer();
    ASSERT_TRUE(service != nullptr);

    // Fetching the extension across the process boundary should produce a
    // live binder we can ping.
    sp<IBinder> ext;
    EXPECT_EQ(NO_ERROR, service->getExtension(&ext));
    ASSERT_NE(nullptr, ext.get());

    EXPECT_EQ(NO_ERROR, ext->pingBinder());
}
904 
// Inspects the raw flat_binder_object the driver hands back for a remote
// binder: it must be a handle (not a pointer), its cookie must be zeroed,
// and the upper 32 bits of the binder field must be zero — i.e. the kernel
// must not leak the remote process's pointer bits to this process.
TEST_F(BinderLibTest, CheckHandleZeroBinderHighBitsZeroCookie) {
    Parcel data, reply;

    // Server replies with a strong binder to itself.
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_GET_SELF_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));

    // Read the raw object without converting it into an IBinder.
    const flat_binder_object *fb = reply.readObject(false);
    ASSERT_TRUE(fb != nullptr);
    EXPECT_EQ(BINDER_TYPE_HANDLE, fb->hdr.type);
    // The handle should resolve back to the same proxy we already hold.
    EXPECT_EQ(m_server, ProcessState::self()->getStrongProxyForHandle(fb->handle));
    EXPECT_EQ((binder_uintptr_t)0, fb->cookie);
    EXPECT_EQ((uint64_t)0, (uint64_t)fb->binder >> 32);
}
918 
// Regression test: sending a transaction that references a handle whose
// binder has already been freed must be rejected by the driver rather than
// crashing the target process.
TEST_F(BinderLibTest, FreedBinder) {
    status_t ret;

    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    __u32 freedHandle;
    wp<IBinder> keepFreedBinder;
    {
        Parcel data, reply;
        ASSERT_THAT(server->transact(BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION, data, &reply),
                    StatusEq(NO_ERROR));
        // Peek at the raw object in the reply to learn its driver handle.
        struct flat_binder_object *freed = (struct flat_binder_object *)(reply.data());
        freedHandle = freed->handle;
        /* Add a weak ref to the freed binder so the driver does not
         * delete its reference to it - otherwise the transaction
         * fails regardless of whether the driver is fixed.
         */
        keepFreedBinder = reply.readStrongBinder();
    }
    // Flush so the strong ref decrement reaches the driver and the remote
    // binder is actually freed before we reuse its handle below.
    IPCThreadState::self()->flushCommands();
    {
        Parcel data, reply;
        data.writeStrongBinder(server);
        /* Replace original handle with handle to the freed binder */
        struct flat_binder_object *strong = (struct flat_binder_object *)(data.data());
        __u32 oldHandle = strong->handle;
        strong->handle = freedHandle;
        ret = server->transact(BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION, data, &reply);
        /* Returns DEAD_OBJECT (-32) if target crashes and
         * FAILED_TRANSACTION if the driver rejects the invalid
         * object.
         */
        EXPECT_EQ((status_t)FAILED_TRANSACTION, ret);
        /* Restore original handle so parcel destructor does not use
         * the wrong handle.
         */
        strong->handle = oldHandle;
    }
}
959 
// Builds a doubly-nested indirect transaction (each iteration wraps the
// previous parcel as a bundle) whose inner callbacks run
// BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF on the server side.
// NOTE(review): presumably this verifies the server does not see mapped
// transaction-header bytes in its received buffer — confirm against the
// server's VERIFY_BUF handler, which is outside this chunk.
TEST_F(BinderLibTest, CheckNoHeaderMappedInUser) {
    Parcel data, reply;
    sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
    for (int i = 0; i < 2; i++) {
        BinderLibTestBundle datai;
        // Snapshot the parcel built so far into a bundle...
        datai.appendFrom(&data, 0, data.dataSize());

        // ...then rebuild the parcel with one callback entry and re-append
        // the snapshot, nesting the previous level inside the new one.
        data.freeData();
        data.writeInt32(1);
        data.writeStrongBinder(callBack);
        data.writeInt32(BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF);

        datai.appendTo(&data);
    }
    EXPECT_THAT(m_server->transact(BINDER_LIB_TEST_INDIRECT_TRANSACTION, data, &reply),
                StatusEq(NO_ERROR));
}
977 
// Verifies that oneway transactions queued to a single-threaded (polling)
// server are delivered in submission order, even when the first one makes
// the server busy long enough for the second to land on the async queue.
TEST_F(BinderLibTest, OnewayQueueing)
{
    Parcel data, data2;

    sp<IBinder> pollServer = addPollServer();

    // First oneway call: server delays 500ms before invoking the callback.
    sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
    data.writeStrongBinder(callBack);
    data.writeInt32(500000); // delay in us before calling back

    // Second oneway call: no delay.
    sp<BinderLibTestCallBack> callBack2 = new BinderLibTestCallBack();
    data2.writeStrongBinder(callBack2);
    data2.writeInt32(0); // delay in us

    EXPECT_THAT(pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data, nullptr, TF_ONE_WAY),
                StatusEq(NO_ERROR));

    // The delay ensures that this second transaction will end up on the async_todo list
    // (for a single-threaded server)
    EXPECT_THAT(pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data2, nullptr, TF_ONE_WAY),
                StatusEq(NO_ERROR));

    // The server will ensure that the two transactions are handled in the expected order;
    // If the ordering is not as expected, an error will be returned through the callbacks.
    EXPECT_THAT(callBack->waitEvent(2), StatusEq(NO_ERROR));
    EXPECT_THAT(callBack->getResult(), StatusEq(NO_ERROR));

    EXPECT_THAT(callBack2->waitEvent(2), StatusEq(NO_ERROR));
    EXPECT_THAT(callBack2->getResult(), StatusEq(NO_ERROR));
}
1008 
TEST_F(BinderLibTest, WorkSourceUnsetByDefault)
{
    Parcel data, reply;
    data.writeInterfaceToken(binderLibTestServiceName);
    const status_t status =
            m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
    // With no work source configured, the server observes the sentinel -1.
    EXPECT_EQ(-1, reply.readInt32());
    EXPECT_EQ(NO_ERROR, status);
}
1018 
TEST_F(BinderLibTest, WorkSourceSet)
{
    Parcel data, reply;
    IPCThreadState::self()->clearCallingWorkSource();
    const int64_t previous = IPCThreadState::self()->setCallingWorkSourceUid(100);
    data.writeInterfaceToken(binderLibTestServiceName);
    const status_t status =
            m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
    // Server must see uid 100; the previous work source was unset (-1).
    EXPECT_EQ(100, reply.readInt32());
    EXPECT_EQ(-1, previous);
    EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
    EXPECT_EQ(NO_ERROR, status);
}
1032 
TEST_F(BinderLibTest, WorkSourceSetWithoutPropagation)
{
    Parcel data, reply;

    // A work source set "without propagation" must not cross the binder call.
    IPCThreadState::self()->setCallingWorkSourceUidWithoutPropagation(100);
    EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());

    data.writeInterfaceToken(binderLibTestServiceName);
    const status_t status =
            m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
    // Server still sees the unset sentinel, and the flag stays cleared.
    EXPECT_EQ(-1, reply.readInt32());
    EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
    EXPECT_EQ(NO_ERROR, status);
}
1047 
TEST_F(BinderLibTest, WorkSourceCleared)
{
    Parcel data, reply;

    IPCThreadState::self()->setCallingWorkSourceUid(100);
    // clearCallingWorkSource() hands back a token encoding the prior state;
    // its low 32 bits carry the previous uid.
    const int64_t token = IPCThreadState::self()->clearCallingWorkSource();
    const int32_t previousUid = (int32_t)token;
    data.writeInterfaceToken(binderLibTestServiceName);
    const status_t status =
            m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);

    // After clearing, the server must see the unset sentinel again.
    EXPECT_EQ(-1, reply.readInt32());
    EXPECT_EQ(100, previousUid);
    EXPECT_EQ(NO_ERROR, status);
}
1063 
TEST_F(BinderLibTest, WorkSourceRestored)
{
    Parcel data, reply;

    // Set, clear, then restore from the token: the original uid should be
    // back in effect for the next call.
    IPCThreadState::self()->setCallingWorkSourceUid(100);
    const int64_t token = IPCThreadState::self()->clearCallingWorkSource();
    IPCThreadState::self()->restoreCallingWorkSource(token);

    data.writeInterfaceToken(binderLibTestServiceName);
    const status_t status =
            m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);

    EXPECT_EQ(100, reply.readInt32());
    EXPECT_EQ(true, IPCThreadState::self()->shouldPropagateWorkSource());
    EXPECT_EQ(NO_ERROR, status);
}
1080 
TEST_F(BinderLibTest, PropagateFlagSet)
{
    // Setting a work-source uid turns the propagate flag back on even after
    // it was explicitly cleared.
    IPCThreadState::self()->clearPropagateWorkSource();
    IPCThreadState::self()->setCallingWorkSourceUid(100);
    EXPECT_TRUE(IPCThreadState::self()->shouldPropagateWorkSource());
}
1087 
TEST_F(BinderLibTest, PropagateFlagCleared)
{
    // Clearing the propagate flag after a uid was set leaves it off.
    IPCThreadState::self()->setCallingWorkSourceUid(100);
    IPCThreadState::self()->clearPropagateWorkSource();
    EXPECT_FALSE(IPCThreadState::self()->shouldPropagateWorkSource());
}
1094 
TEST_F(BinderLibTest, PropagateFlagRestored)
{
    // setCallingWorkSourceUid() returns a 64-bit token encoding the previous
    // work-source state (including the propagate bit in the upper half).
    // The token was previously narrowed to `int`, which could silently drop
    // those upper bits before restoreCallingWorkSource() consumed it; keep
    // the full width, matching WorkSourceRestored.
    int64_t token = IPCThreadState::self()->setCallingWorkSourceUid(100);
    IPCThreadState::self()->restoreCallingWorkSource(token);

    // The restored (pre-set) state had no propagation enabled.
    EXPECT_EQ(false, IPCThreadState::self()->shouldPropagateWorkSource());
}
1102 
// Once a work source is set, it must be propagated on every subsequent
// binder call, not just the first one.
TEST_F(BinderLibTest, WorkSourcePropagatedForAllFollowingBinderCalls)
{
    IPCThreadState::self()->setCallingWorkSourceUid(100);

    // First call: previously the status and reply of this call were never
    // checked (dead store to `ret`); verify it too.
    Parcel data, reply;
    data.writeInterfaceToken(binderLibTestServiceName);
    status_t ret = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data, &reply);
    EXPECT_EQ(NO_ERROR, ret);
    EXPECT_EQ(100, reply.readInt32());

    // Second call: the work source must still be in effect.
    Parcel data2, reply2;
    data2.writeInterfaceToken(binderLibTestServiceName);
    status_t ret2 = m_server->transact(BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION, data2, &reply2);
    EXPECT_EQ(100, reply2.readInt32());
    EXPECT_EQ(NO_ERROR, ret2);
}
1119 
TEST_F(BinderLibTest, SchedPolicySet) {
    sp<IBinder> service = addServer();
    ASSERT_TRUE(service != nullptr);

    Parcel data, reply;
    EXPECT_THAT(service->transact(BINDER_LIB_TEST_GET_SCHEDULING_POLICY, data, &reply),
                StatusEq(NO_ERROR));

    // The server reports the scheduling policy/priority of the thread that
    // handled our call.
    const int policy = reply.readInt32();
    const int priority = reply.readInt32();

    // SCHED_RESET_ON_FORK may be OR'd into the reported policy; mask it off.
    EXPECT_EQ(kSchedPolicy, policy & (~SCHED_RESET_ON_FORK));
    EXPECT_EQ(kSchedPriority, priority);
}
1134 
TEST_F(BinderLibTest, InheritRt) {
    sp<IBinder> service = addServer();
    ASSERT_TRUE(service != nullptr);

    // Boost the calling thread to realtime (SCHED_RR) at a higher priority
    // than the server's default.
    const struct sched_param param {
        .sched_priority = kSchedPriorityMore,
    };
    EXPECT_EQ(0, sched_setscheduler(getpid(), SCHED_RR, &param));

    Parcel data, reply;
    EXPECT_THAT(service->transact(BINDER_LIB_TEST_GET_SCHEDULING_POLICY, data, &reply),
                StatusEq(NO_ERROR));

    const int policy = reply.readInt32();
    const int priority = reply.readInt32();

    // The server thread should run at the caller's boosted priority
    // (SCHED_RESET_ON_FORK may be OR'd into the policy; mask it off).
    EXPECT_EQ(kSchedPolicy, policy & (~SCHED_RESET_ON_FORK));
    EXPECT_EQ(kSchedPriorityMore, priority);
}
1154 
TEST_F(BinderLibTest, VectorSent) {
    sp<IBinder> service = addServer();
    ASSERT_TRUE(service != nullptr);

    // Include both extremes of the value range to catch truncation.
    std::vector<uint64_t> const sent = {std::numeric_limits<uint64_t>::max(), 0, 200};
    Parcel data, reply;
    data.writeUint64Vector(sent);

    EXPECT_THAT(service->transact(BINDER_LIB_TEST_ECHO_VECTOR, data, &reply), StatusEq(NO_ERROR));
    std::vector<uint64_t> received;
    EXPECT_THAT(reply.readUint64Vector(&received), StatusEq(OK));
    EXPECT_EQ(received, sent);
}
1168 
TEST_F(BinderLibTest, FileDescriptorRemainsNonBlocking) {
    sp<IBinder> service = addServer();
    ASSERT_TRUE(service != nullptr);

    Parcel reply;
    EXPECT_THAT(service->transact(BINDER_LIB_TEST_GET_NON_BLOCKING_FD, {} /*data*/, &reply),
                StatusEq(NO_ERROR));
    base::unique_fd fd;
    EXPECT_THAT(reply.readUniqueFileDescriptor(&fd), StatusEq(OK));

    // O_NONBLOCK must survive the fd's round trip through the binder driver.
    const int flags = fcntl(fd.get(), F_GETFL);
    ASSERT_NE(flags, -1);
    EXPECT_EQ(flags & O_NONBLOCK, O_NONBLOCK);
}
1183 
// see ProcessState.cpp BINDER_VM_SIZE = 1MB.
// This value is not exposed, but some code in the framework relies on being able to use
// buffers near the cap size.
// Two payload sizes bracketing that cap: one expected to still fit, one
// expected to exceed it and fail the transaction.
constexpr size_t kSizeBytesAlmostFull = 950'000;
constexpr size_t kSizeBytesOverFull = 1'050'000;
1189 
TEST_F(BinderLibTest, GargantuanVectorSent) {
    sp<IBinder> service = addServer();
    ASSERT_TRUE(service != nullptr);

    for (size_t attempt = 0; attempt < 10; attempt++) {
        // Vary the size slightly per iteration to exercise any caching an
        // implementation might do for near-cap buffers.
        const std::vector<uint64_t> sent((kSizeBytesAlmostFull + attempt) / sizeof(uint64_t), 42);

        Parcel data, reply;
        data.writeUint64Vector(sent);
        EXPECT_THAT(service->transact(BINDER_LIB_TEST_ECHO_VECTOR, data, &reply), StatusEq(NO_ERROR))
                << attempt;
        std::vector<uint64_t> received;
        EXPECT_THAT(reply.readUint64Vector(&received), StatusEq(OK));
        EXPECT_EQ(received, sent);
    }
}
1207 
TEST_F(BinderLibTest, LimitExceededVectorSent) {
    sp<IBinder> service = addServer();
    ASSERT_TRUE(service != nullptr);

    // A payload larger than the binder buffer cap cannot be delivered.
    const std::vector<uint64_t> oversized(kSizeBytesOverFull / sizeof(uint64_t), 42);
    Parcel data, reply;
    data.writeUint64Vector(oversized);
    EXPECT_THAT(service->transact(BINDER_LIB_TEST_ECHO_VECTOR, data, &reply),
                StatusEq(FAILED_TRANSACTION));
}
1218 
// A userspace process must not be able to smuggle a BINDER_TYPE_PTR
// (scatter-gather buffer) object into a transaction; it points into this
// process's memory. Forge one by overwriting a legitimate offset-table
// entry and confirm the transaction is rejected somewhere along the path.
TEST_F(BinderLibTest, BufRejected) {
    Parcel data, reply;
    uint32_t buf;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    binder_buffer_object obj {
        .hdr = { .type = BINDER_TYPE_PTR },
        .flags = 0,
        .buffer = reinterpret_cast<binder_uintptr_t>((void*)&buf),
        .length = 4,
    };
    data.setDataCapacity(1024);
    // Write a bogus object at offset 0 to get an entry in the offset table
    data.writeFileDescriptor(0);
    EXPECT_EQ(data.objectsCount(), 1);
    uint8_t *parcelData = const_cast<uint8_t*>(data.data());
    // And now, overwrite it with the buffer object
    memcpy(parcelData, &obj, sizeof(obj));
    data.setDataSize(sizeof(obj));

    // The offset-table entry survives; only the object bytes changed.
    EXPECT_EQ(data.objectsCount(), 1);

    // Either the kernel should reject this transaction (if it's correct), but
    // if it's not, the server implementation should return an error if it
    // finds an object in the received Parcel.
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_REJECT_OBJECTS, data, &reply),
                Not(StatusEq(NO_ERROR)));
}
1248 
// Regression test: a forged BINDER_TYPE_WEAK_BINDER object in a transaction
// must be rejected with BAD_VALUE, and rejection must not corrupt the
// refcounts of other (legitimate) objects in the same parcel.
TEST_F(BinderLibTest, WeakRejected) {
    Parcel data, reply;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);

    // Hand-build a weak-binder object from a live local binder's internals.
    auto binder = sp<BBinder>::make();
    wp<BBinder> wpBinder(binder);
    flat_binder_object obj{
            .hdr = {.type = BINDER_TYPE_WEAK_BINDER},
            .flags = 0,
            .binder = reinterpret_cast<uintptr_t>(wpBinder.get_refs()),
            .cookie = reinterpret_cast<uintptr_t>(wpBinder.unsafe_get()),
    };
    data.setDataCapacity(1024);
    // Write a bogus object at offset 0 to get an entry in the offset table
    data.writeFileDescriptor(0);
    EXPECT_EQ(data.objectsCount(), 1);
    uint8_t *parcelData = const_cast<uint8_t *>(data.data());
    // And now, overwrite it with the weak binder
    memcpy(parcelData, &obj, sizeof(obj));
    data.setDataSize(sizeof(obj));

    // a previous bug caused other objects to be released an extra time, so we
    // test with an object that libbinder will actually try to release
    EXPECT_EQ(OK, data.writeStrongBinder(sp<BBinder>::make()));

    EXPECT_EQ(data.objectsCount(), 2);

    // send it many times, since previous error was memory corruption, make it
    // more likely that the server crashes
    for (size_t i = 0; i < 100; i++) {
        EXPECT_THAT(server->transact(BINDER_LIB_TEST_REJECT_OBJECTS, data, &reply),
                    StatusEq(BAD_VALUE));
    }

    // The server must still be alive and responsive afterwards.
    EXPECT_THAT(server->pingBinder(), StatusEq(NO_ERROR));
}
1286 
// The server reports via the transaction status whether it could retrieve
// the caller's security context (SID); the reply pointer is intentionally
// null since only the status matters.
TEST_F(BinderLibTest, GotSid) {
    sp<IBinder> server = addServer();
    // addServer() can fail and return null; every sibling test checks this
    // before dereferencing — previously this test would crash instead of
    // failing cleanly.
    ASSERT_TRUE(server != nullptr);

    Parcel data;
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_CAN_GET_SID, data, nullptr), StatusEq(OK));
}
1293 
// Flattenable that claims an arbitrary number of file descriptors, used to
// probe Parcel's fd-count limits. Every fd slot is filled with STDIN_FILENO.
struct TooManyFdsFlattenable : Flattenable<TooManyFdsFlattenable> {
    TooManyFdsFlattenable(size_t fdCount) : mFdCount(fdCount) {}

    // Flattenable protocol
    size_t getFlattenedSize() const {
        // Return a valid non-zero size here so we don't get an unintended
        // BAD_VALUE from Parcel::write
        return 16;
    }
    size_t getFdCount() const { return mFdCount; }
    // Fills every offered fd slot; buffer/size are intentionally untouched.
    status_t flatten(void *& /*buffer*/, size_t & /*size*/, int *&fds, size_t &count) const {
        for (size_t i = 0; i < count; i++) {
            fds[i] = STDIN_FILENO;
        }
        return NO_ERROR;
    }
    status_t unflatten(void const *& /*buffer*/, size_t & /*size*/, int const *& /*fds*/,
                       size_t & /*count*/) {
        /* This doesn't get called */
        return NO_ERROR;
    }

    // Number of fds this flattenable claims to carry.
    size_t mFdCount;
};
1318 
TEST_F(BinderLibTest, TooManyFdsFlattenable) {
    rlimit originalLimit;
    ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &originalLimit));

    // Restore the process fd limit when the test exits, pass or fail.
    base::ScopeGuard restoreLimit([&]() { setrlimit(RLIMIT_NOFILE, &originalLimit); });

    rlimit loweredLimit = {1024, 1024};
    ASSERT_EQ(0, setrlimit(RLIMIT_NOFILE, &loweredLimit));

    Parcel parcel;
    // More file descriptors than the OS allows: expect EMFILE.
    TooManyFdsFlattenable atOsLimit(1024);
    EXPECT_THAT(parcel.write(atOsLimit), StatusEq(-EMFILE));

    // More file descriptors than libbinder's internal per-parcel cap.
    TooManyFdsFlattenable aboveInternalLimit(1025);
    EXPECT_THAT(parcel.write(aboveInternalLimit), StatusEq(BAD_VALUE));
}
1340 
TEST(ServiceNotifications, Unregister) {
    auto sm = defaultServiceManager();
    using LocalRegistrationCallback = IServiceManager::LocalRegistrationCallback;
    // Minimal no-op callback implementation, local to this test.
    class NoOpRegistrationCallback : public virtual LocalRegistrationCallback {
        void onServiceRegistration(const String16 &, const sp<IBinder> &) override {}
        virtual ~NoOpRegistrationCallback() {}
    };
    sp<LocalRegistrationCallback> callback = sp<NoOpRegistrationCallback>::make();

    // Register-then-unregister for a name that never appears should both
    // succeed without the callback ever firing.
    EXPECT_EQ(sm->registerForNotifications(String16("RogerRafa"), callback), OK);
    EXPECT_EQ(sm->unregisterForNotifications(String16("RogerRafa"), callback), OK);
}
1353 
// Saturates the server's thread pool with blocking LOCK_UNLOCK calls and
// verifies the reported maximum thread count never exceeds the configured
// pool size (kKernelThreads) plus the main pool thread.
TEST_F(BinderLibTest, ThreadPoolAvailableThreads) {
    Parcel data, reply;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_MAX_THREAD_COUNT, data, &reply),
                StatusEq(NO_ERROR));
    int32_t replyi = reply.readInt32();
    // Expect 16 threads: kKernelThreads = 15 + Pool thread == 16
    EXPECT_TRUE(replyi == kKernelThreads || replyi == kKernelThreads + 1);
    // Take the server-side lock so the LOCK_UNLOCK calls below block.
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_PROCESS_LOCK, data, &reply), NO_ERROR);

    /*
     * This will use all threads in the pool except the main pool thread.
     * The service should run fine without locking, and the thread count should
     * not exceed 16 (15 Max + pool thread).
     */
    std::vector<std::thread> ts;
    for (size_t i = 0; i < kKernelThreads; i++) {
        ts.push_back(std::thread([&] {
            Parcel local_reply;
            EXPECT_THAT(server->transact(BINDER_LIB_TEST_LOCK_UNLOCK, data, &local_reply),
                        NO_ERROR);
        }));
    }

    data.writeInt32(500);
    // Give a chance for all threads to be used
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_UNLOCK_AFTER_MS, data, &reply), NO_ERROR);

    for (auto &t : ts) {
        t.join();
    }

    // After the burst, the observed maximum must be exactly pool + main.
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_MAX_THREAD_COUNT, data, &reply),
                StatusEq(NO_ERROR));
    replyi = reply.readInt32();
    EXPECT_EQ(replyi, kKernelThreads + 1);
}
1392 
TEST_F(BinderLibTest, ThreadPoolStarted) {
    Parcel data, reply;
    sp<IBinder> service = addServer();
    ASSERT_TRUE(service != nullptr);
    // The remote service reports whether its thread pool has been started.
    EXPECT_THAT(service->transact(BINDER_LIB_TEST_IS_THREADPOOL_STARTED, data, &reply), NO_ERROR);
    EXPECT_TRUE(reply.readBool());
}
1400 
/**
 * Milliseconds since the Unix epoch per the system clock.
 *
 * Returns uint64_t instead of size_t: size_t is 32 bits on 32-bit builds
 * and cannot hold a modern millisecond epoch timestamp (~2^40), which
 * would silently truncate and break elapsed-time comparisons.
 */
uint64_t epochMillis() {
    using std::chrono::duration_cast;
    using std::chrono::milliseconds;
    using std::chrono::system_clock;
    return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}
1408 
// Floods a server whose lock is held for `delay` ms with more blocking
// calls than it has pool threads, and verifies the calls could not finish
// before the lock was released — i.e. the hang really stalled the pool.
TEST_F(BinderLibTest, HangingServices) {
    Parcel data, reply;
    sp<IBinder> server = addServer();
    ASSERT_TRUE(server != nullptr);
    int32_t delay = 1000; // ms
    data.writeInt32(delay);
    // b/266537959 - must take before taking lock, since countdown is started in the remote
    // process there.
    size_t epochMsBefore = epochMillis();
    // Server takes its lock and schedules the release `delay` ms later.
    EXPECT_THAT(server->transact(BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK, data, &reply), NO_ERROR);
    // One more blocking call than there are pool threads.
    std::vector<std::thread> ts;
    for (size_t i = 0; i < kKernelThreads + 1; i++) {
        ts.push_back(std::thread([&] {
            Parcel local_reply;
            EXPECT_THAT(server->transact(BINDER_LIB_TEST_LOCK_UNLOCK, data, &local_reply),
                        NO_ERROR);
        }));
    }

    for (auto &t : ts) {
        t.join();
    }
    size_t epochMsAfter = epochMillis();

    // deadlock occurred and threads only finished after 1s passed.
    EXPECT_GE(epochMsAfter, epochMsBefore + delay);
}
1436 
// Shared fixture for binder-RPC tests: skips on non-debuggable builds (where
// binder RPC is disabled) and provides an inet RpcServer socket factory.
class BinderLibRpcTestBase : public BinderLibTest {
public:
    void SetUp() override {
        if (!base::GetBoolProperty("ro.debuggable", false)) {
            GTEST_SKIP() << "Binder RPC is only enabled on debuggable builds, skipping test on "
                            "non-debuggable builds.";
        }
        BinderLibTest::SetUp();
    }

    // Creates an RpcServer bound to an ephemeral localhost port and returns
    // its listening socket plus the chosen port. On failure a gtest failure
    // is recorded and a default-constructed (invalid) tuple is returned.
    std::tuple<android::base::unique_fd, unsigned int> CreateSocket() {
        auto rpcServer = RpcServer::make();
        EXPECT_NE(nullptr, rpcServer);
        if (rpcServer == nullptr) return {};
        unsigned int port;
        if (status_t status = rpcServer->setupInetServer("127.0.0.1", 0, &port); status != OK) {
            ADD_FAILURE() << "setupInetServer failed" << statusToString(status);
            return {};
        }
        return {rpcServer->releaseServer(), port};
    }
};
1459 
1460 class BinderLibRpcTest : public BinderLibRpcTestBase {};
1461 
1462 // e.g. EXPECT_THAT(expr, Debuggable(StatusEq(...))
1463 // If device is debuggable AND not on user builds, expects matcher.
1464 // Otherwise expects INVALID_OPERATION.
1465 // Debuggable + non user builds is necessary but not sufficient for setRpcClientDebug to work.
Debuggable(const Matcher<status_t> & matcher)1466 static Matcher<status_t> Debuggable(const Matcher<status_t> &matcher) {
1467     bool isDebuggable = android::base::GetBoolProperty("ro.debuggable", false) &&
1468             android::base::GetProperty("ro.build.type", "") != "user";
1469     return isDebuggable ? matcher : StatusEq(INVALID_OPERATION);
1470 }
1471 
TEST_F(BinderLibRpcTest, SetRpcClientDebug) {
    auto binder = addServer();
    ASSERT_TRUE(binder != nullptr);

    auto [socket, port] = CreateSocket();
    ASSERT_TRUE(socket.ok());
    // Attaching an RPC debug server should succeed on debuggable builds
    // (and be rejected with INVALID_OPERATION elsewhere).
    EXPECT_THAT(binder->setRpcClientDebug(std::move(socket), sp<BBinder>::make()),
                Debuggable(StatusEq(OK)));
}
1480 
1481 // Tests for multiple RpcServer's on the same binder object.
TEST_F(BinderLibRpcTest,SetRpcClientDebugTwice)1482 TEST_F(BinderLibRpcTest, SetRpcClientDebugTwice) {
1483     auto binder = addServer();
1484     ASSERT_TRUE(binder != nullptr);
1485 
1486     auto [socket1, port1] = CreateSocket();
1487     ASSERT_TRUE(socket1.ok());
1488     auto keepAliveBinder1 = sp<BBinder>::make();
1489     EXPECT_THAT(binder->setRpcClientDebug(std::move(socket1), keepAliveBinder1),
1490                 Debuggable(StatusEq(OK)));
1491 
1492     auto [socket2, port2] = CreateSocket();
1493     ASSERT_TRUE(socket2.ok());
1494     auto keepAliveBinder2 = sp<BBinder>::make();
1495     EXPECT_THAT(binder->setRpcClientDebug(std::move(socket2), keepAliveBinder2),
1496                 Debuggable(StatusEq(OK)));
1497 }
1498 
// Negative tests for RPC APIs on IBinder. Call should fail in the same way on both remote and
// local binders.
class BinderLibRpcTestP : public BinderLibRpcTestBase, public WithParamInterface<bool> {
public:
    // Param true -> remote service from addServer(); false -> in-process BBinder.
    sp<IBinder> GetService() {
        return GetParam() ? sp<IBinder>(addServer()) : sp<IBinder>(sp<BBinder>::make());
    }
    // Names the parameterized instances "remote"/"local" in test output.
    static std::string ParamToString(const testing::TestParamInfo<ParamType> &info) {
        return info.param ? "remote" : "local";
    }
};
1510 
TEST_P(BinderLibRpcTestP, SetRpcClientDebugNoFd) {
    auto binder = GetService();
    ASSERT_TRUE(binder != nullptr);
    // An invalid (unset) socket fd must be rejected with BAD_VALUE.
    EXPECT_THAT(binder->setRpcClientDebug(android::base::unique_fd(), sp<BBinder>::make()),
                Debuggable(StatusEq(BAD_VALUE)));
}
1517 
TEST_P(BinderLibRpcTestP, SetRpcClientDebugNoKeepAliveBinder) {
    auto binder = GetService();
    ASSERT_TRUE(binder != nullptr);
    auto [socket, port] = CreateSocket();
    ASSERT_TRUE(socket.ok());
    // A null keep-alive binder must be rejected with UNEXPECTED_NULL.
    EXPECT_THAT(binder->setRpcClientDebug(std::move(socket), nullptr),
                Debuggable(StatusEq(UNEXPECTED_NULL)));
}
1526 INSTANTIATE_TEST_CASE_P(BinderLibTest, BinderLibRpcTestP, testing::Bool(),
1527                         BinderLibRpcTestP::ParamToString);
1528 
1529 class BinderLibTestService : public BBinder {
1530 public:
BinderLibTestService(int32_t id,bool exitOnDestroy=true)1531     explicit BinderLibTestService(int32_t id, bool exitOnDestroy = true)
1532           : m_id(id),
1533             m_nextServerId(id + 1),
1534             m_serverStartRequested(false),
1535             m_callback(nullptr),
1536             m_exitOnDestroy(exitOnDestroy) {
1537         pthread_mutex_init(&m_serverWaitMutex, nullptr);
1538         pthread_cond_init(&m_serverWaitCond, nullptr);
1539     }
~BinderLibTestService()1540     ~BinderLibTestService() {
1541         if (m_exitOnDestroy) exit(EXIT_SUCCESS);
1542     }
1543 
processPendingCall()1544     void processPendingCall() {
1545         if (m_callback != nullptr) {
1546             Parcel data;
1547             data.writeInt32(NO_ERROR);
1548             m_callback->transact(BINDER_LIB_TEST_CALL_BACK, data, nullptr, TF_ONE_WAY);
1549             m_callback = nullptr;
1550         }
1551     }
1552 
    // Dispatches every BINDER_LIB_TEST_* transaction served by this test
    // process. |code| selects the scenario, |data|/|reply| carry the
    // per-scenario payloads, |flags| may include TF_ONE_WAY.
    // Returns NO_ERROR on success, a status_t error code otherwise;
    // unrecognized codes return UNKNOWN_TRANSACTION.
    virtual status_t onTransact(uint32_t code, const Parcel &data, Parcel *reply,
                                uint32_t flags = 0) {
        // Reject callers from a different uid (kernel binder only; RPC
        // callers skip this check for now).
        // TODO(b/182914638): also checks getCallingUid() for RPC
        if (!data.isForRpc() && getuid() != (uid_t)IPCThreadState::self()->getCallingUid()) {
            return PERMISSION_DENIED;
        }
        switch (code) {
            case BINDER_LIB_TEST_REGISTER_SERVER: {
                // A freshly spawned server process registers its binder with
                // the main (m_id == 0) service; wakes any ADD_SERVER waiter.
                int32_t id;
                sp<IBinder> binder;
                id = data.readInt32(); // consumed from the parcel; value unused here
                binder = data.readStrongBinder();
                if (binder == nullptr) {
                    return BAD_VALUE;
                }

                // Only the main service accepts registrations.
                if (m_id != 0) return INVALID_OPERATION;

                pthread_mutex_lock(&m_serverWaitMutex);
                if (m_serverStartRequested) {
                    m_serverStartRequested = false;
                    m_serverStarted = binder;
                    pthread_cond_signal(&m_serverWaitCond);
                }
                pthread_mutex_unlock(&m_serverWaitMutex);
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_ADD_POLL_SERVER:
            case BINDER_LIB_TEST_ADD_SERVER: {
                // Fork a new server process and wait (up to 5s) for it to
                // call back with BINDER_LIB_TEST_REGISTER_SERVER. On success
                // the reply holds the new server's binder and its id.
                int ret;
                int serverid;

                if (m_id != 0) {
                    return INVALID_OPERATION;
                }
                pthread_mutex_lock(&m_serverWaitMutex);
                if (m_serverStartRequested) {
                    // A previous spawn is still pending registration.
                    ret = -EBUSY;
                } else {
                    serverid = m_nextServerId++;
                    m_serverStartRequested = true;
                    bool usePoll = code == BINDER_LIB_TEST_ADD_POLL_SERVER;

                    // Drop the lock around the fork so the child's incoming
                    // REGISTER_SERVER transaction can take it.
                    pthread_mutex_unlock(&m_serverWaitMutex);
                    ret = start_server_process(serverid, usePoll);
                    pthread_mutex_lock(&m_serverWaitMutex);
                }
                if (ret > 0) {
                    // Fork succeeded (ret is presumably the child pid — it is
                    // produced by start_server_process, defined elsewhere in
                    // this file). Wait for registration with a 5s deadline.
                    if (m_serverStartRequested) {
                        struct timespec ts;
                        clock_gettime(CLOCK_REALTIME, &ts);
                        ts.tv_sec += 5;
                        ret = pthread_cond_timedwait(&m_serverWaitCond, &m_serverWaitMutex, &ts);
                    }
                    if (m_serverStartRequested) {
                        // Woke without a registration: timed out (or the
                        // wait itself failed) — report a timeout either way.
                        m_serverStartRequested = false;
                        ret = -ETIMEDOUT;
                    } else {
                        reply->writeStrongBinder(m_serverStarted);
                        reply->writeInt32(serverid);
                        m_serverStarted = nullptr;
                        ret = NO_ERROR;
                    }
                } else if (ret >= 0) {
                    // ret == 0: unexpected fork result; clear the pending
                    // flag so later ADD_SERVER calls are not stuck on -EBUSY.
                    m_serverStartRequested = false;
                    ret = UNKNOWN_ERROR;
                }
                pthread_mutex_unlock(&m_serverWaitMutex);
                return ret;
            }
            case BINDER_LIB_TEST_USE_CALLING_GUARD_TRANSACTION: {
                // Installs a stack-pointer guard and then calls a getCalling*
                // API; with a working guard the call below aborts the process.
                IPCThreadState::SpGuard spGuard{
                        .address = __builtin_frame_address(0),
                        .context = "GuardInBinderTransaction",
                };
                const IPCThreadState::SpGuard *origGuard =
                        IPCThreadState::self()->pushGetCallingSpGuard(&spGuard);

                // if the guard works, this should abort
                (void)IPCThreadState::self()->getCallingPid();

                IPCThreadState::self()->restoreGetCallingSpGuard(origGuard);
                return NO_ERROR;
            }

            case BINDER_LIB_TEST_GETPID:
                reply->writeInt32(getpid());
                return NO_ERROR;
            case BINDER_LIB_TEST_NOP_TRANSACTION_WAIT:
                // Same as NOP, but stall briefly first.
                usleep(5000);
                [[fallthrough]];
            case BINDER_LIB_TEST_NOP_TRANSACTION:
                // oneway error codes should be ignored
                if (flags & TF_ONE_WAY) {
                    return UNKNOWN_ERROR;
                }
                return NO_ERROR;
            case BINDER_LIB_TEST_DELAYED_CALL_BACK: {
                // Note: this transaction is only designed for use with a
                // poll() server. See comments around epoll_wait().
                if (m_callback != nullptr) {
                    // A callback was already pending; this means that
                    // we received a second call while still processing
                    // the first one. Fail the test.
                    sp<IBinder> callback = data.readStrongBinder();
                    Parcel data2;
                    data2.writeInt32(UNKNOWN_ERROR);

                    callback->transact(BINDER_LIB_TEST_CALL_BACK, data2, nullptr, TF_ONE_WAY);
                } else {
                    m_callback = data.readStrongBinder();
                    int32_t delayUs = data.readInt32();
                    /*
                     * It's necessary that we sleep here, so the next
                     * transaction the caller makes will be queued to
                     * the async queue.
                     */
                    usleep(delayUs);

                    /*
                     * Now when we return, libbinder will tell the kernel
                     * we are done with this transaction, and the kernel
                     * can move the queued transaction to either the
                     * thread todo worklist (for kernels without the fix),
                     * or the proc todo worklist. In case of the former,
                     * the next outbound call will pick up the pending
                     * transaction, which leads to undesired reentrant
                     * behavior. This is caught in the if() branch above.
                     */
                }

                return NO_ERROR;
            }
            case BINDER_LIB_TEST_NOP_CALL_BACK: {
                // Immediately call back the binder supplied in |data|.
                Parcel data2, reply2;
                sp<IBinder> binder;
                binder = data.readStrongBinder();
                if (binder == nullptr) {
                    return BAD_VALUE;
                }
                data2.writeInt32(NO_ERROR);
                binder->transact(BINDER_LIB_TEST_CALL_BACK, data2, &reply2);
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_GET_SELF_TRANSACTION:
                reply->writeStrongBinder(this);
                return NO_ERROR;
            case BINDER_LIB_TEST_GET_ID_TRANSACTION:
                reply->writeInt32(m_id);
                return NO_ERROR;
            case BINDER_LIB_TEST_INDIRECT_TRANSACTION: {
                // Forward |count| nested (binder, code, bundle) triples and
                // append each nested reply to our own reply.
                int32_t count;
                uint32_t indirect_code;
                sp<IBinder> binder;

                count = data.readInt32();
                reply->writeInt32(m_id);
                reply->writeInt32(count);
                for (int i = 0; i < count; i++) {
                    binder = data.readStrongBinder();
                    if (binder == nullptr) {
                        return BAD_VALUE;
                    }
                    indirect_code = data.readInt32();
                    BinderLibTestBundle data2(&data);
                    if (!data2.isValid()) {
                        return BAD_VALUE;
                    }
                    BinderLibTestBundle reply2;
                    binder->transact(indirect_code, data2, &reply2);
                    reply2.appendTo(reply);
                }
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_SET_ERROR_TRANSACTION:
                // Make the reply itself carry the requested error status.
                reply->setError(data.readInt32());
                return NO_ERROR;
            case BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION:
                // Lets the test detect 32- vs 64-bit server processes.
                reply->writeInt32(sizeof(void *));
                return NO_ERROR;
            case BINDER_LIB_TEST_GET_STATUS_TRANSACTION:
                return NO_ERROR;
            case BINDER_LIB_TEST_ADD_STRONG_REF_TRANSACTION:
                // Hold a strong reference on behalf of the caller.
                m_strongRef = data.readStrongBinder();
                return NO_ERROR;
            case BINDER_LIB_TEST_LINK_DEATH_TRANSACTION: {
                // Link a death recipient to |target|, wait up to 5s for the
                // death notification, then report the result via |callback|.
                int ret;
                Parcel data2, reply2;
                sp<TestDeathRecipient> testDeathRecipient = new TestDeathRecipient();
                sp<IBinder> target;
                sp<IBinder> callback;

                target = data.readStrongBinder();
                if (target == nullptr) {
                    return BAD_VALUE;
                }
                callback = data.readStrongBinder();
                if (callback == nullptr) {
                    return BAD_VALUE;
                }
                ret = target->linkToDeath(testDeathRecipient);
                if (ret == NO_ERROR) ret = testDeathRecipient->waitEvent(5);
                data2.writeInt32(ret);
                callback->transact(BINDER_LIB_TEST_CALL_BACK, data2, &reply2);
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_WRITE_FILE_TRANSACTION: {
                // Write |size| bytes from the parcel into a raw fd passed by
                // the caller (tests plain file-descriptor passing).
                int ret;
                int32_t size;
                const void *buf;
                int fd;

                fd = data.readFileDescriptor();
                if (fd < 0) {
                    return BAD_VALUE;
                }
                ret = data.readInt32(&size);
                if (ret != NO_ERROR) {
                    return ret;
                }
                buf = data.readInplace(size);
                if (buf == nullptr) {
                    return BAD_VALUE;
                }
                ret = write(fd, buf, size);
                if (ret != size) return UNKNOWN_ERROR;
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_WRITE_PARCEL_FILE_DESCRIPTOR_TRANSACTION: {
                // Same as above, but via ParcelFileDescriptor / unique_fd
                // (fd is owned and closed when |fd| goes out of scope).
                int ret;
                int32_t size;
                const void *buf;
                android::base::unique_fd fd;

                ret = data.readUniqueParcelFileDescriptor(&fd);
                if (ret != NO_ERROR) {
                    return ret;
                }
                ret = data.readInt32(&size);
                if (ret != NO_ERROR) {
                    return ret;
                }
                buf = data.readInplace(size);
                if (buf == nullptr) {
                    return BAD_VALUE;
                }
                ret = write(fd.get(), buf, size);
                if (ret != size) return UNKNOWN_ERROR;
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_DELAYED_EXIT_TRANSACTION:
                // Schedule SIGALRM in 10s; the default disposition kills the
                // process, giving tests a delayed-exit server.
                alarm(10);
                return NO_ERROR;
            case BINDER_LIB_TEST_EXIT_TRANSACTION:
                // Reap all children, then exit. Never returns.
                while (wait(nullptr) != -1 || errno != ECHILD)
                    ;
                exit(EXIT_SUCCESS);
            case BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION: {
                // Hand out a fresh local binder object.
                sp<IBinder> binder = new BBinder();
                reply->writeStrongBinder(binder);
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION: {
                data.enforceInterface(binderLibTestServiceName);
                reply->writeInt32(IPCThreadState::self()->getCallingWorkSourceUid());
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_GET_SCHEDULING_POLICY: {
                // Report the scheduling policy/priority of the binder thread
                // handling this transaction.
                int policy = 0;
                sched_param param;
                if (0 != pthread_getschedparam(pthread_self(), &policy, &param)) {
                    return UNKNOWN_ERROR;
                }
                reply->writeInt32(policy);
                reply->writeInt32(param.sched_priority);
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_ECHO_VECTOR: {
                std::vector<uint64_t> vector;
                auto err = data.readUint64Vector(&vector);
                if (err != NO_ERROR) return err;
                reply->writeUint64Vector(vector);
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_GET_NON_BLOCKING_FD: {
                // Return one end of a socketpair set to O_NONBLOCK; the
                // other end is closed since only the fd's flags matter.
                std::array<int, 2> sockets;
                const bool created = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sockets.data()) == 0;
                if (!created) {
                    ALOGE("Could not create socket pair");
                    return UNKNOWN_ERROR;
                }

                const int result = fcntl(sockets[0], F_SETFL, O_NONBLOCK);
                if (result != 0) {
                    ALOGE("Could not make socket non-blocking: %s", strerror(errno));
                    return UNKNOWN_ERROR;
                }
                base::unique_fd out(sockets[0]);
                status_t writeResult = reply->writeUniqueFileDescriptor(out);
                if (writeResult != NO_ERROR) {
                    ALOGE("Could not write unique_fd");
                    return writeResult;
                }
                close(sockets[1]); // we don't need the other side of the fd
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_REJECT_OBJECTS: {
                // Succeeds only if the parcel actually carried objects.
                return data.objectsCount() == 0 ? BAD_VALUE : NO_ERROR;
            }
            case BINDER_LIB_TEST_CAN_GET_SID: {
                return IPCThreadState::self()->getCallingSid() == nullptr ? BAD_VALUE : NO_ERROR;
            }
            case BINDER_LIB_TEST_GET_MAX_THREAD_COUNT: {
                reply->writeInt32(ProcessState::self()->getThreadPoolMaxTotalThreadCount());
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_IS_THREADPOOL_STARTED: {
                reply->writeBool(ProcessState::self()->isThreadPoolStarted());
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_PROCESS_LOCK: {
                // Deliberately returns with the mutex still held; paired
                // with LOCK_UNLOCK / UNLOCK_AFTER_MS in the tests.
                m_blockMutex.lock();
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_LOCK_UNLOCK: {
                // Blocks until the mutex is free, then releases it at scope
                // exit.
                std::lock_guard<std::mutex> _l(m_blockMutex);
                return NO_ERROR;
            }
            case BINDER_LIB_TEST_UNLOCK_AFTER_MS: {
                int32_t ms = data.readInt32();
                return unlockInMs(ms);
            }
            case BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK: {
                // Lock now; a detached thread releases the lock after the
                // requested delay. The sp<> copy keeps the service alive for
                // the detached thread's lifetime.
                m_blockMutex.lock();
                sp<BinderLibTestService> thisService = this;
                int32_t value = data.readInt32();
                // start local thread to unlock in 1s
                std::thread t([=] { thisService->unlockInMs(value); });
                t.detach();
                return NO_ERROR;
            }
            default:
                return UNKNOWN_TRANSACTION;
        };
    }
1898 
unlockInMs(int32_t ms)1899     status_t unlockInMs(int32_t ms) {
1900         usleep(ms * 1000);
1901         m_blockMutex.unlock();
1902         return NO_ERROR;
1903     }
1904 
private:
    int32_t m_id;                      // index of this server instance (0 == main test service)
    int32_t m_nextServerId;            // id assigned to the next spawned server process
    pthread_mutex_t m_serverWaitMutex; // guards m_serverStartRequested / m_serverStarted
    pthread_cond_t m_serverWaitCond;   // signaled when a spawned server registers itself
    bool m_serverStartRequested;       // true while ADD_SERVER awaits a REGISTER_SERVER call
    sp<IBinder> m_serverStarted;       // binder handed over by the most recently spawned server
    sp<IBinder> m_strongRef;           // reference held for ADD_STRONG_REF_TRANSACTION
    sp<IBinder> m_callback;            // pending oneway callback for DELAYED_CALL_BACK
    bool m_exitOnDestroy;              // call exit(EXIT_SUCCESS) from the destructor when true
    std::mutex m_blockMutex;           // mutex driven by the PROCESS_LOCK family of tests
};
1917 
run_server(int index,int readypipefd,bool usePoll)1918 int run_server(int index, int readypipefd, bool usePoll)
1919 {
1920     binderLibTestServiceName += String16(binderserversuffix);
1921 
1922     // Testing to make sure that calls that we are serving can use getCallin*
1923     // even though we don't here.
1924     IPCThreadState::SpGuard spGuard{
1925             .address = __builtin_frame_address(0),
1926             .context = "main server thread",
1927     };
1928     (void)IPCThreadState::self()->pushGetCallingSpGuard(&spGuard);
1929 
1930     status_t ret;
1931     sp<IServiceManager> sm = defaultServiceManager();
1932     BinderLibTestService* testServicePtr;
1933     {
1934         sp<BinderLibTestService> testService = new BinderLibTestService(index);
1935 
1936         testService->setMinSchedulerPolicy(kSchedPolicy, kSchedPriority);
1937 
1938         testService->setInheritRt(true);
1939 
1940         /*
1941          * Normally would also contain functionality as well, but we are only
1942          * testing the extension mechanism.
1943          */
1944         testService->setExtension(new BBinder());
1945 
1946         // Required for test "BufRejected'
1947         testService->setRequestingSid(true);
1948 
1949         /*
1950          * We need this below, but can't hold a sp<> because it prevents the
1951          * node from being cleaned up automatically. It's safe in this case
1952          * because of how the tests are written.
1953          */
1954         testServicePtr = testService.get();
1955 
1956         if (index == 0) {
1957             ret = sm->addService(binderLibTestServiceName, testService);
1958         } else {
1959             sp<IBinder> server = sm->getService(binderLibTestServiceName);
1960             Parcel data, reply;
1961             data.writeInt32(index);
1962             data.writeStrongBinder(testService);
1963 
1964             ret = server->transact(BINDER_LIB_TEST_REGISTER_SERVER, data, &reply);
1965         }
1966     }
1967     write(readypipefd, &ret, sizeof(ret));
1968     close(readypipefd);
1969     //printf("%s: ret %d\n", __func__, ret);
1970     if (ret)
1971         return 1;
1972     //printf("%s: joinThreadPool\n", __func__);
1973     if (usePoll) {
1974         int fd;
1975         struct epoll_event ev;
1976         int epoll_fd;
1977         IPCThreadState::self()->setupPolling(&fd);
1978         if (fd < 0) {
1979             return 1;
1980         }
1981         IPCThreadState::self()->flushCommands(); // flush BC_ENTER_LOOPER
1982 
1983         epoll_fd = epoll_create1(EPOLL_CLOEXEC);
1984         if (epoll_fd == -1) {
1985             return 1;
1986         }
1987 
1988         ev.events = EPOLLIN;
1989         if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev) == -1) {
1990             return 1;
1991         }
1992 
1993         while (1) {
1994              /*
1995               * We simulate a single-threaded process using the binder poll
1996               * interface; besides handling binder commands, it can also
1997               * issue outgoing transactions, by storing a callback in
1998               * m_callback.
1999               *
2000               * processPendingCall() will then issue that transaction.
2001               */
2002              struct epoll_event events[1];
2003              int numEvents = epoll_wait(epoll_fd, events, 1, 1000);
2004              if (numEvents < 0) {
2005                  if (errno == EINTR) {
2006                      continue;
2007                  }
2008                  return 1;
2009              }
2010              if (numEvents > 0) {
2011                  IPCThreadState::self()->handlePolledCommands();
2012                  IPCThreadState::self()->flushCommands(); // flush BC_FREE_BUFFER
2013                  testServicePtr->processPendingCall();
2014              }
2015         }
2016     } else {
2017         ProcessState::self()->setThreadPoolMaxThreadCount(kKernelThreads);
2018         ProcessState::self()->startThreadPool();
2019         IPCThreadState::self()->joinThreadPool();
2020     }
2021     //printf("%s: joinThreadPool returned\n", __func__);
2022     return 1; /* joinThreadPool should not return */
2023 }
2024 
main(int argc,char ** argv)2025 int main(int argc, char **argv) {
2026     ExitIfWrongAbi();
2027 
2028     if (argc == 4 && !strcmp(argv[1], "--servername")) {
2029         binderservername = argv[2];
2030     } else {
2031         binderservername = argv[0];
2032     }
2033 
2034     if (argc == 6 && !strcmp(argv[1], binderserverarg)) {
2035         binderserversuffix = argv[5];
2036         return run_server(atoi(argv[2]), atoi(argv[3]), atoi(argv[4]) == 1);
2037     }
2038     binderserversuffix = new char[16];
2039     snprintf(binderserversuffix, 16, "%d", getpid());
2040     binderLibTestServiceName += String16(binderserversuffix);
2041 
2042     ::testing::InitGoogleTest(&argc, argv);
2043     binder_env = AddGlobalTestEnvironment(new BinderLibTestEnv());
2044     ProcessState::self()->startThreadPool();
2045     return RUN_ALL_TESTS();
2046 }
2047