/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RpcSession"

#include <binder/RpcSession.h>

#include <dlfcn.h>
#include <inttypes.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>

#include <string_view>

#include <android-base/hex.h>
#include <android-base/macros.h>
#include <android-base/scopeguard.h>
#include <binder/BpBinder.h>
#include <binder/Parcel.h>
#include <binder/RpcServer.h>
#include <binder/RpcTransportRaw.h>
#include <binder/Stability.h>
#include <utils/String8.h>

#include "FdTrigger.h"
#include "RpcSocketAddress.h"
#include "RpcState.h"
#include "RpcWireFormat.h"
#include "Utils.h"

#ifdef __GLIBC__
extern "C" pid_t gettid();
#endif

#ifndef __ANDROID_RECOVERY__
#include <android_runtime/vm.h>
#include <jni.h>
#endif

namespace android {

using base::unique_fd;

RpcSession::RpcSession(std::unique_ptr<RpcTransportCtx> ctx) : mCtx(std::move(ctx)) {
    LOG_RPC_DETAIL("RpcSession created %p", this);

    mRpcBinderState = std::make_unique<RpcState>();
}

RpcSession::~RpcSession() {
    LOG_RPC_DETAIL("RpcSession destroyed %p", this);

    std::lock_guard<std::mutex> _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mConnections.mIncoming.size() != 0,
                        "Should not be able to destroy a session with servers in use.");
}

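// Illustrative client-side flow, as a sketch only (not code from this file;
// error handling is elided and the socket path is hypothetical):
//
//     sp<RpcSession> session = RpcSession::make();
//     if (session->setupUnixDomainClient("/dev/socket/example") != OK) return;
//     sp<IBinder> root = session->getRootObject();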
sp<RpcSession> RpcSession::make() {
    // Default is without TLS.
    return make(RpcTransportCtxFactoryRaw::make());
}

sp<RpcSession> RpcSession::make(std::unique_ptr<RpcTransportCtxFactory> rpcTransportCtxFactory) {
    auto ctx = rpcTransportCtxFactory->newClientCtx();
    if (ctx == nullptr) return nullptr;
    return sp<RpcSession>::make(std::move(ctx));
}

void RpcSession::setMaxIncomingThreads(size_t threads) {
    std::lock_guard<std::mutex> _l(mMutex);
    LOG_ALWAYS_FATAL_IF(!mConnections.mOutgoing.empty() || !mConnections.mIncoming.empty(),
                        "Must set max incoming threads before setting up connections, but has %zu "
                        "client(s) and %zu server(s)",
                        mConnections.mOutgoing.size(), mConnections.mIncoming.size());
    mMaxIncomingThreads = threads;
}

size_t RpcSession::getMaxIncomingThreads() {
    std::lock_guard<std::mutex> _l(mMutex);
    return mMaxIncomingThreads;
}

void RpcSession::setMaxOutgoingThreads(size_t threads) {
    std::lock_guard<std::mutex> _l(mMutex);
    LOG_ALWAYS_FATAL_IF(!mConnections.mOutgoing.empty() || !mConnections.mIncoming.empty(),
                        "Must set max outgoing threads before setting up connections, but has %zu "
                        "client(s) and %zu server(s)",
                        mConnections.mOutgoing.size(), mConnections.mIncoming.size());
    mMaxOutgoingThreads = threads;
}

size_t RpcSession::getMaxOutgoingThreads() {
    std::lock_guard<std::mutex> _l(mMutex);
    return mMaxOutgoingThreads;
}

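// Illustrative: a client that must interoperate with an older server can cap
// the protocol version before connecting. The concrete value below is a
// hypothetical example, not a recommendation:
//
//     session->setProtocolVersion(0); // only speak the initial wire protocol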
bool RpcSession::setProtocolVersion(uint32_t version) {
    if (version >= RPC_WIRE_PROTOCOL_VERSION_NEXT &&
        version != RPC_WIRE_PROTOCOL_VERSION_EXPERIMENTAL) {
        ALOGE("Cannot start RPC session with version %u which is unknown (current protocol version "
              "is %u).",
              version, RPC_WIRE_PROTOCOL_VERSION);
        return false;
    }

    std::lock_guard<std::mutex> _l(mMutex);
    if (mProtocolVersion && version > *mProtocolVersion) {
        ALOGE("Cannot upgrade explicitly capped protocol version %u to newer version %u",
              *mProtocolVersion, version);
        return false;
    }

    mProtocolVersion = version;
    return true;
}

std::optional<uint32_t> RpcSession::getProtocolVersion() {
    std::lock_guard<std::mutex> _l(mMutex);
    return mProtocolVersion;
}

status_t RpcSession::setupUnixDomainClient(const char* path) {
    return setupSocketClient(UnixSocketAddress(path));
}

status_t RpcSession::setupVsockClient(unsigned int cid, unsigned int port) {
    return setupSocketClient(VsockSocketAddress(cid, port));
}

status_t RpcSession::setupInetClient(const char* addr, unsigned int port) {
    auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
    if (aiStart == nullptr) return UNKNOWN_ERROR;
    for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
        InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port);
        if (status_t status = setupSocketClient(socketAddress); status == OK) return OK;
    }
    ALOGE("None of the socket addresses resolved for %s:%u could be added as an inet client.",
          addr, port);
    return NAME_NOT_FOUND;
}

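// Illustrative use with a connection established out of band (a sketch; it
// assumes the peer end of the socketpair was handed to an RpcServer in another
// process, and the request callback declines to create more connections):
//
//     int fds[2];
//     socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, fds);
//     // ... transfer fds[1] to the server process ...
//     session->setupPreconnectedClient(unique_fd(fds[0]), [] { return unique_fd(); });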
status_t RpcSession::setupPreconnectedClient(unique_fd fd, std::function<unique_fd()>&& request) {
    // Why pass a raw fd? If `fd` were captured by reference, the Clang analyzer would see the
    // variable as a moved-from object. To work around the issue, release the raw fd from the
    // outer `fd`, capture it by value in the lambda, and finally re-wrap it in a unique_fd
    // inside the lambda.
    return setupClient([&, raw = fd.release()](const std::vector<uint8_t>& sessionId,
                                               bool incoming) -> status_t {
        unique_fd fd(raw);
        if (!fd.ok()) {
            fd = request();
            if (!fd.ok()) return BAD_VALUE;
        }
        if (auto res = setNonBlocking(fd); !res.ok()) {
            ALOGE("setupPreconnectedClient: %s", res.error().message().c_str());
            return res.error().code() == 0 ? UNKNOWN_ERROR : -res.error().code();
        }
        return initAndAddConnection(std::move(fd), sessionId, incoming);
    });
}

status_t RpcSession::addNullDebuggingClient() {
    // Note: only works on raw sockets.
    if (auto status = initShutdownTrigger(); status != OK) return status;

    unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));

    if (serverFd == -1) {
        int savedErrno = errno;
        ALOGE("Could not connect to /dev/null: %s", strerror(savedErrno));
        return -savedErrno;
    }

    auto server = mCtx->newTransport(std::move(serverFd), mShutdownTrigger.get());
    if (server == nullptr) {
        ALOGE("Unable to set up RpcTransport");
        return UNKNOWN_ERROR;
    }
    return addOutgoingConnection(std::move(server), false);
}

sp<IBinder> RpcSession::getRootObject() {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return nullptr;
    return state()->getRootObject(connection.get(), sp<RpcSession>::fromExisting(this));
}

status_t RpcSession::getRemoteMaxThreads(size_t* maxThreads) {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;
    return state()->getMaxThreads(connection.get(), sp<RpcSession>::fromExisting(this), maxThreads);
}

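// Typical teardown, as an illustrative sketch: trigger shutdown and block
// until every incoming thread has exited before dropping the last reference:
//
//     session->shutdownAndWait(true /*wait*/);
//     session = nullptr;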
bool RpcSession::shutdownAndWait(bool wait) {
    std::unique_lock<std::mutex> _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger == nullptr, "Shutdown trigger not installed");

    mShutdownTrigger->trigger();

    if (wait) {
        LOG_ALWAYS_FATAL_IF(mShutdownListener == nullptr, "Shutdown listener not installed");
        mShutdownListener->waitForShutdown(_l, sp<RpcSession>::fromExisting(this));

        LOG_ALWAYS_FATAL_IF(!mConnections.mThreads.empty(), "Shutdown failed");
    }

    _l.unlock();
    mRpcBinderState->clear();

    return true;
}

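// Callers normally reach transact() indirectly, through BpBinder proxies for
// binders living on this session, rather than calling it directly. A
// hypothetical example via the generic IBinder interface:
//
//     Parcel data, reply;
//     status_t status = binder->transact(IBinder::FIRST_CALL_TRANSACTION, data, &reply);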
status_t RpcSession::transact(const sp<IBinder>& binder, uint32_t code, const Parcel& data,
                              Parcel* reply, uint32_t flags) {
    ExclusiveConnection connection;
    status_t status =
            ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                      (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
                                                                     : ConnectionUse::CLIENT,
                                      &connection);
    if (status != OK) return status;
    return state()->transact(connection.get(), binder, code, data,
                             sp<RpcSession>::fromExisting(this), reply, flags);
}

status_t RpcSession::sendDecStrong(const BpBinder* binder) {
    // target is 0 because this is used to free BpBinder objects
    return sendDecStrongToTarget(binder->getPrivateAccessor().rpcAddress(), 0 /*target*/);
}

status_t RpcSession::sendDecStrongToTarget(uint64_t address, size_t target) {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT_REFCOUNT, &connection);
    if (status != OK) return status;
    return state()->sendDecStrongToTarget(connection.get(), sp<RpcSession>::fromExisting(this),
                                          address, target);
}

status_t RpcSession::readId() {
    {
        std::lock_guard<std::mutex> _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
    }

    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;

    status = state()->getSessionId(connection.get(), sp<RpcSession>::fromExisting(this), &mId);
    if (status != OK) return status;

    LOG_RPC_DETAIL("RpcSession %p has id %s", this,
                   base::HexString(mId.data(), mId.size()).c_str());
    return OK;
}

void RpcSession::WaitForShutdownListener::onSessionAllIncomingThreadsEnded(
        const sp<RpcSession>& session) {
    (void)session;
}

void RpcSession::WaitForShutdownListener::onSessionIncomingThreadEnded() {
    mCv.notify_all();
}

void RpcSession::WaitForShutdownListener::waitForShutdown(std::unique_lock<std::mutex>& lock,
                                                          const sp<RpcSession>& session) {
    while (session->mConnections.mIncoming.size() > 0) {
        if (std::cv_status::timeout == mCv.wait_for(lock, std::chrono::seconds(1))) {
            ALOGE("Waiting for RpcSession to shut down (1s w/o progress): %zu incoming "
                  "connections still open.",
                  session->mConnections.mIncoming.size());
        }
    }
}

void RpcSession::preJoinThreadOwnership(std::thread thread) {
    LOG_ALWAYS_FATAL_IF(thread.get_id() != std::this_thread::get_id(), "Must own this thread");

    {
        std::lock_guard<std::mutex> _l(mMutex);
        mConnections.mThreads[thread.get_id()] = std::move(thread);
    }
}

RpcSession::PreJoinSetupResult RpcSession::preJoinSetup(
        std::unique_ptr<RpcTransport> rpcTransport) {
    // must be registered to allow arbitrary client code executing commands to
    // be able to do nested calls (we can't only read from it)
    sp<RpcConnection> connection = assignIncomingConnectionToThisThread(std::move(rpcTransport));

    status_t status;

    if (connection == nullptr) {
        status = DEAD_OBJECT;
    } else {
        status =
                mRpcBinderState->readConnectionInit(connection, sp<RpcSession>::fromExisting(this));
    }

    return PreJoinSetupResult{
            .connection = std::move(connection),
            .status = status,
    };
}

namespace {
#ifdef __ANDROID_RECOVERY__
class JavaThreadAttacher {};
#else
// RAII object that attaches the current thread to the JVM on construction and
// detaches it on destruction, if the Android Runtime exists; otherwise a no-op.
class JavaThreadAttacher {
public:
    JavaThreadAttacher() {
        // Use dlsym to find AndroidRuntimeGetJavaVM because libandroid_runtime is loaded
        // after libbinder.
        auto vm = getJavaVM();
        if (vm == nullptr) return;

        char threadName[16];
        if (0 != pthread_getname_np(pthread_self(), threadName, sizeof(threadName))) {
            constexpr const char* defaultThreadName = "UnknownRpcSessionThread";
            // The default name may be longer than the buffer; truncate and always
            // null-terminate.
            size_t len = std::min(sizeof(threadName) - 1, strlen(defaultThreadName));
            memcpy(threadName, defaultThreadName, len);
            threadName[len] = '\0';
        }
        LOG_RPC_DETAIL("Attaching current thread %s to JVM", threadName);
        JavaVMAttachArgs args;
        args.version = JNI_VERSION_1_2;
        args.name = threadName;
        args.group = nullptr;
        JNIEnv* env;

        LOG_ALWAYS_FATAL_IF(vm->AttachCurrentThread(&env, &args) != JNI_OK,
                            "Cannot attach thread %s to JVM", threadName);
        mAttached = true;
    }
    ~JavaThreadAttacher() {
        if (!mAttached) return;
        auto vm = getJavaVM();
        LOG_ALWAYS_FATAL_IF(vm == nullptr,
                            "Unable to detach thread. No JavaVM, but it was present before!");

        LOG_RPC_DETAIL("Detaching current thread from JVM");
        if (vm->DetachCurrentThread() == JNI_OK) {
            mAttached = false;
        } else {
            ALOGW("Unable to detach current thread from JVM");
        }
    }

private:
    DISALLOW_COPY_AND_ASSIGN(JavaThreadAttacher);
    bool mAttached = false;

    static JavaVM* getJavaVM() {
        static auto fn = reinterpret_cast<decltype(&AndroidRuntimeGetJavaVM)>(
                dlsym(RTLD_DEFAULT, "AndroidRuntimeGetJavaVM"));
        if (fn == nullptr) return nullptr;
        return fn();
    }
};
#endif
} // namespace

void RpcSession::join(sp<RpcSession>&& session, PreJoinSetupResult&& setupResult) {
    sp<RpcConnection>& connection = setupResult.connection;

    if (setupResult.status == OK) {
        LOG_ALWAYS_FATAL_IF(!connection, "must have connection if setup succeeded");
        [[maybe_unused]] JavaThreadAttacher javaThreadAttacher;
        while (true) {
            status_t status = session->state()->getAndExecuteCommand(connection, session,
                                                                     RpcState::CommandType::ANY);
            if (status != OK) {
                LOG_RPC_DETAIL("Binder connection thread closing w/ status %s",
                               statusToString(status).c_str());
                break;
            }
        }
    } else {
        ALOGE("Connection failed to init, closing with status %s",
              statusToString(setupResult.status).c_str());
    }

    sp<RpcSession::EventListener> listener;
    {
        std::lock_guard<std::mutex> _l(session->mMutex);
        auto it = session->mConnections.mThreads.find(std::this_thread::get_id());
        LOG_ALWAYS_FATAL_IF(it == session->mConnections.mThreads.end());
        it->second.detach();
        session->mConnections.mThreads.erase(it);

        listener = session->mEventListener.promote();
    }

    // done after all cleanup, since session shutdown progresses via callbacks here
    if (connection != nullptr) {
        LOG_ALWAYS_FATAL_IF(!session->removeIncomingConnection(connection),
                            "bad state: connection object guaranteed to be in list");
    }

    session = nullptr;

    if (listener != nullptr) {
        listener->onSessionIncomingThreadEnded();
    }
}

sp<RpcServer> RpcSession::server() {
    RpcServer* unsafeServer = mForServer.unsafe_get();
    sp<RpcServer> server = mForServer.promote();

    LOG_ALWAYS_FATAL_IF((unsafeServer == nullptr) != (server == nullptr),
                        "wp<> is to avoid strong cycle only");
    return server;
}

status_t RpcSession::setupClient(const std::function<status_t(const std::vector<uint8_t>& sessionId,
                                                              bool incoming)>& connectAndInit) {
    {
        std::lock_guard<std::mutex> _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mConnections.mOutgoing.size() != 0,
                            "Must only set up session once, but already has %zu clients",
                            mConnections.mOutgoing.size());
    }

    if (auto status = initShutdownTrigger(); status != OK) return status;

    auto oldProtocolVersion = mProtocolVersion;
    auto cleanup = base::ScopeGuard([&] {
        // if any threads are started, shut them down
        (void)shutdownAndWait(true);

        mShutdownListener = nullptr;
        mEventListener.clear();

        mId.clear();

        mShutdownTrigger = nullptr;
        mRpcBinderState = std::make_unique<RpcState>();

        // protocol version may have been downgraded - if we reuse this object
        // to connect to another server, force that server to request a
        // downgrade again
        mProtocolVersion = oldProtocolVersion;

        mConnections = {};
    });

    if (status_t status = connectAndInit({}, false /*incoming*/); status != OK) return status;

    {
        ExclusiveConnection connection;
        if (status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                        ConnectionUse::CLIENT, &connection);
            status != OK)
            return status;

        uint32_t version;
        if (status_t status =
                    state()->readNewSessionResponse(connection.get(),
                                                    sp<RpcSession>::fromExisting(this), &version);
            status != OK)
            return status;
        if (!setProtocolVersion(version)) return BAD_VALUE;
    }

    // TODO(b/189955605): we should add additional sessions dynamically
    // instead of all at once.
    size_t numThreadsAvailable;
    if (status_t status = getRemoteMaxThreads(&numThreadsAvailable); status != OK) {
        ALOGE("Could not get max threads after initial session setup: %s",
              statusToString(status).c_str());
        return status;
    }

    if (status_t status = readId(); status != OK) {
        ALOGE("Could not get session id after initial session setup: %s",
              statusToString(status).c_str());
        return status;
    }

    size_t outgoingThreads = std::min(numThreadsAvailable, mMaxOutgoingThreads);
    ALOGI_IF(outgoingThreads != numThreadsAvailable,
             "Server hints client to start %zu outgoing threads, but client will only start %zu "
             "because it is preconfigured to start at most %zu outgoing threads.",
             numThreadsAvailable, outgoingThreads, mMaxOutgoingThreads);

    // TODO(b/189955605): we should add additional sessions dynamically
    // instead of all at once - the other side should be responsible for setting
    // up additional connections. We need to create at least one (unless 0 are
    // requested to be set) in order to allow the other side to reliably make
    // any requests at all.

    // we've already set up one client
    LOG_RPC_DETAIL("RpcSession::setupClient() instantiating %zu outgoing (server max: %zu) and %zu "
                   "incoming threads",
                   outgoingThreads, numThreadsAvailable, mMaxIncomingThreads);
    for (size_t i = 0; i + 1 < outgoingThreads; i++) {
        if (status_t status = connectAndInit(mId, false /*incoming*/); status != OK) return status;
    }

    for (size_t i = 0; i < mMaxIncomingThreads; i++) {
        if (status_t status = connectAndInit(mId, true /*incoming*/); status != OK) return status;
    }

    cleanup.Disable();

    return OK;
}

status_t RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
    return setupClient([&](const std::vector<uint8_t>& sessionId, bool incoming) {
        return setupOneSocketConnection(addr, sessionId, incoming);
    });
}

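// A summary of the connect sequence below (descriptive only, no additional
// behavior): the socket is created non-blocking so a pending connect() can be
// interrupted by mShutdownTrigger; EAGAIN/EINPROGRESS is resolved by polling
// for POLLOUT and reading SO_ERROR, and ECONNRESET is retried a bounded
// number of times.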
status_t RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr,
                                              const std::vector<uint8_t>& sessionId,
                                              bool incoming) {
    for (size_t tries = 0; tries < 5; tries++) {
        if (tries > 0) usleep(10000);

        unique_fd serverFd(TEMP_FAILURE_RETRY(
                socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)));
        if (serverFd == -1) {
            int savedErrno = errno;
            ALOGE("Could not create socket at %s: %s", addr.toString().c_str(),
                  strerror(savedErrno));
            return -savedErrno;
        }

        if (0 != TEMP_FAILURE_RETRY(connect(serverFd.get(), addr.addr(), addr.addrSize()))) {
            int connErrno = errno;
            if (connErrno == EAGAIN || connErrno == EINPROGRESS) {
                // For non-blocking sockets, connect() may return EAGAIN (for unix domain socket) or
                // EINPROGRESS (for others). Call poll() and getsockopt() to get the error.
                status_t pollStatus = mShutdownTrigger->triggerablePoll(serverFd, POLLOUT);
                if (pollStatus != OK) {
                    ALOGE("Could not POLLOUT after connect() on non-blocking socket: %s",
                          statusToString(pollStatus).c_str());
                    return pollStatus;
                }
                // Set connErrno to the errno that connect() would have set if the fd were blocking.
                socklen_t connErrnoLen = sizeof(connErrno);
                int ret =
                        getsockopt(serverFd.get(), SOL_SOCKET, SO_ERROR, &connErrno, &connErrnoLen);
                if (ret == -1) {
                    int savedErrno = errno;
                    ALOGE("Could not getsockopt() after connect() on non-blocking socket: %s. "
                          "(Original error from connect() is: %s)",
                          strerror(savedErrno), strerror(connErrno));
                    return -savedErrno;
                }
                // Retrieved the real connErrno as if connect() was called with a blocking socket
                // fd. Continue checking connErrno.
            }
            if (connErrno == ECONNRESET) {
                ALOGW("Connection reset on %s", addr.toString().c_str());
                continue;
            }
            // connErrno could be zero if getsockopt determines so. Hence zero-check again.
            if (connErrno != 0) {
                ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(),
                      strerror(connErrno));
                return -connErrno;
            }
        }
        LOG_RPC_DETAIL("Client socket at %s with fd %d", addr.toString().c_str(), serverFd.get());

        return initAndAddConnection(std::move(serverFd), sessionId, incoming);
    }

    ALOGE("Ran out of retries to connect to %s", addr.toString().c_str());
    return UNKNOWN_ERROR;
}

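// A summary of the client-side handshake performed below (descriptive only):
// send an RpcConnectionHeader carrying the requested protocol version, an
// option bit marking the connection as incoming or outgoing, and the session
// ID length; when joining an existing session, the session ID bytes follow.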
status_t RpcSession::initAndAddConnection(unique_fd fd, const std::vector<uint8_t>& sessionId,
                                          bool incoming) {
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger == nullptr);
    auto server = mCtx->newTransport(std::move(fd), mShutdownTrigger.get());
    if (server == nullptr) {
        ALOGE("%s: Unable to set up RpcTransport", __PRETTY_FUNCTION__);
        return UNKNOWN_ERROR;
    }

    LOG_RPC_DETAIL("Socket at client with RpcTransport %p", server.get());

    if (sessionId.size() > std::numeric_limits<uint16_t>::max()) {
        ALOGE("Session ID too big %zu", sessionId.size());
        return BAD_VALUE;
    }

    RpcConnectionHeader header{
            .version = mProtocolVersion.value_or(RPC_WIRE_PROTOCOL_VERSION),
            .options = 0,
            .sessionIdSize = static_cast<uint16_t>(sessionId.size()),
    };

    if (incoming) {
        header.options |= RPC_CONNECTION_OPTION_INCOMING;
    }

    iovec headerIov{&header, sizeof(header)};
    auto sendHeaderStatus =
            server->interruptableWriteFully(mShutdownTrigger.get(), &headerIov, 1, {});
    if (sendHeaderStatus != OK) {
        ALOGE("Could not write connection header to socket: %s",
              statusToString(sendHeaderStatus).c_str());
        return sendHeaderStatus;
    }

    if (sessionId.size() > 0) {
        iovec sessionIov{const_cast<void*>(static_cast<const void*>(sessionId.data())),
                         sessionId.size()};
        auto sendSessionIdStatus =
                server->interruptableWriteFully(mShutdownTrigger.get(), &sessionIov, 1, {});
        if (sendSessionIdStatus != OK) {
            ALOGE("Could not write session ID ('%s') to socket: %s",
                  base::HexString(sessionId.data(), sessionId.size()).c_str(),
                  statusToString(sendSessionIdStatus).c_str());
            return sendSessionIdStatus;
        }
    }

    LOG_RPC_DETAIL("Socket at client: header sent");

    if (incoming) {
        return addIncomingConnection(std::move(server));
    } else {
        return addOutgoingConnection(std::move(server), true /*init*/);
    }
}

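// A note on the handoff below: the spawning thread constructs the std::thread
// object, but ownership of that object must be transferred to the spawned
// thread itself (via preJoinThreadOwnership) before this function returns,
// hence the condition variable round trip.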
status_t RpcSession::addIncomingConnection(std::unique_ptr<RpcTransport> rpcTransport) {
    std::mutex mutex;
    std::condition_variable joinCv;
    std::unique_lock<std::mutex> lock(mutex);
    std::thread thread;
    sp<RpcSession> thiz = sp<RpcSession>::fromExisting(this);
    bool ownershipTransferred = false;
    thread = std::thread([&]() {
        std::unique_lock<std::mutex> threadLock(mutex);
        std::unique_ptr<RpcTransport> movedRpcTransport = std::move(rpcTransport);
        // NOLINTNEXTLINE(performance-unnecessary-copy-initialization)
        sp<RpcSession> session = thiz;
        session->preJoinThreadOwnership(std::move(thread));

        // only continue once we have a response or the connection fails
        auto setupResult = session->preJoinSetup(std::move(movedRpcTransport));

        ownershipTransferred = true;
        threadLock.unlock();
        joinCv.notify_one();
        // do not use & vars below

        RpcSession::join(std::move(session), std::move(setupResult));
    });
    joinCv.wait(lock, [&] { return ownershipTransferred; });
    LOG_ALWAYS_FATAL_IF(!ownershipTransferred);
    return OK;
}

status_t RpcSession::initShutdownTrigger() {
    // The first client connection is being added, but setForServer was not called, so
    // we are initializing for a client.
    if (mShutdownTrigger == nullptr) {
        mShutdownTrigger = FdTrigger::make();
        mEventListener = mShutdownListener = sp<WaitForShutdownListener>::make();
        if (mShutdownTrigger == nullptr) return INVALID_OPERATION;
    }
    return OK;
}

status_t RpcSession::addOutgoingConnection(std::unique_ptr<RpcTransport> rpcTransport, bool init) {
    sp<RpcConnection> connection = sp<RpcConnection>::make();
    {
        std::lock_guard<std::mutex> _l(mMutex);
        connection->rpcTransport = std::move(rpcTransport);
        connection->exclusiveTid = gettid();
        mConnections.mOutgoing.push_back(connection);
    }

    status_t status = OK;
    if (init) {
        status =
                mRpcBinderState->sendConnectionInit(connection, sp<RpcSession>::fromExisting(this));
    }

    {
        std::lock_guard<std::mutex> _l(mMutex);
        connection->exclusiveTid = std::nullopt;
    }

    return status;
}

bool RpcSession::setForServer(const wp<RpcServer>& server, const wp<EventListener>& eventListener,
                              const std::vector<uint8_t>& sessionId,
                              const sp<IBinder>& sessionSpecificRoot) {
    LOG_ALWAYS_FATAL_IF(mForServer != nullptr);
    LOG_ALWAYS_FATAL_IF(server == nullptr);
    LOG_ALWAYS_FATAL_IF(mEventListener != nullptr);
    LOG_ALWAYS_FATAL_IF(eventListener == nullptr);
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger != nullptr);

    mShutdownTrigger = FdTrigger::make();
    if (mShutdownTrigger == nullptr) return false;

    mId = sessionId;
    mForServer = server;
    mEventListener = eventListener;
    mSessionSpecificRootObject = sessionSpecificRoot;
    return true;
}

sp<RpcSession::RpcConnection> RpcSession::assignIncomingConnectionToThisThread(
        std::unique_ptr<RpcTransport> rpcTransport) {
    std::lock_guard<std::mutex> _l(mMutex);

    if (mConnections.mIncoming.size() >= mMaxIncomingThreads) {
        ALOGE("Cannot add thread to session with %zu threads (max is set to %zu)",
              mConnections.mIncoming.size(), mMaxIncomingThreads);
        return nullptr;
    }

    // Don't accept any more connections; some have shut down. Usually this
    // happens when new connections are still being established as part of a
    // very short-lived session which shuts down after it has already started
    // accepting new connections.
    if (mConnections.mIncoming.size() < mConnections.mMaxIncoming) {
        return nullptr;
    }

    sp<RpcConnection> session = sp<RpcConnection>::make();
    session->rpcTransport = std::move(rpcTransport);
    session->exclusiveTid = gettid();

    mConnections.mIncoming.push_back(session);
    mConnections.mMaxIncoming = mConnections.mIncoming.size();

    return session;
}

bool RpcSession::removeIncomingConnection(const sp<RpcConnection>& connection) {
    std::unique_lock<std::mutex> _l(mMutex);
    if (auto it =
                std::find(mConnections.mIncoming.begin(), mConnections.mIncoming.end(), connection);
        it != mConnections.mIncoming.end()) {
        mConnections.mIncoming.erase(it);
        if (mConnections.mIncoming.size() == 0) {
            sp<EventListener> listener = mEventListener.promote();
            if (listener) {
                _l.unlock();
                listener->onSessionAllIncomingThreadsEnded(sp<RpcSession>::fromExisting(this));
            }
        }
        return true;
    }
    return false;
}

std::vector<uint8_t> RpcSession::getCertificate(RpcCertificateFormat format) {
    return mCtx->getCertificate(format);
}

status_t RpcSession::ExclusiveConnection::find(const sp<RpcSession>& session, ConnectionUse use,
                                               ExclusiveConnection* connection) {
    connection->mSession = session;
    connection->mConnection = nullptr;
    connection->mReentrant = false;

    pid_t tid = gettid();
    std::unique_lock<std::mutex> _l(session->mMutex);

    session->mConnections.mWaitingThreads++;
    while (true) {
        sp<RpcConnection> exclusive;
        sp<RpcConnection> available;

        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated connection if available
        findConnection(tid, &exclusive, &available, session->mConnections.mOutgoing,
                       session->mConnections.mOutgoingOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mIncoming is excluded below.
        //
        // Imagine we have more than one thread in play, and a single thread
        // sends a synchronous, then an asynchronous command. Imagine the
        // asynchronous command is sent on the first client connection. Then, if
        // we naively send a synchronous command to that same connection, the
        // thread on the far side might be busy processing the asynchronous
        // command. So, we move to considering the second available thread
        // for subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            session->mConnections.mOutgoingOffset = (session->mConnections.mOutgoingOffset + 1) %
                    session->mConnections.mOutgoing.size();
        }

        // USE SERVING SOCKET (e.g. nested transaction)
        if (use != ConnectionUse::CLIENT_ASYNC) {
            sp<RpcConnection> exclusiveIncoming;
            // server connections are always assigned to a thread
            findConnection(tid, &exclusiveIncoming, nullptr /*available*/,
                           session->mConnections.mIncoming, 0 /* index hint */);

            // asynchronous calls cannot be nested, but we currently allow ref count
            // calls to be nested (so that you can use this without having extra
            // threads). Note 'drainCommands' is used so that these ref counts can't
            // build up.
            if (exclusiveIncoming != nullptr) {
                if (exclusiveIncoming->allowNested) {
                    // guaranteed to be processed as nested command
                    exclusive = exclusiveIncoming;
                } else if (use == ConnectionUse::CLIENT_REFCOUNT && available == nullptr) {
                    // prefer available socket, but if we don't have one, don't
                    // wait for one
                    exclusive = exclusiveIncoming;
                }
            }
        }

        // if our thread is already using a connection, prioritize using that
        if (exclusive != nullptr) {
            connection->mConnection = exclusive;
            connection->mReentrant = true;
            break;
        } else if (available != nullptr) {
            connection->mConnection = available;
            connection->mConnection->exclusiveTid = tid;
            break;
        }

        if (session->mConnections.mOutgoing.size() == 0) {
            ALOGE("Session has no outgoing connections. This is required for an RPC server to make "
                  "any non-nested (e.g. oneway or on another thread) calls. Use code request "
                  "reason: %d. Incoming connections: %zu. %s.",
                  static_cast<int>(use), session->mConnections.mIncoming.size(),
                  (session->server()
                           ? "This is a server session, so see RpcSession::setMaxIncomingThreads "
                             "for the corresponding client"
                           : "This is a client session, so see RpcSession::setMaxOutgoingThreads "
                             "for this client or RpcServer::setMaxThreads for the corresponding "
                             "server"));
            return WOULD_BLOCK;
        }

        LOG_RPC_DETAIL("No available connections (have %zu clients and %zu servers). Waiting...",
                       session->mConnections.mOutgoing.size(),
                       session->mConnections.mIncoming.size());
        session->mAvailableConnectionCv.wait(_l);
    }
    session->mConnections.mWaitingThreads--;

    return OK;
}

void RpcSession::ExclusiveConnection::findConnection(pid_t tid, sp<RpcConnection>* exclusive,
                                                     sp<RpcConnection>* available,
                                                     std::vector<sp<RpcConnection>>& sockets,
                                                     size_t socketsIndexHint) {
    LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
                        "Bad index %zu >= %zu", socketsIndexHint, sockets.size());

    if (*exclusive != nullptr) return; // consistent with break below

    for (size_t i = 0; i < sockets.size(); i++) {
        sp<RpcConnection>& socket = sockets[(i + socketsIndexHint) % sockets.size()];

        // take first available connection (intuition = caching)
        if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
            *available = socket;
            continue;
        }

        // though, prefer to take a connection that is already in use by this thread
        // (nested transactions)
        if (exclusive && socket->exclusiveTid == tid) {
            *exclusive = socket;
            break; // consistent with return above
        }
    }
}

RpcSession::ExclusiveConnection::~ExclusiveConnection() {
    // reentrant use of a connection means something less deep in the call stack
    // is using this fd, and it retains the right to it. So, we don't give up
    // exclusive ownership, and no thread is freed.
    if (!mReentrant && mConnection != nullptr) {
        std::unique_lock<std::mutex> _l(mSession->mMutex);
        mConnection->exclusiveTid = std::nullopt;
        if (mSession->mConnections.mWaitingThreads > 0) {
            _l.unlock();
            mSession->mAvailableConnectionCv.notify_one();
        }
    }
}

} // namespace android