/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RpcSession"

#include <binder/RpcSession.h>

#include <dlfcn.h>
#include <inttypes.h>
#include <netinet/tcp.h>
#include <poll.h>
#include <unistd.h>

#include <string_view>

#include <binder/BpBinder.h>
#include <binder/Functional.h>
#include <binder/Parcel.h>
#include <binder/RpcServer.h>
#include <binder/RpcTransportRaw.h>
#include <binder/Stability.h>
#include <utils/String8.h>

#include "BuildFlags.h"
#include "FdTrigger.h"
#include "OS.h"
#include "RpcSocketAddress.h"
#include "RpcState.h"
#include "RpcTransportUtils.h"
#include "RpcWireFormat.h"
#include "Utils.h"

#if defined(__ANDROID__) && !defined(__ANDROID_RECOVERY__)
#include <jni.h>
extern "C" JavaVM* AndroidRuntimeGetJavaVM();
#endif

namespace android {

using namespace android::binder::impl;
using android::binder::borrowed_fd;
using android::binder::unique_fd;

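// A minimal sketch of typical client-side use, based only on the APIs in this
// file (the socket path below is illustrative, not a real endpoint):
//
//     sp<RpcSession> session = RpcSession::make();
//     if (session->setupUnixDomainClient("/path/to/socket") != OK) {
//         // handle connection failure
//     }
//     sp<IBinder> root = session->getRootObject();
//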
RpcSession::RpcSession(std::unique_ptr<RpcTransportCtx> ctx) : mCtx(std::move(ctx)) {
    LOG_RPC_DETAIL("RpcSession created %p", this);

    mRpcBinderState = std::make_unique<RpcState>();
}
RpcSession::~RpcSession() {
    LOG_RPC_DETAIL("RpcSession destroyed %p", this);

    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mConnections.mIncoming.size() != 0,
                        "Should not be able to destroy a session with servers in use.");
}

sp<RpcSession> RpcSession::make() {
    // Default is without TLS.
    return make(binder::os::makeDefaultRpcTransportCtxFactory());
}

sp<RpcSession> RpcSession::make(std::unique_ptr<RpcTransportCtxFactory> rpcTransportCtxFactory) {
    auto ctx = rpcTransportCtxFactory->newClientCtx();
    if (ctx == nullptr) return nullptr;
    return sp<RpcSession>::make(std::move(ctx));
}

void RpcSession::setMaxIncomingThreads(size_t threads) {
    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mStartedSetup,
                        "Must set max incoming threads before setting up connections");
    mMaxIncomingThreads = threads;
}

size_t RpcSession::getMaxIncomingThreads() {
    RpcMutexLockGuard _l(mMutex);
    return mMaxIncomingThreads;
}

void RpcSession::setMaxOutgoingConnections(size_t connections) {
    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mStartedSetup,
                        "Must set max outgoing connections before setting up connections");
    mMaxOutgoingConnections = connections;
}

size_t RpcSession::getMaxOutgoingThreads() {
    RpcMutexLockGuard _l(mMutex);
    return mMaxOutgoingConnections;
}

bool RpcSession::setProtocolVersionInternal(uint32_t version, bool checkStarted) {
    if (!RpcState::validateProtocolVersion(version)) {
        return false;
    }

    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(checkStarted && mStartedSetup,
                        "Must set protocol version before setting up connections");
    if (mProtocolVersion && version > *mProtocolVersion) {
        ALOGE("Cannot upgrade explicitly capped protocol version %u to newer version %u",
              *mProtocolVersion, version);
        return false;
    }

    mProtocolVersion = version;
    return true;
}

bool RpcSession::setProtocolVersion(uint32_t version) {
    return setProtocolVersionInternal(version, true);
}

std::optional<uint32_t> RpcSession::getProtocolVersion() {
    RpcMutexLockGuard _l(mMutex);
    return mProtocolVersion;
}

void RpcSession::setFileDescriptorTransportMode(FileDescriptorTransportMode mode) {
    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mStartedSetup,
                        "Must set file descriptor transport mode before setting up connections");
    mFileDescriptorTransportMode = mode;
}

RpcSession::FileDescriptorTransportMode RpcSession::getFileDescriptorTransportMode() {
    return mFileDescriptorTransportMode;
}

status_t RpcSession::setupUnixDomainClient(const char* path) {
    return setupSocketClient(UnixSocketAddress(path));
}

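// Design note: for the bootstrap client below, each connection is created
// locally as a socketpair, and the server half is passed to the remote process
// over the existing bootstrap transport (Unix domain sockets can carry file
// descriptors), so no per-connection address is needed.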
status_t RpcSession::setupUnixDomainSocketBootstrapClient(unique_fd bootstrapFd) {
    mBootstrapTransport =
            mCtx->newTransport(RpcTransportFd(std::move(bootstrapFd)), mShutdownTrigger.get());
    return setupClient([&](const std::vector<uint8_t>& sessionId, bool incoming) {
        int socks[2];
        if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0, socks) < 0) {
            int savedErrno = errno;
            ALOGE("Failed socketpair: %s", strerror(savedErrno));
            return -savedErrno;
        }
        unique_fd clientFd(socks[0]), serverFd(socks[1]);

        int zero = 0;
        iovec iov{&zero, sizeof(zero)};
        std::vector<std::variant<unique_fd, borrowed_fd>> fds;
        fds.push_back(std::move(serverFd));

        status_t status = mBootstrapTransport->interruptableWriteFully(mShutdownTrigger.get(), &iov,
                                                                       1, std::nullopt, &fds);
        if (status != OK) {
            ALOGE("Failed to send fd over bootstrap transport: %s", statusToString(status).c_str());
            return status;
        }

        return initAndAddConnection(RpcTransportFd(std::move(clientFd)), sessionId, incoming);
    });
}

status_t RpcSession::setupVsockClient(unsigned int cid, unsigned int port) {
    return setupSocketClient(VsockSocketAddress(cid, port));
}

status_t RpcSession::setupInetClient(const char* addr, unsigned int port) {
    auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
    if (aiStart == nullptr) return UNKNOWN_ERROR;
    for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
        InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port);
        if (status_t status = setupSocketClient(socketAddress); status == OK) return OK;
    }
    ALOGE("None of the socket addresses resolved for %s:%u could be added as an inet client.", addr,
          port);
    return NAME_NOT_FOUND;
}

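// Design note: the preconnected fd below is consumed by the first connection;
// for each additional connection the session needs, the request() callback is
// invoked to supply a fresh, already-connected fd.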
status_t RpcSession::setupPreconnectedClient(unique_fd fd, std::function<unique_fd()>&& request) {
    return setupClient([&, fd = std::move(fd),
                        request = std::move(request)](const std::vector<uint8_t>& sessionId,
                                                      bool incoming) mutable -> status_t {
        if (!fd.ok()) {
            fd = request();
            if (!fd.ok()) return BAD_VALUE;
        }
        if (status_t res = binder::os::setNonBlocking(fd); res != OK) return res;

        RpcTransportFd transportFd(std::move(fd));
        status_t status = initAndAddConnection(std::move(transportFd), sessionId, incoming);
        fd = unique_fd(); // Explicitly reset after move to avoid analyzer warning.
        return status;
    });
}

status_t RpcSession::addNullDebuggingClient() {
    // Note: only works on raw sockets.
    if (auto status = initShutdownTrigger(); status != OK) return status;

    unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));

    if (!serverFd.ok()) {
        int savedErrno = errno;
        ALOGE("Could not connect to /dev/null: %s", strerror(savedErrno));
        return -savedErrno;
    }

    RpcTransportFd transportFd(std::move(serverFd));
    auto server = mCtx->newTransport(std::move(transportFd), mShutdownTrigger.get());
    if (server == nullptr) {
        ALOGE("Unable to set up RpcTransport");
        return UNKNOWN_ERROR;
    }
    return addOutgoingConnection(std::move(server), false);
}

sp<IBinder> RpcSession::getRootObject() {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return nullptr;
    return state()->getRootObject(connection.get(), sp<RpcSession>::fromExisting(this));
}

status_t RpcSession::getRemoteMaxThreads(size_t* maxThreads) {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;
    return state()->getMaxThreads(connection.get(), sp<RpcSession>::fromExisting(this), maxThreads);
}

bool RpcSession::shutdownAndWait(bool wait) {
    RpcMutexUniqueLock _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger == nullptr, "Shutdown trigger not installed");

    mShutdownTrigger->trigger();

    if (wait) {
        LOG_ALWAYS_FATAL_IF(mShutdownListener == nullptr, "Shutdown listener not installed");
        mShutdownListener->waitForShutdown(_l, sp<RpcSession>::fromExisting(this));

        LOG_ALWAYS_FATAL_IF(!mConnections.mThreads.empty(), "Shutdown failed");
    }

    _l.unlock();

    if (status_t res = state()->sendObituaries(sp<RpcSession>::fromExisting(this)); res != OK) {
        ALOGE("Failed to send obituaries as the RpcSession is shutting down: %s",
              statusToString(res).c_str());
    }

    mRpcBinderState->clear();

    return true;
}

status_t RpcSession::transact(const sp<IBinder>& binder, uint32_t code, const Parcel& data,
                              Parcel* reply, uint32_t flags) {
    ExclusiveConnection connection;
    status_t status =
            ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                      (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
                                                                     : ConnectionUse::CLIENT,
                                      &connection);
    if (status != OK) return status;
    return state()->transact(connection.get(), binder, code, data,
                             sp<RpcSession>::fromExisting(this), reply, flags);
}

status_t RpcSession::sendDecStrong(const BpBinder* binder) {
    // target is 0 because this is used to free BpBinder objects
    return sendDecStrongToTarget(binder->getPrivateAccessor().rpcAddress(), 0 /*target*/);
}

status_t RpcSession::sendDecStrongToTarget(uint64_t address, size_t target) {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT_REFCOUNT, &connection);
    if (status != OK) return status;
    return state()->sendDecStrongToTarget(connection.get(), sp<RpcSession>::fromExisting(this),
                                          address, target);
}

status_t RpcSession::readId() {
    {
        RpcMutexLockGuard _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
    }

    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;

    status = state()->getSessionId(connection.get(), sp<RpcSession>::fromExisting(this), &mId);
    if (status != OK) return status;

    LOG_RPC_DETAIL("RpcSession %p has id %s", this, HexString(mId.data(), mId.size()).c_str());
    return OK;
}

void RpcSession::WaitForShutdownListener::onSessionAllIncomingThreadsEnded(
        const sp<RpcSession>& session) {
    (void)session;
}

void RpcSession::WaitForShutdownListener::onSessionIncomingThreadEnded() {
    mShutdownCount += 1;
    mCv.notify_all();
}

void RpcSession::WaitForShutdownListener::waitForShutdown(RpcMutexUniqueLock& lock,
                                                          const sp<RpcSession>& session) {
    while (mShutdownCount < session->mConnections.mMaxIncoming) {
        if (std::cv_status::timeout == mCv.wait_for(lock, std::chrono::seconds(1))) {
            ALOGE("Waiting for RpcSession to shut down (1s w/o progress): %zu incoming connections "
                  "still open, %zu/%zu fully shut down.",
                  session->mConnections.mIncoming.size(), mShutdownCount.load(),
                  session->mConnections.mMaxIncoming);
        }
    }
}

void RpcSession::preJoinThreadOwnership(RpcMaybeThread thread) {
    LOG_ALWAYS_FATAL_IF(thread.get_id() != rpc_this_thread::get_id(), "Must own this thread");

    {
        RpcMutexLockGuard _l(mMutex);
        mConnections.mThreads[thread.get_id()] = std::move(thread);
    }
}

RpcSession::PreJoinSetupResult RpcSession::preJoinSetup(
        std::unique_ptr<RpcTransport> rpcTransport) {
    // The connection must be registered before we start reading commands, so
    // that arbitrary client code executing those commands can make nested
    // calls (we can't only read from it).
    sp<RpcConnection> connection = assignIncomingConnectionToThisThread(std::move(rpcTransport));

    status_t status;

    if (connection == nullptr) {
        status = DEAD_OBJECT;
    } else {
        status =
                mRpcBinderState->readConnectionInit(connection, sp<RpcSession>::fromExisting(this));
    }

    return PreJoinSetupResult{
            .connection = std::move(connection),
            .status = status,
    };
}

namespace {
#if !defined(__ANDROID__) || defined(__ANDROID_RECOVERY__)
class JavaThreadAttacher {};
#else
// RAII object for attaching / detaching the current thread to the JVM if the
// Android Runtime exists. If the Android Runtime doesn't exist, it is a no-op.
class JavaThreadAttacher {
public:
    JavaThreadAttacher() {
        // Use dlsym to find AndroidRuntimeGetJavaVM because libandroid_runtime is loaded after
        // libbinder.
        auto vm = getJavaVM();
        if (vm == nullptr) return;

        char threadName[16];
        if (0 != pthread_getname_np(pthread_self(), threadName, sizeof(threadName))) {
            constexpr const char* defaultThreadName = "UnknownRpcSessionThread";
            memcpy(threadName, defaultThreadName,
                   std::min<size_t>(sizeof(threadName), strlen(defaultThreadName) + 1));
        }
        LOG_RPC_DETAIL("Attaching current thread %s to JVM", threadName);
        JavaVMAttachArgs args;
        args.version = JNI_VERSION_1_2;
        args.name = threadName;
        args.group = nullptr;
        JNIEnv* env;

        LOG_ALWAYS_FATAL_IF(vm->AttachCurrentThread(&env, &args) != JNI_OK,
                            "Cannot attach thread %s to JVM", threadName);
        mAttached = true;
    }
    ~JavaThreadAttacher() {
        if (!mAttached) return;
        auto vm = getJavaVM();
        LOG_ALWAYS_FATAL_IF(vm == nullptr,
                            "Unable to detach thread. No JavaVM, but it was present before!");

        LOG_RPC_DETAIL("Detaching current thread from JVM");
        int ret = vm->DetachCurrentThread();
        if (ret == JNI_OK) {
            mAttached = false;
        } else {
            ALOGW("Unable to detach current thread from JVM (%d)", ret);
        }
    }

private:
    JavaThreadAttacher(const JavaThreadAttacher&) = delete;
    void operator=(const JavaThreadAttacher&) = delete;

    bool mAttached = false;

    static JavaVM* getJavaVM() {
        static auto fn = reinterpret_cast<decltype(&AndroidRuntimeGetJavaVM)>(
                dlsym(RTLD_DEFAULT, "AndroidRuntimeGetJavaVM"));
        if (fn == nullptr) return nullptr;
        return fn();
    }
};
#endif
} // namespace

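// Each incoming connection is serviced by a dedicated thread, which loops here
// executing commands until the transport errors out or the session shuts down,
// then unregisters itself from the session.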
void RpcSession::join(sp<RpcSession>&& session, PreJoinSetupResult&& setupResult) {
    sp<RpcConnection>& connection = setupResult.connection;

    if (setupResult.status == OK) {
        LOG_ALWAYS_FATAL_IF(!connection, "must have connection if setup succeeded");
        [[maybe_unused]] JavaThreadAttacher javaThreadAttacher;
        while (true) {
            status_t status = session->state()->getAndExecuteCommand(connection, session,
                                                                     RpcState::CommandType::ANY);
            if (status != OK) {
                LOG_RPC_DETAIL("Binder connection thread closing w/ status %s",
                               statusToString(status).c_str());
                break;
            }
        }
    } else {
        ALOGE("Connection failed to init, closing with status %s",
              statusToString(setupResult.status).c_str());
    }

    sp<RpcSession::EventListener> listener;
    {
        RpcMutexLockGuard _l(session->mMutex);
        auto it = session->mConnections.mThreads.find(rpc_this_thread::get_id());
        LOG_ALWAYS_FATAL_IF(it == session->mConnections.mThreads.end());
        it->second.detach();
        session->mConnections.mThreads.erase(it);

        listener = session->mEventListener.promote();
    }

    // done after all cleanup, since session shutdown progresses via callbacks here
    if (connection != nullptr) {
        LOG_ALWAYS_FATAL_IF(!session->removeIncomingConnection(connection),
                            "bad state: connection object guaranteed to be in list");
    }

    session = nullptr;

    if (listener != nullptr) {
        listener->onSessionIncomingThreadEnded();
    }
}

sp<RpcServer> RpcSession::server() {
    RpcServer* unsafeServer = mForServer.unsafe_get();
    sp<RpcServer> server = mForServer.promote();

    LOG_ALWAYS_FATAL_IF((unsafeServer == nullptr) != (server == nullptr),
                        "wp<> is to avoid strong cycle only");
    return server;
}

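// Client setup sequence: establish the first connection (with an empty session
// id), read the server's new-session response to settle on a protocol version,
// query the remote max thread count and the session id, then bring up the
// remaining outgoing connections and any requested incoming connections.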
template <typename Fn,
          typename /* = std::enable_if_t<std::is_invocable_r_v<
                      status_t, Fn, const std::vector<uint8_t>&, bool>> */>
status_t RpcSession::setupClient(Fn&& connectAndInit) {
    {
        RpcMutexLockGuard _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mStartedSetup, "Must only setup session once");
        mStartedSetup = true;

        if constexpr (!kEnableRpcThreads) {
            LOG_ALWAYS_FATAL_IF(mMaxIncomingThreads > 0,
                                "Incoming threads are not supported on single-threaded libbinder");
            // mMaxIncomingThreads should not change from here to its use below,
            // since we set mStartedSetup==true and setMaxIncomingThreads checks
            // for that
        }
    }

    if (auto status = initShutdownTrigger(); status != OK) return status;

    auto oldProtocolVersion = mProtocolVersion;
    auto cleanup = make_scope_guard([&] {
        // if any threads are started, shut them down
        (void)shutdownAndWait(true);

        mShutdownListener = nullptr;
        mEventListener.clear();

        mId.clear();

        mShutdownTrigger = nullptr;
        mRpcBinderState = std::make_unique<RpcState>();

        // protocol version may have been downgraded - if we reuse this object
        // to connect to another server, force that server to request a
        // downgrade again
        mProtocolVersion = oldProtocolVersion;

        mConnections = {};

        // clear mStartedSetup so that we can reuse this RpcSession
        mStartedSetup = false;
    });

    if (status_t status = connectAndInit({}, false /*incoming*/); status != OK) return status;

    {
        ExclusiveConnection connection;
        if (status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                        ConnectionUse::CLIENT, &connection);
            status != OK)
            return status;

        uint32_t version;
        if (status_t status =
                    state()->readNewSessionResponse(connection.get(),
                                                    sp<RpcSession>::fromExisting(this), &version);
            status != OK)
            return status;
        if (!setProtocolVersionInternal(version, false)) return BAD_VALUE;
    }

    // TODO(b/189955605): we should add additional sessions dynamically
    // instead of all at once.
    size_t numThreadsAvailable;
    if (status_t status = getRemoteMaxThreads(&numThreadsAvailable); status != OK) {
        ALOGE("Could not get max threads after initial session setup: %s",
              statusToString(status).c_str());
        return status;
    }

    if (status_t status = readId(); status != OK) {
        ALOGE("Could not get session id after initial session setup: %s",
              statusToString(status).c_str());
        return status;
    }

    size_t outgoingConnections = std::min(numThreadsAvailable, mMaxOutgoingConnections);
    ALOGI_IF(outgoingConnections != numThreadsAvailable,
             "Server hints client to start %zu outgoing threads, but client will only start %zu "
             "because it is preconfigured to start at most %zu outgoing threads.",
             numThreadsAvailable, outgoingConnections, mMaxOutgoingConnections);

    // TODO(b/189955605): we should add additional sessions dynamically
    // instead of all at once - the other side should be responsible for setting
    // up additional connections. We need to create at least one (unless 0 are
    // requested to be set) in order to allow the other side to reliably make
    // any requests at all.

    // we've already set up one client connection
    LOG_RPC_DETAIL("RpcSession::setupClient() instantiating %zu outgoing connections (server max: "
                   "%zu) and %zu incoming threads",
                   outgoingConnections, numThreadsAvailable, mMaxIncomingThreads);
    for (size_t i = 0; i + 1 < outgoingConnections; i++) {
        if (status_t status = connectAndInit(mId, false /*incoming*/); status != OK) return status;
    }

    for (size_t i = 0; i < mMaxIncomingThreads; i++) {
        if (status_t status = connectAndInit(mId, true /*incoming*/); status != OK) return status;
    }

    cleanup.release();

    return OK;
}

status_t RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
    return setupClient([&](const std::vector<uint8_t>& sessionId, bool incoming) {
        return setupOneSocketConnection(addr, sessionId, incoming);
    });
}

status_t RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr,
                                              const std::vector<uint8_t>& sessionId,
                                              bool incoming) {
    RpcTransportFd transportFd;
    status_t status = singleSocketConnection(addr, mShutdownTrigger, &transportFd);
    if (status != OK) return status;

    return initAndAddConnection(std::move(transportFd), sessionId, incoming);
}

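// Connects a single socket to the given address, retrying up to 5 times (10ms
// apart) on connection reset, and polling through the FdTrigger so the attempt
// can be aborted if the session shuts down mid-connect.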
status_t singleSocketConnection(const RpcSocketAddress& addr,
                                const std::unique_ptr<FdTrigger>& shutdownTrigger,
                                RpcTransportFd* outFd) {
    LOG_ALWAYS_FATAL_IF(outFd == nullptr,
                        "There is no reason to call this function without an outFd");
    LOG_ALWAYS_FATAL_IF(shutdownTrigger == nullptr,
                        "FdTrigger argument is required so we don't get stuck in the connect call "
                        "if the server process shuts down.");
    for (size_t tries = 0; tries < 5; tries++) {
        if (tries > 0) usleep(10000);

        unique_fd serverFd(TEMP_FAILURE_RETRY(
                socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)));
        if (!serverFd.ok()) {
            int savedErrno = errno;
            ALOGE("Could not create socket at %s: %s", addr.toString().c_str(),
                  strerror(savedErrno));
            return -savedErrno;
        }

        if (addr.addr()->sa_family == AF_INET || addr.addr()->sa_family == AF_INET6) {
            int noDelay = 1;
            int result =
                    setsockopt(serverFd.get(), IPPROTO_TCP, TCP_NODELAY, &noDelay, sizeof(noDelay));
            if (result < 0) {
                int savedErrno = errno;
                ALOGE("Could not set TCP_NODELAY on %s: %s", addr.toString().c_str(),
                      strerror(savedErrno));
                return -savedErrno;
            }
        }

        RpcTransportFd transportFd(std::move(serverFd));

        if (0 != TEMP_FAILURE_RETRY(connect(transportFd.fd.get(), addr.addr(), addr.addrSize()))) {
            int connErrno = errno;
            if (connErrno == EAGAIN || connErrno == EINPROGRESS) {
                // For non-blocking sockets, connect() may return EAGAIN (for unix domain socket) or
                // EINPROGRESS (for others). Call poll() and getsockopt() to get the error.
                status_t pollStatus = shutdownTrigger->triggerablePoll(transportFd, POLLOUT);
                if (pollStatus != OK) {
                    ALOGE("Could not POLLOUT after connect() on non-blocking socket: %s",
                          statusToString(pollStatus).c_str());
                    return pollStatus;
                }
                // Set connErrno to the errno that connect() would have set if the fd were blocking.
                socklen_t connErrnoLen = sizeof(connErrno);
                int ret = getsockopt(transportFd.fd.get(), SOL_SOCKET, SO_ERROR, &connErrno,
                                     &connErrnoLen);
                if (ret == -1) {
                    int savedErrno = errno;
                    ALOGE("Could not getsockopt() after connect() on non-blocking socket: %s. "
                          "(Original error from connect() is: %s)",
                          strerror(savedErrno), strerror(connErrno));
                    return -savedErrno;
                }
                // Retrieved the real connErrno as if connect() was called with a blocking socket
                // fd. Continue checking connErrno.
            }
            if (connErrno == ECONNRESET) {
                ALOGW("Connection reset on %s", addr.toString().c_str());
                continue;
            }
            // connErrno could be zero if getsockopt() reports no error, so
            // check it for zero again here.
            if (connErrno != 0) {
                ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(),
                      strerror(connErrno));
                return -connErrno;
            }
        }
        LOG_RPC_DETAIL("Socket client at %s connected with fd %d", addr.toString().c_str(),
                       transportFd.fd.get());

        *outFd = std::move(transportFd);
        return OK;
    }

    ALOGE("Ran out of retries to connect to %s", addr.toString().c_str());
    return UNKNOWN_ERROR;
}

status_t RpcSession::initAndAddConnection(RpcTransportFd fd, const std::vector<uint8_t>& sessionId,
                                          bool incoming) {
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger == nullptr);
    auto server = mCtx->newTransport(std::move(fd), mShutdownTrigger.get());
    if (server == nullptr) {
        ALOGE("%s: Unable to set up RpcTransport", __PRETTY_FUNCTION__);
        return UNKNOWN_ERROR;
    }

    LOG_RPC_DETAIL("Socket at client with RpcTransport %p", server.get());

    if (sessionId.size() > std::numeric_limits<uint16_t>::max()) {
        ALOGE("Session ID too big %zu", sessionId.size());
        return BAD_VALUE;
    }

    RpcConnectionHeader header{
            .version = mProtocolVersion.value_or(RPC_WIRE_PROTOCOL_VERSION),
            .options = 0,
            .fileDescriptorTransportMode = static_cast<uint8_t>(mFileDescriptorTransportMode),
            .sessionIdSize = static_cast<uint16_t>(sessionId.size()),
    };

    if (incoming) {
        header.options |= RPC_CONNECTION_OPTION_INCOMING;
    }

    iovec headerIov{&header, sizeof(header)};
    auto sendHeaderStatus = server->interruptableWriteFully(mShutdownTrigger.get(), &headerIov, 1,
                                                            std::nullopt, nullptr);
    if (sendHeaderStatus != OK) {
        ALOGE("Could not write connection header to socket: %s",
              statusToString(sendHeaderStatus).c_str());
        return sendHeaderStatus;
    }

    if (sessionId.size() > 0) {
        iovec sessionIov{const_cast<void*>(static_cast<const void*>(sessionId.data())),
                         sessionId.size()};
        auto sendSessionIdStatus =
                server->interruptableWriteFully(mShutdownTrigger.get(), &sessionIov, 1,
                                                std::nullopt, nullptr);
        if (sendSessionIdStatus != OK) {
            ALOGE("Could not write session ID ('%s') to socket: %s",
                  HexString(sessionId.data(), sessionId.size()).c_str(),
                  statusToString(sendSessionIdStatus).c_str());
            return sendSessionIdStatus;
        }
    }

    LOG_RPC_DETAIL("Socket at client: header sent");

    if (incoming) {
        return addIncomingConnection(std::move(server));
    } else {
        return addOutgoingConnection(std::move(server), true /*init*/);
    }
}

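// Ownership of the thread object is handed to the thread itself (via
// preJoinThreadOwnership) before this function returns; joinCv blocks until
// that handoff completes so the stack-allocated state here stays valid.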
status_t RpcSession::addIncomingConnection(std::unique_ptr<RpcTransport> rpcTransport) {
    RpcMutex mutex;
    RpcConditionVariable joinCv;
    RpcMutexUniqueLock lock(mutex);
    RpcMaybeThread thread;
    sp<RpcSession> thiz = sp<RpcSession>::fromExisting(this);
    bool ownershipTransferred = false;
    thread = RpcMaybeThread([&]() {
        RpcMutexUniqueLock threadLock(mutex);
        std::unique_ptr<RpcTransport> movedRpcTransport = std::move(rpcTransport);
        // NOLINTNEXTLINE(performance-unnecessary-copy-initialization)
        sp<RpcSession> session = thiz;
        session->preJoinThreadOwnership(std::move(thread));

        // only continue once we have a response or the connection fails
        auto setupResult = session->preJoinSetup(std::move(movedRpcTransport));

        ownershipTransferred = true;
        threadLock.unlock();
        joinCv.notify_one();
        // do not use & vars below

        RpcSession::join(std::move(session), std::move(setupResult));
    });
    rpcJoinIfSingleThreaded(thread);
    joinCv.wait(lock, [&] { return ownershipTransferred; });
    LOG_ALWAYS_FATAL_IF(!ownershipTransferred);
    return OK;
}

status_t RpcSession::initShutdownTrigger() {
    // First client connection added, but setForServer not called, so
    // initializing for a client.
    if (mShutdownTrigger == nullptr) {
        mShutdownTrigger = FdTrigger::make();
        mEventListener = mShutdownListener = sp<WaitForShutdownListener>::make();
        if (mShutdownTrigger == nullptr) return INVALID_OPERATION;
    }
    return OK;
}

status_t RpcSession::addOutgoingConnection(std::unique_ptr<RpcTransport> rpcTransport, bool init) {
    sp<RpcConnection> connection = sp<RpcConnection>::make();
    {
        RpcMutexLockGuard _l(mMutex);
        connection->rpcTransport = std::move(rpcTransport);
        connection->exclusiveTid = binder::os::GetThreadId();
        mConnections.mOutgoing.push_back(connection);
    }

    status_t status = OK;
    if (init) {
        status =
                mRpcBinderState->sendConnectionInit(connection, sp<RpcSession>::fromExisting(this));
    }

    clearConnectionTid(connection);

    return status;
}

bool RpcSession::setForServer(const wp<RpcServer>& server, const wp<EventListener>& eventListener,
                              const std::vector<uint8_t>& sessionId,
                              const sp<IBinder>& sessionSpecificRoot) {
    LOG_ALWAYS_FATAL_IF(mForServer != nullptr);
    LOG_ALWAYS_FATAL_IF(server == nullptr);
    LOG_ALWAYS_FATAL_IF(mEventListener != nullptr);
    LOG_ALWAYS_FATAL_IF(eventListener == nullptr);
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger != nullptr);
    LOG_ALWAYS_FATAL_IF(mCtx != nullptr);

    mShutdownTrigger = FdTrigger::make();
    if (mShutdownTrigger == nullptr) return false;

    mId = sessionId;
    mForServer = server;
    mEventListener = eventListener;
    mSessionSpecificRootObject = sessionSpecificRoot;
    return true;
}

void RpcSession::setSessionSpecificRoot(const sp<IBinder>& sessionSpecificRoot) {
    LOG_ALWAYS_FATAL_IF(mSessionSpecificRootObject != nullptr,
                        "Session specific root object already set");
    LOG_ALWAYS_FATAL_IF(mForServer != nullptr,
                        "Session specific root object cannot be set for a server");
    mSessionSpecificRootObject = sessionSpecificRoot;
}

sp<RpcSession::RpcConnection> RpcSession::assignIncomingConnectionToThisThread(
        std::unique_ptr<RpcTransport> rpcTransport) {
    RpcMutexLockGuard _l(mMutex);

    if (mConnections.mIncoming.size() >= mMaxIncomingThreads) {
        ALOGE("Cannot add thread to session with %zu threads (max is set to %zu)",
              mConnections.mIncoming.size(), mMaxIncomingThreads);
        return nullptr;
    }

    // Don't accept any more connections; some have shut down. Usually this
    // happens when new connections are still being established as part of a
    // very short-lived session which shuts down after it has already started
    // accepting new connections.
    if (mConnections.mIncoming.size() < mConnections.mMaxIncoming) {
        return nullptr;
    }

    sp<RpcConnection> session = sp<RpcConnection>::make();
    session->rpcTransport = std::move(rpcTransport);
    session->exclusiveTid = binder::os::GetThreadId();

    mConnections.mIncoming.push_back(session);
    mConnections.mMaxIncoming = mConnections.mIncoming.size();

    return session;
}

bool RpcSession::removeIncomingConnection(const sp<RpcConnection>& connection) {
    RpcMutexUniqueLock _l(mMutex);
    if (auto it =
                std::find(mConnections.mIncoming.begin(), mConnections.mIncoming.end(), connection);
        it != mConnections.mIncoming.end()) {
        mConnections.mIncoming.erase(it);
        if (mConnections.mIncoming.size() == 0) {
            sp<EventListener> listener = mEventListener.promote();
            if (listener) {
                _l.unlock();
                listener->onSessionAllIncomingThreadsEnded(sp<RpcSession>::fromExisting(this));
            }
        }
        return true;
    }
    return false;
}

void RpcSession::clearConnectionTid(const sp<RpcConnection>& connection) {
    RpcMutexUniqueLock _l(mMutex);
    connection->exclusiveTid = std::nullopt;
    if (mConnections.mWaitingThreads > 0) {
        _l.unlock();
        mAvailableConnectionCv.notify_one();
    }
}

std::vector<uint8_t> RpcSession::getCertificate(RpcCertificateFormat format) {
    return mCtx->getCertificate(format);
}

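// Connection selection, in priority order: (1) a connection this thread
// already holds exclusively (reentrant/nested use), (2) an idle outgoing
// connection, (3) for non-async use, this thread's incoming connection when
// nesting is allowed; otherwise, wait for a connection to free up.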
status_t RpcSession::ExclusiveConnection::find(const sp<RpcSession>& session, ConnectionUse use,
                                               ExclusiveConnection* connection) {
    connection->mSession = session;
    connection->mConnection = nullptr;
    connection->mReentrant = false;

    uint64_t tid = binder::os::GetThreadId();
    RpcMutexUniqueLock _l(session->mMutex);

    session->mConnections.mWaitingThreads++;
    while (true) {
        sp<RpcConnection> exclusive;
        sp<RpcConnection> available;

        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated connection if available
        findConnection(tid, &exclusive, &available, session->mConnections.mOutgoing,
                       session->mConnections.mOutgoingOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mIncoming is excluded below.
        //
        // Imagine we have more than one thread in play, and a single thread
        // sends a synchronous, then an asynchronous command. Imagine the
        // asynchronous command is sent on the first client connection. Then, if
        // we naively send a synchronous command to that same connection, the
        // thread on the far side might be busy processing the asynchronous
        // command. So, we move to considering the second available thread
        // for subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            session->mConnections.mOutgoingOffset = (session->mConnections.mOutgoingOffset + 1) %
                    session->mConnections.mOutgoing.size();
        }

        // USE SERVING SOCKET (e.g. nested transaction)
        if (use != ConnectionUse::CLIENT_ASYNC) {
            sp<RpcConnection> exclusiveIncoming;
            // server connections are always assigned to a thread
            findConnection(tid, &exclusiveIncoming, nullptr /*available*/,
                           session->mConnections.mIncoming, 0 /* index hint */);

            // Asynchronous calls cannot be nested, but we currently allow ref
            // count calls to be nested (so that you can use this without having
            // extra threads). Note 'drainCommands' is used so that these ref
            // counts can't build up.
            if (exclusiveIncoming != nullptr) {
                if (exclusiveIncoming->allowNested) {
                    // guaranteed to be processed as nested command
                    exclusive = exclusiveIncoming;
                } else if (use == ConnectionUse::CLIENT_REFCOUNT && available == nullptr) {
                    // prefer available socket, but if we don't have one, don't
                    // wait for one
                    exclusive = exclusiveIncoming;
                }
            }
        }

        // if our thread is already using a connection, prioritize using that
        if (exclusive != nullptr) {
            connection->mConnection = exclusive;
            connection->mReentrant = true;
            break;
        } else if (available != nullptr) {
            connection->mConnection = available;
            connection->mConnection->exclusiveTid = tid;
            break;
        }

        if (session->mConnections.mOutgoing.size() == 0) {
            ALOGE("Session has no outgoing connections. This is required for an RPC server to make "
                  "any non-nested (e.g. oneway or on another thread) calls. Use code request "
                  "reason: %d. Incoming connections: %zu. %s.",
                  static_cast<int>(use), session->mConnections.mIncoming.size(),
                  (session->server()
                           ? "This is a server session, so see RpcSession::setMaxIncomingThreads "
                             "for the corresponding client"
                           : "This is a client session, so see "
                             "RpcSession::setMaxOutgoingConnections "
                             "for this client or RpcServer::setMaxThreads for the corresponding "
                             "server"));
            return WOULD_BLOCK;
        }

        LOG_RPC_DETAIL("No available connections (have %zu clients and %zu servers). Waiting...",
                       session->mConnections.mOutgoing.size(),
                       session->mConnections.mIncoming.size());
        session->mAvailableConnectionCv.wait(_l);
    }
    session->mConnections.mWaitingThreads--;

    return OK;
}

void RpcSession::ExclusiveConnection::findConnection(uint64_t tid, sp<RpcConnection>* exclusive,
                                                     sp<RpcConnection>* available,
                                                     std::vector<sp<RpcConnection>>& sockets,
                                                     size_t socketsIndexHint) {
    LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
                        "Bad index %zu >= %zu", socketsIndexHint, sockets.size());

    if (*exclusive != nullptr) return; // consistent with break below

    for (size_t i = 0; i < sockets.size(); i++) {
        sp<RpcConnection>& socket = sockets[(i + socketsIndexHint) % sockets.size()];

        // take first available connection (intuition = caching)
        if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
            *available = socket;
            continue;
        }

        // though, prefer to take a connection which is already in use by this
        // thread (nested transactions)
        if (exclusive && socket->exclusiveTid == tid) {
            *exclusive = socket;
            break; // consistent with return above
        }
    }
}

RpcSession::ExclusiveConnection::~ExclusiveConnection() {
    // reentrant use of a connection means something less deep in the call stack
    // is using this fd, and it retains the right to it. So, we don't give up
    // exclusive ownership, and no thread is freed.
    if (!mReentrant && mConnection != nullptr) {
        mSession->clearConnectionTid(mConnection);
    }
}

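// A connection counts as active when a thread holds it exclusively and its
// transport is not merely waiting for incoming data.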
bool RpcSession::hasActiveConnection(const std::vector<sp<RpcConnection>>& connections) {
    for (const auto& connection : connections) {
        if (connection->exclusiveTid != std::nullopt && !connection->rpcTransport->isWaiting()) {
            return true;
        }
    }
    return false;
}

bool RpcSession::hasActiveRequests() {
    RpcMutexUniqueLock _l(mMutex);
    if (hasActiveConnection(mConnections.mIncoming)) {
        return true;
    }
    if (hasActiveConnection(mConnections.mOutgoing)) {
        return true;
    }
    return mConnections.mWaitingThreads != 0;
}

} // namespace android