/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RpcSession"

#include <binder/RpcSession.h>

#include <dlfcn.h>
#include <inttypes.h>
#include <netinet/tcp.h>
#include <poll.h>
#include <unistd.h>

#include <string_view>

#include <android-base/hex.h>
#include <android-base/macros.h>
#include <android-base/scopeguard.h>
#include <binder/BpBinder.h>
#include <binder/Parcel.h>
#include <binder/RpcServer.h>
#include <binder/RpcTransportRaw.h>
#include <binder/Stability.h>
#include <utils/Compat.h>
#include <utils/String8.h>

#include "BuildFlags.h"
#include "FdTrigger.h"
#include "OS.h"
#include "RpcSocketAddress.h"
#include "RpcState.h"
#include "RpcTransportUtils.h"
#include "RpcWireFormat.h"
#include "Utils.h"

#if defined(__ANDROID__) && !defined(__ANDROID_RECOVERY__)
#include <jni.h>
extern "C" JavaVM* AndroidRuntimeGetJavaVM();
#endif

namespace android {

using base::unique_fd;

RpcSession::RpcSession(std::unique_ptr<RpcTransportCtx> ctx) : mCtx(std::move(ctx)) {
    LOG_RPC_DETAIL("RpcSession created %p", this);

    mRpcBinderState = std::make_unique<RpcState>();
}
RpcSession::~RpcSession() {
    LOG_RPC_DETAIL("RpcSession destroyed %p", this);

    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mConnections.mIncoming.size() != 0,
                        "Should not be able to destroy a session with servers in use.");
}

sp<RpcSession> RpcSession::make() {
    // Default is without TLS.
    return make(makeDefaultRpcTransportCtxFactory());
}

sp<RpcSession> RpcSession::make(std::unique_ptr<RpcTransportCtxFactory> rpcTransportCtxFactory) {
    auto ctx = rpcTransportCtxFactory->newClientCtx();
    if (ctx == nullptr) return nullptr;
    return sp<RpcSession>::make(std::move(ctx));
}

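// Incoming threads service transactions initiated by the remote end of the
// session (e.g. callbacks), while outgoing connections carry transactions
// initiated locally; both limits must be configured before setup.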
void RpcSession::setMaxIncomingThreads(size_t threads) {
    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mStartedSetup,
                        "Must set max incoming threads before setting up connections");
    mMaxIncomingThreads = threads;
}

size_t RpcSession::getMaxIncomingThreads() {
    RpcMutexLockGuard _l(mMutex);
    return mMaxIncomingThreads;
}

void RpcSession::setMaxOutgoingConnections(size_t connections) {
    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mStartedSetup,
                        "Must set max outgoing connections before setting up connections");
    mMaxOutgoingConnections = connections;
}

size_t RpcSession::getMaxOutgoingThreads() {
    RpcMutexLockGuard _l(mMutex);
    return mMaxOutgoingConnections;
}

bool RpcSession::setProtocolVersionInternal(uint32_t version, bool checkStarted) {
    if (version >= RPC_WIRE_PROTOCOL_VERSION_NEXT &&
        version != RPC_WIRE_PROTOCOL_VERSION_EXPERIMENTAL) {
        ALOGE("Cannot start RPC session with version %u which is unknown (current protocol version "
              "is %u).",
              version, RPC_WIRE_PROTOCOL_VERSION);
        return false;
    }

    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(checkStarted && mStartedSetup,
                        "Must set protocol version before setting up connections");
    if (mProtocolVersion && version > *mProtocolVersion) {
        ALOGE("Cannot upgrade explicitly capped protocol version %u to newer version %u",
              *mProtocolVersion, version);
        return false;
    }

    mProtocolVersion = version;
    return true;
}

bool RpcSession::setProtocolVersion(uint32_t version) {
    return setProtocolVersionInternal(version, true);
}

std::optional<uint32_t> RpcSession::getProtocolVersion() {
    RpcMutexLockGuard _l(mMutex);
    return mProtocolVersion;
}

void RpcSession::setFileDescriptorTransportMode(FileDescriptorTransportMode mode) {
    RpcMutexLockGuard _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mStartedSetup,
                        "Must set file descriptor transport mode before setting up connections");
    mFileDescriptorTransportMode = mode;
}

RpcSession::FileDescriptorTransportMode RpcSession::getFileDescriptorTransportMode() {
    return mFileDescriptorTransportMode;
}

status_t RpcSession::setupUnixDomainClient(const char* path) {
    return setupSocketClient(UnixSocketAddress(path));
}

status_t RpcSession::setupUnixDomainSocketBootstrapClient(unique_fd bootstrapFd) {
    mBootstrapTransport =
            mCtx->newTransport(RpcTransportFd(std::move(bootstrapFd)), mShutdownTrigger.get());
    return setupClient([&](const std::vector<uint8_t>& sessionId, bool incoming) {
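        // For each connection, create a connected socket pair: one end is sent
        // to the server over the bootstrap transport as an out-of-band fd, and
        // the other end becomes this session's connection.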
        int socks[2];
        if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0, socks) < 0) {
            int savedErrno = errno;
            ALOGE("Failed socketpair: %s", strerror(savedErrno));
            return -savedErrno;
        }
        unique_fd clientFd(socks[0]), serverFd(socks[1]);

        int zero = 0;
        iovec iov{&zero, sizeof(zero)};
        std::vector<std::variant<base::unique_fd, base::borrowed_fd>> fds;
        fds.push_back(std::move(serverFd));

        status_t status = mBootstrapTransport->interruptableWriteFully(mShutdownTrigger.get(), &iov,
                                                                       1, std::nullopt, &fds);
        if (status != OK) {
            ALOGE("Failed to send fd over bootstrap transport: %s", strerror(-status));
            return status;
        }

        return initAndAddConnection(RpcTransportFd(std::move(clientFd)), sessionId, incoming);
    });
}

status_t RpcSession::setupVsockClient(unsigned int cid, unsigned int port) {
    return setupSocketClient(VsockSocketAddress(cid, port));
}

status_t RpcSession::setupInetClient(const char* addr, unsigned int port) {
    auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
    if (aiStart == nullptr) return UNKNOWN_ERROR;
    for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
        InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port);
        if (status_t status = setupSocketClient(socketAddress); status == OK) return OK;
    }
    ALOGE("None of the socket addresses resolved for %s:%u could be added as an inet client.", addr,
          port);
    return NAME_NOT_FOUND;
}

status_t RpcSession::setupPreconnectedClient(base::unique_fd fd,
                                             std::function<unique_fd()>&& request) {
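    // The provided fd is used for the first connection; for each additional
    // outgoing or incoming connection, 'request' is invoked to obtain another
    // preconnected fd.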
    return setupClient([&](const std::vector<uint8_t>& sessionId, bool incoming) -> status_t {
        if (!fd.ok()) {
            fd = request();
            if (!fd.ok()) return BAD_VALUE;
        }
        if (auto res = setNonBlocking(fd); !res.ok()) {
            ALOGE("setupPreconnectedClient: %s", res.error().message().c_str());
            return res.error().code() == 0 ? UNKNOWN_ERROR : -res.error().code();
        }

        RpcTransportFd transportFd(std::move(fd));
        status_t status = initAndAddConnection(std::move(transportFd), sessionId, incoming);
        fd = unique_fd(); // Explicitly reset after move to avoid analyzer warning.
        return status;
    });
}

status_t RpcSession::addNullDebuggingClient() {
    // Note: only works on raw sockets.
    if (auto status = initShutdownTrigger(); status != OK) return status;

    unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));

    if (serverFd == -1) {
        int savedErrno = errno;
        ALOGE("Could not connect to /dev/null: %s", strerror(savedErrno));
        return -savedErrno;
    }

    RpcTransportFd transportFd(std::move(serverFd));
    auto server = mCtx->newTransport(std::move(transportFd), mShutdownTrigger.get());
    if (server == nullptr) {
        ALOGE("Unable to set up RpcTransport");
        return UNKNOWN_ERROR;
    }
    return addOutgoingConnection(std::move(server), false);
}

sp<IBinder> RpcSession::getRootObject() {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return nullptr;
    return state()->getRootObject(connection.get(), sp<RpcSession>::fromExisting(this));
}

status_t RpcSession::getRemoteMaxThreads(size_t* maxThreads) {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;
    return state()->getMaxThreads(connection.get(), sp<RpcSession>::fromExisting(this), maxThreads);
}

bool RpcSession::shutdownAndWait(bool wait) {
    RpcMutexUniqueLock _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger == nullptr, "Shutdown trigger not installed");

    mShutdownTrigger->trigger();

    if (wait) {
        LOG_ALWAYS_FATAL_IF(mShutdownListener == nullptr, "Shutdown listener not installed");
        mShutdownListener->waitForShutdown(_l, sp<RpcSession>::fromExisting(this));

        LOG_ALWAYS_FATAL_IF(!mConnections.mThreads.empty(), "Shutdown failed");
    }

    _l.unlock();

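    // Send obituaries so that death recipients registered on proxies from this
    // session are notified, since the remote end can no longer be reached.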
    if (status_t res = state()->sendObituaries(sp<RpcSession>::fromExisting(this)); res != OK) {
        ALOGE("Failed to send obituaries as the RpcSession is shutting down: %s",
              statusToString(res).c_str());
    }

    mRpcBinderState->clear();

    return true;
}

status_t RpcSession::transact(const sp<IBinder>& binder, uint32_t code, const Parcel& data,
                              Parcel* reply, uint32_t flags) {
    ExclusiveConnection connection;
    status_t status =
            ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                      (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
                                                                     : ConnectionUse::CLIENT,
                                      &connection);
    if (status != OK) return status;
    return state()->transact(connection.get(), binder, code, data,
                             sp<RpcSession>::fromExisting(this), reply, flags);
}

status_t RpcSession::sendDecStrong(const BpBinder* binder) {
    // target is 0 because this is used to free BpBinder objects
    return sendDecStrongToTarget(binder->getPrivateAccessor().rpcAddress(), 0 /*target*/);
}

status_t RpcSession::sendDecStrongToTarget(uint64_t address, size_t target) {
    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT_REFCOUNT, &connection);
    if (status != OK) return status;
    return state()->sendDecStrongToTarget(connection.get(), sp<RpcSession>::fromExisting(this),
                                          address, target);
}

status_t RpcSession::readId() {
    {
        RpcMutexLockGuard _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
    }

    ExclusiveConnection connection;
    status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                ConnectionUse::CLIENT, &connection);
    if (status != OK) return status;

    status = state()->getSessionId(connection.get(), sp<RpcSession>::fromExisting(this), &mId);
    if (status != OK) return status;

    LOG_RPC_DETAIL("RpcSession %p has id %s", this,
                   base::HexString(mId.data(), mId.size()).c_str());
    return OK;
}

void RpcSession::WaitForShutdownListener::onSessionAllIncomingThreadsEnded(
        const sp<RpcSession>& session) {
    (void)session;
}

void RpcSession::WaitForShutdownListener::onSessionIncomingThreadEnded() {
    mShutdownCount += 1;
    mCv.notify_all();
}

void RpcSession::WaitForShutdownListener::waitForShutdown(RpcMutexUniqueLock& lock,
                                                          const sp<RpcSession>& session) {
    while (mShutdownCount < session->mConnections.mMaxIncoming) {
        if (std::cv_status::timeout == mCv.wait_for(lock, std::chrono::seconds(1))) {
            ALOGE("Waiting for RpcSession to shut down (1s w/o progress): %zu incoming connections "
                  "still %zu/%zu fully shutdown.",
                  session->mConnections.mIncoming.size(), mShutdownCount.load(),
                  session->mConnections.mMaxIncoming);
        }
    }
}

void RpcSession::preJoinThreadOwnership(RpcMaybeThread thread) {
    LOG_ALWAYS_FATAL_IF(thread.get_id() != rpc_this_thread::get_id(), "Must own this thread");

    {
        RpcMutexLockGuard _l(mMutex);
        mConnections.mThreads[thread.get_id()] = std::move(thread);
    }
}

RpcSession::PreJoinSetupResult RpcSession::preJoinSetup(
        std::unique_ptr<RpcTransport> rpcTransport) {
    // must be registered to allow arbitrary client code executing commands to
    // be able to do nested calls (we can't only read from it)
    sp<RpcConnection> connection = assignIncomingConnectionToThisThread(std::move(rpcTransport));

    status_t status;

    if (connection == nullptr) {
        status = DEAD_OBJECT;
    } else {
        status =
                mRpcBinderState->readConnectionInit(connection, sp<RpcSession>::fromExisting(this));
    }

    return PreJoinSetupResult{
            .connection = std::move(connection),
            .status = status,
    };
}

namespace {
#if !defined(__ANDROID__) || defined(__ANDROID_RECOVERY__)
class JavaThreadAttacher {};
#else
// RAII object for attaching / detaching current thread to JVM if Android Runtime exists. If
// Android Runtime doesn't exist, no-op.
class JavaThreadAttacher {
public:
    JavaThreadAttacher() {
        // Use dlsym to find AndroidRuntimeGetJavaVM (see getJavaVM below) because
        // libandroid_runtime is loaded after libbinder.
        auto vm = getJavaVM();
        if (vm == nullptr) return;

        char threadName[16];
        if (0 != pthread_getname_np(pthread_self(), threadName, sizeof(threadName))) {
            constexpr const char* defaultThreadName = "UnknownRpcSessionThread";
            memcpy(threadName, defaultThreadName,
                   std::min<size_t>(sizeof(threadName), strlen(defaultThreadName) + 1));
        }
        LOG_RPC_DETAIL("Attaching current thread %s to JVM", threadName);
        JavaVMAttachArgs args;
        args.version = JNI_VERSION_1_2;
        args.name = threadName;
        args.group = nullptr;
        JNIEnv* env;

        LOG_ALWAYS_FATAL_IF(vm->AttachCurrentThread(&env, &args) != JNI_OK,
                            "Cannot attach thread %s to JVM", threadName);
        mAttached = true;
    }
    ~JavaThreadAttacher() {
        if (!mAttached) return;
        auto vm = getJavaVM();
        LOG_ALWAYS_FATAL_IF(vm == nullptr,
                            "Unable to detach thread. No JavaVM, but it was present before!");

        LOG_RPC_DETAIL("Detaching current thread from JVM");
        int ret = vm->DetachCurrentThread();
        if (ret == JNI_OK) {
            mAttached = false;
        } else {
            ALOGW("Unable to detach current thread from JVM (%d)", ret);
        }
    }

private:
    DISALLOW_COPY_AND_ASSIGN(JavaThreadAttacher);
    bool mAttached = false;

    static JavaVM* getJavaVM() {
        static auto fn = reinterpret_cast<decltype(&AndroidRuntimeGetJavaVM)>(
                dlsym(RTLD_DEFAULT, "AndroidRuntimeGetJavaVM"));
        if (fn == nullptr) return nullptr;
        return fn();
    }
};
#endif
} // namespace

void RpcSession::join(sp<RpcSession>&& session, PreJoinSetupResult&& setupResult) {
    sp<RpcConnection>& connection = setupResult.connection;

    if (setupResult.status == OK) {
        LOG_ALWAYS_FATAL_IF(!connection, "must have connection if setup succeeded");
        [[maybe_unused]] JavaThreadAttacher javaThreadAttacher;
        while (true) {
            status_t status = session->state()->getAndExecuteCommand(connection, session,
                                                                     RpcState::CommandType::ANY);
            if (status != OK) {
                LOG_RPC_DETAIL("Binder connection thread closing w/ status %s",
                               statusToString(status).c_str());
                break;
            }
        }
    } else {
        ALOGE("Connection failed to init, closing with status %s",
              statusToString(setupResult.status).c_str());
    }

    sp<RpcSession::EventListener> listener;
    {
        RpcMutexLockGuard _l(session->mMutex);
        auto it = session->mConnections.mThreads.find(rpc_this_thread::get_id());
        LOG_ALWAYS_FATAL_IF(it == session->mConnections.mThreads.end());
        it->second.detach();
        session->mConnections.mThreads.erase(it);

        listener = session->mEventListener.promote();
    }

    // done after all cleanup, since session shutdown progresses via callbacks here
    if (connection != nullptr) {
        LOG_ALWAYS_FATAL_IF(!session->removeIncomingConnection(connection),
                            "bad state: connection object guaranteed to be in list");
    }

    session = nullptr;

    if (listener != nullptr) {
        listener->onSessionIncomingThreadEnded();
    }
}

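// The weak reference to RpcServer only exists to break a strong reference
// cycle; if it was ever set, it must still be promotable here.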
sp<RpcServer> RpcSession::server() {
    RpcServer* unsafeServer = mForServer.unsafe_get();
    sp<RpcServer> server = mForServer.promote();

    LOG_ALWAYS_FATAL_IF((unsafeServer == nullptr) != (server == nullptr),
                        "wp<> is to avoid strong cycle only");
    return server;
}

status_t RpcSession::setupClient(const std::function<status_t(const std::vector<uint8_t>& sessionId,
                                                               bool incoming)>& connectAndInit) {
    {
        RpcMutexLockGuard _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mStartedSetup, "Must only setup session once");
        mStartedSetup = true;

        if constexpr (!kEnableRpcThreads) {
            LOG_ALWAYS_FATAL_IF(mMaxIncomingThreads > 0,
                                "Incoming threads are not supported on single-threaded libbinder");
            // mMaxIncomingThreads should not change from here to its use below,
            // since we set mStartedSetup==true and setMaxIncomingThreads checks
            // for that
        }
    }

    if (auto status = initShutdownTrigger(); status != OK) return status;

    auto oldProtocolVersion = mProtocolVersion;
    auto cleanup = base::ScopeGuard([&] {
        // if any threads are started, shut them down
        (void)shutdownAndWait(true);

        mShutdownListener = nullptr;
        mEventListener.clear();

        mId.clear();

        mShutdownTrigger = nullptr;
        mRpcBinderState = std::make_unique<RpcState>();

        // protocol version may have been downgraded - if we reuse this object
        // to connect to another server, force that server to request a
        // downgrade again
        mProtocolVersion = oldProtocolVersion;

        mConnections = {};

        // clear mStartedSetup so that we can reuse this RpcSession
        mStartedSetup = false;
    });

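    // The first connection is established with an empty session ID; the server
    // assigns one in response, which readId() below retrieves and which is then
    // reused for every additional connection on this session.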
    if (status_t status = connectAndInit({}, false /*incoming*/); status != OK) return status;

    {
        ExclusiveConnection connection;
        if (status_t status = ExclusiveConnection::find(sp<RpcSession>::fromExisting(this),
                                                        ConnectionUse::CLIENT, &connection);
            status != OK)
            return status;

        uint32_t version;
        if (status_t status =
                    state()->readNewSessionResponse(connection.get(),
                                                    sp<RpcSession>::fromExisting(this), &version);
            status != OK)
            return status;
        if (!setProtocolVersionInternal(version, false)) return BAD_VALUE;
    }

    // TODO(b/189955605): we should add additional sessions dynamically
    // instead of all at once.
    size_t numThreadsAvailable;
    if (status_t status = getRemoteMaxThreads(&numThreadsAvailable); status != OK) {
        ALOGE("Could not get max threads after initial session setup: %s",
              statusToString(status).c_str());
        return status;
    }

    if (status_t status = readId(); status != OK) {
        ALOGE("Could not get session id after initial session setup: %s",
              statusToString(status).c_str());
        return status;
    }

    size_t outgoingConnections = std::min(numThreadsAvailable, mMaxOutgoingConnections);
    ALOGI_IF(outgoingConnections != numThreadsAvailable,
             "Server hints client to start %zu outgoing threads, but client will only start %zu "
             "because it is preconfigured to start at most %zu outgoing threads.",
             numThreadsAvailable, outgoingConnections, mMaxOutgoingConnections);

    // TODO(b/189955605): we should add additional sessions dynamically
    // instead of all at once - the other side should be responsible for setting
    // up additional connections. We need to create at least one (unless 0 are
    // requested to be set) in order to allow the other side to reliably make
    // any requests at all.

    // we've already setup one client
    LOG_RPC_DETAIL("RpcSession::setupClient() instantiating %zu outgoing connections (server max: "
                   "%zu) and %zu incoming threads",
                   outgoingConnections, numThreadsAvailable, mMaxIncomingThreads);
    for (size_t i = 0; i + 1 < outgoingConnections; i++) {
        if (status_t status = connectAndInit(mId, false /*incoming*/); status != OK) return status;
    }

    for (size_t i = 0; i < mMaxIncomingThreads; i++) {
        if (status_t status = connectAndInit(mId, true /*incoming*/); status != OK) return status;
    }

    cleanup.Disable();

    return OK;
}

status_t RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
    return setupClient([&](const std::vector<uint8_t>& sessionId, bool incoming) {
        return setupOneSocketConnection(addr, sessionId, incoming);
    });
}

status_t RpcSession::setupOneSocketConnection(const RpcSocketAddress& addr,
                                              const std::vector<uint8_t>& sessionId,
                                              bool incoming) {
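    // Make up to five attempts: a connection reset (handled below) is retried
    // after a short sleep, e.g. if the server is still coming up.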
    for (size_t tries = 0; tries < 5; tries++) {
        if (tries > 0) usleep(10000);

        unique_fd serverFd(TEMP_FAILURE_RETRY(
                socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK, 0)));
        if (serverFd == -1) {
            int savedErrno = errno;
            ALOGE("Could not create socket at %s: %s", addr.toString().c_str(),
                  strerror(savedErrno));
            return -savedErrno;
        }

        if (addr.addr()->sa_family == AF_INET || addr.addr()->sa_family == AF_INET6) {
            int noDelay = 1;
            int result =
                    setsockopt(serverFd.get(), IPPROTO_TCP, TCP_NODELAY, &noDelay, sizeof(noDelay));
            if (result < 0) {
                int savedErrno = errno;
                ALOGE("Could not set TCP_NODELAY on %s: %s", addr.toString().c_str(),
                      strerror(savedErrno));
                return -savedErrno;
            }
        }

        RpcTransportFd transportFd(std::move(serverFd));

        if (0 != TEMP_FAILURE_RETRY(connect(transportFd.fd.get(), addr.addr(), addr.addrSize()))) {
            int connErrno = errno;
            if (connErrno == EAGAIN || connErrno == EINPROGRESS) {
                // For non-blocking sockets, connect() may return EAGAIN (for unix domain socket) or
                // EINPROGRESS (for others). Call poll() and getsockopt() to get the error.
                status_t pollStatus = mShutdownTrigger->triggerablePoll(transportFd, POLLOUT);
                if (pollStatus != OK) {
                    ALOGE("Could not POLLOUT after connect() on non-blocking socket: %s",
                          statusToString(pollStatus).c_str());
                    return pollStatus;
                }
                // Set connErrno to the errno that connect() would have set if the fd were blocking.
                socklen_t connErrnoLen = sizeof(connErrno);
                int ret = getsockopt(transportFd.fd.get(), SOL_SOCKET, SO_ERROR, &connErrno,
                                     &connErrnoLen);
                if (ret == -1) {
                    int savedErrno = errno;
                    ALOGE("Could not getsockopt() after connect() on non-blocking socket: %s. "
                          "(Original error from connect() is: %s)",
                          strerror(savedErrno), strerror(connErrno));
                    return -savedErrno;
                }
                // Retrieved the real connErrno as if connect() was called with a blocking socket
                // fd. Continue checking connErrno.
            }
            if (connErrno == ECONNRESET) {
                ALOGW("Connection reset on %s", addr.toString().c_str());
                continue;
            }
            // connErrno may be zero if getsockopt() reports no error, so check it again here.
            if (connErrno != 0) {
                ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(),
                      strerror(connErrno));
                return -connErrno;
            }
        }
        LOG_RPC_DETAIL("Socket at %s client with fd %d", addr.toString().c_str(),
                       transportFd.fd.get());

        return initAndAddConnection(std::move(transportFd), sessionId, incoming);
    }

    ALOGE("Ran out of retries to connect to %s", addr.toString().c_str());
    return UNKNOWN_ERROR;
}

status_t RpcSession::initAndAddConnection(RpcTransportFd fd, const std::vector<uint8_t>& sessionId,
                                          bool incoming) {
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger == nullptr);
    auto server = mCtx->newTransport(std::move(fd), mShutdownTrigger.get());
    if (server == nullptr) {
        ALOGE("%s: Unable to set up RpcTransport", __PRETTY_FUNCTION__);
        return UNKNOWN_ERROR;
    }

    LOG_RPC_DETAIL("Socket at client with RpcTransport %p", server.get());

    if (sessionId.size() > std::numeric_limits<uint16_t>::max()) {
        ALOGE("Session ID too big %zu", sessionId.size());
        return BAD_VALUE;
    }

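    // Connection handshake, from the client's side: send an RpcConnectionHeader
    // (requested protocol version, options, fd transport mode, session ID size),
    // followed by the raw session ID bytes if there are any. The server's
    // response is read later in setupClient() via readNewSessionResponse().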
    RpcConnectionHeader header{
            .version = mProtocolVersion.value_or(RPC_WIRE_PROTOCOL_VERSION),
            .options = 0,
            .fileDescriptorTransportMode = static_cast<uint8_t>(mFileDescriptorTransportMode),
            .sessionIdSize = static_cast<uint16_t>(sessionId.size()),
    };

    if (incoming) {
        header.options |= RPC_CONNECTION_OPTION_INCOMING;
    }

    iovec headerIov{&header, sizeof(header)};
    auto sendHeaderStatus = server->interruptableWriteFully(mShutdownTrigger.get(), &headerIov, 1,
                                                            std::nullopt, nullptr);
    if (sendHeaderStatus != OK) {
        ALOGE("Could not write connection header to socket: %s",
              statusToString(sendHeaderStatus).c_str());
        return sendHeaderStatus;
    }

    if (sessionId.size() > 0) {
        iovec sessionIov{const_cast<void*>(static_cast<const void*>(sessionId.data())),
                         sessionId.size()};
        auto sendSessionIdStatus =
                server->interruptableWriteFully(mShutdownTrigger.get(), &sessionIov, 1,
                                                std::nullopt, nullptr);
        if (sendSessionIdStatus != OK) {
            ALOGE("Could not write session ID ('%s') to socket: %s",
                  base::HexString(sessionId.data(), sessionId.size()).c_str(),
                  statusToString(sendSessionIdStatus).c_str());
            return sendSessionIdStatus;
        }
    }

    LOG_RPC_DETAIL("Socket at client: header sent");

    if (incoming) {
        return addIncomingConnection(std::move(server));
    } else {
        return addOutgoingConnection(std::move(server), true /*init*/);
    }
}

status_t RpcSession::addIncomingConnection(std::unique_ptr<RpcTransport> rpcTransport) {
    RpcMutex mutex;
    RpcConditionVariable joinCv;
    RpcMutexUniqueLock lock(mutex);
    RpcMaybeThread thread;
    sp<RpcSession> thiz = sp<RpcSession>::fromExisting(this);
    bool ownershipTransferred = false;
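    // The new thread takes ownership of itself and of the transport via
    // preJoinThreadOwnership(); the condition variable below keeps this caller
    // alive until that handoff completes, since the lambda captures locals by
    // reference.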
    thread = RpcMaybeThread([&]() {
        RpcMutexUniqueLock threadLock(mutex);
        std::unique_ptr<RpcTransport> movedRpcTransport = std::move(rpcTransport);
        // NOLINTNEXTLINE(performance-unnecessary-copy-initialization)
        sp<RpcSession> session = thiz;
        session->preJoinThreadOwnership(std::move(thread));

        // only continue once we have a response or the connection fails
        auto setupResult = session->preJoinSetup(std::move(movedRpcTransport));

        ownershipTransferred = true;
        threadLock.unlock();
        joinCv.notify_one();
        // do not use & vars below

        RpcSession::join(std::move(session), std::move(setupResult));
    });
    rpcJoinIfSingleThreaded(thread);
    joinCv.wait(lock, [&] { return ownershipTransferred; });
    LOG_ALWAYS_FATAL_IF(!ownershipTransferred);
    return OK;
}

status_t RpcSession::initShutdownTrigger() {
    // first client connection added, but setForServer not called, so
    // initializing for a client.
    if (mShutdownTrigger == nullptr) {
        mShutdownTrigger = FdTrigger::make();
        mEventListener = mShutdownListener = sp<WaitForShutdownListener>::make();
        if (mShutdownTrigger == nullptr) return INVALID_OPERATION;
    }
    return OK;
}

status_t RpcSession::addOutgoingConnection(std::unique_ptr<RpcTransport> rpcTransport, bool init) {
    sp<RpcConnection> connection = sp<RpcConnection>::make();
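    // Claim the connection for this thread while the connection-init handshake
    // runs, then release it for general use via clearConnectionTid().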
    {
        RpcMutexLockGuard _l(mMutex);
        connection->rpcTransport = std::move(rpcTransport);
        connection->exclusiveTid = rpcGetThreadId();
        mConnections.mOutgoing.push_back(connection);
    }

    status_t status = OK;
    if (init) {
        status =
                mRpcBinderState->sendConnectionInit(connection, sp<RpcSession>::fromExisting(this));
    }

    clearConnectionTid(connection);

    return status;
}

bool RpcSession::setForServer(const wp<RpcServer>& server, const wp<EventListener>& eventListener,
                              const std::vector<uint8_t>& sessionId,
                              const sp<IBinder>& sessionSpecificRoot) {
    LOG_ALWAYS_FATAL_IF(mForServer != nullptr);
    LOG_ALWAYS_FATAL_IF(server == nullptr);
    LOG_ALWAYS_FATAL_IF(mEventListener != nullptr);
    LOG_ALWAYS_FATAL_IF(eventListener == nullptr);
    LOG_ALWAYS_FATAL_IF(mShutdownTrigger != nullptr);
    LOG_ALWAYS_FATAL_IF(mCtx != nullptr);

    mShutdownTrigger = FdTrigger::make();
    if (mShutdownTrigger == nullptr) return false;

    mId = sessionId;
    mForServer = server;
    mEventListener = eventListener;
    mSessionSpecificRootObject = sessionSpecificRoot;
    return true;
}

sp<RpcSession::RpcConnection> RpcSession::assignIncomingConnectionToThisThread(
        std::unique_ptr<RpcTransport> rpcTransport) {
    RpcMutexLockGuard _l(mMutex);

    if (mConnections.mIncoming.size() >= mMaxIncomingThreads) {
        ALOGE("Cannot add thread to session with %zu threads (max is set to %zu)",
              mConnections.mIncoming.size(), mMaxIncomingThreads);
        return nullptr;
    }

    // Don't accept any more connections, some have shutdown. Usually this
    // happens when new connections are still being established as part of a
    // very short-lived session which shuts down after it already started
    // accepting new connections.
    if (mConnections.mIncoming.size() < mConnections.mMaxIncoming) {
        return nullptr;
    }

    sp<RpcConnection> session = sp<RpcConnection>::make();
    session->rpcTransport = std::move(rpcTransport);
    session->exclusiveTid = rpcGetThreadId();

    mConnections.mIncoming.push_back(session);
    mConnections.mMaxIncoming = mConnections.mIncoming.size();
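    // mMaxIncoming tracks the high-water mark of incoming connections; the
    // shutdown listener compares its per-thread shutdown count against it.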

    return session;
}

bool RpcSession::removeIncomingConnection(const sp<RpcConnection>& connection) {
    RpcMutexUniqueLock _l(mMutex);
    if (auto it =
                std::find(mConnections.mIncoming.begin(), mConnections.mIncoming.end(), connection);
        it != mConnections.mIncoming.end()) {
        mConnections.mIncoming.erase(it);
        if (mConnections.mIncoming.size() == 0) {
            sp<EventListener> listener = mEventListener.promote();
            if (listener) {
                _l.unlock();
                listener->onSessionAllIncomingThreadsEnded(sp<RpcSession>::fromExisting(this));
            }
        }
        return true;
    }
    return false;
}

void RpcSession::clearConnectionTid(const sp<RpcConnection>& connection) {
    RpcMutexUniqueLock _l(mMutex);
    connection->exclusiveTid = std::nullopt;
    if (mConnections.mWaitingThreads > 0) {
        _l.unlock();
        mAvailableConnectionCv.notify_one();
    }
}

std::vector<uint8_t> RpcSession::getCertificate(RpcCertificateFormat format) {
    return mCtx->getCertificate(format);
}

status_t RpcSession::ExclusiveConnection::find(const sp<RpcSession>& session, ConnectionUse use,
                                               ExclusiveConnection* connection) {
    connection->mSession = session;
    connection->mConnection = nullptr;
    connection->mReentrant = false;

    uint64_t tid = rpcGetThreadId();
    RpcMutexUniqueLock _l(session->mMutex);

    session->mConnections.mWaitingThreads++;
    while (true) {
        sp<RpcConnection> exclusive;
        sp<RpcConnection> available;

        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated connection if available
        findConnection(tid, &exclusive, &available, session->mConnections.mOutgoing,
                       session->mConnections.mOutgoingOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mIncoming is excluded below.
        //
        // Imagine we have more than one thread in play, and a single thread
        // sends a synchronous, then an asynchronous command. Imagine the
        // asynchronous command is sent on the first client connection. Then, if
        // we naively send a synchronous command to that same connection, the
        // thread on the far side might be busy processing the asynchronous
        // command. So, we move to considering the second available thread
        // for subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            session->mConnections.mOutgoingOffset = (session->mConnections.mOutgoingOffset + 1) %
                    session->mConnections.mOutgoing.size();
        }

        // USE SERVING SOCKET (e.g. nested transaction)
        if (use != ConnectionUse::CLIENT_ASYNC) {
            sp<RpcConnection> exclusiveIncoming;
            // server connections are always assigned to a thread
            findConnection(tid, &exclusiveIncoming, nullptr /*available*/,
                           session->mConnections.mIncoming, 0 /* index hint */);

            // asynchronous calls cannot be nested, we currently allow ref count
            // calls to be nested (so that you can use this without having extra
            // threads). Note 'drainCommands' is used so that these ref counts can't
            // build up.
            if (exclusiveIncoming != nullptr) {
                if (exclusiveIncoming->allowNested) {
                    // guaranteed to be processed as nested command
                    exclusive = exclusiveIncoming;
                } else if (use == ConnectionUse::CLIENT_REFCOUNT && available == nullptr) {
                    // prefer available socket, but if we don't have one, don't
                    // wait for one
                    exclusive = exclusiveIncoming;
                }
            }
        }

        // if our thread is already using a connection, prioritize using that
        if (exclusive != nullptr) {
            connection->mConnection = exclusive;
            connection->mReentrant = true;
            break;
        } else if (available != nullptr) {
            connection->mConnection = available;
            connection->mConnection->exclusiveTid = tid;
            break;
        }

        if (session->mConnections.mOutgoing.size() == 0) {
            ALOGE("Session has no outgoing connections. This is required for an RPC server to make "
                  "any non-nested (e.g. oneway or on another thread) calls. Use code request "
                  "reason: %d. Incoming connections: %zu. %s.",
                  static_cast<int>(use), session->mConnections.mIncoming.size(),
                  (session->server()
                           ? "This is a server session, so see RpcSession::setMaxIncomingThreads "
                             "for the corresponding client"
                           : "This is a client session, so see "
                             "RpcSession::setMaxOutgoingConnections "
                             "for this client or RpcServer::setMaxThreads for the corresponding "
                             "server"));
            return WOULD_BLOCK;
        }

        LOG_RPC_DETAIL("No available connections (have %zu clients and %zu servers). Waiting...",
                       session->mConnections.mOutgoing.size(),
                       session->mConnections.mIncoming.size());
        session->mAvailableConnectionCv.wait(_l);
    }
    session->mConnections.mWaitingThreads--;

    return OK;
}

void RpcSession::ExclusiveConnection::findConnection(uint64_t tid, sp<RpcConnection>* exclusive,
                                                     sp<RpcConnection>* available,
                                                     std::vector<sp<RpcConnection>>& sockets,
                                                     size_t socketsIndexHint) {
    LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
                        "Bad index %zu >= %zu", socketsIndexHint, sockets.size());

    if (*exclusive != nullptr) return; // consistent with break below

    for (size_t i = 0; i < sockets.size(); i++) {
        sp<RpcConnection>& socket = sockets[(i + socketsIndexHint) % sockets.size()];

        // take first available connection (intuition = caching)
        if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
            *available = socket;
            continue;
        }

        // though, prefer to take a connection which is already in use by this
        // thread (nested transactions)
        if (exclusive && socket->exclusiveTid == tid) {
            *exclusive = socket;
            break; // consistent with return above
        }
    }
}

RpcSession::ExclusiveConnection::~ExclusiveConnection() {
    // reentrant use of a connection means something less deep in the call stack
    // is using this fd, and it retains the right to it. So, we don't give up
    // exclusive ownership, and no thread is freed.
    if (!mReentrant && mConnection != nullptr) {
        mSession->clearConnectionTid(mConnection);
    }
}

bool RpcSession::hasActiveConnection(const std::vector<sp<RpcConnection>>& connections) {
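    // A connection counts as active if some thread has claimed it and its
    // transport is not currently blocked waiting for data.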
    for (const auto& connection : connections) {
        if (connection->exclusiveTid != std::nullopt && !connection->rpcTransport->isWaiting()) {
            return true;
        }
    }
    return false;
}

bool RpcSession::hasActiveRequests() {
    RpcMutexUniqueLock _l(mMutex);
    if (hasActiveConnection(mConnections.mIncoming)) {
        return true;
    }
    if (hasActiveConnection(mConnections.mOutgoing)) {
        return true;
    }
    return mConnections.mWaitingThreads != 0;
}

} // namespace android