1 /*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "RpcState"
18
19 #include "RpcState.h"
20
21 #include <binder/BpBinder.h>
22 #include <binder/Functional.h>
23 #include <binder/IPCThreadState.h>
24 #include <binder/RpcServer.h>
25
26 #include "Constants.h"
27 #include "Debug.h"
28 #include "RpcWireFormat.h"
29 #include "Utils.h"
30
31 #include <random>
32 #include <sstream>
33
34 #include <inttypes.h>
35
36 #ifdef __ANDROID__
37 #include <cutils/properties.h>
38 #endif
39
40 namespace android {
41
42 using namespace android::binder::impl;
43 using android::binder::borrowed_fd;
44 using android::binder::unique_fd;
45
#if RPC_FLAKE_PRONE
// Test-only fault injection: on roughly 10% of calls, sleep for a short
// pseudo-random duration (< 1ms) so that timing-dependent races in RPC
// binder are more likely to be exposed by tests.
void rpcMaybeWaitToFlake() {
    [[clang::no_destroy]] static std::random_device r;
    [[clang::no_destroy]] static RpcMutex m;

    unsigned roll;
    {
        // std::random_device is not required to be thread-safe; serialize access.
        RpcMutexLockGuard lock(m);
        roll = r();
    }

    if (roll % 10 == 0) {
        usleep(roll % 1000);
    }
}
#endif
58
enableAncillaryFds(RpcSession::FileDescriptorTransportMode mode)59 static bool enableAncillaryFds(RpcSession::FileDescriptorTransportMode mode) {
60 switch (mode) {
61 case RpcSession::FileDescriptorTransportMode::NONE:
62 return false;
63 case RpcSession::FileDescriptorTransportMode::UNIX:
64 case RpcSession::FileDescriptorTransportMode::TRUSTY:
65 return true;
66 }
67 LOG_ALWAYS_FATAL("Invalid FileDescriptorTransportMode: %d", static_cast<int>(mode));
68 }
69
// Intentionally trivial: all members are initialized by their in-class
// defaults. Defined out-of-line here rather than defaulted in the header —
// presumably to keep emission in this translation unit; verify before inlining.
RpcState::RpcState() {}
RpcState::~RpcState() {}
72
// Records that `binder` is being sent out over `session` and returns, in
// *outAddress, the wire address the remote side should use to refer to it.
// Increments the node's timesSent and pins a strong ref (sentRef) so the
// binder stays alive while the remote holds references. Returns
// INVALID_OPERATION for binders that cannot be sent (proxies from another
// session, or kernel-binder proxies), DEAD_OBJECT after termination, and
// NO_MEMORY when the node table is full.
status_t RpcState::onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
                                   uint64_t* outAddress) {
    bool isRemote = binder->remoteBinder();
    bool isRpc = isRemote && binder->remoteBinder()->isRpcBinder();

    if (isRpc && binder->remoteBinder()->getPrivateAccessor().rpcSession() != session) {
        // We need to be able to send instructions over the socket for how to
        // connect to a different server, and we also need to let the host
        // process know that this is happening.
        ALOGE("Cannot send binder from unrelated binder RPC session.");
        return INVALID_OPERATION;
    }

    if (isRemote && !isRpc) {
        // Without additional work, this would have the effect of using this
        // process to proxy calls from the socket over to the other process, and
        // it would make those calls look like they come from us (not over the
        // sockets). In order to make this work transparently like binder, we
        // would instead need to send instructions over the socket for how to
        // connect to the host process, and we also need to let the host process
        // know this was happening.
        ALOGE("Cannot send binder proxy %p over sockets", binder.get());
        return INVALID_OPERATION;
    }

    RpcMutexLockGuard _l(mNodeMutex);
    if (mTerminated) return DEAD_OBJECT;

    // TODO(b/182939933): maybe move address out of BpBinder, and keep binder->address map
    // in RpcState
    // Linear scan: see TODO above — there is currently no reverse (binder ->
    // address) index.
    for (auto& [addr, node] : mNodeForAddress) {
        if (binder == node.binder) {
            if (isRpc) {
                // check integrity of data structure
                uint64_t actualAddr = binder->remoteBinder()->getPrivateAccessor().rpcAddress();
                LOG_ALWAYS_FATAL_IF(addr != actualAddr, "Address mismatch %" PRIu64 " vs %" PRIu64,
                                    addr, actualAddr);
            }
            node.timesSent++;
            node.sentRef = binder; // might already be set
            *outAddress = addr;
            return OK;
        }
    }
    // An RPC proxy for this session must already have a node entry; only local
    // binders may legitimately be unknown here.
    LOG_ALWAYS_FATAL_IF(isRpc, "RPC binder must have known address at this point");

    // Whether this end is the server decides which address namespace we
    // allocate from (FOR_SERVER bit), keeping the two sides' IDs disjoint.
    bool forServer = session->server() != nullptr;

    // arbitrary limit for maximum number of nodes in a process (otherwise we
    // might run out of addresses)
    if (mNodeForAddress.size() > 100000) {
        return NO_MEMORY;
    }

    // Loop until an unused address is found (insert fails on collision after
    // mNextId wraps around).
    while (true) {
        RpcWireAddress address{
                .options = RPC_WIRE_ADDRESS_OPTION_CREATED,
                .address = mNextId,
        };
        if (forServer) {
            address.options |= RPC_WIRE_ADDRESS_OPTION_FOR_SERVER;
        }

        // avoid ubsan abort
        if (mNextId >= std::numeric_limits<uint32_t>::max()) {
            mNextId = 0;
        } else {
            mNextId++;
        }

        auto&& [it, inserted] = mNodeForAddress.insert({RpcWireAddress::toRaw(address),
                                                        BinderNode{
                                                                .binder = binder,
                                                                .sentRef = binder,
                                                                .timesSent = 1,
                                                        }});
        if (inserted) {
            *outAddress = it->first;
            return OK;
        }
    }
}
155
onBinderEntering(const sp<RpcSession> & session,uint64_t address,sp<IBinder> * out)156 status_t RpcState::onBinderEntering(const sp<RpcSession>& session, uint64_t address,
157 sp<IBinder>* out) {
158 // ensure that: if we want to use addresses for something else in the future (for
159 // instance, allowing transitive binder sends), that we don't accidentally
160 // send those addresses to old server. Accidentally ignoring this in that
161 // case and considering the binder to be recognized could cause this
162 // process to accidentally proxy transactions for that binder. Of course,
163 // if we communicate with a binder, it could always be proxying
164 // information. However, we want to make sure that isn't done on accident
165 // by a client.
166 RpcWireAddress addr = RpcWireAddress::fromRaw(address);
167 constexpr uint32_t kKnownOptions =
168 RPC_WIRE_ADDRESS_OPTION_CREATED | RPC_WIRE_ADDRESS_OPTION_FOR_SERVER;
169 if (addr.options & ~kKnownOptions) {
170 ALOGE("Address is of an unknown type, rejecting: %" PRIu64, address);
171 return BAD_VALUE;
172 }
173
174 RpcMutexLockGuard _l(mNodeMutex);
175 if (mTerminated) return DEAD_OBJECT;
176
177 if (auto it = mNodeForAddress.find(address); it != mNodeForAddress.end()) {
178 *out = it->second.binder.promote();
179
180 // implicitly have strong RPC refcount, since we received this binder
181 it->second.timesRecd++;
182 return OK;
183 }
184
185 // we don't know about this binder, so the other side of the connection
186 // should have created it.
187 if ((addr.options & RPC_WIRE_ADDRESS_OPTION_FOR_SERVER) == !!session->server()) {
188 ALOGE("Server received unrecognized address which we should own the creation of %" PRIu64,
189 address);
190 return BAD_VALUE;
191 }
192
193 auto&& [it, inserted] = mNodeForAddress.insert({address, BinderNode{}});
194 LOG_ALWAYS_FATAL_IF(!inserted, "Failed to insert binder when creating proxy");
195
196 // Currently, all binders are assumed to be part of the same session (no
197 // device global binders in the RPC world).
198 it->second.binder = *out = BpBinder::PrivateAccessor::create(session, it->first);
199 it->second.timesRecd = 1;
200 return OK;
201 }
202
// For a LOCAL binder that was both sent and received on this session, tells
// the remote side that its references to us have been folded into our own
// (sends a dec-strong down to 0 for timesRecd). No-op for proxies and for
// nodes that were never received. Aborts if called with arguments
// inconsistent with the node table (caller must hold an sp<> on `binder`).
status_t RpcState::flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t address,
                                         const sp<IBinder>& binder) {
    // We can flush all references when the binder is destroyed. No need to send
    // extra reference counting packets now.
    if (binder->remoteBinder()) return OK;

    RpcMutexUniqueLock _l(mNodeMutex);
    if (mTerminated) return DEAD_OBJECT;

    auto it = mNodeForAddress.find(address);

    LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(), "Can't be deleted while we hold sp<>");
    LOG_ALWAYS_FATAL_IF(it->second.binder != binder,
                        "Caller of flushExcessBinderRefs using inconsistent arguments");

    LOG_ALWAYS_FATAL_IF(it->second.timesSent <= 0, "Local binder must have been sent %p",
                        binder.get());

    // For a local binder, we only need to know that we sent it. Now that we
    // have an sp<> for this call, we don't need anything more. If the other
    // process is done with this binder, it needs to know we received the
    // refcount associated with this call, so we can acknowledge that we
    // received it. Once (or if) it has no other refcounts, it would reply with
    // its own decStrong so that it could be removed from this session.
    if (it->second.timesRecd != 0) {
        // Drop the node lock before doing I/O: sendDecStrongToTarget takes it
        // again internally.
        _l.unlock();

        return session->sendDecStrongToTarget(address, 0);
    }

    return OK;
}
235
sendObituaries(const sp<RpcSession> & session)236 status_t RpcState::sendObituaries(const sp<RpcSession>& session) {
237 RpcMutexUniqueLock _l(mNodeMutex);
238
239 // Gather strong pointers to all of the remote binders for this session so
240 // we hold the strong references. remoteBinder() returns a raw pointer.
241 // Send the obituaries and drop the strong pointers outside of the lock so
242 // the destructors and the onBinderDied calls are not done while locked.
243 std::vector<sp<IBinder>> remoteBinders;
244 for (const auto& [_, binderNode] : mNodeForAddress) {
245 if (auto binder = binderNode.binder.promote()) {
246 remoteBinders.push_back(std::move(binder));
247 }
248 }
249 _l.unlock();
250
251 for (const auto& binder : remoteBinders) {
252 if (binder->remoteBinder() &&
253 binder->remoteBinder()->getPrivateAccessor().rpcSession() == session) {
254 binder->remoteBinder()->sendObituary();
255 }
256 }
257 return OK;
258 }
259
countBinders()260 size_t RpcState::countBinders() {
261 RpcMutexLockGuard _l(mNodeMutex);
262 return mNodeForAddress.size();
263 }
264
dump()265 void RpcState::dump() {
266 RpcMutexLockGuard _l(mNodeMutex);
267 dumpLocked();
268 }
269
// Public entry point: acquires the node lock and hands ownership of it to the
// locked overload, which needs to release the lock itself before destroying
// binders (see clear(RpcMutexUniqueLock)).
void RpcState::clear() {
    return clear(RpcMutexUniqueLock(mNodeMutex));
}
273
// Terminates this RpcState: marks it dead and drops every tracked binder.
// Takes ownership of the already-held node lock so it can control exactly
// when the lock is released — binder destructors must run unlocked (see
// comment below). Idempotent: a second call only asserts emptiness.
void RpcState::clear(RpcMutexUniqueLock nodeLock) {
    if (mTerminated) {
        LOG_ALWAYS_FATAL_IF(!mNodeForAddress.empty(),
                            "New state should be impossible after terminating!");
        return;
    }
    mTerminated = true;

    if (SHOULD_LOG_RPC_DETAIL) {
        ALOGE("RpcState::clear()");
        dumpLocked();
    }

    // invariants
    for (auto& [address, node] : mNodeForAddress) {
        bool guaranteedHaveBinder = node.timesSent > 0;
        if (guaranteedHaveBinder) {
            LOG_ALWAYS_FATAL_IF(node.sentRef == nullptr,
                                "Binder expected to be owned with address: %" PRIu64 " %s", address,
                                node.toString().c_str());
        }
    }

    // if the destructor of a binder object makes another RPC call, then calling
    // decStrong could deadlock. So, we must hold onto these binders until
    // mNodeMutex is no longer taken.
    auto temp = std::move(mNodeForAddress);
    mNodeForAddress.clear(); // RpcState isn't reusable, but for future/explicit

    nodeLock.unlock();
    temp.clear(); // explicit
}
306
// Dumps every tracked node to the log. Caller must hold mNodeMutex.
// NOTE(review): the first two ALOGE lines both act as headers (pointer, then
// node count) — looks redundant; confirm before merging them.
void RpcState::dumpLocked() {
    ALOGE("DUMP OF RpcState %p", this);
    ALOGE("DUMP OF RpcState (%zu nodes)", mNodeForAddress.size());
    for (const auto& [address, node] : mNodeForAddress) {
        ALOGE("- address: %" PRIu64 " %s", address, node.toString().c_str());
    }
    ALOGE("END DUMP OF RpcState");
}
315
toString() const316 std::string RpcState::BinderNode::toString() const {
317 sp<IBinder> strongBinder = this->binder.promote();
318
319 const char* desc;
320 if (strongBinder) {
321 if (strongBinder->remoteBinder()) {
322 if (strongBinder->remoteBinder()->isRpcBinder()) {
323 desc = "(rpc binder proxy)";
324 } else {
325 desc = "(binder proxy)";
326 }
327 } else {
328 desc = "(local binder)";
329 }
330 } else {
331 desc = "(not promotable)";
332 }
333
334 std::stringstream ss;
335 ss << "node{" << intptr_t(this->binder.unsafe_get()) << " times sent: " << this->timesSent
336 << " times recd: " << this->timesRecd << " type: " << desc << "}";
337 return ss.str();
338 }
339
// Allocates a buffer for an incoming/outgoing command body. On over-limit or
// allocation failure mData stays null (and valid() is false) while mSize still
// records the requested size — callers must check valid() before use.
RpcState::CommandData::CommandData(size_t size) : mSize(size) {
    if (size == 0) return;

    // The maximum size for regular binder is 1MB for all concurrent
    // transactions. A very small proportion of transactions are even
    // larger than a page, but we need to avoid allocating too much
    // data on behalf of an arbitrary client, or we could risk being in
    // a position where a single additional allocation could run out of
    // memory.
    //
    // Note, this limit may not reflect the total amount of data allocated for a
    // transaction (in some cases, additional fixed size amounts are added),
    // though for rough consistency, we should avoid cases where this data type
    // is used for multiple dynamic allocations for a single transaction.
    if (size > binder::kRpcTransactionLimitBytes) {
        ALOGE("Transaction requested too much data allocation: %zu bytes, failing.", size);
        return;
    } else if (size > binder::kLogTransactionsOverBytes) {
        ALOGW("Transaction too large: inefficient and in danger of breaking: %zu bytes.", size);
    }
    // nothrow: allocation failure is reported via valid(), not std::bad_alloc.
    mData.reset(new (std::nothrow) uint8_t[size]);
}
362
// Writes all iovecs (and optional ancillary FDs) fully to the connection's
// transport. `what` is a label for logging only. `altPoll`, when set, is
// invoked by the transport while the write would block (used to drain pending
// commands — see transactAddress). Any write failure shuts down the whole
// session before returning the error.
status_t RpcState::rpcSend(const sp<RpcSession::RpcConnection>& connection,
                           const sp<RpcSession>& session, const char* what, iovec* iovs, int niovs,
                           const std::optional<SmallFunction<status_t()>>& altPoll,
                           const std::vector<std::variant<unique_fd, borrowed_fd>>* ancillaryFds) {
    for (int i = 0; i < niovs; i++) {
        LOG_RPC_DETAIL("Sending %s (part %d of %d) on RpcTransport %p: %s",
                       what, i + 1, niovs, connection->rpcTransport.get(),
                       HexString(iovs[i].iov_base, iovs[i].iov_len).c_str());
    }

    if (status_t status =
                connection->rpcTransport->interruptableWriteFully(session->mShutdownTrigger.get(),
                                                                  iovs, niovs, altPoll,
                                                                  ancillaryFds);
        status != OK) {
        LOG_RPC_DETAIL("Failed to write %s (%d iovs) on RpcTransport %p, error: %s", what, niovs,
                       connection->rpcTransport.get(), statusToString(status).c_str());
        // A partial write leaves the stream unsynchronized; kill the session.
        (void)session->shutdownAndWait(false);
        return status;
    }

    return OK;
}
386
// Reads exactly the bytes described by the iovecs from the connection's
// transport, optionally collecting ancillary FDs into `ancillaryFds`. `what`
// labels the read for logging. Any read failure shuts down the whole session
// before returning the error (the stream cannot be resynchronized).
status_t RpcState::rpcRec(const sp<RpcSession::RpcConnection>& connection,
                          const sp<RpcSession>& session, const char* what, iovec* iovs, int niovs,
                          std::vector<std::variant<unique_fd, borrowed_fd>>* ancillaryFds) {
    if (status_t status =
                connection->rpcTransport->interruptableReadFully(session->mShutdownTrigger.get(),
                                                                 iovs, niovs, std::nullopt,
                                                                 ancillaryFds);
        status != OK) {
        LOG_RPC_DETAIL("Failed to read %s (%d iovs) on RpcTransport %p, error: %s", what, niovs,
                       connection->rpcTransport.get(), statusToString(status).c_str());
        (void)session->shutdownAndWait(false);
        return status;
    }

    // Log only after the full read succeeds, so the hex dump shows real data.
    for (int i = 0; i < niovs; i++) {
        LOG_RPC_DETAIL("Received %s (part %d of %d) on RpcTransport %p: %s",
                       what, i + 1, niovs, connection->rpcTransport.get(),
                       HexString(iovs[i].iov_base, iovs[i].iov_len).c_str());
    }
    return OK;
}
408
validateProtocolVersion(uint32_t version)409 bool RpcState::validateProtocolVersion(uint32_t version) {
410 if (version == RPC_WIRE_PROTOCOL_VERSION_EXPERIMENTAL) {
411 #if defined(__ANDROID__)
412 char codename[PROPERTY_VALUE_MAX];
413 property_get("ro.build.version.codename", codename, "");
414 if (!strcmp(codename, "REL")) {
415 ALOGE("Cannot use experimental RPC binder protocol in a release configuration.");
416 return false;
417 }
418 #else
419 ALOGE("Cannot use experimental RPC binder protocol outside of Android.");
420 return false;
421 #endif
422 } else if (version >= RPC_WIRE_PROTOCOL_VERSION_NEXT) {
423 ALOGE("Cannot use RPC binder protocol version %u which is unknown (current protocol "
424 "version "
425 "is %u).",
426 version, RPC_WIRE_PROTOCOL_VERSION);
427 return false;
428 }
429
430 return true;
431 }
432
readNewSessionResponse(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,uint32_t * version)433 status_t RpcState::readNewSessionResponse(const sp<RpcSession::RpcConnection>& connection,
434 const sp<RpcSession>& session, uint32_t* version) {
435 RpcNewSessionResponse response;
436 iovec iov{&response, sizeof(response)};
437 if (status_t status = rpcRec(connection, session, "new session response", &iov, 1, nullptr);
438 status != OK) {
439 return status;
440 }
441 *version = response.version;
442 return OK;
443 }
444
sendConnectionInit(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session)445 status_t RpcState::sendConnectionInit(const sp<RpcSession::RpcConnection>& connection,
446 const sp<RpcSession>& session) {
447 RpcOutgoingConnectionInit init{
448 .msg = RPC_CONNECTION_INIT_OKAY,
449 };
450 iovec iov{&init, sizeof(init)};
451 return rpcSend(connection, session, "connection init", &iov, 1, std::nullopt);
452 }
453
readConnectionInit(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session)454 status_t RpcState::readConnectionInit(const sp<RpcSession::RpcConnection>& connection,
455 const sp<RpcSession>& session) {
456 RpcOutgoingConnectionInit init;
457 iovec iov{&init, sizeof(init)};
458 if (status_t status = rpcRec(connection, session, "connection init", &iov, 1, nullptr);
459 status != OK)
460 return status;
461
462 static_assert(sizeof(init.msg) == sizeof(RPC_CONNECTION_INIT_OKAY));
463 if (0 != strncmp(init.msg, RPC_CONNECTION_INIT_OKAY, sizeof(init.msg))) {
464 ALOGE("Connection init message unrecognized %.*s", static_cast<int>(sizeof(init.msg)),
465 init.msg);
466 return BAD_VALUE;
467 }
468 return OK;
469 }
470
// Fetches the session's root object from the server via the special
// RPC_SPECIAL_TRANSACT_GET_ROOT transaction on the zero address. Returns
// nullptr on transaction failure.
sp<IBinder> RpcState::getRootObject(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<RpcSession>& session) {
    Parcel request;
    request.markForRpc(session);
    Parcel response;

    status_t err = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_ROOT, request, session,
                                   &response, 0);
    if (err != OK) {
        ALOGE("Error getting root object: %s", statusToString(err).c_str());
        return nullptr;
    }

    return response.readStrongBinder();
}
486
getMaxThreads(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,size_t * maxThreadsOut)487 status_t RpcState::getMaxThreads(const sp<RpcSession::RpcConnection>& connection,
488 const sp<RpcSession>& session, size_t* maxThreadsOut) {
489 Parcel data;
490 data.markForRpc(session);
491 Parcel reply;
492
493 status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_MAX_THREADS, data,
494 session, &reply, 0);
495 if (status != OK) {
496 ALOGE("Error getting max threads: %s", statusToString(status).c_str());
497 return status;
498 }
499
500 int32_t maxThreads;
501 status = reply.readInt32(&maxThreads);
502 if (status != OK) return status;
503 if (maxThreads <= 0) {
504 ALOGE("Error invalid max maxThreads: %d", maxThreads);
505 return BAD_VALUE;
506 }
507
508 *maxThreadsOut = maxThreads;
509 return OK;
510 }
511
getSessionId(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,std::vector<uint8_t> * sessionIdOut)512 status_t RpcState::getSessionId(const sp<RpcSession::RpcConnection>& connection,
513 const sp<RpcSession>& session, std::vector<uint8_t>* sessionIdOut) {
514 Parcel data;
515 data.markForRpc(session);
516 Parcel reply;
517
518 status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_SESSION_ID, data,
519 session, &reply, 0);
520 if (status != OK) {
521 ALOGE("Error getting session ID: %s", statusToString(status).c_str());
522 return status;
523 }
524
525 return reply.readByteVector(sessionIdOut);
526 }
527
transact(const sp<RpcSession::RpcConnection> & connection,const sp<IBinder> & binder,uint32_t code,const Parcel & data,const sp<RpcSession> & session,Parcel * reply,uint32_t flags)528 status_t RpcState::transact(const sp<RpcSession::RpcConnection>& connection,
529 const sp<IBinder>& binder, uint32_t code, const Parcel& data,
530 const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
531 std::string errorMsg;
532 if (status_t status = validateParcel(session, data, &errorMsg); status != OK) {
533 ALOGE("Refusing to send RPC on binder %p code %" PRIu32 ": Parcel %p failed validation: %s",
534 binder.get(), code, &data, errorMsg.c_str());
535 return status;
536 }
537 uint64_t address;
538 if (status_t status = onBinderLeaving(session, binder, &address); status != OK) return status;
539
540 return transactAddress(connection, address, code, data, session, reply, flags);
541 }
542
// Sends a transaction to the node identified by `address` (0 = special
// session-control transactions). Builds the wire message as
// [RpcWireHeader][RpcWireTransaction][parcel data][object table] and writes it
// with rpcSend; for oneway calls the per-node asyncNumber orders execution on
// the receiver. Synchronous calls then block in waitForReply.
status_t RpcState::transactAddress(const sp<RpcSession::RpcConnection>& connection,
                                   uint64_t address, uint32_t code, const Parcel& data,
                                   const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
    LOG_ALWAYS_FATAL_IF(!data.isForRpc());
    LOG_ALWAYS_FATAL_IF(data.objectsCount() != 0);

    uint64_t asyncNumber = 0;

    if (address != 0) {
        RpcMutexUniqueLock _l(mNodeMutex);
        if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
        auto it = mNodeForAddress.find(address);
        LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(),
                            "Sending transact on unknown address %" PRIu64, address);

        if (flags & IBinder::FLAG_ONEWAY) {
            // Claim this node's next async sequence number; the receiver runs
            // oneway calls in this order.
            asyncNumber = it->second.asyncNumber;
            if (!nodeProgressAsyncNumber(&it->second)) {
                _l.unlock();
                (void)session->shutdownAndWait(false);
                return DEAD_OBJECT;
            }
        }
    }

    auto* rpcFields = data.maybeRpcFields();
    LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);

    Span<const uint32_t> objectTableSpan = Span<const uint32_t>{rpcFields->mObjectPositions.data(),
                                                                rpcFields->mObjectPositions.size()};

    // Compute total body size with explicit overflow checks (sizes come from
    // caller-controlled parcels).
    uint32_t bodySize;
    LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(sizeof(RpcWireTransaction), data.dataSize(),
                                               &bodySize) ||
                                __builtin_add_overflow(objectTableSpan.byteSize(), bodySize,
                                                       &bodySize),
                        "Too much data %zu", data.dataSize());
    RpcWireHeader command{
            .command = RPC_COMMAND_TRANSACT,
            .bodySize = bodySize,
    };

    RpcWireTransaction transaction{
            .address = RpcWireAddress::fromRaw(address),
            .code = code,
            .flags = flags,
            .asyncNumber = asyncNumber,
            // bodySize didn't overflow => this cast is safe
            .parcelDataSize = static_cast<uint32_t>(data.dataSize()),
    };

    // Oneway calls have no sync point, so if many are sent before, whether this
    // is a twoway or oneway transaction, they may have filled up the socket.
    // So, make sure we drain them before polling
    constexpr size_t kWaitMaxUs = 1000000;
    constexpr size_t kWaitLogUs = 10000;
    size_t waitUs = 0;

    iovec iovs[]{
            {&command, sizeof(RpcWireHeader)},
            {&transaction, sizeof(RpcWireTransaction)},
            {const_cast<uint8_t*>(data.data()), data.dataSize()},
            objectTableSpan.toIovec(),
    };
    // Invoked by the transport while the socket is not writable: drains
    // incoming control commands and backs off with exponential sleep
    // (1us .. kWaitMaxUs), logging once waits exceed kWaitLogUs.
    auto altPoll = [&] {
        if (waitUs > kWaitLogUs) {
            ALOGE("Cannot send command, trying to process pending refcounts. Waiting "
                  "%zuus. Too many oneway calls?",
                  waitUs);
        }

        if (waitUs > 0) {
            usleep(waitUs);
            waitUs = std::min(kWaitMaxUs, waitUs * 2);
        } else {
            waitUs = 1;
        }

        return drainCommands(connection, session, CommandType::CONTROL_ONLY);
    };
    if (status_t status = rpcSend(connection, session, "transaction", iovs, countof(iovs),
                                  std::ref(altPoll), rpcFields->mFds.get());
        status != OK) {
        // rpcSend calls shutdownAndWait, so all refcounts should be reset. If we ever tolerate
        // errors here, then we may need to undo the binder-sent counts for the transaction as
        // well as for the binder objects in the Parcel
        return status;
    }

    if (flags & IBinder::FLAG_ONEWAY) {
        LOG_RPC_DETAIL("Oneway command, so no longer waiting on RpcTransport %p",
                       connection->rpcTransport.get());

        // Do not wait on result.
        return OK;
    }

    LOG_ALWAYS_FATAL_IF(reply == nullptr, "Reply parcel must be used for synchronous transaction.");

    return waitForReply(connection, session, reply);
}
644
cleanup_reply_data(const uint8_t * data,size_t dataSize,const binder_size_t * objects,size_t objectsCount)645 static void cleanup_reply_data(const uint8_t* data, size_t dataSize, const binder_size_t* objects,
646 size_t objectsCount) {
647 delete[] const_cast<uint8_t*>(data);
648 (void)dataSize;
649 LOG_ALWAYS_FATAL_IF(objects != nullptr);
650 (void)objectsCount;
651 }
652
waitForReply(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,Parcel * reply)653 status_t RpcState::waitForReply(const sp<RpcSession::RpcConnection>& connection,
654 const sp<RpcSession>& session, Parcel* reply) {
655 std::vector<std::variant<unique_fd, borrowed_fd>> ancillaryFds;
656 RpcWireHeader command;
657 while (true) {
658 iovec iov{&command, sizeof(command)};
659 if (status_t status = rpcRec(connection, session, "command header (for reply)", &iov, 1,
660 enableAncillaryFds(session->getFileDescriptorTransportMode())
661 ? &ancillaryFds
662 : nullptr);
663 status != OK)
664 return status;
665
666 if (command.command == RPC_COMMAND_REPLY) break;
667
668 if (status_t status = processCommand(connection, session, command, CommandType::ANY,
669 std::move(ancillaryFds));
670 status != OK)
671 return status;
672
673 // Reset to avoid spurious use-after-move warning from clang-tidy.
674 ancillaryFds = decltype(ancillaryFds)();
675 }
676
677 const size_t rpcReplyWireSize = RpcWireReply::wireSize(session->getProtocolVersion().value());
678
679 if (command.bodySize < rpcReplyWireSize) {
680 ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcWireReply. Terminating!",
681 sizeof(RpcWireReply), command.bodySize);
682 (void)session->shutdownAndWait(false);
683 return BAD_VALUE;
684 }
685
686 RpcWireReply rpcReply;
687 memset(&rpcReply, 0, sizeof(RpcWireReply)); // zero because of potential short read
688
689 CommandData data(command.bodySize - rpcReplyWireSize);
690 if (!data.valid()) return NO_MEMORY;
691
692 iovec iovs[]{
693 {&rpcReply, rpcReplyWireSize},
694 {data.data(), data.size()},
695 };
696 if (status_t status = rpcRec(connection, session, "reply body", iovs, countof(iovs), nullptr);
697 status != OK)
698 return status;
699
700 if (rpcReply.status != OK) return rpcReply.status;
701
702 Span<const uint8_t> parcelSpan = {data.data(), data.size()};
703 Span<const uint32_t> objectTableSpan;
704 if (session->getProtocolVersion().value() >=
705 RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE) {
706 std::optional<Span<const uint8_t>> objectTableBytes =
707 parcelSpan.splitOff(rpcReply.parcelDataSize);
708 if (!objectTableBytes.has_value()) {
709 ALOGE("Parcel size larger than available bytes: %" PRId32 " vs %zu. Terminating!",
710 rpcReply.parcelDataSize, parcelSpan.byteSize());
711 (void)session->shutdownAndWait(false);
712 return BAD_VALUE;
713 }
714 std::optional<Span<const uint32_t>> maybeSpan =
715 objectTableBytes->reinterpret<const uint32_t>();
716 if (!maybeSpan.has_value()) {
717 ALOGE("Bad object table size inferred from RpcWireReply. Saw bodySize=%" PRId32
718 " sizeofHeader=%zu parcelSize=%" PRId32 " objectTableBytesSize=%zu. Terminating!",
719 command.bodySize, rpcReplyWireSize, rpcReply.parcelDataSize,
720 objectTableBytes->size);
721 return BAD_VALUE;
722 }
723 objectTableSpan = *maybeSpan;
724 }
725
726 data.release();
727 return reply->rpcSetDataReference(session, parcelSpan.data, parcelSpan.size,
728 objectTableSpan.data, objectTableSpan.size,
729 std::move(ancillaryFds), cleanup_reply_data);
730 }
731
// Sends a single RPC_COMMAND_DEC_STRONG dropping this node's received
// refcount from its current value down to `target`. Returns OK without
// sending anything if the count is already at target (concurrent senders get
// coalesced). The node entry itself is erased (via tryEraseNode) when no
// references remain; note that tryEraseNode consumes and releases the lock.
status_t RpcState::sendDecStrongToTarget(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, uint64_t addr,
                                         size_t target) {
    RpcDecStrong body = {
            .address = RpcWireAddress::fromRaw(addr),
    };

    {
        RpcMutexUniqueLock _l(mNodeMutex);
        if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
        auto it = mNodeForAddress.find(addr);
        LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(),
                            "Sending dec strong on unknown address %" PRIu64, addr);

        LOG_ALWAYS_FATAL_IF(it->second.timesRecd < target, "Can't dec count of %zu to %zu.",
                            it->second.timesRecd, target);

        // typically this happens when multiple threads send dec refs at the
        // same time - the transactions will get combined automatically
        if (it->second.timesRecd == target) return OK;

        body.amount = it->second.timesRecd - target;
        it->second.timesRecd = target;

        LOG_ALWAYS_FATAL_IF(nullptr != tryEraseNode(session, std::move(_l), it),
                            "Bad state. RpcState shouldn't own received binder");
        // LOCK ALREADY RELEASED
    }

    // Socket I/O happens outside the node lock.
    RpcWireHeader cmd = {
            .command = RPC_COMMAND_DEC_STRONG,
            .bodySize = sizeof(RpcDecStrong),
    };
    iovec iovs[]{{&cmd, sizeof(cmd)}, {&body, sizeof(body)}};
    return rpcSend(connection, session, "dec ref", iovs, countof(iovs), std::nullopt);
}
768
getAndExecuteCommand(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,CommandType type)769 status_t RpcState::getAndExecuteCommand(const sp<RpcSession::RpcConnection>& connection,
770 const sp<RpcSession>& session, CommandType type) {
771 LOG_RPC_DETAIL("getAndExecuteCommand on RpcTransport %p", connection->rpcTransport.get());
772
773 std::vector<std::variant<unique_fd, borrowed_fd>> ancillaryFds;
774 RpcWireHeader command;
775 iovec iov{&command, sizeof(command)};
776 if (status_t status =
777 rpcRec(connection, session, "command header (for server)", &iov, 1,
778 enableAncillaryFds(session->getFileDescriptorTransportMode()) ? &ancillaryFds
779 : nullptr);
780 status != OK)
781 return status;
782
783 return processCommand(connection, session, command, type, std::move(ancillaryFds));
784 }
785
drainCommands(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,CommandType type)786 status_t RpcState::drainCommands(const sp<RpcSession::RpcConnection>& connection,
787 const sp<RpcSession>& session, CommandType type) {
788 while (true) {
789 status_t status = connection->rpcTransport->pollRead();
790 if (status == WOULD_BLOCK) break;
791 if (status != OK) return status;
792
793 status = getAndExecuteCommand(connection, session, type);
794 if (status != OK) return status;
795 }
796 return OK;
797 }
798
processCommand(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,const RpcWireHeader & command,CommandType type,std::vector<std::variant<unique_fd,borrowed_fd>> && ancillaryFds)799 status_t RpcState::processCommand(
800 const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
801 const RpcWireHeader& command, CommandType type,
802 std::vector<std::variant<unique_fd, borrowed_fd>>&& ancillaryFds) {
803 #ifdef BINDER_WITH_KERNEL_IPC
804 IPCThreadState* kernelBinderState = IPCThreadState::selfOrNull();
805 IPCThreadState::SpGuard spGuard{
806 .address = __builtin_frame_address(0),
807 .context = "processing binder RPC command (where RpcServer::setPerSessionRootObject is "
808 "used to distinguish callers)",
809 };
810 const IPCThreadState::SpGuard* origGuard;
811 if (kernelBinderState != nullptr) {
812 origGuard = kernelBinderState->pushGetCallingSpGuard(&spGuard);
813 }
814
815 auto guardUnguard = make_scope_guard([&]() {
816 if (kernelBinderState != nullptr) {
817 kernelBinderState->restoreGetCallingSpGuard(origGuard);
818 }
819 });
820 #endif // BINDER_WITH_KERNEL_IPC
821
822 switch (command.command) {
823 case RPC_COMMAND_TRANSACT:
824 if (type != CommandType::ANY) return BAD_TYPE;
825 return processTransact(connection, session, command, std::move(ancillaryFds));
826 case RPC_COMMAND_DEC_STRONG:
827 return processDecStrong(connection, session, command);
828 }
829
830 // We should always know the version of the opposing side, and since the
831 // RPC-binder-level wire protocol is not self synchronizing, we have no way
832 // to understand where the current command ends and the next one begins. We
833 // also can't consider it a fatal error because this would allow any client
834 // to kill us, so ending the session for misbehaving client.
835 ALOGE("Unknown RPC command %d - terminating session", command.command);
836 (void)session->shutdownAndWait(false);
837 return DEAD_OBJECT;
838 }
processTransact(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,const RpcWireHeader & command,std::vector<std::variant<unique_fd,borrowed_fd>> && ancillaryFds)839 status_t RpcState::processTransact(
840 const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
841 const RpcWireHeader& command,
842 std::vector<std::variant<unique_fd, borrowed_fd>>&& ancillaryFds) {
843 LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_TRANSACT, "command: %d", command.command);
844
845 CommandData transactionData(command.bodySize);
846 if (!transactionData.valid()) {
847 return NO_MEMORY;
848 }
849 iovec iov{transactionData.data(), transactionData.size()};
850 if (status_t status = rpcRec(connection, session, "transaction body", &iov, 1, nullptr);
851 status != OK)
852 return status;
853
854 return processTransactInternal(connection, session, std::move(transactionData),
855 std::move(ancillaryFds));
856 }
857
do_nothing_to_transact_data(const uint8_t * data,size_t dataSize,const binder_size_t * objects,size_t objectsCount)858 static void do_nothing_to_transact_data(const uint8_t* data, size_t dataSize,
859 const binder_size_t* objects, size_t objectsCount) {
860 (void)data;
861 (void)dataSize;
862 (void)objects;
863 (void)objectsCount;
864 }
865
processTransactInternal(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,CommandData transactionData,std::vector<std::variant<unique_fd,borrowed_fd>> && ancillaryFds)866 status_t RpcState::processTransactInternal(
867 const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
868 CommandData transactionData,
869 std::vector<std::variant<unique_fd, borrowed_fd>>&& ancillaryFds) {
870 // for 'recursive' calls to this, we have already read and processed the
871 // binder from the transaction data and taken reference counts into account,
872 // so it is cached here.
873 sp<IBinder> target;
874 processTransactInternalTailCall:
875
876 if (transactionData.size() < sizeof(RpcWireTransaction)) {
877 ALOGE("Expecting %zu but got %zu bytes for RpcWireTransaction. Terminating!",
878 sizeof(RpcWireTransaction), transactionData.size());
879 (void)session->shutdownAndWait(false);
880 return BAD_VALUE;
881 }
882 RpcWireTransaction* transaction = reinterpret_cast<RpcWireTransaction*>(transactionData.data());
883
884 uint64_t addr = RpcWireAddress::toRaw(transaction->address);
885 bool oneway = transaction->flags & IBinder::FLAG_ONEWAY;
886
887 status_t replyStatus = OK;
888 if (addr != 0) {
889 if (!target) {
890 replyStatus = onBinderEntering(session, addr, &target);
891 }
892
893 if (replyStatus != OK) {
894 // do nothing
895 } else if (target == nullptr) {
896 // This can happen if the binder is remote in this process, and
897 // another thread has called the last decStrong on this binder.
898 // However, for local binders, it indicates a misbehaving client
899 // (any binder which is being transacted on should be holding a
900 // strong ref count), so in either case, terminating the
901 // session.
902 ALOGE("While transacting, binder has been deleted at address %" PRIu64 ". Terminating!",
903 addr);
904 (void)session->shutdownAndWait(false);
905 replyStatus = BAD_VALUE;
906 } else if (target->localBinder() == nullptr) {
907 ALOGE("Unknown binder address or non-local binder, not address %" PRIu64
908 ". Terminating!",
909 addr);
910 (void)session->shutdownAndWait(false);
911 replyStatus = BAD_VALUE;
912 } else if (oneway) {
913 RpcMutexUniqueLock _l(mNodeMutex);
914 auto it = mNodeForAddress.find(addr);
915 if (it->second.binder.promote() != target) {
916 ALOGE("Binder became invalid during transaction. Bad client? %" PRIu64, addr);
917 replyStatus = BAD_VALUE;
918 } else if (transaction->asyncNumber != it->second.asyncNumber) {
919 // we need to process some other asynchronous transaction
920 // first
921 it->second.asyncTodo.push(BinderNode::AsyncTodo{
922 .ref = target,
923 .data = std::move(transactionData),
924 .ancillaryFds = std::move(ancillaryFds),
925 .asyncNumber = transaction->asyncNumber,
926 });
927
928 size_t numPending = it->second.asyncTodo.size();
929 LOG_RPC_DETAIL("Enqueuing %" PRIu64 " on %" PRIu64 " (%zu pending)",
930 transaction->asyncNumber, addr, numPending);
931
932 constexpr size_t kArbitraryOnewayCallTerminateLevel = 10000;
933 constexpr size_t kArbitraryOnewayCallWarnLevel = 1000;
934 constexpr size_t kArbitraryOnewayCallWarnPer = 1000;
935
936 if (numPending >= kArbitraryOnewayCallWarnLevel) {
937 if (numPending >= kArbitraryOnewayCallTerminateLevel) {
938 ALOGE("WARNING: %zu pending oneway transactions. Terminating!", numPending);
939 _l.unlock();
940 (void)session->shutdownAndWait(false);
941 return FAILED_TRANSACTION;
942 }
943
944 if (numPending % kArbitraryOnewayCallWarnPer == 0) {
945 ALOGW("Warning: many oneway transactions built up on %p (%zu)",
946 target.get(), numPending);
947 }
948 }
949 return OK;
950 }
951 }
952 }
953
954 Parcel reply;
955 reply.markForRpc(session);
956
957 if (replyStatus == OK) {
958 Span<const uint8_t> parcelSpan = {transaction->data,
959 transactionData.size() -
960 offsetof(RpcWireTransaction, data)};
961 Span<const uint32_t> objectTableSpan;
962 if (session->getProtocolVersion().value() >=
963 RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE) {
964 std::optional<Span<const uint8_t>> objectTableBytes =
965 parcelSpan.splitOff(transaction->parcelDataSize);
966 if (!objectTableBytes.has_value()) {
967 ALOGE("Parcel size (%" PRId32 ") greater than available bytes (%zu). Terminating!",
968 transaction->parcelDataSize, parcelSpan.byteSize());
969 (void)session->shutdownAndWait(false);
970 return BAD_VALUE;
971 }
972 std::optional<Span<const uint32_t>> maybeSpan =
973 objectTableBytes->reinterpret<const uint32_t>();
974 if (!maybeSpan.has_value()) {
975 ALOGE("Bad object table size inferred from RpcWireTransaction. Saw bodySize=%zu "
976 "sizeofHeader=%zu parcelSize=%" PRId32
977 " objectTableBytesSize=%zu. Terminating!",
978 transactionData.size(), sizeof(RpcWireTransaction),
979 transaction->parcelDataSize, objectTableBytes->size);
980 return BAD_VALUE;
981 }
982 objectTableSpan = *maybeSpan;
983 }
984
985 Parcel data;
986 // transaction->data is owned by this function. Parcel borrows this data and
987 // only holds onto it for the duration of this function call. Parcel will be
988 // deleted before the 'transactionData' object.
989
990 replyStatus =
991 data.rpcSetDataReference(session, parcelSpan.data, parcelSpan.size,
992 objectTableSpan.data, objectTableSpan.size,
993 std::move(ancillaryFds), do_nothing_to_transact_data);
994 // Reset to avoid spurious use-after-move warning from clang-tidy.
995 ancillaryFds = std::remove_reference<decltype(ancillaryFds)>::type();
996
997 if (replyStatus == OK) {
998 if (target) {
999 bool origAllowNested = connection->allowNested;
1000 connection->allowNested = !oneway;
1001
1002 replyStatus = target->transact(transaction->code, data, &reply, transaction->flags);
1003
1004 connection->allowNested = origAllowNested;
1005 } else {
1006 LOG_RPC_DETAIL("Got special transaction %u", transaction->code);
1007
1008 switch (transaction->code) {
1009 case RPC_SPECIAL_TRANSACT_GET_MAX_THREADS: {
1010 replyStatus = reply.writeInt32(session->getMaxIncomingThreads());
1011 break;
1012 }
1013 case RPC_SPECIAL_TRANSACT_GET_SESSION_ID: {
1014 // for client connections, this should always report the value
1015 // originally returned from the server, so this is asserting
1016 // that it exists
1017 replyStatus = reply.writeByteVector(session->mId);
1018 break;
1019 }
1020 default: {
1021 sp<RpcServer> server = session->server();
1022 if (server) {
1023 switch (transaction->code) {
1024 case RPC_SPECIAL_TRANSACT_GET_ROOT: {
1025 sp<IBinder> root = session->mSessionSpecificRootObject
1026 ?: server->getRootObject();
1027 replyStatus = reply.writeStrongBinder(root);
1028 break;
1029 }
1030 default: {
1031 replyStatus = UNKNOWN_TRANSACTION;
1032 }
1033 }
1034 } else {
1035 ALOGE("Special command sent, but no server object attached.");
1036 }
1037 }
1038 }
1039 }
1040 }
1041 }
1042
1043 if (oneway) {
1044 if (replyStatus != OK) {
1045 ALOGW("Oneway call failed with error: %d", replyStatus);
1046 }
1047
1048 LOG_RPC_DETAIL("Processed async transaction %" PRIu64 " on %" PRIu64,
1049 transaction->asyncNumber, addr);
1050
1051 // Check to see if there is another asynchronous transaction to process.
1052 // This behavior differs from binder behavior, since in the binder
1053 // driver, asynchronous transactions will be processed after existing
1054 // pending binder transactions on the queue. The downside of this is
1055 // that asynchronous transactions can be drowned out by synchronous
1056 // transactions. However, we have no easy way to queue these
1057 // transactions after the synchronous transactions we may want to read
1058 // from the wire. So, in socket binder here, we have the opposite
1059 // downside: asynchronous transactions may drown out synchronous
1060 // transactions.
1061 {
1062 RpcMutexUniqueLock _l(mNodeMutex);
1063 auto it = mNodeForAddress.find(addr);
1064 // last refcount dropped after this transaction happened
1065 if (it == mNodeForAddress.end()) return OK;
1066
1067 if (!nodeProgressAsyncNumber(&it->second)) {
1068 _l.unlock();
1069 (void)session->shutdownAndWait(false);
1070 return DEAD_OBJECT;
1071 }
1072
1073 if (it->second.asyncTodo.size() != 0 &&
1074 it->second.asyncTodo.top().asyncNumber == it->second.asyncNumber) {
1075 LOG_RPC_DETAIL("Found next async transaction %" PRIu64 " on %" PRIu64,
1076 it->second.asyncNumber, addr);
1077
1078 // justification for const_cast (consider avoiding priority_queue):
1079 // - AsyncTodo operator< doesn't depend on 'data' or 'ref' objects
1080 // - gotta go fast
1081 auto& todo = const_cast<BinderNode::AsyncTodo&>(it->second.asyncTodo.top());
1082
1083 // reset up arguments
1084 transactionData = std::move(todo.data);
1085 ancillaryFds = std::move(todo.ancillaryFds);
1086 LOG_ALWAYS_FATAL_IF(target != todo.ref,
1087 "async list should be associated with a binder");
1088
1089 it->second.asyncTodo.pop();
1090 goto processTransactInternalTailCall;
1091 }
1092 }
1093
1094 // done processing all the async commands on this binder that we can, so
1095 // write decstrongs on the binder
1096 if (addr != 0 && replyStatus == OK) {
1097 return flushExcessBinderRefs(session, addr, target);
1098 }
1099
1100 return OK;
1101 }
1102
1103 // Binder refs are flushed for oneway calls only after all calls which are
1104 // built up are executed. Otherwise, they fill up the binder buffer.
1105 if (addr != 0 && replyStatus == OK) {
1106 replyStatus = flushExcessBinderRefs(session, addr, target);
1107 }
1108
1109 std::string errorMsg;
1110 if (status_t status = validateParcel(session, reply, &errorMsg); status != OK) {
1111 ALOGE("Reply Parcel failed validation: %s", errorMsg.c_str());
1112 // Forward the error to the client of the transaction.
1113 reply.freeData();
1114 reply.markForRpc(session);
1115 replyStatus = status;
1116 }
1117
1118 auto* rpcFields = reply.maybeRpcFields();
1119 LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);
1120
1121 const size_t rpcReplyWireSize = RpcWireReply::wireSize(session->getProtocolVersion().value());
1122
1123 Span<const uint32_t> objectTableSpan = Span<const uint32_t>{rpcFields->mObjectPositions.data(),
1124 rpcFields->mObjectPositions.size()};
1125
1126 uint32_t bodySize;
1127 LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(rpcReplyWireSize, reply.dataSize(), &bodySize) ||
1128 __builtin_add_overflow(objectTableSpan.byteSize(), bodySize,
1129 &bodySize),
1130 "Too much data for reply %zu", reply.dataSize());
1131 RpcWireHeader cmdReply{
1132 .command = RPC_COMMAND_REPLY,
1133 .bodySize = bodySize,
1134 };
1135 RpcWireReply rpcReply{
1136 .status = replyStatus,
1137 // NOTE: Not necessarily written to socket depending on session
1138 // version.
1139 // NOTE: bodySize didn't overflow => this cast is safe
1140 .parcelDataSize = static_cast<uint32_t>(reply.dataSize()),
1141 .reserved = {0, 0, 0},
1142 };
1143 iovec iovs[]{
1144 {&cmdReply, sizeof(RpcWireHeader)},
1145 {&rpcReply, rpcReplyWireSize},
1146 {const_cast<uint8_t*>(reply.data()), reply.dataSize()},
1147 objectTableSpan.toIovec(),
1148 };
1149 return rpcSend(connection, session, "reply", iovs, countof(iovs), std::nullopt,
1150 rpcFields->mFds.get());
1151 }
1152
processDecStrong(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,const RpcWireHeader & command)1153 status_t RpcState::processDecStrong(const sp<RpcSession::RpcConnection>& connection,
1154 const sp<RpcSession>& session, const RpcWireHeader& command) {
1155 LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_DEC_STRONG, "command: %d", command.command);
1156
1157 if (command.bodySize != sizeof(RpcDecStrong)) {
1158 ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcDecStrong. Terminating!",
1159 sizeof(RpcDecStrong), command.bodySize);
1160 (void)session->shutdownAndWait(false);
1161 return BAD_VALUE;
1162 }
1163
1164 RpcDecStrong body;
1165 iovec iov{&body, sizeof(RpcDecStrong)};
1166 if (status_t status = rpcRec(connection, session, "dec ref body", &iov, 1, nullptr);
1167 status != OK)
1168 return status;
1169
1170 uint64_t addr = RpcWireAddress::toRaw(body.address);
1171 RpcMutexUniqueLock _l(mNodeMutex);
1172 auto it = mNodeForAddress.find(addr);
1173 if (it == mNodeForAddress.end()) {
1174 ALOGE("Unknown binder address %" PRIu64 " for dec strong.", addr);
1175 return OK;
1176 }
1177
1178 sp<IBinder> target = it->second.binder.promote();
1179 if (target == nullptr) {
1180 ALOGE("While requesting dec strong, binder has been deleted at address %" PRIu64
1181 ". Terminating!",
1182 addr);
1183 _l.unlock();
1184 (void)session->shutdownAndWait(false);
1185 return BAD_VALUE;
1186 }
1187
1188 if (it->second.timesSent < body.amount) {
1189 ALOGE("Record of sending binder %zu times, but requested decStrong for %" PRIu64 " of %u",
1190 it->second.timesSent, addr, body.amount);
1191 return OK;
1192 }
1193
1194 LOG_ALWAYS_FATAL_IF(it->second.sentRef == nullptr, "Inconsistent state, lost ref for %" PRIu64,
1195 addr);
1196
1197 LOG_RPC_DETAIL("Processing dec strong of %" PRIu64 " by %u from %zu", addr, body.amount,
1198 it->second.timesSent);
1199
1200 it->second.timesSent -= body.amount;
1201 sp<IBinder> tempHold = tryEraseNode(session, std::move(_l), it);
1202 // LOCK ALREADY RELEASED
1203 tempHold = nullptr; // destructor may make binder calls on this session
1204
1205 return OK;
1206 }
1207
validateParcel(const sp<RpcSession> & session,const Parcel & parcel,std::string * errorMsg)1208 status_t RpcState::validateParcel(const sp<RpcSession>& session, const Parcel& parcel,
1209 std::string* errorMsg) {
1210 auto* rpcFields = parcel.maybeRpcFields();
1211 if (rpcFields == nullptr) {
1212 *errorMsg = "Parcel not crafted for RPC call";
1213 return BAD_TYPE;
1214 }
1215
1216 if (rpcFields->mSession != session) {
1217 *errorMsg = "Parcel's session doesn't match";
1218 return BAD_TYPE;
1219 }
1220
1221 uint32_t protocolVersion = session->getProtocolVersion().value();
1222 if (protocolVersion < RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE &&
1223 !rpcFields->mObjectPositions.empty()) {
1224 std::stringstream ss;
1225 ss << "Parcel has attached objects but the session's protocol version (" << protocolVersion
1226 << ") is too old, must be at least "
1227 << RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE;
1228 *errorMsg = ss.str();
1229 return BAD_VALUE;
1230 }
1231
1232 if (rpcFields->mFds && !rpcFields->mFds->empty()) {
1233 switch (session->getFileDescriptorTransportMode()) {
1234 case RpcSession::FileDescriptorTransportMode::NONE:
1235 *errorMsg =
1236 "Parcel has file descriptors, but no file descriptor transport is enabled";
1237 return FDS_NOT_ALLOWED;
1238 case RpcSession::FileDescriptorTransportMode::UNIX: {
1239 constexpr size_t kMaxFdsPerMsg = 253;
1240 if (rpcFields->mFds->size() > kMaxFdsPerMsg) {
1241 std::stringstream ss;
1242 ss << "Too many file descriptors in Parcel for unix domain socket: "
1243 << rpcFields->mFds->size() << " (max is " << kMaxFdsPerMsg << ")";
1244 *errorMsg = ss.str();
1245 return BAD_VALUE;
1246 }
1247 break;
1248 }
1249 case RpcSession::FileDescriptorTransportMode::TRUSTY: {
1250 // Keep this in sync with trusty_ipc.h!!!
1251 // We could import that file here on Trusty, but it's not
1252 // available on Android
1253 constexpr size_t kMaxFdsPerMsg = 8;
1254 if (rpcFields->mFds->size() > kMaxFdsPerMsg) {
1255 std::stringstream ss;
1256 ss << "Too many file descriptors in Parcel for Trusty IPC connection: "
1257 << rpcFields->mFds->size() << " (max is " << kMaxFdsPerMsg << ")";
1258 *errorMsg = ss.str();
1259 return BAD_VALUE;
1260 }
1261 break;
1262 }
1263 }
1264 }
1265
1266 return OK;
1267 }
1268
tryEraseNode(const sp<RpcSession> & session,RpcMutexUniqueLock nodeLock,std::map<uint64_t,BinderNode>::iterator & it)1269 sp<IBinder> RpcState::tryEraseNode(const sp<RpcSession>& session, RpcMutexUniqueLock nodeLock,
1270 std::map<uint64_t, BinderNode>::iterator& it) {
1271 bool shouldShutdown = false;
1272
1273 sp<IBinder> ref;
1274
1275 if (it->second.timesSent == 0) {
1276 ref = std::move(it->second.sentRef);
1277
1278 if (it->second.timesRecd == 0) {
1279 LOG_ALWAYS_FATAL_IF(!it->second.asyncTodo.empty(),
1280 "Can't delete binder w/ pending async transactions");
1281 mNodeForAddress.erase(it);
1282
1283 if (mNodeForAddress.size() == 0) {
1284 shouldShutdown = true;
1285 }
1286 }
1287 }
1288
1289 // If we shutdown, prevent RpcState from being re-used. This prevents another
1290 // thread from getting the root object again.
1291 if (shouldShutdown) {
1292 clear(std::move(nodeLock));
1293 } else {
1294 nodeLock.unlock(); // explicit
1295 }
1296 // LOCK IS RELEASED
1297
1298 if (shouldShutdown) {
1299 ALOGI("RpcState has no binders left, so triggering shutdown...");
1300 (void)session->shutdownAndWait(false);
1301 }
1302
1303 return ref;
1304 }
1305
nodeProgressAsyncNumber(BinderNode * node)1306 bool RpcState::nodeProgressAsyncNumber(BinderNode* node) {
1307 // 2**64 =~ 10**19 =~ 1000 transactions per second for 585 million years to
1308 // a single binder
1309 if (node->asyncNumber >= std::numeric_limits<decltype(node->asyncNumber)>::max()) {
1310 ALOGE("Out of async transaction IDs. Terminating");
1311 return false;
1312 }
1313 node->asyncNumber++;
1314 return true;
1315 }
1316
1317 } // namespace android
1318