• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "RpcState"
18 
19 #include "RpcState.h"
20 
21 #include <android-base/hex.h>
22 #include <android-base/macros.h>
23 #include <android-base/scopeguard.h>
24 #include <android-base/stringprintf.h>
25 #include <binder/BpBinder.h>
26 #include <binder/IPCThreadState.h>
27 #include <binder/RpcServer.h>
28 
29 #include "Debug.h"
30 #include "RpcWireFormat.h"
31 #include "Utils.h"
32 
33 #include <random>
34 
35 #include <inttypes.h>
36 
37 namespace android {
38 
39 using base::StringPrintf;
40 
#if RPC_FLAKE_PRONE
// Test-only hook (compiled in only under RPC_FLAKE_PRONE): occasionally sleep
// for a short, random duration so latent races are more likely to surface.
void rpcMaybeWaitToFlake() {
    [[clang::no_destroy]] static std::random_device generator;
    [[clang::no_destroy]] static RpcMutex generatorMutex;

    unsigned roll;
    {
        // std::random_device is not guaranteed thread-safe; serialize access.
        RpcMutexLockGuard lock(generatorMutex);
        roll = generator();
    }
    // Flake roughly one call in ten, for up to ~1ms.
    if (roll % 10 == 0) {
        usleep(roll % 1000);
    }
}
#endif
53 
enableAncillaryFds(RpcSession::FileDescriptorTransportMode mode)54 static bool enableAncillaryFds(RpcSession::FileDescriptorTransportMode mode) {
55     switch (mode) {
56         case RpcSession::FileDescriptorTransportMode::NONE:
57             return false;
58         case RpcSession::FileDescriptorTransportMode::UNIX:
59         case RpcSession::FileDescriptorTransportMode::TRUSTY:
60             return true;
61     }
62 }
63 
RpcState()64 RpcState::RpcState() {}
~RpcState()65 RpcState::~RpcState() {}
66 
// Records that |binder| is being sent out over |session| and returns, in
// |outAddress|, the wire address the remote side should use to refer to it.
// Each successful call bumps the node's timesSent and pins a strong reference
// (sentRef) so the binder stays alive until the remote side releases it.
//
// Returns INVALID_OPERATION for binders that cannot leave over this session
// (proxies from another RPC session, or kernel binder proxies), DEAD_OBJECT
// after termination, and NO_MEMORY when the node table is considered full.
status_t RpcState::onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
                                   uint64_t* outAddress) {
    bool isRemote = binder->remoteBinder();
    bool isRpc = isRemote && binder->remoteBinder()->isRpcBinder();

    if (isRpc && binder->remoteBinder()->getPrivateAccessor().rpcSession() != session) {
        // We need to be able to send instructions over the socket for how to
        // connect to a different server, and we also need to let the host
        // process know that this is happening.
        ALOGE("Cannot send binder from unrelated binder RPC session.");
        return INVALID_OPERATION;
    }

    if (isRemote && !isRpc) {
        // Without additional work, this would have the effect of using this
        // process to proxy calls from the socket over to the other process, and
        // it would make those calls look like they come from us (not over the
        // sockets). In order to make this work transparently like binder, we
        // would instead need to send instructions over the socket for how to
        // connect to the host process, and we also need to let the host process
        // know this was happening.
        ALOGE("Cannot send binder proxy %p over sockets", binder.get());
        return INVALID_OPERATION;
    }

    RpcMutexLockGuard _l(mNodeMutex);
    if (mTerminated) return DEAD_OBJECT;

    // TODO(b/182939933): maybe move address out of BpBinder, and keep binder->address map
    // in RpcState
    // Linear scan: if this binder already has a node, reuse its address.
    for (auto& [addr, node] : mNodeForAddress) {
        if (binder == node.binder) {
            if (isRpc) {
                // check integrity of data structure
                uint64_t actualAddr = binder->remoteBinder()->getPrivateAccessor().rpcAddress();
                LOG_ALWAYS_FATAL_IF(addr != actualAddr, "Address mismatch %" PRIu64 " vs %" PRIu64,
                                    addr, actualAddr);
            }
            node.timesSent++;
            node.sentRef = binder; // might already be set
            *outAddress = addr;
            return OK;
        }
    }
    // An RPC proxy received from this session must already have a node above.
    LOG_ALWAYS_FATAL_IF(isRpc, "RPC binder must have known address at this point");

    bool forServer = session->server() != nullptr;

    // arbitrary limit for maximum number of nodes in a process (otherwise we
    // might run out of addresses)
    if (mNodeForAddress.size() > 100000) {
        return NO_MEMORY;
    }

    // Allocate a fresh address; loop handles collisions after mNextId wraps.
    while (true) {
        RpcWireAddress address{
                .options = RPC_WIRE_ADDRESS_OPTION_CREATED,
                .address = mNextId,
        };
        if (forServer) {
            address.options |= RPC_WIRE_ADDRESS_OPTION_FOR_SERVER;
        }

        // avoid ubsan abort
        if (mNextId >= std::numeric_limits<uint32_t>::max()) {
            mNextId = 0;
        } else {
            mNextId++;
        }

        auto&& [it, inserted] = mNodeForAddress.insert({RpcWireAddress::toRaw(address),
                                                        BinderNode{
                                                                .binder = binder,
                                                                .sentRef = binder,
                                                                .timesSent = 1,
                                                        }});
        if (inserted) {
            *outAddress = it->first;
            return OK;
        }
    }
}
149 
onBinderEntering(const sp<RpcSession> & session,uint64_t address,sp<IBinder> * out)150 status_t RpcState::onBinderEntering(const sp<RpcSession>& session, uint64_t address,
151                                     sp<IBinder>* out) {
152     // ensure that: if we want to use addresses for something else in the future (for
153     //   instance, allowing transitive binder sends), that we don't accidentally
154     //   send those addresses to old server. Accidentally ignoring this in that
155     //   case and considering the binder to be recognized could cause this
156     //   process to accidentally proxy transactions for that binder. Of course,
157     //   if we communicate with a binder, it could always be proxying
158     //   information. However, we want to make sure that isn't done on accident
159     //   by a client.
160     RpcWireAddress addr = RpcWireAddress::fromRaw(address);
161     constexpr uint32_t kKnownOptions =
162             RPC_WIRE_ADDRESS_OPTION_CREATED | RPC_WIRE_ADDRESS_OPTION_FOR_SERVER;
163     if (addr.options & ~kKnownOptions) {
164         ALOGE("Address is of an unknown type, rejecting: %" PRIu64, address);
165         return BAD_VALUE;
166     }
167 
168     RpcMutexLockGuard _l(mNodeMutex);
169     if (mTerminated) return DEAD_OBJECT;
170 
171     if (auto it = mNodeForAddress.find(address); it != mNodeForAddress.end()) {
172         *out = it->second.binder.promote();
173 
174         // implicitly have strong RPC refcount, since we received this binder
175         it->second.timesRecd++;
176         return OK;
177     }
178 
179     // we don't know about this binder, so the other side of the connection
180     // should have created it.
181     if ((addr.options & RPC_WIRE_ADDRESS_OPTION_FOR_SERVER) == !!session->server()) {
182         ALOGE("Server received unrecognized address which we should own the creation of %" PRIu64,
183               address);
184         return BAD_VALUE;
185     }
186 
187     auto&& [it, inserted] = mNodeForAddress.insert({address, BinderNode{}});
188     LOG_ALWAYS_FATAL_IF(!inserted, "Failed to insert binder when creating proxy");
189 
190     // Currently, all binders are assumed to be part of the same session (no
191     // device global binders in the RPC world).
192     it->second.binder = *out = BpBinder::PrivateAccessor::create(session, it->first);
193     it->second.timesRecd = 1;
194     return OK;
195 }
196 
// For a local binder that has been both sent and received over |session|,
// acknowledges the received refcounts by sending a decStrong to the remote
// side. Proxies are skipped entirely: their references are flushed when the
// proxy itself is destroyed.
status_t RpcState::flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t address,
                                         const sp<IBinder>& binder) {
    // We can flush all references when the binder is destroyed. No need to send
    // extra reference counting packets now.
    if (binder->remoteBinder()) return OK;

    RpcMutexUniqueLock _l(mNodeMutex);
    if (mTerminated) return DEAD_OBJECT;

    auto it = mNodeForAddress.find(address);

    LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(), "Can't be deleted while we hold sp<>");
    LOG_ALWAYS_FATAL_IF(it->second.binder != binder,
                        "Caller of flushExcessBinderRefs using inconsistent arguments");

    LOG_ALWAYS_FATAL_IF(it->second.timesSent <= 0, "Local binder must have been sent %p",
                        binder.get());

    // For a local binder, we only need to know that we sent it. Now that we
    // have an sp<> for this call, we don't need anything more. If the other
    // process is done with this binder, it needs to know we received the
    // refcount associated with this call, so we can acknowledge that we
    // received it. Once (or if) it has no other refcounts, it would reply with
    // its own decStrong so that it could be removed from this session.
    if (it->second.timesRecd != 0) {
        // Drop the lock before doing transport I/O via the session.
        _l.unlock();

        return session->sendDecStrongToTarget(address, 0);
    }

    return OK;
}
229 
sendObituaries(const sp<RpcSession> & session)230 status_t RpcState::sendObituaries(const sp<RpcSession>& session) {
231     RpcMutexUniqueLock _l(mNodeMutex);
232 
233     // Gather strong pointers to all of the remote binders for this session so
234     // we hold the strong references. remoteBinder() returns a raw pointer.
235     // Send the obituaries and drop the strong pointers outside of the lock so
236     // the destructors and the onBinderDied calls are not done while locked.
237     std::vector<sp<IBinder>> remoteBinders;
238     for (const auto& [_, binderNode] : mNodeForAddress) {
239         if (auto binder = binderNode.binder.promote()) {
240             remoteBinders.push_back(std::move(binder));
241         }
242     }
243     _l.unlock();
244 
245     for (const auto& binder : remoteBinders) {
246         if (binder->remoteBinder() &&
247             binder->remoteBinder()->getPrivateAccessor().rpcSession() == session) {
248             binder->remoteBinder()->sendObituary();
249         }
250     }
251     return OK;
252 }
253 
countBinders()254 size_t RpcState::countBinders() {
255     RpcMutexLockGuard _l(mNodeMutex);
256     return mNodeForAddress.size();
257 }
258 
dump()259 void RpcState::dump() {
260     RpcMutexLockGuard _l(mNodeMutex);
261     dumpLocked();
262 }
263 
clear()264 void RpcState::clear() {
265     return clear(RpcMutexUniqueLock(mNodeMutex));
266 }
267 
// Terminates this state and drops all node bookkeeping. Takes ownership of
// the node lock so it can be released before the held binder references are
// destroyed (their destructors may make RPC calls that would otherwise
// deadlock on mNodeMutex). Idempotent: a second call only asserts emptiness.
void RpcState::clear(RpcMutexUniqueLock nodeLock) {
    if (mTerminated) {
        LOG_ALWAYS_FATAL_IF(!mNodeForAddress.empty(),
                            "New state should be impossible after terminating!");
        return;
    }
    mTerminated = true;

    if (SHOULD_LOG_RPC_DETAIL) {
        ALOGE("RpcState::clear()");
        dumpLocked();
    }

    // invariants
    for (auto& [address, node] : mNodeForAddress) {
        bool guaranteedHaveBinder = node.timesSent > 0;
        if (guaranteedHaveBinder) {
            LOG_ALWAYS_FATAL_IF(node.sentRef == nullptr,
                                "Binder expected to be owned with address: %" PRIu64 " %s", address,
                                node.toString().c_str());
        }
    }

    // if the destructor of a binder object makes another RPC call, then calling
    // decStrong could deadlock. So, we must hold onto these binders until
    // mNodeMutex is no longer taken.
    auto temp = std::move(mNodeForAddress);
    mNodeForAddress.clear(); // RpcState isn't reusable, but for future/explicit

    nodeLock.unlock();
    temp.clear(); // explicit
}
300 
// Logs this state's identity and every tracked node. Caller must hold
// mNodeMutex (enforced by convention, not asserted here).
void RpcState::dumpLocked() {
    ALOGE("DUMP OF RpcState %p", this);
    ALOGE("DUMP OF RpcState (%zu nodes)", mNodeForAddress.size());
    for (const auto& [address, node] : mNodeForAddress) {
        ALOGE("- address: %" PRIu64 " %s", address, node.toString().c_str());
    }
    ALOGE("END DUMP OF RpcState");
}
309 
toString() const310 std::string RpcState::BinderNode::toString() const {
311     sp<IBinder> strongBinder = this->binder.promote();
312 
313     const char* desc;
314     if (strongBinder) {
315         if (strongBinder->remoteBinder()) {
316             if (strongBinder->remoteBinder()->isRpcBinder()) {
317                 desc = "(rpc binder proxy)";
318             } else {
319                 desc = "(binder proxy)";
320             }
321         } else {
322             desc = "(local binder)";
323         }
324     } else {
325         desc = "(not promotable)";
326     }
327 
328     return StringPrintf("node{%p times sent: %zu times recd: %zu type: %s}",
329                         this->binder.unsafe_get(), this->timesSent, this->timesRecd, desc);
330 }
331 
CommandData(size_t size)332 RpcState::CommandData::CommandData(size_t size) : mSize(size) {
333     // The maximum size for regular binder is 1MB for all concurrent
334     // transactions. A very small proportion of transactions are even
335     // larger than a page, but we need to avoid allocating too much
336     // data on behalf of an arbitrary client, or we could risk being in
337     // a position where a single additional allocation could run out of
338     // memory.
339     //
340     // Note, this limit may not reflect the total amount of data allocated for a
341     // transaction (in some cases, additional fixed size amounts are added),
342     // though for rough consistency, we should avoid cases where this data type
343     // is used for multiple dynamic allocations for a single transaction.
344     constexpr size_t kMaxTransactionAllocation = 100 * 1000;
345     if (size == 0) return;
346     if (size > kMaxTransactionAllocation) {
347         ALOGW("Transaction requested too much data allocation %zu", size);
348         return;
349     }
350     mData.reset(new (std::nothrow) uint8_t[size]);
351 }
352 
// Writes the |niovs| buffers in |iovs| to |connection| as one logical
// message. |what| is used for logging only. |altPoll|, when present, is
// passed to the transport to run while a write would block. |ancillaryFds|,
// when non-null, are transmitted as ancillary data with the message.
//
// On any transport error the whole session is shut down (a partial write
// leaves the byte stream unrecoverable) and the error is returned.
status_t RpcState::rpcSend(
        const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
        const char* what, iovec* iovs, int niovs,
        const std::optional<android::base::function_ref<status_t()>>& altPoll,
        const std::vector<std::variant<base::unique_fd, base::borrowed_fd>>* ancillaryFds) {
    for (int i = 0; i < niovs; i++) {
        LOG_RPC_DETAIL("Sending %s (part %d of %d) on RpcTransport %p: %s",
                       what, i + 1, niovs, connection->rpcTransport.get(),
                       android::base::HexString(iovs[i].iov_base, iovs[i].iov_len).c_str());
    }

    if (status_t status =
                connection->rpcTransport->interruptableWriteFully(session->mShutdownTrigger.get(),
                                                                  iovs, niovs, altPoll,
                                                                  ancillaryFds);
        status != OK) {
        LOG_RPC_DETAIL("Failed to write %s (%d iovs) on RpcTransport %p, error: %s", what, niovs,
                       connection->rpcTransport.get(), statusToString(status).c_str());
        (void)session->shutdownAndWait(false);
        return status;
    }

    return OK;
}
377 
// Reads exactly the |niovs| buffers in |iovs| from |connection|. |what| is
// used for logging only. |ancillaryFds|, when non-null, receives any file
// descriptors sent as ancillary data.
//
// On any transport error the whole session is shut down (a partial read
// leaves the byte stream unrecoverable) and the error is returned.
status_t RpcState::rpcRec(
        const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
        const char* what, iovec* iovs, int niovs,
        std::vector<std::variant<base::unique_fd, base::borrowed_fd>>* ancillaryFds) {
    if (status_t status =
                connection->rpcTransport->interruptableReadFully(session->mShutdownTrigger.get(),
                                                                 iovs, niovs, std::nullopt,
                                                                 ancillaryFds);
        status != OK) {
        LOG_RPC_DETAIL("Failed to read %s (%d iovs) on RpcTransport %p, error: %s", what, niovs,
                       connection->rpcTransport.get(), statusToString(status).c_str());
        (void)session->shutdownAndWait(false);
        return status;
    }

    for (int i = 0; i < niovs; i++) {
        LOG_RPC_DETAIL("Received %s (part %d of %d) on RpcTransport %p: %s",
                       what, i + 1, niovs, connection->rpcTransport.get(),
                       android::base::HexString(iovs[i].iov_base, iovs[i].iov_len).c_str());
    }
    return OK;
}
400 
readNewSessionResponse(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,uint32_t * version)401 status_t RpcState::readNewSessionResponse(const sp<RpcSession::RpcConnection>& connection,
402                                           const sp<RpcSession>& session, uint32_t* version) {
403     RpcNewSessionResponse response;
404     iovec iov{&response, sizeof(response)};
405     if (status_t status = rpcRec(connection, session, "new session response", &iov, 1, nullptr);
406         status != OK) {
407         return status;
408     }
409     *version = response.version;
410     return OK;
411 }
412 
sendConnectionInit(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session)413 status_t RpcState::sendConnectionInit(const sp<RpcSession::RpcConnection>& connection,
414                                       const sp<RpcSession>& session) {
415     RpcOutgoingConnectionInit init{
416             .msg = RPC_CONNECTION_INIT_OKAY,
417     };
418     iovec iov{&init, sizeof(init)};
419     return rpcSend(connection, session, "connection init", &iov, 1, std::nullopt);
420 }
421 
readConnectionInit(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session)422 status_t RpcState::readConnectionInit(const sp<RpcSession::RpcConnection>& connection,
423                                       const sp<RpcSession>& session) {
424     RpcOutgoingConnectionInit init;
425     iovec iov{&init, sizeof(init)};
426     if (status_t status = rpcRec(connection, session, "connection init", &iov, 1, nullptr);
427         status != OK)
428         return status;
429 
430     static_assert(sizeof(init.msg) == sizeof(RPC_CONNECTION_INIT_OKAY));
431     if (0 != strncmp(init.msg, RPC_CONNECTION_INIT_OKAY, sizeof(init.msg))) {
432         ALOGE("Connection init message unrecognized %.*s", static_cast<int>(sizeof(init.msg)),
433               init.msg);
434         return BAD_VALUE;
435     }
436     return OK;
437 }
438 
// Performs the special root-object transaction on |connection| and decodes a
// strong binder from the reply. Returns nullptr on any failure.
sp<IBinder> RpcState::getRootObject(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<RpcSession>& session) {
    Parcel request;
    request.markForRpc(session);
    Parcel response;

    if (status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_ROOT, request,
                                          session, &response, 0);
        status != OK) {
        ALOGE("Error getting root object: %s", statusToString(status).c_str());
        return nullptr;
    }

    return response.readStrongBinder();
}
454 
getMaxThreads(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,size_t * maxThreadsOut)455 status_t RpcState::getMaxThreads(const sp<RpcSession::RpcConnection>& connection,
456                                  const sp<RpcSession>& session, size_t* maxThreadsOut) {
457     Parcel data;
458     data.markForRpc(session);
459     Parcel reply;
460 
461     status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_MAX_THREADS, data,
462                                       session, &reply, 0);
463     if (status != OK) {
464         ALOGE("Error getting max threads: %s", statusToString(status).c_str());
465         return status;
466     }
467 
468     int32_t maxThreads;
469     status = reply.readInt32(&maxThreads);
470     if (status != OK) return status;
471     if (maxThreads <= 0) {
472         ALOGE("Error invalid max maxThreads: %d", maxThreads);
473         return BAD_VALUE;
474     }
475 
476     *maxThreadsOut = maxThreads;
477     return OK;
478 }
479 
getSessionId(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,std::vector<uint8_t> * sessionIdOut)480 status_t RpcState::getSessionId(const sp<RpcSession::RpcConnection>& connection,
481                                 const sp<RpcSession>& session, std::vector<uint8_t>* sessionIdOut) {
482     Parcel data;
483     data.markForRpc(session);
484     Parcel reply;
485 
486     status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_SESSION_ID, data,
487                                       session, &reply, 0);
488     if (status != OK) {
489         ALOGE("Error getting session ID: %s", statusToString(status).c_str());
490         return status;
491     }
492 
493     return reply.readByteVector(sessionIdOut);
494 }
495 
transact(const sp<RpcSession::RpcConnection> & connection,const sp<IBinder> & binder,uint32_t code,const Parcel & data,const sp<RpcSession> & session,Parcel * reply,uint32_t flags)496 status_t RpcState::transact(const sp<RpcSession::RpcConnection>& connection,
497                             const sp<IBinder>& binder, uint32_t code, const Parcel& data,
498                             const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
499     std::string errorMsg;
500     if (status_t status = validateParcel(session, data, &errorMsg); status != OK) {
501         ALOGE("Refusing to send RPC on binder %p code %" PRIu32 ": Parcel %p failed validation: %s",
502               binder.get(), code, &data, errorMsg.c_str());
503         return status;
504     }
505     uint64_t address;
506     if (status_t status = onBinderLeaving(session, binder, &address); status != OK) return status;
507 
508     return transactAddress(connection, address, code, data, session, reply, flags);
509 }
510 
// Sends an RPC_COMMAND_TRANSACT for |address| (0 = session-level special
// transactions) over |connection|. For oneway calls (FLAG_ONEWAY) the node's
// async sequence number is consumed and no reply is awaited; otherwise the
// call blocks in waitForReply and fills |reply|.
status_t RpcState::transactAddress(const sp<RpcSession::RpcConnection>& connection,
                                   uint64_t address, uint32_t code, const Parcel& data,
                                   const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
    LOG_ALWAYS_FATAL_IF(!data.isForRpc());
    LOG_ALWAYS_FATAL_IF(data.objectsCount() != 0);

    uint64_t asyncNumber = 0;

    if (address != 0) {
        RpcMutexUniqueLock _l(mNodeMutex);
        if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
        auto it = mNodeForAddress.find(address);
        LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(),
                            "Sending transact on unknown address %" PRIu64, address);

        if (flags & IBinder::FLAG_ONEWAY) {
            // Claim this node's next async sequence number for ordering.
            asyncNumber = it->second.asyncNumber;
            if (!nodeProgressAsyncNumber(&it->second)) {
                _l.unlock();
                (void)session->shutdownAndWait(false);
                return DEAD_OBJECT;
            }
        }
    }

    auto* rpcFields = data.maybeRpcFields();
    LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);

    Span<const uint32_t> objectTableSpan = Span<const uint32_t>{rpcFields->mObjectPositions.data(),
                                                                rpcFields->mObjectPositions.size()};

    // bodySize = header + parcel data + object table; any overflow is fatal.
    uint32_t bodySize;
    LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(sizeof(RpcWireTransaction), data.dataSize(),
                                               &bodySize) ||
                                __builtin_add_overflow(objectTableSpan.byteSize(), bodySize,
                                                       &bodySize),
                        "Too much data %zu", data.dataSize());
    RpcWireHeader command{
            .command = RPC_COMMAND_TRANSACT,
            .bodySize = bodySize,
    };

    RpcWireTransaction transaction{
            .address = RpcWireAddress::fromRaw(address),
            .code = code,
            .flags = flags,
            .asyncNumber = asyncNumber,
            // bodySize didn't overflow => this cast is safe
            .parcelDataSize = static_cast<uint32_t>(data.dataSize()),
    };

    // Oneway calls have no sync point, so if many are sent before, whether this
    // is a twoway or oneway transaction, they may have filled up the socket.
    // So, make sure we drain them before polling
    constexpr size_t kWaitMaxUs = 1000000;
    constexpr size_t kWaitLogUs = 10000;
    size_t waitUs = 0;

    iovec iovs[]{
            {&command, sizeof(RpcWireHeader)},
            {&transaction, sizeof(RpcWireTransaction)},
            {const_cast<uint8_t*>(data.data()), data.dataSize()},
            objectTableSpan.toIovec(),
    };
    // altPoll callback: exponential backoff (1us..1s) while draining control
    // commands so pending refcount traffic can't wedge the send.
    if (status_t status = rpcSend(
                connection, session, "transaction", iovs, arraysize(iovs),
                [&] {
                    if (waitUs > kWaitLogUs) {
                        ALOGE("Cannot send command, trying to process pending refcounts. Waiting "
                              "%zuus. Too many oneway calls?",
                              waitUs);
                    }

                    if (waitUs > 0) {
                        usleep(waitUs);
                        waitUs = std::min(kWaitMaxUs, waitUs * 2);
                    } else {
                        waitUs = 1;
                    }

                    return drainCommands(connection, session, CommandType::CONTROL_ONLY);
                },
                rpcFields->mFds.get());
        status != OK) {
        // rpcSend calls shutdownAndWait, so all refcounts should be reset. If we ever tolerate
        // errors here, then we may need to undo the binder-sent counts for the transaction as
        // well as for the binder objects in the Parcel
        return status;
    }

    if (flags & IBinder::FLAG_ONEWAY) {
        LOG_RPC_DETAIL("Oneway command, so no longer waiting on RpcTransport %p",
                       connection->rpcTransport.get());

        // Do not wait on result.
        return OK;
    }

    LOG_ALWAYS_FATAL_IF(reply == nullptr, "Reply parcel must be used for synchronous transaction.");

    return waitForReply(connection, session, reply);
}
613 
cleanup_reply_data(const uint8_t * data,size_t dataSize,const binder_size_t * objects,size_t objectsCount)614 static void cleanup_reply_data(const uint8_t* data, size_t dataSize, const binder_size_t* objects,
615                                size_t objectsCount) {
616     delete[] const_cast<uint8_t*>(data);
617     (void)dataSize;
618     LOG_ALWAYS_FATAL_IF(objects != nullptr);
619     (void)objectsCount;
620 }
621 
waitForReply(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,Parcel * reply)622 status_t RpcState::waitForReply(const sp<RpcSession::RpcConnection>& connection,
623                                 const sp<RpcSession>& session, Parcel* reply) {
624     std::vector<std::variant<base::unique_fd, base::borrowed_fd>> ancillaryFds;
625     RpcWireHeader command;
626     while (true) {
627         iovec iov{&command, sizeof(command)};
628         if (status_t status = rpcRec(connection, session, "command header (for reply)", &iov, 1,
629                                      enableAncillaryFds(session->getFileDescriptorTransportMode())
630                                              ? &ancillaryFds
631                                              : nullptr);
632             status != OK)
633             return status;
634 
635         if (command.command == RPC_COMMAND_REPLY) break;
636 
637         if (status_t status = processCommand(connection, session, command, CommandType::ANY,
638                                              std::move(ancillaryFds));
639             status != OK)
640             return status;
641 
642         // Reset to avoid spurious use-after-move warning from clang-tidy.
643         ancillaryFds = decltype(ancillaryFds)();
644     }
645 
646     const size_t rpcReplyWireSize = RpcWireReply::wireSize(session->getProtocolVersion().value());
647 
648     if (command.bodySize < rpcReplyWireSize) {
649         ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcWireReply. Terminating!",
650               sizeof(RpcWireReply), command.bodySize);
651         (void)session->shutdownAndWait(false);
652         return BAD_VALUE;
653     }
654 
655     RpcWireReply rpcReply;
656     memset(&rpcReply, 0, sizeof(RpcWireReply)); // zero because of potential short read
657 
658     CommandData data(command.bodySize - rpcReplyWireSize);
659     if (!data.valid()) return NO_MEMORY;
660 
661     iovec iovs[]{
662             {&rpcReply, rpcReplyWireSize},
663             {data.data(), data.size()},
664     };
665     if (status_t status = rpcRec(connection, session, "reply body", iovs, arraysize(iovs), nullptr);
666         status != OK)
667         return status;
668 
669     if (rpcReply.status != OK) return rpcReply.status;
670 
671     Span<const uint8_t> parcelSpan = {data.data(), data.size()};
672     Span<const uint32_t> objectTableSpan;
673     if (session->getProtocolVersion().value() >=
674         RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE) {
675         std::optional<Span<const uint8_t>> objectTableBytes =
676                 parcelSpan.splitOff(rpcReply.parcelDataSize);
677         if (!objectTableBytes.has_value()) {
678             ALOGE("Parcel size larger than available bytes: %" PRId32 " vs %zu. Terminating!",
679                   rpcReply.parcelDataSize, parcelSpan.byteSize());
680             (void)session->shutdownAndWait(false);
681             return BAD_VALUE;
682         }
683         std::optional<Span<const uint32_t>> maybeSpan =
684                 objectTableBytes->reinterpret<const uint32_t>();
685         if (!maybeSpan.has_value()) {
686             ALOGE("Bad object table size inferred from RpcWireReply. Saw bodySize=%" PRId32
687                   " sizeofHeader=%zu parcelSize=%" PRId32 " objectTableBytesSize=%zu. Terminating!",
688                   command.bodySize, rpcReplyWireSize, rpcReply.parcelDataSize,
689                   objectTableBytes->size);
690             return BAD_VALUE;
691         }
692         objectTableSpan = *maybeSpan;
693     }
694 
695     data.release();
696     return reply->rpcSetDataReference(session, parcelSpan.data, parcelSpan.size,
697                                       objectTableSpan.data, objectTableSpan.size,
698                                       std::move(ancillaryFds), cleanup_reply_data);
699 }
700 
// Sends RPC_COMMAND_DEC_STRONG to the remote side so that our record of
// received strong references ('timesRecd') for the binder at 'addr' drops to
// exactly 'target'. Racing threads are combined: if another thread already
// decremented far enough, this is a no-op. Returns DEAD_OBJECT if the state
// is terminated, otherwise the result of the wire send.
status_t RpcState::sendDecStrongToTarget(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, uint64_t addr,
                                         size_t target) {
    RpcDecStrong body = {
            .address = RpcWireAddress::fromRaw(addr),
    };

    {
        RpcMutexUniqueLock _l(mNodeMutex);
        if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
        auto it = mNodeForAddress.find(addr);
        LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(),
                            "Sending dec strong on unknown address %" PRIu64, addr);

        // We can never decrement below the count of references actually received.
        LOG_ALWAYS_FATAL_IF(it->second.timesRecd < target, "Can't dec count of %zu to %zu.",
                            it->second.timesRecd, target);

        // typically this happens when multiple threads send dec refs at the
        // same time - the transactions will get combined automatically
        if (it->second.timesRecd == target) return OK;

        // Compute the delta and update local bookkeeping while the lock is held;
        // the actual wire send happens outside the lock.
        body.amount = it->second.timesRecd - target;
        it->second.timesRecd = target;

        // tryEraseNode consumes and releases the lock. It returns a non-null ref
        // only when RpcState owned the binder, which must never be the case for
        // a *received* binder.
        LOG_ALWAYS_FATAL_IF(nullptr != tryEraseNode(session, std::move(_l), it),
                            "Bad state. RpcState shouldn't own received binder");
        // LOCK ALREADY RELEASED
    }

    RpcWireHeader cmd = {
            .command = RPC_COMMAND_DEC_STRONG,
            .bodySize = sizeof(RpcDecStrong),
    };
    iovec iovs[]{{&cmd, sizeof(cmd)}, {&body, sizeof(body)}};
    return rpcSend(connection, session, "dec ref", iovs, arraysize(iovs), std::nullopt);
}
737 
getAndExecuteCommand(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,CommandType type)738 status_t RpcState::getAndExecuteCommand(const sp<RpcSession::RpcConnection>& connection,
739                                         const sp<RpcSession>& session, CommandType type) {
740     LOG_RPC_DETAIL("getAndExecuteCommand on RpcTransport %p", connection->rpcTransport.get());
741 
742     std::vector<std::variant<base::unique_fd, base::borrowed_fd>> ancillaryFds;
743     RpcWireHeader command;
744     iovec iov{&command, sizeof(command)};
745     if (status_t status =
746                 rpcRec(connection, session, "command header (for server)", &iov, 1,
747                        enableAncillaryFds(session->getFileDescriptorTransportMode()) ? &ancillaryFds
748                                                                                      : nullptr);
749         status != OK)
750         return status;
751 
752     return processCommand(connection, session, command, type, std::move(ancillaryFds));
753 }
754 
drainCommands(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,CommandType type)755 status_t RpcState::drainCommands(const sp<RpcSession::RpcConnection>& connection,
756                                  const sp<RpcSession>& session, CommandType type) {
757     while (true) {
758         status_t status = connection->rpcTransport->pollRead();
759         if (status == WOULD_BLOCK) break;
760         if (status != OK) return status;
761 
762         status = getAndExecuteCommand(connection, session, type);
763         if (status != OK) return status;
764     }
765     return OK;
766 }
767 
processCommand(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,const RpcWireHeader & command,CommandType type,std::vector<std::variant<base::unique_fd,base::borrowed_fd>> && ancillaryFds)768 status_t RpcState::processCommand(
769         const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
770         const RpcWireHeader& command, CommandType type,
771         std::vector<std::variant<base::unique_fd, base::borrowed_fd>>&& ancillaryFds) {
772 #ifdef BINDER_WITH_KERNEL_IPC
773     IPCThreadState* kernelBinderState = IPCThreadState::selfOrNull();
774     IPCThreadState::SpGuard spGuard{
775             .address = __builtin_frame_address(0),
776             .context = "processing binder RPC command (where RpcServer::setPerSessionRootObject is "
777                        "used to distinguish callers)",
778     };
779     const IPCThreadState::SpGuard* origGuard;
780     if (kernelBinderState != nullptr) {
781         origGuard = kernelBinderState->pushGetCallingSpGuard(&spGuard);
782     }
783 
784     base::ScopeGuard guardUnguard = [&]() {
785         if (kernelBinderState != nullptr) {
786             kernelBinderState->restoreGetCallingSpGuard(origGuard);
787         }
788     };
789 #endif // BINDER_WITH_KERNEL_IPC
790 
791     switch (command.command) {
792         case RPC_COMMAND_TRANSACT:
793             if (type != CommandType::ANY) return BAD_TYPE;
794             return processTransact(connection, session, command, std::move(ancillaryFds));
795         case RPC_COMMAND_DEC_STRONG:
796             return processDecStrong(connection, session, command);
797     }
798 
799     // We should always know the version of the opposing side, and since the
800     // RPC-binder-level wire protocol is not self synchronizing, we have no way
801     // to understand where the current command ends and the next one begins. We
802     // also can't consider it a fatal error because this would allow any client
803     // to kill us, so ending the session for misbehaving client.
804     ALOGE("Unknown RPC command %d - terminating session", command.command);
805     (void)session->shutdownAndWait(false);
806     return DEAD_OBJECT;
807 }
processTransact(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,const RpcWireHeader & command,std::vector<std::variant<base::unique_fd,base::borrowed_fd>> && ancillaryFds)808 status_t RpcState::processTransact(
809         const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
810         const RpcWireHeader& command,
811         std::vector<std::variant<base::unique_fd, base::borrowed_fd>>&& ancillaryFds) {
812     LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_TRANSACT, "command: %d", command.command);
813 
814     CommandData transactionData(command.bodySize);
815     if (!transactionData.valid()) {
816         return NO_MEMORY;
817     }
818     iovec iov{transactionData.data(), transactionData.size()};
819     if (status_t status = rpcRec(connection, session, "transaction body", &iov, 1, nullptr);
820         status != OK)
821         return status;
822 
823     return processTransactInternal(connection, session, std::move(transactionData),
824                                    std::move(ancillaryFds));
825 }
826 
// No-op release callback for Parcel::rpcSetDataReference(): used when the
// Parcel merely borrows transaction data owned by the caller, so there is
// nothing to free when the Parcel lets go of it.
static void do_nothing_to_transact_data(const uint8_t* /*data*/, size_t /*dataSize*/,
                                        const binder_size_t* /*objects*/,
                                        size_t /*objectsCount*/) {}
834 
// Executes a transaction whose body has already been read off the wire.
// Responsibilities: resolve the target binder, enforce oneway (asynchronous)
// ordering via per-node queues, service special session-control transactions,
// and (for synchronous calls) build and send the reply. Queued oneway
// transactions are processed via a manual tail-call (goto) instead of
// recursion to keep stack usage bounded.
status_t RpcState::processTransactInternal(
        const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
        CommandData transactionData,
        std::vector<std::variant<base::unique_fd, base::borrowed_fd>>&& ancillaryFds) {
    // for 'recursive' calls to this, we have already read and processed the
    // binder from the transaction data and taken reference counts into account,
    // so it is cached here.
    sp<IBinder> target;
processTransactInternalTailCall:

    // A malformed (too-short) body is unrecoverable; terminate the session.
    if (transactionData.size() < sizeof(RpcWireTransaction)) {
        ALOGE("Expecting %zu but got %zu bytes for RpcWireTransaction. Terminating!",
              sizeof(RpcWireTransaction), transactionData.size());
        (void)session->shutdownAndWait(false);
        return BAD_VALUE;
    }
    RpcWireTransaction* transaction = reinterpret_cast<RpcWireTransaction*>(transactionData.data());

    uint64_t addr = RpcWireAddress::toRaw(transaction->address);
    bool oneway = transaction->flags & IBinder::FLAG_ONEWAY;

    status_t replyStatus = OK;
    // addr == 0 designates a "special" (session-control) transaction with no
    // target binder; otherwise look the binder up (unless cached from a
    // previous tail-call iteration).
    if (addr != 0) {
        if (!target) {
            replyStatus = onBinderEntering(session, addr, &target);
        }

        if (replyStatus != OK) {
            // do nothing
        } else if (target == nullptr) {
            // This can happen if the binder is remote in this process, and
            // another thread has called the last decStrong on this binder.
            // However, for local binders, it indicates a misbehaving client
            // (any binder which is being transacted on should be holding a
            // strong ref count), so in either case, terminating the
            // session.
            ALOGE("While transacting, binder has been deleted at address %" PRIu64 ". Terminating!",
                  addr);
            (void)session->shutdownAndWait(false);
            replyStatus = BAD_VALUE;
        } else if (target->localBinder() == nullptr) {
            ALOGE("Unknown binder address or non-local binder, not address %" PRIu64
                  ". Terminating!",
                  addr);
            (void)session->shutdownAndWait(false);
            replyStatus = BAD_VALUE;
        } else if (oneway) {
            // Oneway calls must execute in asyncNumber order. If this isn't
            // the next expected number, queue it on the node and return; it
            // will be picked up by the tail-call loop below when its turn
            // comes.
            RpcMutexUniqueLock _l(mNodeMutex);
            auto it = mNodeForAddress.find(addr);
            if (it->second.binder.promote() != target) {
                ALOGE("Binder became invalid during transaction. Bad client? %" PRIu64, addr);
                replyStatus = BAD_VALUE;
            } else if (transaction->asyncNumber != it->second.asyncNumber) {
                // we need to process some other asynchronous transaction
                // first
                it->second.asyncTodo.push(BinderNode::AsyncTodo{
                        .ref = target,
                        .data = std::move(transactionData),
                        .ancillaryFds = std::move(ancillaryFds),
                        .asyncNumber = transaction->asyncNumber,
                });

                size_t numPending = it->second.asyncTodo.size();
                LOG_RPC_DETAIL("Enqueuing %" PRIu64 " on %" PRIu64 " (%zu pending)",
                               transaction->asyncNumber, addr, numPending);

                // Backpressure: warn periodically on large queues, and kill
                // the session outright on an unbounded buildup.
                constexpr size_t kArbitraryOnewayCallTerminateLevel = 10000;
                constexpr size_t kArbitraryOnewayCallWarnLevel = 1000;
                constexpr size_t kArbitraryOnewayCallWarnPer = 1000;

                if (numPending >= kArbitraryOnewayCallWarnLevel) {
                    if (numPending >= kArbitraryOnewayCallTerminateLevel) {
                        ALOGE("WARNING: %zu pending oneway transactions. Terminating!", numPending);
                        _l.unlock();
                        (void)session->shutdownAndWait(false);
                        return FAILED_TRANSACTION;
                    }

                    if (numPending % kArbitraryOnewayCallWarnPer == 0) {
                        ALOGW("Warning: many oneway transactions built up on %p (%zu)",
                              target.get(), numPending);
                    }
                }
                return OK;
            }
        }
    }

    Parcel reply;
    reply.markForRpc(session);

    if (replyStatus == OK) {
        // The parcel payload follows the RpcWireTransaction header in the
        // same buffer.
        Span<const uint8_t> parcelSpan = {transaction->data,
                                          transactionData.size() -
                                                  offsetof(RpcWireTransaction, data)};
        Span<const uint32_t> objectTableSpan;
        if (session->getProtocolVersion().value() >=
            RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE) {
            // Newer protocol versions append an object-position table after
            // the parcel data; split it off and validate its size.
            std::optional<Span<const uint8_t>> objectTableBytes =
                    parcelSpan.splitOff(transaction->parcelDataSize);
            if (!objectTableBytes.has_value()) {
                ALOGE("Parcel size (%" PRId32 ") greater than available bytes (%zu). Terminating!",
                      transaction->parcelDataSize, parcelSpan.byteSize());
                (void)session->shutdownAndWait(false);
                return BAD_VALUE;
            }
            std::optional<Span<const uint32_t>> maybeSpan =
                    objectTableBytes->reinterpret<const uint32_t>();
            if (!maybeSpan.has_value()) {
                ALOGE("Bad object table size inferred from RpcWireTransaction. Saw bodySize=%zu "
                      "sizeofHeader=%zu parcelSize=%" PRId32
                      " objectTableBytesSize=%zu. Terminating!",
                      transactionData.size(), sizeof(RpcWireTransaction),
                      transaction->parcelDataSize, objectTableBytes->size);
                return BAD_VALUE;
            }
            objectTableSpan = *maybeSpan;
        }

        Parcel data;
        // transaction->data is owned by this function. Parcel borrows this data and
        // only holds onto it for the duration of this function call. Parcel will be
        // deleted before the 'transactionData' object.

        replyStatus =
                data.rpcSetDataReference(session, parcelSpan.data, parcelSpan.size,
                                         objectTableSpan.data, objectTableSpan.size,
                                         std::move(ancillaryFds), do_nothing_to_transact_data);
        // Reset to avoid spurious use-after-move warning from clang-tidy.
        ancillaryFds = std::remove_reference<decltype(ancillaryFds)>::type();

        if (replyStatus == OK) {
            if (target) {
                // Nested (recursive) transactions are only allowed for
                // synchronous calls; restore the previous setting afterwards.
                bool origAllowNested = connection->allowNested;
                connection->allowNested = !oneway;

                replyStatus = target->transact(transaction->code, data, &reply, transaction->flags);

                connection->allowNested = origAllowNested;
            } else {
                // addr == 0: special session-control transactions handled by
                // RpcState itself rather than a user binder.
                LOG_RPC_DETAIL("Got special transaction %u", transaction->code);

                switch (transaction->code) {
                    case RPC_SPECIAL_TRANSACT_GET_MAX_THREADS: {
                        replyStatus = reply.writeInt32(session->getMaxIncomingThreads());
                        break;
                    }
                    case RPC_SPECIAL_TRANSACT_GET_SESSION_ID: {
                        // for client connections, this should always report the value
                        // originally returned from the server, so this is asserting
                        // that it exists
                        replyStatus = reply.writeByteVector(session->mId);
                        break;
                    }
                    default: {
                        // Remaining special transactions are only meaningful
                        // on the server side of a session.
                        sp<RpcServer> server = session->server();
                        if (server) {
                            switch (transaction->code) {
                                case RPC_SPECIAL_TRANSACT_GET_ROOT: {
                                    // Per-session root object takes precedence
                                    // over the server-wide root object.
                                    sp<IBinder> root = session->mSessionSpecificRootObject
                                            ?: server->getRootObject();
                                    replyStatus = reply.writeStrongBinder(root);
                                    break;
                                }
                                default: {
                                    replyStatus = UNKNOWN_TRANSACTION;
                                }
                            }
                        } else {
                            ALOGE("Special command sent, but no server object attached.");
                        }
                    }
                }
            }
        }
    }

    if (oneway) {
        // No reply is sent for oneway calls; errors can only be logged.
        if (replyStatus != OK) {
            ALOGW("Oneway call failed with error: %d", replyStatus);
        }

        LOG_RPC_DETAIL("Processed async transaction %" PRIu64 " on %" PRIu64,
                       transaction->asyncNumber, addr);

        // Check to see if there is another asynchronous transaction to process.
        // This behavior differs from binder behavior, since in the binder
        // driver, asynchronous transactions will be processed after existing
        // pending binder transactions on the queue. The downside of this is
        // that asynchronous transactions can be drowned out by synchronous
        // transactions. However, we have no easy way to queue these
        // transactions after the synchronous transactions we may want to read
        // from the wire. So, in socket binder here, we have the opposite
        // downside: asynchronous transactions may drown out synchronous
        // transactions.
        {
            RpcMutexUniqueLock _l(mNodeMutex);
            auto it = mNodeForAddress.find(addr);
            // last refcount dropped after this transaction happened
            if (it == mNodeForAddress.end()) return OK;

            if (!nodeProgressAsyncNumber(&it->second)) {
                _l.unlock();
                (void)session->shutdownAndWait(false);
                return DEAD_OBJECT;
            }

            // If the next-in-order oneway transaction is already queued,
            // loop back (tail call) and execute it now.
            if (it->second.asyncTodo.size() != 0 &&
                it->second.asyncTodo.top().asyncNumber == it->second.asyncNumber) {
                LOG_RPC_DETAIL("Found next async transaction %" PRIu64 " on %" PRIu64,
                               it->second.asyncNumber, addr);

                // justification for const_cast (consider avoiding priority_queue):
                // - AsyncTodo operator< doesn't depend on 'data' or 'ref' objects
                // - gotta go fast
                auto& todo = const_cast<BinderNode::AsyncTodo&>(it->second.asyncTodo.top());

                // reset up arguments
                transactionData = std::move(todo.data);
                ancillaryFds = std::move(todo.ancillaryFds);
                LOG_ALWAYS_FATAL_IF(target != todo.ref,
                                    "async list should be associated with a binder");

                it->second.asyncTodo.pop();
                goto processTransactInternalTailCall;
            }
        }

        // done processing all the async commands on this binder that we can, so
        // write decstrongs on the binder
        if (addr != 0 && replyStatus == OK) {
            return flushExcessBinderRefs(session, addr, target);
        }

        return OK;
    }

    // Binder refs are flushed for oneway calls only after all calls which are
    // built up are executed. Otherwise, they fill up the binder buffer.
    if (addr != 0 && replyStatus == OK) {
        replyStatus = flushExcessBinderRefs(session, addr, target);
    }

    // An invalid reply parcel is replaced by an empty one carrying the
    // validation error status, so the client still gets a well-formed reply.
    std::string errorMsg;
    if (status_t status = validateParcel(session, reply, &errorMsg); status != OK) {
        ALOGE("Reply Parcel failed validation: %s", errorMsg.c_str());
        // Forward the error to the client of the transaction.
        reply.freeData();
        reply.markForRpc(session);
        replyStatus = status;
    }

    auto* rpcFields = reply.maybeRpcFields();
    LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);

    const size_t rpcReplyWireSize = RpcWireReply::wireSize(session->getProtocolVersion().value());

    Span<const uint32_t> objectTableSpan = Span<const uint32_t>{rpcFields->mObjectPositions.data(),
                                                                rpcFields->mObjectPositions.size()};

    // bodySize = reply header + parcel data + object table, with overflow
    // checked since it is truncated to uint32_t on the wire.
    uint32_t bodySize;
    LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(rpcReplyWireSize, reply.dataSize(), &bodySize) ||
                                __builtin_add_overflow(objectTableSpan.byteSize(), bodySize,
                                                       &bodySize),
                        "Too much data for reply %zu", reply.dataSize());
    RpcWireHeader cmdReply{
            .command = RPC_COMMAND_REPLY,
            .bodySize = bodySize,
    };
    RpcWireReply rpcReply{
            .status = replyStatus,
            // NOTE: Not necessarily written to socket depending on session
            // version.
            // NOTE: bodySize didn't overflow => this cast is safe
            .parcelDataSize = static_cast<uint32_t>(reply.dataSize()),
            .reserved = {0, 0, 0},
    };
    iovec iovs[]{
            {&cmdReply, sizeof(RpcWireHeader)},
            {&rpcReply, rpcReplyWireSize},
            {const_cast<uint8_t*>(reply.data()), reply.dataSize()},
            objectTableSpan.toIovec(),
    };
    return rpcSend(connection, session, "reply", iovs, arraysize(iovs), std::nullopt,
                   rpcFields->mFds.get());
}
1121 
// Handles an incoming RPC_COMMAND_DEC_STRONG: the remote side is returning
// 'body.amount' strong references for the binder at 'body.address'. Decrements
// our 'timesSent' bookkeeping and erases the node (dropping our sentRef) once
// all outstanding references are returned. Malformed bodies or references to
// already-deleted binders terminate the session.
status_t RpcState::processDecStrong(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<RpcSession>& session, const RpcWireHeader& command) {
    LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_DEC_STRONG, "command: %d", command.command);

    // The body has a fixed size; anything else is a protocol violation.
    if (command.bodySize != sizeof(RpcDecStrong)) {
        ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcDecStrong. Terminating!",
              sizeof(RpcDecStrong), command.bodySize);
        (void)session->shutdownAndWait(false);
        return BAD_VALUE;
    }

    RpcDecStrong body;
    iovec iov{&body, sizeof(RpcDecStrong)};
    if (status_t status = rpcRec(connection, session, "dec ref body", &iov, 1, nullptr);
        status != OK)
        return status;

    uint64_t addr = RpcWireAddress::toRaw(body.address);
    RpcMutexUniqueLock _l(mNodeMutex);
    auto it = mNodeForAddress.find(addr);
    if (it == mNodeForAddress.end()) {
        // Unknown address: logged but not fatal (returns OK).
        ALOGE("Unknown binder address %" PRIu64 " for dec strong.", addr);
        return OK;
    }

    sp<IBinder> target = it->second.binder.promote();
    if (target == nullptr) {
        // We gave this binder out, so its weak ref should still be promotable;
        // a failure here means the session state is corrupt.
        ALOGE("While requesting dec strong, binder has been deleted at address %" PRIu64
              ". Terminating!",
              addr);
        _l.unlock();
        (void)session->shutdownAndWait(false);
        return BAD_VALUE;
    }

    // The remote can't return more references than we sent; ignore the excess
    // request rather than underflowing the count.
    if (it->second.timesSent < body.amount) {
        ALOGE("Record of sending binder %zu times, but requested decStrong for %" PRIu64 " of %u",
              it->second.timesSent, addr, body.amount);
        return OK;
    }

    LOG_ALWAYS_FATAL_IF(it->second.sentRef == nullptr, "Inconsistent state, lost ref for %" PRIu64,
                        addr);

    LOG_RPC_DETAIL("Processing dec strong of %" PRIu64 " by %u from %zu", addr, body.amount,
                   it->second.timesSent);

    it->second.timesSent -= body.amount;
    // tryEraseNode consumes and releases the lock; the returned strong ref
    // keeps the binder alive until after the lock is dropped.
    sp<IBinder> tempHold = tryEraseNode(session, std::move(_l), it);
    // LOCK ALREADY RELEASED
    tempHold = nullptr; // destructor may make binder calls on this session

    return OK;
}
1176 
validateParcel(const sp<RpcSession> & session,const Parcel & parcel,std::string * errorMsg)1177 status_t RpcState::validateParcel(const sp<RpcSession>& session, const Parcel& parcel,
1178                                   std::string* errorMsg) {
1179     auto* rpcFields = parcel.maybeRpcFields();
1180     if (rpcFields == nullptr) {
1181         *errorMsg = "Parcel not crafted for RPC call";
1182         return BAD_TYPE;
1183     }
1184 
1185     if (rpcFields->mSession != session) {
1186         *errorMsg = "Parcel's session doesn't match";
1187         return BAD_TYPE;
1188     }
1189 
1190     uint32_t protocolVersion = session->getProtocolVersion().value();
1191     if (protocolVersion < RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE &&
1192         !rpcFields->mObjectPositions.empty()) {
1193         *errorMsg = StringPrintf("Parcel has attached objects but the session's protocol version "
1194                                  "(%" PRIu32 ") is too old, must be at least %" PRIu32,
1195                                  protocolVersion,
1196                                  RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE);
1197         return BAD_VALUE;
1198     }
1199 
1200     if (rpcFields->mFds && !rpcFields->mFds->empty()) {
1201         switch (session->getFileDescriptorTransportMode()) {
1202             case RpcSession::FileDescriptorTransportMode::NONE:
1203                 *errorMsg =
1204                         "Parcel has file descriptors, but no file descriptor transport is enabled";
1205                 return FDS_NOT_ALLOWED;
1206             case RpcSession::FileDescriptorTransportMode::UNIX: {
1207                 constexpr size_t kMaxFdsPerMsg = 253;
1208                 if (rpcFields->mFds->size() > kMaxFdsPerMsg) {
1209                     *errorMsg = StringPrintf("Too many file descriptors in Parcel for unix "
1210                                              "domain socket: %zu (max is %zu)",
1211                                              rpcFields->mFds->size(), kMaxFdsPerMsg);
1212                     return BAD_VALUE;
1213                 }
1214                 break;
1215             }
1216             case RpcSession::FileDescriptorTransportMode::TRUSTY: {
1217                 // Keep this in sync with trusty_ipc.h!!!
1218                 // We could import that file here on Trusty, but it's not
1219                 // available on Android
1220                 constexpr size_t kMaxFdsPerMsg = 8;
1221                 if (rpcFields->mFds->size() > kMaxFdsPerMsg) {
1222                     *errorMsg = StringPrintf("Too many file descriptors in Parcel for Trusty "
1223                                              "IPC connection: %zu (max is %zu)",
1224                                              rpcFields->mFds->size(), kMaxFdsPerMsg);
1225                     return BAD_VALUE;
1226                 }
1227                 break;
1228             }
1229         }
1230     }
1231 
1232     return OK;
1233 }
1234 
// Erases the binder node at 'it' if it holds no outstanding references in
// either direction (timesSent == 0 and timesRecd == 0). Takes ownership of
// the node lock and ALWAYS releases it before returning, so that the
// returned strong ref (our sentRef, if dropped) and any session shutdown can
// safely run without the lock held. Callers must not touch 'it' afterwards.
sp<IBinder> RpcState::tryEraseNode(const sp<RpcSession>& session, RpcMutexUniqueLock nodeLock,
                                   std::map<uint64_t, BinderNode>::iterator& it) {
    bool shouldShutdown = false;

    sp<IBinder> ref;

    if (it->second.timesSent == 0) {
        // No references sent out remain; we may drop our own strong ref. The
        // ref is returned (not destroyed here) so its destructor runs after
        // the lock is released.
        ref = std::move(it->second.sentRef);

        if (it->second.timesRecd == 0) {
            LOG_ALWAYS_FATAL_IF(!it->second.asyncTodo.empty(),
                                "Can't delete binder w/ pending async transactions");
            mNodeForAddress.erase(it);

            // Last node gone: the whole session has nothing left to serve.
            if (mNodeForAddress.size() == 0) {
                shouldShutdown = true;
            }
        }
    }

    // If we shutdown, prevent RpcState from being re-used. This prevents another
    // thread from getting the root object again.
    if (shouldShutdown) {
        clear(std::move(nodeLock));
    } else {
        nodeLock.unlock(); // explicit
    }
    // LOCK IS RELEASED

    if (shouldShutdown) {
        ALOGI("RpcState has no binders left, so triggering shutdown...");
        (void)session->shutdownAndWait(false);
    }

    return ref;
}
1271 
nodeProgressAsyncNumber(BinderNode * node)1272 bool RpcState::nodeProgressAsyncNumber(BinderNode* node) {
1273     // 2**64 =~ 10**19 =~ 1000 transactions per second for 585 million years to
1274     // a single binder
1275     if (node->asyncNumber >= std::numeric_limits<decltype(node->asyncNumber)>::max()) {
1276         ALOGE("Out of async transaction IDs. Terminating");
1277         return false;
1278     }
1279     node->asyncNumber++;
1280     return true;
1281 }
1282 
1283 } // namespace android
1284