• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "RpcState"
18 
19 #include "RpcState.h"
20 
21 #include <binder/BpBinder.h>
22 #include <binder/Functional.h>
23 #include <binder/IPCThreadState.h>
24 #include <binder/RpcServer.h>
25 
26 #include "Debug.h"
27 #include "RpcWireFormat.h"
28 #include "Utils.h"
29 
30 #include <random>
31 #include <sstream>
32 
33 #include <inttypes.h>
34 
35 #ifdef __ANDROID__
36 #include <cutils/properties.h>
37 #endif
38 
39 namespace android {
40 
41 using namespace android::binder::impl;
42 using android::binder::borrowed_fd;
43 using android::binder::unique_fd;
44 
45 #if RPC_FLAKE_PRONE
rpcMaybeWaitToFlake()46 void rpcMaybeWaitToFlake() {
47     [[clang::no_destroy]] static std::random_device r;
48     [[clang::no_destroy]] static RpcMutex m;
49     unsigned num;
50     {
51         RpcMutexLockGuard lock(m);
52         num = r();
53     }
54     if (num % 10 == 0) usleep(num % 1000);
55 }
56 #endif
57 
enableAncillaryFds(RpcSession::FileDescriptorTransportMode mode)58 static bool enableAncillaryFds(RpcSession::FileDescriptorTransportMode mode) {
59     switch (mode) {
60         case RpcSession::FileDescriptorTransportMode::NONE:
61             return false;
62         case RpcSession::FileDescriptorTransportMode::UNIX:
63         case RpcSession::FileDescriptorTransportMode::TRUSTY:
64             return true;
65     }
66     LOG_ALWAYS_FATAL("Invalid FileDescriptorTransportMode: %d", static_cast<int>(mode));
67 }
68 
RpcState()69 RpcState::RpcState() {}
~RpcState()70 RpcState::~RpcState() {}
71 
// Called when `binder` is about to be written out over `session`. Ensures the
// binder has a wire address on this session (allocating one for a local
// binder seen for the first time), bumps the node's sent refcount, and
// returns the address via `outAddress`.
//
// Rejections:
//  - RPC proxies may only be re-sent on the session they came from.
//  - Kernel-binder proxies cannot be tunneled over sockets at all.
//
// Returns OK on success; INVALID_OPERATION for unsendable binders;
// DEAD_OBJECT after termination; NO_MEMORY when the node table is full.
status_t RpcState::onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
                                   uint64_t* outAddress) {
    bool isRemote = binder->remoteBinder();
    bool isRpc = isRemote && binder->remoteBinder()->isRpcBinder();

    if (isRpc && binder->remoteBinder()->getPrivateAccessor().rpcSession() != session) {
        // We need to be able to send instructions over the socket for how to
        // connect to a different server, and we also need to let the host
        // process know that this is happening.
        ALOGE("Cannot send binder from unrelated binder RPC session.");
        return INVALID_OPERATION;
    }

    if (isRemote && !isRpc) {
        // Without additional work, this would have the effect of using this
        // process to proxy calls from the socket over to the other process, and
        // it would make those calls look like they come from us (not over the
        // sockets). In order to make this work transparently like binder, we
        // would instead need to send instructions over the socket for how to
        // connect to the host process, and we also need to let the host process
        // know this was happening.
        ALOGE("Cannot send binder proxy %p over sockets", binder.get());
        return INVALID_OPERATION;
    }

    RpcMutexLockGuard _l(mNodeMutex);
    if (mTerminated) return DEAD_OBJECT;

    // TODO(b/182939933): maybe move address out of BpBinder, and keep binder->address map
    // in RpcState
    // Linear scan: acceptable since the table is capped (see limit below), but
    // see the TODO above for the intended O(1) reverse mapping.
    for (auto& [addr, node] : mNodeForAddress) {
        if (binder == node.binder) {
            if (isRpc) {
                // check integrity of data structure
                uint64_t actualAddr = binder->remoteBinder()->getPrivateAccessor().rpcAddress();
                LOG_ALWAYS_FATAL_IF(addr != actualAddr, "Address mismatch %" PRIu64 " vs %" PRIu64,
                                    addr, actualAddr);
            }
            node.timesSent++;
            node.sentRef = binder; // might already be set
            *outAddress = addr;
            return OK;
        }
    }
    // An RPC proxy came from this session, so its address must already be in
    // the table (it was added in onBinderEntering).
    LOG_ALWAYS_FATAL_IF(isRpc, "RPC binder must have known address at this point");

    // Server-created addresses carry a distinguishing option bit so the two
    // ends of the session never collide when allocating new addresses.
    bool forServer = session->server() != nullptr;

    // arbitrary limit for maximum number of nodes in a process (otherwise we
    // might run out of addresses)
    if (mNodeForAddress.size() > 100000) {
        return NO_MEMORY;
    }

    while (true) {
        RpcWireAddress address{
                .options = RPC_WIRE_ADDRESS_OPTION_CREATED,
                .address = mNextId,
        };
        if (forServer) {
            address.options |= RPC_WIRE_ADDRESS_OPTION_FOR_SERVER;
        }

        // avoid ubsan abort
        if (mNextId >= std::numeric_limits<uint32_t>::max()) {
            mNextId = 0;
        } else {
            mNextId++;
        }

        // Insertion fails if this id is still taken by a live node; loop until
        // a free id is found (guaranteed to exist by the size cap above).
        auto&& [it, inserted] = mNodeForAddress.insert({RpcWireAddress::toRaw(address),
                                                        BinderNode{
                                                                .binder = binder,
                                                                .sentRef = binder,
                                                                .timesSent = 1,
                                                        }});
        if (inserted) {
            *outAddress = it->first;
            return OK;
        }
    }
}
154 
onBinderEntering(const sp<RpcSession> & session,uint64_t address,sp<IBinder> * out)155 status_t RpcState::onBinderEntering(const sp<RpcSession>& session, uint64_t address,
156                                     sp<IBinder>* out) {
157     // ensure that: if we want to use addresses for something else in the future (for
158     //   instance, allowing transitive binder sends), that we don't accidentally
159     //   send those addresses to old server. Accidentally ignoring this in that
160     //   case and considering the binder to be recognized could cause this
161     //   process to accidentally proxy transactions for that binder. Of course,
162     //   if we communicate with a binder, it could always be proxying
163     //   information. However, we want to make sure that isn't done on accident
164     //   by a client.
165     RpcWireAddress addr = RpcWireAddress::fromRaw(address);
166     constexpr uint32_t kKnownOptions =
167             RPC_WIRE_ADDRESS_OPTION_CREATED | RPC_WIRE_ADDRESS_OPTION_FOR_SERVER;
168     if (addr.options & ~kKnownOptions) {
169         ALOGE("Address is of an unknown type, rejecting: %" PRIu64, address);
170         return BAD_VALUE;
171     }
172 
173     RpcMutexLockGuard _l(mNodeMutex);
174     if (mTerminated) return DEAD_OBJECT;
175 
176     if (auto it = mNodeForAddress.find(address); it != mNodeForAddress.end()) {
177         *out = it->second.binder.promote();
178 
179         // implicitly have strong RPC refcount, since we received this binder
180         it->second.timesRecd++;
181         return OK;
182     }
183 
184     // we don't know about this binder, so the other side of the connection
185     // should have created it.
186     if ((addr.options & RPC_WIRE_ADDRESS_OPTION_FOR_SERVER) == !!session->server()) {
187         ALOGE("Server received unrecognized address which we should own the creation of %" PRIu64,
188               address);
189         return BAD_VALUE;
190     }
191 
192     auto&& [it, inserted] = mNodeForAddress.insert({address, BinderNode{}});
193     LOG_ALWAYS_FATAL_IF(!inserted, "Failed to insert binder when creating proxy");
194 
195     // Currently, all binders are assumed to be part of the same session (no
196     // device global binders in the RPC world).
197     it->second.binder = *out = BpBinder::PrivateAccessor::create(session, it->first);
198     it->second.timesRecd = 1;
199     return OK;
200 }
201 
// Called with a strong pointer to a binder at `address` that was just handled
// on `session`, to acknowledge any remotely-held refcounts that are no longer
// needed for a LOCAL binder. Proxies are skipped: their refs are flushed when
// the proxy is destroyed.
status_t RpcState::flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t address,
                                         const sp<IBinder>& binder) {
    // We can flush all references when the binder is destroyed. No need to send
    // extra reference counting packets now.
    if (binder->remoteBinder()) return OK;

    RpcMutexUniqueLock _l(mNodeMutex);
    if (mTerminated) return DEAD_OBJECT;

    auto it = mNodeForAddress.find(address);

    // The caller's sp<> keeps the node alive, so lookup cannot fail, and the
    // node must refer to the same binder the caller passed in.
    LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(), "Can't be deleted while we hold sp<>");
    LOG_ALWAYS_FATAL_IF(it->second.binder != binder,
                        "Caller of flushExcessBinderRefs using inconsistent arguments");

    LOG_ALWAYS_FATAL_IF(it->second.timesSent <= 0, "Local binder must have been sent %p",
                        binder.get());

    // For a local binder, we only need to know that we sent it. Now that we
    // have an sp<> for this call, we don't need anything more. If the other
    // process is done with this binder, it needs to know we received the
    // refcount associated with this call, so we can acknowledge that we
    // received it. Once (or if) it has no other refcounts, it would reply with
    // its own decStrong so that it could be removed from this session.
    if (it->second.timesRecd != 0) {
        // Drop the node lock before doing socket I/O.
        _l.unlock();

        return session->sendDecStrongToTarget(address, 0);
    }

    return OK;
}
234 
sendObituaries(const sp<RpcSession> & session)235 status_t RpcState::sendObituaries(const sp<RpcSession>& session) {
236     RpcMutexUniqueLock _l(mNodeMutex);
237 
238     // Gather strong pointers to all of the remote binders for this session so
239     // we hold the strong references. remoteBinder() returns a raw pointer.
240     // Send the obituaries and drop the strong pointers outside of the lock so
241     // the destructors and the onBinderDied calls are not done while locked.
242     std::vector<sp<IBinder>> remoteBinders;
243     for (const auto& [_, binderNode] : mNodeForAddress) {
244         if (auto binder = binderNode.binder.promote()) {
245             remoteBinders.push_back(std::move(binder));
246         }
247     }
248     _l.unlock();
249 
250     for (const auto& binder : remoteBinders) {
251         if (binder->remoteBinder() &&
252             binder->remoteBinder()->getPrivateAccessor().rpcSession() == session) {
253             binder->remoteBinder()->sendObituary();
254         }
255     }
256     return OK;
257 }
258 
countBinders()259 size_t RpcState::countBinders() {
260     RpcMutexLockGuard _l(mNodeMutex);
261     return mNodeForAddress.size();
262 }
263 
dump()264 void RpcState::dump() {
265     RpcMutexLockGuard _l(mNodeMutex);
266     dumpLocked();
267 }
268 
clear()269 void RpcState::clear() {
270     return clear(RpcMutexUniqueLock(mNodeMutex));
271 }
272 
// Terminates this RpcState: marks it dead and drops every node. Takes
// ownership of the already-held node lock so it can be released before the
// binder destructors run — a destructor may make another RPC call and
// re-enter RpcState, which would deadlock under mNodeMutex.
void RpcState::clear(RpcMutexUniqueLock nodeLock) {
    if (mTerminated) {
        // clear() is idempotent, but only if the first call fully emptied the
        // table.
        LOG_ALWAYS_FATAL_IF(!mNodeForAddress.empty(),
                            "New state should be impossible after terminating!");
        return;
    }
    mTerminated = true;

    if (SHOULD_LOG_RPC_DETAIL) {
        ALOGE("RpcState::clear()");
        dumpLocked();
    }

    // invariants
    for (auto& [address, node] : mNodeForAddress) {
        bool guaranteedHaveBinder = node.timesSent > 0;
        if (guaranteedHaveBinder) {
            LOG_ALWAYS_FATAL_IF(node.sentRef == nullptr,
                                "Binder expected to be owned with address: %" PRIu64 " %s", address,
                                node.toString().c_str());
        }
    }

    // if the destructor of a binder object makes another RPC call, then calling
    // decStrong could deadlock. So, we must hold onto these binders until
    // mNodeMutex is no longer taken.
    auto temp = std::move(mNodeForAddress);
    mNodeForAddress.clear(); // RpcState isn't reusable, but for future/explicit

    nodeLock.unlock();
    temp.clear(); // explicit
}
305 
dumpLocked()306 void RpcState::dumpLocked() {
307     ALOGE("DUMP OF RpcState %p", this);
308     ALOGE("DUMP OF RpcState (%zu nodes)", mNodeForAddress.size());
309     for (const auto& [address, node] : mNodeForAddress) {
310         ALOGE("- address: %" PRIu64 " %s", address, node.toString().c_str());
311     }
312     ALOGE("END DUMP OF RpcState");
313 }
314 
toString() const315 std::string RpcState::BinderNode::toString() const {
316     sp<IBinder> strongBinder = this->binder.promote();
317 
318     const char* desc;
319     if (strongBinder) {
320         if (strongBinder->remoteBinder()) {
321             if (strongBinder->remoteBinder()->isRpcBinder()) {
322                 desc = "(rpc binder proxy)";
323             } else {
324                 desc = "(binder proxy)";
325             }
326         } else {
327             desc = "(local binder)";
328         }
329     } else {
330         desc = "(not promotable)";
331     }
332 
333     std::stringstream ss;
334     ss << "node{" << intptr_t(this->binder.unsafe_get()) << " times sent: " << this->timesSent
335        << " times recd: " << this->timesRecd << " type: " << desc << "}";
336     return ss.str();
337 }
338 
// Allocates a buffer of `size` bytes for transaction data read off the wire.
// On rejection (over-large request) or allocation failure, mData stays null;
// callers check valid() before use (see waitForReply).
RpcState::CommandData::CommandData(size_t size) : mSize(size) {
    // The maximum size for regular binder is 1MB for all concurrent
    // transactions. A very small proportion of transactions are even
    // larger than a page, but we need to avoid allocating too much
    // data on behalf of an arbitrary client, or we could risk being in
    // a position where a single additional allocation could run out of
    // memory.
    //
    // Note, this limit may not reflect the total amount of data allocated for a
    // transaction (in some cases, additional fixed size amounts are added),
    // though for rough consistency, we should avoid cases where this data type
    // is used for multiple dynamic allocations for a single transaction.
    constexpr size_t kMaxTransactionAllocation = 100 * 1000;
    // Zero-sized request: nothing to allocate. Presumably valid() treats a
    // null buffer with mSize == 0 as valid — confirm against the declaration.
    if (size == 0) return;
    if (size > kMaxTransactionAllocation) {
        ALOGW("Transaction requested too much data allocation %zu", size);
        return;
    }
    // nothrow: allocation failure is reported via valid(), not an exception.
    mData.reset(new (std::nothrow) uint8_t[size]);
}
359 
// Writes `iovs` fully over the connection's transport, invoking `altPoll`
// (when provided) while blocked, and passing `ancillaryFds` to the transport
// alongside the data. Any transport error shuts down the whole session: a
// partial write leaves the byte stream unparseable for the peer.
status_t RpcState::rpcSend(const sp<RpcSession::RpcConnection>& connection,
                           const sp<RpcSession>& session, const char* what, iovec* iovs, int niovs,
                           const std::optional<SmallFunction<status_t()>>& altPoll,
                           const std::vector<std::variant<unique_fd, borrowed_fd>>* ancillaryFds) {
    for (int i = 0; i < niovs; i++) {
        LOG_RPC_DETAIL("Sending %s (part %d of %d) on RpcTransport %p: %s",
                       what, i + 1, niovs, connection->rpcTransport.get(),
                       HexString(iovs[i].iov_base, iovs[i].iov_len).c_str());
    }

    if (status_t status =
                connection->rpcTransport->interruptableWriteFully(session->mShutdownTrigger.get(),
                                                                  iovs, niovs, altPoll,
                                                                  ancillaryFds);
        status != OK) {
        LOG_RPC_DETAIL("Failed to write %s (%d iovs) on RpcTransport %p, error: %s", what, niovs,
                       connection->rpcTransport.get(), statusToString(status).c_str());
        // Best-effort shutdown; the write error is what gets reported.
        (void)session->shutdownAndWait(false);
        return status;
    }

    return OK;
}
383 
// Reads exactly `iovs` worth of bytes from the connection's transport,
// collecting any received file descriptors into `ancillaryFds` when non-null.
// Any transport error (including a short read) shuts down the whole session.
status_t RpcState::rpcRec(const sp<RpcSession::RpcConnection>& connection,
                          const sp<RpcSession>& session, const char* what, iovec* iovs, int niovs,
                          std::vector<std::variant<unique_fd, borrowed_fd>>* ancillaryFds) {
    if (status_t status =
                connection->rpcTransport->interruptableReadFully(session->mShutdownTrigger.get(),
                                                                 iovs, niovs, std::nullopt,
                                                                 ancillaryFds);
        status != OK) {
        LOG_RPC_DETAIL("Failed to read %s (%d iovs) on RpcTransport %p, error: %s", what, niovs,
                       connection->rpcTransport.get(), statusToString(status).c_str());
        // Best-effort shutdown; the read error is what gets reported.
        (void)session->shutdownAndWait(false);
        return status;
    }

    for (int i = 0; i < niovs; i++) {
        LOG_RPC_DETAIL("Received %s (part %d of %d) on RpcTransport %p: %s",
                       what, i + 1, niovs, connection->rpcTransport.get(),
                       HexString(iovs[i].iov_base, iovs[i].iov_len).c_str());
    }
    return OK;
}
405 
// Returns whether the negotiated wire protocol `version` may be used in this
// build. The experimental version is only permitted on non-release Android
// builds (codename != "REL"); versions at or beyond
// RPC_WIRE_PROTOCOL_VERSION_NEXT are unknown to this binary and rejected.
bool RpcState::validateProtocolVersion(uint32_t version) {
    if (version == RPC_WIRE_PROTOCOL_VERSION_EXPERIMENTAL) {
#if defined(__ANDROID__)
        char codename[PROPERTY_VALUE_MAX];
        property_get("ro.build.version.codename", codename, "");
        if (!strcmp(codename, "REL")) {
            ALOGE("Cannot use experimental RPC binder protocol in a release configuration.");
            return false;
        }
#else
        // Non-Android hosts have no system property to prove this is a
        // development build, so the experimental version is never allowed.
        ALOGE("Cannot use experimental RPC binder protocol outside of Android.");
        return false;
#endif
    } else if (version >= RPC_WIRE_PROTOCOL_VERSION_NEXT) {
        ALOGE("Cannot use RPC binder protocol version %u which is unknown (current protocol "
              "version "
              "is %u).",
              version, RPC_WIRE_PROTOCOL_VERSION);
        return false;
    }

    return true;
}
429 
readNewSessionResponse(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,uint32_t * version)430 status_t RpcState::readNewSessionResponse(const sp<RpcSession::RpcConnection>& connection,
431                                           const sp<RpcSession>& session, uint32_t* version) {
432     RpcNewSessionResponse response;
433     iovec iov{&response, sizeof(response)};
434     if (status_t status = rpcRec(connection, session, "new session response", &iov, 1, nullptr);
435         status != OK) {
436         return status;
437     }
438     *version = response.version;
439     return OK;
440 }
441 
sendConnectionInit(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session)442 status_t RpcState::sendConnectionInit(const sp<RpcSession::RpcConnection>& connection,
443                                       const sp<RpcSession>& session) {
444     RpcOutgoingConnectionInit init{
445             .msg = RPC_CONNECTION_INIT_OKAY,
446     };
447     iovec iov{&init, sizeof(init)};
448     return rpcSend(connection, session, "connection init", &iov, 1, std::nullopt);
449 }
450 
readConnectionInit(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session)451 status_t RpcState::readConnectionInit(const sp<RpcSession::RpcConnection>& connection,
452                                       const sp<RpcSession>& session) {
453     RpcOutgoingConnectionInit init;
454     iovec iov{&init, sizeof(init)};
455     if (status_t status = rpcRec(connection, session, "connection init", &iov, 1, nullptr);
456         status != OK)
457         return status;
458 
459     static_assert(sizeof(init.msg) == sizeof(RPC_CONNECTION_INIT_OKAY));
460     if (0 != strncmp(init.msg, RPC_CONNECTION_INIT_OKAY, sizeof(init.msg))) {
461         ALOGE("Connection init message unrecognized %.*s", static_cast<int>(sizeof(init.msg)),
462               init.msg);
463         return BAD_VALUE;
464     }
465     return OK;
466 }
467 
// Fetches the server's root object via the special GET_ROOT transaction on
// the reserved zero address. Returns null on any failure.
sp<IBinder> RpcState::getRootObject(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<RpcSession>& session) {
    Parcel data;
    data.markForRpc(session);

    Parcel reply;
    const status_t status =
            transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_ROOT, data, session, &reply, 0);
    if (status != OK) {
        ALOGE("Error getting root object: %s", statusToString(status).c_str());
        return nullptr;
    }

    return reply.readStrongBinder();
}
483 
getMaxThreads(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,size_t * maxThreadsOut)484 status_t RpcState::getMaxThreads(const sp<RpcSession::RpcConnection>& connection,
485                                  const sp<RpcSession>& session, size_t* maxThreadsOut) {
486     Parcel data;
487     data.markForRpc(session);
488     Parcel reply;
489 
490     status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_MAX_THREADS, data,
491                                       session, &reply, 0);
492     if (status != OK) {
493         ALOGE("Error getting max threads: %s", statusToString(status).c_str());
494         return status;
495     }
496 
497     int32_t maxThreads;
498     status = reply.readInt32(&maxThreads);
499     if (status != OK) return status;
500     if (maxThreads <= 0) {
501         ALOGE("Error invalid max maxThreads: %d", maxThreads);
502         return BAD_VALUE;
503     }
504 
505     *maxThreadsOut = maxThreads;
506     return OK;
507 }
508 
getSessionId(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,std::vector<uint8_t> * sessionIdOut)509 status_t RpcState::getSessionId(const sp<RpcSession::RpcConnection>& connection,
510                                 const sp<RpcSession>& session, std::vector<uint8_t>* sessionIdOut) {
511     Parcel data;
512     data.markForRpc(session);
513     Parcel reply;
514 
515     status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_SESSION_ID, data,
516                                       session, &reply, 0);
517     if (status != OK) {
518         ALOGE("Error getting session ID: %s", statusToString(status).c_str());
519         return status;
520     }
521 
522     return reply.readByteVector(sessionIdOut);
523 }
524 
// Client-side entry point for a transaction on `binder`: validates the parcel
// against this session, converts the binder into its wire address (recording
// the outgoing strong ref), and forwards to transactAddress().
status_t RpcState::transact(const sp<RpcSession::RpcConnection>& connection,
                            const sp<IBinder>& binder, uint32_t code, const Parcel& data,
                            const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
    std::string errorMsg;
    if (status_t status = validateParcel(session, data, &errorMsg); status != OK) {
        ALOGE("Refusing to send RPC on binder %p code %" PRIu32 ": Parcel %p failed validation: %s",
              binder.get(), code, &data, errorMsg.c_str());
        return status;
    }
    uint64_t address;
    if (status_t status = onBinderLeaving(session, binder, &address); status != OK) return status;

    return transactAddress(connection, address, code, data, session, reply, flags);
}
539 
// Serializes and sends a transaction to the node at `address` (0 is the
// reserved special-transaction target). For synchronous calls, blocks in
// waitForReply(); for oneway calls, returns as soon as the command is sent.
//
// The parcel must have been marked for RPC and must carry no kernel-binder
// objects (both enforced fatally below).
status_t RpcState::transactAddress(const sp<RpcSession::RpcConnection>& connection,
                                   uint64_t address, uint32_t code, const Parcel& data,
                                   const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
    LOG_ALWAYS_FATAL_IF(!data.isForRpc());
    LOG_ALWAYS_FATAL_IF(data.objectsCount() != 0);

    uint64_t asyncNumber = 0;

    if (address != 0) {
        RpcMutexUniqueLock _l(mNodeMutex);
        if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
        auto it = mNodeForAddress.find(address);
        LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(),
                            "Sending transact on unknown address %" PRIu64, address);

        // Oneway calls are sequenced per-node: stamp this transaction with the
        // node's current async number and advance it for the next caller.
        if (flags & IBinder::FLAG_ONEWAY) {
            asyncNumber = it->second.asyncNumber;
            if (!nodeProgressAsyncNumber(&it->second)) {
                _l.unlock();
                (void)session->shutdownAndWait(false);
                return DEAD_OBJECT;
            }
        }
    }

    auto* rpcFields = data.maybeRpcFields();
    LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);

    Span<const uint32_t> objectTableSpan = Span<const uint32_t>{rpcFields->mObjectPositions.data(),
                                                                rpcFields->mObjectPositions.size()};

    // bodySize = header struct + parcel payload + object table, with each
    // addition checked for uint32_t overflow (wire sizes are 32-bit).
    uint32_t bodySize;
    LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(sizeof(RpcWireTransaction), data.dataSize(),
                                               &bodySize) ||
                                __builtin_add_overflow(objectTableSpan.byteSize(), bodySize,
                                                       &bodySize),
                        "Too much data %zu", data.dataSize());
    RpcWireHeader command{
            .command = RPC_COMMAND_TRANSACT,
            .bodySize = bodySize,
    };

    RpcWireTransaction transaction{
            .address = RpcWireAddress::fromRaw(address),
            .code = code,
            .flags = flags,
            .asyncNumber = asyncNumber,
            // bodySize didn't overflow => this cast is safe
            .parcelDataSize = static_cast<uint32_t>(data.dataSize()),
    };

    // Oneway calls have no sync point, so if many are sent before, whether this
    // is a twoway or oneway transaction, they may have filled up the socket.
    // So, make sure we drain them before polling
    constexpr size_t kWaitMaxUs = 1000000;
    constexpr size_t kWaitLogUs = 10000;
    size_t waitUs = 0;

    iovec iovs[]{
            {&command, sizeof(RpcWireHeader)},
            {&transaction, sizeof(RpcWireTransaction)},
            {const_cast<uint8_t*>(data.data()), data.dataSize()},
            objectTableSpan.toIovec(),
    };
    // Called by the transport while the write would block: drain incoming
    // control commands (refcount packets) with exponential backoff so the
    // peer can make progress emptying the socket.
    auto altPoll = [&] {
        if (waitUs > kWaitLogUs) {
            ALOGE("Cannot send command, trying to process pending refcounts. Waiting "
                  "%zuus. Too many oneway calls?",
                  waitUs);
        }

        if (waitUs > 0) {
            usleep(waitUs);
            waitUs = std::min(kWaitMaxUs, waitUs * 2);
        } else {
            waitUs = 1;
        }

        return drainCommands(connection, session, CommandType::CONTROL_ONLY);
    };
    if (status_t status = rpcSend(connection, session, "transaction", iovs, countof(iovs),
                                  std::ref(altPoll), rpcFields->mFds.get());
        status != OK) {
        // rpcSend calls shutdownAndWait, so all refcounts should be reset. If we ever tolerate
        // errors here, then we may need to undo the binder-sent counts for the transaction as
        // well as for the binder objects in the Parcel
        return status;
    }

    if (flags & IBinder::FLAG_ONEWAY) {
        LOG_RPC_DETAIL("Oneway command, so no longer waiting on RpcTransport %p",
                       connection->rpcTransport.get());

        // Do not wait on result.
        return OK;
    }

    LOG_ALWAYS_FATAL_IF(reply == nullptr, "Reply parcel must be used for synchronous transaction.");

    return waitForReply(connection, session, reply);
}
641 
cleanup_reply_data(const uint8_t * data,size_t dataSize,const binder_size_t * objects,size_t objectsCount)642 static void cleanup_reply_data(const uint8_t* data, size_t dataSize, const binder_size_t* objects,
643                                size_t objectsCount) {
644     delete[] const_cast<uint8_t*>(data);
645     (void)dataSize;
646     LOG_ALWAYS_FATAL_IF(objects != nullptr);
647     (void)objectsCount;
648 }
649 
waitForReply(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,Parcel * reply)650 status_t RpcState::waitForReply(const sp<RpcSession::RpcConnection>& connection,
651                                 const sp<RpcSession>& session, Parcel* reply) {
652     std::vector<std::variant<unique_fd, borrowed_fd>> ancillaryFds;
653     RpcWireHeader command;
654     while (true) {
655         iovec iov{&command, sizeof(command)};
656         if (status_t status = rpcRec(connection, session, "command header (for reply)", &iov, 1,
657                                      enableAncillaryFds(session->getFileDescriptorTransportMode())
658                                              ? &ancillaryFds
659                                              : nullptr);
660             status != OK)
661             return status;
662 
663         if (command.command == RPC_COMMAND_REPLY) break;
664 
665         if (status_t status = processCommand(connection, session, command, CommandType::ANY,
666                                              std::move(ancillaryFds));
667             status != OK)
668             return status;
669 
670         // Reset to avoid spurious use-after-move warning from clang-tidy.
671         ancillaryFds = decltype(ancillaryFds)();
672     }
673 
674     const size_t rpcReplyWireSize = RpcWireReply::wireSize(session->getProtocolVersion().value());
675 
676     if (command.bodySize < rpcReplyWireSize) {
677         ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcWireReply. Terminating!",
678               sizeof(RpcWireReply), command.bodySize);
679         (void)session->shutdownAndWait(false);
680         return BAD_VALUE;
681     }
682 
683     RpcWireReply rpcReply;
684     memset(&rpcReply, 0, sizeof(RpcWireReply)); // zero because of potential short read
685 
686     CommandData data(command.bodySize - rpcReplyWireSize);
687     if (!data.valid()) return NO_MEMORY;
688 
689     iovec iovs[]{
690             {&rpcReply, rpcReplyWireSize},
691             {data.data(), data.size()},
692     };
693     if (status_t status = rpcRec(connection, session, "reply body", iovs, countof(iovs), nullptr);
694         status != OK)
695         return status;
696 
697     if (rpcReply.status != OK) return rpcReply.status;
698 
699     Span<const uint8_t> parcelSpan = {data.data(), data.size()};
700     Span<const uint32_t> objectTableSpan;
701     if (session->getProtocolVersion().value() >=
702         RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE) {
703         std::optional<Span<const uint8_t>> objectTableBytes =
704                 parcelSpan.splitOff(rpcReply.parcelDataSize);
705         if (!objectTableBytes.has_value()) {
706             ALOGE("Parcel size larger than available bytes: %" PRId32 " vs %zu. Terminating!",
707                   rpcReply.parcelDataSize, parcelSpan.byteSize());
708             (void)session->shutdownAndWait(false);
709             return BAD_VALUE;
710         }
711         std::optional<Span<const uint32_t>> maybeSpan =
712                 objectTableBytes->reinterpret<const uint32_t>();
713         if (!maybeSpan.has_value()) {
714             ALOGE("Bad object table size inferred from RpcWireReply. Saw bodySize=%" PRId32
715                   " sizeofHeader=%zu parcelSize=%" PRId32 " objectTableBytesSize=%zu. Terminating!",
716                   command.bodySize, rpcReplyWireSize, rpcReply.parcelDataSize,
717                   objectTableBytes->size);
718             return BAD_VALUE;
719         }
720         objectTableSpan = *maybeSpan;
721     }
722 
723     data.release();
724     return reply->rpcSetDataReference(session, parcelSpan.data, parcelSpan.size,
725                                       objectTableSpan.data, objectTableSpan.size,
726                                       std::move(ancillaryFds), cleanup_reply_data);
727 }
728 
// Sends RPC_COMMAND_DEC_STRONG to the remote side, releasing strong refs this
// process has received on the binder at `addr`, so that our recorded receive
// count (timesRecd) drops to exactly `target`.
//
// Returns OK without sending anything if another thread already lowered the
// count to `target` (the decrements were combined), DEAD_OBJECT if the state
// is terminated, or the status of the wire send otherwise.
status_t RpcState::sendDecStrongToTarget(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, uint64_t addr,
                                         size_t target) {
    RpcDecStrong body = {
            .address = RpcWireAddress::fromRaw(addr),
    };

    {
        RpcMutexUniqueLock _l(mNodeMutex);
        if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
        auto it = mNodeForAddress.find(addr);
        LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(),
                            "Sending dec strong on unknown address %" PRIu64, addr);

        // We can only release references we have actually received.
        LOG_ALWAYS_FATAL_IF(it->second.timesRecd < target, "Can't dec count of %zu to %zu.",
                            it->second.timesRecd, target);

        // typically this happens when multiple threads send dec refs at the
        // same time - the transactions will get combined automatically
        if (it->second.timesRecd == target) return OK;

        // Compute the delta under the lock so concurrent callers cannot
        // double-release the same references.
        body.amount = it->second.timesRecd - target;
        it->second.timesRecd = target;

        // tryEraseNode consumes the lock (it may erase the node and trigger
        // shutdown). A received binder must not have a sentRef owned here.
        LOG_ALWAYS_FATAL_IF(nullptr != tryEraseNode(session, std::move(_l), it),
                            "Bad state. RpcState shouldn't own received binder");
        // LOCK ALREADY RELEASED
    }

    // Send the header and body in one gathered write, outside the lock.
    RpcWireHeader cmd = {
            .command = RPC_COMMAND_DEC_STRONG,
            .bodySize = sizeof(RpcDecStrong),
    };
    iovec iovs[]{{&cmd, sizeof(cmd)}, {&body, sizeof(body)}};
    return rpcSend(connection, session, "dec ref", iovs, countof(iovs), std::nullopt);
}
765 
getAndExecuteCommand(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,CommandType type)766 status_t RpcState::getAndExecuteCommand(const sp<RpcSession::RpcConnection>& connection,
767                                         const sp<RpcSession>& session, CommandType type) {
768     LOG_RPC_DETAIL("getAndExecuteCommand on RpcTransport %p", connection->rpcTransport.get());
769 
770     std::vector<std::variant<unique_fd, borrowed_fd>> ancillaryFds;
771     RpcWireHeader command;
772     iovec iov{&command, sizeof(command)};
773     if (status_t status =
774                 rpcRec(connection, session, "command header (for server)", &iov, 1,
775                        enableAncillaryFds(session->getFileDescriptorTransportMode()) ? &ancillaryFds
776                                                                                      : nullptr);
777         status != OK)
778         return status;
779 
780     return processCommand(connection, session, command, type, std::move(ancillaryFds));
781 }
782 
drainCommands(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,CommandType type)783 status_t RpcState::drainCommands(const sp<RpcSession::RpcConnection>& connection,
784                                  const sp<RpcSession>& session, CommandType type) {
785     while (true) {
786         status_t status = connection->rpcTransport->pollRead();
787         if (status == WOULD_BLOCK) break;
788         if (status != OK) return status;
789 
790         status = getAndExecuteCommand(connection, session, type);
791         if (status != OK) return status;
792     }
793     return OK;
794 }
795 
processCommand(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,const RpcWireHeader & command,CommandType type,std::vector<std::variant<unique_fd,borrowed_fd>> && ancillaryFds)796 status_t RpcState::processCommand(
797         const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
798         const RpcWireHeader& command, CommandType type,
799         std::vector<std::variant<unique_fd, borrowed_fd>>&& ancillaryFds) {
800 #ifdef BINDER_WITH_KERNEL_IPC
801     IPCThreadState* kernelBinderState = IPCThreadState::selfOrNull();
802     IPCThreadState::SpGuard spGuard{
803             .address = __builtin_frame_address(0),
804             .context = "processing binder RPC command (where RpcServer::setPerSessionRootObject is "
805                        "used to distinguish callers)",
806     };
807     const IPCThreadState::SpGuard* origGuard;
808     if (kernelBinderState != nullptr) {
809         origGuard = kernelBinderState->pushGetCallingSpGuard(&spGuard);
810     }
811 
812     auto guardUnguard = make_scope_guard([&]() {
813         if (kernelBinderState != nullptr) {
814             kernelBinderState->restoreGetCallingSpGuard(origGuard);
815         }
816     });
817 #endif // BINDER_WITH_KERNEL_IPC
818 
819     switch (command.command) {
820         case RPC_COMMAND_TRANSACT:
821             if (type != CommandType::ANY) return BAD_TYPE;
822             return processTransact(connection, session, command, std::move(ancillaryFds));
823         case RPC_COMMAND_DEC_STRONG:
824             return processDecStrong(connection, session, command);
825     }
826 
827     // We should always know the version of the opposing side, and since the
828     // RPC-binder-level wire protocol is not self synchronizing, we have no way
829     // to understand where the current command ends and the next one begins. We
830     // also can't consider it a fatal error because this would allow any client
831     // to kill us, so ending the session for misbehaving client.
832     ALOGE("Unknown RPC command %d - terminating session", command.command);
833     (void)session->shutdownAndWait(false);
834     return DEAD_OBJECT;
835 }
processTransact(const sp<RpcSession::RpcConnection> & connection,const sp<RpcSession> & session,const RpcWireHeader & command,std::vector<std::variant<unique_fd,borrowed_fd>> && ancillaryFds)836 status_t RpcState::processTransact(
837         const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
838         const RpcWireHeader& command,
839         std::vector<std::variant<unique_fd, borrowed_fd>>&& ancillaryFds) {
840     LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_TRANSACT, "command: %d", command.command);
841 
842     CommandData transactionData(command.bodySize);
843     if (!transactionData.valid()) {
844         return NO_MEMORY;
845     }
846     iovec iov{transactionData.data(), transactionData.size()};
847     if (status_t status = rpcRec(connection, session, "transaction body", &iov, 1, nullptr);
848         status != OK)
849         return status;
850 
851     return processTransactInternal(connection, session, std::move(transactionData),
852                                    std::move(ancillaryFds));
853 }
854 
// Parcel release callback used when the transaction buffer is owned (and
// freed) by the caller rather than by the Parcel: intentionally a no-op.
static void do_nothing_to_transact_data(const uint8_t* /*data*/, size_t /*dataSize*/,
                                        const binder_size_t* /*objects*/,
                                        size_t /*objectsCount*/) {}
862 
// Executes one transaction whose body has already been read into
// `transactionData`. Handles oneway (async) ordering, special (addr == 0)
// control transactions, reply construction, and — via the label below —
// iterative processing of queued async transactions without growing the
// stack.
status_t RpcState::processTransactInternal(
        const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
        CommandData transactionData,
        std::vector<std::variant<unique_fd, borrowed_fd>>&& ancillaryFds) {
    // for 'recursive' calls to this, we have already read and processed the
    // binder from the transaction data and taken reference counts into account,
    // so it is cached here.
    sp<IBinder> target;
processTransactInternalTailCall:

    // The body must at least contain the fixed-size transaction header.
    if (transactionData.size() < sizeof(RpcWireTransaction)) {
        ALOGE("Expecting %zu but got %zu bytes for RpcWireTransaction. Terminating!",
              sizeof(RpcWireTransaction), transactionData.size());
        (void)session->shutdownAndWait(false);
        return BAD_VALUE;
    }
    RpcWireTransaction* transaction = reinterpret_cast<RpcWireTransaction*>(transactionData.data());

    uint64_t addr = RpcWireAddress::toRaw(transaction->address);
    bool oneway = transaction->flags & IBinder::FLAG_ONEWAY;

    status_t replyStatus = OK;
    if (addr != 0) {
        // Resolve the target binder unless a previous tail-call iteration
        // already cached it.
        if (!target) {
            replyStatus = onBinderEntering(session, addr, &target);
        }

        if (replyStatus != OK) {
            // do nothing
        } else if (target == nullptr) {
            // This can happen if the binder is remote in this process, and
            // another thread has called the last decStrong on this binder.
            // However, for local binders, it indicates a misbehaving client
            // (any binder which is being transacted on should be holding a
            // strong ref count), so in either case, terminating the
            // session.
            ALOGE("While transacting, binder has been deleted at address %" PRIu64 ". Terminating!",
                  addr);
            (void)session->shutdownAndWait(false);
            replyStatus = BAD_VALUE;
        } else if (target->localBinder() == nullptr) {
            ALOGE("Unknown binder address or non-local binder, not address %" PRIu64
                  ". Terminating!",
                  addr);
            (void)session->shutdownAndWait(false);
            replyStatus = BAD_VALUE;
        } else if (oneway) {
            // Oneway calls must execute in asyncNumber order per binder; if
            // this one is early, queue it and return.
            RpcMutexUniqueLock _l(mNodeMutex);
            auto it = mNodeForAddress.find(addr);
            if (it->second.binder.promote() != target) {
                ALOGE("Binder became invalid during transaction. Bad client? %" PRIu64, addr);
                replyStatus = BAD_VALUE;
            } else if (transaction->asyncNumber != it->second.asyncNumber) {
                // we need to process some other asynchronous transaction
                // first
                it->second.asyncTodo.push(BinderNode::AsyncTodo{
                        .ref = target,
                        .data = std::move(transactionData),
                        .ancillaryFds = std::move(ancillaryFds),
                        .asyncNumber = transaction->asyncNumber,
                });

                size_t numPending = it->second.asyncTodo.size();
                LOG_RPC_DETAIL("Enqueuing %" PRIu64 " on %" PRIu64 " (%zu pending)",
                               transaction->asyncNumber, addr, numPending);

                // Backpressure thresholds: warn periodically once the queue
                // gets large, and terminate the session if it grows unbounded.
                constexpr size_t kArbitraryOnewayCallTerminateLevel = 10000;
                constexpr size_t kArbitraryOnewayCallWarnLevel = 1000;
                constexpr size_t kArbitraryOnewayCallWarnPer = 1000;

                if (numPending >= kArbitraryOnewayCallWarnLevel) {
                    if (numPending >= kArbitraryOnewayCallTerminateLevel) {
                        ALOGE("WARNING: %zu pending oneway transactions. Terminating!", numPending);
                        _l.unlock();
                        (void)session->shutdownAndWait(false);
                        return FAILED_TRANSACTION;
                    }

                    if (numPending % kArbitraryOnewayCallWarnPer == 0) {
                        ALOGW("Warning: many oneway transactions built up on %p (%zu)",
                              target.get(), numPending);
                    }
                }
                return OK;
            }
        }
    }

    Parcel reply;
    reply.markForRpc(session);

    if (replyStatus == OK) {
        // The parcel payload follows the wire header within transactionData.
        Span<const uint8_t> parcelSpan = {transaction->data,
                                          transactionData.size() -
                                                  offsetof(RpcWireTransaction, data)};
        Span<const uint32_t> objectTableSpan;
        if (session->getProtocolVersion().value() >=
            RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE) {
            // Newer protocol: payload is parcel data followed by an object
            // table; split them using the explicit parcel size.
            std::optional<Span<const uint8_t>> objectTableBytes =
                    parcelSpan.splitOff(transaction->parcelDataSize);
            if (!objectTableBytes.has_value()) {
                ALOGE("Parcel size (%" PRId32 ") greater than available bytes (%zu). Terminating!",
                      transaction->parcelDataSize, parcelSpan.byteSize());
                (void)session->shutdownAndWait(false);
                return BAD_VALUE;
            }
            std::optional<Span<const uint32_t>> maybeSpan =
                    objectTableBytes->reinterpret<const uint32_t>();
            if (!maybeSpan.has_value()) {
                ALOGE("Bad object table size inferred from RpcWireTransaction. Saw bodySize=%zu "
                      "sizeofHeader=%zu parcelSize=%" PRId32
                      " objectTableBytesSize=%zu. Terminating!",
                      transactionData.size(), sizeof(RpcWireTransaction),
                      transaction->parcelDataSize, objectTableBytes->size);
                return BAD_VALUE;
            }
            objectTableSpan = *maybeSpan;
        }

        Parcel data;
        // transaction->data is owned by this function. Parcel borrows this data and
        // only holds onto it for the duration of this function call. Parcel will be
        // deleted before the 'transactionData' object.

        replyStatus =
                data.rpcSetDataReference(session, parcelSpan.data, parcelSpan.size,
                                         objectTableSpan.data, objectTableSpan.size,
                                         std::move(ancillaryFds), do_nothing_to_transact_data);
        // Reset to avoid spurious use-after-move warning from clang-tidy.
        ancillaryFds = std::remove_reference<decltype(ancillaryFds)>::type();

        if (replyStatus == OK) {
            if (target) {
                // Nested (incoming) calls are only allowed while we are
                // blocked on a synchronous transaction.
                bool origAllowNested = connection->allowNested;
                connection->allowNested = !oneway;

                replyStatus = target->transact(transaction->code, data, &reply, transaction->flags);

                connection->allowNested = origAllowNested;
            } else {
                // addr == 0: session-level control transactions.
                LOG_RPC_DETAIL("Got special transaction %u", transaction->code);

                switch (transaction->code) {
                    case RPC_SPECIAL_TRANSACT_GET_MAX_THREADS: {
                        replyStatus = reply.writeInt32(session->getMaxIncomingThreads());
                        break;
                    }
                    case RPC_SPECIAL_TRANSACT_GET_SESSION_ID: {
                        // for client connections, this should always report the value
                        // originally returned from the server, so this is asserting
                        // that it exists
                        replyStatus = reply.writeByteVector(session->mId);
                        break;
                    }
                    default: {
                        // Remaining special codes only make sense on the
                        // server side of a session.
                        sp<RpcServer> server = session->server();
                        if (server) {
                            switch (transaction->code) {
                                case RPC_SPECIAL_TRANSACT_GET_ROOT: {
                                    // Per-session root object takes priority
                                    // over the server-wide root object.
                                    sp<IBinder> root = session->mSessionSpecificRootObject
                                            ?: server->getRootObject();
                                    replyStatus = reply.writeStrongBinder(root);
                                    break;
                                }
                                default: {
                                    replyStatus = UNKNOWN_TRANSACTION;
                                }
                            }
                        } else {
                            ALOGE("Special command sent, but no server object attached.");
                        }
                    }
                }
            }
        }
    }

    if (oneway) {
        if (replyStatus != OK) {
            // Oneway calls have no reply channel, so the error can only be logged.
            ALOGW("Oneway call failed with error: %d", replyStatus);
        }

        LOG_RPC_DETAIL("Processed async transaction %" PRIu64 " on %" PRIu64,
                       transaction->asyncNumber, addr);

        // Check to see if there is another asynchronous transaction to process.
        // This behavior differs from binder behavior, since in the binder
        // driver, asynchronous transactions will be processed after existing
        // pending binder transactions on the queue. The downside of this is
        // that asynchronous transactions can be drowned out by synchronous
        // transactions. However, we have no easy way to queue these
        // transactions after the synchronous transactions we may want to read
        // from the wire. So, in socket binder here, we have the opposite
        // downside: asynchronous transactions may drown out synchronous
        // transactions.
        {
            RpcMutexUniqueLock _l(mNodeMutex);
            auto it = mNodeForAddress.find(addr);
            // last refcount dropped after this transaction happened
            if (it == mNodeForAddress.end()) return OK;

            if (!nodeProgressAsyncNumber(&it->second)) {
                _l.unlock();
                (void)session->shutdownAndWait(false);
                return DEAD_OBJECT;
            }

            if (it->second.asyncTodo.size() != 0 &&
                it->second.asyncTodo.top().asyncNumber == it->second.asyncNumber) {
                LOG_RPC_DETAIL("Found next async transaction %" PRIu64 " on %" PRIu64,
                               it->second.asyncNumber, addr);

                // justification for const_cast (consider avoiding priority_queue):
                // - AsyncTodo operator< doesn't depend on 'data' or 'ref' objects
                // - gotta go fast
                auto& todo = const_cast<BinderNode::AsyncTodo&>(it->second.asyncTodo.top());

                // reset up arguments
                transactionData = std::move(todo.data);
                ancillaryFds = std::move(todo.ancillaryFds);
                LOG_ALWAYS_FATAL_IF(target != todo.ref,
                                    "async list should be associated with a binder");

                it->second.asyncTodo.pop();
                // Loop rather than recurse so a long async queue cannot
                // overflow the stack; `target` stays cached across iterations.
                goto processTransactInternalTailCall;
            }
        }

        // done processing all the async commands on this binder that we can, so
        // write decstrongs on the binder
        if (addr != 0 && replyStatus == OK) {
            return flushExcessBinderRefs(session, addr, target);
        }

        return OK;
    }

    // Binder refs are flushed for oneway calls only after all calls which are
    // built up are executed. Otherwise, they fill up the binder buffer.
    if (addr != 0 && replyStatus == OK) {
        replyStatus = flushExcessBinderRefs(session, addr, target);
    }

    std::string errorMsg;
    if (status_t status = validateParcel(session, reply, &errorMsg); status != OK) {
        ALOGE("Reply Parcel failed validation: %s", errorMsg.c_str());
        // Forward the error to the client of the transaction.
        reply.freeData();
        reply.markForRpc(session);
        replyStatus = status;
    }

    auto* rpcFields = reply.maybeRpcFields();
    LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);

    const size_t rpcReplyWireSize = RpcWireReply::wireSize(session->getProtocolVersion().value());

    Span<const uint32_t> objectTableSpan = Span<const uint32_t>{rpcFields->mObjectPositions.data(),
                                                                rpcFields->mObjectPositions.size()};

    // Total reply body = reply wire header + parcel data + object table;
    // checked for overflow since bodySize is narrower than size_t.
    uint32_t bodySize;
    LOG_ALWAYS_FATAL_IF(__builtin_add_overflow(rpcReplyWireSize, reply.dataSize(), &bodySize) ||
                                __builtin_add_overflow(objectTableSpan.byteSize(), bodySize,
                                                       &bodySize),
                        "Too much data for reply %zu", reply.dataSize());
    RpcWireHeader cmdReply{
            .command = RPC_COMMAND_REPLY,
            .bodySize = bodySize,
    };
    RpcWireReply rpcReply{
            .status = replyStatus,
            // NOTE: Not necessarily written to socket depending on session
            // version.
            // NOTE: bodySize didn't overflow => this cast is safe
            .parcelDataSize = static_cast<uint32_t>(reply.dataSize()),
            .reserved = {0, 0, 0},
    };
    iovec iovs[]{
            {&cmdReply, sizeof(RpcWireHeader)},
            {&rpcReply, rpcReplyWireSize},
            {const_cast<uint8_t*>(reply.data()), reply.dataSize()},
            objectTableSpan.toIovec(),
    };
    return rpcSend(connection, session, "reply", iovs, countof(iovs), std::nullopt,
                   rpcFields->mFds.get());
}
1149 
// Handles an incoming RPC_COMMAND_DEC_STRONG: the remote side is returning
// `body.amount` strong references for the binder at `body.address` that we
// previously sent it. Decrements our send count and erases the node (possibly
// triggering session shutdown) when no references remain.
status_t RpcState::processDecStrong(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<RpcSession>& session, const RpcWireHeader& command) {
    LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_DEC_STRONG, "command: %d", command.command);

    // NOTE(review): bodySize appears to be unsigned; PRIu32 would be the
    // matching format macro here — confirm against RpcWireFormat.h.
    if (command.bodySize != sizeof(RpcDecStrong)) {
        ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcDecStrong. Terminating!",
              sizeof(RpcDecStrong), command.bodySize);
        (void)session->shutdownAndWait(false);
        return BAD_VALUE;
    }

    RpcDecStrong body;
    iovec iov{&body, sizeof(RpcDecStrong)};
    if (status_t status = rpcRec(connection, session, "dec ref body", &iov, 1, nullptr);
        status != OK)
        return status;

    uint64_t addr = RpcWireAddress::toRaw(body.address);
    RpcMutexUniqueLock _l(mNodeMutex);
    auto it = mNodeForAddress.find(addr);
    if (it == mNodeForAddress.end()) {
        // Unknown address is logged but tolerated — not worth killing the
        // session over.
        ALOGE("Unknown binder address %" PRIu64 " for dec strong.", addr);
        return OK;
    }

    sp<IBinder> target = it->second.binder.promote();
    if (target == nullptr) {
        ALOGE("While requesting dec strong, binder has been deleted at address %" PRIu64
              ". Terminating!",
              addr);
        _l.unlock();
        (void)session->shutdownAndWait(false);
        return BAD_VALUE;
    }

    // The remote side cannot return more references than we recorded sending.
    if (it->second.timesSent < body.amount) {
        ALOGE("Record of sending binder %zu times, but requested decStrong for %" PRIu64 " of %u",
              it->second.timesSent, addr, body.amount);
        return OK;
    }

    LOG_ALWAYS_FATAL_IF(it->second.sentRef == nullptr, "Inconsistent state, lost ref for %" PRIu64,
                        addr);

    LOG_RPC_DETAIL("Processing dec strong of %" PRIu64 " by %u from %zu", addr, body.amount,
                   it->second.timesSent);

    it->second.timesSent -= body.amount;
    // tryEraseNode consumes the lock; it may erase the node and return the
    // last strong ref we were holding for the remote side.
    sp<IBinder> tempHold = tryEraseNode(session, std::move(_l), it);
    // LOCK ALREADY RELEASED
    tempHold = nullptr; // destructor may make binder calls on this session

    return OK;
}
1204 
validateParcel(const sp<RpcSession> & session,const Parcel & parcel,std::string * errorMsg)1205 status_t RpcState::validateParcel(const sp<RpcSession>& session, const Parcel& parcel,
1206                                   std::string* errorMsg) {
1207     auto* rpcFields = parcel.maybeRpcFields();
1208     if (rpcFields == nullptr) {
1209         *errorMsg = "Parcel not crafted for RPC call";
1210         return BAD_TYPE;
1211     }
1212 
1213     if (rpcFields->mSession != session) {
1214         *errorMsg = "Parcel's session doesn't match";
1215         return BAD_TYPE;
1216     }
1217 
1218     uint32_t protocolVersion = session->getProtocolVersion().value();
1219     if (protocolVersion < RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE &&
1220         !rpcFields->mObjectPositions.empty()) {
1221         std::stringstream ss;
1222         ss << "Parcel has attached objects but the session's protocol version (" << protocolVersion
1223            << ") is too old, must be at least "
1224            << RPC_WIRE_PROTOCOL_VERSION_RPC_HEADER_FEATURE_EXPLICIT_PARCEL_SIZE;
1225         *errorMsg = ss.str();
1226         return BAD_VALUE;
1227     }
1228 
1229     if (rpcFields->mFds && !rpcFields->mFds->empty()) {
1230         switch (session->getFileDescriptorTransportMode()) {
1231             case RpcSession::FileDescriptorTransportMode::NONE:
1232                 *errorMsg =
1233                         "Parcel has file descriptors, but no file descriptor transport is enabled";
1234                 return FDS_NOT_ALLOWED;
1235             case RpcSession::FileDescriptorTransportMode::UNIX: {
1236                 constexpr size_t kMaxFdsPerMsg = 253;
1237                 if (rpcFields->mFds->size() > kMaxFdsPerMsg) {
1238                     std::stringstream ss;
1239                     ss << "Too many file descriptors in Parcel for unix domain socket: "
1240                        << rpcFields->mFds->size() << " (max is " << kMaxFdsPerMsg << ")";
1241                     *errorMsg = ss.str();
1242                     return BAD_VALUE;
1243                 }
1244                 break;
1245             }
1246             case RpcSession::FileDescriptorTransportMode::TRUSTY: {
1247                 // Keep this in sync with trusty_ipc.h!!!
1248                 // We could import that file here on Trusty, but it's not
1249                 // available on Android
1250                 constexpr size_t kMaxFdsPerMsg = 8;
1251                 if (rpcFields->mFds->size() > kMaxFdsPerMsg) {
1252                     std::stringstream ss;
1253                     ss << "Too many file descriptors in Parcel for Trusty IPC connection: "
1254                        << rpcFields->mFds->size() << " (max is " << kMaxFdsPerMsg << ")";
1255                     *errorMsg = ss.str();
1256                     return BAD_VALUE;
1257                 }
1258                 break;
1259             }
1260         }
1261     }
1262 
1263     return OK;
1264 }
1265 
// Erases the node at `it` if neither side holds references to it anymore
// (timesSent == 0 and timesRecd == 0). Consumes `nodeLock` — the lock is
// always released before this function returns, so callers must not touch
// `it` afterwards. If the last node was erased, the whole RpcState is cleared
// and the session is shut down. Returns the sentRef (if any) so the caller
// can control where its destructor runs (outside the lock).
sp<IBinder> RpcState::tryEraseNode(const sp<RpcSession>& session, RpcMutexUniqueLock nodeLock,
                                   std::map<uint64_t, BinderNode>::iterator& it) {
    bool shouldShutdown = false;

    sp<IBinder> ref;

    if (it->second.timesSent == 0) {
        // No outstanding sends: we no longer need to keep the binder alive on
        // behalf of the remote side.
        ref = std::move(it->second.sentRef);

        if (it->second.timesRecd == 0) {
            LOG_ALWAYS_FATAL_IF(!it->second.asyncTodo.empty(),
                                "Can't delete binder w/ pending async transactions");
            mNodeForAddress.erase(it);

            if (mNodeForAddress.size() == 0) {
                shouldShutdown = true;
            }
        }
    }

    // If we shutdown, prevent RpcState from being re-used. This prevents another
    // thread from getting the root object again.
    if (shouldShutdown) {
        // clear() consumes the lock.
        clear(std::move(nodeLock));
    } else {
        nodeLock.unlock(); // explicit
    }
    // LOCK IS RELEASED

    if (shouldShutdown) {
        ALOGI("RpcState has no binders left, so triggering shutdown...");
        (void)session->shutdownAndWait(false);
    }

    return ref;
}
1302 
nodeProgressAsyncNumber(BinderNode * node)1303 bool RpcState::nodeProgressAsyncNumber(BinderNode* node) {
1304     // 2**64 =~ 10**19 =~ 1000 transactions per second for 585 million years to
1305     // a single binder
1306     if (node->asyncNumber >= std::numeric_limits<decltype(node->asyncNumber)>::max()) {
1307         ALOGE("Out of async transaction IDs. Terminating");
1308         return false;
1309     }
1310     node->asyncNumber++;
1311     return true;
1312 }
1313 
1314 } // namespace android
1315