• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "RpcState"
18 
19 #include "RpcState.h"
20 
21 #include <binder/BpBinder.h>
22 #include <binder/RpcServer.h>
23 
24 #include "Debug.h"
25 #include "RpcWireFormat.h"
26 
27 #include <inttypes.h>
28 
29 namespace android {
30 
RpcState()31 RpcState::RpcState() {}
~RpcState()32 RpcState::~RpcState() {}
33 
// Called when 'binder' is about to be sent over 'session': assigns (or
// re-uses) the wire address for it and records that one more strong
// reference is now held by the remote side.
//
// Only local binders and proxies belonging to this same RPC session may
// leave; other proxies are rejected with INVALID_OPERATION. On success,
// *outAddress holds the address to embed in the parcel.
status_t RpcState::onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
                                   RpcAddress* outAddress) {
    bool isRemote = binder->remoteBinder();
    bool isRpc = isRemote && binder->remoteBinder()->isRpcBinder();

    if (isRpc && binder->remoteBinder()->getPrivateAccessorForId().rpcSession() != session) {
        // We need to be able to send instructions over the socket for how to
        // connect to a different server, and we also need to let the host
        // process know that this is happening.
        ALOGE("Cannot send binder from unrelated binder RPC session.");
        return INVALID_OPERATION;
    }

    if (isRemote && !isRpc) {
        // Without additional work, this would have the effect of using this
        // process to proxy calls from the socket over to the other process, and
        // it would make those calls look like they come from us (not over the
        // sockets). In order to make this work transparently like binder, we
        // would instead need to send instructions over the socket for how to
        // connect to the host process, and we also need to let the host process
        // know this was happening.
        ALOGE("Cannot send binder proxy %p over sockets", binder.get());
        return INVALID_OPERATION;
    }

    std::lock_guard<std::mutex> _l(mNodeMutex);

    // Linear scan to find an existing node for this binder.
    // TODO(b/182939933): maybe move address out of BpBinder, and keep binder->address map
    // in RpcState
    for (auto& [addr, node] : mNodeForAddress) {
        if (binder == node.binder) {
            if (isRpc) {
                const RpcAddress& actualAddr =
                        binder->remoteBinder()->getPrivateAccessorForId().rpcAddress();
                // TODO(b/182939933): this is only checking integrity of data structure
                // a different data structure doesn't need this
                LOG_ALWAYS_FATAL_IF(addr < actualAddr, "Address mismatch");
                LOG_ALWAYS_FATAL_IF(actualAddr < addr, "Address mismatch");
            }
            node.timesSent++;
            node.sentRef = binder; // might already be set
            *outAddress = addr;
            return OK;
        }
    }
    // An RPC proxy of this session must already be in the table (it was
    // created by onBinderEntering); only local binders may be new here.
    LOG_ALWAYS_FATAL_IF(isRpc, "RPC binder must have known address at this point");

    // First time this binder leaves over this state: mint a fresh unique
    // address and keep a strong ref (sentRef) on behalf of the remote side.
    auto&& [it, inserted] = mNodeForAddress.insert({RpcAddress::unique(),
                                                    BinderNode{
                                                            .binder = binder,
                                                            .timesSent = 1,
                                                            .sentRef = binder,
                                                    }});
    // TODO(b/182939933): better organization could avoid needing this log
    LOG_ALWAYS_FATAL_IF(!inserted);

    *outAddress = it->first;
    return OK;
}
93 
// Called when 'address' is read out of a parcel arriving over 'session':
// translates the wire address back into a binder, creating a new BpBinder
// proxy if the address has not been seen before.
sp<IBinder> RpcState::onBinderEntering(const sp<RpcSession>& session, const RpcAddress& address) {
    std::unique_lock<std::mutex> _l(mNodeMutex);

    if (auto it = mNodeForAddress.find(address); it != mNodeForAddress.end()) {
        // NOTE(review): promote() yields nullptr if the last strong ref raced
        // away; that nullptr is returned to the caller as-is — confirm callers
        // tolerate a null result here.
        sp<IBinder> binder = it->second.binder.promote();

        // implicitly have strong RPC refcount, since we received this binder
        it->second.timesRecd++;

        // Drop the lock before sendDecStrong, which presumably performs
        // socket I/O — TODO confirm it must not run under mNodeMutex.
        _l.unlock();

        // We have timesRecd RPC refcounts, but we only need to hold on to one
        // when we keep the object. All additional dec strongs are sent
        // immediately, we wait to send the last one in BpBinder::onLastDecStrong.
        (void)session->sendDecStrong(address);

        return binder;
    }

    auto&& [it, inserted] = mNodeForAddress.insert({address, BinderNode{}});
    LOG_ALWAYS_FATAL_IF(!inserted, "Failed to insert binder when creating proxy");

    // Currently, all binders are assumed to be part of the same session (no
    // device global binders in the RPC world).
    sp<IBinder> binder = BpBinder::create(session, it->first);
    it->second.binder = binder;
    it->second.timesRecd = 1;
    return binder;
}
123 
countBinders()124 size_t RpcState::countBinders() {
125     std::lock_guard<std::mutex> _l(mNodeMutex);
126     return mNodeForAddress.size();
127 }
128 
dump()129 void RpcState::dump() {
130     std::lock_guard<std::mutex> _l(mNodeMutex);
131     ALOGE("DUMP OF RpcState %p", this);
132     ALOGE("DUMP OF RpcState (%zu nodes)", mNodeForAddress.size());
133     for (const auto& [address, node] : mNodeForAddress) {
134         sp<IBinder> binder = node.binder.promote();
135 
136         const char* desc;
137         if (binder) {
138             if (binder->remoteBinder()) {
139                 if (binder->remoteBinder()->isRpcBinder()) {
140                     desc = "(rpc binder proxy)";
141                 } else {
142                     desc = "(binder proxy)";
143                 }
144             } else {
145                 desc = "(local binder)";
146             }
147         } else {
148             desc = "(null)";
149         }
150 
151         ALOGE("- BINDER NODE: %p times sent:%zu times recd: %zu a:%s type:%s",
152               node.binder.unsafe_get(), node.timesSent, node.timesRecd, address.toString().c_str(),
153               desc);
154     }
155     ALOGE("END DUMP OF RpcState");
156 }
157 
terminate()158 void RpcState::terminate() {
159     if (SHOULD_LOG_RPC_DETAIL) {
160         ALOGE("RpcState::terminate()");
161         dump();
162     }
163 
164     // if the destructor of a binder object makes another RPC call, then calling
165     // decStrong could deadlock. So, we must hold onto these binders until
166     // mNodeMutex is no longer taken.
167     std::vector<sp<IBinder>> tempHoldBinder;
168 
169     {
170         std::lock_guard<std::mutex> _l(mNodeMutex);
171         mTerminated = true;
172         for (auto& [address, node] : mNodeForAddress) {
173             sp<IBinder> binder = node.binder.promote();
174             LOG_ALWAYS_FATAL_IF(binder == nullptr, "Binder %p expected to be owned.", binder.get());
175 
176             if (node.sentRef != nullptr) {
177                 tempHoldBinder.push_back(node.sentRef);
178             }
179         }
180 
181         mNodeForAddress.clear();
182     }
183 }
184 
CommandData(size_t size)185 RpcState::CommandData::CommandData(size_t size) : mSize(size) {
186     // The maximum size for regular binder is 1MB for all concurrent
187     // transactions. A very small proportion of transactions are even
188     // larger than a page, but we need to avoid allocating too much
189     // data on behalf of an arbitrary client, or we could risk being in
190     // a position where a single additional allocation could run out of
191     // memory.
192     //
193     // Note, this limit may not reflect the total amount of data allocated for a
194     // transaction (in some cases, additional fixed size amounts are added),
195     // though for rough consistency, we should avoid cases where this data type
196     // is used for multiple dynamic allocations for a single transaction.
197     constexpr size_t kMaxTransactionAllocation = 100 * 1000;
198     if (size == 0) return;
199     if (size > kMaxTransactionAllocation) {
200         ALOGW("Transaction requested too much data allocation %zu", size);
201         return;
202     }
203     mData.reset(new (std::nothrow) uint8_t[size]);
204 }
205 
rpcSend(const base::unique_fd & fd,const char * what,const void * data,size_t size)206 bool RpcState::rpcSend(const base::unique_fd& fd, const char* what, const void* data, size_t size) {
207     LOG_RPC_DETAIL("Sending %s on fd %d: %s", what, fd.get(), hexString(data, size).c_str());
208 
209     if (size > std::numeric_limits<ssize_t>::max()) {
210         ALOGE("Cannot send %s at size %zu (too big)", what, size);
211         terminate();
212         return false;
213     }
214 
215     ssize_t sent = TEMP_FAILURE_RETRY(send(fd.get(), data, size, MSG_NOSIGNAL));
216 
217     if (sent < 0 || sent != static_cast<ssize_t>(size)) {
218         ALOGE("Failed to send %s (sent %zd of %zu bytes) on fd %d, error: %s", what, sent, size,
219               fd.get(), strerror(errno));
220 
221         terminate();
222         return false;
223     }
224 
225     return true;
226 }
227 
rpcRec(const base::unique_fd & fd,const char * what,void * data,size_t size)228 bool RpcState::rpcRec(const base::unique_fd& fd, const char* what, void* data, size_t size) {
229     if (size > std::numeric_limits<ssize_t>::max()) {
230         ALOGE("Cannot rec %s at size %zu (too big)", what, size);
231         terminate();
232         return false;
233     }
234 
235     ssize_t recd = TEMP_FAILURE_RETRY(recv(fd.get(), data, size, MSG_WAITALL | MSG_NOSIGNAL));
236 
237     if (recd < 0 || recd != static_cast<ssize_t>(size)) {
238         terminate();
239 
240         if (recd == 0 && errno == 0) {
241             LOG_RPC_DETAIL("No more data when trying to read %s on fd %d", what, fd.get());
242             return false;
243         }
244 
245         ALOGE("Failed to read %s (received %zd of %zu bytes) on fd %d, error: %s", what, recd, size,
246               fd.get(), strerror(errno));
247         return false;
248     } else {
249         LOG_RPC_DETAIL("Received %s on fd %d: %s", what, fd.get(), hexString(data, size).c_str());
250     }
251 
252     return true;
253 }
254 
getRootObject(const base::unique_fd & fd,const sp<RpcSession> & session)255 sp<IBinder> RpcState::getRootObject(const base::unique_fd& fd, const sp<RpcSession>& session) {
256     Parcel data;
257     data.markForRpc(session);
258     Parcel reply;
259 
260     status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_ROOT, data, session,
261                                &reply, 0);
262     if (status != OK) {
263         ALOGE("Error getting root object: %s", statusToString(status).c_str());
264         return nullptr;
265     }
266 
267     return reply.readStrongBinder();
268 }
269 
getMaxThreads(const base::unique_fd & fd,const sp<RpcSession> & session,size_t * maxThreadsOut)270 status_t RpcState::getMaxThreads(const base::unique_fd& fd, const sp<RpcSession>& session,
271                                  size_t* maxThreadsOut) {
272     Parcel data;
273     data.markForRpc(session);
274     Parcel reply;
275 
276     status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_MAX_THREADS, data,
277                                session, &reply, 0);
278     if (status != OK) {
279         ALOGE("Error getting max threads: %s", statusToString(status).c_str());
280         return status;
281     }
282 
283     int32_t maxThreads;
284     status = reply.readInt32(&maxThreads);
285     if (status != OK) return status;
286     if (maxThreads <= 0) {
287         ALOGE("Error invalid max maxThreads: %d", maxThreads);
288         return BAD_VALUE;
289     }
290 
291     *maxThreadsOut = maxThreads;
292     return OK;
293 }
294 
getSessionId(const base::unique_fd & fd,const sp<RpcSession> & session,int32_t * sessionIdOut)295 status_t RpcState::getSessionId(const base::unique_fd& fd, const sp<RpcSession>& session,
296                                 int32_t* sessionIdOut) {
297     Parcel data;
298     data.markForRpc(session);
299     Parcel reply;
300 
301     status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_SESSION_ID, data,
302                                session, &reply, 0);
303     if (status != OK) {
304         ALOGE("Error getting session ID: %s", statusToString(status).c_str());
305         return status;
306     }
307 
308     int32_t sessionId;
309     status = reply.readInt32(&sessionId);
310     if (status != OK) return status;
311 
312     *sessionIdOut = sessionId;
313     return OK;
314 }
315 
// Serializes and sends a transaction to 'address' over 'fd'. The zero
// address is reserved for special server commands. Oneway calls return
// immediately after the send; synchronous calls block in waitForReply().
//
// Returns DEAD_OBJECT on socket failure or after termination, BAD_TYPE for
// parcels not crafted for RPC, BAD_VALUE for oversized payloads.
status_t RpcState::transact(const base::unique_fd& fd, const RpcAddress& address, uint32_t code,
                            const Parcel& data, const sp<RpcSession>& session, Parcel* reply,
                            uint32_t flags) {
    uint64_t asyncNumber = 0;

    if (!address.isZero()) {
        std::lock_guard<std::mutex> _l(mNodeMutex);
        if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
        auto it = mNodeForAddress.find(address);
        LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(), "Sending transact on unknown address %s",
                            address.toString().c_str());

        if (flags & IBinder::FLAG_ONEWAY) {
            // Tag oneway calls with a per-node sequence number so the
            // receiver can execute them in order (see processTransactInternal).
            asyncNumber = it->second.asyncNumber++;
        }
    }

    if (!data.isForRpc()) {
        ALOGE("Refusing to send RPC with parcel not crafted for RPC");
        return BAD_TYPE;
    }

    // Attached objects (fds, binder pointers) have no wire representation here.
    if (data.objectsCount() != 0) {
        ALOGE("Parcel at %p has attached objects but is being used in an RPC call", &data);
        return BAD_TYPE;
    }

    RpcWireTransaction transaction{
            .address = address.viewRawEmbedded(),
            .code = code,
            .flags = flags,
            .asyncNumber = asyncNumber,
    };

    // Wire layout: RpcWireTransaction header immediately followed by payload.
    CommandData transactionData(sizeof(RpcWireTransaction) + data.dataSize());
    if (!transactionData.valid()) {
        return NO_MEMORY;
    }

    memcpy(transactionData.data() + 0, &transaction, sizeof(RpcWireTransaction));
    memcpy(transactionData.data() + sizeof(RpcWireTransaction), data.data(), data.dataSize());

    // bodySize on the wire is 32-bit; refuse anything that would not fit.
    if (transactionData.size() > std::numeric_limits<uint32_t>::max()) {
        ALOGE("Transaction size too big %zu", transactionData.size());
        return BAD_VALUE;
    }

    RpcWireHeader command{
            .command = RPC_COMMAND_TRANSACT,
            .bodySize = static_cast<uint32_t>(transactionData.size()),
    };

    if (!rpcSend(fd, "transact header", &command, sizeof(command))) {
        return DEAD_OBJECT;
    }
    if (!rpcSend(fd, "command body", transactionData.data(), transactionData.size())) {
        return DEAD_OBJECT;
    }

    if (flags & IBinder::FLAG_ONEWAY) {
        return OK; // do not wait for result
    }

    LOG_ALWAYS_FATAL_IF(reply == nullptr, "Reply parcel must be used for synchronous transaction.");

    return waitForReply(fd, session, reply);
}
383 
cleanup_reply_data(Parcel * p,const uint8_t * data,size_t dataSize,const binder_size_t * objects,size_t objectsCount)384 static void cleanup_reply_data(Parcel* p, const uint8_t* data, size_t dataSize,
385                                const binder_size_t* objects, size_t objectsCount) {
386     (void)p;
387     delete[] const_cast<uint8_t*>(data - offsetof(RpcWireReply, data));
388     (void)dataSize;
389     LOG_ALWAYS_FATAL_IF(objects != nullptr);
390     LOG_ALWAYS_FATAL_IF(objectsCount, 0);
391 }
392 
waitForReply(const base::unique_fd & fd,const sp<RpcSession> & session,Parcel * reply)393 status_t RpcState::waitForReply(const base::unique_fd& fd, const sp<RpcSession>& session,
394                                 Parcel* reply) {
395     RpcWireHeader command;
396     while (true) {
397         if (!rpcRec(fd, "command header", &command, sizeof(command))) {
398             return DEAD_OBJECT;
399         }
400 
401         if (command.command == RPC_COMMAND_REPLY) break;
402 
403         status_t status = processServerCommand(fd, session, command);
404         if (status != OK) return status;
405     }
406 
407     CommandData data(command.bodySize);
408     if (!data.valid()) {
409         return NO_MEMORY;
410     }
411 
412     if (!rpcRec(fd, "reply body", data.data(), command.bodySize)) {
413         return DEAD_OBJECT;
414     }
415 
416     if (command.bodySize < sizeof(RpcWireReply)) {
417         ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcWireReply. Terminating!",
418               sizeof(RpcWireReply), command.bodySize);
419         terminate();
420         return BAD_VALUE;
421     }
422     RpcWireReply* rpcReply = reinterpret_cast<RpcWireReply*>(data.data());
423     if (rpcReply->status != OK) return rpcReply->status;
424 
425     data.release();
426     reply->ipcSetDataReference(rpcReply->data, command.bodySize - offsetof(RpcWireReply, data),
427                                nullptr, 0, cleanup_reply_data);
428 
429     reply->markForRpc(session);
430 
431     return OK;
432 }
433 
sendDecStrong(const base::unique_fd & fd,const RpcAddress & addr)434 status_t RpcState::sendDecStrong(const base::unique_fd& fd, const RpcAddress& addr) {
435     {
436         std::lock_guard<std::mutex> _l(mNodeMutex);
437         if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
438         auto it = mNodeForAddress.find(addr);
439         LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(), "Sending dec strong on unknown address %s",
440                             addr.toString().c_str());
441         LOG_ALWAYS_FATAL_IF(it->second.timesRecd <= 0, "Bad dec strong %s",
442                             addr.toString().c_str());
443 
444         it->second.timesRecd--;
445         if (it->second.timesRecd == 0 && it->second.timesSent == 0) {
446             mNodeForAddress.erase(it);
447         }
448     }
449 
450     RpcWireHeader cmd = {
451             .command = RPC_COMMAND_DEC_STRONG,
452             .bodySize = sizeof(RpcWireAddress),
453     };
454     if (!rpcSend(fd, "dec ref header", &cmd, sizeof(cmd))) return DEAD_OBJECT;
455     if (!rpcSend(fd, "dec ref body", &addr.viewRawEmbedded(), sizeof(RpcWireAddress)))
456         return DEAD_OBJECT;
457     return OK;
458 }
459 
getAndExecuteCommand(const base::unique_fd & fd,const sp<RpcSession> & session)460 status_t RpcState::getAndExecuteCommand(const base::unique_fd& fd, const sp<RpcSession>& session) {
461     LOG_RPC_DETAIL("getAndExecuteCommand on fd %d", fd.get());
462 
463     RpcWireHeader command;
464     if (!rpcRec(fd, "command header", &command, sizeof(command))) {
465         return DEAD_OBJECT;
466     }
467 
468     return processServerCommand(fd, session, command);
469 }
470 
processServerCommand(const base::unique_fd & fd,const sp<RpcSession> & session,const RpcWireHeader & command)471 status_t RpcState::processServerCommand(const base::unique_fd& fd, const sp<RpcSession>& session,
472                                         const RpcWireHeader& command) {
473     switch (command.command) {
474         case RPC_COMMAND_TRANSACT:
475             return processTransact(fd, session, command);
476         case RPC_COMMAND_DEC_STRONG:
477             return processDecStrong(fd, command);
478     }
479 
480     // We should always know the version of the opposing side, and since the
481     // RPC-binder-level wire protocol is not self synchronizing, we have no way
482     // to understand where the current command ends and the next one begins. We
483     // also can't consider it a fatal error because this would allow any client
484     // to kill us, so ending the session for misbehaving client.
485     ALOGE("Unknown RPC command %d - terminating session", command.command);
486     terminate();
487     return DEAD_OBJECT;
488 }
processTransact(const base::unique_fd & fd,const sp<RpcSession> & session,const RpcWireHeader & command)489 status_t RpcState::processTransact(const base::unique_fd& fd, const sp<RpcSession>& session,
490                                    const RpcWireHeader& command) {
491     LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_TRANSACT, "command: %d", command.command);
492 
493     CommandData transactionData(command.bodySize);
494     if (!transactionData.valid()) {
495         return NO_MEMORY;
496     }
497     if (!rpcRec(fd, "transaction body", transactionData.data(), transactionData.size())) {
498         return DEAD_OBJECT;
499     }
500 
501     return processTransactInternal(fd, session, std::move(transactionData));
502 }
503 
do_nothing_to_transact_data(Parcel * p,const uint8_t * data,size_t dataSize,const binder_size_t * objects,size_t objectsCount)504 static void do_nothing_to_transact_data(Parcel* p, const uint8_t* data, size_t dataSize,
505                                         const binder_size_t* objects, size_t objectsCount) {
506     (void)p;
507     (void)data;
508     (void)dataSize;
509     (void)objects;
510     (void)objectsCount;
511 }
512 
processTransactInternal(const base::unique_fd & fd,const sp<RpcSession> & session,CommandData transactionData)513 status_t RpcState::processTransactInternal(const base::unique_fd& fd, const sp<RpcSession>& session,
514                                            CommandData transactionData) {
515     if (transactionData.size() < sizeof(RpcWireTransaction)) {
516         ALOGE("Expecting %zu but got %zu bytes for RpcWireTransaction. Terminating!",
517               sizeof(RpcWireTransaction), transactionData.size());
518         terminate();
519         return BAD_VALUE;
520     }
521     RpcWireTransaction* transaction = reinterpret_cast<RpcWireTransaction*>(transactionData.data());
522 
523     // TODO(b/182939933): heap allocation just for lookup in mNodeForAddress,
524     // maybe add an RpcAddress 'view' if the type remains 'heavy'
525     auto addr = RpcAddress::fromRawEmbedded(&transaction->address);
526 
527     status_t replyStatus = OK;
528     sp<IBinder> target;
529     if (!addr.isZero()) {
530         std::lock_guard<std::mutex> _l(mNodeMutex);
531 
532         auto it = mNodeForAddress.find(addr);
533         if (it == mNodeForAddress.end()) {
534             ALOGE("Unknown binder address %s.", addr.toString().c_str());
535             replyStatus = BAD_VALUE;
536         } else {
537             target = it->second.binder.promote();
538             if (target == nullptr) {
539                 // This can happen if the binder is remote in this process, and
540                 // another thread has called the last decStrong on this binder.
541                 // However, for local binders, it indicates a misbehaving client
542                 // (any binder which is being transacted on should be holding a
543                 // strong ref count), so in either case, terminating the
544                 // session.
545                 ALOGE("While transacting, binder has been deleted at address %s. Terminating!",
546                       addr.toString().c_str());
547                 terminate();
548                 replyStatus = BAD_VALUE;
549             } else if (target->localBinder() == nullptr) {
550                 ALOGE("Transactions can only go to local binders, not address %s. Terminating!",
551                       addr.toString().c_str());
552                 terminate();
553                 replyStatus = BAD_VALUE;
554             } else if (transaction->flags & IBinder::FLAG_ONEWAY) {
555                 if (transaction->asyncNumber != it->second.asyncNumber) {
556                     // we need to process some other asynchronous transaction
557                     // first
558                     // TODO(b/183140903): limit enqueues/detect overfill for bad client
559                     // TODO(b/183140903): detect when an object is deleted when it still has
560                     //        pending async transactions
561                     it->second.asyncTodo.push(BinderNode::AsyncTodo{
562                             .data = std::move(transactionData),
563                             .asyncNumber = transaction->asyncNumber,
564                     });
565                     LOG_RPC_DETAIL("Enqueuing %" PRId64 " on %s", transaction->asyncNumber,
566                                    addr.toString().c_str());
567                     return OK;
568                 }
569             }
570         }
571     }
572 
573     Parcel reply;
574     reply.markForRpc(session);
575 
576     if (replyStatus == OK) {
577         Parcel data;
578         // transaction->data is owned by this function. Parcel borrows this data and
579         // only holds onto it for the duration of this function call. Parcel will be
580         // deleted before the 'transactionData' object.
581         data.ipcSetDataReference(transaction->data,
582                                  transactionData.size() - offsetof(RpcWireTransaction, data),
583                                  nullptr /*object*/, 0 /*objectCount*/,
584                                  do_nothing_to_transact_data);
585         data.markForRpc(session);
586 
587         if (target) {
588             replyStatus = target->transact(transaction->code, data, &reply, transaction->flags);
589         } else {
590             LOG_RPC_DETAIL("Got special transaction %u", transaction->code);
591 
592             sp<RpcServer> server = session->server().promote();
593             if (server) {
594                 // special case for 'zero' address (special server commands)
595                 switch (transaction->code) {
596                     case RPC_SPECIAL_TRANSACT_GET_ROOT: {
597                         replyStatus = reply.writeStrongBinder(server->getRootObject());
598                         break;
599                     }
600                     case RPC_SPECIAL_TRANSACT_GET_MAX_THREADS: {
601                         replyStatus = reply.writeInt32(server->getMaxThreads());
602                         break;
603                     }
604                     case RPC_SPECIAL_TRANSACT_GET_SESSION_ID: {
605                         // only sessions w/ services can be the source of a
606                         // session ID (so still guarded by non-null server)
607                         //
608                         // sessions associated with servers must have an ID
609                         // (hence abort)
610                         int32_t id = session->getPrivateAccessorForId().get().value();
611                         replyStatus = reply.writeInt32(id);
612                         break;
613                     }
614                     default: {
615                         replyStatus = UNKNOWN_TRANSACTION;
616                     }
617                 }
618             } else {
619                 ALOGE("Special command sent, but no server object attached.");
620             }
621         }
622     }
623 
624     if (transaction->flags & IBinder::FLAG_ONEWAY) {
625         if (replyStatus != OK) {
626             ALOGW("Oneway call failed with error: %d", replyStatus);
627         }
628 
629         LOG_RPC_DETAIL("Processed async transaction %" PRId64 " on %s", transaction->asyncNumber,
630                        addr.toString().c_str());
631 
632         // Check to see if there is another asynchronous transaction to process.
633         // This behavior differs from binder behavior, since in the binder
634         // driver, asynchronous transactions will be processed after existing
635         // pending binder transactions on the queue. The downside of this is
636         // that asynchronous transactions can be drowned out by synchronous
637         // transactions. However, we have no easy way to queue these
638         // transactions after the synchronous transactions we may want to read
639         // from the wire. So, in socket binder here, we have the opposite
640         // downside: asynchronous transactions may drown out synchronous
641         // transactions.
642         {
643             std::unique_lock<std::mutex> _l(mNodeMutex);
644             auto it = mNodeForAddress.find(addr);
645             // last refcount dropped after this transaction happened
646             if (it == mNodeForAddress.end()) return OK;
647 
648             // note - only updated now, instead of later, so that other threads
649             // will queue any later transactions
650 
651             // TODO(b/183140903): support > 2**64 async transactions
652             //     (we can do this by allowing asyncNumber to wrap, since we
653             //     don't expect more than 2**64 simultaneous transactions)
654             it->second.asyncNumber++;
655 
656             if (it->second.asyncTodo.size() == 0) return OK;
657             if (it->second.asyncTodo.top().asyncNumber == it->second.asyncNumber) {
658                 LOG_RPC_DETAIL("Found next async transaction %" PRId64 " on %s",
659                                it->second.asyncNumber, addr.toString().c_str());
660 
661                 // justification for const_cast (consider avoiding priority_queue):
662                 // - AsyncTodo operator< doesn't depend on 'data' object
663                 // - gotta go fast
664                 CommandData data = std::move(
665                         const_cast<BinderNode::AsyncTodo&>(it->second.asyncTodo.top()).data);
666                 it->second.asyncTodo.pop();
667                 _l.unlock();
668                 return processTransactInternal(fd, session, std::move(data));
669             }
670         }
671         return OK;
672     }
673 
674     RpcWireReply rpcReply{
675             .status = replyStatus,
676     };
677 
678     CommandData replyData(sizeof(RpcWireReply) + reply.dataSize());
679     if (!replyData.valid()) {
680         return NO_MEMORY;
681     }
682     memcpy(replyData.data() + 0, &rpcReply, sizeof(RpcWireReply));
683     memcpy(replyData.data() + sizeof(RpcWireReply), reply.data(), reply.dataSize());
684 
685     if (replyData.size() > std::numeric_limits<uint32_t>::max()) {
686         ALOGE("Reply size too big %zu", transactionData.size());
687         terminate();
688         return BAD_VALUE;
689     }
690 
691     RpcWireHeader cmdReply{
692             .command = RPC_COMMAND_REPLY,
693             .bodySize = static_cast<uint32_t>(replyData.size()),
694     };
695 
696     if (!rpcSend(fd, "reply header", &cmdReply, sizeof(RpcWireHeader))) {
697         return DEAD_OBJECT;
698     }
699     if (!rpcSend(fd, "reply body", replyData.data(), replyData.size())) {
700         return DEAD_OBJECT;
701     }
702     return OK;
703 }
704 
processDecStrong(const base::unique_fd & fd,const RpcWireHeader & command)705 status_t RpcState::processDecStrong(const base::unique_fd& fd, const RpcWireHeader& command) {
706     LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_DEC_STRONG, "command: %d", command.command);
707 
708     CommandData commandData(command.bodySize);
709     if (!commandData.valid()) {
710         return NO_MEMORY;
711     }
712     if (!rpcRec(fd, "dec ref body", commandData.data(), commandData.size())) {
713         return DEAD_OBJECT;
714     }
715 
716     if (command.bodySize < sizeof(RpcWireAddress)) {
717         ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcWireAddress. Terminating!",
718               sizeof(RpcWireAddress), command.bodySize);
719         terminate();
720         return BAD_VALUE;
721     }
722     RpcWireAddress* address = reinterpret_cast<RpcWireAddress*>(commandData.data());
723 
724     // TODO(b/182939933): heap allocation just for lookup
725     auto addr = RpcAddress::fromRawEmbedded(address);
726     std::unique_lock<std::mutex> _l(mNodeMutex);
727     auto it = mNodeForAddress.find(addr);
728     if (it == mNodeForAddress.end()) {
729         ALOGE("Unknown binder address %s for dec strong.", addr.toString().c_str());
730         return OK;
731     }
732 
733     sp<IBinder> target = it->second.binder.promote();
734     if (target == nullptr) {
735         ALOGE("While requesting dec strong, binder has been deleted at address %s. Terminating!",
736               addr.toString().c_str());
737         terminate();
738         return BAD_VALUE;
739     }
740 
741     if (it->second.timesSent == 0) {
742         ALOGE("No record of sending binder, but requested decStrong: %s", addr.toString().c_str());
743         return OK;
744     }
745 
746     LOG_ALWAYS_FATAL_IF(it->second.sentRef == nullptr, "Inconsistent state, lost ref for %s",
747                         addr.toString().c_str());
748 
749     sp<IBinder> tempHold;
750 
751     it->second.timesSent--;
752     if (it->second.timesSent == 0) {
753         tempHold = it->second.sentRef;
754         it->second.sentRef = nullptr;
755 
756         if (it->second.timesRecd == 0) {
757             mNodeForAddress.erase(it);
758         }
759     }
760 
761     _l.unlock();
762     tempHold = nullptr; // destructor may make binder calls on this session
763 
764     return OK;
765 }
766 
767 } // namespace android
768