1 /*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "RpcState"
18
19 #include "RpcState.h"
20
21 #include <android-base/hex.h>
22 #include <android-base/macros.h>
23 #include <android-base/scopeguard.h>
24 #include <binder/BpBinder.h>
25 #include <binder/IPCThreadState.h>
26 #include <binder/RpcServer.h>
27
28 #include "Debug.h"
29 #include "RpcWireFormat.h"
30
31 #include <random>
32
33 #include <inttypes.h>
34
35 namespace android {
36
37 using base::ScopeGuard;
38
39 #if RPC_FLAKE_PRONE
40 void rpcMaybeWaitToFlake() {
41 [[clang::no_destroy]] static std::random_device r;
42 [[clang::no_destroy]] static std::mutex m;
43 unsigned num;
44 {
45 std::lock_guard<std::mutex> lock(m);
46 num = r();
47 }
48 if (num % 10 == 0) usleep(num % 1000);
49 }
50 #endif
51
52 RpcState::RpcState() {}
53 RpcState::~RpcState() {}
54
55 status_t RpcState::onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
56 uint64_t* outAddress) {
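// A binder in this process is about to go out over the wire on this session:
// find (or create) its node in mNodeForAddress, record the reference being
// sent (timesSent / sentRef), and report the wire address to transmit.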
57 bool isRemote = binder->remoteBinder();
58 bool isRpc = isRemote && binder->remoteBinder()->isRpcBinder();
59
60 if (isRpc && binder->remoteBinder()->getPrivateAccessor().rpcSession() != session) {
61 // To support this, we would need to be able to send instructions over the
62 // socket for how to connect to a different server, and we would also need
63 // to let the host process know that this is happening.
64 ALOGE("Cannot send binder from unrelated binder RPC session.");
65 return INVALID_OPERATION;
66 }
67
68 if (isRemote && !isRpc) {
69 // Without additional work, this would have the effect of using this
70 // process to proxy calls from the socket over to the other process, and
71 // it would make those calls look like they come from us (not over the
72 // sockets). In order to make this work transparently like binder, we
73 // would instead need to send instructions over the socket for how to
74 // connect to the host process, and we would also need to let the host
75 // process know this is happening.
76 ALOGE("Cannot send binder proxy %p over sockets", binder.get());
77 return INVALID_OPERATION;
78 }
79
80 std::lock_guard<std::mutex> _l(mNodeMutex);
81 if (mTerminated) return DEAD_OBJECT;
82
83 // TODO(b/182939933): maybe move address out of BpBinder, and keep binder->address map
84 // in RpcState
85 for (auto& [addr, node] : mNodeForAddress) {
86 if (binder == node.binder) {
87 if (isRpc) {
88 // check integrity of data structure
89 uint64_t actualAddr = binder->remoteBinder()->getPrivateAccessor().rpcAddress();
90 LOG_ALWAYS_FATAL_IF(addr != actualAddr, "Address mismatch %" PRIu64 " vs %" PRIu64,
91 addr, actualAddr);
92 }
93 node.timesSent++;
94 node.sentRef = binder; // might already be set
95 *outAddress = addr;
96 return OK;
97 }
98 }
99 LOG_ALWAYS_FATAL_IF(isRpc, "RPC binder must have known address at this point");
100
101 bool forServer = session->server() != nullptr;
102
103 // arbitrary limit for maximum number of nodes in a process (otherwise we
104 // might run out of addresses)
105 if (mNodeForAddress.size() > 100000) {
106 return NO_MEMORY;
107 }
108
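// Pick a fresh wire address. mNextId can wrap around, so keep generating
// candidates until one inserts successfully (i.e. is not already in use).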
109 while (true) {
110 RpcWireAddress address{
111 .options = RPC_WIRE_ADDRESS_OPTION_CREATED,
112 .address = mNextId,
113 };
114 if (forServer) {
115 address.options |= RPC_WIRE_ADDRESS_OPTION_FOR_SERVER;
116 }
117
118 // avoid ubsan abort
119 if (mNextId >= std::numeric_limits<uint32_t>::max()) {
120 mNextId = 0;
121 } else {
122 mNextId++;
123 }
124
125 auto&& [it, inserted] = mNodeForAddress.insert({RpcWireAddress::toRaw(address),
126 BinderNode{
127 .binder = binder,
128 .sentRef = binder,
129 .timesSent = 1,
130 }});
131 if (inserted) {
132 *outAddress = it->first;
133 return OK;
134 }
135 }
136 }
137
138 status_t RpcState::onBinderEntering(const sp<RpcSession>& session, uint64_t address,
139 sp<IBinder>* out) {
140 // Ensure that if we ever use addresses for something else in the future
141 // (for instance, allowing transitive binder sends), we don't accidentally
142 // send those addresses to an old server. Ignoring this case and
143 // considering the binder to be recognized could cause this process to
144 // accidentally proxy transactions for that binder. Of course, if we
145 // communicate with a binder, it could always be proxying information.
146 // However, we want to make sure that a client doesn't do so by
147 // accident.
148 RpcWireAddress addr = RpcWireAddress::fromRaw(address);
149 constexpr uint32_t kKnownOptions =
150 RPC_WIRE_ADDRESS_OPTION_CREATED | RPC_WIRE_ADDRESS_OPTION_FOR_SERVER;
151 if (addr.options & ~kKnownOptions) {
152 ALOGE("Address is of an unknown type, rejecting: %" PRIu64, address);
153 return BAD_VALUE;
154 }
155
156 std::lock_guard<std::mutex> _l(mNodeMutex);
157 if (mTerminated) return DEAD_OBJECT;
158
159 if (auto it = mNodeForAddress.find(address); it != mNodeForAddress.end()) {
160 *out = it->second.binder.promote();
161
162 // implicitly have strong RPC refcount, since we received this binder
163 it->second.timesRecd++;
164 return OK;
165 }
166
167 // we don't know about this binder, so the other side of the connection
168 // should have created it.
169 if (!!(addr.options & RPC_WIRE_ADDRESS_OPTION_FOR_SERVER) == !!session->server()) {
170 ALOGE("Received unrecognized address which this side of the session should have created %" PRIu64,
171 address);
172 return BAD_VALUE;
173 }
174
175 auto&& [it, inserted] = mNodeForAddress.insert({address, BinderNode{}});
176 LOG_ALWAYS_FATAL_IF(!inserted, "Failed to insert binder when creating proxy");
177
178 // Currently, all binders are assumed to be part of the same session (no
179 // device global binders in the RPC world).
180 it->second.binder = *out = BpBinder::PrivateAccessor::create(session, it->first);
181 it->second.timesRecd = 1;
182 return OK;
183 }
184
185 status_t RpcState::flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t address,
186 const sp<IBinder>& binder) {
187 // We can flush all references when the binder is destroyed. No need to send
188 // extra reference counting packets now.
189 if (binder->remoteBinder()) return OK;
190
191 std::unique_lock<std::mutex> _l(mNodeMutex);
192 if (mTerminated) return DEAD_OBJECT;
193
194 auto it = mNodeForAddress.find(address);
195
196 LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(), "Can't be deleted while we hold sp<>");
197 LOG_ALWAYS_FATAL_IF(it->second.binder != binder,
198 "Caller of flushExcessBinderRefs using inconsistent arguments");
199
200 LOG_ALWAYS_FATAL_IF(it->second.timesSent <= 0, "Local binder must have been sent %p",
201 binder.get());
202
203 // For a local binder, we only need to know that we sent it. Now that we
204 // have an sp<> for this call, we don't need anything more. If the other
205 // process is done with this binder, it needs to know we received the
206 // refcount associated with this call, so we can acknowledge that we
207 // received it. Once (or if) it has no other refcounts, it would reply with
208 // its own decStrong so that it could be removed from this session.
209 if (it->second.timesRecd != 0) {
210 _l.unlock();
211
212 return session->sendDecStrongToTarget(address, 0);
213 }
214
215 return OK;
216 }
217
218 size_t RpcState::countBinders() {
219 std::lock_guard<std::mutex> _l(mNodeMutex);
220 return mNodeForAddress.size();
221 }
222
223 void RpcState::dump() {
224 std::lock_guard<std::mutex> _l(mNodeMutex);
225 dumpLocked();
226 }
227
228 void RpcState::clear() {
229 std::unique_lock<std::mutex> _l(mNodeMutex);
230
231 if (mTerminated) {
232 LOG_ALWAYS_FATAL_IF(!mNodeForAddress.empty(),
233 "New state should be impossible after terminating!");
234 return;
235 }
236
237 if (SHOULD_LOG_RPC_DETAIL) {
238 ALOGE("RpcState::clear()");
239 dumpLocked();
240 }
241
242 // if the destructor of a binder object makes another RPC call, then calling
243 // decStrong could deadlock. So, we must hold onto these binders until
244 // mNodeMutex is no longer taken.
245 std::vector<sp<IBinder>> tempHoldBinder;
246
247 mTerminated = true;
248 for (auto& [address, node] : mNodeForAddress) {
249 sp<IBinder> binder = node.binder.promote();
250 LOG_ALWAYS_FATAL_IF(binder == nullptr, "Binder %p expected to be owned.", binder.get());
251
252 if (node.sentRef != nullptr) {
253 tempHoldBinder.push_back(node.sentRef);
254 }
255 }
256
257 mNodeForAddress.clear();
258
259 _l.unlock();
260 tempHoldBinder.clear(); // explicit
261 }
262
263 void RpcState::dumpLocked() {
264 ALOGE("DUMP OF RpcState %p", this);
265 ALOGE("DUMP OF RpcState (%zu nodes)", mNodeForAddress.size());
266 for (const auto& [address, node] : mNodeForAddress) {
267 sp<IBinder> binder = node.binder.promote();
268
269 const char* desc;
270 if (binder) {
271 if (binder->remoteBinder()) {
272 if (binder->remoteBinder()->isRpcBinder()) {
273 desc = "(rpc binder proxy)";
274 } else {
275 desc = "(binder proxy)";
276 }
277 } else {
278 desc = "(local binder)";
279 }
280 } else {
281 desc = "(null)";
282 }
283
284 ALOGE("- BINDER NODE: %p times sent:%zu times recd: %zu a: %" PRIu64 " type: %s",
285 node.binder.unsafe_get(), node.timesSent, node.timesRecd, address, desc);
286 }
287 ALOGE("END DUMP OF RpcState");
288 }
289
290
291 RpcState::CommandData::CommandData(size_t size) : mSize(size) {
292 // The maximum size for regular binder is 1MB for all concurrent
293 // transactions. A very small proportion of transactions are even
294 // larger than a page, but we need to avoid allocating too much
295 // data on behalf of an arbitrary client, or we could risk being in
296 // a position where a single additional allocation could run out of
297 // memory.
298 //
299 // Note, this limit may not reflect the total amount of data allocated for a
300 // transaction (in some cases, additional fixed size amounts are added),
301 // though for rough consistency, we should avoid cases where this data type
302 // is used for multiple dynamic allocations for a single transaction.
303 constexpr size_t kMaxTransactionAllocation = 100 * 1000;
304 if (size == 0) return;
305 if (size > kMaxTransactionAllocation) {
306 ALOGW("Transaction requested too much data allocation %zu", size);
307 return;
308 }
309 mData.reset(new (std::nothrow) uint8_t[size]);
310 }
311
312 status_t RpcState::rpcSend(const sp<RpcSession::RpcConnection>& connection,
313 const sp<RpcSession>& session, const char* what, iovec* iovs, int niovs,
314 const std::function<status_t()>& altPoll) {
315 for (int i = 0; i < niovs; i++) {
316 LOG_RPC_DETAIL("Sending %s (part %d of %d) on RpcTransport %p: %s",
317 what, i + 1, niovs, connection->rpcTransport.get(),
318 android::base::HexString(iovs[i].iov_base, iovs[i].iov_len).c_str());
319 }
320
321 if (status_t status =
322 connection->rpcTransport->interruptableWriteFully(session->mShutdownTrigger.get(),
323 iovs, niovs, altPoll);
324 status != OK) {
325 LOG_RPC_DETAIL("Failed to write %s (%d iovs) on RpcTransport %p, error: %s", what, niovs,
326 connection->rpcTransport.get(), statusToString(status).c_str());
327 (void)session->shutdownAndWait(false);
328 return status;
329 }
330
331 return OK;
332 }
333
334 status_t RpcState::rpcRec(const sp<RpcSession::RpcConnection>& connection,
335 const sp<RpcSession>& session, const char* what, iovec* iovs, int niovs) {
336 if (status_t status =
337 connection->rpcTransport->interruptableReadFully(session->mShutdownTrigger.get(),
338 iovs, niovs, {});
339 status != OK) {
340 LOG_RPC_DETAIL("Failed to read %s (%d iovs) on RpcTransport %p, error: %s", what, niovs,
341 connection->rpcTransport.get(), statusToString(status).c_str());
342 (void)session->shutdownAndWait(false);
343 return status;
344 }
345
346 for (int i = 0; i < niovs; i++) {
347 LOG_RPC_DETAIL("Received %s (part %d of %d) on RpcTransport %p: %s",
348 what, i + 1, niovs, connection->rpcTransport.get(),
349 android::base::HexString(iovs[i].iov_base, iovs[i].iov_len).c_str());
350 }
351 return OK;
352 }
353
354 status_t RpcState::readNewSessionResponse(const sp<RpcSession::RpcConnection>& connection,
355 const sp<RpcSession>& session, uint32_t* version) {
356 RpcNewSessionResponse response;
357 iovec iov{&response, sizeof(response)};
358 if (status_t status = rpcRec(connection, session, "new session response", &iov, 1);
359 status != OK) {
360 return status;
361 }
362 *version = response.version;
363 return OK;
364 }
365
366 status_t RpcState::sendConnectionInit(const sp<RpcSession::RpcConnection>& connection,
367 const sp<RpcSession>& session) {
368 RpcOutgoingConnectionInit init{
369 .msg = RPC_CONNECTION_INIT_OKAY,
370 };
371 iovec iov{&init, sizeof(init)};
372 return rpcSend(connection, session, "connection init", &iov, 1);
373 }
374
375 status_t RpcState::readConnectionInit(const sp<RpcSession::RpcConnection>& connection,
376 const sp<RpcSession>& session) {
377 RpcOutgoingConnectionInit init;
378 iovec iov{&init, sizeof(init)};
379 if (status_t status = rpcRec(connection, session, "connection init", &iov, 1); status != OK)
380 return status;
381
382 static_assert(sizeof(init.msg) == sizeof(RPC_CONNECTION_INIT_OKAY));
383 if (0 != strncmp(init.msg, RPC_CONNECTION_INIT_OKAY, sizeof(init.msg))) {
384 ALOGE("Connection init message unrecognized %.*s", static_cast<int>(sizeof(init.msg)),
385 init.msg);
386 return BAD_VALUE;
387 }
388 return OK;
389 }
390
391 sp<IBinder> RpcState::getRootObject(const sp<RpcSession::RpcConnection>& connection,
392 const sp<RpcSession>& session) {
393 Parcel data;
394 data.markForRpc(session);
395 Parcel reply;
396
397 status_t status =
398 transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_ROOT, data, session, &reply, 0);
399 if (status != OK) {
400 ALOGE("Error getting root object: %s", statusToString(status).c_str());
401 return nullptr;
402 }
403
404 return reply.readStrongBinder();
405 }
406
407 status_t RpcState::getMaxThreads(const sp<RpcSession::RpcConnection>& connection,
408 const sp<RpcSession>& session, size_t* maxThreadsOut) {
409 Parcel data;
410 data.markForRpc(session);
411 Parcel reply;
412
413 status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_MAX_THREADS, data,
414 session, &reply, 0);
415 if (status != OK) {
416 ALOGE("Error getting max threads: %s", statusToString(status).c_str());
417 return status;
418 }
419
420 int32_t maxThreads;
421 status = reply.readInt32(&maxThreads);
422 if (status != OK) return status;
423 if (maxThreads <= 0) {
424 ALOGE("Error invalid max maxThreads: %d", maxThreads);
425 return BAD_VALUE;
426 }
427
428 *maxThreadsOut = maxThreads;
429 return OK;
430 }
431
432 status_t RpcState::getSessionId(const sp<RpcSession::RpcConnection>& connection,
433 const sp<RpcSession>& session, std::vector<uint8_t>* sessionIdOut) {
434 Parcel data;
435 data.markForRpc(session);
436 Parcel reply;
437
438 status_t status = transactAddress(connection, 0, RPC_SPECIAL_TRANSACT_GET_SESSION_ID, data,
439 session, &reply, 0);
440 if (status != OK) {
441 ALOGE("Error getting session ID: %s", statusToString(status).c_str());
442 return status;
443 }
444
445 return reply.readByteVector(sessionIdOut);
446 }
447
448 status_t RpcState::transact(const sp<RpcSession::RpcConnection>& connection,
449 const sp<IBinder>& binder, uint32_t code, const Parcel& data,
450 const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
451 if (!data.isForRpc()) {
452 ALOGE("Refusing to send RPC with parcel not crafted for RPC call on binder %p code "
453 "%" PRIu32,
454 binder.get(), code);
455 return BAD_TYPE;
456 }
457
458 if (data.objectsCount() != 0) {
459 ALOGE("Parcel at %p has attached objects but is being used in an RPC call on binder %p "
460 "code %" PRIu32,
461 &data, binder.get(), code);
462 return BAD_TYPE;
463 }
464
465 uint64_t address;
466 if (status_t status = onBinderLeaving(session, binder, &address); status != OK) return status;
467
468 return transactAddress(connection, address, code, data, session, reply, flags);
469 }
470
471 status_t RpcState::transactAddress(const sp<RpcSession::RpcConnection>& connection,
472 uint64_t address, uint32_t code, const Parcel& data,
473 const sp<RpcSession>& session, Parcel* reply, uint32_t flags) {
474 LOG_ALWAYS_FATAL_IF(!data.isForRpc());
475 LOG_ALWAYS_FATAL_IF(data.objectsCount() != 0);
476
477 uint64_t asyncNumber = 0;
478
479 if (address != 0) {
480 std::unique_lock<std::mutex> _l(mNodeMutex);
481 if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
482 auto it = mNodeForAddress.find(address);
483 LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(),
484 "Sending transact on unknown address %" PRIu64, address);
485
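// Oneway calls are tagged with this node's next async number so that the
// receiving side can execute them in the order they were sent (e.g. oneway
// calls numbered 0, 1, 2 run in that order even if other traffic interleaves).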
486 if (flags & IBinder::FLAG_ONEWAY) {
487 asyncNumber = it->second.asyncNumber;
488 if (!nodeProgressAsyncNumber(&it->second)) {
489 _l.unlock();
490 (void)session->shutdownAndWait(false);
491 return DEAD_OBJECT;
492 }
493 }
494 }
495
496 LOG_ALWAYS_FATAL_IF(std::numeric_limits<int32_t>::max() - sizeof(RpcWireHeader) -
497 sizeof(RpcWireTransaction) <
498 data.dataSize(),
499 "Too much data %zu", data.dataSize());
500
501 RpcWireHeader command{
502 .command = RPC_COMMAND_TRANSACT,
503 .bodySize = static_cast<uint32_t>(sizeof(RpcWireTransaction) + data.dataSize()),
504 };
505
506 RpcWireTransaction transaction{
507 .address = RpcWireAddress::fromRaw(address),
508 .code = code,
509 .flags = flags,
510 .asyncNumber = asyncNumber,
511 };
512
513 constexpr size_t kWaitMaxUs = 1000000;
514 constexpr size_t kWaitLogUs = 10000;
515 size_t waitUs = 0;
516
517 // Oneway calls have no sync point, so if many were sent earlier they may
518 // have filled up the socket, regardless of whether this transaction is
519 // twoway or oneway. So, make sure we drain them before polling.
520 std::function<status_t()> drainRefs = [&] {
521 if (waitUs > kWaitLogUs) {
522 ALOGE("Cannot send command, trying to process pending refcounts. Waiting %zuus. Too "
523 "many oneway calls?",
524 waitUs);
525 }
526
527 if (waitUs > 0) {
528 usleep(waitUs);
529 waitUs = std::min(kWaitMaxUs, waitUs * 2);
530 } else {
531 waitUs = 1;
532 }
533
534 return drainCommands(connection, session, CommandType::CONTROL_ONLY);
535 };
536
537 iovec iovs[]{
538 {&command, sizeof(RpcWireHeader)},
539 {&transaction, sizeof(RpcWireTransaction)},
540 {const_cast<uint8_t*>(data.data()), data.dataSize()},
541 };
542 if (status_t status =
543 rpcSend(connection, session, "transaction", iovs, arraysize(iovs), drainRefs);
544 status != OK) {
545 // TODO(b/167966510): need to undo onBinderLeaving - we know the
546 // refcount isn't successfully transferred.
547 return status;
548 }
549
550 if (flags & IBinder::FLAG_ONEWAY) {
551 LOG_RPC_DETAIL("Oneway command, so no longer waiting on RpcTransport %p",
552 connection->rpcTransport.get());
553
554 // Do not wait on result.
555 return OK;
556 }
557
558 LOG_ALWAYS_FATAL_IF(reply == nullptr, "Reply parcel must be used for synchronous transaction.");
559
560 return waitForReply(connection, session, reply);
561 }
562
563 static void cleanup_reply_data(Parcel* p, const uint8_t* data, size_t dataSize,
564 const binder_size_t* objects, size_t objectsCount) {
565 (void)p;
566 delete[] const_cast<uint8_t*>(data - offsetof(RpcWireReply, data));
567 (void)dataSize;
568 LOG_ALWAYS_FATAL_IF(objects != nullptr);
569 LOG_ALWAYS_FATAL_IF(objectsCount != 0, "%zu objects remaining", objectsCount);
570 }
571
572 status_t RpcState::waitForReply(const sp<RpcSession::RpcConnection>& connection,
573 const sp<RpcSession>& session, Parcel* reply) {
574 RpcWireHeader command;
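// Commands other than the reply (for instance, nested transactions directed
// back at this thread) may arrive first; process them in place until an
// RPC_COMMAND_REPLY header shows up.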
575 while (true) {
576 iovec iov{&command, sizeof(command)};
577 if (status_t status = rpcRec(connection, session, "command header (for reply)", &iov, 1);
578 status != OK)
579 return status;
580
581 if (command.command == RPC_COMMAND_REPLY) break;
582
583 if (status_t status = processCommand(connection, session, command, CommandType::ANY);
584 status != OK)
585 return status;
586 }
587
588 CommandData data(command.bodySize);
589 if (!data.valid()) return NO_MEMORY;
590
591 iovec iov{data.data(), command.bodySize};
592 if (status_t status = rpcRec(connection, session, "reply body", &iov, 1); status != OK)
593 return status;
594
595 if (command.bodySize < sizeof(RpcWireReply)) {
596 ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcWireReply. Terminating!",
597 sizeof(RpcWireReply), command.bodySize);
598 (void)session->shutdownAndWait(false);
599 return BAD_VALUE;
600 }
601 RpcWireReply* rpcReply = reinterpret_cast<RpcWireReply*>(data.data());
602 if (rpcReply->status != OK) return rpcReply->status;
603
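// Hand ownership of the received buffer to the reply Parcel. When the Parcel
// is destroyed, cleanup_reply_data deletes the original allocation, adjusting
// for the RpcWireReply header that precedes the payload.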
604 data.release();
605 reply->ipcSetDataReference(rpcReply->data, command.bodySize - offsetof(RpcWireReply, data),
606 nullptr, 0, cleanup_reply_data);
607
608 reply->markForRpc(session);
609
610 return OK;
611 }
612
613 status_t RpcState::sendDecStrongToTarget(const sp<RpcSession::RpcConnection>& connection,
614 const sp<RpcSession>& session, uint64_t addr,
615 size_t target) {
616 RpcDecStrong body = {
617 .address = RpcWireAddress::fromRaw(addr),
618 };
619
620 {
621 std::lock_guard<std::mutex> _l(mNodeMutex);
622 if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
623 auto it = mNodeForAddress.find(addr);
624 LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(),
625 "Sending dec strong on unknown address %" PRIu64, addr);
626
627 LOG_ALWAYS_FATAL_IF(it->second.timesRecd < target, "Can't dec count of %zu to %zu.",
628 it->second.timesRecd, target);
629
630 // typically this happens when multiple threads send dec refs at the
631 // same time - the transactions will get combined automatically
632 if (it->second.timesRecd == target) return OK;
633
634 body.amount = it->second.timesRecd - target;
635 it->second.timesRecd = target;
636
637 LOG_ALWAYS_FATAL_IF(nullptr != tryEraseNode(it),
638 "Bad state. RpcState shouldn't own received binder");
639 }
640
641 RpcWireHeader cmd = {
642 .command = RPC_COMMAND_DEC_STRONG,
643 .bodySize = sizeof(RpcDecStrong),
644 };
645 iovec iovs[]{{&cmd, sizeof(cmd)}, {&body, sizeof(body)}};
646 return rpcSend(connection, session, "dec ref", iovs, arraysize(iovs));
647 }
648
649 status_t RpcState::getAndExecuteCommand(const sp<RpcSession::RpcConnection>& connection,
650 const sp<RpcSession>& session, CommandType type) {
651 LOG_RPC_DETAIL("getAndExecuteCommand on RpcTransport %p", connection->rpcTransport.get());
652
653 RpcWireHeader command;
654 iovec iov{&command, sizeof(command)};
655 if (status_t status = rpcRec(connection, session, "command header (for server)", &iov, 1);
656 status != OK)
657 return status;
658
659 return processCommand(connection, session, command, type);
660 }
661
662 status_t RpcState::drainCommands(const sp<RpcSession::RpcConnection>& connection,
663 const sp<RpcSession>& session, CommandType type) {
664 uint8_t buf;
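// Only handle commands that have already arrived: peek (so nothing is
// consumed here), and stop draining as soon as no data is immediately
// available (WOULD_BLOCK or a zero-byte peek).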
665 while (true) {
666 size_t num_bytes;
667 status_t status = connection->rpcTransport->peek(&buf, sizeof(buf), &num_bytes);
668 if (status == WOULD_BLOCK) break;
669 if (status != OK) return status;
670 if (!num_bytes) break;
671
672 status = getAndExecuteCommand(connection, session, type);
673 if (status != OK) return status;
674 }
675 return OK;
676 }
677
678 status_t RpcState::processCommand(const sp<RpcSession::RpcConnection>& connection,
679 const sp<RpcSession>& session, const RpcWireHeader& command,
680 CommandType type) {
681 IPCThreadState* kernelBinderState = IPCThreadState::selfOrNull();
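// Push a stack-pointer guard so IPCThreadState can flag code that tries to
// use kernel-binder calling-identity state while this thread is servicing an
// RPC command (this description of SpGuard's purpose is inferred from its use
// here).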
682 IPCThreadState::SpGuard spGuard{
683 .address = __builtin_frame_address(0),
684 .context = "processing binder RPC command",
685 };
686 const IPCThreadState::SpGuard* origGuard;
687 if (kernelBinderState != nullptr) {
688 origGuard = kernelBinderState->pushGetCallingSpGuard(&spGuard);
689 }
690 ScopeGuard guardUnguard = [&]() {
691 if (kernelBinderState != nullptr) {
692 kernelBinderState->restoreGetCallingSpGuard(origGuard);
693 }
694 };
695
696 switch (command.command) {
697 case RPC_COMMAND_TRANSACT:
698 if (type != CommandType::ANY) return BAD_TYPE;
699 return processTransact(connection, session, command);
700 case RPC_COMMAND_DEC_STRONG:
701 return processDecStrong(connection, session, command);
702 }
703
704 // We should always know the version of the opposing side, and since the
705 // RPC-binder-level wire protocol is not self-synchronizing, we have no way
706 // to understand where the current command ends and the next one begins. We
707 // also can't make this fatal, since that would let any client kill the
708 // process, so instead we end the session for the misbehaving client.
709 ALOGE("Unknown RPC command %d - terminating session", command.command);
710 (void)session->shutdownAndWait(false);
711 return DEAD_OBJECT;
712 }
713 status_t RpcState::processTransact(const sp<RpcSession::RpcConnection>& connection,
714 const sp<RpcSession>& session, const RpcWireHeader& command) {
715 LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_TRANSACT, "command: %d", command.command);
716
717 CommandData transactionData(command.bodySize);
718 if (!transactionData.valid()) {
719 return NO_MEMORY;
720 }
721 iovec iov{transactionData.data(), transactionData.size()};
722 if (status_t status = rpcRec(connection, session, "transaction body", &iov, 1); status != OK)
723 return status;
724
725 return processTransactInternal(connection, session, std::move(transactionData));
726 }
727
728 static void do_nothing_to_transact_data(Parcel* p, const uint8_t* data, size_t dataSize,
729 const binder_size_t* objects, size_t objectsCount) {
730 (void)p;
731 (void)data;
732 (void)dataSize;
733 (void)objects;
734 (void)objectsCount;
735 }
736
737 status_t RpcState::processTransactInternal(const sp<RpcSession::RpcConnection>& connection,
738 const sp<RpcSession>& session,
739 CommandData transactionData) {
740 // for 'recursive' calls to this, we have already read and processed the
741 // binder from the transaction data and taken reference counts into account,
742 // so it is cached here.
743 sp<IBinder> target;
744 processTransactInternalTailCall:
745
746 if (transactionData.size() < sizeof(RpcWireTransaction)) {
747 ALOGE("Expecting %zu but got %zu bytes for RpcWireTransaction. Terminating!",
748 sizeof(RpcWireTransaction), transactionData.size());
749 (void)session->shutdownAndWait(false);
750 return BAD_VALUE;
751 }
752 RpcWireTransaction* transaction = reinterpret_cast<RpcWireTransaction*>(transactionData.data());
753
754 uint64_t addr = RpcWireAddress::toRaw(transaction->address);
755 bool oneway = transaction->flags & IBinder::FLAG_ONEWAY;
756
757 status_t replyStatus = OK;
758 if (addr != 0) {
759 if (!target) {
760 replyStatus = onBinderEntering(session, addr, &target);
761 }
762
763 if (replyStatus != OK) {
764 // do nothing
765 } else if (target == nullptr) {
766 // This can happen if the binder is remote in this process, and
767 // another thread has called the last decStrong on this binder.
768 // However, for local binders, it indicates a misbehaving client
769 // (any binder which is being transacted on should be holding a
770 // strong ref count), so in either case we terminate the
771 // session.
772 ALOGE("While transacting, binder has been deleted at address %" PRIu64 ". Terminating!",
773 addr);
774 (void)session->shutdownAndWait(false);
775 replyStatus = BAD_VALUE;
776 } else if (target->localBinder() == nullptr) {
777 ALOGE("Unknown binder address or non-local binder, not address %" PRIu64
778 ". Terminating!",
779 addr);
780 (void)session->shutdownAndWait(false);
781 replyStatus = BAD_VALUE;
782 } else if (oneway) {
783 std::unique_lock<std::mutex> _l(mNodeMutex);
784 auto it = mNodeForAddress.find(addr);
785 if (it->second.binder.promote() != target) {
786 ALOGE("Binder became invalid during transaction. Bad client? %" PRIu64, addr);
787 replyStatus = BAD_VALUE;
788 } else if (transaction->asyncNumber != it->second.asyncNumber) {
789 // we need to process some other asynchronous transaction
790 // first
791 it->second.asyncTodo.push(BinderNode::AsyncTodo{
792 .ref = target,
793 .data = std::move(transactionData),
794 .asyncNumber = transaction->asyncNumber,
795 });
796
797 size_t numPending = it->second.asyncTodo.size();
798 LOG_RPC_DETAIL("Enqueuing %" PRIu64 " on %" PRIu64 " (%zu pending)",
799 transaction->asyncNumber, addr, numPending);
800
801 constexpr size_t kArbitraryOnewayCallTerminateLevel = 10000;
802 constexpr size_t kArbitraryOnewayCallWarnLevel = 1000;
803 constexpr size_t kArbitraryOnewayCallWarnPer = 1000;
804
805 if (numPending >= kArbitraryOnewayCallWarnLevel) {
806 if (numPending >= kArbitraryOnewayCallTerminateLevel) {
807 ALOGE("WARNING: %zu pending oneway transactions. Terminating!", numPending);
808 _l.unlock();
809 (void)session->shutdownAndWait(false);
810 return FAILED_TRANSACTION;
811 }
812
813 if (numPending % kArbitraryOnewayCallWarnPer == 0) {
814 ALOGW("Warning: many oneway transactions built up on %p (%zu)",
815 target.get(), numPending);
816 }
817 }
818 return OK;
819 }
820 }
821 }
822
823 Parcel reply;
824 reply.markForRpc(session);
825
826 if (replyStatus == OK) {
827 Parcel data;
828 // transaction->data is owned by this function. Parcel borrows this data and
829 // only holds onto it for the duration of this function call. Parcel will be
830 // deleted before the 'transactionData' object.
831 data.ipcSetDataReference(transaction->data,
832 transactionData.size() - offsetof(RpcWireTransaction, data),
833 nullptr /*object*/, 0 /*objectCount*/,
834 do_nothing_to_transact_data);
835 data.markForRpc(session);
836
837 if (target) {
838 bool origAllowNested = connection->allowNested;
839 connection->allowNested = !oneway;
840
841 replyStatus = target->transact(transaction->code, data, &reply, transaction->flags);
842
843 connection->allowNested = origAllowNested;
844 } else {
845 LOG_RPC_DETAIL("Got special transaction %u", transaction->code);
846
847 switch (transaction->code) {
848 case RPC_SPECIAL_TRANSACT_GET_MAX_THREADS: {
849 replyStatus = reply.writeInt32(session->getMaxIncomingThreads());
850 break;
851 }
852 case RPC_SPECIAL_TRANSACT_GET_SESSION_ID: {
853 // for client connections, this should always report the value
854 // originally returned from the server, so this is asserting
855 // that it exists
856 replyStatus = reply.writeByteVector(session->mId);
857 break;
858 }
859 default: {
860 sp<RpcServer> server = session->server();
861 if (server) {
862 switch (transaction->code) {
863 case RPC_SPECIAL_TRANSACT_GET_ROOT: {
864 sp<IBinder> root = session->mSessionSpecificRootObject
865 ?: server->getRootObject();
866 replyStatus = reply.writeStrongBinder(root);
867 break;
868 }
869 default: {
870 replyStatus = UNKNOWN_TRANSACTION;
871 }
872 }
873 } else {
874 ALOGE("Special command sent, but no server object attached.");
875 }
876 }
877 }
878 }
879 }
880
881 if (oneway) {
882 if (replyStatus != OK) {
883 ALOGW("Oneway call failed with error: %d", replyStatus);
884 }
885
886 LOG_RPC_DETAIL("Processed async transaction %" PRIu64 " on %" PRIu64,
887 transaction->asyncNumber, addr);
888
889 // Check to see if there is another asynchronous transaction to process.
890 // This behavior differs from binder behavior, since in the binder
891 // driver, asynchronous transactions will be processed after existing
892 // pending binder transactions on the queue. The downside of this is
893 // that asynchronous transactions can be drowned out by synchronous
894 // transactions. However, we have no easy way to queue these
895 // transactions after the synchronous transactions we may want to read
896 // from the wire. So, in socket binder here, we have the opposite
897 // downside: asynchronous transactions may drown out synchronous
898 // transactions.
899 {
900 std::unique_lock<std::mutex> _l(mNodeMutex);
901 auto it = mNodeForAddress.find(addr);
902 // last refcount dropped after this transaction happened
903 if (it == mNodeForAddress.end()) return OK;
904
905 if (!nodeProgressAsyncNumber(&it->second)) {
906 _l.unlock();
907 (void)session->shutdownAndWait(false);
908 return DEAD_OBJECT;
909 }
910
911 if (it->second.asyncTodo.size() == 0) return OK;
912 if (it->second.asyncTodo.top().asyncNumber == it->second.asyncNumber) {
913 LOG_RPC_DETAIL("Found next async transaction %" PRIu64 " on %" PRIu64,
914 it->second.asyncNumber, addr);
915
916 // justification for const_cast (consider avoiding priority_queue):
917 // - AsyncTodo operator< doesn't depend on 'data' or 'ref' objects
918 // - gotta go fast
919 auto& todo = const_cast<BinderNode::AsyncTodo&>(it->second.asyncTodo.top());
920
921 // reset up arguments
922 transactionData = std::move(todo.data);
923 LOG_ALWAYS_FATAL_IF(target != todo.ref,
924 "async list should be associated with a binder");
925
926 it->second.asyncTodo.pop();
927 goto processTransactInternalTailCall;
928 }
929 }
930
931 // done processing all the async commands on this binder that we can, so
932 // write decstrongs on the binder
933 if (addr != 0 && replyStatus == OK) {
934 return flushExcessBinderRefs(session, addr, target);
935 }
936
937 return OK;
938 }
939
940 // Binder refs are flushed for oneway calls only after all calls which are
941 // built up are executed. Otherwise, they fill up the binder buffer.
942 if (addr != 0 && replyStatus == OK) {
943 replyStatus = flushExcessBinderRefs(session, addr, target);
944 }
945
946 LOG_ALWAYS_FATAL_IF(std::numeric_limits<int32_t>::max() - sizeof(RpcWireHeader) -
947 sizeof(RpcWireReply) <
948 reply.dataSize(),
949 "Too much data for reply %zu", reply.dataSize());
950
951 RpcWireHeader cmdReply{
952 .command = RPC_COMMAND_REPLY,
953 .bodySize = static_cast<uint32_t>(sizeof(RpcWireReply) + reply.dataSize()),
954 };
955 RpcWireReply rpcReply{
956 .status = replyStatus,
957 };
958
959 iovec iovs[]{
960 {&cmdReply, sizeof(RpcWireHeader)},
961 {&rpcReply, sizeof(RpcWireReply)},
962 {const_cast<uint8_t*>(reply.data()), reply.dataSize()},
963 };
964 return rpcSend(connection, session, "reply", iovs, arraysize(iovs));
965 }
966
967 status_t RpcState::processDecStrong(const sp<RpcSession::RpcConnection>& connection,
968 const sp<RpcSession>& session, const RpcWireHeader& command) {
969 LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_DEC_STRONG, "command: %d", command.command);
970
971 CommandData commandData(command.bodySize);
972 if (!commandData.valid()) {
973 return NO_MEMORY;
974 }
975 iovec iov{commandData.data(), commandData.size()};
976 if (status_t status = rpcRec(connection, session, "dec ref body", &iov, 1); status != OK)
977 return status;
978
979 if (command.bodySize != sizeof(RpcDecStrong)) {
980 ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcDecStrong. Terminating!",
981 sizeof(RpcDecStrong), command.bodySize);
982 (void)session->shutdownAndWait(false);
983 return BAD_VALUE;
984 }
985 RpcDecStrong* body = reinterpret_cast<RpcDecStrong*>(commandData.data());
986
987 uint64_t addr = RpcWireAddress::toRaw(body->address);
988 std::unique_lock<std::mutex> _l(mNodeMutex);
989 auto it = mNodeForAddress.find(addr);
990 if (it == mNodeForAddress.end()) {
991 ALOGE("Unknown binder address %" PRIu64 " for dec strong.", addr);
992 return OK;
993 }
994
995 sp<IBinder> target = it->second.binder.promote();
996 if (target == nullptr) {
997 ALOGE("While requesting dec strong, binder has been deleted at address %" PRIu64
998 ". Terminating!",
999 addr);
1000 _l.unlock();
1001 (void)session->shutdownAndWait(false);
1002 return BAD_VALUE;
1003 }
1004
1005 if (it->second.timesSent < body->amount) {
1006 ALOGE("Record of sending binder %zu times, but requested decStrong for %" PRIu64 " of %u",
1007 it->second.timesSent, addr, body->amount);
1008 return OK;
1009 }
1010
1011 LOG_ALWAYS_FATAL_IF(it->second.sentRef == nullptr, "Inconsistent state, lost ref for %" PRIu64,
1012 addr);
1013
1014 LOG_RPC_DETAIL("Processing dec strong of %" PRIu64 " by %u from %zu", addr, body->amount,
1015 it->second.timesSent);
1016
1017 it->second.timesSent -= body->amount;
1018 sp<IBinder> tempHold = tryEraseNode(it);
1019 _l.unlock();
1020 tempHold = nullptr; // destructor may make binder calls on this session
1021
1022 return OK;
1023 }
1024
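// Releases bookkeeping for a node once neither side holds references through
// it: when timesSent reaches 0 the node's sentRef is returned to the caller,
// and if timesRecd is also 0 the map entry is erased. The caller must destroy
// the returned sp<> only after dropping mNodeMutex, since the binder's
// destructor may make calls back into this session.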
1025 sp<IBinder> RpcState::tryEraseNode(std::map<uint64_t, BinderNode>::iterator& it) {
1026 sp<IBinder> ref;
1027
1028 if (it->second.timesSent == 0) {
1029 ref = std::move(it->second.sentRef);
1030
1031 if (it->second.timesRecd == 0) {
1032 LOG_ALWAYS_FATAL_IF(!it->second.asyncTodo.empty(),
1033 "Can't delete binder w/ pending async transactions");
1034 mNodeForAddress.erase(it);
1035 }
1036 }
1037
1038 return ref;
1039 }
1040
1041 bool RpcState::nodeProgressAsyncNumber(BinderNode* node) {
1042 // 2**64 =~ 10**19: at 1000 transactions per second to a single binder, it
1043 // would take roughly 585 million years to run out of async numbers
1044 if (node->asyncNumber >= std::numeric_limits<decltype(node->asyncNumber)>::max()) {
1045 ALOGE("Out of async transaction IDs. Terminating");
1046 return false;
1047 }
1048 node->asyncNumber++;
1049 return true;
1050 }
1051
1052 } // namespace android
1053