/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "BpBinder"
//#define LOG_NDEBUG 0

#include <binder/BpBinder.h>

#include <binder/IPCThreadState.h>
#include <binder/IResultReceiver.h>
#include <binder/RpcSession.h>
#include <binder/Stability.h>
#include <cutils/compiler.h>
#include <utils/Log.h>

#include <stdio.h>

#include "BuildFlags.h"

#include <android-base/file.h>

//#undef ALOGV
//#define ALOGV(...) fprintf(stderr, __VA_ARGS__)

namespace android {

// ---------------------------------------------------------------------------

Mutex BpBinder::sTrackingLock;
std::unordered_map<int32_t, uint32_t> BpBinder::sTrackingMap;
std::unordered_map<int32_t, uint32_t> BpBinder::sLastLimitCallbackMap;
int BpBinder::sNumTrackedUids = 0;
std::atomic_bool BpBinder::sCountByUidEnabled(false);
binder_proxy_limit_callback BpBinder::sLimitCallback;
bool BpBinder::sBinderProxyThrottleCreate = false;

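// Sentinel for an uninitialized interface descriptor. isDescriptorCached() and
// getInterfaceDescriptor() compare against it by buffer pointer, so the empty
// string below never collides with a real (possibly empty) descriptor.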
static StaticString16 kDescriptorUninit(u"");

// Arbitrarily high value that probably distinguishes a badly behaving app
uint32_t BpBinder::sBinderProxyCountHighWatermark = 2500;
// Another arbitrary value the binder count needs to drop below before another callback will be called
uint32_t BpBinder::sBinderProxyCountLowWatermark = 2000;

// Log any transactions for which the data exceeds this size
#define LOG_TRANSACTIONS_OVER_SIZE (300 * 1024)

enum {
    LIMIT_REACHED_MASK = 0x80000000,   // A flag denoting that the limit has been reached
    COUNTING_VALUE_MASK = 0x7FFFFFFF,  // A mask of the remaining bits for the count value
};

BpBinder::ObjectManager::ObjectManager()
{
}

BpBinder::ObjectManager::~ObjectManager()
{
    kill();
}

void* BpBinder::ObjectManager::attach(const void* objectID, void* object, void* cleanupCookie,
                                      IBinder::object_cleanup_func func) {
    entry_t e;
    e.object = object;
    e.cleanupCookie = cleanupCookie;
    e.func = func;

    if (mObjects.find(objectID) != mObjects.end()) {
        ALOGI("Trying to attach object ID %p to binder ObjectManager %p with object %p, but object "
              "ID already in use",
              objectID, this, object);
        return mObjects[objectID].object;
    }

    mObjects.insert({objectID, e});
    return nullptr;
}

void* BpBinder::ObjectManager::find(const void* objectID) const
{
    auto i = mObjects.find(objectID);
    if (i == mObjects.end()) return nullptr;
    return i->second.object;
}

void* BpBinder::ObjectManager::detach(const void* objectID) {
    auto i = mObjects.find(objectID);
    if (i == mObjects.end()) return nullptr;
    void* value = i->second.object;
    mObjects.erase(i);
    return value;
}

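// Tag wraps a weak reference to the IBinder created by lookupOrCreateWeak(),
// so a still-live object can be promoted and reused on later lookups.
// cleanWeak() is installed as the cleanup function to delete the Tag when the
// ObjectManager entry is destroyed.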
namespace {
struct Tag {
    wp<IBinder> binder;
};
} // namespace

static void cleanWeak(const void* /* id */, void* obj, void* /* cookie */) {
    delete static_cast<Tag*>(obj);
}

sp<IBinder> BpBinder::ObjectManager::lookupOrCreateWeak(const void* objectID, object_make_func make,
                                                        const void* makeArgs) {
    entry_t& e = mObjects[objectID];
    if (e.object != nullptr) {
        if (auto attached = static_cast<Tag*>(e.object)->binder.promote()) {
            return attached;
        }
    } else {
        e.object = new Tag;
        LOG_ALWAYS_FATAL_IF(!e.object, "no more memory");
    }
    sp<IBinder> newObj = make(makeArgs);

    static_cast<Tag*>(e.object)->binder = newObj;
    e.cleanupCookie = nullptr;
    e.func = cleanWeak;

    return newObj;
}

void BpBinder::ObjectManager::kill()
{
    const size_t N = mObjects.size();
    ALOGV("Killing %zu objects in manager %p", N, this);
    for (auto i : mObjects) {
        const entry_t& e = i.second;
        if (e.func != nullptr) {
            e.func(i.first, e.object, e.cleanupCookie);
        }
    }

    mObjects.clear();
}

// ---------------------------------------------------------------------------

sp<BpBinder> BpBinder::create(int32_t handle) {
    if constexpr (!kEnableKernelIpc) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return nullptr;
    }

    int32_t trackedUid = -1;
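    // Per-uid proxy accounting: each sTrackingMap entry holds a count in
    // COUNTING_VALUE_MASK plus a LIMIT_REACHED_MASK flag. The flag is set (and
    // the limit callback fired) when a uid's count reaches
    // sBinderProxyCountHighWatermark, and cleared again in ~BpBinder() once the
    // count drops to sBinderProxyCountLowWatermark or below.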
    if (sCountByUidEnabled) {
        trackedUid = IPCThreadState::self()->getCallingUid();
        AutoMutex _l(sTrackingLock);
        uint32_t trackedValue = sTrackingMap[trackedUid];
        if (CC_UNLIKELY(trackedValue & LIMIT_REACHED_MASK)) {
            if (sBinderProxyThrottleCreate) {
                return nullptr;
            }
            trackedValue = trackedValue & COUNTING_VALUE_MASK;
            uint32_t lastLimitCallbackAt = sLastLimitCallbackMap[trackedUid];

            if (trackedValue > lastLimitCallbackAt &&
                (trackedValue - lastLimitCallbackAt > sBinderProxyCountHighWatermark)) {
                ALOGE("Still too many binder proxy objects sent to uid %d from uid %d (%d proxies "
                      "held)",
                      getuid(), trackedUid, trackedValue);
                if (sLimitCallback) sLimitCallback(trackedUid);
                sLastLimitCallbackMap[trackedUid] = trackedValue;
            }
        } else {
            if ((trackedValue & COUNTING_VALUE_MASK) >= sBinderProxyCountHighWatermark) {
                ALOGE("Too many binder proxy objects sent to uid %d from uid %d (%d proxies held)",
                      getuid(), trackedUid, trackedValue);
                sTrackingMap[trackedUid] |= LIMIT_REACHED_MASK;
                if (sLimitCallback) sLimitCallback(trackedUid);
                sLastLimitCallbackMap[trackedUid] = trackedValue & COUNTING_VALUE_MASK;
                if (sBinderProxyThrottleCreate) {
                    ALOGI("Throttling binder proxy creates from uid %d in uid %d until binder proxy"
                          " count drops below %d",
                          trackedUid, getuid(), sBinderProxyCountLowWatermark);
                    return nullptr;
                }
            }
        }
        sTrackingMap[trackedUid]++;
    }
    return sp<BpBinder>::make(BinderHandle{handle}, trackedUid);
}

sp<BpBinder> BpBinder::create(const sp<RpcSession>& session, uint64_t address) {
    LOG_ALWAYS_FATAL_IF(session == nullptr, "BpBinder::create null session");

    // These are not currently tracked, since there is no UID or other
    // identifier to track them with. However, if similar functionality is
    // needed, session objects keep track of all BpBinder objects on a
    // per-session basis.

    return sp<BpBinder>::make(RpcHandle{session, address});
}

BpBinder::BpBinder(Handle&& handle)
      : mStability(0),
        mHandle(handle),
        mAlive(true),
        mObitsSent(false),
        mObituaries(nullptr),
        mDescriptorCache(kDescriptorUninit),
        mTrackedUid(-1) {
    extendObjectLifetime(OBJECT_LIFETIME_WEAK);
}

BpBinder::BpBinder(BinderHandle&& handle, int32_t trackedUid) : BpBinder(Handle(handle)) {
    if constexpr (!kEnableKernelIpc) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return;
    }

    mTrackedUid = trackedUid;

    ALOGV("Creating BpBinder %p handle %d\n", this, this->binderHandle());

    IPCThreadState::self()->incWeakHandle(this->binderHandle(), this);
}

BpBinder::BpBinder(RpcHandle&& handle) : BpBinder(Handle(handle)) {
    LOG_ALWAYS_FATAL_IF(rpcSession() == nullptr, "BpBinder created w/o session object");
}

bool BpBinder::isRpcBinder() const {
    return std::holds_alternative<RpcHandle>(mHandle);
}

uint64_t BpBinder::rpcAddress() const {
    return std::get<RpcHandle>(mHandle).address;
}

const sp<RpcSession>& BpBinder::rpcSession() const {
    return std::get<RpcHandle>(mHandle).session;
}

int32_t BpBinder::binderHandle() const {
    return std::get<BinderHandle>(mHandle).handle;
}

std::optional<int32_t> BpBinder::getDebugBinderHandle() const {
    if (!isRpcBinder()) {
        return binderHandle();
    } else {
        return std::nullopt;
    }
}

bool BpBinder::isDescriptorCached() const {
    Mutex::Autolock _l(mLock);
    return mDescriptorCache.string() != kDescriptorUninit.string();
}

const String16& BpBinder::getInterfaceDescriptor() const
{
    if (!isDescriptorCached()) {
        sp<BpBinder> thiz = sp<BpBinder>::fromExisting(const_cast<BpBinder*>(this));

        Parcel data;
        data.markForBinder(thiz);
        Parcel reply;
        // do the IPC without a lock held.
        status_t err = thiz->transact(INTERFACE_TRANSACTION, data, &reply);
        if (err == NO_ERROR) {
            String16 res(reply.readString16());
            Mutex::Autolock _l(mLock);
            // mDescriptorCache could have been assigned while the lock was
            // released.
            if (mDescriptorCache.string() == kDescriptorUninit.string()) mDescriptorCache = res;
        }
    }

    // We're returning a reference to a non-static object here. Usually this
    // is not a smart thing to do; however, binder objects are
    // reference-counted, so it is (usually) safe.

    return mDescriptorCache;
}

bool BpBinder::isBinderAlive() const
{
    return mAlive != 0;
}

status_t BpBinder::pingBinder()
{
    Parcel data;
    data.markForBinder(sp<BpBinder>::fromExisting(this));
    Parcel reply;
    return transact(PING_TRANSACTION, data, &reply);
}

status_t BpBinder::startRecordingBinder(const android::base::unique_fd& fd) {
    Parcel send, reply;
    send.writeUniqueFileDescriptor(fd);
    return transact(START_RECORDING_TRANSACTION, send, &reply);
}

status_t BpBinder::stopRecordingBinder() {
    Parcel data, reply;
    data.markForBinder(sp<BpBinder>::fromExisting(this));
    return transact(STOP_RECORDING_TRANSACTION, data, &reply);
}

status_t BpBinder::dump(int fd, const Vector<String16>& args)
{
    Parcel send;
    Parcel reply;
    send.writeFileDescriptor(fd);
    const size_t numArgs = args.size();
    send.writeInt32(numArgs);
    for (size_t i = 0; i < numArgs; i++) {
        send.writeString16(args[i]);
    }
    status_t err = transact(DUMP_TRANSACTION, send, &reply);
    return err;
}

// NOLINTNEXTLINE(google-default-arguments)
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        bool privateVendor = flags & FLAG_PRIVATE_VENDOR;
        // don't send userspace flags to the kernel
        flags = flags & ~static_cast<uint32_t>(FLAG_PRIVATE_VENDOR);

        // user transactions require a given stability level
        if (code >= FIRST_CALL_TRANSACTION && code <= LAST_CALL_TRANSACTION) {
            using android::internal::Stability;

            int16_t stability = Stability::getRepr(this);
            Stability::Level required = privateVendor ? Stability::VENDOR
                                                      : Stability::getLocalLevel();

            if (CC_UNLIKELY(!Stability::check(stability, required))) {
                ALOGE("Cannot do a user transaction on a %s binder (%s) in a %s context.",
                      Stability::levelString(stability).c_str(),
                      String8(getInterfaceDescriptor()).c_str(),
                      Stability::levelString(required).c_str());
                return BAD_TYPE;
            }
        }

        status_t status;
        if (CC_UNLIKELY(isRpcBinder())) {
            status = rpcSession()->transact(sp<IBinder>::fromExisting(this), code, data, reply,
                                            flags);
        } else {
            if constexpr (!kEnableKernelIpc) {
                LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
                return INVALID_OPERATION;
            }

            status = IPCThreadState::self()->transact(binderHandle(), code, data, reply, flags);
        }
        if (data.dataSize() > LOG_TRANSACTIONS_OVER_SIZE) {
            Mutex::Autolock _l(mLock);
            ALOGW("Large outgoing transaction of %zu bytes, interface descriptor %s, code %d",
                  data.dataSize(), String8(mDescriptorCache).c_str(), code);
        }

        if (status == DEAD_OBJECT) mAlive = 0;

        return status;
    }

    return DEAD_OBJECT;
}

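// For kernel binders, a death notification is registered with the driver only
// when the first Obituary is added below; later recipients are appended to the
// same list and share that single registration.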
// NOLINTNEXTLINE(google-default-arguments)
status_t BpBinder::linkToDeath(
    const sp<DeathRecipient>& recipient, void* cookie, uint32_t flags)
{
    if (isRpcBinder()) {
        if (rpcSession()->getMaxIncomingThreads() < 1) {
            ALOGE("Cannot register a DeathRecipient without any incoming threads. Need to set max "
                  "incoming threads to a value greater than 0 before calling linkToDeath.");
            return INVALID_OPERATION;
        }
    } else if constexpr (!kEnableKernelIpc) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
    } else {
        if (ProcessState::self()->getThreadPoolMaxTotalThreadCount() == 0) {
            ALOGW("Linking to death on %s but there are no threads (yet?) listening to incoming "
                  "transactions. See ProcessState::startThreadPool and "
                  "ProcessState::setThreadPoolMaxThreadCount. Generally you should setup the "
                  "binder "
                  "threadpool before other initialization steps.",
                  String8(getInterfaceDescriptor()).c_str());
        }
    }

    Obituary ob;
    ob.recipient = recipient;
    ob.cookie = cookie;
    ob.flags = flags;

    LOG_ALWAYS_FATAL_IF(recipient == nullptr,
                        "linkToDeath(): recipient must be non-NULL");

    {
        AutoMutex _l(mLock);

        if (!mObitsSent) {
            if (!mObituaries) {
                mObituaries = new Vector<Obituary>;
                if (!mObituaries) {
                    return NO_MEMORY;
                }
                ALOGV("Requesting death notification: %p handle %d\n", this, binderHandle());
                if (!isRpcBinder()) {
                    if constexpr (kEnableKernelIpc) {
                        getWeakRefs()->incWeak(this);
                        IPCThreadState* self = IPCThreadState::self();
                        self->requestDeathNotification(binderHandle(), this);
                        self->flushCommands();
                    }
                }
            }
            ssize_t res = mObituaries->add(ob);
            return res >= (ssize_t)NO_ERROR ? (status_t)NO_ERROR : res;
        }
    }

    return DEAD_OBJECT;
}

// NOLINTNEXTLINE(google-default-arguments)
status_t BpBinder::unlinkToDeath(
    const wp<DeathRecipient>& recipient, void* cookie, uint32_t flags,
    wp<DeathRecipient>* outRecipient)
{
    if (!kEnableKernelIpc && !isRpcBinder()) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
    }

    AutoMutex _l(mLock);

    if (mObitsSent) {
        return DEAD_OBJECT;
    }

    const size_t N = mObituaries ? mObituaries->size() : 0;
    for (size_t i = 0; i < N; i++) {
        const Obituary& obit = mObituaries->itemAt(i);
        if ((obit.recipient == recipient
             || (recipient == nullptr && obit.cookie == cookie))
            && obit.flags == flags) {
            if (outRecipient != nullptr) {
                *outRecipient = mObituaries->itemAt(i).recipient;
            }
            mObituaries->removeAt(i);
            if (mObituaries->size() == 0) {
                ALOGV("Clearing death notification: %p handle %d\n", this, binderHandle());
                if (!isRpcBinder()) {
                    if constexpr (kEnableKernelIpc) {
                        IPCThreadState* self = IPCThreadState::self();
                        self->clearDeathNotification(binderHandle(), this);
                        self->flushCommands();
                    }
                }
                delete mObituaries;
                mObituaries = nullptr;
            }
            return NO_ERROR;
        }
    }

    return NAME_NOT_FOUND;
}

void BpBinder::sendObituary()
{
    if (!kEnableKernelIpc && !isRpcBinder()) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return;
    }

    ALOGV("Sending obituary for proxy %p handle %d, mObitsSent=%s\n", this, binderHandle(),
          mObitsSent ? "true" : "false");

    mAlive = 0;
    if (mObitsSent) return;

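    // Detach the obituary list while holding the lock, then invoke the
    // recipients below without holding it.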
    mLock.lock();
    Vector<Obituary>* obits = mObituaries;
    if (obits != nullptr) {
        ALOGV("Clearing sent death notification: %p handle %d\n", this, binderHandle());
        if (!isRpcBinder()) {
            if constexpr (kEnableKernelIpc) {
                IPCThreadState* self = IPCThreadState::self();
                self->clearDeathNotification(binderHandle(), this);
                self->flushCommands();
            }
        }
        mObituaries = nullptr;
    }
    mObitsSent = 1;
    mLock.unlock();

    ALOGV("Reporting death of proxy %p for %zu recipients\n",
          this, obits ? obits->size() : 0U);

    if (obits != nullptr) {
        const size_t N = obits->size();
        for (size_t i = 0; i < N; i++) {
            reportOneDeath(obits->itemAt(i));
        }

        delete obits;
    }
}

void BpBinder::reportOneDeath(const Obituary& obit)
{
    sp<DeathRecipient> recipient = obit.recipient.promote();
    ALOGV("Reporting death to recipient: %p\n", recipient.get());
    if (recipient == nullptr) return;

    recipient->binderDied(wp<BpBinder>::fromExisting(this));
}

void* BpBinder::attachObject(const void* objectID, void* object, void* cleanupCookie,
                             object_cleanup_func func) {
    AutoMutex _l(mLock);
    ALOGV("Attaching object %p to binder %p (manager=%p)", object, this, &mObjects);
    return mObjects.attach(objectID, object, cleanupCookie, func);
}

void* BpBinder::findObject(const void* objectID) const
{
    AutoMutex _l(mLock);
    return mObjects.find(objectID);
}

void* BpBinder::detachObject(const void* objectID) {
    AutoMutex _l(mLock);
    return mObjects.detach(objectID);
}

void BpBinder::withLock(const std::function<void()>& doWithLock) {
    AutoMutex _l(mLock);
    doWithLock();
}

sp<IBinder> BpBinder::lookupOrCreateWeak(const void* objectID, object_make_func make,
                                         const void* makeArgs) {
    AutoMutex _l(mLock);
    return mObjects.lookupOrCreateWeak(objectID, make, makeArgs);
}

BpBinder* BpBinder::remoteBinder()
{
    return this;
}

BpBinder::~BpBinder() {
    if (CC_UNLIKELY(isRpcBinder())) return;

    if constexpr (!kEnableKernelIpc) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return;
    }

    ALOGV("Destroying BpBinder %p handle %d\n", this, binderHandle());

    IPCThreadState* ipc = IPCThreadState::self();

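    // Undo the proxy accounting done in create(): decrement this uid's count,
    // and once it drops to the low watermark or below clear LIMIT_REACHED_MASK
    // so the limit callback can fire again.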
    if (mTrackedUid >= 0) {
        AutoMutex _l(sTrackingLock);
        uint32_t trackedValue = sTrackingMap[mTrackedUid];
        if (CC_UNLIKELY((trackedValue & COUNTING_VALUE_MASK) == 0)) {
            ALOGE("Unexpected Binder Proxy tracking decrement in %p handle %d\n", this,
                  binderHandle());
        } else {
            if (CC_UNLIKELY(
                    (trackedValue & LIMIT_REACHED_MASK) &&
                    ((trackedValue & COUNTING_VALUE_MASK) <= sBinderProxyCountLowWatermark)
                )) {
                ALOGI("Limit reached bit reset for uid %d (fewer than %d proxies from uid %d held)",
                      getuid(), sBinderProxyCountLowWatermark, mTrackedUid);
                sTrackingMap[mTrackedUid] &= ~LIMIT_REACHED_MASK;
                sLastLimitCallbackMap.erase(mTrackedUid);
            }
            if (--sTrackingMap[mTrackedUid] == 0) {
                sTrackingMap.erase(mTrackedUid);
            }
        }
    }

    if (ipc) {
        ipc->expungeHandle(binderHandle(), this);
        ipc->decWeakHandle(binderHandle());
    }
}

void BpBinder::onFirstRef() {
    if (CC_UNLIKELY(isRpcBinder())) return;

    if constexpr (!kEnableKernelIpc) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return;
    }

    ALOGV("onFirstRef BpBinder %p handle %d\n", this, binderHandle());
    IPCThreadState* ipc = IPCThreadState::self();
    if (ipc) ipc->incStrongHandle(binderHandle(), this);
}

void BpBinder::onLastStrongRef(const void* /*id*/) {
    if (CC_UNLIKELY(isRpcBinder())) {
        (void)rpcSession()->sendDecStrong(this);
        return;
    }

    if constexpr (!kEnableKernelIpc) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return;
    }

    ALOGV("onLastStrongRef BpBinder %p handle %d\n", this, binderHandle());
    IF_ALOGV() {
        printRefs();
    }
    IPCThreadState* ipc = IPCThreadState::self();
    if (ipc) ipc->decStrongHandle(binderHandle());

    mLock.lock();
    Vector<Obituary>* obits = mObituaries;
    if (obits != nullptr) {
        if (!obits->isEmpty()) {
            ALOGI("onLastStrongRef automatically unlinking death recipients: %s",
                  String8(mDescriptorCache).c_str());
        }

        if (ipc) ipc->clearDeathNotification(binderHandle(), this);
        mObituaries = nullptr;
    }
    mLock.unlock();

    if (obits != nullptr) {
        // XXX Should we tell any remaining DeathRecipient
        // objects that the last strong ref has gone away, so they
        // are no longer linked?
        delete obits;
    }
}

bool BpBinder::onIncStrongAttempted(uint32_t /*flags*/, const void* /*id*/)
{
    // RPC binder doesn't currently support inc from weak binders
    if (CC_UNLIKELY(isRpcBinder())) return false;

    if constexpr (!kEnableKernelIpc) {
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return false;
    }

    ALOGV("onIncStrongAttempted BpBinder %p handle %d\n", this, binderHandle());
    IPCThreadState* ipc = IPCThreadState::self();
    return ipc ? ipc->attemptIncStrongHandle(binderHandle()) == NO_ERROR : false;
}

uint32_t BpBinder::getBinderProxyCount(uint32_t uid)
{
    AutoMutex _l(sTrackingLock);
    auto it = sTrackingMap.find(uid);
    if (it != sTrackingMap.end()) {
        return it->second & COUNTING_VALUE_MASK;
    }
    return 0;
}

void BpBinder::getCountByUid(Vector<uint32_t>& uids, Vector<uint32_t>& counts)
{
    AutoMutex _l(sTrackingLock);
    uids.setCapacity(sTrackingMap.size());
    counts.setCapacity(sTrackingMap.size());
    for (const auto& it : sTrackingMap) {
        uids.push_back(it.first);
        counts.push_back(it.second & COUNTING_VALUE_MASK);
    }
}

void BpBinder::enableCountByUid() { sCountByUidEnabled.store(true); }
void BpBinder::disableCountByUid() { sCountByUidEnabled.store(false); }
void BpBinder::setCountByUidEnabled(bool enable) { sCountByUidEnabled.store(enable); }

void BpBinder::setLimitCallback(binder_proxy_limit_callback cb) {
    AutoMutex _l(sTrackingLock);
    sLimitCallback = cb;
}

void BpBinder::setBinderProxyCountWatermarks(int high, int low) {
    AutoMutex _l(sTrackingLock);
    sBinderProxyCountHighWatermark = high;
    sBinderProxyCountLowWatermark = low;
}

// ---------------------------------------------------------------------------

} // namespace android