/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "IMemory"

#include <atomic>
#include <stdatomic.h>

#include <errno.h>   // errno (used with strerror below)
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // strerror
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

#include <binder/IMemory.h>
#include <binder/Parcel.h>
#include <log/log.h>

#include <utils/Mutex.h>

#include <map>

#define VERBOSE 0

namespace android {
// ---------------------------------------------------------------------------

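// HeapCache keeps a single IMemoryHeap proxy per remote heap binder, so each
// heap is mmap()ed at most once per process no matter how many IMemory
// objects reference it. Entries are reference-counted through find_heap() /
// free_heap(), and the cache is a DeathRecipient so an entry can be purged
// if its remote binder dies.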
class HeapCache : public IBinder::DeathRecipient
{
public:
    HeapCache();
    virtual ~HeapCache();

    virtual void binderDied(const wp<IBinder>& who);

    sp<IMemoryHeap> find_heap(const sp<IBinder>& binder);
    void free_heap(const sp<IBinder>& binder);
    sp<IMemoryHeap> get_heap(const sp<IBinder>& binder);
    void dump_heaps();

private:
    // For IMemory.cpp
    struct heap_info_t {
        sp<IMemoryHeap> heap;
        int32_t count;
        // Note that this cannot be meaningfully copied.
    };

    void free_heap(const wp<IBinder>& binder);

    Mutex mHeapCacheLock;  // Protects the map below.
    std::map<wp<IBinder>, heap_info_t> mHeapCache;
};

static sp<HeapCache> gHeapCache = sp<HeapCache>::make();

/******************************************************************************/

enum {
    HEAP_ID = IBinder::FIRST_CALL_TRANSACTION
};

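// Client-side proxy for IMemoryHeap. The mapping is established lazily:
// mHeapId starts out as -1 and the first accessor triggers assertMapped(),
// which fetches the heap's fd/size/flags over binder and mmap()s it. mHeapId
// is written with release semantics and read with acquire semantics so that
// a thread observing a valid heap id also sees mBase/mSize/mFlags/mOffset.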
class BpMemoryHeap : public BpInterface<IMemoryHeap>
{
public:
    explicit BpMemoryHeap(const sp<IBinder>& impl);
    virtual ~BpMemoryHeap();

    int getHeapID() const override;
    void* getBase() const override;
    size_t getSize() const override;
    uint32_t getFlags() const override;
    off_t getOffset() const override;

private:
    friend class IMemory;
    friend class HeapCache;

    // for debugging in this module
    static inline sp<IMemoryHeap> find_heap(const sp<IBinder>& binder) {
        return gHeapCache->find_heap(binder);
    }
    static inline void free_heap(const sp<IBinder>& binder) {
        gHeapCache->free_heap(binder);
    }
    static inline sp<IMemoryHeap> get_heap(const sp<IBinder>& binder) {
        return gHeapCache->get_heap(binder);
    }
    static inline void dump_heaps() {
        gHeapCache->dump_heaps();
    }

    void assertMapped() const;
    void assertReallyMapped() const;

    mutable std::atomic<int32_t> mHeapId;
    mutable void* mBase;
    mutable size_t mSize;
    mutable uint32_t mFlags;
    mutable off_t mOffset;
    mutable bool mRealHeap;
    mutable Mutex mLock;
};

// ----------------------------------------------------------------------------

enum {
    GET_MEMORY = IBinder::FIRST_CALL_TRANSACTION
};

class BpMemory : public BpInterface<IMemory>
{
public:
    explicit BpMemory(const sp<IBinder>& impl);
    virtual ~BpMemory();
    // NOLINTNEXTLINE(google-default-arguments)
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset=nullptr, size_t* size=nullptr) const;

private:
    mutable sp<IMemoryHeap> mHeap;
    mutable ssize_t mOffset;
    mutable size_t mSize;
};

/******************************************************************************/

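// Resolves `binder`'s heap through the per-process cache and returns
// base + offset. Unlike unsecurePointer(), the caller-supplied offset is
// taken on trust; no bounds check against the heap size happens here.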
void* IMemory::fastPointer(const sp<IBinder>& binder, ssize_t offset) const
{
    sp<IMemoryHeap> realHeap = BpMemoryHeap::get_heap(binder);
    void* const base = realHeap->base();
    if (base == MAP_FAILED)
        return nullptr;
    return static_cast<char*>(base) + offset;
}

void* IMemory::unsecurePointer() const {
    ssize_t offset;
    sp<IMemoryHeap> heap = getMemory(&offset);
    void* const base = heap != nullptr ? heap->base() : MAP_FAILED;
    if (base == MAP_FAILED)
        return nullptr;
    return static_cast<char*>(base) + offset;
}
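
// Illustrative only (not part of the original file): a typical client checks
// unsecurePointer() for nullptr before touching the buffer.
//
//   sp<IMemory> mem = ...;                 // obtained from a remote service
//   if (void* p = mem->unsecurePointer()) {
//       // safe to access mem->size() bytes starting at p
//   }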

void* IMemory::pointer() const { return unsecurePointer(); }

size_t IMemory::size() const {
    size_t size;
    getMemory(nullptr, &size);
    return size;
}

ssize_t IMemory::offset() const {
    ssize_t offset;
    getMemory(&offset);
    return offset;
}

/******************************************************************************/

BpMemory::BpMemory(const sp<IBinder>& impl)
    : BpInterface<IMemory>(impl), mOffset(0), mSize(0)
{
}

BpMemory::~BpMemory()
{
}

// NOLINTNEXTLINE(google-default-arguments)
sp<IMemoryHeap> BpMemory::getMemory(ssize_t* offset, size_t* size) const
{
    if (mHeap == nullptr) {
        Parcel data, reply;
        data.writeInterfaceToken(IMemory::getInterfaceDescriptor());
        if (remote()->transact(GET_MEMORY, data, &reply) == NO_ERROR) {
            sp<IBinder> heap = reply.readStrongBinder();
            if (heap != nullptr) {
                mHeap = interface_cast<IMemoryHeap>(heap);
                if (mHeap != nullptr) {
                    const int64_t offset64 = reply.readInt64();
                    const uint64_t size64 = reply.readUint64();
                    const ssize_t o = (ssize_t)offset64;
                    const size_t s = (size_t)size64;
                    size_t heapSize = mHeap->getSize();
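                    // Validate the remote-supplied (offset, size) pair:
                    // reject values truncated by 64->32 bit narrowing on
                    // ILP32 and ranges falling outside [0, heapSize].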
                    if (s == size64 && o == offset64 // ILP32 bounds check
                        && s <= heapSize
                        && o >= 0
                        && (static_cast<size_t>(o) <= heapSize - s)) {
                        mOffset = o;
                        mSize = s;
                    } else {
                        // Malformed reply: log a SafetyNet event (bug
                        // 26877992) and refuse the mapping.
                        android_errorWriteWithInfoLog(0x534e4554,
                            "26877992", -1, nullptr, 0);
                        mOffset = 0;
                        mSize = 0;
                    }
                }
            }
        }
    }
    if (offset) *offset = mOffset;
    if (size) *size = mSize;
    return (mSize > 0) ? mHeap : nullptr;
}

// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(Memory, "android.utils.IMemory")

BnMemory::BnMemory() {
}

BnMemory::~BnMemory() {
}

// NOLINTNEXTLINE(google-default-arguments)
status_t BnMemory::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case GET_MEMORY: {
            CHECK_INTERFACE(IMemory, data, reply);
            ssize_t offset;
            size_t size;
            reply->writeStrongBinder(IInterface::asBinder(getMemory(&offset, &size)));
            reply->writeInt64(offset);
            reply->writeUint64(size);
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

/******************************************************************************/

BpMemoryHeap::BpMemoryHeap(const sp<IBinder>& impl)
    : BpInterface<IMemoryHeap>(impl),
      mHeapId(-1), mBase(MAP_FAILED), mSize(0), mFlags(0), mOffset(0), mRealHeap(false)
{
}

BpMemoryHeap::~BpMemoryHeap() {
    int32_t heapId = mHeapId.load(memory_order_relaxed);
    if (heapId != -1) {
        close(heapId);
        if (mRealHeap) {
            // by construction we're the last one
            if (mBase != MAP_FAILED) {
                sp<IBinder> binder = IInterface::asBinder(this);

                if (VERBOSE) {
                    ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
                          binder.get(), this, mSize, heapId);
                }

                munmap(mBase, mSize);
            }
        } else {
            // remove from list only if it was mapped before
            sp<IBinder> binder = IInterface::asBinder(this);
            free_heap(binder);
        }
    }
}

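// Called by every accessor. If this proxy is not yet mapped, look up (or
// create) the per-process heap that owns the real mapping, ensure it is
// mapped, then dup its fd and copy its mapping parameters under mLock.
// Double-checked: the relaxed re-load of mHeapId under the lock guards
// against a racing thread having completed the copy first.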
void BpMemoryHeap::assertMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {
        sp<IBinder> binder(IInterface::asBinder(const_cast<BpMemoryHeap*>(this)));
        sp<BpMemoryHeap> heap = sp<BpMemoryHeap>::cast(find_heap(binder));
        heap->assertReallyMapped();
        if (heap->mBase != MAP_FAILED) {
            Mutex::Autolock _l(mLock);
            if (mHeapId.load(memory_order_relaxed) == -1) {
                mBase = heap->mBase;
                mSize = heap->mSize;
                mOffset = heap->mOffset;
                int fd = fcntl(heap->mHeapId.load(memory_order_relaxed), F_DUPFD_CLOEXEC, 0);
                ALOGE_IF(fd == -1, "cannot dup fd=%d",
                         heap->mHeapId.load(memory_order_relaxed));
                mHeapId.store(fd, memory_order_release);
            }
        } else {
            // something went wrong
            free_heap(binder);
        }
    }
}

void BpMemoryHeap::assertReallyMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {

        // Remote call without mLock held. In the worst case we end up
        // calling transact() from multiple threads, but that's not a
        // problem; only the mmap() below must be in the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        int parcel_fd = reply.readFileDescriptor();
        const uint64_t size64 = reply.readUint64();
        const int64_t offset64 = reply.readInt64();
        const uint32_t flags = reply.readUint32();
        const size_t size = (size_t)size64;
        const off_t offset = (off_t)offset64;
        if (err != NO_ERROR || // failed transaction
            size != size64 || offset != offset64) { // ILP32 size check
            ALOGE("binder=%p transaction failed fd=%d, size=%zu, err=%d (%s)",
                  IInterface::asBinder(this).get(),
                  parcel_fd, size, err, strerror(-err));
            return;
        }

        Mutex::Autolock _l(mLock);
        if (mHeapId.load(memory_order_relaxed) == -1) {
            int fd = fcntl(parcel_fd, F_DUPFD_CLOEXEC, 0);
            ALOGE_IF(fd == -1, "cannot dup fd=%d, size=%zu, err=%d (%s)",
                     parcel_fd, size, err, strerror(errno));

            int access = PROT_READ;
            if (!(flags & READ_ONLY)) {
                access |= PROT_WRITE;
            }
            mRealHeap = true;
            mBase = mmap(nullptr, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%zu, fd=%d (%s)",
                      IInterface::asBinder(this).get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
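                // Publish the fd last: this release store pairs with the
                // acquire loads of mHeapId, so a reader seeing a valid fd
                // also sees mBase, mSize, mFlags and mOffset.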
                mHeapId.store(fd, memory_order_release);
            }
        }
    }
}

int BpMemoryHeap::getHeapID() const {
    assertMapped();
    // We either stored mHeapId ourselves, or loaded it with acquire semantics.
    return mHeapId.load(memory_order_relaxed);
}

void* BpMemoryHeap::getBase() const {
    assertMapped();
    return mBase;
}

size_t BpMemoryHeap::getSize() const {
    assertMapped();
    return mSize;
}

uint32_t BpMemoryHeap::getFlags() const {
    assertMapped();
    return mFlags;
}

off_t BpMemoryHeap::getOffset() const {
    assertMapped();
    return mOffset;
}

// ---------------------------------------------------------------------------

IMPLEMENT_META_INTERFACE(MemoryHeap, "android.utils.IMemoryHeap")

BnMemoryHeap::BnMemoryHeap() {
}

BnMemoryHeap::~BnMemoryHeap() {
}

// NOLINTNEXTLINE(google-default-arguments)
status_t BnMemoryHeap::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case HEAP_ID: {
            CHECK_INTERFACE(IMemoryHeap, data, reply);
            reply->writeFileDescriptor(getHeapID());
            reply->writeUint64(getSize());
            reply->writeInt64(getOffset());
            reply->writeUint32(getFlags());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
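
// Illustrative only (not part of the original file): services normally hand
// out memory via the MemoryHeapBase / MemoryBase helpers from libbinder
// instead of subclassing BnMemoryHeap/BnMemory directly.
//
//   sp<MemoryHeapBase> heap = sp<MemoryHeapBase>::make(4096, 0, "demo");
//   sp<MemoryBase> mem = sp<MemoryBase>::make(heap, 0, 4096);
//   // Returning `mem` across binder lets the peer map the heap lazily.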

/*****************************************************************************/

HeapCache::HeapCache()
    : DeathRecipient()
{
}

HeapCache::~HeapCache()
{
}

void HeapCache::binderDied(const wp<IBinder>& binder)
{
    //ALOGD("binderDied binder=%p", binder.unsafe_get());
    free_heap(binder);
}

sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder)
{
    Mutex::Autolock _l(mHeapCacheLock);
    auto i = mHeapCache.find(binder);
    if (i != mHeapCache.end()) {
        heap_info_t& info = i->second;
        ALOGD_IF(VERBOSE,
                 "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                 binder.get(), info.heap.get(),
                 static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                 static_cast<BpMemoryHeap*>(info.heap.get())
                     ->mHeapId.load(memory_order_relaxed),
                 info.count);
        ++info.count;
        return info.heap;
    } else {
        heap_info_t info;
        info.heap = interface_cast<IMemoryHeap>(binder);
        info.count = 1;
        //ALOGD("adding binder=%p, heap=%p, count=%d",
        //      binder.get(), info.heap.get(), info.count);
        mHeapCache.insert({binder, info});
        return info.heap;
    }
}

void HeapCache::free_heap(const sp<IBinder>& binder) {
    free_heap(wp<IBinder>(binder));
}

void HeapCache::free_heap(const wp<IBinder>& binder)
{
    sp<IMemoryHeap> rel;
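    // `rel` holds the evicted heap so its last reference is dropped after
    // mHeapCacheLock is released, keeping ~BpMemoryHeap (munmap/close) out
    // of the critical section.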
    {
        Mutex::Autolock _l(mHeapCacheLock);
        auto i = mHeapCache.find(binder);
        if (i != mHeapCache.end()) {
            heap_info_t& info = i->second;
            if (--info.count == 0) {
                ALOGD_IF(VERBOSE,
                         "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                         binder.unsafe_get(), info.heap.get(),
                         static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                         static_cast<BpMemoryHeap*>(info.heap.get())
                             ->mHeapId.load(memory_order_relaxed),
                         info.count);
                rel = i->second.heap;
                mHeapCache.erase(i);
            }
        } else {
            ALOGE("free_heap binder=%p not found!!!", binder.unsafe_get());
        }
    }
}

sp<IMemoryHeap> HeapCache::get_heap(const sp<IBinder>& binder)
{
    sp<IMemoryHeap> realHeap;
    Mutex::Autolock _l(mHeapCacheLock);
    auto i = mHeapCache.find(binder);
    if (i != mHeapCache.end())
        realHeap = i->second.heap;
    else
        realHeap = interface_cast<IMemoryHeap>(binder);
    return realHeap;
}

void HeapCache::dump_heaps()
{
    Mutex::Autolock _l(mHeapCacheLock);
    for (const auto& i : mHeapCache) {
        const heap_info_t& info = i.second;
        BpMemoryHeap const* h(static_cast<BpMemoryHeap const*>(info.heap.get()));
        ALOGD("key=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)", i.first.unsafe_get(),
              info.heap.get(), info.count, h->mHeapId.load(memory_order_relaxed), h->mBase,
              h->mSize);
    }
}

// ---------------------------------------------------------------------------
} // namespace android