• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "IMemory"
18 
19 #include <atomic>
20 #include <stdatomic.h>
21 
22 #include <fcntl.h>
23 #include <stdint.h>
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <sys/types.h>
27 #include <sys/mman.h>
28 #include <unistd.h>
29 
30 #include <binder/IMemory.h>
31 #include <binder/Parcel.h>
32 #include <log/log.h>
33 
34 #include <utils/CallStack.h>
35 #include <utils/KeyedVector.h>
36 #include <utils/threads.h>
37 
38 #define VERBOSE   0
39 
40 namespace android {
41 // ---------------------------------------------------------------------------
42 
// Process-global cache of IMemoryHeap proxies, keyed by the remote binder.
// Reference-counts users of each heap so a given remote heap is tracked
// once per process, and acts as a DeathRecipient so entries can be purged
// when the remote process dies.
class HeapCache : public IBinder::DeathRecipient
{
public:
    HeapCache();
    virtual ~HeapCache();

    // Death notification: drops the cache entry for the deceased binder.
    virtual void binderDied(const wp<IBinder>& who);

    // Returns the cached heap for |binder|, creating an entry (count = 1)
    // on first use, or bumping the count on a hit.
    sp<IMemoryHeap> find_heap(const sp<IBinder>& binder);
    // Drops one reference on |binder|'s entry; removes it when it hits zero.
    void free_heap(const sp<IBinder>& binder);
    // Lookup that does NOT take a cache reference; returns a fresh proxy on miss.
    sp<IMemoryHeap> get_heap(const sp<IBinder>& binder);
    // Logs every cache entry (debugging aid).
    void dump_heaps();

private:
    // For IMemory.cpp
    struct heap_info_t {
        sp<IMemoryHeap> heap;   // the cached heap proxy
        int32_t         count;  // number of outstanding find_heap() users
        // Note that this cannot be meaningfully copied.
    };

    // wp<> flavor shared by binderDied() and the sp<> overload above.
    void free_heap(const wp<IBinder>& binder);

    Mutex mHeapCacheLock;  // Protects entire vector below.
    KeyedVector< wp<IBinder>, heap_info_t > mHeapCache;
    // We do not use the copy-on-write capabilities of KeyedVector.
    // TODO: Reimplemement based on standard C++ container?
};
71 
72 static sp<HeapCache> gHeapCache = new HeapCache();
73 
74 /******************************************************************************/
75 
// IMemoryHeap binder transaction codes.
enum {
    HEAP_ID = IBinder::FIRST_CALL_TRANSACTION
};
79 
// Client-side proxy for a remote IMemoryHeap.  The first accessor call
// lazily fetches the heap's fd over binder and mmap()s it locally; the
// cached fields are 'mutable' because that happens from const accessors.
class BpMemoryHeap : public BpInterface<IMemoryHeap>
{
public:
    explicit BpMemoryHeap(const sp<IBinder>& impl);
    virtual ~BpMemoryHeap();

    virtual int getHeapID() const;
    virtual void* getBase() const;
    virtual size_t getSize() const;
    virtual uint32_t getFlags() const;
    virtual uint32_t getOffset() const;

private:
    friend class IMemory;
    friend class HeapCache;

    // for debugging in this module
    static inline sp<IMemoryHeap> find_heap(const sp<IBinder>& binder) {
        return gHeapCache->find_heap(binder);
    }
    static inline void free_heap(const sp<IBinder>& binder) {
        gHeapCache->free_heap(binder);
    }
    static inline sp<IMemoryHeap> get_heap(const sp<IBinder>& binder) {
        return gHeapCache->get_heap(binder);
    }
    static inline void dump_heaps() {
        gHeapCache->dump_heaps();
    }

    // Copies the mapping fields from the process-wide cached heap if this
    // proxy is not mapped yet.
    void assertMapped() const;
    // Performs the actual HEAP_ID transaction + mmap() if needed.
    void assertReallyMapped() const;

    mutable std::atomic<int32_t> mHeapId;  // dup'ed fd, or -1 while unmapped
    mutable void*       mBase;             // local mapping base (MAP_FAILED if none)
    mutable size_t      mSize;             // mapping size in bytes
    mutable uint32_t    mFlags;            // heap flags (e.g. READ_ONLY)
    mutable uint32_t    mOffset;           // offset passed through to mmap()
    mutable bool        mRealHeap;         // true if this instance owns the mmap()
    mutable Mutex       mLock;             // guards the lazy-mapping fields above
};
121 
122 // ----------------------------------------------------------------------------
123 
// IMemory binder transaction codes.
enum {
    GET_MEMORY = IBinder::FIRST_CALL_TRANSACTION
};
127 
// Client-side proxy for a remote IMemory.  Caches the (heap, offset, size)
// triple after the first successful GET_MEMORY transaction.
class BpMemory : public BpInterface<IMemory>
{
public:
    explicit BpMemory(const sp<IBinder>& impl);
    virtual ~BpMemory();
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset=0, size_t* size=0) const;

private:
    mutable sp<IMemoryHeap> mHeap;  // lazily fetched backing heap
    mutable ssize_t mOffset;        // offset of this allocation within mHeap
    mutable size_t mSize;           // size of this allocation (0 = invalid/empty)
};
140 
141 /******************************************************************************/
142 
fastPointer(const sp<IBinder> & binder,ssize_t offset) const143 void* IMemory::fastPointer(const sp<IBinder>& binder, ssize_t offset) const
144 {
145     sp<IMemoryHeap> realHeap = BpMemoryHeap::get_heap(binder);
146     void* const base = realHeap->base();
147     if (base == MAP_FAILED)
148         return 0;
149     return static_cast<char*>(base) + offset;
150 }
151 
pointer() const152 void* IMemory::pointer() const {
153     ssize_t offset;
154     sp<IMemoryHeap> heap = getMemory(&offset);
155     void* const base = heap!=0 ? heap->base() : MAP_FAILED;
156     if (base == MAP_FAILED)
157         return 0;
158     return static_cast<char*>(base) + offset;
159 }
160 
size() const161 size_t IMemory::size() const {
162     size_t size;
163     getMemory(NULL, &size);
164     return size;
165 }
166 
offset() const167 ssize_t IMemory::offset() const {
168     ssize_t offset;
169     getMemory(&offset);
170     return offset;
171 }
172 
173 /******************************************************************************/
174 
// The heap is fetched lazily by getMemory(); start with an empty cache.
BpMemory::BpMemory(const sp<IBinder>& impl)
    : BpInterface<IMemory>(impl), mOffset(0), mSize(0)
{
}
179 
// mHeap (if any) is released by its sp<> destructor.
BpMemory::~BpMemory()
{
}
183 
// Performs (once) the GET_MEMORY transaction to fetch the backing heap and
// this allocation's (offset, size), then serves later calls from the cache.
// Returns the heap, or 0 when the allocation is empty or the reply was
// rejected as malformed.
sp<IMemoryHeap> BpMemory::getMemory(ssize_t* offset, size_t* size) const
{
    if (mHeap == 0) {
        Parcel data, reply;
        data.writeInterfaceToken(IMemory::getInterfaceDescriptor());
        if (remote()->transact(GET_MEMORY, data, &reply) == NO_ERROR) {
            sp<IBinder> heap = reply.readStrongBinder();
            ssize_t o = reply.readInt32();
            size_t s = reply.readInt32();
            if (heap != 0) {
                mHeap = interface_cast<IMemoryHeap>(heap);
                if (mHeap != 0) {
                    size_t heapSize = mHeap->getSize();
                    // Validate that [o, o+s) fits inside the heap.  Written
                    // to avoid integer overflow: s <= heapSize is checked
                    // first, so heapSize - s cannot wrap.
                    if (s <= heapSize
                            && o >= 0
                            && (static_cast<size_t>(o) <= heapSize - s)) {
                        mOffset = o;
                        mSize = s;
                    } else {
                        // Hm.
                        // Malformed reply: record a SafetyNet event
                        // (0x534e4554 == "SNET", bug 26877992) and treat the
                        // allocation as empty rather than trusting it.
                        android_errorWriteWithInfoLog(0x534e4554,
                            "26877992", -1, NULL, 0);
                        mOffset = 0;
                        mSize = 0;
                    }
                }
            }
        }
    }
    if (offset) *offset = mOffset;
    if (size) *size = mSize;
    // A zero-sized allocation is reported as having no heap at all.
    return (mSize > 0) ? mHeap : 0;
}
217 
218 // ---------------------------------------------------------------------------
219 
220 IMPLEMENT_META_INTERFACE(Memory, "android.utils.IMemory");
221 
// Nothing to initialize; BBinder base handles registration.
BnMemory::BnMemory() {
}
224 
// No resources owned directly by the Bn side.
BnMemory::~BnMemory() {
}
227 
onTransact(uint32_t code,const Parcel & data,Parcel * reply,uint32_t flags)228 status_t BnMemory::onTransact(
229     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
230 {
231     switch(code) {
232         case GET_MEMORY: {
233             CHECK_INTERFACE(IMemory, data, reply);
234             ssize_t offset;
235             size_t size;
236             reply->writeStrongBinder( IInterface::asBinder(getMemory(&offset, &size)) );
237             reply->writeInt32(offset);
238             reply->writeInt32(size);
239             return NO_ERROR;
240         } break;
241         default:
242             return BBinder::onTransact(code, data, reply, flags);
243     }
244 }
245 
246 
247 /******************************************************************************/
248 
// Start unmapped: fd -1 and base MAP_FAILED are the "not mapped yet" markers
// tested by assertMapped()/assertReallyMapped().
BpMemoryHeap::BpMemoryHeap(const sp<IBinder>& impl)
    : BpInterface<IMemoryHeap>(impl),
        mHeapId(-1), mBase(MAP_FAILED), mSize(0), mFlags(0), mOffset(0), mRealHeap(false)
{
}
254 
// Tears down this proxy's local resources.  Every mapped instance owns a
// dup'ed fd (closed here); only the cache's "real" instance owns the mmap()
// and unmaps it, while non-real instances instead drop their cache reference.
BpMemoryHeap::~BpMemoryHeap() {
    int32_t heapId = mHeapId.load(memory_order_relaxed);
    if (heapId != -1) {
        // Close our private dup of the heap fd.
        close(heapId);
        if (mRealHeap) {
            // by construction we're the last one
            if (mBase != MAP_FAILED) {
                sp<IBinder> binder = IInterface::asBinder(this);

                if (VERBOSE) {
                    ALOGD("UNMAPPING binder=%p, heap=%p, size=%zu, fd=%d",
                            binder.get(), this, mSize, heapId);
                    CallStack stack(LOG_TAG);
                }

                munmap(mBase, mSize);
            }
        } else {
            // remove from list only if it was mapped before
            sp<IBinder> binder = IInterface::asBinder(this);
            free_heap(binder);
        }
    }
}
279 
// Lazily copies the mapping fields from the process-wide cached "real" heap
// for this binder.  Fast path: an acquire load of mHeapId seeing != -1
// guarantees visibility of mBase/mSize/mOffset written before the matching
// release store below (or in assertReallyMapped()).
void BpMemoryHeap::assertMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {
        // Find (or create) the cache's canonical heap for this binder and
        // make sure it has actually been mmap()ed.
        sp<IBinder> binder(IInterface::asBinder(const_cast<BpMemoryHeap*>(this)));
        sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
        heap->assertReallyMapped();
        if (heap->mBase != MAP_FAILED) {
            Mutex::Autolock _l(mLock);
            // Re-check under mLock: another thread may have initialized us
            // while we were calling into the cache.
            if (mHeapId.load(memory_order_relaxed) == -1) {
                mBase   = heap->mBase;
                mSize   = heap->mSize;
                mOffset = heap->mOffset;
                // Take our own CLOEXEC dup of the cached heap's fd so each
                // proxy instance owns an independent descriptor.
                int fd = fcntl(heap->mHeapId.load(memory_order_relaxed), F_DUPFD_CLOEXEC, 0);
                ALOGE_IF(fd==-1, "cannot dup fd=%d",
                        heap->mHeapId.load(memory_order_relaxed));
                // Release store publishes the fields assigned above.
                mHeapId.store(fd, memory_order_release);
            }
        } else {
            // something went wrong
            free_heap(binder);
        }
    }
}
304 
// Performs the HEAP_ID transaction to obtain the remote heap's fd, size,
// flags and offset, then mmap()s it locally.  Only the mmap and field
// publication are serialized; concurrent transactions are tolerated.
void BpMemoryHeap::assertReallyMapped() const
{
    int32_t heapId = mHeapId.load(memory_order_acquire);
    if (heapId == -1) {

        // remote call without mLock held, worse case scenario, we end up
        // calling transact() from multiple threads, but that's not a problem,
        // only mmap below must be in the critical section.

        Parcel data, reply;
        data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor());
        status_t err = remote()->transact(HEAP_ID, data, &reply);
        int parcel_fd = reply.readFileDescriptor();
        ssize_t size = reply.readInt32();
        uint32_t flags = reply.readInt32();
        uint32_t offset = reply.readInt32();

        // NOTE(review): on a failed transact() this still proceeds with the
        // (default/invalid) reply values; the dup/mmap below then fail and
        // are logged, leaving the heap unmapped — confirm this is intended.
        ALOGE_IF(err, "binder=%p transaction failed fd=%d, size=%zd, err=%d (%s)",
                IInterface::asBinder(this).get(),
                parcel_fd, size, err, strerror(-err));

        Mutex::Autolock _l(mLock);
        // Re-check under mLock: another thread may have completed the
        // mapping while we were doing the transaction.
        if (mHeapId.load(memory_order_relaxed) == -1) {
            // Dup because the parcel owns parcel_fd and will close it.
            int fd = fcntl(parcel_fd, F_DUPFD_CLOEXEC, 0);
            ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%zd, err=%d (%s)",
                    parcel_fd, size, err, strerror(errno));

            // Map read-only unless the heap allows writes.
            int access = PROT_READ;
            if (!(flags & READ_ONLY)) {
                access |= PROT_WRITE;
            }
            // This instance performed the mmap, so it is responsible for the
            // munmap in the destructor.
            mRealHeap = true;
            mBase = mmap(0, size, access, MAP_SHARED, fd, offset);
            if (mBase == MAP_FAILED) {
                ALOGE("cannot map BpMemoryHeap (binder=%p), size=%zd, fd=%d (%s)",
                        IInterface::asBinder(this).get(), size, fd, strerror(errno));
                close(fd);
            } else {
                mSize = size;
                mFlags = flags;
                mOffset = offset;
                // Release store publishes mBase/mSize/mFlags/mOffset.
                mHeapId.store(fd, memory_order_release);
            }
        }
    }
}
351 
// Returns this proxy's (dup'ed) heap fd, mapping the heap first if needed.
int BpMemoryHeap::getHeapID() const {
    assertMapped();
    // After assertMapped(), mHeapId was either written by this thread or
    // observed via an acquire load, so a relaxed load is sufficient here.
    const int32_t id = mHeapId.load(memory_order_relaxed);
    return id;
}
357 
// Local mapping base address (MAP_FAILED if mapping failed).
void* BpMemoryHeap::getBase() const {
    assertMapped();
    return mBase;
}
362 
// Size in bytes of the local mapping.
size_t BpMemoryHeap::getSize() const {
    assertMapped();
    return mSize;
}
367 
// Heap flags reported by the remote side (e.g. READ_ONLY).
uint32_t BpMemoryHeap::getFlags() const {
    assertMapped();
    return mFlags;
}
372 
// Offset that was passed to mmap() for this heap.
uint32_t BpMemoryHeap::getOffset() const {
    assertMapped();
    return mOffset;
}
377 
378 // ---------------------------------------------------------------------------
379 
380 IMPLEMENT_META_INTERFACE(MemoryHeap, "android.utils.IMemoryHeap");
381 
// Nothing to initialize; BBinder base handles registration.
BnMemoryHeap::BnMemoryHeap() {
}
384 
// No resources owned directly by the Bn side.
BnMemoryHeap::~BnMemoryHeap() {
}
387 
onTransact(uint32_t code,const Parcel & data,Parcel * reply,uint32_t flags)388 status_t BnMemoryHeap::onTransact(
389         uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
390 {
391     switch(code) {
392        case HEAP_ID: {
393             CHECK_INTERFACE(IMemoryHeap, data, reply);
394             reply->writeFileDescriptor(getHeapID());
395             reply->writeInt32(getSize());
396             reply->writeInt32(getFlags());
397             reply->writeInt32(getOffset());
398             return NO_ERROR;
399         } break;
400         default:
401             return BBinder::onTransact(code, data, reply, flags);
402     }
403 }
404 
405 /*****************************************************************************/
406 
// Empty cache; entries are added on demand by find_heap().
HeapCache::HeapCache()
    : DeathRecipient()
{
}
411 
// Remaining entries are released by the KeyedVector destructor.
HeapCache::~HeapCache()
{
}
415 
// Death notification: the remote heap's process died, so drop its cache
// entry (the binder may no longer be promotable, hence the wp<> overload).
void HeapCache::binderDied(const wp<IBinder>& binder)
{
    //ALOGD("binderDied binder=%p", binder.unsafe_get());
    free_heap(binder);
}
421 
find_heap(const sp<IBinder> & binder)422 sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder)
423 {
424     Mutex::Autolock _l(mHeapCacheLock);
425     ssize_t i = mHeapCache.indexOfKey(binder);
426     if (i>=0) {
427         heap_info_t& info = mHeapCache.editValueAt(i);
428         ALOGD_IF(VERBOSE,
429                 "found binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
430                 binder.get(), info.heap.get(),
431                 static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
432                 static_cast<BpMemoryHeap*>(info.heap.get())
433                     ->mHeapId.load(memory_order_relaxed),
434                 info.count);
435         ++info.count;
436         return info.heap;
437     } else {
438         heap_info_t info;
439         info.heap = interface_cast<IMemoryHeap>(binder);
440         info.count = 1;
441         //ALOGD("adding binder=%p, heap=%p, count=%d",
442         //      binder.get(), info.heap.get(), info.count);
443         mHeapCache.add(binder, info);
444         return info.heap;
445     }
446 }
447 
// Convenience overload: forwards to the wp<> flavor shared with binderDied().
void HeapCache::free_heap(const sp<IBinder>& binder)  {
    free_heap( wp<IBinder>(binder) );
}
451 
// Drops one use count on |binder|'s cache entry and removes the entry when
// the count reaches zero.  The (possibly last) strong reference is parked in
// 'rel' so the heap's destructor — which can re-enter this cache — runs only
// after mHeapCacheLock has been released at the end of the inner scope.
void HeapCache::free_heap(const wp<IBinder>& binder)
{
    sp<IMemoryHeap> rel;
    {
        Mutex::Autolock _l(mHeapCacheLock);
        ssize_t i = mHeapCache.indexOfKey(binder);
        if (i>=0) {
            heap_info_t& info(mHeapCache.editValueAt(i));
            if (--info.count == 0) {
                ALOGD_IF(VERBOSE,
                        "removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
                        binder.unsafe_get(), info.heap.get(),
                        static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
                        static_cast<BpMemoryHeap*>(info.heap.get())
                            ->mHeapId.load(memory_order_relaxed),
                        info.count);
                rel = mHeapCache.valueAt(i).heap;
                mHeapCache.removeItemsAt(i);
            }
        } else {
            ALOGE("free_heap binder=%p not found!!!", binder.unsafe_get());
        }
    }
}
476 
// Lookup that does not take a cache reference: returns the cached heap for
// |binder| if present, otherwise a fresh (uncached) proxy.
sp<IMemoryHeap> HeapCache::get_heap(const sp<IBinder>& binder)
{
    Mutex::Autolock _l(mHeapCacheLock);
    const ssize_t idx = mHeapCache.indexOfKey(binder);
    if (idx >= 0) {
        return mHeapCache.valueAt(idx).heap;
    }
    return interface_cast<IMemoryHeap>(binder);
}
486 
dump_heaps()487 void HeapCache::dump_heaps()
488 {
489     Mutex::Autolock _l(mHeapCacheLock);
490     int c = mHeapCache.size();
491     for (int i=0 ; i<c ; i++) {
492         const heap_info_t& info = mHeapCache.valueAt(i);
493         BpMemoryHeap const* h(static_cast<BpMemoryHeap const *>(info.heap.get()));
494         ALOGD("hey=%p, heap=%p, count=%d, (fd=%d, base=%p, size=%zu)",
495                 mHeapCache.keyAt(i).unsafe_get(),
496                 info.heap.get(), info.count,
497                 h->mHeapId.load(memory_order_relaxed), h->mBase, h->mSize);
498     }
499 }
500 
501 
502 // ---------------------------------------------------------------------------
503 }; // namespace android
504