• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2016 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "C2AllocatorIon"
19 #include <utils/Log.h>
20 
21 #include <list>
22 
23 #include <ion/ion.h>
24 #include <sys/mman.h>
25 #include <unistd.h> // getpagesize, size_t, close, dup
26 
27 #include <C2AllocatorIon.h>
28 #include <C2Buffer.h>
29 #include <C2Debug.h>
30 #include <C2ErrnoUtils.h>
31 
32 namespace android {
33 
namespace {
    // Maximum number of entries kept in the usage-mapper LRU cache
    // (see C2AllocatorIon::mapUsage()). Oldest entries are evicted past this.
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;
}
37 
38 /* size_t <=> int(lo), int(hi) conversions */
/// Returns the low 32 bits of \p s reinterpreted as an int.
constexpr inline int size2intLo(size_t s) {
    return static_cast<int>(s & 0xFFFFFFFF);
}
42 
/// Returns the high 32 bits of \p s reinterpreted as an int.
constexpr inline int size2intHi(size_t s) {
    // widen first: size_t may itself be only 32 bits wide, making this 0
    return static_cast<int>((static_cast<uint64_t>(s) >> 32) & 0xFFFFFFFF);
}
47 
/// Reassembles a size_t from its low/high 32-bit halves (the inverse of
/// size2intLo/size2intHi).
constexpr inline size_t ints2size(int intLo, int intHi) {
    // go through unsigned so negative ints are not sign-extended, and through
    // uint64_t so the shift stays well-defined even on 32-bit size_t
    const uint64_t lo = static_cast<unsigned>(intLo);
    const uint64_t hi = static_cast<uint64_t>(static_cast<unsigned>(intHi)) << 32;
    return size_t(lo) | size_t(hi);
}
52 
53 /* ========================================= ION HANDLE ======================================== */
54 /**
55  * ION handle
56  *
57  * There can be only a sole ion client per process, this is captured in the ion fd that is passed
58  * to the constructor, but this should be managed by the ion buffer allocator/mapper.
59  *
60  * ion uses ion_user_handle_t for buffers. We don't store this in the native handle as
61  * it requires an ion_free to decref. Instead, we share the buffer to get an fd that also holds
62  * a refcount.
63  *
64  * This handle will not capture mapped fd-s as updating that would require a global mutex.
65  */
66 
/**
 * Native handle wrapping a shared ion buffer fd plus its size.
 *
 * Layout (must stay in sync with cHeader below): one fd (the shared buffer)
 * and three ints (size low/high words and a magic marker used by isValid()).
 */
struct C2HandleIon : public C2Handle {
    // ion handle owns ionFd(!) and bufferFd
    C2HandleIon(int bufferFd, size_t size)
        : C2Handle(cHeader),
          mFds{ bufferFd },
          // split the size into two 32-bit words so it fits the int section
          mInts{ int(size & 0xFFFFFFFF), int((uint64_t(size) >> 32) & 0xFFFFFFFF), kMagic } { }

    // Returns true if |o| byte-matches cHeader and carries the magic marker.
    static bool isValid(const C2Handle * const o);

    // Shared ion buffer fd stored in the handle.
    int bufferFd() const { return mFds.mBuffer; }
    // Reassembles the buffer size from its stored low/high 32-bit words.
    size_t size() const {
        return size_t(unsigned(mInts.mSizeLo))
                | size_t(uint64_t(unsigned(mInts.mSizeHi)) << 32);
    }

protected:
    struct {
        int mBuffer; // shared ion buffer
    } mFds;
    struct {
        int mSizeLo; // low 32-bits of size
        int mSizeHi; // high 32-bits of size
        int mMagic;
    } mInts;

private:
    typedef C2HandleIon _type;
    enum {
        kMagic = '\xc2io\x00',
        numFds = sizeof(mFds) / sizeof(int),
        numInts = sizeof(mInts) / sizeof(int),
        version = sizeof(C2Handle)
    };
    //constexpr static C2Handle cHeader = { version, numFds, numInts, {} };
    const static C2Handle cHeader;
};
103 
// Out-of-line definition of the header template every valid C2HandleIon must
// start with: version plus fd/int counts derived from the member layout above.
const C2Handle C2HandleIon::cHeader = {
    C2HandleIon::version,
    C2HandleIon::numFds,
    C2HandleIon::numInts,
    {}
};
110 
111 // static
isValid(const C2Handle * const o)112 bool C2HandleIon::isValid(const C2Handle * const o) {
113     if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
114         return false;
115     }
116     const C2HandleIon *other = static_cast<const C2HandleIon*>(o);
117     return other->mInts.mMagic == kMagic;
118 }
119 
120 // TODO: is the dup of an ion fd identical to ion_share?
121 
122 /* ======================================= ION ALLOCATION ====================================== */
// Linear allocation backed by an ion buffer; all work is delegated to Impl.
class C2AllocationIon : public C2LinearAllocation {
public:
    /* Interface methods */
    virtual c2_status_t map(
        size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence,
        void **addr /* nonnull */) override;
    virtual c2_status_t unmap(void *addr, size_t size, C2Fence *fenceFd) override;
    virtual ~C2AllocationIon() override;
    virtual const C2Handle *handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const override;

    // internal methods
    // Allocates a new ion buffer (takes ownership of ionFd).
    C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
    // Imports an existing shared buffer fd (takes ownership of ionFd and shareFd).
    C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id);

    // Status captured during allocation/import (C2_OK on success).
    c2_status_t status() const;

protected:
    class Impl;
    Impl *mImpl; // owned; deleted in the destructor

    // TODO: we could make this encapsulate shared_ptr and copiable
    C2_DO_NOT_COPY(C2AllocationIon);
};
148 
class C2AllocationIon::Impl {
private:
    /**
     * Constructs an ion allocation.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param buffer    ion buffer user handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    Impl(int ionFd, size_t capacity, int bufferFd, ion_user_handle_t buffer, C2Allocator::id_t id, int err)
        : mIonFd(ionFd),
          mHandle(bufferFd, capacity),
          mBuffer(buffer),
          mId(id),
          mInit(c2_map_errno<ENOMEM, EACCES, EINVAL>(err)),
          mMapFd(-1) {
        if (mInit != C2_OK) {
            // close ionFd now on error
            if (mIonFd >= 0) {
                close(mIonFd);
                mIonFd = -1;
            }
            // C2_CHECK(bufferFd < 0);
            // C2_CHECK(buffer < 0);
        }
    }

public:
    /**
     * Constructs an ion allocation by importing a shared buffer fd.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object)
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * import failed.
     */
    static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id) {
        ion_user_handle_t buffer = -1;
        int ret = ion_import(ionFd, bufferFd, &buffer);
        return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
    }

    /**
     * Constructs an ion allocation by allocating an ion buffer.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param size      size of allocation
     * \param align     desired alignment of allocation
     * \param heapMask  mask of heaps considered
     * \param flags     ion allocation flags
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * allocation failed.
     */
    static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
        int bufferFd = -1;
        ion_user_handle_t buffer = -1;
        int ret = ion_alloc(ionFd, size, align, heapMask, flags, &buffer);
        ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
              "returned (%d) ; buffer = %d",
              ionFd, size, align, heapMask, flags, ret, buffer);
        if (ret == 0) {
            // get buffer fd for native handle constructor
            ret = ion_share(ionFd, buffer, &bufferFd);
            if (ret != 0) {
                // sharing failed: release the buffer so only the error is recorded
                ion_free(ionFd, buffer);
                buffer = -1;
            }
        }
        return new Impl(ionFd, size, bufferFd, buffer, id, ret);
    }

    /**
     * Maps a region of the buffer for CPU access.
     *
     * \param offset  offset of the region within the buffer (may be unaligned)
     * \param size    size of the region; must be nonzero
     * \param usage   CPU read/write flags, translated into mmap protection bits
     * \param fence   currently ignored
     * \param addr    on success, receives the CPU address of the region
     */
    c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
        (void)fence; // TODO: wait for fence
        *addr = nullptr;
        if (!mMappings.empty()) {
            ALOGV("multiple map");
            // TODO: technically we should return DUPLICATE here, but our block views don't
            // actually unmap, so we end up remapping an ion buffer multiple times.
            //
            // return C2_DUPLICATE;
        }
        if (size == 0) {
            return C2_BAD_VALUE;
        }

        // translate CPU usage flags into mmap protection bits
        int prot = PROT_NONE;
        int flags = MAP_SHARED;
        if (usage.expected & C2MemoryUsage::CPU_READ) {
            prot |= PROT_READ;
        }
        if (usage.expected & C2MemoryUsage::CPU_WRITE) {
            prot |= PROT_WRITE;
        }

        // mmap requires a page-aligned offset: map from the preceding page
        // boundary and hand back a pointer adjusted by the misalignment.
        // (PAGE_SIZE is assumed to come from a platform header — TODO confirm.)
        size_t alignmentBytes = offset % PAGE_SIZE;
        size_t mapOffset = offset - alignmentBytes;
        size_t mapSize = size + alignmentBytes;
        Mapping map = { nullptr, alignmentBytes, mapSize };

        c2_status_t err = C2_OK;
        if (mMapFd == -1) {
            // first mapping: ion_map also produces the map fd reused below
            int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
                              flags, mapOffset, (unsigned char**)&map.addr, &mMapFd);
            ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
                  "offset = %zu) returned (%d)",
                  mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
            if (ret) {
                mMapFd = -1;
                map.addr = *addr = nullptr;
                // ion_map returns a negative errno here; negate before mapping
                err = c2_map_errno<EINVAL>(-ret);
            } else {
                *addr = (uint8_t *)map.addr + alignmentBytes;
            }
        } else {
            // subsequent mappings reuse the existing map fd with plain mmap
            map.addr = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
            ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
                  "returned (%d)",
                  mapSize, prot, flags, mMapFd, mapOffset, errno);
            if (map.addr == MAP_FAILED) {
                map.addr = *addr = nullptr;
                err = c2_map_errno<EINVAL>(errno);
            } else {
                *addr = (uint8_t *)map.addr + alignmentBytes;
            }
        }
        if (map.addr) {
            mMappings.push_back(map);
        }
        return err;
    }

    /**
     * Unmaps a region previously returned by map(). The (addr, size) pair must
     * exactly match one of the recorded mappings.
     */
    c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
        if (mMapFd < 0 || mMappings.empty()) {
            ALOGD("tried to unmap unmapped buffer");
            return C2_NOT_FOUND;
        }
        for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
            // compare against the user-visible address/size (alignment-adjusted)
            if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
                    size + it->alignmentBytes != it->size) {
                continue;
            }
            int err = munmap(it->addr, it->size);
            if (err != 0) {
                ALOGD("munmap failed");
                return c2_map_errno<EINVAL>(errno);
            }
            if (fence) {
                *fence = C2Fence(); // not using fences
            }
            (void)mMappings.erase(it);
            ALOGV("successfully unmapped: %d", mBuffer);
            return C2_OK;
        }
        ALOGD("unmap failed to find specified map");
        return C2_BAD_VALUE;
    }

    // Releases any dangling mappings, the map fd, the ion buffer (only valid
    // when construction succeeded) and finally the ion client fd.
    ~Impl() {
        if (!mMappings.empty()) {
            ALOGD("Dangling mappings!");
            for (const Mapping &map : mMappings) {
                (void)munmap(map.addr, map.size);
            }
        }
        if (mMapFd >= 0) {
            close(mMapFd);
            mMapFd = -1;
        }
        if (mInit == C2_OK) {
            // buffer handle and native handle are only valid on success
            (void)ion_free(mIonFd, mBuffer);
            native_handle_close(&mHandle);
        }
        if (mIonFd >= 0) {
            close(mIonFd);
        }
    }

    // Status captured during allocation/import (C2_OK on success).
    c2_status_t status() const {
        return mInit;
    }

    // Native handle wrapping the shared buffer fd and size.
    const C2Handle *handle() const {
        return &mHandle;
    }

    // Id of the allocator that created this allocation.
    C2Allocator::id_t getAllocatorId() const {
        return mId;
    }

    // ion user handle of the underlying buffer (used for equality checks).
    ion_user_handle_t ionHandle() const {
        return mBuffer;
    }

private:
    int mIonFd;                // ion client fd (owned)
    C2HandleIon mHandle;       // native handle owning the shared buffer fd
    ion_user_handle_t mBuffer; // ion buffer user handle
    C2Allocator::id_t mId;     // allocator id
    c2_status_t mInit;         // construction status
    int mMapFd; // only one for now
    struct Mapping {
        void *addr;            // page-aligned mapping base
        size_t alignmentBytes; // offset of the user address within the mapping
        size_t size;           // total mapped size
    };
    std::list<Mapping> mMappings;
};
366 
// Delegates to the implementation; see Impl::map() for the mapping logic.
c2_status_t C2AllocationIon::map(
    size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
    return mImpl->map(offset, size, usage, fence, addr);
}
371 
// Delegates to the implementation; see Impl::unmap().
c2_status_t C2AllocationIon::unmap(void *addr, size_t size, C2Fence *fence) {
    return mImpl->unmap(addr, size, fence);
}
375 
// Returns the status captured when the buffer was allocated or imported.
c2_status_t C2AllocationIon::status() const {
    return mImpl->status();
}
379 
// Id of the allocator that produced this allocation.
C2Allocator::id_t C2AllocationIon::getAllocatorId() const {
    return mImpl->getAllocatorId();
}
383 
equals(const std::shared_ptr<C2LinearAllocation> & other) const384 bool C2AllocationIon::equals(const std::shared_ptr<C2LinearAllocation> &other) const {
385     if (!other || other->getAllocatorId() != getAllocatorId()) {
386         return false;
387     }
388     // get user handle to compare objects
389     std::shared_ptr<C2AllocationIon> otherAsIon = std::static_pointer_cast<C2AllocationIon>(other);
390     return mImpl->ionHandle() == otherAsIon->mImpl->ionHandle();
391 }
392 
// Native handle (shared buffer fd + size) owned by the implementation.
const C2Handle *C2AllocationIon::handle() const {
    return mImpl->handle();
}
396 
// Impl's destructor releases the mappings, ion buffer and client fd.
C2AllocationIon::~C2AllocationIon() {
    delete mImpl;
}
400 
// Allocating constructor: creates a fresh ion buffer of |size| bytes.
// Ownership of |ionFd| transfers to the implementation; check status() for
// the allocation result.
C2AllocationIon::C2AllocationIon(int ionFd, size_t size, size_t align,
                                 unsigned heapMask, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Alloc(ionFd, size, align, heapMask, flags, id)) { }
405 
// Importing constructor: wraps an existing shared buffer fd. Ownership of
// |ionFd| and |shareFd| transfers to the implementation; check status().
C2AllocationIon::C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Import(ionFd, size, shareFd, id)) { }
409 
410 /* ======================================= ION ALLOCATOR ====================================== */
// Opens the process-wide ion client and publishes the allocator traits.
// On failure, mInit records the error and mTraits stays null — callers are
// expected to check status before using the accessors.
C2AllocatorIon::C2AllocatorIon(id_t id)
    : mInit(C2_OK),
      mIonFd(ion_open()) {
    if (mIonFd < 0) {
        // NOTE(review): relies on ion_open() leaving a meaningful errno on
        // failure — confirm against libion.
        switch (errno) {
        case ENOENT:    mInit = C2_OMITTED; break;
        default:        mInit = c2_map_errno<EACCES>(errno); break;
        }
    } else {
        // default usage bounds: CPU read/write only, until setUsageMapper()
        C2MemoryUsage minUsage = { 0, 0 };
        C2MemoryUsage maxUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
        Traits traits = { "android.allocator.ion", id, LINEAR, minUsage, maxUsage };
        mTraits = std::make_shared<Traits>(traits);
        mBlockSize = ::getpagesize();
    }
}
427 
// Closes the ion client, but only if it was opened successfully.
C2AllocatorIon::~C2AllocatorIon() {
    if (mInit == C2_OK) {
        ion_close(mIonFd);
    }
}
433 
// Allocator id from the published traits. NOTE(review): mTraits is null when
// construction failed — confirm callers check status first.
C2Allocator::id_t C2AllocatorIon::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}
438 
// Allocator name from the published traits (locked: mTraits can be swapped
// by setUsageMapper()).
C2String C2AllocatorIon::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}
443 
// Snapshot of the current traits; safe to hold after the lock is released
// since setUsageMapper() replaces (not mutates) the shared object.
std::shared_ptr<const C2Allocator::Traits> C2AllocatorIon::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}
448 
// Installs a new usage-to-ion-parameter mapper and updates the advertised
// usage bounds and block size.
void C2AllocatorIon::setUsageMapper(
        const UsageMapperFn &mapper, uint64_t minUsage, uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    // a new mapper invalidates everything previously cached
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    // republish traits with the new usage bounds (name/id/type unchanged)
    Traits traits = {
        mTraits->name, mTraits->id, LINEAR,
        C2MemoryUsage(minUsage), C2MemoryUsage(maxUsage)
    };
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}
462 
operator ()(const MapperKey & k) const463 std::size_t C2AllocatorIon::MapperKeyHash::operator()(const MapperKey &k) const {
464     return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
465 }
466 
mapUsage(C2MemoryUsage usage,size_t capacity,size_t * align,unsigned * heapMask,unsigned * flags)467 c2_status_t C2AllocatorIon::mapUsage(
468         C2MemoryUsage usage, size_t capacity, size_t *align, unsigned *heapMask, unsigned *flags) {
469     std::lock_guard<std::mutex> lock(mUsageMapperLock);
470     c2_status_t res = C2_OK;
471     // align capacity
472     capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
473     MapperKey key = std::make_pair(usage.expected, capacity);
474     auto entry = mUsageMapperCache.find(key);
475     if (entry == mUsageMapperCache.end()) {
476         if (mUsageMapper) {
477             res = mUsageMapper(usage, capacity, align, heapMask, flags);
478         } else {
479             *align = 0; // TODO make this 1
480             *heapMask = ~0; // default mask
481             *flags = 0; // default flags
482             res = C2_NO_INIT;
483         }
484         // add usage to cache
485         MapperValue value = std::make_tuple(*align, *heapMask, *flags, res);
486         mUsageMapperLru.emplace_front(key, value);
487         mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
488         if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
489             // remove LRU entry
490             MapperKey lruKey = mUsageMapperLru.front().first;
491             mUsageMapperCache.erase(lruKey);
492             mUsageMapperLru.pop_back();
493         }
494     } else {
495         // move entry to MRU
496         mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
497         const MapperValue &value = entry->second->second;
498         std::tie(*align, *heapMask, *flags, res) = value;
499     }
500     return res;
501 }
502 
newLinearAllocation(uint32_t capacity,C2MemoryUsage usage,std::shared_ptr<C2LinearAllocation> * allocation)503 c2_status_t C2AllocatorIon::newLinearAllocation(
504         uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation> *allocation) {
505     if (allocation == nullptr) {
506         return C2_BAD_VALUE;
507     }
508 
509     allocation->reset();
510     if (mInit != C2_OK) {
511         return mInit;
512     }
513 
514     size_t align = 0;
515     unsigned heapMask = ~0;
516     unsigned flags = 0;
517     c2_status_t ret = mapUsage(usage, capacity, &align, &heapMask, &flags);
518     if (ret && ret != C2_NO_INIT) {
519         return ret;
520     }
521 
522     std::shared_ptr<C2AllocationIon> alloc
523         = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, mTraits->id);
524     ret = alloc->status();
525     if (ret == C2_OK) {
526         *allocation = alloc;
527     }
528     return ret;
529 }
530 
// Recreates an allocation from a previously shared C2HandleIon. On success the
// handle struct itself is deleted; its buffer fd now belongs to the new
// allocation (presumably native_handle_delete frees only the struct, not the
// fds — TODO confirm against libcutils).
c2_status_t C2AllocatorIon::priorLinearAllocation(
        const C2Handle *handle, std::shared_ptr<C2LinearAllocation> *allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleIon::isValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
    // import on a dup()-ed ion client fd; ownership transfers to the allocation
    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), mTraits->id);
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        native_handle_delete(const_cast<native_handle_t*>(
                reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}
554 
// True if |o| is a handle this allocator produced (see C2HandleIon::isValid).
bool C2AllocatorIon::isValid(const C2Handle* const o) {
    return C2HandleIon::isValid(o);
}
558 
559 } // namespace android
560 
561