/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2AllocatorIon"
#include <utils/Log.h>

#include <list>

#include <ion/ion.h>
#include <sys/mman.h>
#include <unistd.h> // getpagesize, size_t, close, dup

#include <C2AllocatorIon.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2ErrnoUtils.h>
#include <C2HandleIonInternal.h>

#include <android-base/properties.h>
#include <media/stagefright/foundation/Mutexed.h>

namespace android {

namespace {
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;

    // max padding after ion/dmabuf allocations in bytes
    constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}

/* size_t <=> int(lo), int(hi) conversions */
constexpr inline int size2intLo(size_t s) {
    return int(s & 0xFFFFFFFF);
}

constexpr inline int size2intHi(size_t s) {
    // cast to uint64_t as size_t may be 32 bits wide
    return int((uint64_t(s) >> 32) & 0xFFFFFFFF);
}

constexpr inline size_t ints2size(int intLo, int intHi) {
    // convert in 2 stages to 64 bits as intHi may be negative
    return size_t(unsigned(intLo)) | size_t(uint64_t(unsigned(intHi)) << 32);
}
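
// Illustrative example (added commentary, not in the original source): a
// 64-bit size round-trips losslessly through the two-int representation,
// even when the high word has bit 31 set and comes back as a negative int.
//
//   size_t s = 0xFFFFFFFF00000001ULL; // assumes a 64-bit size_t
//   int lo = size2intLo(s);           // 0x00000001
//   int hi = size2intHi(s);           // int(0xFFFFFFFF) == -1
//   assert(ints2size(lo, hi) == s);   // the unsigned casts undo the sign extension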

/* ========================================= ION HANDLE ======================================== */
/**
 * ION handle
 *
 * There can be only one ion client per process; this is captured in the ion fd that is passed
 * to the constructor, but this should be managed by the ion buffer allocator/mapper.
 *
 * ion uses ion_user_handle_t for buffers. We don't store this in the native handle as
 * it requires an ion_free to decref. Instead, we share the buffer to get an fd that also holds
 * a refcount.
 *
 * This handle will not capture mapped fd-s as updating that would require a global mutex.
 */

const C2Handle C2HandleIon::cHeader = {
    C2HandleIon::version,
    C2HandleIon::numFds,
    C2HandleIon::numInts,
    {}
};

// static
bool C2HandleIon::IsValid(const C2Handle * const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }
    const C2HandleIon *other = static_cast<const C2HandleIon*>(o);
    return other->mInts.mMagic == kMagic;
}
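
// Illustrative sketch (added commentary, not in the original source): callers
// are expected to validate a handle before treating it as a C2HandleIon, as
// priorLinearAllocation() below does.
//
//   const C2Handle *h = ...;                      // handle from another process
//   if (C2HandleIon::IsValid(h)) {
//       const C2HandleIon *ih = static_cast<const C2HandleIon*>(h);
//       // safe to read ih->bufferFd() / ih->size() here
//   }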

// TODO: is the dup of an ion fd identical to ion_share?

/* ======================================= ION ALLOCATION ====================================== */
class C2AllocationIon : public C2LinearAllocation {
public:
    /* Interface methods */
    virtual c2_status_t map(
        size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence,
        void **addr /* nonnull */) override;
    virtual c2_status_t unmap(void *addr, size_t size, C2Fence *fenceFd) override;
    virtual ~C2AllocationIon() override;
    virtual const C2Handle *handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const override;

    // internal methods
    C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
    C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id);

    c2_status_t status() const;

protected:
    class Impl;
    class ImplV2;
    Impl *mImpl;

    // TODO: we could make this encapsulate shared_ptr and be copyable
    C2_DO_NOT_COPY(C2AllocationIon);
};

class C2AllocationIon::Impl {
protected:
    /**
     * Constructs an ion allocation.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param buffer    ion buffer user handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    Impl(int ionFd, size_t capacity, int bufferFd, ion_user_handle_t buffer, C2Allocator::id_t id, int err)
        : mIonFd(ionFd),
          mHandle(bufferFd, capacity),
          mBuffer(buffer),
          mId(id),
          mInit(c2_map_errno<ENOMEM, EACCES, EINVAL>(err)),
          mMapFd(-1) {
        if (mInit != C2_OK) {
            // close ionFd now on error
            if (mIonFd >= 0) {
                close(mIonFd);
                mIonFd = -1;
            }
            // C2_CHECK(bufferFd < 0);
            // C2_CHECK(buffer < 0);
        }
    }

public:
    /**
     * Constructs an ion allocation by importing a shared buffer fd.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object)
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * import failed.
     */
    static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id);

    /**
     * Constructs an ion allocation by allocating an ion buffer.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param size      size of allocation
     * \param align     desired alignment of allocation
     * \param heapMask  mask of heaps considered
     * \param flags     ion allocation flags
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * allocation failed.
     */
    static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);

    c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
        (void)fence; // TODO: wait for fence
        *addr = nullptr;
        if (!mMappings.lock()->empty()) {
            ALOGV("multiple map");
            // TODO: technically we should return DUPLICATE here, but our block views don't
            // actually unmap, so we end up remapping an ion buffer multiple times.
            //
            // return C2_DUPLICATE;
        }
        if (size == 0) {
            return C2_BAD_VALUE;
        }

        int prot = PROT_NONE;
        int flags = MAP_SHARED;
        if (usage.expected & C2MemoryUsage::CPU_READ) {
            prot |= PROT_READ;
        }
        if (usage.expected & C2MemoryUsage::CPU_WRITE) {
            prot |= PROT_WRITE;
        }

        size_t alignmentBytes = offset % PAGE_SIZE;
        size_t mapOffset = offset - alignmentBytes;
        size_t mapSize = size + alignmentBytes;
        Mapping map = { nullptr, alignmentBytes, mapSize };

        c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
        if (map.addr) {
            mMappings.lock()->push_back(map);
        }
        return err;
    }
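
    // Illustrative worked example (added commentary, not in the original
    // source): mmap offsets must be page aligned, so map() rounds the caller's
    // offset down to a page boundary and maps a correspondingly larger window.
    // With a 4096-byte page size and a call of map(offset = 5000, size = 100):
    //
    //   alignmentBytes = 5000 % 4096 = 904
    //   mapOffset      = 5000 - 904  = 4096   (page aligned)
    //   mapSize        = 100  + 904  = 1004
    //
    // The caller receives *addr = base + 904, pointing at the requested byte.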

    c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
        Mutexed<std::list<Mapping>>::Locked mappings(mMappings);
        if (mappings->empty()) {
            ALOGD("tried to unmap unmapped buffer");
            return C2_NOT_FOUND;
        }
        for (auto it = mappings->begin(); it != mappings->end(); ++it) {
            if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
                    size + it->alignmentBytes != it->size) {
                continue;
            }
            int err = munmap(it->addr, it->size);
            if (err != 0) {
                ALOGD("munmap failed");
                return c2_map_errno<EINVAL>(errno);
            }
            if (fence) {
                *fence = C2Fence(); // not using fences
            }
            (void)mappings->erase(it);
            ALOGV("successfully unmapped: addr=%p size=%zu fd=%d", addr, size,
                      mHandle.bufferFd());
            return C2_OK;
        }
        ALOGD("unmap failed to find specified map");
        return C2_BAD_VALUE;
    }

    virtual ~Impl() {
        Mutexed<std::list<Mapping>>::Locked mappings(mMappings);
        if (!mappings->empty()) {
            ALOGD("Dangling mappings!");
            for (const Mapping &map : *mappings) {
                (void)munmap(map.addr, map.size);
            }
        }
        if (mMapFd >= 0) {
            close(mMapFd);
            mMapFd = -1;
        }
        if (mInit == C2_OK) {
            if (mBuffer >= 0) {
                (void)ion_free(mIonFd, mBuffer);
            }
            native_handle_close(&mHandle);
        }
        if (mIonFd >= 0) {
            close(mIonFd);
        }
    }

    c2_status_t status() const {
        return mInit;
    }

    const C2Handle *handle() const {
        return &mHandle;
    }

    C2Allocator::id_t getAllocatorId() const {
        return mId;
    }

    virtual ion_user_handle_t ionHandle() const {
        return mBuffer;
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        if (mMapFd == -1) {
            int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
                              flags, mapOffset, (unsigned char**)base, &mMapFd);
            ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
                  "offset = %zu) returned (%d)",
                  mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
            if (ret) {
                mMapFd = -1;
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(-ret);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        } else {
            *base = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
            ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
                  "returned (%d)",
                  mapSize, prot, flags, mMapFd, mapOffset, errno);
            if (*base == MAP_FAILED) {
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(errno);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        }
        return err;
    }
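
    // Added commentary (not in the original source): the first map() goes
    // through ion_map(), which both maps the buffer and hands back a map fd;
    // that fd is cached in mMapFd so later mappings can be made with a plain
    // mmap() on it, without another ion ioctl.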

    int mIonFd;
    C2HandleIon mHandle;
    ion_user_handle_t mBuffer;
    C2Allocator::id_t mId;
    c2_status_t mInit;
    int mMapFd; // only one for now
    struct Mapping {
        void *addr;
        size_t alignmentBytes;
        size_t size;
    };
    Mutexed<std::list<Mapping>> mMappings;
};

class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
public:
    /**
     * Constructs an ion allocation for platforms with the new (ion_4.12.h) API.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    ImplV2(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id, int err)
        : Impl(ionFd, capacity, bufferFd, -1 /*buffer*/, id, err) {
    }

    virtual ~ImplV2() = default;

    virtual ion_user_handle_t ionHandle() const {
        return mHandle.bufferFd();
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        ALOGV("mmapV2(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t *)*base + alignmentBytes;
        }
        return err;
    }

};

C2AllocationIon::Impl *C2AllocationIon::Impl::Import(int ionFd, size_t capacity, int bufferFd,
        C2Allocator::id_t id) {
    int ret = 0;
    if (ion_is_legacy(ionFd)) {
        ion_user_handle_t buffer = -1;
        ret = ion_import(ionFd, bufferFd, &buffer);
        return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
    } else {
        return new ImplV2(ionFd, capacity, bufferFd, id, ret);
    }
}

C2AllocationIon::Impl *C2AllocationIon::Impl::Alloc(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
    int bufferFd = -1;
    ion_user_handle_t buffer = -1;
    // NOTE: read this value directly from the property, as this code has to run on
    // Android Q but the sysprop was only introduced in Android S.
    static size_t sPadding =
        base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
    if (sPadding > SIZE_MAX - size) {
        ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx", size, sPadding);
        // use ImplV2 as there is no allocation anyway
        return new ImplV2(ionFd, size, -1, id, -ENOMEM);
    }

    size_t allocSize = size + sPadding;
    if (align) {
        if (align - 1 > SIZE_MAX - allocSize) {
            ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx and alignment %#zx",
                  size, sPadding, align);
            // use ImplV2 as there is no allocation anyway
            return new ImplV2(ionFd, size, -1, id, -ENOMEM);
        }
        allocSize += align - 1;
        allocSize &= ~(align - 1);
    }
    int ret;

    if (ion_is_legacy(ionFd)) {
        ret = ion_alloc(ionFd, allocSize, align, heapMask, flags, &buffer);
        ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, heapMask = %d, flags = %d) "
              "returned (%d) ; buffer = %d",
              ionFd, allocSize, align, heapMask, flags, ret, buffer);
        if (ret == 0) {
            // get buffer fd for native handle constructor
            ret = ion_share(ionFd, buffer, &bufferFd);
            if (ret != 0) {
                ion_free(ionFd, buffer);
                buffer = -1;
            }
        }
        // the padding is not usable so deduct it from the advertised capacity
        return new Impl(ionFd, allocSize - sPadding, bufferFd, buffer, id, ret);
    } else {
        ret = ion_alloc_fd(ionFd, allocSize, align, heapMask, flags, &bufferFd);
        ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, heapMask = %d, flags = %d) "
              "returned (%d) ; bufferFd = %d",
              ionFd, allocSize, align, heapMask, flags, ret, bufferFd);

        // the padding is not usable so deduct it from the advertised capacity
        return new ImplV2(ionFd, allocSize - sPadding, bufferFd, id, ret);
    }
}
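
// Illustrative worked example (added commentary, not in the original source):
// with size = 100000 (0x186A0), sPadding = 0x8000 and align = 4096, Alloc()
// computes
//
//   allocSize = 0x186A0 + 0x8000            = 0x206A0
//   allocSize = (0x206A0 + 0xFFF) & ~0xFFF  = 0x21000   (rounded up to 4 KiB)
//
// and advertises a capacity of allocSize - sPadding = 0x19000 bytes, since the
// padding itself is not usable by clients.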

c2_status_t C2AllocationIon::map(
    size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
    return mImpl->map(offset, size, usage, fence, addr);
}

c2_status_t C2AllocationIon::unmap(void *addr, size_t size, C2Fence *fence) {
    return mImpl->unmap(addr, size, fence);
}

c2_status_t C2AllocationIon::status() const {
    return mImpl->status();
}

C2Allocator::id_t C2AllocationIon::getAllocatorId() const {
    return mImpl->getAllocatorId();
}

bool C2AllocationIon::equals(const std::shared_ptr<C2LinearAllocation> &other) const {
    if (!other || other->getAllocatorId() != getAllocatorId()) {
        return false;
    }
    // get user handle to compare objects
    std::shared_ptr<C2AllocationIon> otherAsIon = std::static_pointer_cast<C2AllocationIon>(other);
    return mImpl->ionHandle() == otherAsIon->mImpl->ionHandle();
}

const C2Handle *C2AllocationIon::handle() const {
    return mImpl->handle();
}

C2AllocationIon::~C2AllocationIon() {
    delete mImpl;
}

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, size_t align,
                                 unsigned heapMask, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Alloc(ionFd, size, align, heapMask, flags, id)) { }

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Import(ionFd, size, shareFd, id)) { }

/* ======================================= ION ALLOCATOR ====================================== */
C2AllocatorIon::C2AllocatorIon(id_t id)
    : mInit(C2_OK),
      mIonFd(ion_open()) {
    if (mIonFd < 0) {
        switch (errno) {
        case ENOENT:    mInit = C2_OMITTED; break;
        default:        mInit = c2_map_errno<EACCES>(errno); break;
        }
    } else {
        C2MemoryUsage minUsage = { 0, 0 };
        C2MemoryUsage maxUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
        Traits traits = { "android.allocator.ion", id, LINEAR, minUsage, maxUsage };
        mTraits = std::make_shared<Traits>(traits);
        mBlockSize = ::getpagesize();
    }
}

C2AllocatorIon::~C2AllocatorIon() {
    if (mInit == C2_OK) {
        ion_close(mIonFd);
    }
}

C2Allocator::id_t C2AllocatorIon::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}

C2String C2AllocatorIon::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}

std::shared_ptr<const C2Allocator::Traits> C2AllocatorIon::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}

void C2AllocatorIon::setUsageMapper(
        const UsageMapperFn &mapper, uint64_t minUsage, uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    Traits traits = {
        mTraits->name, mTraits->id, LINEAR,
        C2MemoryUsage(minUsage), C2MemoryUsage(maxUsage)
    };
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}
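
// Illustrative sketch (added commentary, not in the original source; the
// lambda and its heap choices are hypothetical): a vendor can install a
// custom usage mapper to pick ion heaps and flags per usage, e.g.
//
//   allocator->setUsageMapper(
//       [](C2MemoryUsage usage, size_t /* capacity */,
//          size_t *align, unsigned *heapMask, unsigned *flags) -> c2_status_t {
//           *align = 0;
//           *heapMask = ~0u;    // any heap
//           *flags = (usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE))
//                   ? ION_FLAG_CACHED : 0;
//           return C2_OK;
//       },
//       minUsage, maxUsage, blockSize);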

std::size_t C2AllocatorIon::MapperKeyHash::operator()(const MapperKey &k) const {
    return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
}

c2_status_t C2AllocatorIon::mapUsage(
        C2MemoryUsage usage, size_t capacity, size_t *align, unsigned *heapMask, unsigned *flags) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    c2_status_t res = C2_OK;
    // align capacity
    capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
    MapperKey key = std::make_pair(usage.expected, capacity);
    auto entry = mUsageMapperCache.find(key);
    if (entry == mUsageMapperCache.end()) {
        if (mUsageMapper) {
            res = mUsageMapper(usage, capacity, align, heapMask, flags);
        } else {
            *align = 0; // TODO make this 1
            *heapMask = ~0; // default mask
            if (usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)) {
                *flags = ION_FLAG_CACHED; // cache CPU accessed buffers
            } else {
                *flags = 0;  // default flags
            }
            res = C2_NO_INIT;
        }
        // add usage to cache
        MapperValue value = std::make_tuple(*align, *heapMask, *flags, res);
        mUsageMapperLru.emplace_front(key, value);
        mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
        if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
            // remove LRU entry
            MapperKey lruKey = mUsageMapperLru.back().first;
            mUsageMapperCache.erase(lruKey);
            mUsageMapperLru.pop_back();
        }
    } else {
        // move entry to MRU
        mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
        const MapperValue &value = entry->second->second;
        std::tie(*align, *heapMask, *flags, res) = value;
    }
    return res;
}
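
// Added commentary (not in the original source): the cache is keyed on
// (usage bits, block-aligned capacity), so all requests within the same
// mBlockSize-sized bucket share one mapper result. New and re-used entries
// move to the front of mUsageMapperLru, so the back holds the least recently
// used key, which is evicted once USAGE_LRU_CACHE_SIZE (1024) is exceeded.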

c2_status_t C2AllocatorIon::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation> *allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    size_t align = 0;
    unsigned heapMask = ~0;
    unsigned flags = 0;
    c2_status_t ret = mapUsage(usage, capacity, &align, &heapMask, &flags);
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, getId());
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}
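
// Illustrative sketch (added commentary, not in the original source; the
// allocator id and variable names are hypothetical): typical end-to-end use.
//
//   C2AllocatorIon allocator(C2PlatformAllocatorStore::ION);
//   std::shared_ptr<C2LinearAllocation> allocation;
//   C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
//   if (allocator.newLinearAllocation(4096, usage, &allocation) == C2_OK) {
//       void *addr = nullptr;
//       if (allocation->map(0, 4096, usage, nullptr /* fence */, &addr) == C2_OK) {
//           // ... read/write 4096 bytes at addr ...
//           allocation->unmap(addr, 4096, nullptr /* fence */);
//       }
//   }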

c2_status_t C2AllocatorIon::priorLinearAllocation(
        const C2Handle *handle, std::shared_ptr<C2LinearAllocation> *allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleIon::IsValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        native_handle_delete(const_cast<native_handle_t*>(
                reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}

bool C2AllocatorIon::CheckHandle(const C2Handle* const o) {
    return C2HandleIon::IsValid(o);
}

} // namespace android