/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2AllocatorIon"
#include <utils/Log.h>

#include <list>

#include <ion/ion.h>
#include <sys/mman.h>
#include <unistd.h> // getpagesize, size_t, close, dup

#include <C2AllocatorIon.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2ErrnoUtils.h>
#include <C2HandleIonInternal.h>

#include <android-base/properties.h>
#include <media/stagefright/foundation/Mutexed.h>

namespace android {

namespace {
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;

    // max padding after ion/dmabuf allocations in bytes
    constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}

/* size_t <=> int(lo), int(hi) conversions */
constexpr inline int size2intLo(size_t s) {
    return int(s & 0xFFFFFFFF);
}

constexpr inline int size2intHi(size_t s) {
    // cast to uint64_t as size_t may be 32 bits wide
    return int((uint64_t(s) >> 32) & 0xFFFFFFFF);
}

constexpr inline size_t ints2size(int intLo, int intHi) {
    // convert in 2 stages to 64 bits as intHi may be negative
    return size_t(unsigned(intLo)) | size_t(uint64_t(unsigned(intHi)) << 32);
}
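
// Illustrative sanity check (added as a sketch, not part of the original
// logic): the lo/hi split must round-trip the full size_t range, including
// values with the high bit set, on both 32- and 64-bit builds.
static_assert(ints2size(size2intLo(SIZE_MAX), size2intHi(SIZE_MAX)) == SIZE_MAX,
              "size_t <=> int(lo), int(hi) conversion must round-trip");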

/* ========================================= ION HANDLE ======================================== */
/**
 * ION handle
 *
 * There can be only one ion client per process; this is captured in the ion fd that is passed
 * to the constructor, but it should be managed by the ion buffer allocator/mapper.
 *
 * ion uses ion_user_handle_t for buffers. We don't store this in the native handle as
 * it requires an ion_free to decref. Instead, we share the buffer to get an fd that also holds
 * a refcount.
 *
 * This handle will not capture mapped fds, as updating those would require a global mutex.
 */

const C2Handle C2HandleIon::cHeader = {
    C2HandleIon::version,
    C2HandleIon::numFds,
    C2HandleIon::numInts,
    {}
};

// static
bool C2HandleIon::IsValid(const C2Handle * const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }
    const C2HandleIon *other = static_cast<const C2HandleIon*>(o);
    return other->mInts.mMagic == kMagic;
}
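
// Illustrative caller pattern (assumed usage, not part of this file): an
// incoming C2Handle should be validated before it is downcast, since only a
// valid handle is guaranteed to carry the buffer fd and the encoded size.
//
//     if (C2HandleIon::IsValid(handle)) {
//         const C2HandleIon *ionHandle = static_cast<const C2HandleIon *>(handle);
//         int fd = ionHandle->bufferFd(); // fd also holds the buffer refcount
//         size_t capacity = ionHandle->size();
//     }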

// TODO: is the dup of an ion fd identical to ion_share?

/* ======================================= ION ALLOCATION ====================================== */
class C2AllocationIon : public C2LinearAllocation {
public:
    /* Interface methods */
    virtual c2_status_t map(
        size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence,
        void **addr /* nonnull */) override;
    virtual c2_status_t unmap(void *addr, size_t size, C2Fence *fenceFd) override;
    virtual ~C2AllocationIon() override;
    virtual const C2Handle *handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const override;

    // internal methods
    C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
    C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id);

    c2_status_t status() const;

protected:
    class Impl;
    class ImplV2;
    Impl *mImpl;

    // TODO: we could make this encapsulate shared_ptr and copiable
    C2_DO_NOT_COPY(C2AllocationIon);
};

class C2AllocationIon::Impl {
protected:
    /**
     * Constructs an ion allocation.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param buffer    ion buffer user handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    Impl(int ionFd, size_t capacity, int bufferFd, ion_user_handle_t buffer, C2Allocator::id_t id, int err)
        : mIonFd(ionFd),
          mHandle(bufferFd, capacity),
          mBuffer(buffer),
          mId(id),
          mInit(c2_map_errno<ENOMEM, EACCES, EINVAL>(err)),
          mMapFd(-1) {
        if (mInit != C2_OK) {
            // close ionFd now on error
            if (mIonFd >= 0) {
                close(mIonFd);
                mIonFd = -1;
            }
            // C2_CHECK(bufferFd < 0);
            // C2_CHECK(buffer < 0);
        }
    }

public:
    /**
     * Constructs an ion allocation by importing a shared buffer fd.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object)
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * import failed.
     */
    static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id);

    /**
     * Constructs an ion allocation by allocating an ion buffer.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param size      size of allocation
     * \param align     desired alignment of allocation
     * \param heapMask  mask of heaps considered
     * \param flags     ion allocation flags
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * allocation failed.
     */
    static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);

    c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
        static const size_t kPageSize = getpagesize();
        (void)fence; // TODO: wait for fence
        *addr = nullptr;
        if (!mMappings.lock()->empty()) {
            ALOGV("multiple map");
            // TODO: technically we should return DUPLICATE here, but our block views don't
            // actually unmap, so we end up remapping an ion buffer multiple times.
            //
            // return C2_DUPLICATE;
        }
        if (size == 0) {
            return C2_BAD_VALUE;
        }

        int prot = PROT_NONE;
        int flags = MAP_SHARED;
        if (usage.expected & C2MemoryUsage::CPU_READ) {
            prot |= PROT_READ;
        }
        if (usage.expected & C2MemoryUsage::CPU_WRITE) {
            prot |= PROT_WRITE;
        }

        size_t alignmentBytes = offset % kPageSize;
        size_t mapOffset = offset - alignmentBytes;
        size_t mapSize = size + alignmentBytes;
        Mapping map = { nullptr, alignmentBytes, mapSize };

        c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
        if (map.addr) {
            mMappings.lock()->push_back(map);
        }
        return err;
    }
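
    // Worked example (illustrative): with a 4096-byte page size, map(5000, 1000, ...)
    // computes
    //     alignmentBytes = 5000 % 4096 = 904
    //     mapOffset      = 5000 - 904 = 4096   (page aligned)
    //     mapSize        = 1000 + 904 = 1904
    // so the kernel maps from the page boundary and the returned *addr points
    // 904 bytes into the mapped region.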

    c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
        Mutexed<std::list<Mapping>>::Locked mappings(mMappings);
        if (mappings->empty()) {
            ALOGD("tried to unmap unmapped buffer");
            return C2_NOT_FOUND;
        }
        for (auto it = mappings->begin(); it != mappings->end(); ++it) {
            if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
                    size + it->alignmentBytes != it->size) {
                continue;
            }
            int err = munmap(it->addr, it->size);
            if (err != 0) {
                ALOGD("munmap failed");
                return c2_map_errno<EINVAL>(errno);
            }
            if (fence) {
                *fence = C2Fence(); // not using fences
            }
            (void)mappings->erase(it);
            ALOGV("successfully unmapped: addr=%p size=%zu fd=%d", addr, size,
                      mHandle.bufferFd());
            return C2_OK;
        }
        ALOGD("unmap failed to find specified map");
        return C2_BAD_VALUE;
    }
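
    // Continuing the worked example above (illustrative): unmap(addr, 1000)
    // matches the stored Mapping because addr - 904 == it->addr and
    // 1000 + 904 == it->size, so the full 1904-byte region from the original
    // page-aligned mmap is released in a single munmap call.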

    virtual ~Impl() {
        Mutexed<std::list<Mapping>>::Locked mappings(mMappings);
        if (!mappings->empty()) {
            ALOGD("Dangling mappings!");
            for (const Mapping &map : *mappings) {
                (void)munmap(map.addr, map.size);
            }
        }
        if (mMapFd >= 0) {
            close(mMapFd);
            mMapFd = -1;
        }
        if (mInit == C2_OK) {
            if (mBuffer >= 0) {
                (void)ion_free(mIonFd, mBuffer);
            }
            native_handle_close(&mHandle);
        }
        if (mIonFd >= 0) {
            close(mIonFd);
        }
    }

    c2_status_t status() const {
        return mInit;
    }

    const C2Handle *handle() const {
        return &mHandle;
    }

    C2Allocator::id_t getAllocatorId() const {
        return mId;
    }

    virtual ion_user_handle_t ionHandle() const {
        return mBuffer;
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        if (mMapFd == -1) {
            int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
                              flags, mapOffset, (unsigned char**)base, &mMapFd);
            ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
                  "offset = %zu) returned (%d)",
                  mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
            if (ret) {
                mMapFd = -1;
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(-ret);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        } else {
            *base = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
            ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
                  "returned (%d)",
                  mapSize, prot, flags, mMapFd, mapOffset, errno);
            if (*base == MAP_FAILED) {
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(errno);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        }
        return err;
    }

    int mIonFd;
    C2HandleIon mHandle;
    ion_user_handle_t mBuffer;
    C2Allocator::id_t mId;
    c2_status_t mInit;
    int mMapFd; // only one for now
    struct Mapping {
        void *addr;
        size_t alignmentBytes;
        size_t size;
    };
    Mutexed<std::list<Mapping>> mMappings;
};

class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
public:
    /**
     * Constructs an ion allocation for platforms with the new (ion_4.12.h) API.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    ImplV2(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id, int err)
        : Impl(ionFd, capacity, bufferFd, -1 /*buffer*/, id, err) {
    }

    virtual ~ImplV2() = default;

    virtual ion_user_handle_t ionHandle() const {
        return mHandle.bufferFd();
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        ALOGV("mmapV2(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t *)*base + alignmentBytes;
        }
        return err;
    }

};

C2AllocationIon::Impl *C2AllocationIon::Impl::Import(int ionFd, size_t capacity, int bufferFd,
        C2Allocator::id_t id) {
    int ret = 0;
    if (ion_is_legacy(ionFd)) {
        ion_user_handle_t buffer = -1;
        ret = ion_import(ionFd, bufferFd, &buffer);
        return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
    } else {
        return new ImplV2(ionFd, capacity, bufferFd, id, ret);
    }
}

C2AllocationIon::Impl *C2AllocationIon::Impl::Alloc(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
    int bufferFd = -1;
    ion_user_handle_t buffer = -1;
    // NOTE: read this value directly from the property, as this code has to run on
    // Android Q, but the sysprop was only introduced in Android S.
    static size_t sPadding =
        base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
    if (sPadding > SIZE_MAX - size) {
        ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx", size, sPadding);
        // use ImplV2 as there is no allocation anyways
        return new ImplV2(ionFd, size, -1, id, -ENOMEM);
    }

    size_t allocSize = size + sPadding;
    if (align) {
        if (align - 1 > SIZE_MAX - allocSize) {
            ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx and alignment %#zx",
                  size, sPadding, align);
            // use ImplV2 as there is no allocation anyways
            return new ImplV2(ionFd, size, -1, id, -ENOMEM);
        }
        allocSize += align - 1;
        allocSize &= ~(align - 1);
    }
    int ret;

    if (ion_is_legacy(ionFd)) {
        ret = ion_alloc(ionFd, allocSize, align, heapMask, flags, &buffer);
        ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
              "returned (%d) ; buffer = %d",
              ionFd, allocSize, align, heapMask, flags, ret, buffer);
        if (ret == 0) {
            // get buffer fd for native handle constructor
            ret = ion_share(ionFd, buffer, &bufferFd);
            if (ret != 0) {
                ion_free(ionFd, buffer);
                buffer = -1;
            }
        }
        // the padding is not usable so deduct it from the advertised capacity
        return new Impl(ionFd, allocSize - sPadding, bufferFd, buffer, id, ret);
    } else {
        ret = ion_alloc_fd(ionFd, allocSize, align, heapMask, flags, &bufferFd);
        ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
              "returned (%d) ; bufferFd = %d",
              ionFd, allocSize, align, heapMask, flags, ret, bufferFd);

        // the padding is not usable so deduct it from the advertised capacity
        return new ImplV2(ionFd, allocSize - sPadding, bufferFd, id, ret);
    }
}
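
// Worked example (illustrative, assuming a device sets the padding sysprop to
// its maximum): with sPadding = 0x8000 and align = 0x1000, a request for
// size = 0x12345 allocates
//     allocSize = (0x12345 + 0x8000 + 0xFFF) & ~0xFFF = 0x1B000
// while the capacity advertised to the client stays
//     allocSize - sPadding = 0x13000.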

c2_status_t C2AllocationIon::map(
    size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
    return mImpl->map(offset, size, usage, fence, addr);
}

c2_status_t C2AllocationIon::unmap(void *addr, size_t size, C2Fence *fence) {
    return mImpl->unmap(addr, size, fence);
}

c2_status_t C2AllocationIon::status() const {
    return mImpl->status();
}

C2Allocator::id_t C2AllocationIon::getAllocatorId() const {
    return mImpl->getAllocatorId();
}

bool C2AllocationIon::equals(const std::shared_ptr<C2LinearAllocation> &other) const {
    if (!other || other->getAllocatorId() != getAllocatorId()) {
        return false;
    }
    // get user handle to compare objects
    std::shared_ptr<C2AllocationIon> otherAsIon = std::static_pointer_cast<C2AllocationIon>(other);
    return mImpl->ionHandle() == otherAsIon->mImpl->ionHandle();
}

const C2Handle *C2AllocationIon::handle() const {
    return mImpl->handle();
}

C2AllocationIon::~C2AllocationIon() {
    delete mImpl;
}

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, size_t align,
                                 unsigned heapMask, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Alloc(ionFd, size, align, heapMask, flags, id)) { }

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Import(ionFd, size, shareFd, id)) { }

/* ======================================= ION ALLOCATOR ====================================== */
C2AllocatorIon::C2AllocatorIon(id_t id)
    : mInit(C2_OK),
      mIonFd(ion_open()) {
    if (mIonFd < 0) {
        switch (errno) {
        case ENOENT:    mInit = C2_OMITTED; break;
        default:        mInit = c2_map_errno<EACCES>(errno); break;
        }
    } else {
        C2MemoryUsage minUsage = { 0, 0 };
        C2MemoryUsage maxUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
        Traits traits = { "android.allocator.ion", id, LINEAR, minUsage, maxUsage };
        mTraits = std::make_shared<Traits>(traits);
        mBlockSize = ::getpagesize();
    }
}

C2AllocatorIon::~C2AllocatorIon() {
    if (mInit == C2_OK) {
        ion_close(mIonFd);
    }
}

C2Allocator::id_t C2AllocatorIon::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}

C2String C2AllocatorIon::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}

std::shared_ptr<const C2Allocator::Traits> C2AllocatorIon::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}

void C2AllocatorIon::setUsageMapper(
        const UsageMapperFn &mapper, uint64_t minUsage, uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    Traits traits = {
        mTraits->name, mTraits->id, LINEAR,
        C2MemoryUsage(minUsage), C2MemoryUsage(maxUsage)
    };
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}

std::size_t C2AllocatorIon::MapperKeyHash::operator()(const MapperKey &k) const {
    return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
}

c2_status_t C2AllocatorIon::mapUsage(
        C2MemoryUsage usage, size_t capacity, size_t *align, unsigned *heapMask, unsigned *flags) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    c2_status_t res = C2_OK;
    // align capacity
    capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
    MapperKey key = std::make_pair(usage.expected, capacity);
    auto entry = mUsageMapperCache.find(key);
    if (entry == mUsageMapperCache.end()) {
        if (mUsageMapper) {
            res = mUsageMapper(usage, capacity, align, heapMask, flags);
        } else {
            *align = 0; // TODO make this 1
            *heapMask = ~0; // default mask
            if (usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)) {
                *flags = ION_FLAG_CACHED; // cache CPU accessed buffers
            } else {
                *flags = 0;  // default flags
            }
            res = C2_NO_INIT;
        }
        // add usage to cache
        MapperValue value = std::make_tuple(*align, *heapMask, *flags, res);
        mUsageMapperLru.emplace_front(key, value);
        mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
        if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
            // remove the least recently used entry from the back of the list
            MapperKey lruKey = mUsageMapperLru.back().first;
            mUsageMapperCache.erase(lruKey);
            mUsageMapperLru.pop_back();
        }
    } else {
        // move entry to MRU
        mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
        const MapperValue &value = entry->second->second;
        std::tie(*align, *heapMask, *flags, res) = value;
    }
    return res;
}
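
// Illustrative cache behavior (sketch): new results are pushed to the front of
// mUsageMapperLru as most recently used, and hits are spliced back to the
// front. Once the cache holds USAGE_LRU_CACHE_SIZE (1024) distinct
// (usage, aligned capacity) keys, inserting the 1025th evicts the entry at the
// back of the list, i.e. the key that has gone unused the longest.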

c2_status_t C2AllocatorIon::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation> *allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    size_t align = 0;
    unsigned heapMask = ~0;
    unsigned flags = 0;
    c2_status_t ret = mapUsage(usage, capacity, &align, &heapMask, &flags);
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, getId());
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}
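
// Illustrative caller sketch (assumed usage, not part of this file): allocate
// a page, map it for CPU access, write, then unmap. The usage flags mirror the
// ones this allocator maps to PROT_READ/PROT_WRITE in Impl::map above.
//
//     std::shared_ptr<C2LinearAllocation> alloc;
//     C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
//     if (allocator->newLinearAllocation(4096, usage, &alloc) == C2_OK) {
//         void *addr = nullptr;
//         if (alloc->map(0 /* offset */, 4096, usage, nullptr /* fence */, &addr) == C2_OK) {
//             memset(addr, 0, 4096);
//             (void)alloc->unmap(addr, 4096, nullptr);
//         }
//     }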

c2_status_t C2AllocatorIon::priorLinearAllocation(
        const C2Handle *handle, std::shared_ptr<C2LinearAllocation> *allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleIon::IsValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        native_handle_delete(const_cast<native_handle_t*>(
                reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}

bool C2AllocatorIon::CheckHandle(const C2Handle* const o) {
    return C2HandleIon::IsValid(o);
}

} // namespace android