/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2AllocatorIon"
#include <utils/Log.h>

#include <list>

#include <ion/ion.h>
#include <sys/mman.h>
#include <unistd.h> // getpagesize, size_t, close, dup

#include <C2AllocatorIon.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2ErrnoUtils.h>
#include <C2HandleIonInternal.h>

#include <android-base/properties.h>

namespace android {

namespace {
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;

    // max padding after ion/dmabuf allocations in bytes
    constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}

/* size_t <=> int(lo), int(hi) conversions */
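// (a native_handle_t can only carry 32-bit ints, so a size_t, which may be
// 64 bits wide, is stored in the handle as a lo/hi pair)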
constexpr inline int size2intLo(size_t s) {
    return int(s & 0xFFFFFFFF);
}

constexpr inline int size2intHi(size_t s) {
    // cast to uint64_t as size_t may be 32 bits wide
    return int((uint64_t(s) >> 32) & 0xFFFFFFFF);
}

constexpr inline size_t ints2size(int intLo, int intHi) {
    // convert in 2 stages to 64 bits as intHi may be negative
    return size_t(unsigned(intLo)) | size_t(uint64_t(unsigned(intHi)) << 32);
}

/* ========================================= ION HANDLE ======================================== */
/**
 * ION handle
 *
 * There can be only a single ion client per process; this is captured in the ion fd that is
 * passed to the constructor, but it should be managed by the ion buffer allocator/mapper.
 *
 * ion uses ion_user_handle_t for buffers. We don't store this in the native handle as
 * it requires an ion_free to decref. Instead, we share the buffer to get an fd that also holds
 * a refcount.
 *
 * This handle will not capture mapped fd-s as updating that would require a global mutex.
 */

const C2Handle C2HandleIon::cHeader = {
    C2HandleIon::version,
    C2HandleIon::numFds,
    C2HandleIon::numInts,
    {}
};

// static
bool C2HandleIon::IsValid(const C2Handle * const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }
    const C2HandleIon *other = static_cast<const C2HandleIon*>(o);
    return other->mInts.mMagic == kMagic;
}

// TODO: is the dup of an ion fd identical to ion_share?

/* ======================================= ION ALLOCATION ====================================== */
class C2AllocationIon : public C2LinearAllocation {
public:
    /* Interface methods */
    virtual c2_status_t map(
        size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence,
        void **addr /* nonnull */) override;
    virtual c2_status_t unmap(void *addr, size_t size, C2Fence *fenceFd) override;
    virtual ~C2AllocationIon() override;
    virtual const C2Handle *handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const override;

    // internal methods
    C2AllocationIon(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
    C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id);

    c2_status_t status() const;

protected:
    class Impl;
    class ImplV2;
    Impl *mImpl;

    // TODO: we could make this encapsulate shared_ptr and copiable
    C2_DO_NOT_COPY(C2AllocationIon);
};

class C2AllocationIon::Impl {
protected:
    /**
     * Constructs an ion allocation.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param buffer    ion buffer user handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    Impl(int ionFd, size_t capacity, int bufferFd, ion_user_handle_t buffer, C2Allocator::id_t id, int err)
        : mIonFd(ionFd),
          mHandle(bufferFd, capacity),
          mBuffer(buffer),
          mId(id),
          mInit(c2_map_errno<ENOMEM, EACCES, EINVAL>(err)),
          mMapFd(-1) {
        if (mInit != C2_OK) {
            // close ionFd now on error
            if (mIonFd >= 0) {
                close(mIonFd);
                mIonFd = -1;
            }
            // C2_CHECK(bufferFd < 0);
            // C2_CHECK(buffer < 0);
        }
    }

public:
    /**
     * Constructs an ion allocation by importing a shared buffer fd.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object)
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * import failed.
     */
    static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id);

    /**
     * Constructs an ion allocation by allocating an ion buffer.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param size      size of allocation
     * \param align     desired alignment of allocation
     * \param heapMask  mask of heaps considered
     * \param flags     ion allocation flags
     *
     * \return created ion allocation (implementation) which may be invalid if the
     * allocation failed.
     */
    static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);

    c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
        (void)fence; // TODO: wait for fence
        *addr = nullptr;
        if (!mMappings.empty()) {
            ALOGV("multiple map");
            // TODO: technically we should return DUPLICATE here, but our block views don't
            // actually unmap, so we end up remapping an ion buffer multiple times.
            //
            // return C2_DUPLICATE;
        }
        if (size == 0) {
            return C2_BAD_VALUE;
        }

        int prot = PROT_NONE;
        int flags = MAP_SHARED;
        if (usage.expected & C2MemoryUsage::CPU_READ) {
            prot |= PROT_READ;
        }
        if (usage.expected & C2MemoryUsage::CPU_WRITE) {
            prot |= PROT_WRITE;
        }

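        // mmap() offsets must be page aligned: map from the preceding page boundary
        // and hand back an address that skips the extra leading bytes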
        size_t alignmentBytes = offset % PAGE_SIZE;
        size_t mapOffset = offset - alignmentBytes;
        size_t mapSize = size + alignmentBytes;
        Mapping map = { nullptr, alignmentBytes, mapSize };

        c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
        if (map.addr) {
            mMappings.push_back(map);
        }
        return err;
    }

    c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
        if (mMappings.empty()) {
            ALOGD("tried to unmap unmapped buffer");
            return C2_NOT_FOUND;
        }
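        // find the mapping whose client address and size match; the client address
        // points past the alignment padding at the start of the mapping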
        for (auto it = mMappings.begin(); it != mMappings.end(); ++it) {
            if (addr != (uint8_t *)it->addr + it->alignmentBytes ||
                    size + it->alignmentBytes != it->size) {
                continue;
            }
            int err = munmap(it->addr, it->size);
            if (err != 0) {
                ALOGD("munmap failed");
                return c2_map_errno<EINVAL>(errno);
            }
            if (fence) {
                *fence = C2Fence(); // not using fences
            }
            (void)mMappings.erase(it);
            ALOGV("successfully unmapped: addr=%p size=%zu fd=%d", addr, size, mHandle.bufferFd());
            return C2_OK;
        }
        ALOGD("unmap failed to find specified map");
        return C2_BAD_VALUE;
    }

    virtual ~Impl() {
        if (!mMappings.empty()) {
            ALOGD("Dangling mappings!");
            for (const Mapping &map : mMappings) {
                (void)munmap(map.addr, map.size);
            }
        }
        if (mMapFd >= 0) {
            close(mMapFd);
            mMapFd = -1;
        }
        if (mInit == C2_OK) {
            if (mBuffer >= 0) {
                (void)ion_free(mIonFd, mBuffer);
            }
            native_handle_close(&mHandle);
        }
        if (mIonFd >= 0) {
            close(mIonFd);
        }
    }

    c2_status_t status() const {
        return mInit;
    }

    const C2Handle *handle() const {
        return &mHandle;
    }

    C2Allocator::id_t getAllocatorId() const {
        return mId;
    }

    virtual ion_user_handle_t ionHandle() const {
        return mBuffer;
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
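        // the first map on a legacy ion device goes through ion_map(), which also
        // hands back a map fd that we cache; subsequent maps mmap() that cached fd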
        if (mMapFd == -1) {
            int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
                              flags, mapOffset, (unsigned char**)base, &mMapFd);
            ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
                  "offset = %zu) returned (%d)",
                  mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
            if (ret) {
                mMapFd = -1;
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(-ret);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        } else {
            *base = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
            ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
                  "returned (%d)",
                  mapSize, prot, flags, mMapFd, mapOffset, errno);
            if (*base == MAP_FAILED) {
                *base = *addr = nullptr;
                err = c2_map_errno<EINVAL>(errno);
            } else {
                *addr = (uint8_t *)*base + alignmentBytes;
            }
        }
        return err;
    }

    int mIonFd;
    C2HandleIon mHandle;
    ion_user_handle_t mBuffer;
    C2Allocator::id_t mId;
    c2_status_t mInit;
    int mMapFd; // only one for now
    struct Mapping {
        void *addr;
        size_t alignmentBytes;
        size_t size;
    };
    std::list<Mapping> mMappings;
};

class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
public:
    /**
     * Constructs an ion allocation for platforms with the new (ion_4.12.h) API.
     *
     * \note We always create an ion allocation, even if the allocation or import fails
     * so that we can capture the error.
     *
     * \param ionFd     ion client (ownership transferred to created object)
     * \param capacity  size of allocation
     * \param bufferFd  buffer handle (ownership transferred to created object). Must be
     *                  invalid if err is not 0.
     * \param err       errno during buffer allocation or import
     */
    ImplV2(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id, int err)
        : Impl(ionFd, capacity, bufferFd, -1 /*buffer*/, id, err) {
    }

    virtual ~ImplV2() = default;

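    // the new ion API has no ion_user_handle_t; the buffer fd itself serves as
    // the identity token that equals() compares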
    virtual ion_user_handle_t ionHandle() const {
        return mHandle.bufferFd();
    }

protected:
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
            int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        ALOGV("mmapV2(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t *)*base + alignmentBytes;
        }
        return err;
    }

};

C2AllocationIon::Impl *C2AllocationIon::Impl::Import(int ionFd, size_t capacity, int bufferFd,
        C2Allocator::id_t id) {
    int ret = 0;
    if (ion_is_legacy(ionFd)) {
        ion_user_handle_t buffer = -1;
        ret = ion_import(ionFd, bufferFd, &buffer);
        return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
    } else {
        return new ImplV2(ionFd, capacity, bufferFd, id, ret);
    }
}

C2AllocationIon::Impl *C2AllocationIon::Impl::Alloc(int ionFd, size_t size, size_t align,
        unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
    int bufferFd = -1;
    ion_user_handle_t buffer = -1;
    // NOTE: read this property directly from the property as this code has to run on
    // Android Q, but the sysprop was only introduced in Android S.
    static size_t sPadding =
        base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
    if (sPadding > SIZE_MAX - size) {
        ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx", size, sPadding);
        // use ImplV2 as there is no allocation anyways
        return new ImplV2(ionFd, size, -1, id, -ENOMEM);
    }

    size_t allocSize = size + sPadding;
    if (align) {
        if (align - 1 > SIZE_MAX - allocSize) {
            ALOGD("ion_alloc: size %#zx cannot accommodate padding %#zx and alignment %#zx",
                  size, sPadding, align);
            // use ImplV2 as there is no allocation anyways
            return new ImplV2(ionFd, size, -1, id, -ENOMEM);
        }
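        // round allocSize up to the next multiple of align (the mask arithmetic
        // below assumes align is a power of two)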
        allocSize += align - 1;
        allocSize &= ~(align - 1);
    }
    int ret;

    if (ion_is_legacy(ionFd)) {
        ret = ion_alloc(ionFd, allocSize, align, heapMask, flags, &buffer);
        ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
              "returned (%d) ; buffer = %d",
              ionFd, allocSize, align, heapMask, flags, ret, buffer);
        if (ret == 0) {
            // get buffer fd for native handle constructor
            ret = ion_share(ionFd, buffer, &bufferFd);
            if (ret != 0) {
                ion_free(ionFd, buffer);
                buffer = -1;
            }
        }
        // the padding is not usable so deduct it from the advertised capacity
        return new Impl(ionFd, allocSize - sPadding, bufferFd, buffer, id, ret);
    } else {
        ret = ion_alloc_fd(ionFd, allocSize, align, heapMask, flags, &bufferFd);
        ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
              "returned (%d) ; bufferFd = %d",
              ionFd, allocSize, align, heapMask, flags, ret, bufferFd);

        // the padding is not usable so deduct it from the advertised capacity
        return new ImplV2(ionFd, allocSize - sPadding, bufferFd, id, ret);
    }
}

c2_status_t C2AllocationIon::map(
    size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
    return mImpl->map(offset, size, usage, fence, addr);
}

c2_status_t C2AllocationIon::unmap(void *addr, size_t size, C2Fence *fence) {
    return mImpl->unmap(addr, size, fence);
}

c2_status_t C2AllocationIon::status() const {
    return mImpl->status();
}

C2Allocator::id_t C2AllocationIon::getAllocatorId() const {
    return mImpl->getAllocatorId();
}

bool C2AllocationIon::equals(const std::shared_ptr<C2LinearAllocation> &other) const {
    if (!other || other->getAllocatorId() != getAllocatorId()) {
        return false;
    }
    // get user handle to compare objects
    std::shared_ptr<C2AllocationIon> otherAsIon = std::static_pointer_cast<C2AllocationIon>(other);
    return mImpl->ionHandle() == otherAsIon->mImpl->ionHandle();
}

const C2Handle *C2AllocationIon::handle() const {
    return mImpl->handle();
}

C2AllocationIon::~C2AllocationIon() {
    delete mImpl;
}

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, size_t align,
                                 unsigned heapMask, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Alloc(ionFd, size, align, heapMask, flags, id)) { }

C2AllocationIon::C2AllocationIon(int ionFd, size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size),
      mImpl(Impl::Import(ionFd, size, shareFd, id)) { }

/* ======================================= ION ALLOCATOR ====================================== */
C2AllocatorIon::C2AllocatorIon(id_t id)
    : mInit(C2_OK),
      mIonFd(ion_open()) {
    if (mIonFd < 0) {
        switch (errno) {
        case ENOENT:    mInit = C2_OMITTED; break;
        default:        mInit = c2_map_errno<EACCES>(errno); break;
        }
    } else {
        C2MemoryUsage minUsage = { 0, 0 };
        C2MemoryUsage maxUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
        Traits traits = { "android.allocator.ion", id, LINEAR, minUsage, maxUsage };
        mTraits = std::make_shared<Traits>(traits);
        mBlockSize = ::getpagesize();
    }
}

C2AllocatorIon::~C2AllocatorIon() {
    if (mInit == C2_OK) {
        ion_close(mIonFd);
    }
}

C2Allocator::id_t C2AllocatorIon::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}

C2String C2AllocatorIon::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}

std::shared_ptr<const C2Allocator::Traits> C2AllocatorIon::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}

void C2AllocatorIon::setUsageMapper(
        const UsageMapperFn &mapper, uint64_t minUsage, uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    Traits traits = {
        mTraits->name, mTraits->id, LINEAR,
        C2MemoryUsage(minUsage), C2MemoryUsage(maxUsage)
    };
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}

std::size_t C2AllocatorIon::MapperKeyHash::operator()(const MapperKey &k) const {
    return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
}

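// Translates (usage, capacity) into ion allocation parameters, memoizing the
// result in an LRU cache of up to USAGE_LRU_CACHE_SIZE entries.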
c2_status_t C2AllocatorIon::mapUsage(
        C2MemoryUsage usage, size_t capacity, size_t *align, unsigned *heapMask, unsigned *flags) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    c2_status_t res = C2_OK;
    // align capacity
    capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
    MapperKey key = std::make_pair(usage.expected, capacity);
    auto entry = mUsageMapperCache.find(key);
    if (entry == mUsageMapperCache.end()) {
        if (mUsageMapper) {
            res = mUsageMapper(usage, capacity, align, heapMask, flags);
        } else {
            *align = 0; // TODO make this 1
            *heapMask = ~0; // default mask
            if (usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)) {
                *flags = ION_FLAG_CACHED; // cache CPU accessed buffers
            } else {
                *flags = 0;  // default flags
            }
            res = C2_NO_INIT;
        }
        // add usage to cache as the MRU (front) entry
        MapperValue value = std::make_tuple(*align, *heapMask, *flags, res);
        mUsageMapperLru.emplace_front(key, value);
        mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
        if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
            // evict the LRU entry (the back of the list) from both list and cache
            MapperKey lruKey = mUsageMapperLru.back().first;
            mUsageMapperCache.erase(lruKey);
            mUsageMapperLru.pop_back();
        }
    } else {
        // move entry to MRU
        mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
        const MapperValue &value = entry->second->second;
        std::tie(*align, *heapMask, *flags, res) = value;
    }
    return res;
}

c2_status_t C2AllocatorIon::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation> *allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    size_t align = 0;
    unsigned heapMask = ~0;
    unsigned flags = 0;
    c2_status_t ret = mapUsage(usage, capacity, &align, &heapMask, &flags);
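    // C2_NO_INIT only means no usage mapper is set; continue with the defaults
    // filled in by mapUsage()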
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, getId());
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}

c2_status_t C2AllocatorIon::priorLinearAllocation(
        const C2Handle *handle, std::shared_ptr<C2LinearAllocation> *allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleIon::IsValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
    std::shared_ptr<C2AllocationIon> alloc
        = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        native_handle_delete(const_cast<native_handle_t*>(
                reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}

bool C2AllocatorIon::CheckHandle(const C2Handle* const o) {
    return C2HandleIon::IsValid(o);
}

} // namespace android