/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "C2DmaBufAllocator"

#include <BufferAllocator/BufferAllocator.h>
#include <C2Buffer.h>
#include <C2Debug.h>
#include <C2DmaBufAllocator.h>
#include <C2ErrnoUtils.h>

#include <linux/ion.h>
#include <sys/mman.h>
#include <unistd.h>  // getpagesize, size_t, close, dup
#include <utils/Log.h>

#include <list>

#include <android-base/properties.h>
#include <media/stagefright/foundation/Mutexed.h>

namespace android {

namespace {
    constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;

    // max padding after ion/dmabuf allocations in bytes
    constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}

/* =========================== BUFFER HANDLE =========================== */
/**
 * Buffer handle
 *
 * Stores dmabuf fd & metadata
 *
 * This handle will not capture mapped fd-s as updating that would require a
 * global mutex.
 */

struct C2HandleBuf : public C2Handle {
    C2HandleBuf(int bufferFd, size_t size)
        : C2Handle(cHeader),
          mFds{bufferFd},
          mInts{int(size & 0xFFFFFFFF), int((uint64_t(size) >> 32) & 0xFFFFFFFF), kMagic} {}

    static bool IsValid(const C2Handle* const o);

    int bufferFd() const { return mFds.mBuffer; }
    size_t size() const {
        return size_t(unsigned(mInts.mSizeLo)) | size_t(uint64_t(unsigned(mInts.mSizeHi)) << 32);
    }

   protected:
    struct {
        int mBuffer;  // dmabuf fd
    } mFds;
    struct {
        int mSizeLo;  // low 32-bits of size
        int mSizeHi;  // high 32-bits of size
        int mMagic;
    } mInts;

   private:
    typedef C2HandleBuf _type;
    enum {
        kMagic = '\xc2io\x00',
        numFds = sizeof(mFds) / sizeof(int),
        numInts = sizeof(mInts) / sizeof(int),
        version = sizeof(C2Handle)
    };
    // constexpr static C2Handle cHeader = { version, numFds, numInts, {} };
    const static C2Handle cHeader;
};

const C2Handle C2HandleBuf::cHeader = {
        C2HandleBuf::version, C2HandleBuf::numFds, C2HandleBuf::numInts, {}};

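// Checks that a handle has the expected header (version and fd/int counts match
// cHeader) and carries the C2HandleBuf magic tag before it is treated as a
// dmabuf handle.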
// static
bool C2HandleBuf::IsValid(const C2Handle* const o) {
    if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
        return false;
    }
    const C2HandleBuf* other = static_cast<const C2HandleBuf*>(o);
    return other->mInts.mMagic == kMagic;
}

/* =========================== DMABUF ALLOCATION =========================== */
class C2DmaBufAllocation : public C2LinearAllocation {
   public:
    /* Interface methods */
    virtual c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
                            void** addr /* nonnull */) override;
    virtual c2_status_t unmap(void* addr, size_t size, C2Fence* fenceFd) override;
    virtual ~C2DmaBufAllocation() override;
    virtual const C2Handle* handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation>& other) const override;

    // internal methods

    /**
     * Constructs an allocation via a new allocation.
     *
     * @param alloc     allocator
     * @param allocSize size used for the allocator
     * @param capacity  capacity advertised to the client
     * @param heap_name name of the dmabuf heap (device)
     * @param flags     flags
     * @param id        allocator id
     */
    C2DmaBufAllocation(BufferAllocator& alloc, size_t allocSize, size_t capacity,
                       C2String heap_name, unsigned flags, C2Allocator::id_t id);

    /**
     * Constructs an allocation by wrapping an existing allocation.
     *
     * @param size    capacity advertised to the client
     * @param shareFd dmabuf fd of the wrapped allocation
     * @param id      allocator id
     */
    C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id);

    c2_status_t status() const;

   protected:
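    // Maps the dmabuf at a page-aligned offset. *base receives the raw mmap()
    // address, while *addr points alignmentBytes past it, i.e. at the offset the
    // caller originally requested.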
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
                                    int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            *addr = (uint8_t*)*base + alignmentBytes;
        }
        return err;
    }

    C2Allocator::id_t mId;
    C2HandleBuf mHandle;
    c2_status_t mInit;
    struct Mapping {
        void* addr;
        size_t alignmentBytes;
        size_t size;
    };
    Mutexed<std::list<Mapping>> mMappings;

    // TODO: we could make this encapsulate shared_ptr and copiable
    C2_DO_NOT_COPY(C2DmaBufAllocation);
};

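// Maps (part of) the allocation for CPU access. The requested offset is rounded
// down to a page boundary for mmap(); the difference is recorded as
// alignmentBytes so that the returned address still points at the requested
// offset and unmap() can later reconstruct the original mapping.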
c2_status_t C2DmaBufAllocation::map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
                                    void** addr) {
    (void)fence;  // TODO: wait for fence
    *addr = nullptr;
    if (!mMappings.lock()->empty()) {
        ALOGV("multiple map");
        // TODO: technically we should return DUPLICATE here, but our block views
        // don't actually unmap, so we end up remapping the buffer multiple times.
        //
        // return C2_DUPLICATE;
    }
    if (size == 0) {
        return C2_BAD_VALUE;
    }

    int prot = PROT_NONE;
    int flags = MAP_SHARED;
    if (usage.expected & C2MemoryUsage::CPU_READ) {
        prot |= PROT_READ;
    }
    if (usage.expected & C2MemoryUsage::CPU_WRITE) {
        prot |= PROT_WRITE;
    }

    size_t alignmentBytes = offset % PAGE_SIZE;
    size_t mapOffset = offset - alignmentBytes;
    size_t mapSize = size + alignmentBytes;
    Mapping map = {nullptr, alignmentBytes, mapSize};

    c2_status_t err =
            mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
    if (map.addr) {
        mMappings.lock()->push_back(map);
    }
    return err;
}

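// Unmaps a mapping previously returned by map(). The matching entry is found by
// re-deriving the page-aligned base (addr - alignmentBytes) and the padded size
// that map() recorded.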
c2_status_t C2DmaBufAllocation::unmap(void* addr, size_t size, C2Fence* fence) {
    Mutexed<std::list<Mapping>>::Locked mappings(mMappings);
    if (mappings->empty()) {
        ALOGD("tried to unmap unmapped buffer");
        return C2_NOT_FOUND;
    }
    for (auto it = mappings->begin(); it != mappings->end(); ++it) {
        if (addr != (uint8_t*)it->addr + it->alignmentBytes ||
            size + it->alignmentBytes != it->size) {
            continue;
        }
        int err = munmap(it->addr, it->size);
        if (err != 0) {
            ALOGD("munmap failed");
            return c2_map_errno<EINVAL>(errno);
        }
        if (fence) {
            *fence = C2Fence();  // not using fences
        }
        (void)mappings->erase(it);
        ALOGV("successfully unmapped: %d", mHandle.bufferFd());
        return C2_OK;
    }
    ALOGD("unmap failed to find specified map");
    return C2_BAD_VALUE;
}

c2_status_t C2DmaBufAllocation::status() const {
    return mInit;
}

C2Allocator::id_t C2DmaBufAllocation::getAllocatorId() const {
    return mId;
}

bool C2DmaBufAllocation::equals(const std::shared_ptr<C2LinearAllocation>& other) const {
    if (!other || other->getAllocatorId() != getAllocatorId()) {
        return false;
    }
    // get user handle to compare objects
    std::shared_ptr<C2DmaBufAllocation> otherAsBuf =
            std::static_pointer_cast<C2DmaBufAllocation>(other);
    return mHandle.bufferFd() == otherAsBuf->mHandle.bufferFd();
}

const C2Handle* C2DmaBufAllocation::handle() const {
    return &mHandle;
}

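// Cleans up any mappings that were never unmapped, then closes the dmabuf fd
// held by the handle (only if the allocation was successfully initialized).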
C2DmaBufAllocation::~C2DmaBufAllocation() {
    Mutexed<std::list<Mapping>>::Locked mappings(mMappings);
    if (!mappings->empty()) {
        ALOGD("Dangling mappings!");
        for (const Mapping& map : *mappings) {
            int err = munmap(map.addr, map.size);
            if (err) ALOGD("munmap failed");
        }
    }
    if (mInit == C2_OK) {
        native_handle_close(&mHandle);
    }
}

C2DmaBufAllocation::C2DmaBufAllocation(BufferAllocator& alloc, size_t allocSize, size_t capacity,
                                       C2String heap_name, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(capacity), mHandle(-1, 0) {
    int bufferFd = -1;
    int ret = 0;

    bufferFd = alloc.Alloc(heap_name, allocSize, flags);
    if (bufferFd < 0) {
        ret = bufferFd;
    }

    // this may be a non-working handle if bufferFd is negative
    mHandle = C2HandleBuf(bufferFd, capacity);
    mId = id;
    mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(ret));
}

C2DmaBufAllocation::C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size), mHandle(-1, 0) {
    mHandle = C2HandleBuf(shareFd, size);
    mId = id;
    mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(0));
}

/* =========================== DMABUF ALLOCATOR =========================== */
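// Default allocator traits: linear (1D) allocations with CPU read/write as the
// maximum advertised usage. A usage mapper installed later via setUsageMapper()
// decides which dmabuf heap a given usage/capacity pair maps to.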
C2DmaBufAllocator::C2DmaBufAllocator(id_t id) : mInit(C2_OK) {
    C2MemoryUsage minUsage = {0, 0};
    C2MemoryUsage maxUsage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
    Traits traits = {"android.allocator.dmabuf", id, LINEAR, minUsage, maxUsage};
    mTraits = std::make_shared<Traits>(traits);
}

C2Allocator::id_t C2DmaBufAllocator::getId() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->id;
}

C2String C2DmaBufAllocator::getName() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits->name;
}

std::shared_ptr<const C2Allocator::Traits> C2DmaBufAllocator::getTraits() const {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    return mTraits;
}

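// Installs a client-provided usage mapper, clears the usage cache (previously
// cached heap-name/flag results may no longer apply), and updates the advertised
// usage range and the block size used to quantize capacities.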
void C2DmaBufAllocator::setUsageMapper(const UsageMapperFn& mapper __unused, uint64_t minUsage,
                                       uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    Traits traits = {mTraits->name, mTraits->id, LINEAR, C2MemoryUsage(minUsage),
                     C2MemoryUsage(maxUsage)};
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}

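// Hash for (usage, capacity) cache keys; combines the two member hashes with XOR.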
std::size_t C2DmaBufAllocator::MapperKeyHash::operator()(const MapperKey& k) const {
    return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
}

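// Resolves a (usage, capacity) pair to a dmabuf heap name and allocation flags,
// caching results in an LRU cache of up to USAGE_LRU_CACHE_SIZE entries. Without
// a usage mapper, CPU-inaccessible buffers go to "system-uncached" when that
// heap is supported and everything else to "system"; that default path reports
// C2_NO_INIT, which newLinearAllocation() treats as non-fatal.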
c2_status_t C2DmaBufAllocator::mapUsage(C2MemoryUsage usage, size_t capacity, C2String* heap_name,
                                        unsigned* flags) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    c2_status_t res = C2_OK;
    // align capacity
    capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
    MapperKey key = std::make_pair(usage.expected, capacity);
    auto entry = mUsageMapperCache.find(key);
    if (entry == mUsageMapperCache.end()) {
        if (mUsageMapper) {
            res = mUsageMapper(usage, capacity, heap_name, flags);
        } else {
            if (C2DmaBufAllocator::system_uncached_supported() &&
                !(usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)))
                *heap_name = "system-uncached";
            else
                *heap_name = "system";
            *flags = 0;
            res = C2_NO_INIT;
        }
        // add usage to cache
        MapperValue value = std::make_tuple(*heap_name, *flags, res);
        mUsageMapperLru.emplace_front(key, value);
        mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
        if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
            // remove the LRU entry, which is kept at the back of the list
            MapperKey lruKey = mUsageMapperLru.back().first;
            mUsageMapperCache.erase(lruKey);
            mUsageMapperLru.pop_back();
        }
    } else {
        // move entry to MRU
        mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
        const MapperValue& value = entry->second->second;
        std::tie(*heap_name, *flags, res) = value;
    }
    return res;
}

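// Allocates a new linear buffer: maps the requested usage to a heap name and
// flags, optionally appends the padding configured via the
// "media.c2.dmabuf.padding" property, and advertises only the unpadded capacity
// to the client.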
c2_status_t C2DmaBufAllocator::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation>* allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    C2String heap_name;
    unsigned flags = 0;
    c2_status_t ret = mapUsage(usage, capacity, &heap_name, &flags);
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

    // TODO: should we pad before mapping usage?

    // NOTE: read this value directly from the system property, as this code has to run
    // on Android Q but the sysprop was only introduced in Android S.
    static size_t sPadding =
        base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
    if (sPadding > SIZE_MAX - capacity) {
        // size would overflow
        ALOGD("dmabuf_alloc: size #%x cannot accommodate padding #%zx", capacity, sPadding);
        return C2_NO_MEMORY;
    }

    size_t allocSize = (size_t)capacity + sPadding;
    // TODO: should we align allocation size to mBlockSize to reflect the true allocation size?
    std::shared_ptr<C2DmaBufAllocation> alloc = std::make_shared<C2DmaBufAllocation>(
            mBufferAllocator, allocSize, allocSize - sPadding, heap_name, flags, getId());
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}

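// Wraps an existing dmabuf handle in a new allocation object. On success the
// incoming native handle container is deleted; its fd stays open and is from
// then on owned (and eventually closed) by the returned allocation.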
c2_status_t C2DmaBufAllocator::priorLinearAllocation(
        const C2Handle* handle, std::shared_ptr<C2LinearAllocation>* allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleBuf::IsValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleBuf* h = static_cast<const C2HandleBuf*>(handle);
    std::shared_ptr<C2DmaBufAllocation> alloc =
            std::make_shared<C2DmaBufAllocation>(h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        native_handle_delete(
                const_cast<native_handle_t*>(reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}

// static
bool C2DmaBufAllocator::CheckHandle(const C2Handle* const o) {
    return C2HandleBuf::IsValid(o);
}

}  // namespace android