1 // Copyright 2020 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //#define LOG_NDEBUG 0
6 #define LOG_TAG "C2VdaBqBlockPool"
7 
8 #include <v4l2_codec2/plugin_store/C2VdaBqBlockPool.h>
9 
10 #include <errno.h>
11 #include <string.h>
12 
13 #include <chrono>
14 #include <mutex>
15 #include <set>
16 #include <sstream>
17 #include <thread>
18 
19 #include <C2AllocatorGralloc.h>
20 #include <C2BlockInternal.h>
21 #include <C2SurfaceSyncObj.h>
22 #include <android/hardware/graphics/bufferqueue/2.0/IProducerListener.h>
23 #include <base/callback.h>
24 #include <log/log.h>
25 #include <ui/BufferQueueDefs.h>
26 
27 #include <v4l2_codec2/plugin_store/DrmGrallocHelpers.h>
28 #include <v4l2_codec2/plugin_store/H2BGraphicBufferProducer.h>
29 #include <v4l2_codec2/plugin_store/V4L2AllocatorId.h>
30 
31 namespace android {
32 namespace {
33 
34 // The wait time for the acquire fence, in milliseconds. A typical display refreshes at 60Hz,
35 // i.e. a 16ms period; we use twice the period as the timeout.
36 constexpr int kFenceWaitTimeMs = 32;
37 
38 // The default maximum dequeued buffer count of the IGBP. Currently we don't use
39 // this value to restrict the number of allocated buffers, so we choose a value
40 // large enough here.
41 constexpr int kMaxDequeuedBufferCount = 32u;
42 
43 }  // namespace
44 
45 using namespace std::chrono_literals;
46 
47 // We use the value of the DRM handle as the unique ID of the graphic buffers.
48 using unique_id_t = uint32_t;
49 // Type for IGBP slot index.
50 using slot_t = int32_t;
51 
52 using ::android::BufferQueueDefs::BUFFER_NEEDS_REALLOCATION;
53 using ::android::BufferQueueDefs::NUM_BUFFER_SLOTS;
54 using ::android::hardware::Return;
55 using HProducerListener = ::android::hardware::graphics::bufferqueue::V2_0::IProducerListener;
56 
57 static c2_status_t asC2Error(status_t err) {
58     switch (err) {
59     case OK:
60         return C2_OK;
61     case NO_INIT:
62         return C2_NO_INIT;
63     case BAD_VALUE:
64         return C2_BAD_VALUE;
65     case TIMED_OUT:
66         return C2_TIMED_OUT;
67     case WOULD_BLOCK:
68         return C2_BLOCKING;
69     case NO_MEMORY:
70         return C2_NO_MEMORY;
71     }
72     return C2_CORRUPTED;
73 }
74 
75 // Convert GraphicBuffer to C2GraphicAllocation and wrap producer id and slot index.
76 std::shared_ptr<C2GraphicAllocation> ConvertGraphicBuffer2C2Allocation(
77         sp<GraphicBuffer> graphicBuffer, const uint64_t igbpId, const slot_t slot,
78         C2Allocator* const allocator) {
79     ALOGV("%s(igbpId=0x%" PRIx64 ", slot=%d)", __func__, igbpId, slot);
80 
81     C2Handle* c2Handle = WrapNativeCodec2GrallocHandle(
82             graphicBuffer->handle, graphicBuffer->width, graphicBuffer->height,
83             graphicBuffer->format, graphicBuffer->usage, graphicBuffer->stride,
84             graphicBuffer->getGenerationNumber(), igbpId, slot);
85     if (!c2Handle) {
86         ALOGE("WrapNativeCodec2GrallocHandle() failed");
87         return nullptr;
88     }
89 
90     std::shared_ptr<C2GraphicAllocation> allocation;
91     const auto err = allocator->priorGraphicAllocation(c2Handle, &allocation);
92     if (err != C2_OK) {
93         ALOGE("C2Allocator::priorGraphicAllocation() failed: %d", err);
94         native_handle_close(c2Handle);
95         native_handle_delete(c2Handle);
96         return nullptr;
97     }
98 
99     return allocation;
100 }
101 
102 // This class is used to notify the listener when a certain event happens.
103 class EventNotifier : public virtual android::RefBase {
104 public:
105     class Listener {
106     public:
107         virtual ~Listener() = default;
108 
109         // Called by EventNotifier when a certain event happens.
110         virtual void onEventNotified() = 0;
111     };
112 
113     explicit EventNotifier(std::weak_ptr<Listener> listener) : mListener(std::move(listener)) {}
114     virtual ~EventNotifier() = default;
115 
116 protected:
117     void notify() {
118         ALOGV("%s()", __func__);
119         std::shared_ptr<Listener> listener = mListener.lock();
120         if (listener) {
121             listener->onEventNotified();
122         }
123     }
124 
125     std::weak_ptr<Listener> mListener;
126 };
127 
128 // Notifies the listener when the connected IGBP releases buffers.
129 class BufferReleasedNotifier : public EventNotifier, public HProducerListener {
130 public:
131     using EventNotifier::EventNotifier;
132     ~BufferReleasedNotifier() override = default;
133 
134     // HProducerListener implementation
135     Return<void> onBuffersReleased(uint32_t count) override {
136         ALOGV("%s(%u)", __func__, count);
137         if (count > 0) {
138             notify();
139         }
140         return {};
141     }
142 };
143 
144 // IGBP expects its user (e.g. C2VdaBqBlockPool) to keep the mapping from dequeued slot indices
145 // to graphic buffers. In addition, C2VdaBqBlockPool guarantees to fetch a fixed set of N buffers,
146 // identified by their buffer unique IDs. So this class stores both the mapping from slot index to
147 // buffer and the mapping from buffer unique ID to buffer.
148 // This class also implements the buffer migration performed when switching surfaces. Buffers are
149 // owned by either the component (i.e. local buffers) or the CCodec framework (i.e. remote
150 // buffers). When the surface is switched, the CCodec framework migrates the remote buffers to the
151 // new surface, and then C2VdaBqBlockPool migrates the local buffers. However, some buffers might
152 // be lost during migration. We assume that enough buffers are migrated to the new surface to
153 // continue playback. After |NUM_BUFFER_SLOTS| buffers have been dequeued from the new surface,
154 // every buffer should have been dequeued at least once; we then treat the missing buffers as lost
155 // and attach them to the new surface.
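//
// A rough usage sketch (illustrative only; the real call sequence lives in C2VdaBqBlockPool::Impl
// below, and the local variable names here are hypothetical):
//
//   TrackedGraphicBuffers tracked;
//   // After dequeueBuffer() + requestBuffer() succeed for |slot|:
//   tracked.updateSlotBuffer(slot, uniqueId, graphicBuffer);
//   tracked.onBufferDequeued(slot);
//   // The first time a unique ID is seen, register its wrapped C2GraphicAllocation:
//   if (!tracked.hasUniqueId(uniqueId)) tracked.registerUniqueId(uniqueId, allocation);
//   // On a surface switch, migrate local buffers, then re-attach lost ones on demand:
//   tracked.migrateLocalBuffers(producer, producerId, generation, usage);
//   if (tracked.needMigrateLostBuffers())
//       tracked.migrateLostBuffer(allocator, producer, producerId, &newSlot);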
156 class TrackedGraphicBuffers {
157 public:
158     using value_type = std::tuple<slot_t, unique_id_t, std::shared_ptr<C2GraphicAllocation>>;
159 
160     TrackedGraphicBuffers() = default;
161     ~TrackedGraphicBuffers() = default;
162 
163     void reset() {
164         mSlotId2GraphicBuffer.clear();
165         mSlotId2PoolData.clear();
166         mAllocationsRegistered.clear();
167         mAllocationsToBeMigrated.clear();
168         mMigrateLostBufferCounter = 0;
169         mGenerationToBeMigrated = 0;
170     }
171 
172     void registerUniqueId(unique_id_t uniqueId, std::shared_ptr<C2GraphicAllocation> allocation) {
173         ALOGV("%s(uniqueId=%u)", __func__, uniqueId);
174         ALOG_ASSERT(allocation != nullptr);
175 
176         mAllocationsRegistered[uniqueId] = std::move(allocation);
177     }
178 
179     std::shared_ptr<C2GraphicAllocation> getRegisteredAllocation(unique_id_t uniqueId) {
180         const auto iter = mAllocationsRegistered.find(uniqueId);
181         ALOG_ASSERT(iter != mAllocationsRegistered.end());
182 
183         return iter->second;
184     }
185 
186     bool hasUniqueId(unique_id_t uniqueId) const {
187         return mAllocationsRegistered.find(uniqueId) != mAllocationsRegistered.end() ||
188                mAllocationsToBeMigrated.find(uniqueId) != mAllocationsToBeMigrated.end();
189     }
190 
191     void updateSlotBuffer(slot_t slotId, unique_id_t uniqueId, sp<GraphicBuffer> slotBuffer) {
192         ALOGV("%s(slotId=%d)", __func__, slotId);
193         ALOG_ASSERT(slotBuffer != nullptr);
194 
195         mSlotId2GraphicBuffer[slotId] = std::make_pair(uniqueId, std::move(slotBuffer));
196     }
197 
198     std::pair<unique_id_t, sp<GraphicBuffer>> getSlotBuffer(slot_t slotId) const {
199         const auto iter = mSlotId2GraphicBuffer.find(slotId);
200         ALOG_ASSERT(iter != mSlotId2GraphicBuffer.end());
201 
202         return iter->second;
203     }
204 
205     bool hasSlotId(slot_t slotId) const {
206         return mSlotId2GraphicBuffer.find(slotId) != mSlotId2GraphicBuffer.end();
207     }
208 
209     void updatePoolData(slot_t slotId, std::weak_ptr<C2BufferQueueBlockPoolData> poolData) {
210         ALOGV("%s(slotId=%d)", __func__, slotId);
211         ALOG_ASSERT(hasSlotId(slotId));
212 
213         mSlotId2PoolData[slotId] = std::move(poolData);
214     }
215 
216     bool migrateLocalBuffers(H2BGraphicBufferProducer* const producer, uint64_t producerId,
217                              uint32_t generation, uint64_t usage) {
218         ALOGV("%s(producerId=%" PRIx64 ", generation=%u, usage=%" PRIx64 ")", __func__, producerId,
219               generation, usage);
220 
221         mGenerationToBeMigrated = generation;
222         mUsageToBeMigrated = usage;
223 
224         // Move all buffers to mAllocationsToBeMigrated.
225         for (auto& pair : mAllocationsRegistered) {
226             if (!mAllocationsToBeMigrated.insert(pair).second) {
227                 ALOGE("%s() duplicated uniqueId=%u", __func__, pair.first);
228                 return false;
229             }
230         }
231         mAllocationsRegistered.clear();
232 
233         ALOGV("%s(producerId=%" PRIx64 ", generation=%u, usage=%" PRIx64 ") before %s", __func__,
234               producerId, generation, usage, debugString().c_str());
235 
236         // Migrate local buffers.
237         std::map<slot_t, std::pair<unique_id_t, sp<GraphicBuffer>>> newSlotId2GraphicBuffer;
238         std::map<slot_t, std::weak_ptr<C2BufferQueueBlockPoolData>> newSlotId2PoolData;
239         for (const auto& pair : mSlotId2PoolData) {
240             auto oldSlot = pair.first;
241             auto poolData = pair.second.lock();
242             if (!poolData) {
243                 continue;
244             }
245 
246             unique_id_t uniqueId;
247             sp<GraphicBuffer> slotBuffer;
248             std::shared_ptr<C2SurfaceSyncMemory> syncMem;
249             std::tie(uniqueId, slotBuffer) = getSlotBuffer(oldSlot);
250             slot_t newSlot = poolData->migrate(producer->getBase(), mGenerationToBeMigrated,
251                                                mUsageToBeMigrated, producerId, slotBuffer,
252                                                slotBuffer->getGenerationNumber(),
253                                                syncMem);
254             if (newSlot < 0) {
255                 ALOGW("%s() Failed to migrate local buffer: uniqueId=%u, oldSlot=%d", __func__,
256                       uniqueId, oldSlot);
257                 continue;
258             }
259 
260             ALOGV("%s() migrated buffer: uniqueId=%u, oldSlot=%d, newSlot=%d", __func__, uniqueId,
261                   oldSlot, newSlot);
262             newSlotId2GraphicBuffer[newSlot] = std::make_pair(uniqueId, std::move(slotBuffer));
263             newSlotId2PoolData[newSlot] = std::move(poolData);
264 
265             if (!moveBufferToRegistered(uniqueId)) {
266                 ALOGE("%s() failed to move buffer to registered, uniqueId=%u", __func__, uniqueId);
267                 return false;
268             }
269         }
270         mSlotId2GraphicBuffer = std::move(newSlotId2GraphicBuffer);
271         mSlotId2PoolData = std::move(newSlotId2PoolData);
272 
273         // Choose a number large enough to ensure every buffer is dequeued at least once.
274         mMigrateLostBufferCounter = NUM_BUFFER_SLOTS;
275         ALOGD("%s() migrated %zu local buffers", __func__, mAllocationsRegistered.size());
276         return true;
277     }
278 
279     bool needMigrateLostBuffers() const {
280         return mMigrateLostBufferCounter == 0 && !mAllocationsToBeMigrated.empty();
281     }
282 
283     status_t migrateLostBuffer(C2Allocator* const allocator,
284                                H2BGraphicBufferProducer* const producer, const uint64_t producerId,
285                                slot_t* newSlot) {
286         ALOGV("%s() %s", __func__, debugString().c_str());
287 
288         if (!needMigrateLostBuffers()) {
289             return NO_INIT;
290         }
291 
292         auto iter = mAllocationsToBeMigrated.begin();
293         const unique_id_t uniqueId = iter->first;
294         const C2Handle* c2Handle = iter->second->handle();
295 
296         // Convert C2GraphicAllocation to GraphicBuffer, and update generation and usage.
297         uint32_t width, height, format, stride, igbpSlot, generation;
298         uint64_t usage, igbpId;
299         _UnwrapNativeCodec2GrallocMetadata(c2Handle, &width, &height, &format, &usage, &stride,
300                                            &generation, &igbpId, &igbpSlot);
301         native_handle_t* grallocHandle = UnwrapNativeCodec2GrallocHandle(c2Handle);
302         sp<GraphicBuffer> graphicBuffer =
303                 new GraphicBuffer(grallocHandle, GraphicBuffer::CLONE_HANDLE, width, height, format,
304                                   1, mUsageToBeMigrated, stride);
305         native_handle_delete(grallocHandle);
306         if (graphicBuffer->initCheck() != android::NO_ERROR) {
307             ALOGE("Failed to create GraphicBuffer: %d", graphicBuffer->initCheck());
308             return UNKNOWN_ERROR;
309         }
310         graphicBuffer->setGenerationNumber(mGenerationToBeMigrated);
311 
312         // Attach GraphicBuffer to producer.
313         const auto attachStatus = producer->attachBuffer(graphicBuffer, newSlot);
314         if (attachStatus == TIMED_OUT || attachStatus == INVALID_OPERATION) {
315             ALOGV("%s(): No free slot yet.", __func__);
316             return TIMED_OUT;
317         }
318         if (attachStatus != OK) {
319             ALOGE("%s(): Failed to attach buffer to new producer: %d", __func__, attachStatus);
320             return attachStatus;
321         }
322         ALOGD("%s(), migrated lost buffer uniqueId=%u to slot=%d", __func__, uniqueId, *newSlot);
323         updateSlotBuffer(*newSlot, uniqueId, graphicBuffer);
324 
325         // Wrap the new GraphicBuffer to C2GraphicAllocation and register it.
326         std::shared_ptr<C2GraphicAllocation> allocation =
327                 ConvertGraphicBuffer2C2Allocation(graphicBuffer, producerId, *newSlot, allocator);
328         if (!allocation) {
329             return UNKNOWN_ERROR;
330         }
331         registerUniqueId(uniqueId, std::move(allocation));
332 
333         // Note: C2ArcProtectedGraphicAllocator releases the protected buffers once all the
334         // corresponding C2GraphicAllocations are released. To prevent the protected buffer from
335         // being released and then allocated again, we release the old C2GraphicAllocation only
336         // after the new one has been created.
337         mAllocationsToBeMigrated.erase(iter);
338 
339         return OK;
340     }
341 
342     void onBufferDequeued(slot_t slotId) {
343         ALOGV("%s(slotId=%d)", __func__, slotId);
344         unique_id_t uniqueId;
345         std::tie(uniqueId, std::ignore) = getSlotBuffer(slotId);
346 
347         moveBufferToRegistered(uniqueId);
348         if (mMigrateLostBufferCounter > 0) {
349             --mMigrateLostBufferCounter;
350         }
351     }
352 
353     size_t size() const { return mAllocationsRegistered.size() + mAllocationsToBeMigrated.size(); }
354 
355     std::string debugString() const {
356         std::stringstream ss;
357         ss << "tracked size: " << size() << std::endl;
358         ss << "  registered uniqueIds: ";
359         for (const auto& pair : mAllocationsRegistered) {
360             ss << pair.first << ", ";
361         }
362         ss << std::endl;
363         ss << "  to-be-migrated uniqueIds: ";
364         for (const auto& pair : mAllocationsToBeMigrated) {
365             ss << pair.first << ", ";
366         }
367         ss << std::endl;
368         ss << "  Count down for lost buffer migration: " << mMigrateLostBufferCounter;
369         return ss.str();
370     }
371 
372 private:
373     bool moveBufferToRegistered(unique_id_t uniqueId) {
374         ALOGV("%s(uniqueId=%u)", __func__, uniqueId);
375         auto iter = mAllocationsToBeMigrated.find(uniqueId);
376         if (iter == mAllocationsToBeMigrated.end()) {
377             return false;
378         }
379         if (!mAllocationsRegistered.insert(*iter).second) {
380             ALOGE("%s() duplicated uniqueId=%u", __func__, uniqueId);
381             return false;
382         }
383         mAllocationsToBeMigrated.erase(iter);
384 
385         return true;
386     }
387 
388     // Mapping from IGBP slots to the corresponding graphic buffers.
389     std::map<slot_t, std::pair<unique_id_t, sp<GraphicBuffer>>> mSlotId2GraphicBuffer;
390 
391     // Mapping from IGBP slots to the corresponding pool data.
392     std::map<slot_t, std::weak_ptr<C2BufferQueueBlockPoolData>> mSlotId2PoolData;
393 
394     // Track the buffers registered at the current producer.
395     std::map<unique_id_t, std::shared_ptr<C2GraphicAllocation>> mAllocationsRegistered;
396 
397     // Track the buffers that should be migrated to the current producer.
398     std::map<unique_id_t, std::shared_ptr<C2GraphicAllocation>> mAllocationsToBeMigrated;
399 
400     // The countdown for migrating lost buffers. It is decremented each time a
401     // buffer is dequeued from the IGBP. When it reaches 0, we treat the remaining
402     // buffers in |mAllocationsToBeMigrated| as lost and migrate them to the
403     // current IGBP.
404     size_t mMigrateLostBufferCounter = 0;
405 
406     // The generation and usage of the current IGBP, used to migrate buffers.
407     uint32_t mGenerationToBeMigrated = 0;
408     uint64_t mUsageToBeMigrated = 0;
409 };
410 
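// Owns the DRM render node FD and the DRM handles imported from graphic buffers' prime FDs. The
// imported handle is used as the unique ID of a graphic buffer; handles stay open until
// closeAllHandles() is called so that repeated lookups of the same buffer return the same handle.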
411 class DrmHandleManager {
412 public:
413     DrmHandleManager() { mRenderFd = openRenderFd(); }
414 
415     ~DrmHandleManager() {
416         closeAllHandles();
417         if (mRenderFd) {
418             close(*mRenderFd);
419         }
420     }
421 
422     std::optional<unique_id_t> getHandle(int primeFd) {
423         if (!mRenderFd) {
424             return std::nullopt;
425         }
426 
427         std::optional<unique_id_t> handle = getDrmHandle(*mRenderFd, primeFd);
428         // Defer closing the handle until the buffer is no longer needed, so that the DRM handle
429         // returned for this buffer stays the same across calls.
430         if (handle) {
431             mHandles.insert(*handle);
432         }
433         return handle;
434     }
435 
436     void closeAllHandles() {
437         if (!mRenderFd) {
438             return;
439         }
440 
441         for (const unique_id_t& handle : mHandles) {
442             closeDrmHandle(*mRenderFd, handle);
443         }
444         mHandles.clear();
445     }
446 
447 private:
448     std::optional<int> mRenderFd;
449     std::set<unique_id_t> mHandles;
450 };
451 
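// The implementation of C2VdaBqBlockPool. It wraps the configured HGraphicBufferProducer (IGBP),
// tracks the fixed buffer set requested via requestNewBufferSet(), migrates tracked buffers to a
// new producer when configureProducer() switches surfaces, and listens for buffer release events
// so that a timed-out fetch can be retried through setNotifyBlockAvailableCb().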
452 class C2VdaBqBlockPool::Impl : public std::enable_shared_from_this<C2VdaBqBlockPool::Impl>,
453                                public EventNotifier::Listener {
454 public:
455     using HGraphicBufferProducer = C2VdaBqBlockPool::HGraphicBufferProducer;
456 
457     explicit Impl(const std::shared_ptr<C2Allocator>& allocator);
458     // TODO: should we detach any buffers still attached to the producer in the destructor?
459     ~Impl() = default;
460 
461     // EventNotifier::Listener implementation.
462     void onEventNotified() override;
463 
464     c2_status_t fetchGraphicBlock(uint32_t width, uint32_t height, uint32_t format,
465                                   C2MemoryUsage usage,
466                                   std::shared_ptr<C2GraphicBlock>* block /* nonnull */);
467     void setRenderCallback(const C2BufferQueueBlockPool::OnRenderCallback& renderCallback);
468     void configureProducer(const sp<HGraphicBufferProducer>& producer);
469     c2_status_t requestNewBufferSet(int32_t bufferCount, uint32_t width, uint32_t height,
470                                     uint32_t format, C2MemoryUsage usage);
471     bool setNotifyBlockAvailableCb(::base::OnceClosure cb);
472     std::optional<unique_id_t> getBufferIdFromGraphicBlock(const C2Block2D& block);
473 
474 private:
475     // Requested buffer formats.
476     struct BufferFormat {
477         BufferFormat(uint32_t width, uint32_t height, uint32_t pixelFormat,
478                      C2AndroidMemoryUsage androidUsage)
479               : mWidth(width), mHeight(height), mPixelFormat(pixelFormat), mUsage(androidUsage) {}
480         BufferFormat() = default;
481 
482         uint32_t mWidth = 0;
483         uint32_t mHeight = 0;
484         uint32_t mPixelFormat = 0;
485         C2AndroidMemoryUsage mUsage = C2MemoryUsage(0);
486     };
487 
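    // Gets a free slot for fetching a buffer: first re-attaches a lost buffer if any is pending
    // migration, otherwise dequeues a slot from the IGBP and, on BUFFER_NEEDS_REALLOCATION or an
    // unknown slot, calls requestBuffer() to refresh the slot's GraphicBuffer and unique ID.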
488     status_t getFreeSlotLocked(uint32_t width, uint32_t height, uint32_t format,
489                                C2MemoryUsage usage, slot_t* slot, sp<Fence>* fence);
490 
491     // Queries the generation and usage flags from the given producer by dequeuing and requesting a
492     // buffer (the buffer is then detached and freed).
493     status_t queryGenerationAndUsageLocked(uint32_t width, uint32_t height, uint32_t pixelFormat,
494                                            C2AndroidMemoryUsage androidUsage, uint32_t* generation,
495                                            uint64_t* usage);
496 
497     // Wait on the fence. If any error occurs, cancel the buffer back to the producer.
498     status_t waitFence(slot_t slot, sp<Fence> fence);
499 
500     // Call mProducer's allowAllocation if needed.
501     status_t allowAllocation(bool allow);
502 
503     const std::shared_ptr<C2Allocator> mAllocator;
504 
505     std::unique_ptr<H2BGraphicBufferProducer> mProducer;
506     uint64_t mProducerId = 0;
507     bool mAllowAllocation = false;
508 
509     C2BufferQueueBlockPool::OnRenderCallback mRenderCallback;
510 
511     // Mutex locked at the start of each API function call to protect access to all member
512     // variables.
513     std::mutex mMutex;
514 
515     TrackedGraphicBuffers mTrackedGraphicBuffers;
516 
517     // We treat the DRM handle as the unique ID of a GraphicBuffer.
518     DrmHandleManager mDrmHandleManager;
519 
520     // Number of buffers requested on requestNewBufferSet() call.
521     size_t mBuffersRequested = 0u;
522     // Currently requested buffer formats.
523     BufferFormat mBufferFormat;
524 
525     // Listener for buffer release events.
526     sp<EventNotifier> mFetchBufferNotifier;
527 
528     std::mutex mBufferReleaseMutex;
529     // Set to true when a buffer release event arrives after dequeuing a buffer from the IGBP
530     // timed out. Reset when fetching a new slot times out, or |mNotifyBlockAvailableCb| runs.
531     bool mBufferReleasedAfterTimedOut GUARDED_BY(mBufferReleaseMutex) = false;
532     // The callback to notify the caller the buffer is available.
533     ::base::OnceClosure mNotifyBlockAvailableCb GUARDED_BY(mBufferReleaseMutex);
534 
535     // Set to true if any error occurs at previous configureProducer().
536     bool mConfigureProducerError = false;
537 };
538 
539 C2VdaBqBlockPool::Impl::Impl(const std::shared_ptr<C2Allocator>& allocator)
540       : mAllocator(allocator) {}
541 
542 c2_status_t C2VdaBqBlockPool::Impl::fetchGraphicBlock(
543         uint32_t width, uint32_t height, uint32_t format, C2MemoryUsage usage,
544         std::shared_ptr<C2GraphicBlock>* block /* nonnull */) {
545     ALOGV("%s(%ux%u)", __func__, width, height);
546     std::lock_guard<std::mutex> lock(mMutex);
547 
548     if (width != mBufferFormat.mWidth || height != mBufferFormat.mHeight ||
549         format != mBufferFormat.mPixelFormat || usage.expected != mBufferFormat.mUsage.expected) {
550         ALOGE("%s(): buffer format (%ux%u, format=%u, usage=%" PRIx64
551               ") is different from requested format (%ux%u, format=%u, usage=%" PRIx64 ")",
552               __func__, width, height, format, usage.expected, mBufferFormat.mWidth,
553               mBufferFormat.mHeight, mBufferFormat.mPixelFormat, mBufferFormat.mUsage.expected);
554         return C2_BAD_VALUE;
555     }
556     if (mConfigureProducerError || !mProducer) {
557         ALOGE("%s(): error occurred at previous configureProducer()", __func__);
558         return C2_CORRUPTED;
559     }
560 
561     slot_t slot;
562     sp<Fence> fence = new Fence();
563     const auto status = getFreeSlotLocked(width, height, format, usage, &slot, &fence);
564     if (status != OK) {
565         return asC2Error(status);
566     }
567 
568     unique_id_t uniqueId;
569     sp<GraphicBuffer> slotBuffer;
570     std::tie(uniqueId, slotBuffer) = mTrackedGraphicBuffers.getSlotBuffer(slot);
571     ALOGV("%s(): dequeued slot=%d uniqueId=%u", __func__, slot, uniqueId);
572 
573     if (!mTrackedGraphicBuffers.hasUniqueId(uniqueId)) {
574         if (mTrackedGraphicBuffers.size() >= mBuffersRequested) {
575             // The dequeued slot has a pre-allocated buffer whose size and format are the same as
576             // currently requested (but it was not dequeued during the allocation cycle). Just detach
577             // it to free this slot, and try dequeueBuffer again.
578             ALOGD("dequeued a new slot %d but already allocated enough buffers. Detach it.", slot);
579 
580             if (mProducer->detachBuffer(slot) != OK) {
581                 return C2_CORRUPTED;
582             }
583 
584             const auto allocationStatus = allowAllocation(false);
585             if (allocationStatus != OK) {
586                 return asC2Error(allocationStatus);
587             }
588             return C2_TIMED_OUT;
589         }
590 
591         std::shared_ptr<C2GraphicAllocation> allocation =
592                 ConvertGraphicBuffer2C2Allocation(slotBuffer, mProducerId, slot, mAllocator.get());
593         if (!allocation) {
594             return C2_CORRUPTED;
595         }
596         mTrackedGraphicBuffers.registerUniqueId(uniqueId, std::move(allocation));
597 
598         ALOGV("%s(): mTrackedGraphicBuffers.size=%zu", __func__, mTrackedGraphicBuffers.size());
599         if (mTrackedGraphicBuffers.size() == mBuffersRequested) {
600             ALOGV("Tracked IGBP slots: %s", mTrackedGraphicBuffers.debugString().c_str());
601             // We have already allocated enough buffers; set allowAllocation to false to restrict
602             // future dequeues to the already-allocated slots.
603             const auto allocationStatus = allowAllocation(false);
604             if (allocationStatus != OK) {
605                 return asC2Error(allocationStatus);
606             }
607         }
608     }
609 
610     std::shared_ptr<C2SurfaceSyncMemory> syncMem;
611     std::shared_ptr<C2GraphicAllocation> allocation =
612             mTrackedGraphicBuffers.getRegisteredAllocation(uniqueId);
613     auto poolData = std::make_shared<C2BufferQueueBlockPoolData>(
614             slotBuffer->getGenerationNumber(), mProducerId, slot,
615             mProducer->getBase(), syncMem, 0);
616     mTrackedGraphicBuffers.updatePoolData(slot, poolData);
617     *block = _C2BlockFactory::CreateGraphicBlock(std::move(allocation), std::move(poolData));
618     if (*block == nullptr) {
619         ALOGE("failed to create GraphicBlock: no memory");
620         return C2_NO_MEMORY;
621     }
622 
623     // Wait on the acquire fence as the last step before returning the buffer.
624     if (fence) {
625         const auto fenceStatus = waitFence(slot, fence);
626         if (fenceStatus != OK) {
627             return asC2Error(fenceStatus);
628         }
629 
630         if (mRenderCallback) {
631             nsecs_t signalTime = fence->getSignalTime();
632             if (signalTime >= 0 && signalTime < INT64_MAX) {
633                 mRenderCallback(mProducerId, slot, signalTime);
634             } else {
635                 ALOGV("got fence signal time of %" PRId64 " nsec", signalTime);
636             }
637         }
638     }
639 
640     return C2_OK;
641 }
642 
643 status_t C2VdaBqBlockPool::Impl::getFreeSlotLocked(uint32_t width, uint32_t height, uint32_t format,
644                                                    C2MemoryUsage usage, slot_t* slot,
645                                                    sp<Fence>* fence) {
646     if (mTrackedGraphicBuffers.needMigrateLostBuffers()) {
647         slot_t newSlot;
648         if (mTrackedGraphicBuffers.migrateLostBuffer(mAllocator.get(), mProducer.get(), mProducerId,
649                                                      &newSlot) == OK) {
650             ALOGV("%s(): migrated buffer: slot=%d", __func__, newSlot);
651             *slot = newSlot;
652             return OK;
653         }
654     }
655 
656     // Dequeue a free slot from IGBP.
657     ALOGV("%s(): try to dequeue free slot from IGBP.", __func__);
658     const auto dequeueStatus = mProducer->dequeueBuffer(width, height, format, usage, slot, fence);
659     if (dequeueStatus == TIMED_OUT) {
660         std::lock_guard<std::mutex> lock(mBufferReleaseMutex);
661         mBufferReleasedAfterTimedOut = false;
662     }
663     if (dequeueStatus != OK && dequeueStatus != BUFFER_NEEDS_REALLOCATION) {
664         return dequeueStatus;
665     }
666 
667     // Call requestBuffer to update GraphicBuffer for the slot and obtain the reference.
668     if (!mTrackedGraphicBuffers.hasSlotId(*slot) || dequeueStatus == BUFFER_NEEDS_REALLOCATION) {
669         sp<GraphicBuffer> slotBuffer = new GraphicBuffer();
670         const auto requestStatus = mProducer->requestBuffer(*slot, &slotBuffer);
671         if (requestStatus != OK) {
672             mProducer->cancelBuffer(*slot, *fence);
673             return requestStatus;
674         }
675 
676         const auto uniqueId = mDrmHandleManager.getHandle(slotBuffer->handle->data[0]);
677         if (!uniqueId) {
678             ALOGE("%s(): failed to get uniqueId of GraphicBuffer from slot=%d", __func__, *slot);
679             return UNKNOWN_ERROR;
680         }
681         mTrackedGraphicBuffers.updateSlotBuffer(*slot, *uniqueId, std::move(slotBuffer));
682     }
683 
684     ALOGV("%s(%ux%u): dequeued slot=%d", __func__, mBufferFormat.mWidth, mBufferFormat.mHeight,
685           *slot);
686     mTrackedGraphicBuffers.onBufferDequeued(*slot);
687     return OK;
688 }
689 
690 void C2VdaBqBlockPool::Impl::onEventNotified() {
691     ALOGV("%s()", __func__);
692     ::base::OnceClosure outputCb;
693     {
694         std::lock_guard<std::mutex> lock(mBufferReleaseMutex);
695 
696         mBufferReleasedAfterTimedOut = true;
697         if (mNotifyBlockAvailableCb) {
698             mBufferReleasedAfterTimedOut = false;
699             outputCb = std::move(mNotifyBlockAvailableCb);
700         }
701     }
702 
703     // Call the callback outside the lock to avoid deadlock.
704     if (outputCb) {
705         std::move(outputCb).Run();
706     }
707 }
708 
709 status_t C2VdaBqBlockPool::Impl::queryGenerationAndUsageLocked(uint32_t width, uint32_t height,
710                                                                uint32_t pixelFormat,
711                                                                C2AndroidMemoryUsage androidUsage,
712                                                                uint32_t* generation,
713                                                                uint64_t* usage) {
714     ALOGV("%s()", __func__);
715 
716     sp<Fence> fence = new Fence();
717     slot_t slot;
718     const auto dequeueStatus =
719             mProducer->dequeueBuffer(width, height, pixelFormat, androidUsage, &slot, &fence);
720     if (dequeueStatus != OK && dequeueStatus != BUFFER_NEEDS_REALLOCATION) {
721         return dequeueStatus;
722     }
723 
724     // Call requestBuffer to allocate buffer for the slot and obtain the reference.
725     // Get generation number here.
726     sp<GraphicBuffer> slotBuffer = new GraphicBuffer();
727     const auto requestStatus = mProducer->requestBuffer(slot, &slotBuffer);
728 
729     // Detach and delete the temporary buffer.
730     const auto detachStatus = mProducer->detachBuffer(slot);
731     if (detachStatus != OK) {
732         return detachStatus;
733     }
734 
735     // Check requestBuffer return flag.
736     if (requestStatus != OK) {
737         return requestStatus;
738     }
739 
740     // Get generation number and usage from the slot buffer.
741     *usage = slotBuffer->getUsage();
742     *generation = slotBuffer->getGenerationNumber();
743     ALOGV("Obtained from temp buffer: generation = %u, usage = %" PRIu64 "", *generation, *usage);
744     return OK;
745 }
746 
747 status_t C2VdaBqBlockPool::Impl::waitFence(slot_t slot, sp<Fence> fence) {
748     const auto fenceStatus = fence->wait(kFenceWaitTimeMs);
749     if (fenceStatus == OK) {
750         return OK;
751     }
752 
753     const auto cancelStatus = mProducer->cancelBuffer(slot, fence);
754     if (cancelStatus != OK) {
755         ALOGE("%s(): failed to cancelBuffer(slot=%d)", __func__, slot);
756         return cancelStatus;
757     }
758 
759     if (fenceStatus == -ETIME) {  // fence wait timed out
760         ALOGV("%s(): buffer (slot=%d) fence wait timed out", __func__, slot);
761         return TIMED_OUT;
762     }
763     ALOGE("buffer fence wait error: %d", fenceStatus);
764     return fenceStatus;
765 }
766 
767 void C2VdaBqBlockPool::Impl::setRenderCallback(
768         const C2BufferQueueBlockPool::OnRenderCallback& renderCallback) {
769     ALOGV("setRenderCallback");
770     std::lock_guard<std::mutex> lock(mMutex);
771     mRenderCallback = renderCallback;
772 }
773 
774 c2_status_t C2VdaBqBlockPool::Impl::requestNewBufferSet(int32_t bufferCount, uint32_t width,
775                                                         uint32_t height, uint32_t format,
776                                                         C2MemoryUsage usage) {
777     ALOGV("%s(bufferCount=%d, size=%ux%u, format=0x%x, usage=%" PRIu64 ")", __func__, bufferCount,
778           width, height, format, usage.expected);
779 
780     if (bufferCount <= 0) {
781         ALOGE("Invalid requested buffer count = %d", bufferCount);
782         return C2_BAD_VALUE;
783     }
784 
785     std::lock_guard<std::mutex> lock(mMutex);
786     if (!mProducer) {
787         ALOGD("No HGraphicBufferProducer is configured...");
788         return C2_NO_INIT;
789     }
790     if (mBuffersRequested == static_cast<size_t>(bufferCount) && mBufferFormat.mWidth == width &&
791         mBufferFormat.mHeight == height && mBufferFormat.mPixelFormat == format &&
792         mBufferFormat.mUsage.expected == usage.expected) {
793         ALOGD("%s() Requested the same format and number of buffers, skip", __func__);
794         return C2_OK;
795     }
796 
797     const auto status = allowAllocation(true);
798     if (status != OK) {
799         return asC2Error(status);
800     }
801 
802     // Release all remaining slot buffer references here. CCodec should either cancel or queue
803     // the buffers it owns from this set before the next resolution change.
804     mTrackedGraphicBuffers.reset();
805     mDrmHandleManager.closeAllHandles();
806 
807     mBuffersRequested = static_cast<size_t>(bufferCount);
808 
809     // Store buffer formats for future usage.
810     mBufferFormat = BufferFormat(width, height, format, C2AndroidMemoryUsage(usage));
811 
812     return C2_OK;
813 }
814 
815 void C2VdaBqBlockPool::Impl::configureProducer(const sp<HGraphicBufferProducer>& producer) {
816     ALOGV("%s(producer=%p)", __func__, producer.get());
817 
818     std::lock_guard<std::mutex> lock(mMutex);
819     if (producer == nullptr) {
820         ALOGI("input producer is nullptr...");
821 
822         mProducer = nullptr;
823         mProducerId = 0;
824         mTrackedGraphicBuffers.reset();
825         mDrmHandleManager.closeAllHandles();
826         return;
827     }
828 
829     auto newProducer = std::make_unique<H2BGraphicBufferProducer>(producer);
830     uint64_t newProducerId;
831     if (newProducer->getUniqueId(&newProducerId) != OK) {
832         ALOGE("%s(): failed to get IGBP ID", __func__);
833         mConfigureProducerError = true;
834         return;
835     }
836     if (newProducerId == mProducerId) {
837         ALOGI("%s(): configure the same producer, ignore", __func__);
838         return;
839     }
840 
841     ALOGI("Producer (Surface) is going to switch... ( 0x%" PRIx64 " -> 0x%" PRIx64 " )",
842           mProducerId, newProducerId);
843     mProducer = std::move(newProducer);
844     mProducerId = newProducerId;
845     mConfigureProducerError = false;
846     mAllowAllocation = false;
847 
848     // Set allowAllocation to new producer.
849     if (allowAllocation(true) != OK) {
850         ALOGE("%s(): failed to allowAllocation(true)", __func__);
851         mConfigureProducerError = true;
852         return;
853     }
854     if (mProducer->setDequeueTimeout(0) != OK) {
855         ALOGE("%s(): failed to setDequeueTimeout(0)", __func__);
856         mConfigureProducerError = true;
857         return;
858     }
859     if (mProducer->setMaxDequeuedBufferCount(kMaxDequeuedBufferCount) != OK) {
860         ALOGE("%s(): failed to setMaxDequeuedBufferCount(%d)", __func__, kMaxDequeuedBufferCount);
861         mConfigureProducerError = true;
862         return;
863     }
864 
865     // Migrate existing buffers to the new producer.
866     if (mTrackedGraphicBuffers.size() > 0) {
867         uint32_t newGeneration = 0;
868         uint64_t newUsage = 0;
869         const status_t err = queryGenerationAndUsageLocked(
870                 mBufferFormat.mWidth, mBufferFormat.mHeight, mBufferFormat.mPixelFormat,
871                 mBufferFormat.mUsage, &newGeneration, &newUsage);
872         if (err != OK) {
873             ALOGE("failed to query generation and usage: %d", err);
874             mConfigureProducerError = true;
875             return;
876         }
877 
878         if (!mTrackedGraphicBuffers.migrateLocalBuffers(mProducer.get(), mProducerId, newGeneration,
879                                                         newUsage)) {
880             ALOGE("%s(): failed to migrateLocalBuffers()", __func__);
881             mConfigureProducerError = true;
882             return;
883         }
884 
885         if (mTrackedGraphicBuffers.size() == mBuffersRequested) {
886             if (allowAllocation(false) != OK) {
887                 ALOGE("%s(): failed to allowAllocation(false)", __func__);
888                 mConfigureProducerError = true;
889                 return;
890             }
891         }
892     }
893 
894     // hack(b/146409777): Try to connect ARC-specific listener first.
895     sp<BufferReleasedNotifier> listener = new BufferReleasedNotifier(weak_from_this());
896     if (mProducer->connect(listener, 'ARC\0', false) == OK) {
897         ALOGI("connected to ARC-specific IGBP listener.");
898         mFetchBufferNotifier = listener;
899     }
900 
901     // There might be free buffers at the new producer, notify the client if needed.
902     onEventNotified();
903 }
904 
905 bool C2VdaBqBlockPool::Impl::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
906     ALOGV("%s()", __func__);
907     if (mFetchBufferNotifier == nullptr) {
908         return false;
909     }
910 
911     ::base::OnceClosure outputCb;
912     {
913         std::lock_guard<std::mutex> lock(mBufferReleaseMutex);
914 
915         // If any buffer was released after dequeueBuffer() timed out, we can notify the caller
916         // directly.
917         if (mBufferReleasedAfterTimedOut) {
918             mBufferReleasedAfterTimedOut = false;
919             outputCb = std::move(cb);
920         } else {
921             mNotifyBlockAvailableCb = std::move(cb);
922         }
923     }
924 
925     // Call the callback outside the lock to avoid deadlock.
926     if (outputCb) {
927         std::move(outputCb).Run();
928     }
929     return true;
930 }
931 
932 std::optional<unique_id_t> C2VdaBqBlockPool::Impl::getBufferIdFromGraphicBlock(
933         const C2Block2D& block) {
934     return mDrmHandleManager.getHandle(block.handle()->data[0]);
935 }
936 
937 status_t C2VdaBqBlockPool::Impl::allowAllocation(bool allow) {
938     ALOGV("%s(%d)", __func__, allow);
939 
940     if (!mProducer) {
941         ALOGW("%s() mProducer is not initialized", __func__);
942         return NO_INIT;
943     }
944     if (mAllowAllocation == allow) {
945         return OK;
946     }
947 
948     const auto status = mProducer->allowAllocation(allow);
949     if (status == OK) {
950         mAllowAllocation = allow;
951     }
952     return status;
953 }
954 
955 C2VdaBqBlockPool::C2VdaBqBlockPool(const std::shared_ptr<C2Allocator>& allocator,
956                                    const local_id_t localId)
957       : C2BufferQueueBlockPool(allocator, localId), mLocalId(localId), mImpl(new Impl(allocator)) {}
958 
959 c2_status_t C2VdaBqBlockPool::fetchGraphicBlock(
960         uint32_t width, uint32_t height, uint32_t format, C2MemoryUsage usage,
961         std::shared_ptr<C2GraphicBlock>* block /* nonnull */) {
962     if (mImpl) {
963         return mImpl->fetchGraphicBlock(width, height, format, usage, block);
964     }
965     return C2_NO_INIT;
966 }
967 
968 void C2VdaBqBlockPool::setRenderCallback(
969         const C2BufferQueueBlockPool::OnRenderCallback& renderCallback) {
970     if (mImpl) {
971         mImpl->setRenderCallback(renderCallback);
972     }
973 }
974 
975 c2_status_t C2VdaBqBlockPool::requestNewBufferSet(int32_t bufferCount, uint32_t width,
976                                                   uint32_t height, uint32_t format,
977                                                   C2MemoryUsage usage) {
978     if (mImpl) {
979         return mImpl->requestNewBufferSet(bufferCount, width, height, format, usage);
980     }
981     return C2_NO_INIT;
982 }
983 
984 void C2VdaBqBlockPool::configureProducer(const sp<HGraphicBufferProducer>& producer) {
985     if (mImpl) {
986         mImpl->configureProducer(producer);
987     }
988 }
989 
990 bool C2VdaBqBlockPool::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
991     if (mImpl) {
992         return mImpl->setNotifyBlockAvailableCb(std::move(cb));
993     }
994     return false;
995 }
996 
997 std::optional<unique_id_t> C2VdaBqBlockPool::getBufferIdFromGraphicBlock(const C2Block2D& block) {
998     if (mImpl) {
999         return mImpl->getBufferIdFromGraphicBlock(block);
1000     }
1001     return std::nullopt;
1002 }
1003 
1004 }  // namespace android
1005