// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//#define LOG_NDEBUG 0
#define LOG_TAG "C2VdaBqBlockPool"

#include <v4l2_codec2/plugin_store/C2VdaBqBlockPool.h>

#include <errno.h>
#include <string.h>

#include <chrono>
#include <mutex>
#include <set>
#include <sstream>
#include <thread>

#include <C2AllocatorGralloc.h>
#include <C2BlockInternal.h>
#include <C2SurfaceSyncObj.h>
#include <android/hardware/graphics/bufferqueue/2.0/IProducerListener.h>
#include <base/callback.h>
#include <log/log.h>
#include <ui/BufferQueueDefs.h>

#include <v4l2_codec2/plugin_store/DmabufHelpers.h>
#include <v4l2_codec2/plugin_store/H2BGraphicBufferProducer.h>
#include <v4l2_codec2/plugin_store/V4L2AllocatorId.h>

namespace android {
namespace {

// The wait time for the acquire fence in milliseconds. A typical display runs at 60Hz,
// i.e. a 16ms period. We choose twice the period as the timeout.
constexpr int kFenceWaitTimeMs = 32;

// The default maximum dequeued buffer count of the IGBP. Currently we don't use
// this value to restrict the count of allocated buffers, so we choose a
// sufficiently large value here.
constexpr int kMaxDequeuedBufferCount = 32u;

} // namespace

using namespace std::chrono_literals;

// Type for IGBP slot index.
using slot_t = int32_t;

using ::android::BufferQueueDefs::BUFFER_NEEDS_REALLOCATION;
using ::android::BufferQueueDefs::NUM_BUFFER_SLOTS;
using ::android::hardware::Return;
using HProducerListener = ::android::hardware::graphics::bufferqueue::V2_0::IProducerListener;

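// Maps an Android status_t error code to the closest c2_status_t value.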
static c2_status_t asC2Error(status_t err) {
    switch (err) {
    case OK:
        return C2_OK;
    case NO_INIT:
        return C2_NO_INIT;
    case BAD_VALUE:
        return C2_BAD_VALUE;
    case TIMED_OUT:
        return C2_TIMED_OUT;
    case WOULD_BLOCK:
        return C2_BLOCKING;
    case NO_MEMORY:
        return C2_NO_MEMORY;
    }
    return C2_CORRUPTED;
}

// Converts a GraphicBuffer to a C2GraphicAllocation, wrapping the producer id and slot index.
std::shared_ptr<C2GraphicAllocation> ConvertGraphicBuffer2C2Allocation(
        sp<GraphicBuffer> graphicBuffer, const uint64_t igbpId, const slot_t slot,
        C2Allocator* const allocator) {
    ALOGV("%s(igbpId=0x%" PRIx64 ", slot=%d)", __func__, igbpId, slot);

    C2Handle* c2Handle = WrapNativeCodec2GrallocHandle(
            graphicBuffer->handle, graphicBuffer->width, graphicBuffer->height,
            graphicBuffer->format, graphicBuffer->usage, graphicBuffer->stride,
            graphicBuffer->getGenerationNumber(), igbpId, slot);
    if (!c2Handle) {
        ALOGE("WrapNativeCodec2GrallocHandle() failed");
        return nullptr;
    }

    std::shared_ptr<C2GraphicAllocation> allocation;
    const auto err = allocator->priorGraphicAllocation(c2Handle, &allocation);
    if (err != C2_OK) {
        ALOGE("C2Allocator::priorGraphicAllocation() failed: %d", err);
        native_handle_close(c2Handle);
        native_handle_delete(c2Handle);
        return nullptr;
    }

    return allocation;
}

// This class is used to notify the listener when a certain event happens.
class EventNotifier : public virtual android::RefBase {
public:
    class Listener {
    public:
        virtual ~Listener() = default;

        // Called by EventNotifier when a certain event happens.
        virtual void onEventNotified() = 0;
    };

    explicit EventNotifier(std::weak_ptr<Listener> listener) : mListener(std::move(listener)) {}
    virtual ~EventNotifier() = default;

protected:
    void notify() {
        ALOGV("%s()", __func__);
        std::shared_ptr<Listener> listener = mListener.lock();
        if (listener) {
            listener->onEventNotified();
        }
    }

    std::weak_ptr<Listener> mListener;
};

// Notifies the listener when the connected IGBP releases buffers.
class BufferReleasedNotifier : public EventNotifier, public HProducerListener {
public:
    using EventNotifier::EventNotifier;
    ~BufferReleasedNotifier() override = default;

    // HProducerListener implementation
    Return<void> onBuffersReleased(uint32_t count) override {
        ALOGV("%s(%u)", __func__, count);
        if (count > 0) {
            notify();
        }
        return {};
    }
};

// The IGBP expects its user (e.g. C2VdaBqBlockPool) to keep the mapping from dequeued slot
// indices to graphic buffers. Also, C2VdaBqBlockPool guarantees to fetch a fixed set of N
// buffers, each identified by a unique buffer ID. So this class stores the mapping from slot
// indices to buffers and the mapping from buffer unique IDs to buffers.
// This class also implements the functionality for buffer migration when switching surfaces.
// Buffers are owned by either the component (i.e. local buffers) or the CCodec framework (i.e.
// remote buffers). When switching surfaces, the CCodec framework migrates remote buffers to the
// new surface. Then C2VdaBqBlockPool migrates local buffers. However, some buffers might be
// lost during migration. We assume that enough buffers are migrated to the new surface to
// continue playback. After |NUM_BUFFER_SLOTS| buffers have been dequeued from the new surface,
// all buffers should have been dequeued at least once. Then we treat the missing buffers as
// lost, and attach these buffers to the new surface.
class TrackedGraphicBuffers {
public:
    using value_type = std::tuple<slot_t, unique_id_t, std::shared_ptr<C2GraphicAllocation>>;

    TrackedGraphicBuffers() = default;
    ~TrackedGraphicBuffers() = default;

    void reset() {
        mSlotId2GraphicBuffer.clear();
        mSlotId2PoolData.clear();
        mAllocationsRegistered.clear();
        mAllocationsToBeMigrated.clear();
        mMigrateLostBufferCounter = 0;
        mGenerationToBeMigrated = 0;
    }

    void registerUniqueId(unique_id_t uniqueId, std::shared_ptr<C2GraphicAllocation> allocation) {
        ALOGV("%s(uniqueId=%u)", __func__, uniqueId);
        ALOG_ASSERT(allocation != nullptr);

        mAllocationsRegistered[uniqueId] = std::move(allocation);
    }

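    // Returns the C2GraphicAllocation previously registered for |uniqueId| via
    // registerUniqueId(); the id must have been registered.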
    std::shared_ptr<C2GraphicAllocation> getRegisteredAllocation(unique_id_t uniqueId) {
        const auto iter = mAllocationsRegistered.find(uniqueId);
        ALOG_ASSERT(iter != mAllocationsRegistered.end());

        return iter->second;
    }

    bool hasUniqueId(unique_id_t uniqueId) const {
        return mAllocationsRegistered.find(uniqueId) != mAllocationsRegistered.end() ||
               mAllocationsToBeMigrated.find(uniqueId) != mAllocationsToBeMigrated.end();
    }

    void updateSlotBuffer(slot_t slotId, unique_id_t uniqueId, sp<GraphicBuffer> slotBuffer) {
        ALOGV("%s(slotId=%d)", __func__, slotId);
        ALOG_ASSERT(slotBuffer != nullptr);

        mSlotId2GraphicBuffer[slotId] = std::make_pair(uniqueId, std::move(slotBuffer));
    }

    std::pair<unique_id_t, sp<GraphicBuffer>> getSlotBuffer(slot_t slotId) const {
        const auto iter = mSlotId2GraphicBuffer.find(slotId);
        ALOG_ASSERT(iter != mSlotId2GraphicBuffer.end());

        return iter->second;
    }

    bool hasSlotId(slot_t slotId) const {
        return mSlotId2GraphicBuffer.find(slotId) != mSlotId2GraphicBuffer.end();
    }

    void updatePoolData(slot_t slotId, std::weak_ptr<C2BufferQueueBlockPoolData> poolData) {
        ALOGV("%s(slotId=%d)", __func__, slotId);
        ALOG_ASSERT(hasSlotId(slotId));

        mSlotId2PoolData[slotId] = std::move(poolData);
    }

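    // Migrates all buffers currently owned by the component (local buffers) to the new producer
    // identified by |producerId|, updating them to |generation| and |usage|. Returns false if
    // the bookkeeping maps become inconsistent.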
    bool migrateLocalBuffers(H2BGraphicBufferProducer* const producer, uint64_t producerId,
                             uint32_t generation, uint64_t usage) {
        ALOGV("%s(producerId=%" PRIx64 ", generation=%u, usage=%" PRIx64 ")", __func__, producerId,
              generation, usage);

        mGenerationToBeMigrated = generation;
        mUsageToBeMigrated = usage;

        // Move all buffers to mAllocationsToBeMigrated.
        for (auto& pair : mAllocationsRegistered) {
            if (!mAllocationsToBeMigrated.insert(pair).second) {
                ALOGE("%s() duplicated uniqueId=%u", __func__, pair.first);
                return false;
            }
        }
        mAllocationsRegistered.clear();

        ALOGV("%s(producerId=%" PRIx64 ", generation=%u, usage=%" PRIx64 ") before %s", __func__,
              producerId, generation, usage, debugString().c_str());

        // Migrate local buffers.
        std::map<slot_t, std::pair<unique_id_t, sp<GraphicBuffer>>> newSlotId2GraphicBuffer;
        std::map<slot_t, std::weak_ptr<C2BufferQueueBlockPoolData>> newSlotId2PoolData;
        for (const auto& pair : mSlotId2PoolData) {
            auto oldSlot = pair.first;
            auto poolData = pair.second.lock();
            if (!poolData) {
                continue;
            }

            unique_id_t uniqueId;
            sp<GraphicBuffer> slotBuffer;
            std::shared_ptr<C2SurfaceSyncMemory> syncMem;
            std::tie(uniqueId, slotBuffer) = getSlotBuffer(oldSlot);
            slot_t newSlot = poolData->migrate(producer->getBase(), mGenerationToBeMigrated,
                                               mUsageToBeMigrated, producerId, slotBuffer,
                                               slotBuffer->getGenerationNumber(), syncMem);
            if (newSlot < 0) {
                ALOGW("%s() Failed to migrate local buffer: uniqueId=%u, oldSlot=%d", __func__,
                      uniqueId, oldSlot);
                continue;
            }

            ALOGV("%s() migrated buffer: uniqueId=%u, oldSlot=%d, newSlot=%d", __func__, uniqueId,
                  oldSlot, newSlot);
            newSlotId2GraphicBuffer[newSlot] = std::make_pair(uniqueId, std::move(slotBuffer));
            newSlotId2PoolData[newSlot] = std::move(poolData);

            if (!moveBufferToRegistered(uniqueId)) {
                ALOGE("%s() failed to move buffer to registered, uniqueId=%u", __func__, uniqueId);
                return false;
            }
        }
        mSlotId2GraphicBuffer = std::move(newSlotId2GraphicBuffer);
        mSlotId2PoolData = std::move(newSlotId2PoolData);

        // Choose a number big enough to ensure all buffers are dequeued at least once.
        mMigrateLostBufferCounter = NUM_BUFFER_SLOTS;
        ALOGD("%s() migrated %zu local buffers", __func__, mAllocationsRegistered.size());
        return true;
    }

    bool needMigrateLostBuffers() const {
        return mMigrateLostBufferCounter == 0 && !mAllocationsToBeMigrated.empty();
    }

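    // Attaches one lost buffer (a buffer that was not migrated by the framework) to the current
    // producer and registers it again. Returns NO_INIT if there is nothing to migrate, TIMED_OUT
    // if the producer has no free slot yet, or another status_t error on failure. On success the
    // attached slot is returned via |newSlot|.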
    status_t migrateLostBuffer(C2Allocator* const allocator,
                               H2BGraphicBufferProducer* const producer, const uint64_t producerId,
                               slot_t* newSlot) {
        ALOGV("%s() %s", __func__, debugString().c_str());

        if (!needMigrateLostBuffers()) {
            return NO_INIT;
        }

        auto iter = mAllocationsToBeMigrated.begin();
        const unique_id_t uniqueId = iter->first;
        const C2Handle* c2Handle = iter->second->handle();

        // Convert C2GraphicAllocation to GraphicBuffer, and update generation and usage.
        uint32_t width, height, format, stride, igbpSlot, generation;
        uint64_t usage, igbpId;
        _UnwrapNativeCodec2GrallocMetadata(c2Handle, &width, &height, &format, &usage, &stride,
                                           &generation, &igbpId, &igbpSlot);
        native_handle_t* grallocHandle = UnwrapNativeCodec2GrallocHandle(c2Handle);
        sp<GraphicBuffer> graphicBuffer =
                new GraphicBuffer(grallocHandle, GraphicBuffer::CLONE_HANDLE, width, height, format,
                                  1, mUsageToBeMigrated, stride);
        native_handle_delete(grallocHandle);
        if (graphicBuffer->initCheck() != android::NO_ERROR) {
            ALOGE("Failed to create GraphicBuffer: %d", graphicBuffer->initCheck());
            return UNKNOWN_ERROR;
        }
        graphicBuffer->setGenerationNumber(mGenerationToBeMigrated);

        // Attach GraphicBuffer to producer.
        const auto attachStatus = producer->attachBuffer(graphicBuffer, newSlot);
        if (attachStatus == TIMED_OUT || attachStatus == INVALID_OPERATION) {
            ALOGV("%s(): No free slot yet.", __func__);
            return TIMED_OUT;
        }
        if (attachStatus != OK) {
            ALOGE("%s(): Failed to attach buffer to new producer: %d", __func__, attachStatus);
            return attachStatus;
        }
        ALOGD("%s(), migrated lost buffer uniqueId=%u to slot=%d", __func__, uniqueId, *newSlot);
        updateSlotBuffer(*newSlot, uniqueId, graphicBuffer);

        // Wrap the new GraphicBuffer into a C2GraphicAllocation and register it.
        std::shared_ptr<C2GraphicAllocation> allocation =
                ConvertGraphicBuffer2C2Allocation(graphicBuffer, producerId, *newSlot, allocator);
        if (!allocation) {
            return UNKNOWN_ERROR;
        }
        registerUniqueId(uniqueId, std::move(allocation));

        // Note: C2ArcProtectedGraphicAllocator releases the protected buffers if all the
        // corresponding C2GraphicAllocations are released. To prevent the protected buffer from
        // being released and then allocated again, we release the old C2GraphicAllocation only
        // after the new one has been created.
        mAllocationsToBeMigrated.erase(iter);

        return OK;
    }

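    // Called whenever a buffer is dequeued from the producer. Moves the buffer back to the
    // registered set and counts down the lost-buffer migration counter.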
    void onBufferDequeued(slot_t slotId) {
        ALOGV("%s(slotId=%d)", __func__, slotId);
        unique_id_t uniqueId;
        std::tie(uniqueId, std::ignore) = getSlotBuffer(slotId);

        moveBufferToRegistered(uniqueId);
        if (mMigrateLostBufferCounter > 0) {
            --mMigrateLostBufferCounter;
        }
    }

    size_t size() const { return mAllocationsRegistered.size() + mAllocationsToBeMigrated.size(); }

    std::string debugString() const {
        std::stringstream ss;
        ss << "tracked size: " << size() << std::endl;
        ss << "  registered uniqueIds: ";
        for (const auto& pair : mAllocationsRegistered) {
            ss << pair.first << ", ";
        }
        ss << std::endl;
        ss << "  to-be-migrated uniqueIds: ";
        for (const auto& pair : mAllocationsToBeMigrated) {
            ss << pair.first << ", ";
        }
        ss << std::endl;
        ss << "  Count down for lost buffer migration: " << mMigrateLostBufferCounter;
        return ss.str();
    }

private:
    bool moveBufferToRegistered(unique_id_t uniqueId) {
        ALOGV("%s(uniqueId=%u)", __func__, uniqueId);
        auto iter = mAllocationsToBeMigrated.find(uniqueId);
        if (iter == mAllocationsToBeMigrated.end()) {
            return false;
        }
        if (!mAllocationsRegistered.insert(*iter).second) {
            ALOGE("%s() duplicated uniqueId=%u", __func__, uniqueId);
            return false;
        }
        mAllocationsToBeMigrated.erase(iter);

        return true;
    }

    // Mapping from IGBP slots to the corresponding graphic buffers.
    std::map<slot_t, std::pair<unique_id_t, sp<GraphicBuffer>>> mSlotId2GraphicBuffer;

    // Mapping from IGBP slots to the corresponding pool data.
    std::map<slot_t, std::weak_ptr<C2BufferQueueBlockPoolData>> mSlotId2PoolData;

    // Tracks the buffers registered at the current producer.
    std::map<unique_id_t, std::shared_ptr<C2GraphicAllocation>> mAllocationsRegistered;

    // Tracks the buffers that should be migrated to the current producer.
    std::map<unique_id_t, std::shared_ptr<C2GraphicAllocation>> mAllocationsToBeMigrated;

    // The counter for migrating lost buffers. Counted down when a buffer is dequeued from the
    // IGBP. When it reaches 0, we treat the buffers remaining in |mAllocationsToBeMigrated| as
    // lost, and migrate them to the current IGBP.
    size_t mMigrateLostBufferCounter = 0;

    // The generation and usage of the current IGBP, used to migrate buffers.
    uint32_t mGenerationToBeMigrated = 0;
    uint64_t mUsageToBeMigrated = 0;
};

class C2VdaBqBlockPool::Impl : public std::enable_shared_from_this<C2VdaBqBlockPool::Impl>,
                               public EventNotifier::Listener {
public:
    using HGraphicBufferProducer = C2VdaBqBlockPool::HGraphicBufferProducer;

    explicit Impl(const std::shared_ptr<C2Allocator>& allocator);
    // TODO: should we detach buffers from the producer, if any, in the destructor?
    ~Impl() = default;

    // EventNotifier::Listener implementation.
    void onEventNotified() override;

    c2_status_t fetchGraphicBlock(uint32_t width, uint32_t height, uint32_t format,
                                  C2MemoryUsage usage,
                                  std::shared_ptr<C2GraphicBlock>* block /* nonnull */);
    void setRenderCallback(const C2BufferQueueBlockPool::OnRenderCallback& renderCallback);
    void configureProducer(const sp<HGraphicBufferProducer>& producer);
    c2_status_t requestNewBufferSet(int32_t bufferCount, uint32_t width, uint32_t height,
                                    uint32_t format, C2MemoryUsage usage);
    bool setNotifyBlockAvailableCb(::base::OnceClosure cb);
    std::optional<unique_id_t> getBufferIdFromGraphicBlock(const C2Block2D& block);

private:
    // Requested buffer formats.
    struct BufferFormat {
        BufferFormat(uint32_t width, uint32_t height, uint32_t pixelFormat,
                     C2AndroidMemoryUsage androidUsage)
              : mWidth(width), mHeight(height), mPixelFormat(pixelFormat), mUsage(androidUsage) {}
        BufferFormat() = default;

        uint32_t mWidth = 0;
        uint32_t mHeight = 0;
        uint32_t mPixelFormat = 0;
        C2AndroidMemoryUsage mUsage = C2MemoryUsage(0);
    };

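    // Gets a free slot and its fence: first tries to attach a lost buffer that still needs
    // migration, otherwise dequeues a slot from the producer and requests its GraphicBuffer
    // when needed.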
    status_t getFreeSlotLocked(uint32_t width, uint32_t height, uint32_t format,
                               C2MemoryUsage usage, slot_t* slot, sp<Fence>* fence);

    // Queries the generation and usage flags from the given producer by dequeuing and requesting a
    // buffer (the buffer is then detached and freed).
    status_t queryGenerationAndUsageLocked(uint32_t width, uint32_t height, uint32_t pixelFormat,
                                           C2AndroidMemoryUsage androidUsage, uint32_t* generation,
                                           uint64_t* usage);

    // Waits on the fence. If any error occurs, cancels the buffer back to the producer.
    status_t waitFence(slot_t slot, sp<Fence> fence);

    // Calls mProducer's allowAllocation() if needed.
    status_t allowAllocation(bool allow);

    const std::shared_ptr<C2Allocator> mAllocator;

    std::unique_ptr<H2BGraphicBufferProducer> mProducer;
    uint64_t mProducerId = 0;
    bool mAllowAllocation = false;

    C2BufferQueueBlockPool::OnRenderCallback mRenderCallback;

    // Mutex locked at the start of each API call, protecting access to all member variables.
    std::mutex mMutex;

    TrackedGraphicBuffers mTrackedGraphicBuffers;

    // Number of buffers requested on the requestNewBufferSet() call.
    size_t mBuffersRequested = 0u;
    // Currently requested buffer formats.
    BufferFormat mBufferFormat;

    // Listener for buffer release events.
    sp<EventNotifier> mFetchBufferNotifier;

    std::mutex mBufferReleaseMutex;
    // Set to true when the buffer release event is triggered after dequeueing a buffer from the
    // IGBP times out. Reset when fetching a new slot times out, or when
    // |mNotifyBlockAvailableCb| is executed.
    bool mBufferReleasedAfterTimedOut GUARDED_BY(mBufferReleaseMutex) = false;
    // The callback to notify the caller that a buffer is available.
    ::base::OnceClosure mNotifyBlockAvailableCb GUARDED_BY(mBufferReleaseMutex);

    // Set to true if any error occurs at the previous configureProducer() call.
    bool mConfigureProducerError = false;
};

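// Typical call sequence (as implied by the implementation below): configureProducer() is called
// when the client sets or switches the output surface, requestNewBufferSet() declares the buffer
// count and format, and fetchGraphicBlock() then dequeues buffers from the configured producer.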
C2VdaBqBlockPool::Impl::Impl(const std::shared_ptr<C2Allocator>& allocator)
      : mAllocator(allocator) {}

c2_status_t C2VdaBqBlockPool::Impl::fetchGraphicBlock(
        uint32_t width, uint32_t height, uint32_t format, C2MemoryUsage usage,
        std::shared_ptr<C2GraphicBlock>* block /* nonnull */) {
    ALOGV("%s(%ux%u)", __func__, width, height);
    std::lock_guard<std::mutex> lock(mMutex);

    if (width != mBufferFormat.mWidth || height != mBufferFormat.mHeight ||
        format != mBufferFormat.mPixelFormat || usage.expected != mBufferFormat.mUsage.expected) {
        ALOGE("%s(): buffer format (%ux%u, format=%u, usage=%" PRIx64
              ") is different from requested format (%ux%u, format=%u, usage=%" PRIx64 ")",
              __func__, width, height, format, usage.expected, mBufferFormat.mWidth,
              mBufferFormat.mHeight, mBufferFormat.mPixelFormat, mBufferFormat.mUsage.expected);
        return C2_BAD_VALUE;
    }
    if (mConfigureProducerError || !mProducer) {
        ALOGE("%s(): error occurred at previous configureProducer()", __func__);
        return C2_CORRUPTED;
    }

    slot_t slot;
    sp<Fence> fence = new Fence();
    const auto status = getFreeSlotLocked(width, height, format, usage, &slot, &fence);
    if (status != OK) {
        return asC2Error(status);
    }

    unique_id_t uniqueId;
    sp<GraphicBuffer> slotBuffer;
    std::tie(uniqueId, slotBuffer) = mTrackedGraphicBuffers.getSlotBuffer(slot);
    ALOGV("%s(): dequeued slot=%d uniqueId=%u", __func__, slot, uniqueId);

    if (!mTrackedGraphicBuffers.hasUniqueId(uniqueId)) {
        if (mTrackedGraphicBuffers.size() >= mBuffersRequested) {
            // The dequeued slot has a pre-allocated buffer whose size and format are the same as
            // currently requested (but it was not dequeued during the allocation cycle). Just
            // detach it to free this slot, and try dequeueBuffer() again.
            ALOGD("dequeued a new slot %d but already allocated enough buffers. Detach it.", slot);

            if (mProducer->detachBuffer(slot) != OK) {
                return C2_CORRUPTED;
            }

            const auto allocationStatus = allowAllocation(false);
            if (allocationStatus != OK) {
                return asC2Error(allocationStatus);
            }
            return C2_TIMED_OUT;
        }

        std::shared_ptr<C2GraphicAllocation> allocation =
                ConvertGraphicBuffer2C2Allocation(slotBuffer, mProducerId, slot, mAllocator.get());
        if (!allocation) {
            return C2_CORRUPTED;
        }
        mTrackedGraphicBuffers.registerUniqueId(uniqueId, std::move(allocation));

        ALOGV("%s(): mTrackedGraphicBuffers.size=%zu", __func__, mTrackedGraphicBuffers.size());
        if (mTrackedGraphicBuffers.size() == mBuffersRequested) {
            ALOGV("Tracked IGBP slots: %s", mTrackedGraphicBuffers.debugString().c_str());
            // Already allocated enough buffers, so set allowAllocation to false to restrict the
            // eligible slots to the allocated ones for future dequeues.
            const auto allocationStatus = allowAllocation(false);
            if (allocationStatus != OK) {
                return asC2Error(allocationStatus);
            }
        }
    }

    std::shared_ptr<C2SurfaceSyncMemory> syncMem;
    // TODO: the |owner| argument should be set correctly.
    std::shared_ptr<C2GraphicAllocation> allocation =
            mTrackedGraphicBuffers.getRegisteredAllocation(uniqueId);
    auto poolData = std::make_shared<C2BufferQueueBlockPoolData>(
            slotBuffer->getGenerationNumber(), mProducerId, slot, std::make_shared<int>(0),
            mProducer->getBase(), syncMem);
    mTrackedGraphicBuffers.updatePoolData(slot, poolData);
    *block = _C2BlockFactory::CreateGraphicBlock(std::move(allocation), std::move(poolData));
    if (*block == nullptr) {
        ALOGE("failed to create GraphicBlock: no memory");
        return C2_NO_MEMORY;
    }

    // Wait for the acquire fence as the last step before returning the buffer.
    if (fence) {
        const auto fenceStatus = waitFence(slot, fence);
        if (fenceStatus != OK) {
            return asC2Error(fenceStatus);
        }

        if (mRenderCallback) {
            nsecs_t signalTime = fence->getSignalTime();
            if (signalTime >= 0 && signalTime < INT64_MAX) {
                mRenderCallback(mProducerId, slot, signalTime);
            } else {
                ALOGV("got fence signal time of %" PRId64 " nsec", signalTime);
            }
        }
    }

    return C2_OK;
}

status_t C2VdaBqBlockPool::Impl::getFreeSlotLocked(uint32_t width, uint32_t height, uint32_t format,
                                                   C2MemoryUsage usage, slot_t* slot,
                                                   sp<Fence>* fence) {
    if (mTrackedGraphicBuffers.needMigrateLostBuffers()) {
        slot_t newSlot;
        if (mTrackedGraphicBuffers.migrateLostBuffer(mAllocator.get(), mProducer.get(), mProducerId,
                                                     &newSlot) == OK) {
            ALOGV("%s(): migrated buffer: slot=%d", __func__, newSlot);
            *slot = newSlot;
            return OK;
        }
    }

    // Dequeue a free slot from the IGBP.
    ALOGV("%s(): try to dequeue free slot from IGBP.", __func__);
    const auto dequeueStatus = mProducer->dequeueBuffer(width, height, format, usage, slot, fence);
    if (dequeueStatus == TIMED_OUT) {
        std::lock_guard<std::mutex> lock(mBufferReleaseMutex);
        mBufferReleasedAfterTimedOut = false;
    }
    if (dequeueStatus != OK && dequeueStatus != BUFFER_NEEDS_REALLOCATION) {
        return dequeueStatus;
    }

    // Call requestBuffer() to update the GraphicBuffer for the slot and obtain the reference.
    if (!mTrackedGraphicBuffers.hasSlotId(*slot) || dequeueStatus == BUFFER_NEEDS_REALLOCATION) {
        sp<GraphicBuffer> slotBuffer = new GraphicBuffer();
        const auto requestStatus = mProducer->requestBuffer(*slot, &slotBuffer);
        if (requestStatus != OK) {
            mProducer->cancelBuffer(*slot, *fence);
            return requestStatus;
        }

        const auto uniqueId = getDmabufId(slotBuffer->handle->data[0]);
        if (!uniqueId) {
            ALOGE("%s(): failed to get uniqueId of GraphicBuffer from slot=%d", __func__, *slot);
            return UNKNOWN_ERROR;
        }
        mTrackedGraphicBuffers.updateSlotBuffer(*slot, *uniqueId, std::move(slotBuffer));
    }

    ALOGV("%s(%ux%u): dequeued slot=%d", __func__, mBufferFormat.mWidth, mBufferFormat.mHeight,
          *slot);
    mTrackedGraphicBuffers.onBufferDequeued(*slot);
    return OK;
}

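// Invoked (via BufferReleasedNotifier) when the producer reports that buffers have been released.
// Runs the pending |mNotifyBlockAvailableCb| if one is set; otherwise records that a buffer was
// released after a dequeue timeout so the next setNotifyBlockAvailableCb() can fire immediately.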
void C2VdaBqBlockPool::Impl::onEventNotified() {
    ALOGV("%s()", __func__);
    ::base::OnceClosure outputCb;
    {
        std::lock_guard<std::mutex> lock(mBufferReleaseMutex);

        mBufferReleasedAfterTimedOut = true;
        if (mNotifyBlockAvailableCb) {
            mBufferReleasedAfterTimedOut = false;
            outputCb = std::move(mNotifyBlockAvailableCb);
        }
    }

    // Call the callback outside the lock to avoid deadlock.
    if (outputCb) {
        std::move(outputCb).Run();
    }
}

status_t C2VdaBqBlockPool::Impl::queryGenerationAndUsageLocked(uint32_t width, uint32_t height,
                                                               uint32_t pixelFormat,
                                                               C2AndroidMemoryUsage androidUsage,
                                                               uint32_t* generation,
                                                               uint64_t* usage) {
    ALOGV("%s()", __func__);

    sp<Fence> fence = new Fence();
    slot_t slot;
    const auto dequeueStatus =
            mProducer->dequeueBuffer(width, height, pixelFormat, androidUsage, &slot, &fence);
    if (dequeueStatus != OK && dequeueStatus != BUFFER_NEEDS_REALLOCATION) {
        return dequeueStatus;
    }

    // Call requestBuffer to allocate the buffer for the slot and obtain the reference.
    // Get the generation number here.
    sp<GraphicBuffer> slotBuffer = new GraphicBuffer();
    const auto requestStatus = mProducer->requestBuffer(slot, &slotBuffer);

    // Detach and delete the temporary buffer.
    const auto detachStatus = mProducer->detachBuffer(slot);
    if (detachStatus != OK) {
        return detachStatus;
    }

    // Check the requestBuffer return flag.
    if (requestStatus != OK) {
        return requestStatus;
    }

    // Get the generation number and usage from the slot buffer.
    *usage = slotBuffer->getUsage();
    *generation = slotBuffer->getGenerationNumber();
    ALOGV("Obtained from temp buffer: generation = %u, usage = %" PRIu64 "", *generation, *usage);
    return OK;
}

status_t C2VdaBqBlockPool::Impl::waitFence(slot_t slot, sp<Fence> fence) {
    const auto fenceStatus = fence->wait(kFenceWaitTimeMs);
    if (fenceStatus == OK) {
        return OK;
    }

    const auto cancelStatus = mProducer->cancelBuffer(slot, fence);
    if (cancelStatus != OK) {
        ALOGE("%s(): failed to cancelBuffer(slot=%d)", __func__, slot);
        return cancelStatus;
    }

    if (fenceStatus == -ETIME) {  // fence wait timed out
        ALOGV("%s(): buffer (slot=%d) fence wait timed out", __func__, slot);
        return TIMED_OUT;
    }
    ALOGE("buffer fence wait error: %d", fenceStatus);
    return fenceStatus;
}

void C2VdaBqBlockPool::Impl::setRenderCallback(
        const C2BufferQueueBlockPool::OnRenderCallback& renderCallback) {
    ALOGV("setRenderCallback");
    std::lock_guard<std::mutex> lock(mMutex);
    mRenderCallback = renderCallback;
}

c2_status_t C2VdaBqBlockPool::Impl::requestNewBufferSet(int32_t bufferCount, uint32_t width,
                                                        uint32_t height, uint32_t format,
                                                        C2MemoryUsage usage) {
    ALOGV("%s(bufferCount=%d, size=%ux%u, format=0x%x, usage=%" PRIu64 ")", __func__, bufferCount,
          width, height, format, usage.expected);

    if (bufferCount <= 0) {
        ALOGE("Invalid requested buffer count = %d", bufferCount);
        return C2_BAD_VALUE;
    }

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mProducer) {
        ALOGD("No HGraphicBufferProducer is configured...");
        return C2_NO_INIT;
    }
    if (mBuffersRequested == static_cast<size_t>(bufferCount) && mBufferFormat.mWidth == width &&
        mBufferFormat.mHeight == height && mBufferFormat.mPixelFormat == format &&
        mBufferFormat.mUsage.expected == usage.expected) {
        ALOGD("%s() requested the same format and amount of buffers, skip", __func__);
        return C2_OK;
    }

    const auto status = allowAllocation(true);
    if (status != OK) {
        return asC2Error(status);
    }

    // Release all remaining slot buffer references here. CCodec should either cancel or queue its
    // owned buffers from this set before the next resolution change.
    mTrackedGraphicBuffers.reset();

    mBuffersRequested = static_cast<size_t>(bufferCount);

    // Store the buffer format for future use.
    mBufferFormat = BufferFormat(width, height, format, C2AndroidMemoryUsage(usage));

    return C2_OK;
}

void C2VdaBqBlockPool::Impl::configureProducer(const sp<HGraphicBufferProducer>& producer) {
    ALOGV("%s(producer=%p)", __func__, producer.get());

    std::lock_guard<std::mutex> lock(mMutex);
    if (producer == nullptr) {
        ALOGI("input producer is nullptr...");

        mProducer = nullptr;
        mProducerId = 0;
        mTrackedGraphicBuffers.reset();
        return;
    }

    auto newProducer = std::make_unique<H2BGraphicBufferProducer>(producer);
    uint64_t newProducerId;
    if (newProducer->getUniqueId(&newProducerId) != OK) {
        ALOGE("%s(): failed to get IGBP ID", __func__);
        mConfigureProducerError = true;
        return;
    }
    if (newProducerId == mProducerId) {
        ALOGI("%s(): configure the same producer, ignore", __func__);
        return;
    }

    ALOGI("Producer (Surface) is going to switch... ( 0x%" PRIx64 " -> 0x%" PRIx64 " )",
          mProducerId, newProducerId);
    mProducer = std::move(newProducer);
    mProducerId = newProducerId;
    mConfigureProducerError = false;
    mAllowAllocation = false;

    // Enable allocation on the new producer.
    if (allowAllocation(true) != OK) {
        ALOGE("%s(): failed to allowAllocation(true)", __func__);
        mConfigureProducerError = true;
        return;
    }
    if (mProducer->setDequeueTimeout(0) != OK) {
        ALOGE("%s(): failed to setDequeueTimeout(0)", __func__);
        mConfigureProducerError = true;
        return;
    }
    if (mProducer->setMaxDequeuedBufferCount(kMaxDequeuedBufferCount) != OK) {
        ALOGE("%s(): failed to setMaxDequeuedBufferCount(%d)", __func__, kMaxDequeuedBufferCount);
        mConfigureProducerError = true;
        return;
    }

    // Migrate existing buffers to the new producer.
    if (mTrackedGraphicBuffers.size() > 0) {
        uint32_t newGeneration = 0;
        uint64_t newUsage = 0;
        const status_t err = queryGenerationAndUsageLocked(
                mBufferFormat.mWidth, mBufferFormat.mHeight, mBufferFormat.mPixelFormat,
                mBufferFormat.mUsage, &newGeneration, &newUsage);
        if (err != OK) {
            ALOGE("failed to query generation and usage: %d", err);
            mConfigureProducerError = true;
            return;
        }

        if (!mTrackedGraphicBuffers.migrateLocalBuffers(mProducer.get(), mProducerId, newGeneration,
                                                        newUsage)) {
            ALOGE("%s(): failed to migrateLocalBuffers()", __func__);
            mConfigureProducerError = true;
            return;
        }

        if (mTrackedGraphicBuffers.size() == mBuffersRequested) {
            if (allowAllocation(false) != OK) {
                ALOGE("%s(): failed to allowAllocation(false)", __func__);
                mConfigureProducerError = true;
                return;
            }
        }
    }

    // hack(b/146409777): Try to connect the ARC-specific listener first.
    sp<BufferReleasedNotifier> listener = new BufferReleasedNotifier(weak_from_this());
    if (mProducer->connect(listener, 'ARC\0', false) == OK) {
        ALOGI("connected to ARC-specific IGBP listener.");
        mFetchBufferNotifier = listener;
    }

    // There might be free buffers at the new producer; notify the client if needed.
    onEventNotified();
}

bool C2VdaBqBlockPool::Impl::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
    ALOGV("%s()", __func__);
    if (mFetchBufferNotifier == nullptr) {
        return false;
    }

    ::base::OnceClosure outputCb;
    {
        std::lock_guard<std::mutex> lock(mBufferReleaseMutex);

        // If any buffer was released after dequeueBuffer() timed out, then we can notify the
        // caller directly.
        if (mBufferReleasedAfterTimedOut) {
            mBufferReleasedAfterTimedOut = false;
            outputCb = std::move(cb);
        } else {
            mNotifyBlockAvailableCb = std::move(cb);
        }
    }

    // Call the callback outside the lock to avoid deadlock.
    if (outputCb) {
        std::move(outputCb).Run();
    }
    return true;
}

std::optional<unique_id_t> C2VdaBqBlockPool::Impl::getBufferIdFromGraphicBlock(
        const C2Block2D& block) {
    return getDmabufId(block.handle()->data[0]);
}

status_t C2VdaBqBlockPool::Impl::allowAllocation(bool allow) {
    ALOGV("%s(%d)", __func__, allow);

    if (!mProducer) {
        ALOGW("%s() mProducer is not initialized", __func__);
        return NO_INIT;
    }
    if (mAllowAllocation == allow) {
        return OK;
    }

    const auto status = mProducer->allowAllocation(allow);
    if (status == OK) {
        mAllowAllocation = allow;
    }
    return status;
}

C2VdaBqBlockPool::C2VdaBqBlockPool(const std::shared_ptr<C2Allocator>& allocator,
                                   const local_id_t localId)
      : C2BufferQueueBlockPool(allocator, localId), mLocalId(localId), mImpl(new Impl(allocator)) {}

c2_status_t C2VdaBqBlockPool::fetchGraphicBlock(
        uint32_t width, uint32_t height, uint32_t format, C2MemoryUsage usage,
        std::shared_ptr<C2GraphicBlock>* block /* nonnull */) {
    if (mImpl) {
        return mImpl->fetchGraphicBlock(width, height, format, usage, block);
    }
    return C2_NO_INIT;
}

void C2VdaBqBlockPool::setRenderCallback(
        const C2BufferQueueBlockPool::OnRenderCallback& renderCallback) {
    if (mImpl) {
        mImpl->setRenderCallback(renderCallback);
    }
}

c2_status_t C2VdaBqBlockPool::requestNewBufferSet(int32_t bufferCount, uint32_t width,
                                                  uint32_t height, uint32_t format,
                                                  C2MemoryUsage usage) {
    if (mImpl) {
        return mImpl->requestNewBufferSet(bufferCount, width, height, format, usage);
    }
    return C2_NO_INIT;
}

void C2VdaBqBlockPool::configureProducer(const sp<HGraphicBufferProducer>& producer) {
    if (mImpl) {
        mImpl->configureProducer(producer);
    }
}

bool C2VdaBqBlockPool::setNotifyBlockAvailableCb(::base::OnceClosure cb) {
    if (mImpl) {
        return mImpl->setNotifyBlockAvailableCb(std::move(cb));
    }
    return false;
}

std::optional<unique_id_t> C2VdaBqBlockPool::getBufferIdFromGraphicBlock(const C2Block2D& block) {
    if (mImpl) {
        return mImpl->getBufferIdFromGraphicBlock(block);
    }
    return std::nullopt;
}

} // namespace android