/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
//#define LOG_NDEBUG 0
#define LOG_TAG "GraphicsTracker"
#include <fcntl.h>
#include <unistd.h>

#include <gui/BufferItemConsumer.h>
#include <gui/BufferQueue.h>
#include <gui/Surface.h>
#include <media/stagefright/foundation/ADebug.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <vndk/hardware_buffer.h>

#include <C2BlockInternal.h>
#include <codec2/aidl/GraphicsTracker.h>

namespace aidl::android::hardware::media::c2::implementation {

namespace {

static constexpr int kMaxDequeueMin = 1;
static constexpr int kMaxDequeueMax = ::android::BufferQueueDefs::NUM_BUFFER_SLOTS - 2;

// A short delay to give the HAL time to receive the stop()/release() request.
static constexpr int kAllocateDirectDelayUs = 16666;

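// Retrieves the unique id of the AHardwareBuffer backing a graphic block.
// Fails with C2_BAD_VALUE if the block is not AHardwareBuffer-backed, with
// C2_CORRUPTED if the buffer or its id cannot be obtained, and with
// C2_OMITTED when running below Android T.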
c2_status_t retrieveAHardwareBufferId(const C2ConstGraphicBlock &blk, uint64_t *bid) {
    std::shared_ptr<const _C2BlockPoolData> bpData = _C2BlockFactory::GetGraphicBlockPoolData(blk);
    if (!bpData || bpData->getType() != _C2BlockPoolData::TYPE_AHWBUFFER) {
        return C2_BAD_VALUE;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf;
        if (!_C2BlockFactory::GetAHardwareBuffer(bpData, &pBuf)) {
            return C2_CORRUPTED;
        }
        int ret = AHardwareBuffer_getId(pBuf, bid);
        if (ret != ::android::OK) {
            return C2_CORRUPTED;
        }
        return C2_OK;
    } else {
        return C2_OMITTED;
    }
}

} // anonymous namespace

using ::android::BufferQueue;
using ::android::BufferItemConsumer;
using ::android::ConsumerListener;
using ::android::IConsumerListener;
using ::android::IGraphicBufferProducer;
using ::android::IGraphicBufferConsumer;
using ::android::Surface;

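// A locally-owned stand-in surface used once stop()/release() has been
// requested: late allocation requests from the HAL can still be served from
// here without touching the (possibly already disconnected) client surface.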
class GraphicsTracker::PlaceHolderSurface {
public:
    static const int kMaxAcquiredBuffer = 2;
    // Large enough to keep allocating while in stop/release state.
    static const int kMaxDequeuedBuffer = 16;

    explicit PlaceHolderSurface(uint64_t usage) : mUsage(usage) {}

    ~PlaceHolderSurface() {
        if (mInit == C2_NO_INIT) {
            return;
        }
        if (mSurface) {
            mSurface->disconnect(NATIVE_WINDOW_API_MEDIA);
        }
    }

    c2_status_t allocate(uint32_t width, uint32_t height,
                         uint32_t format, uint64_t usage,
                         AHardwareBuffer **pBuf, sp<Fence> *fence) {
        std::unique_lock<std::mutex> l(mLock);
        if (mInit == C2_NO_INIT) {
            mInit = init();
        }

        if (!mBufferItemConsumer || !mSurface) {
            ALOGE("PlaceHolderSurface not properly initialized");
            return C2_CORRUPTED;
        }

        native_window_set_usage(mSurface.get(), usage);
        native_window_set_buffers_format(mSurface.get(), format);
        native_window_set_buffers_dimensions(mSurface.get(), width, height);

        ::android::status_t res;
        std::vector<Surface::BatchBuffer> buffers(1);
        res = mSurface->dequeueBuffers(&buffers);
        if (res != ::android::OK) {
            ALOGE("dequeueBuffers failed from PlaceHolderSurface %d", res);
            return C2_CORRUPTED;
        }
        sp<GraphicBuffer> gb = GraphicBuffer::from(buffers[0].buffer);
        *pBuf = AHardwareBuffer_from_GraphicBuffer(gb.get());
        AHardwareBuffer_acquire(*pBuf);
        *fence = new Fence(buffers[0].fenceFd);
        return C2_OK;
    }

private:
    uint64_t mUsage;
    sp<Surface> mSurface;
    sp<BufferItemConsumer> mBufferItemConsumer;
    c2_status_t mInit = C2_NO_INIT;
    std::mutex mLock;

    c2_status_t init() {
        std::tie(mBufferItemConsumer, mSurface) =
                BufferItemConsumer::create(mUsage, kMaxAcquiredBuffer);

        if (mSurface) {
            mSurface->connect(NATIVE_WINDOW_API_MEDIA, nullptr);
            mSurface->setMaxDequeuedBufferCount(kMaxDequeuedBuffer);
        }
        return C2_OK;
    }
};


GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, int slot, const sp<GraphicBuffer>& buf, const sp<Fence>& fence) :
        mInit{false}, mGeneration{generation}, mSlot{slot} {
    if (!buf) {
        return;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf = AHardwareBuffer_from_GraphicBuffer(buf.get());
        int ret = AHardwareBuffer_getId(pBuf, &mId);
        if (ret != ::android::OK) {
            return;
        }
        mUsage = buf->getUsage();
        AHardwareBuffer_acquire(pBuf);
        mBuf = pBuf;
        mFence = fence;
        mInit = true;
    }
}

GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, AHardwareBuffer *pBuf, uint64_t usage) :
        mInit{true}, mGeneration{generation}, mSlot{-1},
        mBuf{pBuf}, mUsage{usage},
        mFence{Fence::NO_FENCE} {
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        int ret = AHardwareBuffer_getId(mBuf, &mId);
        if (ret != ::android::OK) {
            mInit = false;
            mBuf = nullptr;
            return;
        }
    }
    AHardwareBuffer_acquire(mBuf);
}

GraphicsTracker::BufferItem::~BufferItem() {
    if (mInit) {
        AHardwareBuffer_release(mBuf);
    }
}

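// Clones the underlying native handle into a new AHardwareBuffer carrying the
// merged usage bits and the new generation number, so that the buffer can be
// attached to a different surface. The clone refers to the same underlying
// buffer memory.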
std::shared_ptr<GraphicsTracker::BufferItem> GraphicsTracker::BufferItem::migrateBuffer(
        uint64_t newUsage, uint32_t newGeneration) {
    if (!mInit) {
        return nullptr;
    }
    newUsage |= mUsage;
    uint64_t ahbUsage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(newUsage);
    AHardwareBuffer_Desc desc;
    AHardwareBuffer_describe(mBuf, &desc);
    // TODO: we need well-established buffer migration features from graphics.
    // (b/273776738)
    desc.usage = ahbUsage;
    const native_handle_t *handle = AHardwareBuffer_getNativeHandle(mBuf);
    if (!handle) {
        return nullptr;
    }

    AHardwareBuffer *newBuf;
    int err = AHardwareBuffer_createFromHandle(&desc, handle,
                                               AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
                                               &newBuf);
    if (err != ::android::NO_ERROR) {
        return nullptr;
    }

    std::shared_ptr<BufferItem> newBuffer =
            std::make_shared<BufferItem>(newGeneration, newBuf, newUsage);
    AHardwareBuffer_release(newBuf);
    return newBuffer;
}

sp<GraphicBuffer> GraphicsTracker::BufferItem::getGraphicBuffer() {
    if (!mInit) {
        return nullptr;
    }
    GraphicBuffer *gb = ::android::AHardwareBuffer_to_GraphicBuffer(mBuf);
    if (!gb) {
        return nullptr;
    }
    gb->setGenerationNumber(mGeneration);
    return gb;
}

GraphicsTracker::BufferCache::~BufferCache() {
    ALOGV("BufferCache destruction: generation(%d), igbp(%d)", mGeneration, (bool)mIgbp);
}

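// A slot is blocked while a cancel/attach/queue operation on it is in flight,
// and unblocked when that operation commits. waitOnSlot() makes a new dequeue
// of the same slot wait until the slot is unblocked again.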
void GraphicsTracker::BufferCache::waitOnSlot(int slot) {
    // TODO: log
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    while (p->blocked) {
        p->cv.wait(l);
    }
}

void GraphicsTracker::BufferCache::blockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    ALOGV("block slot %d", slot);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = true;
}

void GraphicsTracker::BufferCache::unblockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    ALOGV("unblock slot %d", slot);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = false;
    l.unlock();
    p->cv.notify_one();
}

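// The dequeueable count is mirrored into a pipe so that clients can wait on
// it: the pipe holds exactly one byte per dequeueable buffer. The read end is
// dup()-ed out via getWaitableFd(), and requestAllocateLocked() consumes one
// byte per dequeue reservation.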
GraphicsTracker::GraphicsTracker(int maxDequeueCount)
    : mBufferCache(new BufferCache()), mNumDequeueing{0}, mMaxDequeue{maxDequeueCount},
    mMaxDequeueCommitted{maxDequeueCount},
    mDequeueable{maxDequeueCount},
    mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
    mInConfig{false}, mStopped{false}, mStopRequested{false}, mAllocAfterStopRequested{0} {
    if (maxDequeueCount < kMaxDequeueMin) {
        mMaxDequeue = kMaxDequeueMin;
        mMaxDequeueCommitted = kMaxDequeueMin;
        mDequeueable = kMaxDequeueMin;
    } else if (maxDequeueCount > kMaxDequeueMax) {
        mMaxDequeue = kMaxDequeueMax;
        mMaxDequeueCommitted = kMaxDequeueMax;
        mDequeueable = kMaxDequeueMax;
    }
    int pipefd[2] = { -1, -1};
    int ret = ::pipe2(pipefd, O_CLOEXEC | O_NONBLOCK);

    mReadPipeFd.reset(pipefd[0]);
    mWritePipeFd.reset(pipefd[1]);

    // ctor does not require lock to be held.
    writeIncDequeueableLocked(mDequeueable);

    CHECK(ret >= 0);
}

GraphicsTracker::~GraphicsTracker() {
    stop();
}

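// Absorbs a buffer that is about to become dequeueable into a pending
// decrease of the max dequeue count, instead of advertising it, so that a
// requested smaller max dequeue count converges without extra waits.
// Returns true if the buffer was absorbed; the caller must then skip the
// mDequeueable increment. Caller must hold mLock.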
bool GraphicsTracker::adjustDequeueConfLocked(bool *updateDequeue) {
    // TODO: can't we adjust during config? Would not committing it be safe?
    *updateDequeue = false;
    if (!mInConfig && mMaxDequeueRequested.has_value() && mMaxDequeueRequested < mMaxDequeue) {
        int delta = mMaxDequeue - mMaxDequeueRequested.value();
        int drained = 0;
        // Since the caller is about to increase mDequeueable by one anyway
        int adjustable = mDequeueable + 1;
        if (adjustable >= delta) {
            mMaxDequeue = mMaxDequeueRequested.value();
            mDequeueable -= (delta - 1);
            drained = delta - 1;
        } else {
            mMaxDequeue -= adjustable;
            drained = mDequeueable;
            mDequeueable = 0;
        }
        if (drained > 0) {
            drainDequeueableLocked(drained);
        }
        if (mMaxDequeueRequested == mMaxDequeue && mMaxDequeueRequested != mMaxDequeueCommitted) {
            *updateDequeue = true;
        }
        return true;
    }
    return false;
}

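// Configures a new surface (IGBP) with a new generation number. The committed
// max dequeue count is carried over to the new surface, and the dequeueable
// count is recomputed from the number of currently dequeued buffers.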
c2_status_t GraphicsTracker::configureGraphics(
        const sp<IGraphicBufferProducer>& igbp, uint32_t generation) {
    // TODO: wait until operations to the previous IGBP are completed.
    std::shared_ptr<BufferCache> prevCache;
    int prevDequeueRequested = 0;
    int prevDequeueCommitted;

    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = true;
        prevCache = mBufferCache;
        prevDequeueCommitted = mMaxDequeueCommitted;
        if (mMaxDequeueRequested.has_value()) {
            prevDequeueRequested = mMaxDequeueRequested.value();
        }
    }
    // NOTE: Switching to the same surface is blocked from MediaCodec.
    // Switching to the same surface might not work if tried, since disconnect()
    // from the old surface in MediaCodec and allocation from the new surface in
    // GraphicsTracker cannot be synchronized properly.
    uint64_t bqId{0ULL};
    uint64_t bqUsage{0ULL};
    ::android::status_t ret = ::android::OK;
    if (igbp) {
        ret = igbp->getUniqueId(&bqId);
        if (ret == ::android::OK) {
            (void)igbp->getConsumerUsage(&bqUsage);
        }
    }
    if (ret != ::android::OK ||
            prevCache->mGeneration == generation) {
        ALOGE("configuring the new surface failed due to a wrong bqId or the same generation:"
              "igbp(%d:%llu -> %llu), gen(%lu -> %lu)", (bool)igbp,
              (unsigned long long)prevCache->mBqId, (unsigned long long)bqId,
              (unsigned long)prevCache->mGeneration, (unsigned long)generation);
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        return C2_BAD_VALUE;
    }
    ALOGD("new surface in configuration: maxDequeueRequested(%d), maxDequeueCommitted(%d)",
          prevDequeueRequested, prevDequeueCommitted);
    if (prevDequeueRequested > 0 && prevDequeueRequested > prevDequeueCommitted) {
        prevDequeueCommitted = prevDequeueRequested;
    }
    if (igbp) {
        ret = igbp->setMaxDequeuedBufferCount(prevDequeueCommitted);
        if (ret != ::android::OK) {
            ALOGE("configuring maxDequeuedBufferCount on the new surface failed");
            // TODO: sort out the error from igbp and return an error accordingly.
            std::unique_lock<std::mutex> l(mLock);
            mInConfig = false;
            return C2_CORRUPTED;
        }
    }
    ALOGD("new surface configured with id:%llu gen:%lu maxDequeue:%d",
          (unsigned long long)bqId, (unsigned long)generation, prevDequeueCommitted);
    std::shared_ptr<BufferCache> newCache =
            std::make_shared<BufferCache>(bqId, bqUsage, generation, igbp);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        mBufferCache = newCache;
        // {@code dequeued} is the number of currently dequeued buffers.
        // {@code prevDequeueCommitted} is the maximum number of buffers that
        // may be dequeued at any moment from the new surface.
        // {@code newDequeueable} is hence the current # of dequeueable buffers
        // if no change occurs.
        int dequeued = mDequeued.size() + mNumDequeueing;
        int newDequeueable = prevDequeueCommitted - dequeued;
        if (newDequeueable < 0) {
            // This should not happen.
            // But if it does, we respect the value and try to continue.
            ALOGE("calculated new dequeueable is negative: %d max(%d),dequeued(%d)",
                  newDequeueable, prevDequeueCommitted, dequeued);
        }

        if (mMaxDequeueRequested.has_value() && mMaxDequeueRequested == prevDequeueCommitted) {
            mMaxDequeueRequested.reset();
        }
        mMaxDequeue = mMaxDequeueCommitted = prevDequeueCommitted;

        int delta = newDequeueable - mDequeueable;
        if (delta > 0) {
            writeIncDequeueableLocked(delta);
        } else if (delta < 0) {
            drainDequeueableLocked(-delta);
        }
        ALOGV("new surface dequeueable %d(delta %d), maxDequeue %d",
              newDequeueable, delta, mMaxDequeue);
        mDequeueable = newDequeueable;
    }
    return C2_OK;
}

c2_status_t GraphicsTracker::configureMaxDequeueCount(int maxDequeueCount) {
    std::shared_ptr<BufferCache> cache;

    if (maxDequeueCount < kMaxDequeueMin || maxDequeueCount > kMaxDequeueMax) {
        ALOGE("max dequeue count %d is not valid", maxDequeueCount);
        return C2_BAD_VALUE;
    }

    // Max dequeue count which can be committed to IGBP.
    // (Sometimes maxDequeueCount cannot be committed immediately if the
    // number of currently dequeued buffers is bigger.)
    int maxDequeueToCommit;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeueRequested.has_value()) {
            if (mMaxDequeueRequested == maxDequeueCount) {
                ALOGD("maxDequeueCount requested with %d already", maxDequeueCount);
                return C2_OK;
            }
        } else if (mMaxDequeue == maxDequeueCount) {
            ALOGD("maxDequeueCount is already %d", maxDequeueCount);
            return C2_OK;
        }
        mInConfig = true;
        mMaxDequeueRequested = maxDequeueCount;
        cache = mBufferCache;
        if (mMaxDequeue <= maxDequeueCount) {
            maxDequeueToCommit = maxDequeueCount;
        } else {
            // Since mDequeueable is decreasing,
            // a delivered ready-to-allocate event may not be fulfilled.
            // Another wait via a waitable object may be necessary in that case.
            int delta = std::min(mMaxDequeue - maxDequeueCount, mDequeueable);
            maxDequeueToCommit = mMaxDequeue - delta;
            mDequeueable -= delta;
            if (delta > 0) {
                drainDequeueableLocked(delta);
            }
        }
    }

    bool committed = true;
    if (cache->mIgbp && maxDequeueToCommit != mMaxDequeueCommitted) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(maxDequeueToCommit);
        committed = (ret == ::android::OK);
        if (committed) {
            ALOGD("maxDequeueCount committed to IGBP: %d", maxDequeueToCommit);
        } else {
            // This should not happen.
            ALOGE("maxDequeueCount update to IGBP failed with error(%d)", (int)ret);
        }
    }

    int oldMaxDequeue = 0;
    int requested = 0;
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        oldMaxDequeue = mMaxDequeue;
        mMaxDequeue = maxDequeueToCommit; // we already drained dequeueable
        if (committed) {
            clearCacheIfNecessaryLocked(cache, maxDequeueToCommit);
            mMaxDequeueCommitted = maxDequeueToCommit;
            if (mMaxDequeueRequested == mMaxDequeueCommitted &&
                    mMaxDequeueRequested == mMaxDequeue) {
                mMaxDequeueRequested.reset();
            }
            if (mMaxDequeueRequested.has_value()) {
                requested = mMaxDequeueRequested.value();
            }
            int delta = mMaxDequeueCommitted - oldMaxDequeue;
            if (delta > 0) {
                mDequeueable += delta;
                writeIncDequeueableLocked(delta);
            }
        }
    }
    ALOGD("maxDequeueCount change %d -> %d: pending: %d",
          oldMaxDequeue, maxDequeueToCommit, requested);

    if (!committed) {
        return C2_CORRUPTED;
    }
    return C2_OK;
}

void GraphicsTracker::updateDequeueConf() {
    std::shared_ptr<BufferCache> cache;
    int dequeueCommit;
    ALOGV("trying to update max dequeue count");
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (!mMaxDequeueRequested.has_value() || mMaxDequeue != mMaxDequeueRequested) {
            return;
        }
        if (mMaxDequeueCommitted == mMaxDequeueRequested) {
            // already committed; this may not happen.
            mMaxDequeueRequested.reset();
            return;
        }
        dequeueCommit = mMaxDequeue;
        mInConfig = true;
        cache = mBufferCache;
    }
    bool committed = true;
    if (cache->mIgbp) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(dequeueCommit);
        committed = (ret == ::android::OK);
        if (committed) {
            ALOGD("delayed maxDequeueCount update to IGBP: %d", dequeueCommit);
        } else {
            // This should not happen.
            ALOGE("delayed maxDequeueCount update to IGBP failed with error(%d)", (int)ret);
        }
    }
    {
        // cache == mBufferCache here, since we hold the config lock.
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            clearCacheIfNecessaryLocked(cache, dequeueCommit);
            mMaxDequeueCommitted = dequeueCommit;
        }
        mMaxDequeueRequested.reset();
    }
}

void GraphicsTracker::clearCacheIfNecessaryLocked(const std::shared_ptr<BufferCache> &cache,
                                                  int maxDequeueCommitted) {
    int cleared = 0;
    size_t origCacheSize = cache->mBuffers.size();
    if (cache->mIgbp && maxDequeueCommitted < mMaxDequeueCommitted) {
        // In this case we are shrinking the number of buffers, so evict the
        // previously cached buffers.
        for (auto it = cache->mBuffers.begin(); it != cache->mBuffers.end();) {
            uint64_t bid = it->second->mId;
            if (mDequeued.count(bid) == 0 || mDeallocating.count(bid) > 0) {
                ++cleared;
                it = cache->mBuffers.erase(it);
            } else {
                ++it;
            }
        }
    }
    ALOGD("Cache size %zu -> %zu: maybe_cleared(%d), dequeued(%zu)",
          origCacheSize, cache->mBuffers.size(), cleared, mDequeued.size());
}

int GraphicsTracker::getCurDequeueable() {
    std::unique_lock<std::mutex> l(mLock);
    return mDequeueable;
}

void GraphicsTracker::stop() {
    // TODO: wait until all operations to the current IGBP are completed.
    std::unique_lock<std::mutex> l(mLock);
    if (mStopped) {
        return;
    }
    mStopped = true;
    int writeFd = mWritePipeFd.release();
    if (writeFd >= 0) {
        ::close(writeFd);
    }
}

void GraphicsTracker::onRequestStop() {
    std::unique_lock<std::mutex> l(mLock);
    if (mStopped) {
        return;
    }
    if (mStopRequested) {
        return;
    }
    if (mBufferCache && mBufferCache->mBqId != 0) {
        mReleaseSurface.reset(new PlaceHolderSurface(mBufferCache->mUsage));
    }
    mStopRequested = true;
    writeIncDequeueableLocked(kMaxDequeueMax - 1);
}

void GraphicsTracker::writeIncDequeueableLocked(int inc) {
    CHECK(inc > 0 && inc < kMaxDequeueMax);
    thread_local char buf[kMaxDequeueMax];
    if (mStopped) { // reading end closed;
        return;
    }
    int writeFd = mWritePipeFd.get();
    if (writeFd < 0) {
        // initialization failed; the fd is not valid.
        return;
    }
    int ret = ::write(writeFd, buf, inc);
    // Since this is non-blocking i/o, it never returns EINTR.
    //
    // A ::write() to a pipe is guaranteed to succeed atomically if it writes
    // less than PIPE_BUF bytes. The buffer size of a pipe/fifo is at least 4K,
    // and our total max pending buffer count is 64, so it never returns EAGAIN
    // here either. See pipe(7) for further information.
    //
    // Other errors are serious errors, and we can no longer synchronize
    // mDequeueable with the number of pending bytes in the pipe/fifo. So it is
    // better to abort here.
    // TODO: do not abort here. (b/318717399)
    CHECK(ret == inc);
}

void GraphicsTracker::drainDequeueableLocked(int dec) {
    CHECK(dec > 0 && dec < kMaxDequeueMax);
    thread_local char buf[kMaxDequeueMax];
    if (mStopped) {
        return;
    }
    int readFd = mReadPipeFd.get();
    if (readFd < 0) {
        // initialization failed; the fd is not valid.
        return;
    }
    int ret = ::read(readFd, buf, dec);
    // TODO: do not abort here. (b/318717399)
    CHECK(ret == dec);
}

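// The returned fd works as a waitable object: it becomes readable whenever a
// buffer becomes dequeueable. A client would typically poll it before calling
// allocate(), e.g. (illustrative sketch only):
//   struct pollfd pfd{.fd = pipeFd, .events = POLLIN, .revents = 0};
//   if (::poll(&pfd, 1, timeoutMs) > 0) { /* allocate() should not block */ }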
c2_status_t GraphicsTracker::getWaitableFd(int *pipeFd) {
    *pipeFd = ::dup(mReadPipeFd.get());
    if (*pipeFd < 0) {
        if (mReadPipeFd.get() < 0) {
            return C2_BAD_STATE;
        }
        // dup error
        ALOGE("dup() for the reading end failed %d", errno);
        return C2_NO_MEMORY;
    }
    return C2_OK;
}

c2_status_t GraphicsTracker::requestAllocateLocked(std::shared_ptr<BufferCache> *cache) {
    if (mDequeueable > 0) {
        char buf[1];
        int ret = ::read(mReadPipeFd.get(), buf, 1);
        if (ret < 0) {
            if (errno == EINTR) {
                // Do we really need to care about cancellation due to signal handling?
                return C2_CANCELED;
            }
            if (errno == EAGAIN) {
                // Proper use of the waitable object should not lead here, but
                // there could be alloc requests from the HAL that ignore the
                // internal status.
                return C2_BLOCKING;
            }
            CHECK(errno != 0);
        }
        if (ret == 0) {
            // writing end is closed
            ALOGE("writing end for the waitable object seems to be closed");
            return C2_BAD_STATE;
        }
        mNumDequeueing++;
        mDequeueable--;
        *cache = mBufferCache;
        return C2_OK;
    }
    return C2_BLOCKING;
}

// If {@code cached} is {@code true}, {@code pBuffer} is read from the
// current cache. Otherwise, {@code pBuffer} is written into the current
// cache.
void GraphicsTracker::commitAllocate(c2_status_t res, const std::shared_ptr<BufferCache> &cache,
                                     bool cached, int slot, const sp<Fence> &fence,
                                     std::shared_ptr<BufferItem> *pBuffer, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    mNumDequeueing--;
    if (res == C2_OK) {
        if (cached) {
            auto it = cache->mBuffers.find(slot);
            CHECK(it != cache->mBuffers.end());
            it->second->mFence = fence;
            *pBuffer = it->second;
            ALOGV("an allocated buffer already cached, updated Fence");
        } else if (cache.get() == mBufferCache.get() && mBufferCache->mIgbp) {
            // Cache the buffer if it is allocated from the current IGBP
            CHECK(slot >= 0);
            auto ret = mBufferCache->mBuffers.emplace(slot, *pBuffer);
            if (!ret.second) {
                ret.first->second = *pBuffer;
            }
            ALOGV("cached a newly allocated buffer from the current IGBP");
        }
        uint64_t bid = (*pBuffer)->mId;
        auto mapRet = mDequeued.emplace(bid, *pBuffer);
        CHECK(mapRet.second);
    } else {
        ALOGD("allocate error(%d): Dequeued(%zu), Dequeueable(%d)",
              (int)res, mDequeued.size(), mDequeueable + 1);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
    }
}


// If a buffer is newly allocated, {@code cached} is {@code false} and the
// buffer is returned in {@code buffer};
// otherwise, {@code cached} is {@code true} and the buffer should be
// retrieved via commitAllocate().
c2_status_t GraphicsTracker::_allocate(const std::shared_ptr<BufferCache> &cache,
                                       uint32_t width, uint32_t height, PixelFormat format,
                                       uint64_t usage,
                                       bool *cached,
                                       int *rSlotId,
                                       sp<Fence> *rFence,
                                       std::shared_ptr<BufferItem> *buffer) {
    ::android::sp<IGraphicBufferProducer> igbp = cache->mIgbp;
    uint32_t generation = cache->mGeneration;
    if (!igbp) {
        // allocate directly
        AHardwareBuffer_Desc desc;
        desc.width = width;
        desc.height = height;
        desc.layers = 1u;
        desc.format = ::android::AHardwareBuffer_convertFromPixelFormat(format);
        desc.usage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(usage);
        desc.rfu0 = 0;
        desc.rfu1 = 0;

        AHardwareBuffer *buf;
        int ret = AHardwareBuffer_allocate(&desc, &buf);
        if (ret != ::android::OK) {
            ALOGE("direct allocation of AHB failed(%d)", ret);
            return ret == ::android::NO_MEMORY ? C2_NO_MEMORY : C2_CORRUPTED;
        }
        *cached = false;
        *rSlotId = -1;
        *rFence = Fence::NO_FENCE;
        *buffer = std::make_shared<BufferItem>(generation, buf, usage);
        // remove an acquire count from AHardwareBuffer_allocate().
        AHardwareBuffer_release(buf);
        if (!*buffer) {
            ALOGE("direct allocation of AHB successful, but failed to create BufferItem");
            return C2_NO_MEMORY;
        }
        if (!(*buffer)->mInit) {
            ALOGE("direct allocation of AHB successful, but BufferItem init failed");
            buffer->reset();
            return C2_CORRUPTED;
        }
        ALOGV("allocate: direct allocate without igbp");
        return C2_OK;
    }

    int slotId;
    uint64_t outBufferAge;
    sp<Fence> fence;

    ::android::status_t status = igbp->dequeueBuffer(
            &slotId, &fence, width, height, format, usage, &outBufferAge, nullptr);
    if (status < ::android::OK) {
        if (status == ::android::TIMED_OUT || status == ::android::WOULD_BLOCK) {
            ALOGW("BQ might not be ready for dequeueBuffer()");
            return C2_BLOCKING;
        }
        bool cacheExpired = false;
        {
            std::unique_lock<std::mutex> l(mLock);
            cacheExpired = (mBufferCache.get() != cache.get());
        }
        if (cacheExpired) {
            ALOGW("a new BQ is configured. dequeueBuffer() error %d", (int)status);
            return C2_BLOCKING;
        }
        ALOGE("BQ in inconsistent status. dequeueBuffer() error %d", (int)status);
        return C2_CORRUPTED;
    }
    cache->waitOnSlot(slotId);
    bool exists = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (cache.get() == mBufferCache.get() &&
                cache->mBuffers.find(slotId) != cache->mBuffers.end()) {
            exists = true;
        }
    }
    bool needsRealloc = status & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
    if (needsRealloc || !exists) {
        sp<GraphicBuffer> realloced;
        status = igbp->requestBuffer(slotId, &realloced);
        if (status != ::android::OK) {
            ALOGE("allocate by dequeueBuffer() successful, but requestBuffer() failed %d",
                  status);
            igbp->cancelBuffer(slotId, fence);
            // This might be due to life-cycle end and/or surface switching.
            return C2_BLOCKING;
        }
        *buffer = std::make_shared<BufferItem>(generation, slotId, realloced, fence);
        if (!*buffer) {
            ALOGE("allocate by dequeueBuffer() successful, but creating BufferItem failed");
            igbp->cancelBuffer(slotId, fence);
            return C2_NO_MEMORY;
        }
        if (!(*buffer)->mInit) {
            ALOGE("allocate by dequeueBuffer() successful, but BufferItem init failed");
            buffer->reset();
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *cached = false;
    } else {
        *cached = true;
    }
    ALOGV("allocate: a new allocated buffer from igbp cached %d, slot: %d",
          *cached, slotId);
    *rSlotId = slotId;
    *rFence = fence;
    return C2_OK;
}

c2_status_t GraphicsTracker::_allocateDirect(
        uint32_t width, uint32_t height, PixelFormat format, uint64_t usage,
        AHardwareBuffer **buf, sp<Fence> *rFence) {
    AHardwareBuffer_Desc desc;
    desc.width = width;
    desc.height = height;
    desc.layers = 1u;
    desc.format = ::android::AHardwareBuffer_convertFromPixelFormat(format);
    desc.usage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(usage);
    desc.rfu0 = 0;
    desc.rfu1 = 0;

    int res = AHardwareBuffer_allocate(&desc, buf);
    if (res != ::android::OK) {
        ALOGE("_allocateDirect() failed(%d)", res);
        if (res == ::android::NO_MEMORY) {
            return C2_NO_MEMORY;
        } else {
            return C2_CORRUPTED;
        }
    }

    *rFence = Fence::NO_FENCE;
    return C2_OK;
}

c2_status_t GraphicsTracker::allocate(
        uint32_t width, uint32_t height, PixelFormat format, uint64_t usage,
        AHardwareBuffer **buf, sp<Fence> *rFence) {
    if (mStopped.load() == true) {
        ALOGE("cannot allocate due to being stopped");
        return C2_BAD_STATE;
    }
    c2_status_t res = C2_OK;
    std::shared_ptr<BufferCache> cache;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mStopRequested) {
            l.unlock();
            if (mReleaseSurface) {
                res = mReleaseSurface->allocate(width, height, format, usage, buf, rFence);
            } else {
                res = _allocateDirect(width, height, format, usage, buf, rFence);
            }
            if (res == C2_OK) {
                ALOGD("allocated %d buffers after stop", ++mAllocAfterStopRequested);
            }
            // Delay a little bit for HAL to receive the stop()/release() request.
            ::usleep(kAllocateDirectDelayUs);
            return res;
        }
        res = requestAllocateLocked(&cache);
        if (res != C2_OK) {
            return res;
        }
    }
    ALOGV("allocatable or dequeueable");

    bool cached = false;
    int slotId;
    sp<Fence> fence;
    std::shared_ptr<BufferItem> buffer;
    bool updateDequeue = false;
    res = _allocate(cache, width, height, format, usage, &cached, &slotId, &fence, &buffer);
    commitAllocate(res, cache, cached, slotId, fence, &buffer, &updateDequeue);
    if (res == C2_OK) {
        ALOGV("allocated a buffer width:%u height:%u pixelformat:%d usage:%llu",
              width, height, format, (unsigned long long)usage);
        *buf = buffer->mBuf;
        *rFence = buffer->mFence;
        // *buf should remain valid even after the BufferItem is destroyed.
        AHardwareBuffer_acquire(*buf);
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
    return res;
}

c2_status_t GraphicsTracker::requestDeallocate(uint64_t bid, const sp<Fence> &fence,
                                               bool *completed, bool *updateDequeue,
                                               std::shared_ptr<BufferCache> *cache, int *slotId,
                                               sp<Fence> *rFence) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to deallocate a buffer which is already deallocating or rendering");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to deallocate a non-dequeued buffer");
        return C2_NOT_FOUND;
    }

    std::shared_ptr<BufferItem> buffer = it->second;
    if (buffer->mGeneration == mBufferCache->mGeneration && mBufferCache->mIgbp) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        *cache = mBufferCache;
        *slotId = buffer->mSlot;
        *rFence = (fence == Fence::NO_FENCE) ? buffer->mFence : fence;
        // mark this deallocating
        mDeallocating.emplace(bid);
        mBufferCache->blockSlot(buffer->mSlot);
        *completed = false;
    } else { // buffer is not from the current underlying Graphics.
        mDequeued.erase(bid);
        *completed = true;
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_OK;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
    }
    return C2_OK;
}

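// Deallocation is two-phase: requestDeallocate() marks the buffer as
// deallocating and blocks its slot, the IGBP cancelBuffer() call happens
// outside the lock, and commitDeallocate() finalizes the bookkeeping and
// unblocks the slot.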
void GraphicsTracker::commitDeallocate(
        std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    size_t del1 = mDequeued.erase(bid);
    size_t del2 = mDeallocating.erase(bid);
    CHECK(del1 > 0 && del2 > 0);
    if (cache) {
        cache->unblockSlot(slotId);
    }
    if (adjustDequeueConfLocked(updateDequeue)) {
        return;
    }
    mDequeueable++;
    writeIncDequeueableLocked(1);
}

c2_status_t GraphicsTracker::deallocate(uint64_t bid, const sp<Fence> &fence) {
    bool completed;
    bool updateDequeue = false;
    std::shared_ptr<BufferCache> cache;
    int slotId;
    sp<Fence> rFence;
    if (mStopped.load() == true) {
        ALOGE("cannot deallocate due to being stopped");
        return C2_BAD_STATE;
    }
    c2_status_t res = requestDeallocate(bid, fence, &completed, &updateDequeue,
                                        &cache, &slotId, &rFence);
    if (res != C2_OK) {
        return res;
    }
    if (completed) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_OK;
    }

    // Ignore the return value since the IGBP could already be stale.
    // cache->mIgbp is not null if completed is false.
    (void)cache->mIgbp->cancelBuffer(slotId, rFence);

    commitDeallocate(cache, slotId, bid, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    return C2_OK;
}

c2_status_t GraphicsTracker::requestRender(uint64_t bid, std::shared_ptr<BufferCache> *cache,
                                           std::shared_ptr<BufferItem> *pBuffer,
                                           bool *fromCache,
                                           bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to render a buffer which is already deallocating or rendering");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to render a non-dequeued buffer");
        return C2_NOT_FOUND;
    }
    if (!mBufferCache->mIgbp) {
        // Render requested without a surface;
        // reclaim the buffer for dequeue.
        // TODO: is this correct API-wise?
        mDequeued.erase(it);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_BAD_STATE;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferItem> buffer = it->second;
    *cache = mBufferCache;
    if (buffer->mGeneration == mBufferCache->mGeneration) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        mBufferCache->blockSlot(buffer->mSlot);
        *fromCache = true;
    } else {
        *fromCache = false;
    }
    *pBuffer = buffer;
    mDeallocating.emplace(bid);
    return C2_OK;
}

void GraphicsTracker::commitRender(const std::shared_ptr<BufferCache> &cache,
                                   const std::shared_ptr<BufferItem> &buffer,
                                   const std::shared_ptr<BufferItem> &oldBuffer,
                                   bool bufferReplaced,
                                   bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    uint64_t origBid = oldBuffer ? oldBuffer->mId : buffer->mId;

    if (cache) {
        cache->unblockSlot(buffer->mSlot);
        if (oldBuffer) {
            // migrated, register the new buffer to the cache.
            auto ret = cache->mBuffers.emplace(buffer->mSlot, buffer);
            if (!ret.second) {
                ret.first->second = buffer;
            }
        }
    }
    mDeallocating.erase(origBid);
    mDequeued.erase(origBid);

    if (cache.get() != mBufferCache.get() || bufferReplaced) {
        // Surface changed; no need to wait for the buffer to be released.
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        writeIncDequeueableLocked(1);
        return;
    }
}

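// Renders a graphic block to the current surface: looks up the dequeued
// buffer by its AHardwareBuffer id, migrates (attaches) the buffer first if
// it does not belong to the current cache generation, then queues it to the
// IGBP and commits the result.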
c2_status_t GraphicsTracker::render(const C2ConstGraphicBlock& blk,
                                    const IGraphicBufferProducer::QueueBufferInput &input,
                                    IGraphicBufferProducer::QueueBufferOutput *output) {
    uint64_t bid;
    c2_status_t res = retrieveAHardwareBufferId(blk, &bid);
    if (res != C2_OK) {
        ALOGE("retrieving AHB-ID for GraphicBlock failed");
        return C2_CORRUPTED;
    }
    std::shared_ptr<_C2BlockPoolData> poolData =
            _C2BlockFactory::GetGraphicBlockPoolData(blk);
    _C2BlockFactory::DisownIgbaBlock(poolData);
    std::shared_ptr<BufferCache> cache;
    std::shared_ptr<BufferItem> buffer;
    std::shared_ptr<BufferItem> oldBuffer;
    bool updateDequeue = false;
    bool fromCache = false;
    res = requestRender(bid, &cache, &buffer, &fromCache, &updateDequeue);
    if (res != C2_OK) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return res;
    }
    int cacheSlotId = fromCache ? buffer->mSlot : -1;
    ALOGV("render prepared: igbp(%d) slot(%d)", bool(cache->mIgbp), cacheSlotId);
    if (!fromCache) {
        // The buffer does not come from the current cache.
        // The buffer needs to be migrated (attached).
        uint64_t newUsage = 0ULL;

        (void)cache->mIgbp->getConsumerUsage(&newUsage);
        std::shared_ptr<BufferItem> newBuffer =
                buffer->migrateBuffer(newUsage, cache->mGeneration);
        sp<GraphicBuffer> gb = newBuffer ? newBuffer->getGraphicBuffer() : nullptr;

        if (!gb) {
            ALOGE("render: realloc-ing a new buffer for migration failed");
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid, &updateDequeue);
            if (updateDequeue) {
                updateDequeueConf();
            }
            return C2_REFUSED;
        }
        if (cache->mIgbp->attachBuffer(&(newBuffer->mSlot), gb) != ::android::OK) {
            ALOGE("render: attaching a new buffer to IGBP failed");
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid, &updateDequeue);
            if (updateDequeue) {
                updateDequeueConf();
            }
            return C2_REFUSED;
        }
        cache->waitOnSlot(newBuffer->mSlot);
        cache->blockSlot(newBuffer->mSlot);
        oldBuffer = buffer;
        buffer = newBuffer;
    }
    ::android::status_t renderRes = cache->mIgbp->queueBuffer(buffer->mSlot, input, output);
    ALOGV("render done: migration(%d), render(err = %d)", !fromCache, renderRes);
    if (renderRes != ::android::OK) {
        CHECK(renderRes != ::android::BAD_VALUE);
        ALOGE("render: failed to queueBuffer() err = %d", renderRes);
        (void)cache->mIgbp->cancelBuffer(buffer->mSlot, input.fence);
        commitDeallocate(cache, buffer->mSlot, bid, &updateDequeue);
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_REFUSED;
    }

    commitRender(cache, buffer, oldBuffer, output->bufferReplaced, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    return C2_OK;
}

void GraphicsTracker::pollForRenderedFrames(FrameEventHistoryDelta* delta) {
    sp<IGraphicBufferProducer> igbp;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mBufferCache) {
            igbp = mBufferCache->mIgbp;
        }
    }
    if (igbp) {
        igbp->getFrameTimestamps(delta);
    }
}

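// Consumer-side callbacks: onReleased() normally makes one more buffer
// dequeueable, but one onReleased() is ignored for each preceding
// onAttached() of the same generation, since an attached buffer did not go
// through the dequeue accounting here.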
void GraphicsTracker::onReleased(uint32_t generation) {
    bool updateDequeue = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mBufferCache->mGeneration == generation) {
            if (mBufferCache->mNumAttached > 0) {
                ALOGV("one onReleased() ignored for each prior onAttached().");
                mBufferCache->mNumAttached--;
                return;
            }
            if (!adjustDequeueConfLocked(&updateDequeue)) {
                mDequeueable++;
                writeIncDequeueableLocked(1);
            }
        }
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
}

void GraphicsTracker::onAttached(uint32_t generation) {
    std::unique_lock<std::mutex> l(mLock);
    if (mBufferCache->mGeneration == generation) {
        ALOGV("buffer attached");
        mBufferCache->mNumAttached++;
    }
}

} // namespace aidl::android::hardware::media::c2::implementation