/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "BLASTBufferQueue"

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#include <com_android_graphics_libgui_flags.h>
#include <cutils/atomic.h>
#include <ftl/fake_guard.h>
#include <gui/BLASTBufferQueue.h>
#include <gui/BufferItemConsumer.h>
#include <gui/BufferQueueConsumer.h>
#include <gui/BufferQueueCore.h>
#include <gui/BufferQueueProducer.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

#include <gui/FrameRateUtils.h>
#include <gui/GLConsumer.h>
#include <gui/IProducerListener.h>
#include <gui/Surface.h>
#include <gui/TraceUtils.h>
#include <utils/Singleton.h>
#include <utils/Trace.h>

#include <private/gui/ComposerService.h>
#include <private/gui/ComposerServiceAIDL.h>

#include <android-base/thread_annotations.h>

#include <com_android_graphics_libgui_flags.h>

using namespace com::android::graphics::libgui;
using namespace std::chrono_literals;

namespace {

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
template <class Mutex>
class UnlockGuard {
public:
    explicit UnlockGuard(Mutex& lock) : mLock{lock} { mLock.unlock(); }

    ~UnlockGuard() { mLock.lock(); }

    UnlockGuard(const UnlockGuard&) = delete;
    UnlockGuard& operator=(const UnlockGuard&) = delete;

private:
    Mutex& mLock;
};
#endif

inline const char* boolToString(bool b) {
    return b ? "true" : "false";
}

} // namespace

namespace android {

// Macros to include adapter info in log messages
#define BQA_LOGD(x, ...) \
    ALOGD("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGV(x, ...) \
    ALOGV("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
// enable logs for a single layer
//#define BQA_LOGV(x, ...) \
//    ALOGV_IF((strstr(mName.c_str(), "SurfaceView") != nullptr), "[%s](f:%u,a:%u) " x, \
//             mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGE(x, ...) \
    ALOGE("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)

#define BBQ_TRACE(x, ...)                                                                  \
    ATRACE_FORMAT("%s - %s(f:%u,a:%u)" x, __FUNCTION__, mName.c_str(), mNumFrameAvailable, \
                  mNumAcquired, ##__VA_ARGS__)

#define UNIQUE_LOCK_WITH_ASSERTION(mutex) \
    std::unique_lock _lock{mutex};        \
    base::ScopedLockAssertion assumeLocked(mutex);
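// Illustrative usage note (not part of the original source): inside a BLASTBufferQueue member
// function, UNIQUE_LOCK_WITH_ASSERTION(mMutex) declares a std::unique_lock named `_lock` plus a
// ScopedLockAssertion, so clang thread-safety analysis treats mMutex as held while `_lock` can
// still be handed to condition-variable waits such as mCallbackCV.wait(_lock).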

void BLASTBufferItemConsumer::onDisconnect() {
    Mutex::Autolock lock(mMutex);
    mPreviouslyConnected = mCurrentlyConnected;
    mCurrentlyConnected = false;
    if (mPreviouslyConnected) {
        mDisconnectEvents.push(mCurrentFrameNumber);
    }
    mFrameEventHistory.onDisconnect();
}

void BLASTBufferItemConsumer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                                       FrameEventHistoryDelta* outDelta) {
    Mutex::Autolock lock(mMutex);
    if (newTimestamps) {
        // BufferQueueProducer only adds a new timestamp on
        // queueBuffer
        mCurrentFrameNumber = newTimestamps->frameNumber;
        mFrameEventHistory.addQueue(*newTimestamps);
    }
    if (outDelta) {
        // frame event histories will be processed
        // only after the producer connects and requests
        // deltas for the first time. Forward this intent
        // to SF-side to turn event processing back on
        mPreviouslyConnected = mCurrentlyConnected;
        mCurrentlyConnected = true;
        mFrameEventHistory.getAndResetDelta(outDelta);
    }
}

void BLASTBufferItemConsumer::updateFrameTimestamps(
        uint64_t frameNumber, uint64_t previousFrameNumber, nsecs_t refreshStartTime,
        const sp<Fence>& glDoneFence, const sp<Fence>& presentFence,
        const sp<Fence>& prevReleaseFence, CompositorTiming compositorTiming, nsecs_t latchTime,
        nsecs_t dequeueReadyTime) {
    Mutex::Autolock lock(mMutex);

    // if the producer is not connected, don't bother updating,
    // the next producer that connects won't access this frame event
    if (!mCurrentlyConnected) return;
    std::shared_ptr<FenceTime> glDoneFenceTime = std::make_shared<FenceTime>(glDoneFence);
    std::shared_ptr<FenceTime> presentFenceTime = std::make_shared<FenceTime>(presentFence);
    std::shared_ptr<FenceTime> releaseFenceTime = std::make_shared<FenceTime>(prevReleaseFence);

    mFrameEventHistory.addLatch(frameNumber, latchTime);
    if (flags::frametimestamps_previousrelease()) {
        if (previousFrameNumber > 0) {
            mFrameEventHistory.addRelease(previousFrameNumber, dequeueReadyTime,
                                          std::move(releaseFenceTime));
        }
    } else {
        mFrameEventHistory.addRelease(frameNumber, dequeueReadyTime, std::move(releaseFenceTime));
    }

    mFrameEventHistory.addPreComposition(frameNumber, refreshStartTime);
    mFrameEventHistory.addPostComposition(frameNumber, glDoneFenceTime, presentFenceTime,
                                          compositorTiming);
}

void BLASTBufferItemConsumer::getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) {
    bool disconnect = false;
    Mutex::Autolock lock(mMutex);
    while (!mDisconnectEvents.empty() && mDisconnectEvents.front() <= frameNumber) {
        disconnect = true;
        mDisconnectEvents.pop();
    }
    if (needsDisconnect != nullptr) *needsDisconnect = disconnect;
}

void BLASTBufferItemConsumer::onSidebandStreamChanged() {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        sp<NativeHandle> stream = getSidebandStream();
        bbq->setSidebandStream(stream);
    }
}

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_SETFRAMERATE)
void BLASTBufferItemConsumer::onSetFrameRate(float frameRate, int8_t compatibility,
                                             int8_t changeFrameRateStrategy) {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        bbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
    }
}
#endif

void BLASTBufferItemConsumer::resizeFrameEventHistory(size_t newSize) {
    Mutex::Autolock lock(mMutex);
    mFrameEventHistory.resize(newSize);
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, bool updateDestinationFrame)
      : mSurfaceControl(nullptr),
        mSize(1, 1),
        mRequestedSize(mSize),
        mFormat(PIXEL_FORMAT_RGBA_8888),
        mTransactionReadyCallback(nullptr),
        mSyncTransaction(nullptr),
        mUpdateDestinationFrame(updateDestinationFrame) {
    createBufferQueue(&mProducer, &mConsumer);
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
    mBufferItemConsumer = sp<BLASTBufferItemConsumer>::make(mProducer, mConsumer,
                                                            GraphicBuffer::USAGE_HW_COMPOSER |
                                                                    GraphicBuffer::USAGE_HW_TEXTURE,
                                                            1, false, this);
#else
    mBufferItemConsumer = sp<BLASTBufferItemConsumer>::make(mConsumer,
                                                            GraphicBuffer::USAGE_HW_COMPOSER |
                                                                    GraphicBuffer::USAGE_HW_TEXTURE,
                                                            1, false, this);
#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
    // since the adapter is in the client process, set dequeue timeout
    // explicitly so that dequeueBuffer will block
    mProducer->setDequeueTimeout(std::numeric_limits<int64_t>::max());

    static std::atomic<uint32_t> nextId = 0;
    mProducerId = nextId++;
    mName = name + "#" + std::to_string(mProducerId);
    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(mProducerId);
    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(mProducerId);
    mBufferItemConsumer->setName(String8(consumerName.c_str()));
    mBufferItemConsumer->setFrameAvailableListener(this);

    ComposerServiceAIDL::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
    mBufferItemConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBuffers);
    mCurrentMaxAcquiredBufferCount = mMaxAcquiredBuffers;

    TransactionCompletedListener::getInstance()->addQueueStallListener(
            [&](const std::string& reason) {
                std::function<void(const std::string&)> callbackCopy;
                {
                    std::unique_lock _lock{mMutex};
                    callbackCopy = mTransactionHangCallback;
                }
                if (callbackCopy) callbackCopy(reason);
            },
            this);

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
    gui::BufferReleaseChannel::open(mName, mBufferReleaseConsumer, mBufferReleaseProducer);
    mBufferReleaseReader.emplace(*this);
#endif

    BQA_LOGV("BLASTBufferQueue created");
}

BLASTBufferQueue::~BLASTBufferQueue() {
    TransactionCompletedListener::getInstance()->removeQueueStallListener(this);
    if (mPendingTransactions.empty()) {
        return;
    }
    BQA_LOGE("Applying pending transactions on dtor %d",
             static_cast<uint32_t>(mPendingTransactions.size()));
    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
    // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
    t.setApplyToken(mApplyToken).apply(false, true);

    if (mTransactionReadyCallback) {
        mTransactionReadyCallback(mSyncTransaction);
    }
}

void BLASTBufferQueue::onFirstRef() {
    // safe default, most producers are expected to override this
    mProducer->setMaxDequeuedBufferCount(2);
}

void BLASTBufferQueue::update(const sp<SurfaceControl>& surface, uint32_t width, uint32_t height,
                              int32_t format) {
    LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");

    std::lock_guard _lock{mMutex};
    if (mFormat != format) {
        mFormat = format;
        mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
    }

    const bool surfaceControlChanged = !SurfaceControl::isSameSurface(mSurfaceControl, surface);
    if (surfaceControlChanged && mSurfaceControl != nullptr) {
        BQA_LOGD("Updating SurfaceControl without recreating BBQ");
    }

    // Always update the native object even though they might have the same layer handle, so we can
    // get the updated transform hint from WM.
    mSurfaceControl = surface;
    SurfaceComposerClient::Transaction t;
    bool applyTransaction = false;
    if (surfaceControlChanged) {
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        updateBufferReleaseProducer();
#endif
        t.setFlags(mSurfaceControl, layer_state_t::eEnableBackpressure,
                   layer_state_t::eEnableBackpressure);
        // Migrate the picture profile handle to the new surface control.
        if (com_android_graphics_libgui_flags_apply_picture_profiles() &&
            mPictureProfileHandle.has_value()) {
            t.setPictureProfileHandle(mSurfaceControl, *mPictureProfileHandle);
        }
        applyTransaction = true;
    }
    mTransformHint = mSurfaceControl->getTransformHint();
    mBufferItemConsumer->setTransformHint(mTransformHint);
    BQA_LOGV("update width=%d height=%d format=%d mTransformHint=%d", width, height, format,
             mTransformHint);

    ui::Size newSize(width, height);
    if (mRequestedSize != newSize) {
        mRequestedSize.set(newSize);
        mBufferItemConsumer->setDefaultBufferSize(mRequestedSize.width, mRequestedSize.height);
        if (mLastBufferInfo.scalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
            // If the buffer supports scaling, update the frame immediately since the client may
            // want to scale the existing buffer to the new size.
            mSize = mRequestedSize;
            if (mUpdateDestinationFrame) {
                t.setDestinationFrame(mSurfaceControl, Rect(newSize));
                applyTransaction = true;
            }
        }
    }
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t.setApplyToken(mApplyToken).apply(false /* synchronous */, true /* oneWay */);
    }
}

static std::optional<SurfaceControlStats> findMatchingStat(
        const std::vector<SurfaceControlStats>& stats, const sp<SurfaceControl>& sc) {
    for (auto stat : stats) {
        if (SurfaceControl::isSameSurface(sc, stat.surfaceControl)) {
            return stat;
        }
    }
    return std::nullopt;
}

TransactionCompletedCallbackTakesContext BLASTBufferQueue::makeTransactionCommittedCallbackThunk() {
    return [bbq = sp<BLASTBufferQueue>::fromExisting(
                    this)](void* /*context*/, nsecs_t latchTime, const sp<Fence>& presentFence,
                           const std::vector<SurfaceControlStats>& stats) {
        bbq->transactionCommittedCallback(latchTime, presentFence, stats);
    };
}

void BLASTBufferQueue::transactionCommittedCallback(nsecs_t /*latchTime*/,
                                                    const sp<Fence>& /*presentFence*/,
                                                    const std::vector<SurfaceControlStats>& stats) {
    {
        std::lock_guard _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCommittedCallback");
        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            std::optional<SurfaceControlStats> stat = findMatchingStat(stats, pendingSC);
            if (stat) {
                uint64_t currFrameNumber = stat->frameEventStats.frameNumber;

                // We need to check if we were waiting for a transaction callback in order to
                // process any pending buffers and unblock. It's possible to get transaction
                // callbacks for previous requests so we need to ensure that there are no pending
                // frame numbers that were in a sync. We remove the frame from mSyncedFrameNumbers
                // set and then check if it's empty. If there are no more pending syncs, we can
                // proceed with flushing the shadow queue.
                mSyncedFrameNumbers.erase(currFrameNumber);
                if (mSyncedFrameNumbers.empty()) {
                    flushShadowQueue();
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCommittedCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }
    }
}

TransactionCompletedCallbackTakesContext BLASTBufferQueue::makeTransactionCallbackThunk() {
    return [bbq = sp<BLASTBufferQueue>::fromExisting(
                    this)](void* /*context*/, nsecs_t latchTime, const sp<Fence>& presentFence,
                           const std::vector<SurfaceControlStats>& stats) {
        bbq->transactionCallback(latchTime, presentFence, stats);
    };
}

void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                           const std::vector<SurfaceControlStats>& stats) {
    {
        std::lock_guard _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCallback");

        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            mSurfaceControlsWithPendingCallback.pop();
            std::optional<SurfaceControlStats> statsOptional = findMatchingStat(stats, pendingSC);
            if (statsOptional) {
                SurfaceControlStats stat = *statsOptional;
                if (stat.transformHint) {
                    mTransformHint = *stat.transformHint;
                    mBufferItemConsumer->setTransformHint(mTransformHint);
                    BQA_LOGV("updated mTransformHint=%d", mTransformHint);
                }
                // Update frame timestamps if the frame was latched and presented, as indicated
                // by a valid latch time.
                if (stat.latchTime > 0) {
                    mBufferItemConsumer
                            ->updateFrameTimestamps(stat.frameEventStats.frameNumber,
                                                    stat.frameEventStats.previousFrameNumber,
                                                    stat.frameEventStats.refreshStartTime,
                                                    stat.frameEventStats.gpuCompositionDoneFence,
                                                    stat.presentFence, stat.previousReleaseFence,
                                                    stat.frameEventStats.compositorTiming,
                                                    stat.latchTime,
                                                    stat.frameEventStats.dequeueReadyTime);
                }
                auto currFrameNumber = stat.frameEventStats.frameNumber;
                // Release stale buffers.
                for (const auto& [key, _] : mSubmitted) {
                    if (currFrameNumber <= key.framenumber) {
                        continue; // not stale.
                    }
                    releaseBufferCallbackLocked(key,
                                                stat.previousReleaseFence
                                                        ? stat.previousReleaseFence
                                                        : Fence::NO_FENCE,
                                                stat.currentMaxAcquiredBufferCount,
                                                true /* fakeRelease */);
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }
    }
}

void BLASTBufferQueue::flushShadowQueue() {
    BQA_LOGV("flushShadowQueue");
    int32_t numFramesToFlush = mNumFrameAvailable;
    while (numFramesToFlush > 0) {
        acquireNextBufferLocked(std::nullopt);
        numFramesToFlush--;
    }
}

// Unlike transactionCallbackThunk, the release buffer callback does not extend the life of the
// BBQ. This is because if the BBQ is destroyed, then the buffers will be released by the client.
// So we pass in a weak pointer to the BBQ and, if it is still alive, we release the buffer.
// Otherwise, this is a no-op.
ReleaseBufferCallback BLASTBufferQueue::makeReleaseBufferCallbackThunk() {
    return [weakBbq = wp<BLASTBufferQueue>::fromExisting(
                    this)](const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
                           std::optional<uint32_t> currentMaxAcquiredBufferCount) {
        sp<BLASTBufferQueue> bbq = weakBbq.promote();
        if (!bbq) {
            ALOGV("releaseBufferCallbackThunk %s blastBufferQueue is dead", id.to_string().c_str());
            return;
        }
        bbq->releaseBufferCallback(id, releaseFence, currentMaxAcquiredBufferCount);
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        bbq->drainBufferReleaseConsumer();
#endif
    };
}

void BLASTBufferQueue::releaseBufferCallback(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    std::lock_guard _lock{mMutex};
    BBQ_TRACE();
    releaseBufferCallbackLocked(id, releaseFence, currentMaxAcquiredBufferCount,
                                false /* fakeRelease */);
}

void BLASTBufferQueue::releaseBufferCallbackLocked(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount, bool fakeRelease) {
    ATRACE_CALL();
    BQA_LOGV("releaseBufferCallback %s", id.to_string().c_str());

    // Calculate how many buffers we need to hold before we release them back
    // to the buffer queue. This will prevent higher latency when we are running
    // on a lower refresh rate than the max supported. We only do that for EGL
    // clients as others don't care about latency
    const auto it = mSubmitted.find(id);
    const bool isEGL = it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;

    if (currentMaxAcquiredBufferCount) {
        mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
    }

    const uint32_t numPendingBuffersToHold =
            isEGL ? std::max(0, mMaxAcquiredBuffers - (int32_t)mCurrentMaxAcquiredBufferCount) : 0;
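    // Illustrative example (values assumed): with mMaxAcquiredBuffers == 2 and SurfaceFlinger
    // currently acquiring only one buffer (mCurrentMaxAcquiredBufferCount == 1), an EGL client
    // holds 2 - 1 = 1 pending release; non-EGL clients always hold 0.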

    auto rb = ReleasedBuffer{id, releaseFence};
    if (std::find(mPendingRelease.begin(), mPendingRelease.end(), rb) == mPendingRelease.end()) {
        mPendingRelease.emplace_back(rb);
        if (fakeRelease) {
            BQA_LOGE("Faking releaseBufferCallback from transactionCompleteCallback %" PRIu64,
                     id.framenumber);
            BBQ_TRACE("FakeReleaseCallback");
        }
    }

    // Release all buffers that are beyond the ones that we need to hold
    while (mPendingRelease.size() > numPendingBuffersToHold) {
        const auto releasedBuffer = mPendingRelease.front();
        mPendingRelease.pop_front();
        releaseBuffer(releasedBuffer.callbackId, releasedBuffer.releaseFence);
        // Don't process the transactions here if mSyncedFrameNumbers is not empty. That means
        // there are still transactions with sync buffers in them that have not been applied or
        // dropped. Instead, let onFrameAvailable handle processing them since it will merge with
        // the syncTransaction.
        if (mSyncedFrameNumbers.empty()) {
            acquireNextBufferLocked(std::nullopt);
        }
    }

    ATRACE_INT("PendingRelease", mPendingRelease.size());
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());
    mCallbackCV.notify_all();
}

void BLASTBufferQueue::releaseBuffer(const ReleaseCallbackId& callbackId,
                                     const sp<Fence>& releaseFence) {
    auto it = mSubmitted.find(callbackId);
    if (it == mSubmitted.end()) {
        return;
    }
    mNumAcquired--;
    BBQ_TRACE("frame=%" PRIu64, callbackId.framenumber);
    BQA_LOGV("released %s", callbackId.to_string().c_str());
    mBufferItemConsumer->releaseBuffer(it->second, releaseFence);
    mSubmitted.erase(it);
    // Remove the frame number from mSyncedFrameNumbers since we can get a release callback
    // without getting a transaction committed if the buffer was dropped.
    mSyncedFrameNumbers.erase(callbackId.framenumber);
}

static ui::Size getBufferSize(const BufferItem& item) {
    uint32_t bufWidth = item.mGraphicBuffer->getWidth();
    uint32_t bufHeight = item.mGraphicBuffer->getHeight();

    // Take the buffer's orientation into account
    if (item.mTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }
    return ui::Size(bufWidth, bufHeight);
}

status_t BLASTBufferQueue::acquireNextBufferLocked(
        const std::optional<SurfaceComposerClient::Transaction*> transaction) {
    // Check if we have frames available and we have not acquired the maximum number of buffers.
    // Even with this check, the consumer can fail to acquire an additional buffer if the consumer
    // has already acquired (mMaxAcquiredBuffers + 1) and the new buffer is not droppable. In this
    // case mBufferItemConsumer->acquireBuffer will return with NO_BUFFER_AVAILABLE.
    if (mNumFrameAvailable == 0) {
        BQA_LOGV("Can't acquire next buffer. No available frames");
        return BufferQueue::NO_BUFFER_AVAILABLE;
    }

    if (mNumAcquired >= (mMaxAcquiredBuffers + 2)) {
        BQA_LOGV("Can't acquire next buffer. Already acquired max frames %d max:%d + 2",
                 mNumAcquired, mMaxAcquiredBuffers);
        return BufferQueue::NO_BUFFER_AVAILABLE;
    }

    if (mSurfaceControl == nullptr) {
        BQA_LOGE("ERROR : surface control is null");
        return NAME_NOT_FOUND;
    }

    SurfaceComposerClient::Transaction localTransaction;
    bool applyTransaction = true;
    SurfaceComposerClient::Transaction* t = &localTransaction;
    if (transaction) {
        t = *transaction;
        applyTransaction = false;
    }

    BufferItem bufferItem;

    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status == BufferQueue::NO_BUFFER_AVAILABLE) {
        BQA_LOGV("Failed to acquire a buffer, err=NO_BUFFER_AVAILABLE");
        return status;
    } else if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer, err=%s", statusToString(status).c_str());
        return status;
    }

    auto buffer = bufferItem.mGraphicBuffer;
    mNumFrameAvailable--;
    BBQ_TRACE("frame=%" PRIu64, bufferItem.mFrameNumber);

    if (buffer == nullptr) {
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        BQA_LOGE("Buffer was empty");
        return BAD_VALUE;
    }

    if (rejectBuffer(bufferItem)) {
        BQA_LOGE("rejecting buffer:active_size=%dx%d, requested_size=%dx%d "
                 "buffer{size=%dx%d transform=%d}",
                 mSize.width, mSize.height, mRequestedSize.width, mRequestedSize.height,
                 buffer->getWidth(), buffer->getHeight(), bufferItem.mTransform);
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        return acquireNextBufferLocked(transaction);
    }

    mNumAcquired++;
    mLastAcquiredFrameNumber = bufferItem.mFrameNumber;
    ReleaseCallbackId releaseCallbackId(buffer->getId(), mLastAcquiredFrameNumber);
    mSubmitted.emplace_or_replace(releaseCallbackId, bufferItem);

    bool needsDisconnect = false;
    mBufferItemConsumer->getConnectionEvents(bufferItem.mFrameNumber, &needsDisconnect);

    // if producer disconnected before, notify SurfaceFlinger
    if (needsDisconnect) {
        t->notifyProducerDisconnect(mSurfaceControl);
    }

    // Only update mSize for destination bounds if the incoming buffer matches the requested size.
    // Otherwise, it could cause stretching since the destination bounds will update before the
    // buffer with the new size is acquired.
    if (mRequestedSize == getBufferSize(bufferItem) ||
        bufferItem.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        mSize = mRequestedSize;
    }
    Rect crop = computeCrop(bufferItem);
    mLastBufferInfo.update(true /* hasBuffer */, bufferItem.mGraphicBuffer->getWidth(),
                           bufferItem.mGraphicBuffer->getHeight(), bufferItem.mTransform,
                           bufferItem.mScalingMode, crop);

    auto releaseBufferCallback = makeReleaseBufferCallbackThunk();
    sp<Fence> fence =
            bufferItem.mFence ? sp<Fence>::make(bufferItem.mFence->dup()) : Fence::NO_FENCE;

    nsecs_t dequeueTime = -1;
    {
        std::lock_guard _lock{mTimestampMutex};
        auto dequeueTimeIt = mDequeueTimestamps.find(buffer->getId());
        if (dequeueTimeIt != mDequeueTimestamps.end()) {
            dequeueTime = dequeueTimeIt->second;
            mDequeueTimestamps.erase(dequeueTimeIt);
        }
    }

    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, mProducerId,
                 releaseBufferCallback, dequeueTime);
    t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
    t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
    t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
    t->addTransactionCompletedCallback(makeTransactionCallbackThunk(), nullptr);

    mSurfaceControlsWithPendingCallback.push(mSurfaceControl);

    if (mUpdateDestinationFrame) {
        t->setDestinationFrame(mSurfaceControl, Rect(mSize));
    } else {
        const bool ignoreDestinationFrame =
                bufferItem.mScalingMode == NATIVE_WINDOW_SCALING_MODE_FREEZE;
        t->setFlags(mSurfaceControl,
                    ignoreDestinationFrame ? layer_state_t::eIgnoreDestinationFrame : 0,
                    layer_state_t::eIgnoreDestinationFrame);
    }
    t->setBufferCrop(mSurfaceControl, crop);
    t->setTransform(mSurfaceControl, bufferItem.mTransform);
    t->setTransformToDisplayInverse(mSurfaceControl, bufferItem.mTransformToDisplayInverse);
    t->setAutoRefresh(mSurfaceControl, bufferItem.mAutoRefresh);
    if (!bufferItem.mIsAutoTimestamp) {
        t->setDesiredPresentTime(bufferItem.mTimestamp);
    }
    if (com_android_graphics_libgui_flags_apply_picture_profiles() &&
        bufferItem.mPictureProfileHandle.has_value()) {
        t->setPictureProfileHandle(mSurfaceControl, *bufferItem.mPictureProfileHandle);
        // The current picture profile must be maintained in case the BBQ gets its
        // SurfaceControl switched out.
        mPictureProfileHandle = bufferItem.mPictureProfileHandle;
        // Clear out the picture profile if the requestor has asked for it to be cleared
        if (mPictureProfileHandle == PictureProfileHandle::NONE) {
            mPictureProfileHandle = std::nullopt;
        }
    }

    // Drop stale frame timeline infos
    while (!mPendingFrameTimelines.empty() &&
           mPendingFrameTimelines.front().first < bufferItem.mFrameNumber) {
        ATRACE_FORMAT_INSTANT("dropping stale frameNumber: %" PRIu64 " vsyncId: %" PRId64,
                              mPendingFrameTimelines.front().first,
                              mPendingFrameTimelines.front().second.vsyncId);
        mPendingFrameTimelines.pop();
    }

    if (!mPendingFrameTimelines.empty() &&
        mPendingFrameTimelines.front().first == bufferItem.mFrameNumber) {
        ATRACE_FORMAT_INSTANT("Transaction::setFrameTimelineInfo frameNumber: %" PRIu64
                              " vsyncId: %" PRId64,
                              bufferItem.mFrameNumber,
                              mPendingFrameTimelines.front().second.vsyncId);
        t->setFrameTimelineInfo(mPendingFrameTimelines.front().second);
        mPendingFrameTimelines.pop();
    }

    mergePendingTransactions(t, bufferItem.mFrameNumber);
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t->setApplyToken(mApplyToken).apply(false, true);
        mAppliedLastTransaction = true;
        mLastAppliedFrameNumber = bufferItem.mFrameNumber;
    } else {
        t->setBufferHasBarrier(mSurfaceControl, mLastAppliedFrameNumber);
        mAppliedLastTransaction = false;
    }

    BQA_LOGV("acquireNextBufferLocked size=%dx%d mFrameNumber=%" PRIu64
             " applyTransaction=%s mTimestamp=%" PRId64 "%s mPendingTransactions.size=%d"
             " graphicBufferId=%" PRIu64 "%s transform=%d",
             mSize.width, mSize.height, bufferItem.mFrameNumber, boolToString(applyTransaction),
             bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp ? "(auto)" : "",
             static_cast<uint32_t>(mPendingTransactions.size()), bufferItem.mGraphicBuffer->getId(),
             bufferItem.mAutoRefresh ? " mAutoRefresh" : "", bufferItem.mTransform);
    return OK;
}

Rect BLASTBufferQueue::computeCrop(const BufferItem& item) {
    if (item.mScalingMode == NATIVE_WINDOW_SCALING_MODE_SCALE_CROP) {
        return GLConsumer::scaleDownCrop(item.mCrop, mSize.width, mSize.height);
    }
    return item.mCrop;
}

void BLASTBufferQueue::acquireAndReleaseBuffer() {
    BBQ_TRACE();
    BufferItem bufferItem;
    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer in acquireAndReleaseBuffer, err=%s",
                 statusToString(status).c_str());
        return;
    }
    mNumFrameAvailable--;
    mBufferItemConsumer->releaseBuffer(bufferItem, bufferItem.mFence);
}

void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;

    {
        UNIQUE_LOCK_WITH_ASSERTION(mMutex);
        BBQ_TRACE();
        bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();

        const bool syncTransactionSet = mTransactionReadyCallback != nullptr;
        BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            // If we are going to re-use the same mSyncTransaction, release the buffer that may
            // already be set in the Transaction. This is to allow us a free slot early to continue
            // processing a new buffer.
            if (!mAcquireSingleBuffer) {
                auto bufferData = mSyncTransaction->getAndClearBuffer(mSurfaceControl);
                if (bufferData) {
                    BQA_LOGD("Releasing previous buffer when syncing: framenumber=%" PRIu64,
                             bufferData->frameNumber);
                    releaseBuffer(bufferData->generateReleaseCallbackId(),
                                  bufferData->acquireFence);
                }
            }

            if (waitForTransactionCallback) {
                // We are waiting on a previous sync's transaction callback so allow another sync
                // transaction to proceed.
                //
                // We need to first flush out the transactions that were in between the two syncs.
                // We do this by merging them into mSyncTransaction so any buffer merging will get
                // a release callback invoked.
                while (mNumFrameAvailable > 0) {
                    // flush out the shadow queue
                    acquireAndReleaseBuffer();
                }
            } else {
                // Make sure the frame available count is 0 before proceeding with a sync to ensure
                // the correct frame is used for the sync. The only way mNumFrameAvailable would be
                // greater than 0 is if we already ran out of buffers previously. This means we
                // need to flush the buffers before proceeding with the sync.
                while (mNumFrameAvailable > 0) {
                    BQA_LOGD("waiting until no queued buffers");
                    mCallbackCV.wait(_lock);
                }
            }
        }

        // add to shadow queue
        mNumFrameAvailable++;
        if (waitForTransactionCallback && mNumFrameAvailable >= 2) {
            acquireAndReleaseBuffer();
        }
        ATRACE_INT(mQueuedBufferTrace.c_str(),
                   mNumFrameAvailable + mNumAcquired - mPendingRelease.size());

        BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " syncTransactionSet=%s",
                 item.mFrameNumber, boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            // Add to mSyncedFrameNumbers before waiting in case any buffers are released
            // while waiting for a free buffer. The release and commit callback will try to
            // acquire buffers if there are any available, but we don't want it to acquire
            // in the case where a sync transaction wants the buffer.
            mSyncedFrameNumbers.emplace(item.mFrameNumber);
            // If there's no available buffer and we're in a sync transaction, we need to wait
            // instead of returning since we guarantee a buffer will be acquired for the sync.
            while (acquireNextBufferLocked(mSyncTransaction) == BufferQueue::NO_BUFFER_AVAILABLE) {
                BQA_LOGD("waiting for available buffer");
                mCallbackCV.wait(_lock);
            }

            // Only need a commit callback when syncing to ensure the buffer that's synced has been
            // sent to SF
            mSyncTransaction
                    ->addTransactionCommittedCallback(makeTransactionCommittedCallbackThunk(),
                                                      nullptr);
            if (mAcquireSingleBuffer) {
                prevCallback = mTransactionReadyCallback;
                prevTransaction = mSyncTransaction;
                mTransactionReadyCallback = nullptr;
                mSyncTransaction = nullptr;
            }
        } else if (!waitForTransactionCallback) {
            acquireNextBufferLocked(std::nullopt);
        }
    }
    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::onFrameReplaced(const BufferItem& item) {
    BQA_LOGV("onFrameReplaced framenumber=%" PRIu64, item.mFrameNumber);
    // Do nothing since we are not storing unacquired buffer items locally.
}

void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
    std::lock_guard _lock{mTimestampMutex};
    mDequeueTimestamps.emplace_or_replace(bufferId, systemTime());
};

void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
    std::lock_guard _lock{mTimestampMutex};
    mDequeueTimestamps.erase(bufferId);
}

bool BLASTBufferQueue::syncNextTransaction(
        std::function<void(SurfaceComposerClient::Transaction*)> callback,
        bool acquireSingleBuffer) {
    LOG_ALWAYS_FATAL_IF(!callback,
                        "BLASTBufferQueue: callback passed in to syncNextTransaction must not be "
                        "NULL");

    std::lock_guard _lock{mMutex};
    BBQ_TRACE();
    if (mTransactionReadyCallback) {
        ALOGW("Attempting to overwrite transaction callback in syncNextTransaction");
        return false;
    }

    mTransactionReadyCallback = callback;
    mSyncTransaction = new SurfaceComposerClient::Transaction();
    mAcquireSingleBuffer = acquireSingleBuffer;
    return true;
}

void BLASTBufferQueue::stopContinuousSyncTransaction() {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;
    {
        std::lock_guard _lock{mMutex};
        if (mAcquireSingleBuffer || !mTransactionReadyCallback) {
            ALOGW("Attempting to stop continuous sync when none are active");
            return;
        }

        prevCallback = mTransactionReadyCallback;
        prevTransaction = mSyncTransaction;

        mTransactionReadyCallback = nullptr;
        mSyncTransaction = nullptr;
        mAcquireSingleBuffer = true;
    }

    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::clearSyncTransaction() {
    std::lock_guard _lock{mMutex};
    if (!mAcquireSingleBuffer) {
        ALOGW("Attempting to clear sync transaction when none are active");
        return;
    }

    mTransactionReadyCallback = nullptr;
    mSyncTransaction = nullptr;
}

bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
    if (item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        // Only reject buffers if scaling mode is freeze.
        return false;
    }

    ui::Size bufferSize = getBufferSize(item);
    if (mRequestedSize != mSize && mRequestedSize == bufferSize) {
        return false;
    }

    // reject buffers if the buffer size doesn't match.
    return mSize != bufferSize;
}

class BBQSurface : public Surface {
private:
    std::mutex mMutex;
    sp<BLASTBufferQueue> mBbq GUARDED_BY(mMutex);
    bool mDestroyed GUARDED_BY(mMutex) = false;

public:
    BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
               const sp<IBinder>& scHandle, const sp<BLASTBufferQueue>& bbq)
          : Surface(igbp, controlledByApp, scHandle), mBbq(bbq) {}

    void allocateBuffers() override {
        ATRACE_CALL();
        uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
        uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
        auto gbp = getIGraphicBufferProducer();
        std::thread allocateThread([reqWidth, reqHeight, gbp = getIGraphicBufferProducer(),
                                    reqFormat = mReqFormat, reqUsage = mReqUsage]() {
            if (com_android_graphics_libgui_flags_allocate_buffer_priority()) {
                androidSetThreadName("allocateBuffers");
                pid_t tid = gettid();
                androidSetThreadPriority(tid, ANDROID_PRIORITY_DISPLAY);
            }

            gbp->allocateBuffers(reqWidth, reqHeight, reqFormat, reqUsage);
        });
        allocateThread.detach();
    }

    status_t setFrameRate(float frameRate, int8_t compatibility,
                          int8_t changeFrameRateStrategy) override {
        if (flags::bq_setframerate()) {
            return Surface::setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
        }

        std::lock_guard _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
                               "BBQSurface::setFrameRate")) {
            return BAD_VALUE;
        }
        return mBbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
    }

    status_t setFrameTimelineInfo(uint64_t frameNumber,
                                  const FrameTimelineInfo& frameTimelineInfo) override {
        std::lock_guard _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        return mBbq->setFrameTimelineInfo(frameNumber, frameTimelineInfo);
    }

    void destroy() override {
        Surface::destroy();

        std::lock_guard _lock{mMutex};
        mDestroyed = true;
        mBbq = nullptr;
    }
};

// TODO: Can we coalesce this with frame updates? Need to confirm
// no timing issues.
status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
                                        bool shouldBeSeamless) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
}

status_t BLASTBufferQueue::setFrameTimelineInfo(uint64_t frameNumber,
                                                const FrameTimelineInfo& frameTimelineInfo) {
    ATRACE_FORMAT("%s(%s) frameNumber: %" PRIu64 " vsyncId: %" PRId64, __func__, mName.c_str(),
                  frameNumber, frameTimelineInfo.vsyncId);
    std::lock_guard _lock{mMutex};
    mPendingFrameTimelines.push({frameNumber, frameTimelineInfo});
    return OK;
}

void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    t.setSidebandStream(mSurfaceControl, stream).apply();
}

sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
    std::lock_guard _lock{mMutex};
    sp<IBinder> scHandle = nullptr;
    if (includeSurfaceControlHandle && mSurfaceControl) {
        scHandle = mSurfaceControl->getHandle();
    }
    return sp<BBQSurface>::make(mProducer, true, scHandle, this);
}

void BLASTBufferQueue::mergeWithNextTransaction(SurfaceComposerClient::Transaction* t,
                                                uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};
    if (mLastAcquiredFrameNumber >= frameNumber) {
        // Apply the transaction since we have already acquired the desired frame.
        t->setApplyToken(mApplyToken).apply();
    } else {
        mPendingTransactions.emplace_back(frameNumber, std::move(*t));
        // Clear the transaction so it can't be applied elsewhere.
        t->clear();
    }
}

void BLASTBufferQueue::applyPendingTransactions(uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};

    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, frameNumber);
    // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
    t.setApplyToken(mApplyToken).apply(false, true);
}

void BLASTBufferQueue::mergePendingTransactions(SurfaceComposerClient::Transaction* t,
                                                uint64_t frameNumber) {
    auto mergeTransaction =
            [t, currentFrameNumber = frameNumber](
                    std::pair<uint64_t, SurfaceComposerClient::Transaction>& pendingTransaction) {
                auto& [targetFrameNumber, transaction] = pendingTransaction;
                if (currentFrameNumber < targetFrameNumber) {
                    return false;
                }
                t->merge(std::move(transaction));
                return true;
            };

    mPendingTransactions.erase(std::remove_if(mPendingTransactions.begin(),
                                              mPendingTransactions.end(), mergeTransaction),
                               mPendingTransactions.end());
}

SurfaceComposerClient::Transaction* BLASTBufferQueue::gatherPendingTransactions(
        uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction* t = new SurfaceComposerClient::Transaction();
    mergePendingTransactions(t, frameNumber);
    return t;
}

// Maintains a single worker thread per process that services a list of runnables.
class AsyncWorker : public Singleton<AsyncWorker> {
private:
    std::thread mThread;
    bool mDone = false;
    std::deque<std::function<void()>> mRunnables;
    std::mutex mMutex;
    std::condition_variable mCv;
    void run() {
        std::unique_lock<std::mutex> lock(mMutex);
        while (!mDone) {
            while (!mRunnables.empty()) {
                std::deque<std::function<void()>> runnables = std::move(mRunnables);
                mRunnables.clear();
                lock.unlock();
                // Run outside the lock since the runnable might trigger another
                // post to the async worker.
                execute(runnables);
                lock.lock();
            }
            mCv.wait(lock);
        }
    }

    void execute(std::deque<std::function<void()>>& runnables) {
        while (!runnables.empty()) {
            std::function<void()> runnable = runnables.front();
            runnables.pop_front();
            runnable();
        }
    }

public:
    AsyncWorker() : Singleton<AsyncWorker>() { mThread = std::thread(&AsyncWorker::run, this); }

    ~AsyncWorker() {
        mDone = true;
        mCv.notify_all();
        if (mThread.joinable()) {
            mThread.join();
        }
    }

    void post(std::function<void()> runnable) {
        std::unique_lock<std::mutex> lock(mMutex);
        mRunnables.emplace_back(std::move(runnable));
        mCv.notify_one();
    }
};
ANDROID_SINGLETON_STATIC_INSTANCE(AsyncWorker);

// Asynchronously calls ProducerListener functions so we can emulate one way binder calls.
class AsyncProducerListener : public BnProducerListener {
private:
    const sp<IProducerListener> mListener;
    AsyncProducerListener(const sp<IProducerListener>& listener) : mListener(listener) {}
    friend class sp<AsyncProducerListener>;

public:
    void onBufferReleased() override {
        AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferReleased(); });
    }

    void onBuffersDiscarded(const std::vector<int32_t>& slots) override {
        AsyncWorker::getInstance().post(
                [listener = mListener, slots = slots]() { listener->onBuffersDiscarded(slots); });
    }

    void onBufferDetached(int slot) override {
        AsyncWorker::getInstance().post(
                [listener = mListener, slot = slot]() { listener->onBufferDetached(slot); });
    }

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_CONSUMER_ATTACH_CALLBACK)
    void onBufferAttached() override {
        AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferAttached(); });
    }
#endif
};

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
class BBQBufferQueueCore : public BufferQueueCore {
public:
    explicit BBQBufferQueueCore(const wp<BLASTBufferQueue>& bbq) : mBLASTBufferQueue{bbq} {}

    void notifyBufferReleased() const override {
        sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
        if (!bbq) {
            return;
        }
        bbq->mBufferReleaseReader->interruptBlockingRead();
    }

private:
    wp<BLASTBufferQueue> mBLASTBufferQueue;
};
#endif

// Extends the BufferQueueProducer to create a wrapper around the listener so the listener calls
// can be non-blocking when the producer is in the client process.
class BBQBufferQueueProducer : public BufferQueueProducer {
public:
    BBQBufferQueueProducer(const sp<BufferQueueCore>& core, const wp<BLASTBufferQueue>& bbq)
          : BufferQueueProducer(core, false /* consumerIsSurfaceFlinger*/),
            mBLASTBufferQueue(bbq) {}

    status_t connect(const sp<IProducerListener>& listener, int api, bool producerControlledByApp,
                     QueueBufferOutput* output) override {
        if (!listener) {
            return BufferQueueProducer::connect(listener, api, producerControlledByApp, output);
        }

        return BufferQueueProducer::connect(sp<AsyncProducerListener>::make(listener), api,
                                            producerControlledByApp, output);
    }

    // We want to resize the frame history when changing the size of the buffer queue
    status_t setMaxDequeuedBufferCount(int maxDequeuedBufferCount) override {
        int maxBufferCount;
        if (status_t status = BufferQueueProducer::setMaxDequeuedBufferCount(maxDequeuedBufferCount,
                                                                             &maxBufferCount);
            status != OK) {
            return status;
        }

        sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
        if (!bbq) {
            return OK;
        }

        // if we can't determine the max buffer count, then just skip growing the history size
        size_t newFrameHistorySize = maxBufferCount + 2; // +2 because triple buffer rendering
        // optimize away resizing the frame history unless it will grow
        if (newFrameHistorySize > FrameEventHistory::INITIAL_MAX_FRAME_HISTORY) {
            ALOGV("increasing frame history size to %zu", newFrameHistorySize);
            bbq->resizeFrameEventHistory(newFrameHistorySize);
        }
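        // Illustrative sizing (assumed numbers): maxBufferCount == 8 gives newFrameHistorySize ==
        // 10, and the history is only resized when that exceeds
        // FrameEventHistory::INITIAL_MAX_FRAME_HISTORY.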

        return OK;
    }

    int query(int what, int* value) override {
        if (what == NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER) {
            *value = 1;
            return OK;
        }
        return BufferQueueProducer::query(what, value);
    }

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
    status_t waitForBufferRelease(std::unique_lock<std::mutex>& bufferQueueLock,
                                  nsecs_t timeout) const override {
        const auto startTime = std::chrono::steady_clock::now();
        sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
        if (!bbq) {
            return OK;
        }

        // BufferQueue has already checked if we have a free buffer. If there's an unread interrupt,
        // we want to ignore it. This must be done before unlocking the BufferQueue lock to ensure
        // we don't miss an interrupt.
        bbq->mBufferReleaseReader->clearInterrupts();
        UnlockGuard unlockGuard{bufferQueueLock};

        ATRACE_FORMAT("waiting for free buffer");
        ReleaseCallbackId id;
        sp<Fence> fence;
        uint32_t maxAcquiredBufferCount;
        status_t status =
                bbq->mBufferReleaseReader->readBlocking(id, fence, maxAcquiredBufferCount, timeout);
        if (status == TIMED_OUT) {
            return TIMED_OUT;
        } else if (status != OK) {
            // Waiting was interrupted or an error occurred. BufferQueueProducer will check if we
            // have a free buffer and call this method again if not.
            return OK;
        }

        bbq->releaseBufferCallback(id, fence, maxAcquiredBufferCount);
        const nsecs_t durationNanos = std::chrono::duration_cast<std::chrono::nanoseconds>(
                                              std::chrono::steady_clock::now() - startTime)
                                              .count();
        // Provide a callback for Choreographer to start buffer stuffing recovery when blocked
        // on buffer release.
        std::function<void(const nsecs_t)> callbackCopy = bbq->getWaitForBufferReleaseCallback();
        if (callbackCopy) callbackCopy(durationNanos);

        return OK;
    }
#endif

private:
    const wp<BLASTBufferQueue> mBLASTBufferQueue;
};

// Similar to BufferQueue::createBufferQueue but creates an adapter specific bufferqueue producer.
// This BQP allows invoking client specified ProducerListeners and invoke them asynchronously,
// emulating one way binder call behavior. Without this, if the listener calls back into the queue,
// we can deadlock.
void BLASTBufferQueue::createBufferQueue(sp<IGraphicBufferProducer>* outProducer,
                                         sp<IGraphicBufferConsumer>* outConsumer) {
    LOG_ALWAYS_FATAL_IF(outProducer == nullptr, "BLASTBufferQueue: outProducer must not be NULL");
    LOG_ALWAYS_FATAL_IF(outConsumer == nullptr, "BLASTBufferQueue: outConsumer must not be NULL");

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
    auto core = sp<BBQBufferQueueCore>::make(this);
#else
    auto core = sp<BufferQueueCore>::make();
#endif
    LOG_ALWAYS_FATAL_IF(core == nullptr, "BLASTBufferQueue: failed to create BufferQueueCore");

    auto producer = sp<BBQBufferQueueProducer>::make(core, this);
    LOG_ALWAYS_FATAL_IF(producer == nullptr,
                        "BLASTBufferQueue: failed to create BBQBufferQueueProducer");

    auto consumer = sp<BufferQueueConsumer>::make(core);
    consumer->setAllowExtraAcquire(true);
    LOG_ALWAYS_FATAL_IF(consumer == nullptr,
                        "BLASTBufferQueue: failed to create BufferQueueConsumer");

    *outProducer = producer;
    *outConsumer = consumer;
}

void BLASTBufferQueue::resizeFrameEventHistory(size_t newSize) {
    // This can be null during creation of the buffer queue, but resizing won't do anything at that
    // point in time, so just ignore. This can go away once the class relationships and lifetimes of
    // objects are cleaned up with a major refactor of BufferQueue as a whole.
    if (mBufferItemConsumer != nullptr) {
        std::unique_lock _lock{mMutex};
        mBufferItemConsumer->resizeFrameEventHistory(newSize);
    }
}

PixelFormat BLASTBufferQueue::convertBufferFormat(PixelFormat& format) {
    PixelFormat convertedFormat = format;
    switch (format) {
        case PIXEL_FORMAT_TRANSPARENT:
        case PIXEL_FORMAT_TRANSLUCENT:
            convertedFormat = PIXEL_FORMAT_RGBA_8888;
            break;
        case PIXEL_FORMAT_OPAQUE:
            convertedFormat = PIXEL_FORMAT_RGBX_8888;
            break;
    }
    return convertedFormat;
}

uint32_t BLASTBufferQueue::getLastTransformHint() const {
    std::lock_guard _lock{mMutex};
    if (mSurfaceControl != nullptr) {
        return mSurfaceControl->getTransformHint();
    } else {
        return 0;
    }
}

uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
    std::lock_guard _lock{mMutex};
    return mLastAcquiredFrameNumber;
}

bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
    std::lock_guard _lock{mMutex};
    return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
}

void BLASTBufferQueue::setTransactionHangCallback(
        std::function<void(const std::string&)> callback) {
    std::lock_guard _lock{mMutex};
    mTransactionHangCallback = std::move(callback);
}

void BLASTBufferQueue::setApplyToken(sp<IBinder> applyToken) {
    std::lock_guard _lock{mMutex};
    mApplyToken = std::move(applyToken);
}

void BLASTBufferQueue::setWaitForBufferReleaseCallback(
        std::function<void(const nsecs_t)> callback) {
    std::lock_guard _lock{mWaitForBufferReleaseMutex};
    mWaitForBufferReleaseCallback = std::move(callback);
}

std::function<void(const nsecs_t)> BLASTBufferQueue::getWaitForBufferReleaseCallback() const {
    std::lock_guard _lock{mWaitForBufferReleaseMutex};
    return mWaitForBufferReleaseCallback;
}

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)

void BLASTBufferQueue::updateBufferReleaseProducer() {
    // SELinux policy may prevent this process from sending the BufferReleaseChannel's file
    // descriptor to SurfaceFlinger, causing the entire transaction to be dropped. We send this
    // transaction independently of any other updates to ensure those updates aren't lost.
    SurfaceComposerClient::Transaction t;
    status_t status = t.setApplyToken(mApplyToken)
                              .setBufferReleaseChannel(mSurfaceControl, mBufferReleaseProducer)
                              .apply(false /* synchronous */, true /* oneWay */);
    if (status != OK) {
        ALOGW("[%s] %s - failed to set buffer release channel on %s", mName.c_str(),
              statusToString(status).c_str(), mSurfaceControl->getName().c_str());
    }
}

void BLASTBufferQueue::drainBufferReleaseConsumer() {
    ATRACE_CALL();
    while (true) {
        ReleaseCallbackId id;
        sp<Fence> fence;
        uint32_t maxAcquiredBufferCount;
        status_t status =
                mBufferReleaseConsumer->readReleaseFence(id, fence, maxAcquiredBufferCount);
        if (status != OK) {
            return;
        }
        releaseBufferCallback(id, fence, maxAcquiredBufferCount);
    }
}

BLASTBufferQueue::BufferReleaseReader::BufferReleaseReader(BLASTBufferQueue& bbq) : mBbq{bbq} {
    mEpollFd = android::base::unique_fd{epoll_create1(EPOLL_CLOEXEC)};
    LOG_ALWAYS_FATAL_IF(!mEpollFd.ok(),
                        "Failed to create buffer release epoll file descriptor. errno=%d "
                        "message='%s'",
                        errno, strerror(errno));

    epoll_event registerEndpointFd{};
    registerEndpointFd.events = EPOLLIN;
    registerEndpointFd.data.fd = mBbq.mBufferReleaseConsumer->getFd();
    status_t status = epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, mBbq.mBufferReleaseConsumer->getFd(),
                                &registerEndpointFd);
    LOG_ALWAYS_FATAL_IF(status == -1,
                        "Failed to register buffer release consumer file descriptor with epoll. "
                        "errno=%d message='%s'",
                        errno, strerror(errno));

    mEventFd = android::base::unique_fd(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
    LOG_ALWAYS_FATAL_IF(!mEventFd.ok(),
                        "Failed to create buffer release event file descriptor. errno=%d "
                        "message='%s'",
                        errno, strerror(errno));

    epoll_event registerEventFd{};
    registerEventFd.events = EPOLLIN;
    registerEventFd.data.fd = mEventFd.get();
    status = epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, mEventFd.get(), &registerEventFd);
    LOG_ALWAYS_FATAL_IF(status == -1,
                        "Failed to register buffer release event file descriptor with epoll. "
                        "errno=%d message='%s'",
                        errno, strerror(errno));
}

status_t BLASTBufferQueue::BufferReleaseReader::readBlocking(ReleaseCallbackId& outId,
                                                             sp<Fence>& outFence,
                                                             uint32_t& outMaxAcquiredBufferCount,
                                                             nsecs_t timeout) {
    // TODO(b/363290953) epoll_wait only has millisecond timeout precision. If timeout is less than
    // 1ms, then we round timeout up to 1ms. Otherwise, we round timeout to the nearest
    // millisecond. Once epoll_pwait2 can be used in libgui, we can specify timeout with nanosecond
    // precision.
    int timeoutMs = -1;
    if (timeout == 0) {
        timeoutMs = 0;
    } else if (timeout > 0) {
        const int nsPerMs = 1000000;
        if (timeout < nsPerMs) {
            timeoutMs = 1;
        } else {
            timeoutMs = static_cast<int>(
                    std::chrono::round<std::chrono::milliseconds>(std::chrono::nanoseconds{timeout})
                            .count());
        }
    }
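    // Illustrative conversions (assumed inputs): timeout = 500000 ns (< 1 ms) -> timeoutMs = 1;
    // timeout = 16666667 ns -> timeoutMs = 17 (nearest millisecond); a negative timeout leaves
    // timeoutMs = -1, so the epoll_wait below blocks indefinitely.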

    epoll_event event{};
    int eventCount;
    do {
        eventCount = epoll_wait(mEpollFd.get(), &event, 1 /*maxevents*/, timeoutMs);
    } while (eventCount == -1 && errno != EINTR);

    if (eventCount == -1) {
        ALOGE("epoll_wait error while waiting for buffer release. errno=%d message='%s'", errno,
              strerror(errno));
        return UNKNOWN_ERROR;
    }

    if (eventCount == 0) {
        return TIMED_OUT;
    }

    if (event.data.fd == mEventFd.get()) {
        clearInterrupts();
        return WOULD_BLOCK;
    }

    return mBbq.mBufferReleaseConsumer->readReleaseFence(outId, outFence,
                                                         outMaxAcquiredBufferCount);
}

void BLASTBufferQueue::BufferReleaseReader::interruptBlockingRead() {
    if (eventfd_write(mEventFd.get(), 1) == -1) {
        ALOGE("failed to notify dequeue event. errno=%d message='%s'", errno, strerror(errno));
    }
}

void BLASTBufferQueue::BufferReleaseReader::clearInterrupts() {
    eventfd_t value;
    if (eventfd_read(mEventFd.get(), &value) == -1 && errno != EWOULDBLOCK) {
        ALOGE("error while reading from eventfd. errno=%d message='%s'", errno, strerror(errno));
    }
}

#endif

} // namespace android