/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "BufferQueueConsumer"
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#if DEBUG_ONLY_CODE
#define VALIDATE_CONSISTENCY() do { mCore->validateConsistencyLocked(); } while (0)
#else
#define VALIDATE_CONSISTENCY()
#endif

#define EGL_EGLEXT_PROTOTYPES
#include <EGL/egl.h>
#include <EGL/eglext.h>

#include <gui/BufferItem.h>
#include <gui/BufferQueueConsumer.h>
#include <gui/BufferQueueCore.h>
#include <gui/IConsumerListener.h>
#include <gui/IProducerListener.h>
#include <gui/TraceUtils.h>

#include <private/gui/BufferQueueThreadState.h>
#if !defined(__ANDROID_VNDK__) && !defined(NO_BINDER)
#include <binder/PermissionCache.h>
#endif

#include <system/window.h>

#include <com_android_graphics_libgui_flags.h>

#include <inttypes.h>
#include <pwd.h>
#include <sys/types.h>
#include <optional>

namespace android {

// Macros for including BufferQueueCore information in log messages
#define BQ_LOGV(x, ...) \
    ALOGV("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)
#define BQ_LOGD(x, ...) \
    ALOGD("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)
#define BQ_LOGI(x, ...) \
    ALOGI("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)
#define BQ_LOGW(x, ...) \
    ALOGW("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)
#define BQ_LOGE(x, ...) \
    ALOGE("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)

ConsumerListener::~ConsumerListener() = default;

BufferQueueConsumer::BufferQueueConsumer(const sp<BufferQueueCore>& core) :
    mCore(core),
    mSlots(core->mSlots),
    mConsumerName() {}

BufferQueueConsumer::~BufferQueueConsumer() {}

status_t BufferQueueConsumer::acquireBuffer(BufferItem* outBuffer,
        nsecs_t expectedPresent, uint64_t maxFrameNumber) {
    ATRACE_CALL();

    int numDroppedBuffers = 0;
    sp<IProducerListener> listener;
    {
        std::unique_lock<std::mutex> lock(mCore->mMutex);

        // Check that the consumer doesn't currently have the maximum number of
        // buffers acquired. We allow the max buffer count to be exceeded by one
        // buffer so that the consumer can successfully set up the newly acquired
        // buffer before releasing the old one.
        int numAcquiredBuffers = 0;
        for (int s : mCore->mActiveBuffers) {
            if (mSlots[s].mBufferState.isAcquired()) {
                ++numAcquiredBuffers;
            }
        }
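        // mAllowExtraAcquire tolerates a single acquire beyond the usual max + 1
        // allowance, but only for a buffer that cannot be dropped; the droppable
        // case is rejected further below.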
        const bool acquireNonDroppableBuffer = mCore->mAllowExtraAcquire &&
                numAcquiredBuffers == mCore->mMaxAcquiredBufferCount + 1;
        if (numAcquiredBuffers >= mCore->mMaxAcquiredBufferCount + 1 &&
                !acquireNonDroppableBuffer) {
            BQ_LOGE("acquireBuffer: max acquired buffer count reached: %d (max %d)",
                    numAcquiredBuffers, mCore->mMaxAcquiredBufferCount);
            return INVALID_OPERATION;
        }

        bool sharedBufferAvailable = mCore->mSharedBufferMode &&
                mCore->mAutoRefresh && mCore->mSharedBufferSlot !=
                BufferQueueCore::INVALID_BUFFER_SLOT;

        // In asynchronous mode the list is guaranteed to be one buffer deep,
        // while in synchronous mode we use the oldest buffer.
        if (mCore->mQueue.empty() && !sharedBufferAvailable) {
            return NO_BUFFER_AVAILABLE;
        }

        BufferQueueCore::Fifo::iterator front(mCore->mQueue.begin());

        // If expectedPresent is specified, we may not want to return a buffer yet.
        // If it's specified and there's more than one buffer queued, we may want
        // to drop a buffer.
        // Skip this if we're in shared buffer mode and the queue is empty,
        // since in that case we'll just return the shared buffer.
        if (expectedPresent != 0 && !mCore->mQueue.empty()) {
            // The 'expectedPresent' argument indicates when the buffer is expected
            // to be presented on-screen. If the buffer's desired present time is
            // earlier (less) than expectedPresent -- meaning it will be displayed
            // on time or possibly late if we show it as soon as possible -- we
            // acquire and return it. If we don't want to display it until after the
            // expectedPresent time, we return PRESENT_LATER without acquiring it.
            //
            // To be safe, we don't defer acquisition if expectedPresent is more
            // than one second in the future beyond the desired present time
            // (i.e., we'd be holding the buffer for a long time).
            //
            // NOTE: Code assumes monotonic time values from the system clock
            // are positive.

            // Start by checking to see if we can drop frames. We skip this check if
            // the timestamps are being auto-generated by Surface. If the app isn't
            // generating timestamps explicitly, it probably doesn't want frames to
            // be discarded based on them.
            while (mCore->mQueue.size() > 1 && !mCore->mQueue[0].mIsAutoTimestamp) {
                const BufferItem& bufferItem(mCore->mQueue[1]);

                // If dropping entry[0] would leave us with a buffer that the
                // consumer is not yet ready for, don't drop it.
                if (maxFrameNumber && bufferItem.mFrameNumber > maxFrameNumber) {
                    break;
                }

                // If entry[1] is timely, drop entry[0] (and repeat). We apply an
                // additional criterion here: we only drop the earlier buffer if our
                // desiredPresent falls within +/- 1 second of the expected present.
                // Otherwise, bogus desiredPresent times (e.g., 0 or a small
                // relative timestamp), which normally mean "ignore the timestamp
                // and acquire immediately", would cause us to drop frames.
                //
                // We may want to add an additional criterion: don't drop the
                // earlier buffer if entry[1]'s fence hasn't signaled yet.
                nsecs_t desiredPresent = bufferItem.mTimestamp;
                if (desiredPresent < expectedPresent - MAX_REASONABLE_NSEC ||
                        desiredPresent > expectedPresent) {
                    // This buffer is set to display in the near future, or
                    // desiredPresent is garbage. Either way we don't want to drop
                    // the previous buffer just to get this on the screen sooner.
                    BQ_LOGV("acquireBuffer: nodrop desire=%" PRId64 " expect=%"
                            PRId64 " (%" PRId64 ") now=%" PRId64,
                            desiredPresent, expectedPresent,
                            desiredPresent - expectedPresent,
                            systemTime(CLOCK_MONOTONIC));
                    break;
                }

                BQ_LOGV("acquireBuffer: drop desire=%" PRId64 " expect=%" PRId64
                        " size=%zu",
                        desiredPresent, expectedPresent, mCore->mQueue.size());

                if (!front->mIsStale) {
                    // Front buffer is still in mSlots, so mark the slot as free
                    mSlots[front->mSlot].mBufferState.freeQueued();

                    // After leaving shared buffer mode, the shared buffer will
                    // still be around. Mark it as no longer shared if this
                    // operation causes it to be free.
                    if (!mCore->mSharedBufferMode &&
                            mSlots[front->mSlot].mBufferState.isFree()) {
                        mSlots[front->mSlot].mBufferState.mShared = false;
                    }

                    // Don't put the shared buffer on the free list
                    if (!mSlots[front->mSlot].mBufferState.isShared()) {
                        mCore->mActiveBuffers.erase(front->mSlot);
                        mCore->mFreeBuffers.push_back(front->mSlot);
                    }

                    if (mCore->mBufferReleasedCbEnabled) {
                        listener = mCore->mConnectedProducerListener;
                    }
                    ++numDroppedBuffers;
                }

                mCore->mQueue.erase(front);
                front = mCore->mQueue.begin();
            }

            // See if the front buffer is ready to be acquired
            nsecs_t desiredPresent = front->mTimestamp;
            bool bufferIsDue = desiredPresent <= expectedPresent ||
                    desiredPresent > expectedPresent + MAX_REASONABLE_NSEC;
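            // Note that a desiredPresent more than MAX_REASONABLE_NSEC beyond
            // expectedPresent is treated as bogus, so such a buffer counts as due
            // rather than being deferred.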
            bool consumerIsReady = maxFrameNumber > 0 ?
                    front->mFrameNumber <= maxFrameNumber : true;
            if (!bufferIsDue || !consumerIsReady) {
                BQ_LOGV("acquireBuffer: defer desire=%" PRId64 " expect=%" PRId64
                        " (%" PRId64 ") now=%" PRId64 " frame=%" PRIu64
                        " consumer=%" PRIu64,
                        desiredPresent, expectedPresent,
                        desiredPresent - expectedPresent,
                        systemTime(CLOCK_MONOTONIC),
                        front->mFrameNumber, maxFrameNumber);
                ATRACE_NAME("PRESENT_LATER");
                return PRESENT_LATER;
            }

            BQ_LOGV("acquireBuffer: accept desire=%" PRId64 " expect=%" PRId64 " "
                    "(%" PRId64 ") now=%" PRId64, desiredPresent, expectedPresent,
                    desiredPresent - expectedPresent,
                    systemTime(CLOCK_MONOTONIC));
        }

        int slot = BufferQueueCore::INVALID_BUFFER_SLOT;

        if (sharedBufferAvailable && mCore->mQueue.empty()) {
            // make sure the buffer has finished allocating before acquiring it
            mCore->waitWhileAllocatingLocked(lock);

            slot = mCore->mSharedBufferSlot;

            // Recreate the BufferItem for the shared buffer from the data that
            // was cached when it was last queued.
            outBuffer->mGraphicBuffer = mSlots[slot].mGraphicBuffer;
            outBuffer->mFence = Fence::NO_FENCE;
            outBuffer->mFenceTime = FenceTime::NO_FENCE;
            outBuffer->mCrop = mCore->mSharedBufferCache.crop;
            outBuffer->mTransform = mCore->mSharedBufferCache.transform &
                    ~static_cast<uint32_t>(
                            NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY);
            outBuffer->mScalingMode = mCore->mSharedBufferCache.scalingMode;
            outBuffer->mDataSpace = mCore->mSharedBufferCache.dataspace;
            outBuffer->mFrameNumber = mCore->mFrameCounter;
            outBuffer->mSlot = slot;
            outBuffer->mAcquireCalled = mSlots[slot].mAcquireCalled;
            outBuffer->mTransformToDisplayInverse =
                    (mCore->mSharedBufferCache.transform &
                            NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) != 0;
            outBuffer->mSurfaceDamage = Region::INVALID_REGION;
            outBuffer->mQueuedBuffer = false;
            outBuffer->mIsStale = false;
            outBuffer->mAutoRefresh = mCore->mSharedBufferMode &&
                    mCore->mAutoRefresh;
        } else if (acquireNonDroppableBuffer && front->mIsDroppable) {
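            // The extra acquire permitted by mAllowExtraAcquire is reserved for a
            // non-droppable front buffer; a droppable one is simply reported as
            // unavailable.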
            BQ_LOGV("acquireBuffer: front buffer is not droppable");
            return NO_BUFFER_AVAILABLE;
        } else {
            slot = front->mSlot;
            *outBuffer = *front;
        }

        ATRACE_BUFFER_INDEX(slot);

        BQ_LOGV("acquireBuffer: acquiring { slot=%d/%" PRIu64 " buffer=%p }",
                slot, outBuffer->mFrameNumber, outBuffer->mGraphicBuffer->handle);

        if (!outBuffer->mIsStale) {
            mSlots[slot].mAcquireCalled = true;
            // Don't decrease the queue count if the BufferItem wasn't
            // previously in the queue. This happens in shared buffer mode when
            // the queue is empty and the BufferItem is created above.
            if (mCore->mQueue.empty()) {
                mSlots[slot].mBufferState.acquireNotInQueue();
            } else {
                mSlots[slot].mBufferState.acquire();
            }
            mSlots[slot].mFence = Fence::NO_FENCE;
        }

        // If the buffer has previously been acquired by the consumer, set
        // mGraphicBuffer to NULL to avoid unnecessarily remapping this buffer
        // on the consumer side
        if (outBuffer->mAcquireCalled) {
            outBuffer->mGraphicBuffer = nullptr;
        }

        mCore->mQueue.erase(front);

        // We might have freed a slot while dropping old buffers, or the producer
        // may be blocked waiting for the number of buffers in the queue to
        // decrease.
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        mCore->notifyBufferReleased();
#else
        mCore->mDequeueCondition.notify_all();
#endif

        ATRACE_INT(mCore->mConsumerName.c_str(), static_cast<int32_t>(mCore->mQueue.size()));
#ifndef NO_BINDER
        mCore->mOccupancyTracker.registerOccupancyChange(mCore->mQueue.size());
#endif
        VALIDATE_CONSISTENCY();
    }

    if (listener != nullptr) {
        for (int i = 0; i < numDroppedBuffers; ++i) {
            listener->onBufferReleased();
        }
    }

    return NO_ERROR;
}
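
// A minimal consumer-side usage sketch of the acquire/release cycle above. This is
// illustrative only: it assumes a sp<BufferQueueConsumer> ("consumer") wired to a
// connected queue, the BQ_GL_FENCE_CLEANUP signature of releaseBuffer (no EGL fence
// parameters), and it omits handling of PRESENT_LATER / NO_BUFFER_AVAILABLE:
//
//     BufferItem item;
//     if (consumer->acquireBuffer(&item, /*expectedPresent*/ 0, /*maxFrameNumber*/ 0) == NO_ERROR) {
//         // ... read from item.mGraphicBuffer (may be nullptr if the slot is cached) ...
//         consumer->releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
//     }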

status_t BufferQueueConsumer::detachBuffer(int slot) {
    ATRACE_CALL();
    ATRACE_BUFFER_INDEX(slot);
    BQ_LOGV("detachBuffer: slot %d", slot);
    sp<IProducerListener> listener;
    {
        std::lock_guard<std::mutex> lock(mCore->mMutex);

        if (mCore->mIsAbandoned) {
            BQ_LOGE("detachBuffer: BufferQueue has been abandoned");
            return NO_INIT;
        }

        if (mCore->mSharedBufferMode || slot == mCore->mSharedBufferSlot) {
            BQ_LOGE("detachBuffer: detachBuffer not allowed in shared buffer mode");
            return BAD_VALUE;
        }

        const int totalSlotCount = mCore->getTotalSlotCountLocked();
        if (slot < 0 || slot >= totalSlotCount) {
            BQ_LOGE("detachBuffer: slot index %d out of range [0, %d)", slot, totalSlotCount);
            return BAD_VALUE;
        } else if (!mSlots[slot].mBufferState.isAcquired()) {
            BQ_LOGE("detachBuffer: slot %d is not owned by the consumer "
                    "(state = %s)", slot, mSlots[slot].mBufferState.string());
            return BAD_VALUE;
        }
        if (mCore->mBufferReleasedCbEnabled) {
            listener = mCore->mConnectedProducerListener;
        }

        mSlots[slot].mBufferState.detachConsumer();
        mCore->mActiveBuffers.erase(slot);
        mCore->mFreeSlots.insert(slot);
        mCore->clearBufferSlotLocked(slot);
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        mCore->notifyBufferReleased();
#else
        mCore->mDequeueCondition.notify_all();
#endif

        VALIDATE_CONSISTENCY();
    }

    if (listener) {
        listener->onBufferDetached(slot);
    }
    return NO_ERROR;
}

status_t BufferQueueConsumer::attachBuffer(int* outSlot,
        const sp<android::GraphicBuffer>& buffer) {
    ATRACE_CALL();

    if (outSlot == nullptr) {
        BQ_LOGE("attachBuffer: outSlot must not be NULL");
        return BAD_VALUE;
    } else if (buffer == nullptr) {
        BQ_LOGE("attachBuffer: cannot attach NULL buffer");
        return BAD_VALUE;
    }

    sp<IProducerListener> listener;
    {
        std::lock_guard<std::mutex> lock(mCore->mMutex);

        if (mCore->mSharedBufferMode) {
            BQ_LOGE("attachBuffer: cannot attach a buffer in shared buffer mode");
            return BAD_VALUE;
        }

        // Make sure we don't have too many acquired buffers
        int numAcquiredBuffers = 0;
        for (int s : mCore->mActiveBuffers) {
            if (mSlots[s].mBufferState.isAcquired()) {
                ++numAcquiredBuffers;
            }
        }

        if (numAcquiredBuffers >= mCore->mMaxAcquiredBufferCount + 1) {
            BQ_LOGE("attachBuffer: max acquired buffer count reached: %d "
                    "(max %d)", numAcquiredBuffers,
                    mCore->mMaxAcquiredBufferCount);
            return INVALID_OPERATION;
        }

        if (buffer->getGenerationNumber() != mCore->mGenerationNumber) {
            BQ_LOGE("attachBuffer: generation number mismatch [buffer %u] "
                    "[queue %u]", buffer->getGenerationNumber(),
                    mCore->mGenerationNumber);
            return BAD_VALUE;
        }

        // Find a free slot to put the buffer into
        int found = BufferQueueCore::INVALID_BUFFER_SLOT;
        if (!mCore->mFreeSlots.empty()) {
            auto slot = mCore->mFreeSlots.begin();
            found = *slot;
            mCore->mFreeSlots.erase(slot);
        } else if (!mCore->mFreeBuffers.empty()) {
            found = mCore->mFreeBuffers.front();
            mCore->mFreeBuffers.remove(found);
        }
        if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
            BQ_LOGE("attachBuffer: could not find free buffer slot");
            return NO_MEMORY;
        }

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_CONSUMER_ATTACH_CALLBACK)
        if (mCore->mBufferAttachedCbEnabled) {
            listener = mCore->mConnectedProducerListener;
        }
#endif

        mCore->mActiveBuffers.insert(found);
        *outSlot = found;
        ATRACE_BUFFER_INDEX(*outSlot);
        BQ_LOGV("attachBuffer: returning slot %d", *outSlot);

        mSlots[*outSlot].mGraphicBuffer = buffer;
        mSlots[*outSlot].mBufferState.attachConsumer();
        mSlots[*outSlot].mNeedsReallocation = true;
        mSlots[*outSlot].mFence = Fence::NO_FENCE;
        mSlots[*outSlot].mFrameNumber = 0;

        // mAcquireCalled tells BufferQueue that it doesn't need to send a valid
        // GraphicBuffer pointer on the next acquireBuffer call, which decreases
        // Binder traffic by not un/flattening the GraphicBuffer. However, it
        // requires that the consumer maintain a cached copy of the slot <--> buffer
        // mappings, which is why the consumer doesn't need the valid pointer on
        // acquire.
        //
        // The StreamSplitter is one of the primary users of the attach/detach
        // logic, and while it is running, all buffers it acquires are immediately
        // detached, and all buffers it eventually releases are ones that were
        // attached (as opposed to having been obtained from acquireBuffer), so it
        // doesn't make sense to maintain the slot/buffer mappings, which would
        // become invalid for every buffer during detach/attach. By setting this to
        // false, the valid GraphicBuffer pointer will always be sent with acquire
        // for attached buffers.
        mSlots[*outSlot].mAcquireCalled = false;

        VALIDATE_CONSISTENCY();
    }

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_CONSUMER_ATTACH_CALLBACK)
    if (listener != nullptr) {
        listener->onBufferAttached();
    }
#endif

    return NO_ERROR;
}
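
// A minimal detach/attach sketch in the spirit of the StreamSplitter flow described
// above. Illustrative only: "inputConsumer" and "outputProducer" are hypothetical
// endpoints, and the buffer's generation number is assumed to match the destination
// queue:
//
//     int attachedSlot = BufferQueueCore::INVALID_BUFFER_SLOT;
//     inputConsumer->detachBuffer(item.mSlot);
//     outputProducer->attachBuffer(&attachedSlot, item.mGraphicBuffer);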

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_GL_FENCE_CLEANUP)
status_t BufferQueueConsumer::releaseBuffer(int slot, uint64_t frameNumber,
        const sp<Fence>& releaseFence) {
#else
status_t BufferQueueConsumer::releaseBuffer(int slot, uint64_t frameNumber,
        const sp<Fence>& releaseFence, EGLDisplay eglDisplay,
        EGLSyncKHR eglFence) {
#endif
    ATRACE_CALL();
    ATRACE_BUFFER_INDEX(slot);

    const int totalSlotCount = mCore->getTotalSlotCountLocked();
    if (slot < 0 || slot >= totalSlotCount) {
        BQ_LOGE("releaseBuffer: slot index %d out of range [0, %d)", slot, totalSlotCount);
        return BAD_VALUE;
    }
    if (releaseFence == nullptr) {
        BQ_LOGE("releaseBuffer: slot %d fence %p NULL", slot, releaseFence.get());
        return BAD_VALUE;
    }

    sp<IProducerListener> listener;
    { // Autolock scope
        std::lock_guard<std::mutex> lock(mCore->mMutex);

        const int totalSlotCount = mCore->getTotalSlotCountLocked();
        if (slot < 0 || slot >= totalSlotCount || releaseFence == nullptr) {
            BQ_LOGE("releaseBuffer: slot %d out of range [0, %d) or fence %p NULL", slot,
                    totalSlotCount, releaseFence.get());
            return BAD_VALUE;
        }

        // If the frame number has changed because the buffer has been reallocated,
        // we can ignore this releaseBuffer for the old buffer.
        // Ignore this for the shared buffer where the frame number can easily
        // get out of sync due to the buffer being queued and acquired at the
        // same time.
        if (frameNumber != mSlots[slot].mFrameNumber &&
                !mSlots[slot].mBufferState.isShared()) {
            return STALE_BUFFER_SLOT;
        }

        if (!mSlots[slot].mBufferState.isAcquired()) {
            BQ_LOGE("releaseBuffer: attempted to release buffer slot %d "
                    "but its state was %s", slot,
                    mSlots[slot].mBufferState.string());
            return BAD_VALUE;
        }

#if !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_GL_FENCE_CLEANUP)
        mSlots[slot].mEglDisplay = eglDisplay;
        mSlots[slot].mEglFence = eglFence;
#endif
        mSlots[slot].mFence = releaseFence;
        mSlots[slot].mBufferState.release();

        // After leaving shared buffer mode, the shared buffer will
        // still be around. Mark it as no longer shared if this
        // operation causes it to be free.
        if (!mCore->mSharedBufferMode && mSlots[slot].mBufferState.isFree()) {
            mSlots[slot].mBufferState.mShared = false;
        }
        // Don't put the shared buffer on the free list.
        if (!mSlots[slot].mBufferState.isShared()) {
            mCore->mActiveBuffers.erase(slot);
            mCore->mFreeBuffers.push_back(slot);
        }

        if (mCore->mBufferReleasedCbEnabled) {
            listener = mCore->mConnectedProducerListener;
        }
        BQ_LOGV("releaseBuffer: releasing slot %d", slot);

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        mCore->notifyBufferReleased();
#else
        mCore->mDequeueCondition.notify_all();
#endif

        VALIDATE_CONSISTENCY();
    } // Autolock scope

    // Call back without lock held
    if (listener != nullptr) {
        listener->onBufferReleased();
    }

    return NO_ERROR;
}

status_t BufferQueueConsumer::connect(
        const sp<IConsumerListener>& consumerListener, bool controlledByApp) {
    ATRACE_CALL();

    if (consumerListener == nullptr) {
        BQ_LOGE("connect: consumerListener may not be NULL");
        return BAD_VALUE;
    }

    BQ_LOGV("connect: controlledByApp=%s",
            controlledByApp ? "true" : "false");

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mIsAbandoned) {
        BQ_LOGE("connect: BufferQueue has been abandoned");
        return NO_INIT;
    }

    mCore->mConsumerListener = consumerListener;
    mCore->mConsumerControlledByApp = controlledByApp;

    return NO_ERROR;
}

status_t BufferQueueConsumer::disconnect() {
    ATRACE_CALL();

    BQ_LOGV("disconnect");

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mConsumerListener == nullptr) {
        BQ_LOGE("disconnect: no consumer is connected");
        return BAD_VALUE;
    }

    mCore->mIsAbandoned = true;
    mCore->mConsumerListener = nullptr;
    mCore->mQueue.clear();
    mCore->freeAllBuffersLocked();
    mCore->mSharedBufferSlot = BufferQueueCore::INVALID_BUFFER_SLOT;
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
    mCore->notifyBufferReleased();
#else
    mCore->mDequeueCondition.notify_all();
#endif
    return NO_ERROR;
}

status_t BufferQueueConsumer::getReleasedBuffers(uint64_t *outSlotMask) {
    ATRACE_CALL();

    if (outSlotMask == nullptr) {
        BQ_LOGE("getReleasedBuffers: outSlotMask may not be NULL");
        return BAD_VALUE;
    }

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mIsAbandoned) {
        BQ_LOGE("getReleasedBuffers: BufferQueue has been abandoned");
        return NO_INIT;
    }

    uint64_t mask = 0;
    for (int s = 0; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
        if (!mSlots[s].mAcquireCalled) {
            mask |= (1ULL << s);
        }
    }

    // Remove from the mask queued buffers for which acquire has been called,
    // since the consumer will not receive their buffer addresses and so must
    // retain their cached information
    BufferQueueCore::Fifo::iterator current(mCore->mQueue.begin());
    while (current != mCore->mQueue.end()) {
        if (current->mAcquireCalled) {
            mask &= ~(1ULL << current->mSlot);
        }
        ++current;
    }

    BQ_LOGV("getReleasedBuffers: returning mask %#" PRIx64, mask);
    *outSlotMask = mask;
    return NO_ERROR;
}

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
status_t BufferQueueConsumer::getReleasedBuffersExtended(std::vector<bool>* outSlotMask) {
    ATRACE_CALL();

    if (outSlotMask == nullptr) {
        BQ_LOGE("getReleasedBuffersExtended: outSlotMask may not be NULL");
        return BAD_VALUE;
    }

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mIsAbandoned) {
        BQ_LOGE("getReleasedBuffersExtended: BufferQueue has been abandoned");
        return NO_INIT;
    }

    const int totalSlotCount = mCore->getTotalSlotCountLocked();
    outSlotMask->resize(totalSlotCount);
    for (int s = 0; s < totalSlotCount; ++s) {
        (*outSlotMask)[s] = !mSlots[s].mAcquireCalled;
    }

    // Remove from the mask queued buffers for which acquire has been called,
    // since the consumer will not receive their buffer addresses and so must
    // retain their cached information
    BufferQueueCore::Fifo::iterator current(mCore->mQueue.begin());
    while (current != mCore->mQueue.end()) {
        if (current->mAcquireCalled) {
            (*outSlotMask)[current->mSlot] = false;
        }
        ++current;
    }

    return NO_ERROR;
}
#endif

status_t BufferQueueConsumer::setDefaultBufferSize(uint32_t width,
        uint32_t height) {
    ATRACE_CALL();

    if (width == 0 || height == 0) {
        BQ_LOGV("setDefaultBufferSize: dimensions cannot be 0 (width=%u "
                "height=%u)", width, height);
        return BAD_VALUE;
    }

    BQ_LOGV("setDefaultBufferSize: width=%u height=%u", width, height);

    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mDefaultWidth = width;
    mCore->mDefaultHeight = height;
    return NO_ERROR;
}

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
status_t BufferQueueConsumer::allowUnlimitedSlots(bool allowUnlimitedSlots) {
    ATRACE_CALL();
    BQ_LOGV("allowUnlimitedSlots: %d", allowUnlimitedSlots);
    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mIsAbandoned) {
        BQ_LOGE("allowUnlimitedSlots: BufferQueue has been abandoned");
        return NO_INIT;
    }

    if (mCore->mConnectedApi != BufferQueueCore::NO_CONNECTED_API) {
        BQ_LOGE("allowUnlimitedSlots: BufferQueue already connected");
        return INVALID_OPERATION;
    }

    mCore->mAllowExtendedSlotCount = allowUnlimitedSlots;

    return OK;
}
#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)

status_t BufferQueueConsumer::setMaxBufferCount(int bufferCount) {
    ATRACE_CALL();

    if (bufferCount < 1 || bufferCount > BufferQueueDefs::NUM_BUFFER_SLOTS) {
        BQ_LOGE("setMaxBufferCount: invalid count %d", bufferCount);
        return BAD_VALUE;
    }

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mConnectedApi != BufferQueueCore::NO_CONNECTED_API) {
        BQ_LOGE("setMaxBufferCount: producer is already connected");
        return INVALID_OPERATION;
    }

    if (bufferCount < mCore->mMaxAcquiredBufferCount) {
        BQ_LOGE("setMaxBufferCount: invalid buffer count (%d) less than "
                "mMaxAcquiredBufferCount (%d)", bufferCount,
                mCore->mMaxAcquiredBufferCount);
        return BAD_VALUE;
    }

    int delta = mCore->getMaxBufferCountLocked(mCore->mAsyncMode,
            mCore->mDequeueBufferCannotBlock, bufferCount) -
            mCore->getMaxBufferCountLocked();
    if (!mCore->adjustAvailableSlotsLocked(delta)) {
        BQ_LOGE("setMaxBufferCount: BufferQueue failed to adjust the number of "
                "available slots. Delta = %d", delta);
        return BAD_VALUE;
    }

    mCore->mMaxBufferCount = bufferCount;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setMaxAcquiredBufferCount(int maxAcquiredBuffers) {
    return setMaxAcquiredBufferCount(maxAcquiredBuffers, std::nullopt);
}

status_t BufferQueueConsumer::setMaxAcquiredBufferCount(
        int maxAcquiredBuffers, std::optional<OnBufferReleasedCallback> onBuffersReleasedCallback) {
    ATRACE_FORMAT("%s(%d)", __func__, maxAcquiredBuffers);

    std::optional<OnBufferReleasedCallback> callback;
    { // Autolock scope
        std::unique_lock<std::mutex> lock(mCore->mMutex);

        // We reserve two slots in order to guarantee that the producer and
        // consumer can run asynchronously.
        int maxMaxAcquiredBuffers =
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
                mCore->getTotalSlotCountLocked() - 2;
#else
                BufferQueueCore::MAX_MAX_ACQUIRED_BUFFERS;
#endif
        if (maxAcquiredBuffers < 1 || maxAcquiredBuffers > maxMaxAcquiredBuffers) {
            BQ_LOGE("setMaxAcquiredBufferCount: invalid count %d", maxAcquiredBuffers);
            return BAD_VALUE;
        }

        mCore->waitWhileAllocatingLocked(lock);

        if (mCore->mIsAbandoned) {
            BQ_LOGE("setMaxAcquiredBufferCount: consumer is abandoned");
            return NO_INIT;
        }

        if (maxAcquiredBuffers == mCore->mMaxAcquiredBufferCount) {
            return NO_ERROR;
        }

        // The new maxAcquiredBuffers count should not be violated by the number
        // of currently acquired buffers
        int acquiredCount = 0;
        for (int slot : mCore->mActiveBuffers) {
            if (mSlots[slot].mBufferState.isAcquired()) {
                acquiredCount++;
            }
        }
        if (acquiredCount > maxAcquiredBuffers) {
            BQ_LOGE("setMaxAcquiredBufferCount: the requested maxAcquiredBuffer "
                    "count (%d) is less than the current acquired buffer count (%d)",
                    maxAcquiredBuffers, acquiredCount);
            return BAD_VALUE;
        }

        if ((maxAcquiredBuffers + mCore->mMaxDequeuedBufferCount +
                (mCore->mAsyncMode || mCore->mDequeueBufferCannotBlock ? 1 : 0))
                > mCore->mMaxBufferCount) {
            BQ_LOGE("setMaxAcquiredBufferCount: %d acquired buffers would "
                    "exceed the maxBufferCount (%d) (maxDequeued %d async %d)",
                    maxAcquiredBuffers, mCore->mMaxBufferCount,
                    mCore->mMaxDequeuedBufferCount, mCore->mAsyncMode ||
                    mCore->mDequeueBufferCannotBlock);
            return BAD_VALUE;
        }

        int delta = maxAcquiredBuffers - mCore->mMaxAcquiredBufferCount;
        if (!mCore->adjustAvailableSlotsLocked(delta)) {
            return BAD_VALUE;
        }

        BQ_LOGV("setMaxAcquiredBufferCount: %d", maxAcquiredBuffers);
        mCore->mMaxAcquiredBufferCount = maxAcquiredBuffers;
        VALIDATE_CONSISTENCY();
        if (delta < 0) {
            if (onBuffersReleasedCallback) {
                callback = std::move(onBuffersReleasedCallback);
            } else if (mCore->mBufferReleasedCbEnabled) {
                callback = [listener = mCore->mConsumerListener]() {
                    listener->onBuffersReleased();
                };
            }
        }
    }

    // Call back without lock held
    if (callback) {
        (*callback)();
    }

    return NO_ERROR;
}

status_t BufferQueueConsumer::setConsumerName(const String8& name) {
    ATRACE_CALL();
    BQ_LOGV("setConsumerName: '%s'", name.c_str());
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mConsumerName = name;
    mConsumerName = name;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setDefaultBufferFormat(PixelFormat defaultFormat) {
    ATRACE_CALL();
    BQ_LOGV("setDefaultBufferFormat: %u", defaultFormat);
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mDefaultBufferFormat = defaultFormat;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setDefaultBufferDataSpace(
        android_dataspace defaultDataSpace) {
    ATRACE_CALL();
    BQ_LOGV("setDefaultBufferDataSpace: %u", defaultDataSpace);
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mDefaultBufferDataSpace = defaultDataSpace;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setConsumerUsageBits(uint64_t usage) {
    ATRACE_CALL();
    BQ_LOGV("setConsumerUsageBits: %#" PRIx64, usage);
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mConsumerUsageBits = usage;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setConsumerIsProtected(bool isProtected) {
    ATRACE_CALL();
    BQ_LOGV("setConsumerIsProtected: %s", isProtected ? "true" : "false");
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mConsumerIsProtected = isProtected;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setTransformHint(uint32_t hint) {
    ATRACE_CALL();
    BQ_LOGV("setTransformHint: %#x", hint);
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mTransformHint = hint;
    return NO_ERROR;
}

status_t BufferQueueConsumer::getSidebandStream(sp<NativeHandle>* outStream) const {
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    *outStream = mCore->mSidebandStream;
    return NO_ERROR;
}

status_t BufferQueueConsumer::getOccupancyHistory(bool forceFlush,
        std::vector<OccupancyTracker::Segment>* outHistory) {
    std::lock_guard<std::mutex> lock(mCore->mMutex);
#ifndef NO_BINDER
    *outHistory = mCore->mOccupancyTracker.getSegmentHistory(forceFlush);
#else
    (void)forceFlush;
    outHistory->clear();
#endif
    return NO_ERROR;
}

status_t BufferQueueConsumer::discardFreeBuffers() {
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->discardFreeBuffersLocked();
    return NO_ERROR;
}

status_t BufferQueueConsumer::dumpState(const String8& prefix, String8* outResult) const {
    struct passwd* pwd = getpwnam("shell");
    uid_t shellUid = pwd ? pwd->pw_uid : 0;
    if (!shellUid) {
        int savedErrno = errno;
        BQ_LOGE("Cannot get AID_SHELL");
        return savedErrno ? -savedErrno : UNKNOWN_ERROR;
    }

    bool denied = false;
    const uid_t uid = BufferQueueThreadState::getCallingUid();
#if !defined(__ANDROID_VNDK__) && !defined(NO_BINDER)
    // permission check can't be done for vendors as vendors have no access to
    // the PermissionController.
    const pid_t pid = BufferQueueThreadState::getCallingPid();
    if ((uid != shellUid) &&
        !PermissionCache::checkPermission(String16("android.permission.DUMP"), pid, uid)) {
        outResult->appendFormat("Permission Denial: can't dump BufferQueueConsumer "
                                "from pid=%d, uid=%d\n",
                                pid, uid);
        denied = true;
    }
#else
    if (uid != shellUid) {
        denied = true;
    }
#endif
    if (denied) {
        android_errorWriteWithInfoLog(0x534e4554, "27046057",
                static_cast<int32_t>(uid), nullptr, 0);
        return PERMISSION_DENIED;
    }

    mCore->dumpState(prefix, outResult);
    return NO_ERROR;
}

void BufferQueueConsumer::setAllowExtraAcquire(bool allow) {
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mAllowExtraAcquire = allow;
}

} // namespace android