1 /*
2 * Copyright 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <inttypes.h>
18
19 #define LOG_TAG "BufferQueueProducer"
20 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
21 //#define LOG_NDEBUG 0
22
23 #if DEBUG_ONLY_CODE
24 #define VALIDATE_CONSISTENCY() do { mCore->validateConsistencyLocked(); } while (0)
25 #else
26 #define VALIDATE_CONSISTENCY()
27 #endif
28
29 #define EGL_EGLEXT_PROTOTYPES
30
31 #include <binder/IPCThreadState.h>
32 #include <gui/BufferItem.h>
33 #include <gui/BufferQueueCore.h>
34 #include <gui/BufferQueueProducer.h>
35 #include <gui/GLConsumer.h>
36 #include <gui/IConsumerListener.h>
37 #include <gui/IProducerListener.h>
38 #include <gui/TraceUtils.h>
39 #include <private/gui/BufferQueueThreadState.h>
40
41 #include <utils/Log.h>
42 #include <utils/Trace.h>
43
44 #include <system/window.h>
45
46 namespace android {
47
48 // Macros for including BufferQueueCore information in log messages
49 #define BQ_LOGV(x, ...) \
50 ALOGV("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.string(), \
51 mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
52 ##__VA_ARGS__)
53 #define BQ_LOGD(x, ...) \
54 ALOGD("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.string(), \
55 mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
56 ##__VA_ARGS__)
57 #define BQ_LOGI(x, ...) \
58 ALOGI("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.string(), \
59 mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
60 ##__VA_ARGS__)
61 #define BQ_LOGW(x, ...) \
62 ALOGW("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.string(), \
63 mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
64 ##__VA_ARGS__)
65 #define BQ_LOGE(x, ...) \
66 ALOGE("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.string(), \
67 mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
68 ##__VA_ARGS__)
69
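// Note on the BQ_LOG* macros above: every message is prefixed with the consumer name,
// the queue's unique id, the connected API, the connected producer pid, and the top
// 32 bits of mUniqueId (the "c:" field). For example, a call such as
//     BQ_LOGV("requestBuffer: slot %d", slot);
// expands to an ALOGV line of the form
//     [ConsumerName](id:...,api:1,p:1234,c:...) requestBuffer: slot 0
// where the sample values shown here are illustrative only.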
70 static constexpr uint32_t BQ_LAYER_COUNT = 1;
71 ProducerListener::~ProducerListener() = default;
72
73 BufferQueueProducer::BufferQueueProducer(const sp<BufferQueueCore>& core,
74 bool consumerIsSurfaceFlinger) :
75 mCore(core),
76 mSlots(core->mSlots),
77 mConsumerName(),
78 mStickyTransform(0),
79 mConsumerIsSurfaceFlinger(consumerIsSurfaceFlinger),
80 mLastQueueBufferFence(Fence::NO_FENCE),
81 mLastQueuedTransform(0),
82 mCallbackMutex(),
83 mNextCallbackTicket(0),
84 mCurrentCallbackTicket(0),
85 mCallbackCondition(),
86 mDequeueTimeout(-1),
87 mDequeueWaitingForAllocation(false) {}
88
89 BufferQueueProducer::~BufferQueueProducer() {}
90
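// requestBuffer returns the GraphicBuffer currently bound to a slot the producer owns
// (a slot in the DEQUEUED state). A client is normally expected to call it after
// dequeueBuffer reports BUFFER_NEEDS_REALLOCATION, so that the re-allocated buffer
// handle is imported on the producer side.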
91 status_t BufferQueueProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
92 ATRACE_CALL();
93 BQ_LOGV("requestBuffer: slot %d", slot);
94 std::lock_guard<std::mutex> lock(mCore->mMutex);
95
96 if (mCore->mIsAbandoned) {
97 BQ_LOGE("requestBuffer: BufferQueue has been abandoned");
98 return NO_INIT;
99 }
100
101 if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
102 BQ_LOGE("requestBuffer: BufferQueue has no connected producer");
103 return NO_INIT;
104 }
105
106 if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
107 BQ_LOGE("requestBuffer: slot index %d out of range [0, %d)",
108 slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
109 return BAD_VALUE;
110 } else if (!mSlots[slot].mBufferState.isDequeued()) {
111 BQ_LOGE("requestBuffer: slot %d is not owned by the producer "
112 "(state = %s)", slot, mSlots[slot].mBufferState.string());
113 return BAD_VALUE;
114 }
115
116 mSlots[slot].mRequestBufferCalled = true;
117 *buf = mSlots[slot].mGraphicBuffer;
118 return NO_ERROR;
119 }
120
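// setMaxDequeuedBufferCount bounds how many buffers the producer may hold in the
// DEQUEUED state at once. Roughly, the total slot budget works out to
//     maxBufferCount = maxDequeuedBuffers + minUndequeuedBuffers (+1 in async mode),
// which is why the overload below checks the request against
// getMinUndequeuedBufferCountLocked(), NUM_BUFFER_SLOTS and mMaxBufferCount.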
121 status_t BufferQueueProducer::setMaxDequeuedBufferCount(
122 int maxDequeuedBuffers) {
123 int maxBufferCount;
124 return setMaxDequeuedBufferCount(maxDequeuedBuffers, &maxBufferCount);
125 }
126
127 status_t BufferQueueProducer::setMaxDequeuedBufferCount(int maxDequeuedBuffers,
128 int* maxBufferCount) {
129 ATRACE_FORMAT("%s(%d)", __func__, maxDequeuedBuffers);
130 BQ_LOGV("setMaxDequeuedBufferCount: maxDequeuedBuffers = %d",
131 maxDequeuedBuffers);
132
133 sp<IConsumerListener> listener;
134 { // Autolock scope
135 std::unique_lock<std::mutex> lock(mCore->mMutex);
136 mCore->waitWhileAllocatingLocked(lock);
137
138 if (mCore->mIsAbandoned) {
139 BQ_LOGE("setMaxDequeuedBufferCount: BufferQueue has been "
140 "abandoned");
141 return NO_INIT;
142 }
143
144 *maxBufferCount = mCore->getMaxBufferCountLocked();
145
146 if (maxDequeuedBuffers == mCore->mMaxDequeuedBufferCount) {
147 return NO_ERROR;
148 }
149
150 // The new maxDequeuedBuffer count must not be less than the number
151 // of currently dequeued buffers
152 int dequeuedCount = 0;
153 for (int s : mCore->mActiveBuffers) {
154 if (mSlots[s].mBufferState.isDequeued()) {
155 dequeuedCount++;
156 }
157 }
158 if (dequeuedCount > maxDequeuedBuffers) {
159 BQ_LOGE("setMaxDequeuedBufferCount: the requested maxDequeuedBuffer"
160 "count (%d) exceeds the current dequeued buffer count (%d)",
161 maxDequeuedBuffers, dequeuedCount);
162 return BAD_VALUE;
163 }
164
165 int bufferCount = mCore->getMinUndequeuedBufferCountLocked();
166 bufferCount += maxDequeuedBuffers;
167
168 if (bufferCount > BufferQueueDefs::NUM_BUFFER_SLOTS) {
169 BQ_LOGE("setMaxDequeuedBufferCount: bufferCount %d too large "
170 "(max %d)", bufferCount, BufferQueueDefs::NUM_BUFFER_SLOTS);
171 return BAD_VALUE;
172 }
173
174 const int minBufferSlots = mCore->getMinMaxBufferCountLocked();
175 if (bufferCount < minBufferSlots) {
176 BQ_LOGE("setMaxDequeuedBufferCount: requested buffer count %d is "
177 "less than minimum %d", bufferCount, minBufferSlots);
178 return BAD_VALUE;
179 }
180
181 if (bufferCount > mCore->mMaxBufferCount) {
182 BQ_LOGE("setMaxDequeuedBufferCount: %d dequeued buffers would "
183 "exceed the maxBufferCount (%d) (maxAcquired %d async %d "
184 "mDequeuedBufferCannotBlock %d)", maxDequeuedBuffers,
185 mCore->mMaxBufferCount, mCore->mMaxAcquiredBufferCount,
186 mCore->mAsyncMode, mCore->mDequeueBufferCannotBlock);
187 return BAD_VALUE;
188 }
189
190 int delta = maxDequeuedBuffers - mCore->mMaxDequeuedBufferCount;
191 if (!mCore->adjustAvailableSlotsLocked(delta)) {
192 return BAD_VALUE;
193 }
194 mCore->mMaxDequeuedBufferCount = maxDequeuedBuffers;
195 *maxBufferCount = mCore->getMaxBufferCountLocked();
196 VALIDATE_CONSISTENCY();
197 if (delta < 0) {
198 listener = mCore->mConsumerListener;
199 }
200 mCore->mDequeueCondition.notify_all();
201 } // Autolock scope
202
203 // Call back without lock held
204 if (listener != nullptr) {
205 listener->onBuffersReleased();
206 }
207
208 return NO_ERROR;
209 }
210
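// setAsyncMode toggles asynchronous queuing: in async mode queued frames may be
// replaced (dropped) instead of stalling the producer, at the cost of reserving one
// extra buffer. Enabling it only succeeds if maxAcquired + maxDequeued + 1 still fits
// within mMaxBufferCount, mirroring the arithmetic in setMaxDequeuedBufferCount.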
211 status_t BufferQueueProducer::setAsyncMode(bool async) {
212 ATRACE_CALL();
213 BQ_LOGV("setAsyncMode: async = %d", async);
214
215 sp<IConsumerListener> listener;
216 { // Autolock scope
217 std::unique_lock<std::mutex> lock(mCore->mMutex);
218 mCore->waitWhileAllocatingLocked(lock);
219
220 if (mCore->mIsAbandoned) {
221 BQ_LOGE("setAsyncMode: BufferQueue has been abandoned");
222 return NO_INIT;
223 }
224
225 if (async == mCore->mAsyncMode) {
226 return NO_ERROR;
227 }
228
229 if ((mCore->mMaxAcquiredBufferCount + mCore->mMaxDequeuedBufferCount +
230 (async || mCore->mDequeueBufferCannotBlock ? 1 : 0)) >
231 mCore->mMaxBufferCount) {
232 BQ_LOGE("setAsyncMode(%d): this call would cause the "
233 "maxBufferCount (%d) to be exceeded (maxAcquired %d "
234 "maxDequeued %d mDequeueBufferCannotBlock %d)", async,
235 mCore->mMaxBufferCount, mCore->mMaxAcquiredBufferCount,
236 mCore->mMaxDequeuedBufferCount,
237 mCore->mDequeueBufferCannotBlock);
238 return BAD_VALUE;
239 }
240
241 int delta = mCore->getMaxBufferCountLocked(async,
242 mCore->mDequeueBufferCannotBlock, mCore->mMaxBufferCount)
243 - mCore->getMaxBufferCountLocked();
244
245 if (!mCore->adjustAvailableSlotsLocked(delta)) {
246 BQ_LOGE("setAsyncMode: BufferQueue failed to adjust the number of "
247 "available slots. Delta = %d", delta);
248 return BAD_VALUE;
249 }
250 mCore->mAsyncMode = async;
251 VALIDATE_CONSISTENCY();
252 mCore->mDequeueCondition.notify_all();
253 if (delta < 0) {
254 listener = mCore->mConsumerListener;
255 }
256 } // Autolock scope
257
258 // Call back without lock held
259 if (listener != nullptr) {
260 listener->onBuffersReleased();
261 }
262 return NO_ERROR;
263 }
264
265 int BufferQueueProducer::getFreeBufferLocked() const {
266 if (mCore->mFreeBuffers.empty()) {
267 return BufferQueueCore::INVALID_BUFFER_SLOT;
268 }
269 int slot = mCore->mFreeBuffers.front();
270 mCore->mFreeBuffers.pop_front();
271 return slot;
272 }
273
274 int BufferQueueProducer::getFreeSlotLocked() const {
275 if (mCore->mFreeSlots.empty()) {
276 return BufferQueueCore::INVALID_BUFFER_SLOT;
277 }
278 int slot = *(mCore->mFreeSlots.begin());
279 mCore->mFreeSlots.erase(slot);
280 return slot;
281 }
282
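// waitForFreeSlotThenRelock blocks on mDequeueCondition (temporarily releasing
// mCore->mMutex) until a usable slot exists or an error applies. Dequeue callers
// prefer slots that already hold a buffer (mFreeBuffers) to avoid re-allocation,
// while attach callers prefer empty slots (mFreeSlots) so they do not evict a
// cached buffer.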
283 status_t BufferQueueProducer::waitForFreeSlotThenRelock(FreeSlotCaller caller,
284 std::unique_lock<std::mutex>& lock, int* found) const {
285 auto callerString = (caller == FreeSlotCaller::Dequeue) ?
286 "dequeueBuffer" : "attachBuffer";
287 bool tryAgain = true;
288 while (tryAgain) {
289 if (mCore->mIsAbandoned) {
290 BQ_LOGE("%s: BufferQueue has been abandoned", callerString);
291 return NO_INIT;
292 }
293
294 int dequeuedCount = 0;
295 int acquiredCount = 0;
296 for (int s : mCore->mActiveBuffers) {
297 if (mSlots[s].mBufferState.isDequeued()) {
298 ++dequeuedCount;
299 }
300 if (mSlots[s].mBufferState.isAcquired()) {
301 ++acquiredCount;
302 }
303 }
304
305 // Producers are not allowed to dequeue more than
306 // mMaxDequeuedBufferCount buffers.
307 // This check is only done if a buffer has already been queued
308 if (mCore->mBufferHasBeenQueued &&
309 dequeuedCount >= mCore->mMaxDequeuedBufferCount) {
310 // Suppress error logs when timeout is non-negative.
311 if (mDequeueTimeout < 0) {
312 BQ_LOGE("%s: attempting to exceed the max dequeued buffer "
313 "count (%d)", callerString,
314 mCore->mMaxDequeuedBufferCount);
315 }
316 return INVALID_OPERATION;
317 }
318
319 *found = BufferQueueCore::INVALID_BUFFER_SLOT;
320
321 // If we disconnect and reconnect quickly, we can be in a state where
322 // our slots are empty but we have many buffers in the queue. This can
323 // cause us to run out of memory if we outrun the consumer. Wait here if
324 // it looks like we have too many buffers queued up.
325 const int maxBufferCount = mCore->getMaxBufferCountLocked();
326 bool tooManyBuffers = mCore->mQueue.size()
327 > static_cast<size_t>(maxBufferCount);
328 if (tooManyBuffers) {
329 BQ_LOGV("%s: queue size is %zu, waiting", callerString,
330 mCore->mQueue.size());
331 } else {
332 // If in shared buffer mode and a shared buffer exists, always
333 // return it.
334 if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot !=
335 BufferQueueCore::INVALID_BUFFER_SLOT) {
336 *found = mCore->mSharedBufferSlot;
337 } else {
338 if (caller == FreeSlotCaller::Dequeue) {
339 // If we're calling this from dequeue, prefer free buffers
340 int slot = getFreeBufferLocked();
341 if (slot != BufferQueueCore::INVALID_BUFFER_SLOT) {
342 *found = slot;
343 } else if (mCore->mAllowAllocation) {
344 *found = getFreeSlotLocked();
345 }
346 } else {
347 // If we're calling this from attach, prefer free slots
348 int slot = getFreeSlotLocked();
349 if (slot != BufferQueueCore::INVALID_BUFFER_SLOT) {
350 *found = slot;
351 } else {
352 *found = getFreeBufferLocked();
353 }
354 }
355 }
356 }
357
358 // If no buffer is found, or if the queue has too many buffers
359 // outstanding, wait for a buffer to be acquired or released, or for the
360 // max buffer count to change.
361 tryAgain = (*found == BufferQueueCore::INVALID_BUFFER_SLOT) ||
362 tooManyBuffers;
363 if (tryAgain) {
364 // Return an error if we're in non-blocking mode (producer and
365 // consumer are controlled by the application).
366 // However, the consumer is allowed to briefly acquire an extra
367 // buffer (which could cause us to have to wait here), which is
368 // okay, since it is only used to implement an atomic acquire +
369 // release (e.g., in GLConsumer::updateTexImage())
370 if ((mCore->mDequeueBufferCannotBlock || mCore->mAsyncMode) &&
371 (acquiredCount <= mCore->mMaxAcquiredBufferCount)) {
372 return WOULD_BLOCK;
373 }
374 if (mDequeueTimeout >= 0) {
375 std::cv_status result = mCore->mDequeueCondition.wait_for(lock,
376 std::chrono::nanoseconds(mDequeueTimeout));
377 if (result == std::cv_status::timeout) {
378 return TIMED_OUT;
379 }
380 } else {
381 mCore->mDequeueCondition.wait(lock);
382 }
383 }
384 } // while (tryAgain)
385
386 return NO_ERROR;
387 }
388
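// dequeueBuffer hands ownership of a slot to the producer along with a fence that
// must signal before the buffer is written. A minimal client-side sketch of the
// expected call sequence (illustrative only; `producer` stands for an
// IGraphicBufferProducer handle and all error handling is omitted):
//
//     int slot = -1;
//     sp<Fence> fence;
//     status_t err = producer->dequeueBuffer(&slot, &fence, width, height, format,
//                                            usage, nullptr, nullptr);
//     if (err & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) {
//         sp<GraphicBuffer> buffer;
//         producer->requestBuffer(slot, &buffer);  // import the newly allocated buffer
//     }
//     // wait on `fence`, render into the buffer, then queueBuffer(slot, ...)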
389 status_t BufferQueueProducer::dequeueBuffer(int* outSlot, sp<android::Fence>* outFence,
390 uint32_t width, uint32_t height, PixelFormat format,
391 uint64_t usage, uint64_t* outBufferAge,
392 FrameEventHistoryDelta* outTimestamps) {
393 ATRACE_CALL();
394 { // Autolock scope
395 std::lock_guard<std::mutex> lock(mCore->mMutex);
396 mConsumerName = mCore->mConsumerName;
397
398 if (mCore->mIsAbandoned) {
399 BQ_LOGE("dequeueBuffer: BufferQueue has been abandoned");
400 return NO_INIT;
401 }
402
403 if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
404 BQ_LOGE("dequeueBuffer: BufferQueue has no connected producer");
405 return NO_INIT;
406 }
407 } // Autolock scope
408
409 BQ_LOGV("dequeueBuffer: w=%u h=%u format=%#x, usage=%#" PRIx64, width, height, format, usage);
410
411 if ((width && !height) || (!width && height)) {
412 BQ_LOGE("dequeueBuffer: invalid size: w=%u h=%u", width, height);
413 return BAD_VALUE;
414 }
415
416 status_t returnFlags = NO_ERROR;
417 EGLDisplay eglDisplay = EGL_NO_DISPLAY;
418 EGLSyncKHR eglFence = EGL_NO_SYNC_KHR;
419 bool attachedByConsumer = false;
420
421 sp<IConsumerListener> listener;
422 bool callOnFrameDequeued = false;
423 uint64_t bufferId = 0; // Only used if callOnFrameDequeued == true
424 { // Autolock scope
425 std::unique_lock<std::mutex> lock(mCore->mMutex);
426
427 // If we don't have a free buffer, but we are currently allocating, we wait until allocation
428 // is finished such that we don't allocate in parallel.
429 if (mCore->mFreeBuffers.empty() && mCore->mIsAllocating) {
430 mDequeueWaitingForAllocation = true;
431 mCore->waitWhileAllocatingLocked(lock);
432 mDequeueWaitingForAllocation = false;
433 mDequeueWaitingForAllocationCondition.notify_all();
434 }
435
436 if (format == 0) {
437 format = mCore->mDefaultBufferFormat;
438 }
439
440 // Enable the usage bits the consumer requested
441 usage |= mCore->mConsumerUsageBits;
442
443 const bool useDefaultSize = !width && !height;
444 if (useDefaultSize) {
445 width = mCore->mDefaultWidth;
446 height = mCore->mDefaultHeight;
447 if (mCore->mAutoPrerotation &&
448 (mCore->mTransformHintInUse & NATIVE_WINDOW_TRANSFORM_ROT_90)) {
449 std::swap(width, height);
450 }
451 }
452
453 int found = BufferItem::INVALID_BUFFER_SLOT;
454 while (found == BufferItem::INVALID_BUFFER_SLOT) {
455 status_t status = waitForFreeSlotThenRelock(FreeSlotCaller::Dequeue, lock, &found);
456 if (status != NO_ERROR) {
457 return status;
458 }
459
460 // This should not happen
461 if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
462 BQ_LOGE("dequeueBuffer: no available buffer slots");
463 return -EBUSY;
464 }
465
466 const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);
467
468 // If we are not allowed to allocate new buffers,
469 // waitForFreeSlotThenRelock must have returned a slot containing a
470 // buffer. If this buffer would require reallocation to meet the
471 // requested attributes, we free it and attempt to get another one.
472 if (!mCore->mAllowAllocation) {
473 if (buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage)) {
474 if (mCore->mSharedBufferSlot == found) {
475 BQ_LOGE("dequeueBuffer: cannot re-allocate a sharedbuffer");
476 return BAD_VALUE;
477 }
478 mCore->mFreeSlots.insert(found);
479 mCore->clearBufferSlotLocked(found);
480 found = BufferItem::INVALID_BUFFER_SLOT;
481 continue;
482 }
483 }
484 }
485
486 const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);
487 if (mCore->mSharedBufferSlot == found &&
488 buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage)) {
489 BQ_LOGE("dequeueBuffer: cannot re-allocate a shared"
490 "buffer");
491
492 return BAD_VALUE;
493 }
494
495 if (mCore->mSharedBufferSlot != found) {
496 mCore->mActiveBuffers.insert(found);
497 }
498 *outSlot = found;
499 ATRACE_BUFFER_INDEX(found);
500
501 attachedByConsumer = mSlots[found].mNeedsReallocation;
502 mSlots[found].mNeedsReallocation = false;
503
504 mSlots[found].mBufferState.dequeue();
505
506 if ((buffer == nullptr) ||
507 buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage))
508 {
509 if (CC_UNLIKELY(ATRACE_ENABLED())) {
510 if (buffer == nullptr) {
511 ATRACE_FORMAT_INSTANT("%s buffer reallocation: null", mConsumerName.string());
512 } else {
513 ATRACE_FORMAT_INSTANT("%s buffer reallocation actual %dx%d format:%d "
514 "layerCount:%d "
515 "usage:%d requested: %dx%d format:%d layerCount:%d "
516 "usage:%d ",
517 mConsumerName.string(), width, height, format,
518 BQ_LAYER_COUNT, usage, buffer->getWidth(),
519 buffer->getHeight(), buffer->getPixelFormat(),
520 buffer->getLayerCount(), buffer->getUsage());
521 }
522 }
523 mSlots[found].mAcquireCalled = false;
524 mSlots[found].mGraphicBuffer = nullptr;
525 mSlots[found].mRequestBufferCalled = false;
526 mSlots[found].mEglDisplay = EGL_NO_DISPLAY;
527 mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
528 mSlots[found].mFence = Fence::NO_FENCE;
529 mCore->mBufferAge = 0;
530 mCore->mIsAllocating = true;
531
532 returnFlags |= BUFFER_NEEDS_REALLOCATION;
533 } else {
534 // We add 1 because that will be the frame number when this buffer
535 // is queued
536 mCore->mBufferAge = mCore->mFrameCounter + 1 - mSlots[found].mFrameNumber;
537 }
538
539 BQ_LOGV("dequeueBuffer: setting buffer age to %" PRIu64,
540 mCore->mBufferAge);
541
542 if (CC_UNLIKELY(mSlots[found].mFence == nullptr)) {
543 BQ_LOGE("dequeueBuffer: about to return a NULL fence - "
544 "slot=%d w=%d h=%d format=%u",
545 found, buffer->width, buffer->height, buffer->format);
546 }
547
548 eglDisplay = mSlots[found].mEglDisplay;
549 eglFence = mSlots[found].mEglFence;
550 // Don't return a fence in shared buffer mode, except for the first
551 // frame.
552 *outFence = (mCore->mSharedBufferMode &&
553 mCore->mSharedBufferSlot == found) ?
554 Fence::NO_FENCE : mSlots[found].mFence;
555 mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
556 mSlots[found].mFence = Fence::NO_FENCE;
557
558 // If shared buffer mode has just been enabled, cache the slot of the
559 // first buffer that is dequeued and mark it as the shared buffer.
560 if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot ==
561 BufferQueueCore::INVALID_BUFFER_SLOT) {
562 mCore->mSharedBufferSlot = found;
563 mSlots[found].mBufferState.mShared = true;
564 }
565
566 if (!(returnFlags & BUFFER_NEEDS_REALLOCATION)) {
567 callOnFrameDequeued = true;
568 bufferId = mSlots[*outSlot].mGraphicBuffer->getId();
569 }
570
571 listener = mCore->mConsumerListener;
572 } // Autolock scope
573
574 if (returnFlags & BUFFER_NEEDS_REALLOCATION) {
575 BQ_LOGV("dequeueBuffer: allocating a new buffer for slot %d", *outSlot);
576 sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
577 width, height, format, BQ_LAYER_COUNT, usage,
578 {mConsumerName.string(), mConsumerName.size()});
579
580 status_t error = graphicBuffer->initCheck();
581
582 { // Autolock scope
583 std::lock_guard<std::mutex> lock(mCore->mMutex);
584
585 if (error == NO_ERROR && !mCore->mIsAbandoned) {
586 graphicBuffer->setGenerationNumber(mCore->mGenerationNumber);
587 mSlots[*outSlot].mGraphicBuffer = graphicBuffer;
588 callOnFrameDequeued = true;
589 bufferId = mSlots[*outSlot].mGraphicBuffer->getId();
590 }
591
592 mCore->mIsAllocating = false;
593 mCore->mIsAllocatingCondition.notify_all();
594
595 if (error != NO_ERROR) {
596 mCore->mFreeSlots.insert(*outSlot);
597 mCore->clearBufferSlotLocked(*outSlot);
598 BQ_LOGE("dequeueBuffer: createGraphicBuffer failed");
599 return error;
600 }
601
602 if (mCore->mIsAbandoned) {
603 mCore->mFreeSlots.insert(*outSlot);
604 mCore->clearBufferSlotLocked(*outSlot);
605 BQ_LOGE("dequeueBuffer: BufferQueue has been abandoned");
606 return NO_INIT;
607 }
608
609 VALIDATE_CONSISTENCY();
610 } // Autolock scope
611 }
612
613 if (listener != nullptr && callOnFrameDequeued) {
614 listener->onFrameDequeued(bufferId);
615 }
616
617 if (attachedByConsumer) {
618 returnFlags |= BUFFER_NEEDS_REALLOCATION;
619 }
620
621 if (eglFence != EGL_NO_SYNC_KHR) {
622 EGLint result = eglClientWaitSyncKHR(eglDisplay, eglFence, 0,
623 1000000000);
624 // If something goes wrong, log the error, but return the buffer without
625 // synchronizing access to it. It's too late at this point to abort the
626 // dequeue operation.
627 if (result == EGL_FALSE) {
628 BQ_LOGE("dequeueBuffer: error %#x waiting for fence",
629 eglGetError());
630 } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
631 BQ_LOGE("dequeueBuffer: timeout waiting for fence");
632 }
633 eglDestroySyncKHR(eglDisplay, eglFence);
634 }
635
636 BQ_LOGV("dequeueBuffer: returning slot=%d/%" PRIu64 " buf=%p flags=%#x",
637 *outSlot,
638 mSlots[*outSlot].mFrameNumber,
639 mSlots[*outSlot].mGraphicBuffer->handle, returnFlags);
640
641 if (outBufferAge) {
642 *outBufferAge = mCore->mBufferAge;
643 }
644 addAndGetFrameTimestamps(nullptr, outTimestamps);
645
646 return returnFlags;
647 }
648
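// detachBuffer removes a dequeued-and-requested buffer from the queue entirely: the
// caller keeps the GraphicBuffer while the slot is cleared and returned to mFreeSlots.
// It is the inverse of attachBuffer and is rejected in shared buffer mode.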
649 status_t BufferQueueProducer::detachBuffer(int slot) {
650 ATRACE_CALL();
651 ATRACE_BUFFER_INDEX(slot);
652 BQ_LOGV("detachBuffer: slot %d", slot);
653
654 sp<IConsumerListener> listener;
655 bool callOnFrameDetached = false;
656 uint64_t bufferId = 0; // Only used if callOnFrameDetached is true
657 {
658 std::lock_guard<std::mutex> lock(mCore->mMutex);
659
660 if (mCore->mIsAbandoned) {
661 BQ_LOGE("detachBuffer: BufferQueue has been abandoned");
662 return NO_INIT;
663 }
664
665 if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
666 BQ_LOGE("detachBuffer: BufferQueue has no connected producer");
667 return NO_INIT;
668 }
669
670 if (mCore->mSharedBufferMode || mCore->mSharedBufferSlot == slot) {
671 BQ_LOGE("detachBuffer: cannot detach a buffer in shared buffer mode");
672 return BAD_VALUE;
673 }
674
675 if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
676 BQ_LOGE("detachBuffer: slot index %d out of range [0, %d)",
677 slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
678 return BAD_VALUE;
679 } else if (!mSlots[slot].mBufferState.isDequeued()) {
680 // TODO(http://b/140581935): This message is BQ_LOGW because it
681 // often logs when no actionable errors are present. Return to
682 // using BQ_LOGE after ensuring this only logs during errors.
683 BQ_LOGW("detachBuffer: slot %d is not owned by the producer "
684 "(state = %s)", slot, mSlots[slot].mBufferState.string());
685 return BAD_VALUE;
686 } else if (!mSlots[slot].mRequestBufferCalled) {
687 BQ_LOGE("detachBuffer: buffer in slot %d has not been requested",
688 slot);
689 return BAD_VALUE;
690 }
691
692 listener = mCore->mConsumerListener;
693 auto gb = mSlots[slot].mGraphicBuffer;
694 if (gb != nullptr) {
695 callOnFrameDetached = true;
696 bufferId = gb->getId();
697 }
698 mSlots[slot].mBufferState.detachProducer();
699 mCore->mActiveBuffers.erase(slot);
700 mCore->mFreeSlots.insert(slot);
701 mCore->clearBufferSlotLocked(slot);
702 mCore->mDequeueCondition.notify_all();
703 VALIDATE_CONSISTENCY();
704 }
705
706 if (listener != nullptr && callOnFrameDetached) {
707 listener->onFrameDetached(bufferId);
708 }
709
710 if (listener != nullptr) {
711 listener->onBuffersReleased();
712 }
713
714 return NO_ERROR;
715 }
716
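// detachNextBuffer pops the oldest free buffer, returning the GraphicBuffer and its
// fence to the caller; the slot it occupied becomes an empty free slot.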
717 status_t BufferQueueProducer::detachNextBuffer(sp<GraphicBuffer>* outBuffer,
718 sp<Fence>* outFence) {
719 ATRACE_CALL();
720
721 if (outBuffer == nullptr) {
722 BQ_LOGE("detachNextBuffer: outBuffer must not be NULL");
723 return BAD_VALUE;
724 } else if (outFence == nullptr) {
725 BQ_LOGE("detachNextBuffer: outFence must not be NULL");
726 return BAD_VALUE;
727 }
728
729 sp<IConsumerListener> listener;
730 {
731 std::unique_lock<std::mutex> lock(mCore->mMutex);
732
733 if (mCore->mIsAbandoned) {
734 BQ_LOGE("detachNextBuffer: BufferQueue has been abandoned");
735 return NO_INIT;
736 }
737
738 if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
739 BQ_LOGE("detachNextBuffer: BufferQueue has no connected producer");
740 return NO_INIT;
741 }
742
743 if (mCore->mSharedBufferMode) {
744 BQ_LOGE("detachNextBuffer: cannot detach a buffer in shared buffer "
745 "mode");
746 return BAD_VALUE;
747 }
748
749 mCore->waitWhileAllocatingLocked(lock);
750
751 if (mCore->mFreeBuffers.empty()) {
752 return NO_MEMORY;
753 }
754
755 int found = mCore->mFreeBuffers.front();
756 mCore->mFreeBuffers.remove(found);
757 mCore->mFreeSlots.insert(found);
758
759 BQ_LOGV("detachNextBuffer detached slot %d", found);
760
761 *outBuffer = mSlots[found].mGraphicBuffer;
762 *outFence = mSlots[found].mFence;
763 mCore->clearBufferSlotLocked(found);
764 VALIDATE_CONSISTENCY();
765 listener = mCore->mConsumerListener;
766 }
767
768 if (listener != nullptr) {
769 listener->onBuffersReleased();
770 }
771
772 return NO_ERROR;
773 }
774
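// attachBuffer places an externally obtained GraphicBuffer into a free slot and marks
// it dequeued, as if it had just been handed out by dequeueBuffer. The buffer's
// generation number must match the queue's so buffers from a stale connection cannot
// be re-attached.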
775 status_t BufferQueueProducer::attachBuffer(int* outSlot,
776 const sp<android::GraphicBuffer>& buffer) {
777 ATRACE_CALL();
778
779 if (outSlot == nullptr) {
780 BQ_LOGE("attachBuffer: outSlot must not be NULL");
781 return BAD_VALUE;
782 } else if (buffer == nullptr) {
783 BQ_LOGE("attachBuffer: cannot attach NULL buffer");
784 return BAD_VALUE;
785 }
786
787 std::unique_lock<std::mutex> lock(mCore->mMutex);
788
789 if (mCore->mIsAbandoned) {
790 BQ_LOGE("attachBuffer: BufferQueue has been abandoned");
791 return NO_INIT;
792 }
793
794 if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
795 BQ_LOGE("attachBuffer: BufferQueue has no connected producer");
796 return NO_INIT;
797 }
798
799 if (mCore->mSharedBufferMode) {
800 BQ_LOGE("attachBuffer: cannot attach a buffer in shared buffer mode");
801 return BAD_VALUE;
802 }
803
804 if (buffer->getGenerationNumber() != mCore->mGenerationNumber) {
805 BQ_LOGE("attachBuffer: generation number mismatch [buffer %u] "
806 "[queue %u]", buffer->getGenerationNumber(),
807 mCore->mGenerationNumber);
808 return BAD_VALUE;
809 }
810
811 mCore->waitWhileAllocatingLocked(lock);
812
813 status_t returnFlags = NO_ERROR;
814 int found;
815 status_t status = waitForFreeSlotThenRelock(FreeSlotCaller::Attach, lock, &found);
816 if (status != NO_ERROR) {
817 return status;
818 }
819
820 // This should not happen
821 if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
822 BQ_LOGE("attachBuffer: no available buffer slots");
823 return -EBUSY;
824 }
825
826 *outSlot = found;
827 ATRACE_BUFFER_INDEX(*outSlot);
828 BQ_LOGV("attachBuffer: returning slot %d flags=%#x",
829 *outSlot, returnFlags);
830
831 mSlots[*outSlot].mGraphicBuffer = buffer;
832 mSlots[*outSlot].mBufferState.attachProducer();
833 mSlots[*outSlot].mEglFence = EGL_NO_SYNC_KHR;
834 mSlots[*outSlot].mFence = Fence::NO_FENCE;
835 mSlots[*outSlot].mRequestBufferCalled = true;
836 mSlots[*outSlot].mAcquireCalled = false;
837 mSlots[*outSlot].mNeedsReallocation = false;
838 mCore->mActiveBuffers.insert(found);
839 VALIDATE_CONSISTENCY();
840
841 return returnFlags;
842 }
843
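// queueBuffer moves a dequeued slot to the QUEUED state and publishes a BufferItem to
// the consumer. The work is split in two phases: the queue is updated under
// mCore->mMutex, while onFrameAvailable/onFrameReplaced run under mCallbackMutex with
// a ticket scheme so callbacks from concurrent queueBuffer calls are delivered in
// order.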
844 status_t BufferQueueProducer::queueBuffer(int slot,
845 const QueueBufferInput &input, QueueBufferOutput *output) {
846 ATRACE_CALL();
847 ATRACE_BUFFER_INDEX(slot);
848
849 int64_t requestedPresentTimestamp;
850 bool isAutoTimestamp;
851 android_dataspace dataSpace;
852 Rect crop(Rect::EMPTY_RECT);
853 int scalingMode;
854 uint32_t transform;
855 uint32_t stickyTransform;
856 sp<Fence> acquireFence;
857 bool getFrameTimestamps = false;
858 input.deflate(&requestedPresentTimestamp, &isAutoTimestamp, &dataSpace,
859 &crop, &scalingMode, &transform, &acquireFence, &stickyTransform,
860 &getFrameTimestamps);
861 const Region& surfaceDamage = input.getSurfaceDamage();
862 const HdrMetadata& hdrMetadata = input.getHdrMetadata();
863
864 if (acquireFence == nullptr) {
865 BQ_LOGE("queueBuffer: fence is NULL");
866 return BAD_VALUE;
867 }
868
869 auto acquireFenceTime = std::make_shared<FenceTime>(acquireFence);
870
871 switch (scalingMode) {
872 case NATIVE_WINDOW_SCALING_MODE_FREEZE:
873 case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
874 case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
875 case NATIVE_WINDOW_SCALING_MODE_NO_SCALE_CROP:
876 break;
877 default:
878 BQ_LOGE("queueBuffer: unknown scaling mode %d", scalingMode);
879 return BAD_VALUE;
880 }
881
882 sp<IConsumerListener> frameAvailableListener;
883 sp<IConsumerListener> frameReplacedListener;
884 int callbackTicket = 0;
885 uint64_t currentFrameNumber = 0;
886 BufferItem item;
887 { // Autolock scope
888 std::lock_guard<std::mutex> lock(mCore->mMutex);
889
890 if (mCore->mIsAbandoned) {
891 BQ_LOGE("queueBuffer: BufferQueue has been abandoned");
892 return NO_INIT;
893 }
894
895 if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
896 BQ_LOGE("queueBuffer: BufferQueue has no connected producer");
897 return NO_INIT;
898 }
899
900 if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
901 BQ_LOGE("queueBuffer: slot index %d out of range [0, %d)",
902 slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
903 return BAD_VALUE;
904 } else if (!mSlots[slot].mBufferState.isDequeued()) {
905 BQ_LOGE("queueBuffer: slot %d is not owned by the producer "
906 "(state = %s)", slot, mSlots[slot].mBufferState.string());
907 return BAD_VALUE;
908 } else if (!mSlots[slot].mRequestBufferCalled) {
909 BQ_LOGE("queueBuffer: slot %d was queued without requesting "
910 "a buffer", slot);
911 return BAD_VALUE;
912 }
913
914 // If shared buffer mode has just been enabled, cache the slot of the
915 // first buffer that is queued and mark it as the shared buffer.
916 if (mCore->mSharedBufferMode && mCore->mSharedBufferSlot ==
917 BufferQueueCore::INVALID_BUFFER_SLOT) {
918 mCore->mSharedBufferSlot = slot;
919 mSlots[slot].mBufferState.mShared = true;
920 }
921
922 BQ_LOGV("queueBuffer: slot=%d/%" PRIu64 " time=%" PRIu64 " dataSpace=%d"
923 " validHdrMetadataTypes=0x%x crop=[%d,%d,%d,%d] transform=%#x scale=%s",
924 slot, mCore->mFrameCounter + 1, requestedPresentTimestamp, dataSpace,
925 hdrMetadata.validTypes, crop.left, crop.top, crop.right, crop.bottom,
926 transform,
927 BufferItem::scalingModeName(static_cast<uint32_t>(scalingMode)));
928
929 const sp<GraphicBuffer>& graphicBuffer(mSlots[slot].mGraphicBuffer);
930 Rect bufferRect(graphicBuffer->getWidth(), graphicBuffer->getHeight());
931 Rect croppedRect(Rect::EMPTY_RECT);
932 crop.intersect(bufferRect, &croppedRect);
933 if (croppedRect != crop) {
934 BQ_LOGE("queueBuffer: crop rect is not contained within the "
935 "buffer in slot %d", slot);
936 return BAD_VALUE;
937 }
938
939 // Override UNKNOWN dataspace with consumer default
940 if (dataSpace == HAL_DATASPACE_UNKNOWN) {
941 dataSpace = mCore->mDefaultBufferDataSpace;
942 }
943
944 mSlots[slot].mFence = acquireFence;
945 mSlots[slot].mBufferState.queue();
946
947 // Increment the frame counter and store a local version of it
948 // for use outside the lock on mCore->mMutex.
949 ++mCore->mFrameCounter;
950 currentFrameNumber = mCore->mFrameCounter;
951 mSlots[slot].mFrameNumber = currentFrameNumber;
952
953 item.mAcquireCalled = mSlots[slot].mAcquireCalled;
954 item.mGraphicBuffer = mSlots[slot].mGraphicBuffer;
955 item.mCrop = crop;
956 item.mTransform = transform &
957 ~static_cast<uint32_t>(NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY);
958 item.mTransformToDisplayInverse =
959 (transform & NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) != 0;
960 item.mScalingMode = static_cast<uint32_t>(scalingMode);
961 item.mTimestamp = requestedPresentTimestamp;
962 item.mIsAutoTimestamp = isAutoTimestamp;
963 item.mDataSpace = dataSpace;
964 item.mHdrMetadata = hdrMetadata;
965 item.mFrameNumber = currentFrameNumber;
966 item.mSlot = slot;
967 item.mFence = acquireFence;
968 item.mFenceTime = acquireFenceTime;
969 item.mIsDroppable = mCore->mAsyncMode ||
970 (mConsumerIsSurfaceFlinger && mCore->mQueueBufferCanDrop) ||
971 (mCore->mLegacyBufferDrop && mCore->mQueueBufferCanDrop) ||
972 (mCore->mSharedBufferMode && mCore->mSharedBufferSlot == slot);
973 item.mSurfaceDamage = surfaceDamage;
974 item.mQueuedBuffer = true;
975 item.mAutoRefresh = mCore->mSharedBufferMode && mCore->mAutoRefresh;
976 item.mApi = mCore->mConnectedApi;
977
978 mStickyTransform = stickyTransform;
979
980 // Cache the shared buffer data so that the BufferItem can be recreated.
981 if (mCore->mSharedBufferMode) {
982 mCore->mSharedBufferCache.crop = crop;
983 mCore->mSharedBufferCache.transform = transform;
984 mCore->mSharedBufferCache.scalingMode = static_cast<uint32_t>(
985 scalingMode);
986 mCore->mSharedBufferCache.dataspace = dataSpace;
987 }
988
989 output->bufferReplaced = false;
990 if (mCore->mQueue.empty()) {
991 // When the queue is empty, we can ignore mDequeueBufferCannotBlock
992 // and simply queue this buffer
993 mCore->mQueue.push_back(item);
994 frameAvailableListener = mCore->mConsumerListener;
995 } else {
996 // When the queue is not empty, we need to look at the last buffer
997 // in the queue to see if we need to replace it
998 const BufferItem& last = mCore->mQueue.itemAt(
999 mCore->mQueue.size() - 1);
1000 if (last.mIsDroppable) {
1001
1002 if (!last.mIsStale) {
1003 mSlots[last.mSlot].mBufferState.freeQueued();
1004
1005 // After leaving shared buffer mode, the shared buffer will
1006 // still be around. Mark it as no longer shared if this
1007 // operation causes it to be free.
1008 if (!mCore->mSharedBufferMode &&
1009 mSlots[last.mSlot].mBufferState.isFree()) {
1010 mSlots[last.mSlot].mBufferState.mShared = false;
1011 }
1012 // Don't put the shared buffer on the free list.
1013 if (!mSlots[last.mSlot].mBufferState.isShared()) {
1014 mCore->mActiveBuffers.erase(last.mSlot);
1015 mCore->mFreeBuffers.push_back(last.mSlot);
1016 output->bufferReplaced = true;
1017 }
1018 }
1019
1020 // Make sure to merge the damage rect from the frame we're about
1021 // to drop into the new frame's damage rect.
1022 if (last.mSurfaceDamage.bounds() == Rect::INVALID_RECT ||
1023 item.mSurfaceDamage.bounds() == Rect::INVALID_RECT) {
1024 item.mSurfaceDamage = Region::INVALID_REGION;
1025 } else {
1026 item.mSurfaceDamage |= last.mSurfaceDamage;
1027 }
1028
1029 // Overwrite the droppable buffer with the incoming one
1030 mCore->mQueue.editItemAt(mCore->mQueue.size() - 1) = item;
1031 frameReplacedListener = mCore->mConsumerListener;
1032 } else {
1033 mCore->mQueue.push_back(item);
1034 frameAvailableListener = mCore->mConsumerListener;
1035 }
1036 }
1037
1038 mCore->mBufferHasBeenQueued = true;
1039 mCore->mDequeueCondition.notify_all();
1040 mCore->mLastQueuedSlot = slot;
1041
1042 output->width = mCore->mDefaultWidth;
1043 output->height = mCore->mDefaultHeight;
1044 output->transformHint = mCore->mTransformHintInUse = mCore->mTransformHint;
1045 output->numPendingBuffers = static_cast<uint32_t>(mCore->mQueue.size());
1046 output->nextFrameNumber = mCore->mFrameCounter + 1;
1047
1048 ATRACE_INT(mCore->mConsumerName.string(),
1049 static_cast<int32_t>(mCore->mQueue.size()));
1050 #ifndef NO_BINDER
1051 mCore->mOccupancyTracker.registerOccupancyChange(mCore->mQueue.size());
1052 #endif
1053 // Take a ticket for the callback functions
1054 callbackTicket = mNextCallbackTicket++;
1055
1056 VALIDATE_CONSISTENCY();
1057 } // Autolock scope
1058
1059 // It is okay not to clear the GraphicBuffer when the consumer is SurfaceFlinger because
1060 // it is guaranteed that the BufferQueue is inside SurfaceFlinger's process and
1061 // there will be no Binder call
1062 if (!mConsumerIsSurfaceFlinger) {
1063 item.mGraphicBuffer.clear();
1064 }
1065
1066 // Update and get FrameEventHistory.
1067 nsecs_t postedTime = systemTime(SYSTEM_TIME_MONOTONIC);
1068 NewFrameEventsEntry newFrameEventsEntry = {
1069 currentFrameNumber,
1070 postedTime,
1071 requestedPresentTimestamp,
1072 std::move(acquireFenceTime)
1073 };
1074 addAndGetFrameTimestamps(&newFrameEventsEntry,
1075 getFrameTimestamps ? &output->frameTimestamps : nullptr);
1076
1077 // Call back without the main BufferQueue lock held, but with the callback
1078 // lock held so we can ensure that callbacks occur in order
1079
1080 int connectedApi;
1081 sp<Fence> lastQueuedFence;
1082
1083 { // scope for the lock
1084 std::unique_lock<std::mutex> lock(mCallbackMutex);
1085 while (callbackTicket != mCurrentCallbackTicket) {
1086 mCallbackCondition.wait(lock);
1087 }
1088
1089 if (frameAvailableListener != nullptr) {
1090 frameAvailableListener->onFrameAvailable(item);
1091 } else if (frameReplacedListener != nullptr) {
1092 frameReplacedListener->onFrameReplaced(item);
1093 }
1094
1095 connectedApi = mCore->mConnectedApi;
1096 lastQueuedFence = std::move(mLastQueueBufferFence);
1097
1098 mLastQueueBufferFence = std::move(acquireFence);
1099 mLastQueuedCrop = item.mCrop;
1100 mLastQueuedTransform = item.mTransform;
1101
1102 ++mCurrentCallbackTicket;
1103 mCallbackCondition.notify_all();
1104 }
1105
1106 // Wait without lock held
1107 if (connectedApi == NATIVE_WINDOW_API_EGL) {
1108 // Waiting here allows for two full buffers to be queued but not a
1109 // third. In the event that frames take varying time, this makes a
1110 // small trade-off in favor of latency rather than throughput.
1111 lastQueuedFence->waitForever("Throttling EGL Production");
1112 }
1113
1114 return NO_ERROR;
1115 }
1116
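// cancelBuffer returns a dequeued slot to the free-buffer list without queueing it;
// the provided fence is stored so the next user of the slot can wait for any
// outstanding rendering.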
1117 status_t BufferQueueProducer::cancelBuffer(int slot, const sp<Fence>& fence) {
1118 ATRACE_CALL();
1119 BQ_LOGV("cancelBuffer: slot %d", slot);
1120
1121 sp<IConsumerListener> listener;
1122 bool callOnFrameCancelled = false;
1123 uint64_t bufferId = 0; // Only used if callOnFrameCancelled == true
1124 {
1125 std::lock_guard<std::mutex> lock(mCore->mMutex);
1126
1127 if (mCore->mIsAbandoned) {
1128 BQ_LOGE("cancelBuffer: BufferQueue has been abandoned");
1129 return NO_INIT;
1130 }
1131
1132 if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
1133 BQ_LOGE("cancelBuffer: BufferQueue has no connected producer");
1134 return NO_INIT;
1135 }
1136
1137 if (mCore->mSharedBufferMode) {
1138 BQ_LOGE("cancelBuffer: cannot cancel a buffer in shared buffer mode");
1139 return BAD_VALUE;
1140 }
1141
1142 if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
1143 BQ_LOGE("cancelBuffer: slot index %d out of range [0, %d)", slot,
1144 BufferQueueDefs::NUM_BUFFER_SLOTS);
1145 return BAD_VALUE;
1146 } else if (!mSlots[slot].mBufferState.isDequeued()) {
1147 BQ_LOGE("cancelBuffer: slot %d is not owned by the producer "
1148 "(state = %s)",
1149 slot, mSlots[slot].mBufferState.string());
1150 return BAD_VALUE;
1151 } else if (fence == nullptr) {
1152 BQ_LOGE("cancelBuffer: fence is NULL");
1153 return BAD_VALUE;
1154 }
1155
1156 mSlots[slot].mBufferState.cancel();
1157
1158 // After leaving shared buffer mode, the shared buffer will still be around.
1159 // Mark it as no longer shared if this operation causes it to be free.
1160 if (!mCore->mSharedBufferMode && mSlots[slot].mBufferState.isFree()) {
1161 mSlots[slot].mBufferState.mShared = false;
1162 }
1163
1164 // Don't put the shared buffer on the free list.
1165 if (!mSlots[slot].mBufferState.isShared()) {
1166 mCore->mActiveBuffers.erase(slot);
1167 mCore->mFreeBuffers.push_back(slot);
1168 }
1169
1170 auto gb = mSlots[slot].mGraphicBuffer;
1171 if (gb != nullptr) {
1172 callOnFrameCancelled = true;
1173 bufferId = gb->getId();
1174 }
1175 mSlots[slot].mFence = fence;
1176 mCore->mDequeueCondition.notify_all();
1177 listener = mCore->mConsumerListener;
1178 VALIDATE_CONSISTENCY();
1179 }
1180
1181 if (listener != nullptr && callOnFrameCancelled) {
1182 listener->onFrameCancelled(bufferId);
1183 }
1184
1185 return NO_ERROR;
1186 }
1187
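// query reports a small set of NATIVE_WINDOW_* properties (default geometry, format,
// buffer age, etc.); these are the values typically surfaced to clients through the
// ANativeWindow query hook.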
1188 int BufferQueueProducer::query(int what, int *outValue) {
1189 ATRACE_CALL();
1190 std::lock_guard<std::mutex> lock(mCore->mMutex);
1191
1192 if (outValue == nullptr) {
1193 BQ_LOGE("query: outValue was NULL");
1194 return BAD_VALUE;
1195 }
1196
1197 if (mCore->mIsAbandoned) {
1198 BQ_LOGE("query: BufferQueue has been abandoned");
1199 return NO_INIT;
1200 }
1201
1202 int value;
1203 switch (what) {
1204 case NATIVE_WINDOW_WIDTH:
1205 value = static_cast<int32_t>(mCore->mDefaultWidth);
1206 break;
1207 case NATIVE_WINDOW_HEIGHT:
1208 value = static_cast<int32_t>(mCore->mDefaultHeight);
1209 break;
1210 case NATIVE_WINDOW_FORMAT:
1211 value = static_cast<int32_t>(mCore->mDefaultBufferFormat);
1212 break;
1213 case NATIVE_WINDOW_LAYER_COUNT:
1214 // All BufferQueue buffers have a single layer.
1215 value = BQ_LAYER_COUNT;
1216 break;
1217 case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
1218 value = mCore->getMinUndequeuedBufferCountLocked();
1219 break;
1220 case NATIVE_WINDOW_STICKY_TRANSFORM:
1221 value = static_cast<int32_t>(mStickyTransform);
1222 break;
1223 case NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND:
1224 value = (mCore->mQueue.size() > 1);
1225 break;
1226 case NATIVE_WINDOW_CONSUMER_USAGE_BITS:
1227 // deprecated; higher 32 bits are truncated
1228 value = static_cast<int32_t>(mCore->mConsumerUsageBits);
1229 break;
1230 case NATIVE_WINDOW_DEFAULT_DATASPACE:
1231 value = static_cast<int32_t>(mCore->mDefaultBufferDataSpace);
1232 break;
1233 case NATIVE_WINDOW_BUFFER_AGE:
1234 if (mCore->mBufferAge > INT32_MAX) {
1235 value = 0;
1236 } else {
1237 value = static_cast<int32_t>(mCore->mBufferAge);
1238 }
1239 break;
1240 case NATIVE_WINDOW_CONSUMER_IS_PROTECTED:
1241 value = static_cast<int32_t>(mCore->mConsumerIsProtected);
1242 break;
1243 default:
1244 return BAD_VALUE;
1245 }
1246
1247 BQ_LOGV("query: %d? %d", what, value);
1248 *outValue = value;
1249 return NO_ERROR;
1250 }
1251
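// connect binds a producer API (EGL, CPU, media or camera) to the queue, reports the
// default geometry and pending-buffer state back through QueueBufferOutput, and, for
// remote producers, registers a binder death recipient so the connection is torn down
// automatically if the producer process dies.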
1252 status_t BufferQueueProducer::connect(const sp<IProducerListener>& listener,
1253 int api, bool producerControlledByApp, QueueBufferOutput *output) {
1254 ATRACE_CALL();
1255 std::lock_guard<std::mutex> lock(mCore->mMutex);
1256 mConsumerName = mCore->mConsumerName;
1257 BQ_LOGV("connect: api=%d producerControlledByApp=%s", api,
1258 producerControlledByApp ? "true" : "false");
1259
1260 if (mCore->mIsAbandoned) {
1261 BQ_LOGE("connect: BufferQueue has been abandoned");
1262 return NO_INIT;
1263 }
1264
1265 if (mCore->mConsumerListener == nullptr) {
1266 BQ_LOGE("connect: BufferQueue has no consumer");
1267 return NO_INIT;
1268 }
1269
1270 if (output == nullptr) {
1271 BQ_LOGE("connect: output was NULL");
1272 return BAD_VALUE;
1273 }
1274
1275 if (mCore->mConnectedApi != BufferQueueCore::NO_CONNECTED_API) {
1276 BQ_LOGE("connect: already connected (cur=%d req=%d)",
1277 mCore->mConnectedApi, api);
1278 return BAD_VALUE;
1279 }
1280
1281 int delta = mCore->getMaxBufferCountLocked(mCore->mAsyncMode,
1282 mDequeueTimeout < 0 ?
1283 mCore->mConsumerControlledByApp && producerControlledByApp : false,
1284 mCore->mMaxBufferCount) -
1285 mCore->getMaxBufferCountLocked();
1286 if (!mCore->adjustAvailableSlotsLocked(delta)) {
1287 BQ_LOGE("connect: BufferQueue failed to adjust the number of available "
1288 "slots. Delta = %d", delta);
1289 return BAD_VALUE;
1290 }
1291
1292 int status = NO_ERROR;
1293 switch (api) {
1294 case NATIVE_WINDOW_API_EGL:
1295 case NATIVE_WINDOW_API_CPU:
1296 case NATIVE_WINDOW_API_MEDIA:
1297 case NATIVE_WINDOW_API_CAMERA:
1298 mCore->mConnectedApi = api;
1299
1300 output->width = mCore->mDefaultWidth;
1301 output->height = mCore->mDefaultHeight;
1302 output->transformHint = mCore->mTransformHintInUse = mCore->mTransformHint;
1303 output->numPendingBuffers =
1304 static_cast<uint32_t>(mCore->mQueue.size());
1305 output->nextFrameNumber = mCore->mFrameCounter + 1;
1306 output->bufferReplaced = false;
1307 output->maxBufferCount = mCore->mMaxBufferCount;
1308
1309 if (listener != nullptr) {
1310 // Set up a death notification so that we can disconnect
1311 // automatically if the remote producer dies
1312 #ifndef NO_BINDER
1313 if (IInterface::asBinder(listener)->remoteBinder() != nullptr) {
1314 status = IInterface::asBinder(listener)->linkToDeath(
1315 static_cast<IBinder::DeathRecipient*>(this));
1316 if (status != NO_ERROR) {
1317 BQ_LOGE("connect: linkToDeath failed: %s (%d)",
1318 strerror(-status), status);
1319 }
1320 mCore->mLinkedToDeath = listener;
1321 }
1322 #endif
1323 mCore->mConnectedProducerListener = listener;
1324 mCore->mBufferReleasedCbEnabled = listener->needsReleaseNotify();
1325 }
1326 break;
1327 default:
1328 BQ_LOGE("connect: unknown API %d", api);
1329 status = BAD_VALUE;
1330 break;
1331 }
1332 mCore->mConnectedPid = BufferQueueThreadState::getCallingPid();
1333 mCore->mBufferHasBeenQueued = false;
1334 mCore->mDequeueBufferCannotBlock = false;
1335 mCore->mQueueBufferCanDrop = false;
1336 mCore->mLegacyBufferDrop = true;
1337 if (mCore->mConsumerControlledByApp && producerControlledByApp) {
1338 mCore->mDequeueBufferCannotBlock = mDequeueTimeout < 0;
1339 mCore->mQueueBufferCanDrop = mDequeueTimeout <= 0;
1340 }
1341
1342 mCore->mAllowAllocation = true;
1343 VALIDATE_CONSISTENCY();
1344 return status;
1345 }
1346
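// disconnect undoes connect: it frees all buffers, removes the death notification and
// tells the consumer via onBuffersReleased/onDisconnect. DisconnectMode::AllLocal only
// takes effect when the calling pid matches the currently connected producer's pid.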
1347 status_t BufferQueueProducer::disconnect(int api, DisconnectMode mode) {
1348 ATRACE_CALL();
1349 BQ_LOGV("disconnect: api %d", api);
1350
1351 int status = NO_ERROR;
1352 sp<IConsumerListener> listener;
1353 { // Autolock scope
1354 std::unique_lock<std::mutex> lock(mCore->mMutex);
1355
1356 if (mode == DisconnectMode::AllLocal) {
1357 if (BufferQueueThreadState::getCallingPid() != mCore->mConnectedPid) {
1358 return NO_ERROR;
1359 }
1360 api = BufferQueueCore::CURRENTLY_CONNECTED_API;
1361 }
1362
1363 mCore->waitWhileAllocatingLocked(lock);
1364
1365 if (mCore->mIsAbandoned) {
1366 // It's not really an error to disconnect after the surface has
1367 // been abandoned; it should just be a no-op.
1368 return NO_ERROR;
1369 }
1370
1371 if (api == BufferQueueCore::CURRENTLY_CONNECTED_API) {
1372 if (mCore->mConnectedApi == NATIVE_WINDOW_API_MEDIA) {
1373 ALOGD("About to force-disconnect API_MEDIA, mode=%d", mode);
1374 }
1375 api = mCore->mConnectedApi;
1376 // If we're asked to disconnect the currently connected api but
1377 // nobody is connected, it's not really an error.
1378 if (api == BufferQueueCore::NO_CONNECTED_API) {
1379 return NO_ERROR;
1380 }
1381 }
1382
1383 switch (api) {
1384 case NATIVE_WINDOW_API_EGL:
1385 case NATIVE_WINDOW_API_CPU:
1386 case NATIVE_WINDOW_API_MEDIA:
1387 case NATIVE_WINDOW_API_CAMERA:
1388 if (mCore->mConnectedApi == api) {
1389 mCore->freeAllBuffersLocked();
1390
1391 #ifndef NO_BINDER
1392 // Remove our death notification callback if we have one
1393 if (mCore->mLinkedToDeath != nullptr) {
1394 sp<IBinder> token =
1395 IInterface::asBinder(mCore->mLinkedToDeath);
1396 // This can fail if we're here because of the death
1397 // notification, but we just ignore it
1398 token->unlinkToDeath(
1399 static_cast<IBinder::DeathRecipient*>(this));
1400 }
1401 #endif
1402 mCore->mSharedBufferSlot =
1403 BufferQueueCore::INVALID_BUFFER_SLOT;
1404 mCore->mLinkedToDeath = nullptr;
1405 mCore->mConnectedProducerListener = nullptr;
1406 mCore->mConnectedApi = BufferQueueCore::NO_CONNECTED_API;
1407 mCore->mConnectedPid = -1;
1408 mCore->mSidebandStream.clear();
1409 mCore->mDequeueCondition.notify_all();
1410 mCore->mAutoPrerotation = false;
1411 listener = mCore->mConsumerListener;
1412 } else if (mCore->mConnectedApi == BufferQueueCore::NO_CONNECTED_API) {
1413 BQ_LOGE("disconnect: not connected (req=%d)", api);
1414 status = NO_INIT;
1415 } else {
1416 BQ_LOGE("disconnect: still connected to another API "
1417 "(cur=%d req=%d)", mCore->mConnectedApi, api);
1418 status = BAD_VALUE;
1419 }
1420 break;
1421 default:
1422 BQ_LOGE("disconnect: unknown API %d", api);
1423 status = BAD_VALUE;
1424 break;
1425 }
1426 } // Autolock scope
1427
1428 // Call back without lock held
1429 if (listener != nullptr) {
1430 listener->onBuffersReleased();
1431 listener->onDisconnect();
1432 }
1433
1434 return status;
1435 }
1436
1437 status_t BufferQueueProducer::setSidebandStream(const sp<NativeHandle>& stream) {
1438 sp<IConsumerListener> listener;
1439 { // Autolock scope
1440 std::lock_guard<std::mutex> _l(mCore->mMutex);
1441 mCore->mSidebandStream = stream;
1442 listener = mCore->mConsumerListener;
1443 } // Autolock scope
1444
1445 if (listener != nullptr) {
1446 listener->onSidebandStreamChanged();
1447 }
1448 return NO_ERROR;
1449 }
1450
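// allocateBuffers pre-allocates buffers ahead of dequeueBuffer, one per loop iteration,
// and re-checks the requested size/format/usage after each allocation because the core
// lock is dropped while the GraphicBuffer is being constructed.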
1451 void BufferQueueProducer::allocateBuffers(uint32_t width, uint32_t height,
1452 PixelFormat format, uint64_t usage) {
1453 ATRACE_CALL();
1454
1455 const bool useDefaultSize = !width && !height;
1456 while (true) {
1457 size_t newBufferCount = 0;
1458 uint32_t allocWidth = 0;
1459 uint32_t allocHeight = 0;
1460 PixelFormat allocFormat = PIXEL_FORMAT_UNKNOWN;
1461 uint64_t allocUsage = 0;
1462 std::string allocName;
1463 { // Autolock scope
1464 std::unique_lock<std::mutex> lock(mCore->mMutex);
1465 mCore->waitWhileAllocatingLocked(lock);
1466
1467 if (!mCore->mAllowAllocation) {
1468 BQ_LOGE("allocateBuffers: allocation is not allowed for this "
1469 "BufferQueue");
1470 return;
1471 }
1472
1473 // Only allocate one buffer at a time to reduce risks of overlapping an allocation from
1474 // both allocateBuffers and dequeueBuffer.
1475 newBufferCount = mCore->mFreeSlots.empty() ? 0 : 1;
1476 if (newBufferCount == 0) {
1477 return;
1478 }
1479
1480 allocWidth = width > 0 ? width : mCore->mDefaultWidth;
1481 allocHeight = height > 0 ? height : mCore->mDefaultHeight;
1482 if (useDefaultSize && mCore->mAutoPrerotation &&
1483 (mCore->mTransformHintInUse & NATIVE_WINDOW_TRANSFORM_ROT_90)) {
1484 std::swap(allocWidth, allocHeight);
1485 }
1486
1487 allocFormat = format != 0 ? format : mCore->mDefaultBufferFormat;
1488 allocUsage = usage | mCore->mConsumerUsageBits;
1489 allocName.assign(mCore->mConsumerName.string(), mCore->mConsumerName.size());
1490
1491 mCore->mIsAllocating = true;
1492 } // Autolock scope
1493
1494 Vector<sp<GraphicBuffer>> buffers;
1495 for (size_t i = 0; i < newBufferCount; ++i) {
1496 sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
1497 allocWidth, allocHeight, allocFormat, BQ_LAYER_COUNT,
1498 allocUsage, allocName);
1499
1500 status_t result = graphicBuffer->initCheck();
1501
1502 if (result != NO_ERROR) {
1503 BQ_LOGE("allocateBuffers: failed to allocate buffer (%u x %u, format"
1504 " %u, usage %#" PRIx64 ")", width, height, format, usage);
1505 std::lock_guard<std::mutex> lock(mCore->mMutex);
1506 mCore->mIsAllocating = false;
1507 mCore->mIsAllocatingCondition.notify_all();
1508 return;
1509 }
1510 buffers.push_back(graphicBuffer);
1511 }
1512
1513 { // Autolock scope
1514 std::unique_lock<std::mutex> lock(mCore->mMutex);
1515 uint32_t checkWidth = width > 0 ? width : mCore->mDefaultWidth;
1516 uint32_t checkHeight = height > 0 ? height : mCore->mDefaultHeight;
1517 if (useDefaultSize && mCore->mAutoPrerotation &&
1518 (mCore->mTransformHintInUse & NATIVE_WINDOW_TRANSFORM_ROT_90)) {
1519 std::swap(checkWidth, checkHeight);
1520 }
1521
1522 PixelFormat checkFormat = format != 0 ?
1523 format : mCore->mDefaultBufferFormat;
1524 uint64_t checkUsage = usage | mCore->mConsumerUsageBits;
1525 if (checkWidth != allocWidth || checkHeight != allocHeight ||
1526 checkFormat != allocFormat || checkUsage != allocUsage) {
1527 // Something changed while we released the lock. Retry.
1528 BQ_LOGV("allocateBuffers: size/format/usage changed while allocating. Retrying.");
1529 mCore->mIsAllocating = false;
1530 mCore->mIsAllocatingCondition.notify_all();
1531 continue;
1532 }
1533
1534 for (size_t i = 0; i < newBufferCount; ++i) {
1535 if (mCore->mFreeSlots.empty()) {
1536 BQ_LOGV("allocateBuffers: a slot was occupied while "
1537 "allocating. Dropping allocated buffer.");
1538 continue;
1539 }
1540 auto slot = mCore->mFreeSlots.begin();
1541 mCore->clearBufferSlotLocked(*slot); // Clean up the slot first
1542 mSlots[*slot].mGraphicBuffer = buffers[i];
1543 mSlots[*slot].mFence = Fence::NO_FENCE;
1544
1545 // freeBufferLocked puts this slot on the free slots list. Since
1546 // we then attached a buffer, move the slot to the free buffer list.
1547 mCore->mFreeBuffers.push_front(*slot);
1548
1549 BQ_LOGV("allocateBuffers: allocated a new buffer in slot %d",
1550 *slot);
1551
1552 // Make sure the erase is done after all uses of the slot
1553 // iterator since it will be invalid after this point.
1554 mCore->mFreeSlots.erase(slot);
1555 }
1556
1557 mCore->mIsAllocating = false;
1558 mCore->mIsAllocatingCondition.notify_all();
1559 VALIDATE_CONSISTENCY();
1560
1561 // If dequeue is waiting to allocate a buffer, release the lock until it's not
1562 // waiting anymore so it can use the buffer we just allocated.
1563 while (mDequeueWaitingForAllocation) {
1564 mDequeueWaitingForAllocationCondition.wait(lock);
1565 }
1566 } // Autolock scope
1567 }
1568 }
1569
1570 status_t BufferQueueProducer::allowAllocation(bool allow) {
1571 ATRACE_CALL();
1572 BQ_LOGV("allowAllocation: %s", allow ? "true" : "false");
1573
1574 std::lock_guard<std::mutex> lock(mCore->mMutex);
1575 mCore->mAllowAllocation = allow;
1576 return NO_ERROR;
1577 }
1578
1579 status_t BufferQueueProducer::setGenerationNumber(uint32_t generationNumber) {
1580 ATRACE_CALL();
1581 BQ_LOGV("setGenerationNumber: %u", generationNumber);
1582
1583 std::lock_guard<std::mutex> lock(mCore->mMutex);
1584 mCore->mGenerationNumber = generationNumber;
1585 return NO_ERROR;
1586 }
1587
1588 String8 BufferQueueProducer::getConsumerName() const {
1589 ATRACE_CALL();
1590 std::lock_guard<std::mutex> lock(mCore->mMutex);
1591 BQ_LOGV("getConsumerName: %s", mConsumerName.string());
1592 return mConsumerName;
1593 }
1594
1595 status_t BufferQueueProducer::setSharedBufferMode(bool sharedBufferMode) {
1596 ATRACE_CALL();
1597 BQ_LOGV("setSharedBufferMode: %d", sharedBufferMode);
1598
1599 std::lock_guard<std::mutex> lock(mCore->mMutex);
1600 if (!sharedBufferMode) {
1601 mCore->mSharedBufferSlot = BufferQueueCore::INVALID_BUFFER_SLOT;
1602 }
1603 mCore->mSharedBufferMode = sharedBufferMode;
1604 return NO_ERROR;
1605 }
1606
1607 status_t BufferQueueProducer::setAutoRefresh(bool autoRefresh) {
1608 ATRACE_CALL();
1609 BQ_LOGV("setAutoRefresh: %d", autoRefresh);
1610
1611 std::lock_guard<std::mutex> lock(mCore->mMutex);
1612
1613 mCore->mAutoRefresh = autoRefresh;
1614 return NO_ERROR;
1615 }
1616
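// setDequeueTimeout gives dequeueBuffer a bounded wait instead of the non-blocking
// behavior implied by mDequeueBufferCannotBlock; a non-negative timeout clears that
// flag, and a positive timeout also clears mQueueBufferCanDrop, since the producer has
// opted into blocking.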
1617 status_t BufferQueueProducer::setDequeueTimeout(nsecs_t timeout) {
1618 ATRACE_CALL();
1619 BQ_LOGV("setDequeueTimeout: %" PRId64, timeout);
1620
1621 std::lock_guard<std::mutex> lock(mCore->mMutex);
1622 bool dequeueBufferCannotBlock =
1623 timeout >= 0 ? false : mCore->mDequeueBufferCannotBlock;
1624 int delta = mCore->getMaxBufferCountLocked(mCore->mAsyncMode, dequeueBufferCannotBlock,
1625 mCore->mMaxBufferCount) - mCore->getMaxBufferCountLocked();
1626 if (!mCore->adjustAvailableSlotsLocked(delta)) {
1627 BQ_LOGE("setDequeueTimeout: BufferQueue failed to adjust the number of "
1628 "available slots. Delta = %d", delta);
1629 return BAD_VALUE;
1630 }
1631
1632 mDequeueTimeout = timeout;
1633 mCore->mDequeueBufferCannotBlock = dequeueBufferCannotBlock;
1634 if (timeout > 0) {
1635 mCore->mQueueBufferCanDrop = false;
1636 }
1637
1638 VALIDATE_CONSISTENCY();
1639 return NO_ERROR;
1640 }
1641
1642 status_t BufferQueueProducer::setLegacyBufferDrop(bool drop) {
1643 ATRACE_CALL();
1644 BQ_LOGV("setLegacyBufferDrop: drop = %d", drop);
1645
1646 std::lock_guard<std::mutex> lock(mCore->mMutex);
1647 mCore->mLegacyBufferDrop = drop;
1648 return NO_ERROR;
1649 }
1650
1651 status_t BufferQueueProducer::getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
1652 sp<Fence>* outFence, float outTransformMatrix[16]) {
1653 ATRACE_CALL();
1654 BQ_LOGV("getLastQueuedBuffer");
1655
1656 std::lock_guard<std::mutex> lock(mCore->mMutex);
1657 if (mCore->mLastQueuedSlot == BufferItem::INVALID_BUFFER_SLOT) {
1658 *outBuffer = nullptr;
1659 *outFence = Fence::NO_FENCE;
1660 return NO_ERROR;
1661 }
1662
1663 *outBuffer = mSlots[mCore->mLastQueuedSlot].mGraphicBuffer;
1664 *outFence = mLastQueueBufferFence;
1665
1666 // Currently only SurfaceFlinger internally ever changes
1667 // GLConsumer's filtering mode, so we just use 'true' here as
1668 // this is slightly specialized for the current client of this API,
1669 // which does want filtering.
1670 GLConsumer::computeTransformMatrix(outTransformMatrix,
1671 mSlots[mCore->mLastQueuedSlot].mGraphicBuffer, mLastQueuedCrop,
1672 mLastQueuedTransform, true /* filter */);
1673
1674 return NO_ERROR;
1675 }
1676
1677 status_t BufferQueueProducer::getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer, sp<Fence>* outFence,
1678 Rect* outRect, uint32_t* outTransform) {
1679 ATRACE_CALL();
1680 BQ_LOGV("getLastQueuedBuffer");
1681
1682 std::lock_guard<std::mutex> lock(mCore->mMutex);
1683 if (mCore->mLastQueuedSlot == BufferItem::INVALID_BUFFER_SLOT) {
1684 *outBuffer = nullptr;
1685 *outFence = Fence::NO_FENCE;
1686 return NO_ERROR;
1687 }
1688
1689 *outBuffer = mSlots[mCore->mLastQueuedSlot].mGraphicBuffer;
1690 *outFence = mLastQueueBufferFence;
1691 *outRect = mLastQueuedCrop;
1692 *outTransform = mLastQueuedTransform;
1693
1694 return NO_ERROR;
1695 }
1696
1697 void BufferQueueProducer::getFrameTimestamps(FrameEventHistoryDelta* outDelta) {
1698 addAndGetFrameTimestamps(nullptr, outDelta);
1699 }
1700
1701 void BufferQueueProducer::addAndGetFrameTimestamps(
1702 const NewFrameEventsEntry* newTimestamps,
1703 FrameEventHistoryDelta* outDelta) {
1704 if (newTimestamps == nullptr && outDelta == nullptr) {
1705 return;
1706 }
1707
1708 ATRACE_CALL();
1709 BQ_LOGV("addAndGetFrameTimestamps");
1710 sp<IConsumerListener> listener;
1711 {
1712 std::lock_guard<std::mutex> lock(mCore->mMutex);
1713 listener = mCore->mConsumerListener;
1714 }
1715 if (listener != nullptr) {
1716 listener->addAndGetFrameTimestamps(newTimestamps, outDelta);
1717 }
1718 }
1719
1720 void BufferQueueProducer::binderDied(const wp<android::IBinder>& /* who */) {
1721 // If we're here, it means that a producer we were connected to died.
1722 // We're guaranteed that we are still connected to it because we remove
1723 // this callback upon disconnect. It's therefore safe to read mConnectedApi
1724 // without synchronization here.
1725 int api = mCore->mConnectedApi;
1726 disconnect(api);
1727 }
1728
1729 status_t BufferQueueProducer::getUniqueId(uint64_t* outId) const {
1730 BQ_LOGV("getUniqueId");
1731
1732 *outId = mCore->mUniqueId;
1733 return NO_ERROR;
1734 }
1735
1736 status_t BufferQueueProducer::getConsumerUsage(uint64_t* outUsage) const {
1737 BQ_LOGV("getConsumerUsage");
1738
1739 std::lock_guard<std::mutex> lock(mCore->mMutex);
1740 *outUsage = mCore->mConsumerUsageBits;
1741 return NO_ERROR;
1742 }
1743
1744 status_t BufferQueueProducer::setAutoPrerotation(bool autoPrerotation) {
1745 ATRACE_CALL();
1746 BQ_LOGV("setAutoPrerotation: %d", autoPrerotation);
1747
1748 std::lock_guard<std::mutex> lock(mCore->mMutex);
1749
1750 mCore->mAutoPrerotation = autoPrerotation;
1751 return NO_ERROR;
1752 }
1753
1754 } // namespace android
1755