1 /*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Surface"
18 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
19 //#define LOG_NDEBUG 0
20
21 #include <gui/Surface.h>
22
23 #include <condition_variable>
24 #include <cstddef>
25 #include <cstdint>
26 #include <deque>
27 #include <mutex>
28 #include <thread>
29
30 #include <inttypes.h>
31
32 #include <android/gui/DisplayStatInfo.h>
33 #include <android/native_window.h>
34
35 #include <gui/FenceMonitor.h>
36 #include <gui/TraceUtils.h>
37 #include <utils/Log.h>
38 #include <utils/NativeHandle.h>
39 #include <utils/Trace.h>
40
41 #include <ui/BufferQueueDefs.h>
42 #include <ui/DynamicDisplayInfo.h>
43 #include <ui/Fence.h>
44 #include <ui/GraphicBuffer.h>
45 #include <ui/Region.h>
46
47 #include <gui/AidlUtil.h>
48 #include <gui/BufferItem.h>
49
50 #include <gui/ISurfaceComposer.h>
51 #include <gui/LayerState.h>
52 #include <private/gui/ComposerService.h>
53 #include <private/gui/ComposerServiceAIDL.h>
54
55 #include <com_android_graphics_libgui_flags.h>
56
57 namespace android {
58
59 using namespace com::android::graphics::libgui;
60 using gui::aidl_utils::statusTFromBinderStatus;
61 using ui::Dataspace;
62
63 namespace {
64
65 enum {
66 // moved from nativewindow/include/system/window.h, to be removed
67 NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT = 28,
68 NATIVE_WINDOW_GET_HDR_SUPPORT = 29,
69 };
70
isInterceptorRegistrationOp(int op)71 bool isInterceptorRegistrationOp(int op) {
72 return op == NATIVE_WINDOW_SET_CANCEL_INTERCEPTOR ||
73 op == NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR ||
74 op == NATIVE_WINDOW_SET_PERFORM_INTERCEPTOR ||
75 op == NATIVE_WINDOW_SET_QUEUE_INTERCEPTOR ||
76 op == NATIVE_WINDOW_SET_QUERY_INTERCEPTOR;
77 }
78
79 } // namespace
80
81 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
ProducerDeathListenerProxy(wp<SurfaceListener> surfaceListener)82 Surface::ProducerDeathListenerProxy::ProducerDeathListenerProxy(wp<SurfaceListener> surfaceListener)
83 : mSurfaceListener(surfaceListener) {}
84
binderDied(const wp<IBinder> &)85 void Surface::ProducerDeathListenerProxy::binderDied(const wp<IBinder>&) {
86 sp<SurfaceListener> surfaceListener = mSurfaceListener.promote();
87 if (!surfaceListener) {
88 return;
89 }
90
91 if (surfaceListener->needsDeathNotify()) {
92 surfaceListener->onRemoteDied();
93 }
94 }
95 #endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
96
Surface(const sp<IGraphicBufferProducer> & bufferProducer,bool controlledByApp,const sp<IBinder> & surfaceControlHandle)97 Surface::Surface(const sp<IGraphicBufferProducer>& bufferProducer, bool controlledByApp,
98 const sp<IBinder>& surfaceControlHandle)
99 : mGraphicBufferProducer(bufferProducer),
100 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
101 mSurfaceDeathListener(nullptr),
102 #endif
103 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
104 mSlots(NUM_BUFFER_SLOTS),
105 #endif
106 mCrop(Rect::EMPTY_RECT),
107 mBufferAge(0),
108 mGenerationNumber(0),
109 mSharedBufferMode(false),
110 mAutoRefresh(false),
111 mAutoPrerotation(false),
112 mSharedBufferSlot(BufferItem::INVALID_BUFFER_SLOT),
113 mSharedBufferHasBeenQueued(false),
114 mQueriedSupportedTimestamps(false),
115 mFrameTimestampsSupportsPresent(false),
116 mEnableFrameTimestamps(false),
117 mFrameEventHistory(std::make_unique<ProducerFrameEventHistory>()) {
118 // Initialize the ANativeWindow function pointers.
119 ANativeWindow::setSwapInterval = hook_setSwapInterval;
120 ANativeWindow::dequeueBuffer = hook_dequeueBuffer;
121 ANativeWindow::cancelBuffer = hook_cancelBuffer;
122 ANativeWindow::queueBuffer = hook_queueBuffer;
123 ANativeWindow::query = hook_query;
124 ANativeWindow::perform = hook_perform;
125
126 ANativeWindow::dequeueBuffer_DEPRECATED = hook_dequeueBuffer_DEPRECATED;
127 ANativeWindow::cancelBuffer_DEPRECATED = hook_cancelBuffer_DEPRECATED;
128 ANativeWindow::lockBuffer_DEPRECATED = hook_lockBuffer_DEPRECATED;
129 ANativeWindow::queueBuffer_DEPRECATED = hook_queueBuffer_DEPRECATED;
130
131 const_cast<int&>(ANativeWindow::minSwapInterval) = 0;
132 const_cast<int&>(ANativeWindow::maxSwapInterval) = 1;
133
134 mReqWidth = 0;
135 mReqHeight = 0;
136 mReqFormat = 0;
137 mReqUsage = 0;
138 mTimestamp = NATIVE_WINDOW_TIMESTAMP_AUTO;
139 mDataSpace = Dataspace::UNKNOWN;
140 mScalingMode = NATIVE_WINDOW_SCALING_MODE_FREEZE;
141 mTransform = 0;
142 mStickyTransform = 0;
143 mDefaultWidth = 0;
144 mDefaultHeight = 0;
145 mUserWidth = 0;
146 mUserHeight = 0;
147 mTransformHint = 0;
148 mConsumerRunningBehind = false;
149 mConnectedToCpu = false;
150 mProducerControlledByApp = controlledByApp;
151 mSwapIntervalZero = false;
152 mMaxBufferCount = NUM_BUFFER_SLOTS;
153 mSurfaceControlHandle = surfaceControlHandle;
154 }
155
~Surface()156 Surface::~Surface() {
157 if (mConnectedToCpu) {
158 Surface::disconnect(NATIVE_WINDOW_API_CPU);
159 }
160 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
161 if (mSurfaceDeathListener != nullptr) {
162 IInterface::asBinder(mGraphicBufferProducer)->unlinkToDeath(mSurfaceDeathListener);
163 mSurfaceDeathListener = nullptr;
164 }
165 #endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
166 }
167
composerService() const168 sp<ISurfaceComposer> Surface::composerService() const {
169 return ComposerService::getComposerService();
170 }
171
composerServiceAIDL() const172 sp<gui::ISurfaceComposer> Surface::composerServiceAIDL() const {
173 return ComposerServiceAIDL::getComposerService();
174 }
175
now() const176 nsecs_t Surface::now() const {
177 return systemTime();
178 }
179
getIGraphicBufferProducer() const180 sp<IGraphicBufferProducer> Surface::getIGraphicBufferProducer() const {
181 return mGraphicBufferProducer;
182 }
183
setSidebandStream(const sp<NativeHandle> & stream)184 void Surface::setSidebandStream(const sp<NativeHandle>& stream) {
185 mGraphicBufferProducer->setSidebandStream(stream);
186 }
187
allocateBuffers()188 void Surface::allocateBuffers() {
189 uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
190 uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
191 mGraphicBufferProducer->allocateBuffers(reqWidth, reqHeight,
192 mReqFormat, mReqUsage);
193 }
194
195 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
allowAllocation(bool allowAllocation)196 status_t Surface::allowAllocation(bool allowAllocation) {
197 return mGraphicBufferProducer->allowAllocation(allowAllocation);
198 }
199 #endif
200
setGenerationNumber(uint32_t generation)201 status_t Surface::setGenerationNumber(uint32_t generation) {
202 status_t result = mGraphicBufferProducer->setGenerationNumber(generation);
203 if (result == NO_ERROR) {
204 mGenerationNumber = generation;
205 }
206 return result;
207 }
208
getNextFrameNumber() const209 uint64_t Surface::getNextFrameNumber() const {
210 Mutex::Autolock lock(mMutex);
211 return mNextFrameNumber;
212 }
213
getConsumerName() const214 String8 Surface::getConsumerName() const {
215 return mGraphicBufferProducer->getConsumerName();
216 }
217
setDequeueTimeout(nsecs_t timeout)218 status_t Surface::setDequeueTimeout(nsecs_t timeout) {
219 return mGraphicBufferProducer->setDequeueTimeout(timeout);
220 }
221
getLastQueuedBuffer(sp<GraphicBuffer> * outBuffer,sp<Fence> * outFence,float outTransformMatrix[16])222 status_t Surface::getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
223 sp<Fence>* outFence, float outTransformMatrix[16]) {
224 return mGraphicBufferProducer->getLastQueuedBuffer(outBuffer, outFence,
225 outTransformMatrix);
226 }
227
getDisplayRefreshCycleDuration(nsecs_t * outRefreshDuration)228 status_t Surface::getDisplayRefreshCycleDuration(nsecs_t* outRefreshDuration) {
229 ATRACE_CALL();
230
231 gui::DisplayStatInfo stats;
232 binder::Status status = composerServiceAIDL()->getDisplayStats(nullptr, &stats);
233 if (!status.isOk()) {
234 return statusTFromBinderStatus(status);
235 }
236
237 *outRefreshDuration = stats.vsyncPeriod;
238
239 return NO_ERROR;
240 }
241
enableFrameTimestamps(bool enable)242 void Surface::enableFrameTimestamps(bool enable) {
243 Mutex::Autolock lock(mMutex);
244 // If going from disabled to enabled, get the initial values for
245 // compositor and display timing.
246 if (!mEnableFrameTimestamps && enable) {
247 FrameEventHistoryDelta delta;
248 mGraphicBufferProducer->getFrameTimestamps(&delta);
249 mFrameEventHistory->applyDelta(delta);
250 }
251 mEnableFrameTimestamps = enable;
252 }
253
getCompositorTiming(nsecs_t * compositeDeadline,nsecs_t * compositeInterval,nsecs_t * compositeToPresentLatency)254 status_t Surface::getCompositorTiming(
255 nsecs_t* compositeDeadline, nsecs_t* compositeInterval,
256 nsecs_t* compositeToPresentLatency) {
257 Mutex::Autolock lock(mMutex);
258 if (!mEnableFrameTimestamps) {
259 return INVALID_OPERATION;
260 }
261
262 if (compositeDeadline != nullptr) {
263 *compositeDeadline =
264 mFrameEventHistory->getNextCompositeDeadline(now());
265 }
266 if (compositeInterval != nullptr) {
267 *compositeInterval = mFrameEventHistory->getCompositeInterval();
268 }
269 if (compositeToPresentLatency != nullptr) {
270 *compositeToPresentLatency =
271 mFrameEventHistory->getCompositeToPresentLatency();
272 }
273 return NO_ERROR;
274 }
275
checkConsumerForUpdates(const FrameEvents * e,const uint64_t lastFrameNumber,const nsecs_t * outLatchTime,const nsecs_t * outFirstRefreshStartTime,const nsecs_t * outLastRefreshStartTime,const nsecs_t * outGpuCompositionDoneTime,const nsecs_t * outDisplayPresentTime,const nsecs_t * outDequeueReadyTime,const nsecs_t * outReleaseTime)276 static bool checkConsumerForUpdates(
277 const FrameEvents* e, const uint64_t lastFrameNumber,
278 const nsecs_t* outLatchTime,
279 const nsecs_t* outFirstRefreshStartTime,
280 const nsecs_t* outLastRefreshStartTime,
281 const nsecs_t* outGpuCompositionDoneTime,
282 const nsecs_t* outDisplayPresentTime,
283 const nsecs_t* outDequeueReadyTime,
284 const nsecs_t* outReleaseTime) {
285 bool checkForLatch = (outLatchTime != nullptr) && !e->hasLatchInfo();
286 bool checkForFirstRefreshStart = (outFirstRefreshStartTime != nullptr) &&
287 !e->hasFirstRefreshStartInfo();
288 bool checkForGpuCompositionDone = (outGpuCompositionDoneTime != nullptr) &&
289 !e->hasGpuCompositionDoneInfo();
290 bool checkForDisplayPresent = (outDisplayPresentTime != nullptr) &&
291 !e->hasDisplayPresentInfo();
292
293 // LastRefreshStart, DequeueReady, and Release are never available for the
294 // last frame.
295 bool checkForLastRefreshStart = (outLastRefreshStartTime != nullptr) &&
296 !e->hasLastRefreshStartInfo() &&
297 (e->frameNumber != lastFrameNumber);
298 bool checkForDequeueReady = (outDequeueReadyTime != nullptr) &&
299 !e->hasDequeueReadyInfo() && (e->frameNumber != lastFrameNumber);
300 bool checkForRelease = (outReleaseTime != nullptr) &&
301 !e->hasReleaseInfo() && (e->frameNumber != lastFrameNumber);
302
303 // RequestedPresent and Acquire info are always available producer-side.
304 return checkForLatch || checkForFirstRefreshStart ||
305 checkForLastRefreshStart || checkForGpuCompositionDone ||
306 checkForDisplayPresent || checkForDequeueReady || checkForRelease;
307 }
308
getFrameTimestamp(nsecs_t * dst,const nsecs_t & src)309 static void getFrameTimestamp(nsecs_t *dst, const nsecs_t& src) {
310 if (dst != nullptr) {
311 // We always get valid timestamps for these eventually.
312 *dst = (src == FrameEvents::TIMESTAMP_PENDING) ?
313 NATIVE_WINDOW_TIMESTAMP_PENDING : src;
314 }
315 }
316
getFrameTimestampFence(nsecs_t * dst,const std::shared_ptr<FenceTime> & src,bool fenceShouldBeKnown)317 static void getFrameTimestampFence(nsecs_t *dst,
318 const std::shared_ptr<FenceTime>& src, bool fenceShouldBeKnown) {
319 if (dst != nullptr) {
320 if (!fenceShouldBeKnown) {
321 *dst = NATIVE_WINDOW_TIMESTAMP_PENDING;
322 return;
323 }
324
325 nsecs_t signalTime = src->getSignalTime();
326 *dst = (signalTime == Fence::SIGNAL_TIME_PENDING) ?
327 NATIVE_WINDOW_TIMESTAMP_PENDING :
328 (signalTime == Fence::SIGNAL_TIME_INVALID) ?
329 NATIVE_WINDOW_TIMESTAMP_INVALID :
330 signalTime;
331 }
332 }
333
getFrameTimestamps(uint64_t frameNumber,nsecs_t * outRequestedPresentTime,nsecs_t * outAcquireTime,nsecs_t * outLatchTime,nsecs_t * outFirstRefreshStartTime,nsecs_t * outLastRefreshStartTime,nsecs_t * outGpuCompositionDoneTime,nsecs_t * outDisplayPresentTime,nsecs_t * outDequeueReadyTime,nsecs_t * outReleaseTime)334 status_t Surface::getFrameTimestamps(uint64_t frameNumber,
335 nsecs_t* outRequestedPresentTime, nsecs_t* outAcquireTime,
336 nsecs_t* outLatchTime, nsecs_t* outFirstRefreshStartTime,
337 nsecs_t* outLastRefreshStartTime, nsecs_t* outGpuCompositionDoneTime,
338 nsecs_t* outDisplayPresentTime, nsecs_t* outDequeueReadyTime,
339 nsecs_t* outReleaseTime) {
340 ATRACE_CALL();
341
342 Mutex::Autolock lock(mMutex);
343
344 if (!mEnableFrameTimestamps) {
345 return INVALID_OPERATION;
346 }
347
348 // Verify the requested timestamps are supported.
349 querySupportedTimestampsLocked();
350 if (outDisplayPresentTime != nullptr && !mFrameTimestampsSupportsPresent) {
351 return BAD_VALUE;
352 }
353
354 FrameEvents* events = mFrameEventHistory->getFrame(frameNumber);
355 if (events == nullptr) {
356 // If the entry isn't available in the producer, it's definitely not
357 // available in the consumer.
358 return NAME_NOT_FOUND;
359 }
360
361 // Update our cache of events if the requested events are not available.
362 if (checkConsumerForUpdates(events, mLastFrameNumber,
363 outLatchTime, outFirstRefreshStartTime, outLastRefreshStartTime,
364 outGpuCompositionDoneTime, outDisplayPresentTime,
365 outDequeueReadyTime, outReleaseTime)) {
366 FrameEventHistoryDelta delta;
367 mGraphicBufferProducer->getFrameTimestamps(&delta);
368 mFrameEventHistory->applyDelta(delta);
369 events = mFrameEventHistory->getFrame(frameNumber);
370 }
371
372 if (events == nullptr) {
373 // The entry was available before the update, but was overwritten
374 // after the update. Make sure not to send the wrong frame's data.
375 return NAME_NOT_FOUND;
376 }
377
378 getFrameTimestamp(outRequestedPresentTime, events->requestedPresentTime);
379 getFrameTimestamp(outLatchTime, events->latchTime);
380
381 nsecs_t firstRefreshStartTime = NATIVE_WINDOW_TIMESTAMP_INVALID;
382 getFrameTimestamp(&firstRefreshStartTime, events->firstRefreshStartTime);
383 if (outFirstRefreshStartTime) {
384 *outFirstRefreshStartTime = firstRefreshStartTime;
385 }
386
387 getFrameTimestamp(outLastRefreshStartTime, events->lastRefreshStartTime);
388 getFrameTimestamp(outDequeueReadyTime, events->dequeueReadyTime);
389
390 nsecs_t acquireTime = NATIVE_WINDOW_TIMESTAMP_INVALID;
391 getFrameTimestampFence(&acquireTime, events->acquireFence,
392 events->hasAcquireInfo());
393 if (outAcquireTime != nullptr) {
394 *outAcquireTime = acquireTime;
395 }
396
397 getFrameTimestampFence(outGpuCompositionDoneTime,
398 events->gpuCompositionDoneFence,
399 events->hasGpuCompositionDoneInfo());
400 getFrameTimestampFence(outDisplayPresentTime, events->displayPresentFence,
401 events->hasDisplayPresentInfo());
402 getFrameTimestampFence(outReleaseTime, events->releaseFence,
403 events->hasReleaseInfo());
404
405 // Fix up the GPU completion fence at this layer -- eglGetFrameTimestampsANDROID() expects
406 // that EGL_FIRST_COMPOSITION_GPU_FINISHED_TIME_ANDROID > EGL_RENDERING_COMPLETE_TIME_ANDROID.
407 // This is typically true, but SurfaceFlinger may opt to cache prior GPU composition results,
408 // which breaks that assumption, so zero out GPU composition time.
409 if (outGpuCompositionDoneTime != nullptr
410 && *outGpuCompositionDoneTime > 0 && (acquireTime > 0 || firstRefreshStartTime > 0)
411 && *outGpuCompositionDoneTime <= std::max(acquireTime, firstRefreshStartTime)) {
412 *outGpuCompositionDoneTime = 0;
413 }
414
415 return NO_ERROR;
416 }
417
418 // Deprecated(b/242763577): to be removed, this method should not be used
419 // The reason this method still exists here is to support compiled vndk
420 // Surface support should not be tied to the display
421 // Return true since most displays should have this support
getWideColorSupport(bool * supported)422 status_t Surface::getWideColorSupport(bool* supported) {
423 ATRACE_CALL();
424
425 *supported = true;
426 return NO_ERROR;
427 }
428
429 // Deprecated(b/242763577): to be removed, this method should not be used
430 // The reason this method still exists here is to support compiled vndk
431 // Surface support should not be tied to the display
432 // Return true since most displays should have this support
getHdrSupport(bool * supported)433 status_t Surface::getHdrSupport(bool* supported) {
434 ATRACE_CALL();
435
436 *supported = true;
437 return NO_ERROR;
438 }
439
hook_setSwapInterval(ANativeWindow * window,int interval)440 int Surface::hook_setSwapInterval(ANativeWindow* window, int interval) {
441 Surface* c = getSelf(window);
442 return c->setSwapInterval(interval);
443 }
444
hook_dequeueBuffer(ANativeWindow * window,ANativeWindowBuffer ** buffer,int * fenceFd)445 int Surface::hook_dequeueBuffer(ANativeWindow* window,
446 ANativeWindowBuffer** buffer, int* fenceFd) {
447 Surface* c = getSelf(window);
448 {
449 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
450 if (c->mDequeueInterceptor != nullptr) {
451 auto interceptor = c->mDequeueInterceptor;
452 auto data = c->mDequeueInterceptorData;
453 return interceptor(window, Surface::dequeueBufferInternal, data, buffer, fenceFd);
454 }
455 }
456 return c->dequeueBuffer(buffer, fenceFd);
457 }
458
dequeueBufferInternal(ANativeWindow * window,ANativeWindowBuffer ** buffer,int * fenceFd)459 int Surface::dequeueBufferInternal(ANativeWindow* window, ANativeWindowBuffer** buffer,
460 int* fenceFd) {
461 Surface* c = getSelf(window);
462 return c->dequeueBuffer(buffer, fenceFd);
463 }
464
hook_cancelBuffer(ANativeWindow * window,ANativeWindowBuffer * buffer,int fenceFd)465 int Surface::hook_cancelBuffer(ANativeWindow* window,
466 ANativeWindowBuffer* buffer, int fenceFd) {
467 Surface* c = getSelf(window);
468 {
469 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
470 if (c->mCancelInterceptor != nullptr) {
471 auto interceptor = c->mCancelInterceptor;
472 auto data = c->mCancelInterceptorData;
473 return interceptor(window, Surface::cancelBufferInternal, data, buffer, fenceFd);
474 }
475 }
476 return c->cancelBuffer(buffer, fenceFd);
477 }
478
cancelBufferInternal(ANativeWindow * window,ANativeWindowBuffer * buffer,int fenceFd)479 int Surface::cancelBufferInternal(ANativeWindow* window, ANativeWindowBuffer* buffer, int fenceFd) {
480 Surface* c = getSelf(window);
481 return c->cancelBuffer(buffer, fenceFd);
482 }
483
hook_queueBuffer(ANativeWindow * window,ANativeWindowBuffer * buffer,int fenceFd)484 int Surface::hook_queueBuffer(ANativeWindow* window,
485 ANativeWindowBuffer* buffer, int fenceFd) {
486 Surface* c = getSelf(window);
487 {
488 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
489 if (c->mQueueInterceptor != nullptr) {
490 auto interceptor = c->mQueueInterceptor;
491 auto data = c->mQueueInterceptorData;
492 return interceptor(window, Surface::queueBufferInternal, data, buffer, fenceFd);
493 }
494 }
495 return c->queueBuffer(buffer, fenceFd);
496 }
497
queueBufferInternal(ANativeWindow * window,ANativeWindowBuffer * buffer,int fenceFd)498 int Surface::queueBufferInternal(ANativeWindow* window, ANativeWindowBuffer* buffer, int fenceFd) {
499 Surface* c = getSelf(window);
500 return c->queueBuffer(buffer, fenceFd);
501 }
502
hook_dequeueBuffer_DEPRECATED(ANativeWindow * window,ANativeWindowBuffer ** buffer)503 int Surface::hook_dequeueBuffer_DEPRECATED(ANativeWindow* window,
504 ANativeWindowBuffer** buffer) {
505 Surface* c = getSelf(window);
506 ANativeWindowBuffer* buf;
507 int fenceFd = -1;
508 int result = c->dequeueBuffer(&buf, &fenceFd);
509 if (result != OK) {
510 return result;
511 }
512 sp<Fence> fence = sp<Fence>::make(fenceFd);
513 int waitResult = fence->waitForever("dequeueBuffer_DEPRECATED");
514 if (waitResult != OK) {
515 ALOGE("dequeueBuffer_DEPRECATED: Fence::wait returned an error: %d",
516 waitResult);
517 c->cancelBuffer(buf, -1);
518 return waitResult;
519 }
520 *buffer = buf;
521 return result;
522 }
523
hook_cancelBuffer_DEPRECATED(ANativeWindow * window,ANativeWindowBuffer * buffer)524 int Surface::hook_cancelBuffer_DEPRECATED(ANativeWindow* window,
525 ANativeWindowBuffer* buffer) {
526 Surface* c = getSelf(window);
527 return c->cancelBuffer(buffer, -1);
528 }
529
hook_lockBuffer_DEPRECATED(ANativeWindow * window,ANativeWindowBuffer * buffer)530 int Surface::hook_lockBuffer_DEPRECATED(ANativeWindow* window,
531 ANativeWindowBuffer* buffer) {
532 Surface* c = getSelf(window);
533 return c->lockBuffer_DEPRECATED(buffer);
534 }
535
hook_queueBuffer_DEPRECATED(ANativeWindow * window,ANativeWindowBuffer * buffer)536 int Surface::hook_queueBuffer_DEPRECATED(ANativeWindow* window,
537 ANativeWindowBuffer* buffer) {
538 Surface* c = getSelf(window);
539 return c->queueBuffer(buffer, -1);
540 }
541
hook_perform(ANativeWindow * window,int operation,...)542 int Surface::hook_perform(ANativeWindow* window, int operation, ...) {
543 va_list args;
544 va_start(args, operation);
545 Surface* c = getSelf(window);
546 int result;
547 // Don't acquire shared ownership of the interceptor mutex if we're going to
548 // do interceptor registration, as otherwise we'll deadlock on acquiring
549 // exclusive ownership.
550 if (!isInterceptorRegistrationOp(operation)) {
551 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
552 if (c->mPerformInterceptor != nullptr) {
553 result = c->mPerformInterceptor(window, Surface::performInternal,
554 c->mPerformInterceptorData, operation, args);
555 va_end(args);
556 return result;
557 }
558 }
559 result = c->perform(operation, args);
560 va_end(args);
561 return result;
562 }
563
performInternal(ANativeWindow * window,int operation,va_list args)564 int Surface::performInternal(ANativeWindow* window, int operation, va_list args) {
565 Surface* c = getSelf(window);
566 return c->perform(operation, args);
567 }
568
hook_query(const ANativeWindow * window,int what,int * value)569 int Surface::hook_query(const ANativeWindow* window, int what, int* value) {
570 const Surface* c = getSelf(window);
571 {
572 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
573 if (c->mQueryInterceptor != nullptr) {
574 auto interceptor = c->mQueryInterceptor;
575 auto data = c->mQueryInterceptorData;
576 return interceptor(window, Surface::queryInternal, data, what, value);
577 }
578 }
579 return c->query(what, value);
580 }
581
queryInternal(const ANativeWindow * window,int what,int * value)582 int Surface::queryInternal(const ANativeWindow* window, int what, int* value) {
583 const Surface* c = getSelf(window);
584 return c->query(what, value);
585 }
586
setSwapInterval(int interval)587 int Surface::setSwapInterval(int interval) {
588 ATRACE_CALL();
589 // EGL specification states:
590 // interval is silently clamped to minimum and maximum implementation
591 // dependent values before being stored.
592
593 if (interval < minSwapInterval)
594 interval = minSwapInterval;
595
596 if (interval > maxSwapInterval)
597 interval = maxSwapInterval;
598
599 const bool wasSwapIntervalZero = mSwapIntervalZero;
600 mSwapIntervalZero = (interval == 0);
601
602 if (mSwapIntervalZero != wasSwapIntervalZero) {
603 mGraphicBufferProducer->setAsyncMode(mSwapIntervalZero);
604 }
605
606 return NO_ERROR;
607 }
608
getDequeueBufferInputLocked(IGraphicBufferProducer::DequeueBufferInput * dequeueInput)609 void Surface::getDequeueBufferInputLocked(
610 IGraphicBufferProducer::DequeueBufferInput* dequeueInput) {
611 LOG_ALWAYS_FATAL_IF(dequeueInput == nullptr, "input is null");
612
613 dequeueInput->width = mReqWidth ? mReqWidth : mUserWidth;
614 dequeueInput->height = mReqHeight ? mReqHeight : mUserHeight;
615
616 dequeueInput->format = mReqFormat;
617 dequeueInput->usage = mReqUsage;
618
619 dequeueInput->getTimestamps = mEnableFrameTimestamps;
620 }
621
dequeueBuffer(android_native_buffer_t ** buffer,int * fenceFd)622 int Surface::dequeueBuffer(android_native_buffer_t** buffer, int* fenceFd) {
623 ATRACE_FORMAT("dequeueBuffer - %s", getDebugName());
624 ALOGV("Surface::dequeueBuffer");
625
626 IGraphicBufferProducer::DequeueBufferInput dqInput;
627 {
628 Mutex::Autolock lock(mMutex);
629 if (mReportRemovedBuffers) {
630 mRemovedBuffers.clear();
631 }
632
633 getDequeueBufferInputLocked(&dqInput);
634
635 if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot !=
636 BufferItem::INVALID_BUFFER_SLOT) {
637 sp<GraphicBuffer>& gbuf(mSlots[mSharedBufferSlot].buffer);
638 if (gbuf != nullptr) {
639 *buffer = gbuf.get();
640 *fenceFd = -1;
641 return OK;
642 }
643 }
644 } // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffer
645
646 int buf = -1;
647 sp<Fence> fence;
648 nsecs_t startTime = systemTime();
649
650 FrameEventHistoryDelta frameTimestamps;
651 status_t result = mGraphicBufferProducer->dequeueBuffer(&buf, &fence, dqInput.width,
652 dqInput.height, dqInput.format,
653 dqInput.usage, &mBufferAge,
654 dqInput.getTimestamps ?
655 &frameTimestamps : nullptr);
656 mLastDequeueDuration = systemTime() - startTime;
657
658 if (result < 0) {
659 ALOGV("dequeueBuffer: IGraphicBufferProducer::dequeueBuffer"
660 "(%d, %d, %d, %#" PRIx64 ") failed: %d",
661 dqInput.width, dqInput.height, dqInput.format, dqInput.usage, result);
662 return result;
663 }
664
665 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
666 if (buf < 0 || buf >= (int)mSlots.size()) {
667 #else
668 if (buf < 0 || buf >= NUM_BUFFER_SLOTS) {
669 #endif
670 ALOGE("dequeueBuffer: IGraphicBufferProducer returned invalid slot number %d", buf);
671 android_errorWriteLog(0x534e4554, "36991414"); // SafetyNet logging
672 return FAILED_TRANSACTION;
673 }
674
675 Mutex::Autolock lock(mMutex);
676
677 // Write this while holding the mutex
678 mLastDequeueStartTime = startTime;
679
680 sp<GraphicBuffer>& gbuf(mSlots[buf].buffer);
681
682 // this should never happen
683 ALOGE_IF(fence == nullptr, "Surface::dequeueBuffer: received null Fence! buf=%d", buf);
684
685 if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
686 static gui::FenceMonitor hwcReleaseThread("HWC release");
687 hwcReleaseThread.queueFence(fence);
688 }
689
690 if (result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
691 freeAllBuffers();
692 }
693
694 if (dqInput.getTimestamps) {
695 mFrameEventHistory->applyDelta(frameTimestamps);
696 }
697
698 if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == nullptr) {
699 if (mReportRemovedBuffers && (gbuf != nullptr)) {
700 mRemovedBuffers.push_back(gbuf);
701 }
702 result = mGraphicBufferProducer->requestBuffer(buf, &gbuf);
703 if (result != NO_ERROR) {
704 ALOGE("dequeueBuffer: IGraphicBufferProducer::requestBuffer failed: %d", result);
705 mGraphicBufferProducer->cancelBuffer(buf, fence);
706 return result;
707 }
708 }
709
710 if (fence->isValid()) {
711 *fenceFd = fence->dup();
712 if (*fenceFd == -1) {
713 ALOGE("dequeueBuffer: error duping fence: %d", errno);
714 // dup() should never fail; something is badly wrong. Soldier on
715 // and hope for the best; the worst that should happen is some
716 // visible corruption that lasts until the next frame.
717 }
718 } else {
719 *fenceFd = -1;
720 }
721
722 *buffer = gbuf.get();
723
724 if (mSharedBufferMode && mAutoRefresh) {
725 mSharedBufferSlot = buf;
726 mSharedBufferHasBeenQueued = false;
727 } else if (mSharedBufferSlot == buf) {
728 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
729 mSharedBufferHasBeenQueued = false;
730 }
731
732 mDequeuedSlots.insert(buf);
733
734 return OK;
735 }
736
737 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
738
739 status_t Surface::dequeueBuffer(sp<GraphicBuffer>* buffer, sp<Fence>* outFence) {
740 if (buffer == nullptr || outFence == nullptr) {
741 return BAD_VALUE;
742 }
743
744 android_native_buffer_t* anb;
745 int fd = -1;
746 status_t res = dequeueBuffer(&anb, &fd);
747 *buffer = GraphicBuffer::from(anb);
748 *outFence = sp<Fence>::make(fd);
749 return res;
750 }
751
752 status_t Surface::queueBuffer(const sp<GraphicBuffer>& buffer, const sp<Fence>& fd,
753 SurfaceQueueBufferOutput* output) {
754 if (buffer == nullptr) {
755 return BAD_VALUE;
756 }
757 return queueBuffer(buffer.get(), fd ? fd->get() : -1, output);
758 }
759
760 status_t Surface::detachBuffer(const sp<GraphicBuffer>& buffer) {
761 if (nullptr == buffer) {
762 return BAD_VALUE;
763 }
764
765 Mutex::Autolock lock(mMutex);
766
767 uint64_t bufferId = buffer->getId();
768 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
769 for (int slot = 0; slot < (int)mSlots.size(); ++slot) {
770 #else
771 for (int slot = 0; slot < Surface::NUM_BUFFER_SLOTS; ++slot) {
772 #endif
773 auto& bufferSlot = mSlots[slot];
774 if (bufferSlot.buffer != nullptr && bufferSlot.buffer->getId() == bufferId) {
775 bufferSlot.buffer = nullptr;
776 bufferSlot.dirtyRegion = Region::INVALID_REGION;
777 return mGraphicBufferProducer->detachBuffer(slot);
778 }
779 }
780
781 return BAD_VALUE;
782 }
783
784 #endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
785
786 int Surface::dequeueBuffers(std::vector<BatchBuffer>* buffers) {
787 using DequeueBufferInput = IGraphicBufferProducer::DequeueBufferInput;
788 using DequeueBufferOutput = IGraphicBufferProducer::DequeueBufferOutput;
789 using CancelBufferInput = IGraphicBufferProducer::CancelBufferInput;
790 using RequestBufferOutput = IGraphicBufferProducer::RequestBufferOutput;
791
792 ATRACE_CALL();
793 ALOGV("Surface::dequeueBuffers");
794
795 if (buffers->size() == 0) {
796 ALOGE("%s: must dequeue at least 1 buffer!", __FUNCTION__);
797 return BAD_VALUE;
798 }
799
800 if (mSharedBufferMode) {
801 ALOGE("%s: batch operation is not supported in shared buffer mode!",
802 __FUNCTION__);
803 return INVALID_OPERATION;
804 }
805
806 size_t numBufferRequested = buffers->size();
807 DequeueBufferInput input;
808
809 {
810 Mutex::Autolock lock(mMutex);
811 if (mReportRemovedBuffers) {
812 mRemovedBuffers.clear();
813 }
814
815 getDequeueBufferInputLocked(&input);
816 } // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffers
817
818 std::vector<DequeueBufferInput> dequeueInput(numBufferRequested, input);
819 std::vector<DequeueBufferOutput> dequeueOutput;
820
821 nsecs_t startTime = systemTime();
822
823 status_t result = mGraphicBufferProducer->dequeueBuffers(dequeueInput, &dequeueOutput);
824
825 mLastDequeueDuration = systemTime() - startTime;
826
827 if (result < 0) {
828 ALOGV("%s: IGraphicBufferProducer::dequeueBuffers"
829 "(%d, %d, %d, %#" PRIx64 ") failed: %d",
830 __FUNCTION__, input.width, input.height, input.format, input.usage, result);
831 return result;
832 }
833
834 std::vector<CancelBufferInput> cancelBufferInputs;
835 cancelBufferInputs.reserve(numBufferRequested);
836 std::vector<status_t> cancelBufferOutputs;
837 for (size_t i = 0; i < numBufferRequested; i++) {
838 if (dequeueOutput[i].result >= 0) {
839 CancelBufferInput& input = cancelBufferInputs.emplace_back();
840 input.slot = dequeueOutput[i].slot;
841 input.fence = dequeueOutput[i].fence;
842 }
843 }
844
845 for (const auto& output : dequeueOutput) {
846 if (output.result < 0) {
847 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
848 ALOGV("%s: IGraphicBufferProducer::dequeueBuffers"
849 "(%d, %d, %d, %#" PRIx64 ") failed: %d",
850 __FUNCTION__, input.width, input.height, input.format, input.usage,
851 output.result);
852 return output.result;
853 }
854
855 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
856 if (output.slot < 0 || output.slot >= (int)mSlots.size()) {
857 #else
858 if (output.slot < 0 || output.slot >= NUM_BUFFER_SLOTS) {
859 #endif
860 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
861 ALOGE("%s: IGraphicBufferProducer returned invalid slot number %d",
862 __FUNCTION__, output.slot);
863 android_errorWriteLog(0x534e4554, "36991414"); // SafetyNet logging
864 return FAILED_TRANSACTION;
865 }
866
867 if (input.getTimestamps && !output.timestamps.has_value()) {
868 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
869 ALOGE("%s: no frame timestamp returns!", __FUNCTION__);
870 return FAILED_TRANSACTION;
871 }
872
873 // this should never happen
874 ALOGE_IF(output.fence == nullptr,
875 "%s: received null Fence! slot=%d", __FUNCTION__, output.slot);
876 }
877
878 Mutex::Autolock lock(mMutex);
879
880 // Write this while holding the mutex
881 mLastDequeueStartTime = startTime;
882
883 std::vector<int32_t> requestBufferSlots;
884 requestBufferSlots.reserve(numBufferRequested);
885 // handle release all buffers and request buffers
886 for (const auto& output : dequeueOutput) {
887 if (output.result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
888 ALOGV("%s: RELEASE_ALL_BUFFERS during batch operation", __FUNCTION__);
889 freeAllBuffers();
890 break;
891 }
892 }
893
894 for (const auto& output : dequeueOutput) {
895 // Collect slots that needs requesting buffer
896 sp<GraphicBuffer>& gbuf(mSlots[output.slot].buffer);
897 if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == nullptr) {
898 if (mReportRemovedBuffers && (gbuf != nullptr)) {
899 mRemovedBuffers.push_back(gbuf);
900 }
901 requestBufferSlots.push_back(output.slot);
902 }
903 }
904
905 // Batch request Buffer
906 std::vector<RequestBufferOutput> reqBufferOutput;
907 if (requestBufferSlots.size() > 0) {
908 result = mGraphicBufferProducer->requestBuffers(requestBufferSlots, &reqBufferOutput);
909 if (result != NO_ERROR) {
910 ALOGE("%s: IGraphicBufferProducer::requestBuffers failed: %d",
911 __FUNCTION__, result);
912 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
913 return result;
914 }
915
916 // Check if we have any single failure
917 for (size_t i = 0; i < requestBufferSlots.size(); i++) {
918 if (reqBufferOutput[i].result != OK) {
919 ALOGE("%s: IGraphicBufferProducer::requestBuffers failed at %zu-th buffer, slot %d",
920 __FUNCTION__, i, requestBufferSlots[i]);
921 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
922 return reqBufferOutput[i].result;
923 }
924 }
925
926 // Fill request buffer results to mSlots
927 for (size_t i = 0; i < requestBufferSlots.size(); i++) {
928 mSlots[requestBufferSlots[i]].buffer = reqBufferOutput[i].buffer;
929 }
930 }
931
932 for (size_t batchIdx = 0; batchIdx < numBufferRequested; batchIdx++) {
933 const auto& output = dequeueOutput[batchIdx];
934 int slot = output.slot;
935 sp<GraphicBuffer>& gbuf(mSlots[slot].buffer);
936
937 if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
938 static gui::FenceMonitor hwcReleaseThread("HWC release");
939 hwcReleaseThread.queueFence(output.fence);
940 }
941
942 if (input.getTimestamps) {
943 mFrameEventHistory->applyDelta(output.timestamps.value());
944 }
945
946 if (output.fence->isValid()) {
947 buffers->at(batchIdx).fenceFd = output.fence->dup();
948 if (buffers->at(batchIdx).fenceFd == -1) {
949 ALOGE("%s: error duping fence: %d", __FUNCTION__, errno);
950 // dup() should never fail; something is badly wrong. Soldier on
951 // and hope for the best; the worst that should happen is some
952 // visible corruption that lasts until the next frame.
953 }
954 } else {
955 buffers->at(batchIdx).fenceFd = -1;
956 }
957
958 buffers->at(batchIdx).buffer = gbuf.get();
959 mDequeuedSlots.insert(slot);
960 }
961 return OK;
962 }
963
964 int Surface::cancelBuffer(android_native_buffer_t* buffer,
965 int fenceFd) {
966 ATRACE_CALL();
967 ALOGV("Surface::cancelBuffer");
968 Mutex::Autolock lock(mMutex);
969 int i = getSlotFromBufferLocked(buffer);
970 if (i < 0) {
971 if (fenceFd >= 0) {
972 close(fenceFd);
973 }
974 return i;
975 }
976 if (mSharedBufferSlot == i && mSharedBufferHasBeenQueued) {
977 if (fenceFd >= 0) {
978 close(fenceFd);
979 }
980 return OK;
981 }
982 sp<Fence> fence(fenceFd >= 0 ? sp<Fence>::make(fenceFd) : Fence::NO_FENCE);
983 mGraphicBufferProducer->cancelBuffer(i, fence);
984
985 if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot == i) {
986 mSharedBufferHasBeenQueued = true;
987 }
988
989 mDequeuedSlots.erase(i);
990
991 return OK;
992 }
993
994 int Surface::cancelBuffers(const std::vector<BatchBuffer>& buffers) {
995 using CancelBufferInput = IGraphicBufferProducer::CancelBufferInput;
996 ATRACE_CALL();
997 ALOGV("Surface::cancelBuffers");
998
999 if (mSharedBufferMode) {
1000 ALOGE("%s: batch operation is not supported in shared buffer mode!",
1001 __FUNCTION__);
1002 return INVALID_OPERATION;
1003 }
1004
1005 size_t numBuffers = buffers.size();
1006 std::vector<CancelBufferInput> cancelBufferInputs(numBuffers);
1007 std::vector<status_t> cancelBufferOutputs;
1008 size_t numBuffersCancelled = 0;
1009 int badSlotResult = 0;
1010 for (size_t i = 0; i < numBuffers; i++) {
1011 int slot = getSlotFromBufferLocked(buffers[i].buffer);
1012 int fenceFd = buffers[i].fenceFd;
1013 if (slot < 0) {
1014 if (fenceFd >= 0) {
1015 close(fenceFd);
1016 }
1017 ALOGE("%s: cannot find slot number for cancelled buffer", __FUNCTION__);
1018 badSlotResult = slot;
1019 } else {
1020 sp<Fence> fence(fenceFd >= 0 ? sp<Fence>::make(fenceFd) : Fence::NO_FENCE);
1021 cancelBufferInputs[numBuffersCancelled].slot = slot;
1022 cancelBufferInputs[numBuffersCancelled++].fence = fence;
1023 }
1024 }
1025 cancelBufferInputs.resize(numBuffersCancelled);
1026 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
1027
1028
1029 for (size_t i = 0; i < numBuffersCancelled; i++) {
1030 mDequeuedSlots.erase(cancelBufferInputs[i].slot);
1031 }
1032
1033 if (badSlotResult != 0) {
1034 return badSlotResult;
1035 }
1036 return OK;
1037 }
1038
1039 int Surface::getSlotFromBufferLocked(
1040 android_native_buffer_t* buffer) const {
1041 if (buffer == nullptr) {
1042 ALOGE("%s: input buffer is null!", __FUNCTION__);
1043 return BAD_VALUE;
1044 }
1045
1046 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
1047 for (int i = 0; i < (int)mSlots.size(); i++) {
1048 #else
1049 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
1050 #endif
1051 if (mSlots[i].buffer != nullptr &&
1052 mSlots[i].buffer->handle == buffer->handle) {
1053 return i;
1054 }
1055 }
1056 ALOGE("%s: unknown buffer: %p", __FUNCTION__, buffer->handle);
1057 return BAD_VALUE;
1058 }
1059
1060 int Surface::lockBuffer_DEPRECATED(android_native_buffer_t* buffer __attribute__((unused))) {
1061 ALOGV("Surface::lockBuffer");
1062 Mutex::Autolock lock(mMutex);
1063 return OK;
1064 }
1065
1066 void Surface::getQueueBufferInputLocked(android_native_buffer_t* buffer, int fenceFd,
1067 nsecs_t timestamp, IGraphicBufferProducer::QueueBufferInput* out) {
1068 bool isAutoTimestamp = false;
1069
1070 if (timestamp == NATIVE_WINDOW_TIMESTAMP_AUTO) {
1071 timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
1072 isAutoTimestamp = true;
1073 ALOGV("Surface::queueBuffer making up timestamp: %.2f ms",
1074 timestamp / 1000000.0);
1075 }
1076
1077 // Make sure the crop rectangle is entirely inside the buffer.
1078 Rect crop(Rect::EMPTY_RECT);
1079 mCrop.intersect(Rect(buffer->width, buffer->height), &crop);
1080
1081 sp<Fence> fence(fenceFd >= 0 ? sp<Fence>::make(fenceFd) : Fence::NO_FENCE);
1082 IGraphicBufferProducer::QueueBufferInput input(timestamp, isAutoTimestamp,
1083 static_cast<android_dataspace>(mDataSpace), crop, mScalingMode,
1084 mTransform ^ mStickyTransform, fence, mStickyTransform,
1085 mEnableFrameTimestamps);
1086
1087 // we should send HDR metadata as needed if this becomes a bottleneck
1088 input.setHdrMetadata(mHdrMetadata);
1089
1090 if (mConnectedToCpu || mDirtyRegion.bounds() == Rect::INVALID_RECT) {
1091 input.setSurfaceDamage(Region::INVALID_REGION);
1092 } else {
1093 // Here we do two things:
1094 // 1) The surface damage was specified using the OpenGL ES convention of
1095 // the origin being in the bottom-left corner. Here we flip to the
1096 // convention that the rest of the system uses (top-left corner) by
1097 // subtracting all top/bottom coordinates from the buffer height.
1098 // 2) If the buffer is coming in rotated (for example, because the EGL
1099 // implementation is reacting to the transform hint coming back from
1100 // SurfaceFlinger), the surface damage needs to be rotated the
1101 // opposite direction, since it was generated assuming an unrotated
1102 // buffer (the app doesn't know that the EGL implementation is
1103 // reacting to the transform hint behind its back). The
1104 // transformations in the switch statement below apply those
1105 // complementary rotations (e.g., if 90 degrees, rotate 270 degrees).
1106
1107 int width = buffer->width;
1108 int height = buffer->height;
1109 bool rotated90 = (mTransform ^ mStickyTransform) &
1110 NATIVE_WINDOW_TRANSFORM_ROT_90;
1111 if (rotated90) {
1112 std::swap(width, height);
1113 }
1114
1115 Region flippedRegion;
1116 for (auto rect : mDirtyRegion) {
1117 int left = rect.left;
1118 int right = rect.right;
1119 int top = height - rect.bottom; // Flip from OpenGL convention
1120 int bottom = height - rect.top; // Flip from OpenGL convention
1121 switch (mTransform ^ mStickyTransform) {
1122 case NATIVE_WINDOW_TRANSFORM_ROT_90: {
1123 // Rotate 270 degrees
1124 Rect flippedRect{top, width - right, bottom, width - left};
1125 flippedRegion.orSelf(flippedRect);
1126 break;
1127 }
1128 case NATIVE_WINDOW_TRANSFORM_ROT_180: {
1129 // Rotate 180 degrees
1130 Rect flippedRect{width - right, height - bottom,
1131 width - left, height - top};
1132 flippedRegion.orSelf(flippedRect);
1133 break;
1134 }
1135 case NATIVE_WINDOW_TRANSFORM_ROT_270: {
1136 // Rotate 90 degrees
1137 Rect flippedRect{height - bottom, left,
1138 height - top, right};
1139 flippedRegion.orSelf(flippedRect);
1140 break;
1141 }
1142 default: {
1143 Rect flippedRect{left, top, right, bottom};
1144 flippedRegion.orSelf(flippedRect);
1145 break;
1146 }
1147 }
1148 }
1149
1150 input.setSurfaceDamage(flippedRegion);
1151 }
1152 *out = input;
1153 }
1154
1155 void Surface::applyGrallocMetadataLocked(
1156 android_native_buffer_t* buffer,
1157 const IGraphicBufferProducer::QueueBufferInput& queueBufferInput) {
1158 ATRACE_CALL();
1159 auto& mapper = GraphicBufferMapper::get();
1160 mapper.setDataspace(buffer->handle, static_cast<ui::Dataspace>(queueBufferInput.dataSpace));
1161 if (mHdrMetadataIsSet & HdrMetadata::SMPTE2086)
1162 mapper.setSmpte2086(buffer->handle, queueBufferInput.getHdrMetadata().getSmpte2086());
1163 if (mHdrMetadataIsSet & HdrMetadata::CTA861_3)
1164 mapper.setCta861_3(buffer->handle, queueBufferInput.getHdrMetadata().getCta8613());
1165 if (mHdrMetadataIsSet & HdrMetadata::HDR10PLUS)
1166 mapper.setSmpte2094_40(buffer->handle, queueBufferInput.getHdrMetadata().getHdr10Plus());
1167 }
1168
1169 void Surface::onBufferQueuedLocked(int slot, sp<Fence> fence,
1170 const IGraphicBufferProducer::QueueBufferOutput& output) {
1171 mDequeuedSlots.erase(slot);
1172
1173 if (mEnableFrameTimestamps) {
1174 mFrameEventHistory->applyDelta(output.frameTimestamps);
1175 // Update timestamps with the local acquire fence.
1176 // The consumer doesn't send it back to prevent us from having two
1177 // file descriptors of the same fence.
1178 mFrameEventHistory->updateAcquireFence(mNextFrameNumber,
1179 std::make_shared<FenceTime>(fence));
1180
1181 // Cache timestamps of signaled fences so we can close their file
1182 // descriptors.
1183 mFrameEventHistory->updateSignalTimes();
1184 }
1185
1186 mLastFrameNumber = mNextFrameNumber;
1187
1188 mDefaultWidth = output.width;
1189 mDefaultHeight = output.height;
1190 mNextFrameNumber = output.nextFrameNumber;
1191
1192 // Ignore transform hint if sticky transform is set or transform to display inverse flag is
1193 // set.
1194 if (mStickyTransform == 0 && !transformToDisplayInverse()) {
1195 mTransformHint = output.transformHint;
1196 }
1197
1198 mConsumerRunningBehind = (output.numPendingBuffers >= 2);
1199
1200 if (!mConnectedToCpu) {
1201 // Clear surface damage back to full-buffer
1202 mDirtyRegion = Region::INVALID_REGION;
1203 }
1204
1205 if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot == slot) {
1206 mSharedBufferHasBeenQueued = true;
1207 }
1208
1209 mQueueBufferCondition.broadcast();
1210
1211 if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
1212 static gui::FenceMonitor gpuCompletionThread("GPU completion");
1213 gpuCompletionThread.queueFence(fence);
1214 }
1215 }
1216
1217 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
1218
1219 int Surface::queueBuffer(android_native_buffer_t* buffer, int fenceFd,
1220 SurfaceQueueBufferOutput* surfaceOutput) {
1221 ATRACE_CALL();
1222 ALOGV("Surface::queueBuffer");
1223
1224 IGraphicBufferProducer::QueueBufferOutput output;
1225 IGraphicBufferProducer::QueueBufferInput input;
1226 int slot;
1227 sp<Fence> fence;
1228 {
1229 Mutex::Autolock lock(mMutex);
1230
1231 slot = getSlotFromBufferLocked(buffer);
1232 if (slot < 0) {
1233 if (fenceFd >= 0) {
1234 close(fenceFd);
1235 }
1236 return slot;
1237 }
1238 if (mSharedBufferSlot == slot && mSharedBufferHasBeenQueued) {
1239 if (fenceFd >= 0) {
1240 close(fenceFd);
1241 }
1242 return OK;
1243 }
1244
1245 getQueueBufferInputLocked(buffer, fenceFd, mTimestamp, &input);
1246 applyGrallocMetadataLocked(buffer, input);
1247 fence = input.fence;
1248 }
1249 nsecs_t now = systemTime();
1250 // Drop the lock temporarily while we touch the underlying producer. In the case of a local
1251 // BufferQueue, the following should be allowable:
1252 //
1253 // Surface::queueBuffer
1254 // -> IConsumerListener::onFrameAvailable callback triggers automatically
1255 // -> implementation calls IGraphicBufferConsumer::acquire/release immediately
1256 // -> SurfaceListener::onBufferRelesed callback triggers automatically
1257 // -> implementation calls Surface::dequeueBuffer
1258 status_t err = mGraphicBufferProducer->queueBuffer(slot, input, &output);
1259 {
1260 Mutex::Autolock lock(mMutex);
1261
1262 mLastQueueDuration = systemTime() - now;
1263 if (err != OK) {
1264 ALOGE("queueBuffer: error queuing buffer, %d", err);
1265 }
1266
1267 onBufferQueuedLocked(slot, fence, output);
1268 }
1269
1270 if (surfaceOutput != nullptr) {
1271 *surfaceOutput = {.bufferReplaced = output.bufferReplaced};
1272 }
1273
1274 return err;
1275 }
1276
1277 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
1278 int Surface::queueBuffers(const std::vector<BatchQueuedBuffer>& buffers,
1279 std::vector<SurfaceQueueBufferOutput>* queueBufferOutputs)
1280 #else
1281 int Surface::queueBuffers(const std::vector<BatchQueuedBuffer>& buffers)
1282 #endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
1283 {
1284 ATRACE_CALL();
1285 ALOGV("Surface::queueBuffers");
1286
1287 size_t numBuffers = buffers.size();
1288 std::vector<IGraphicBufferProducer::QueueBufferInput> igbpQueueBufferInputs(numBuffers);
1289 std::vector<IGraphicBufferProducer::QueueBufferOutput> igbpQueueBufferOutputs;
1290 std::vector<int> bufferSlots(numBuffers, -1);
1291 std::vector<sp<Fence>> bufferFences(numBuffers);
1292
1293 int err;
1294 {
1295 Mutex::Autolock lock(mMutex);
1296
1297 if (mSharedBufferMode) {
1298 ALOGE("%s: batched operation is not supported in shared buffer mode", __FUNCTION__);
1299 return INVALID_OPERATION;
1300 }
1301
1302 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1303 int i = getSlotFromBufferLocked(buffers[batchIdx].buffer);
1304 if (i < 0) {
1305 if (buffers[batchIdx].fenceFd >= 0) {
1306 close(buffers[batchIdx].fenceFd);
1307 }
1308 return i;
1309 }
1310 bufferSlots[batchIdx] = i;
1311
1312 IGraphicBufferProducer::QueueBufferInput input;
1313 getQueueBufferInputLocked(buffers[batchIdx].buffer, buffers[batchIdx].fenceFd,
1314 buffers[batchIdx].timestamp, &input);
1315 input.slot = i;
1316 bufferFences[batchIdx] = input.fence;
1317 igbpQueueBufferInputs[batchIdx] = input;
1318 }
1319 }
1320 nsecs_t now = systemTime();
1321 err = mGraphicBufferProducer->queueBuffers(igbpQueueBufferInputs, &igbpQueueBufferOutputs);
1322 {
1323 Mutex::Autolock lock(mMutex);
1324 mLastQueueDuration = systemTime() - now;
1325 if (err != OK) {
1326 ALOGE("%s: error queuing buffer, %d", __FUNCTION__, err);
1327 }
1328
1329 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1330 onBufferQueuedLocked(bufferSlots[batchIdx], bufferFences[batchIdx],
1331 igbpQueueBufferOutputs[batchIdx]);
1332 }
1333 }
1334
1335 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
1336 if (queueBufferOutputs != nullptr) {
1337 queueBufferOutputs->clear();
1338 queueBufferOutputs->resize(numBuffers);
1339 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1340 (*queueBufferOutputs)[batchIdx].bufferReplaced =
1341 igbpQueueBufferOutputs[batchIdx].bufferReplaced;
1342 }
1343 }
1344 #endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
1345
1346 return err;
1347 }
1348
1349 #else
1350
1351 int Surface::queueBuffer(android_native_buffer_t* buffer, int fenceFd) {
1352 ATRACE_CALL();
1353 ALOGV("Surface::queueBuffer");
1354 Mutex::Autolock lock(mMutex);
1355
1356 int i = getSlotFromBufferLocked(buffer);
1357 if (i < 0) {
1358 if (fenceFd >= 0) {
1359 close(fenceFd);
1360 }
1361 return i;
1362 }
1363 if (mSharedBufferSlot == i && mSharedBufferHasBeenQueued) {
1364 if (fenceFd >= 0) {
1365 close(fenceFd);
1366 }
1367 return OK;
1368 }
1369
1370 IGraphicBufferProducer::QueueBufferOutput output;
1371 IGraphicBufferProducer::QueueBufferInput input;
1372 getQueueBufferInputLocked(buffer, fenceFd, mTimestamp, &input);
1373 applyGrallocMetadataLocked(buffer, input);
1374 sp<Fence> fence = input.fence;
1375
1376 nsecs_t now = systemTime();
1377
1378 status_t err = mGraphicBufferProducer->queueBuffer(i, input, &output);
1379 mLastQueueDuration = systemTime() - now;
1380 if (err != OK) {
1381 ALOGE("queueBuffer: error queuing buffer, %d", err);
1382 }
1383
1384 onBufferQueuedLocked(i, fence, output);
1385 return err;
1386 }
1387
1388 int Surface::queueBuffers(const std::vector<BatchQueuedBuffer>& buffers) {
1389 ATRACE_CALL();
1390 ALOGV("Surface::queueBuffers");
1391 Mutex::Autolock lock(mMutex);
1392
1393 if (mSharedBufferMode) {
1394 ALOGE("%s: batched operation is not supported in shared buffer mode", __FUNCTION__);
1395 return INVALID_OPERATION;
1396 }
1397
1398 size_t numBuffers = buffers.size();
1399 std::vector<IGraphicBufferProducer::QueueBufferInput> queueBufferInputs(numBuffers);
1400 std::vector<IGraphicBufferProducer::QueueBufferOutput> queueBufferOutputs;
1401 std::vector<int> bufferSlots(numBuffers, -1);
1402 std::vector<sp<Fence>> bufferFences(numBuffers);
1403
1404 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1405 int i = getSlotFromBufferLocked(buffers[batchIdx].buffer);
1406 if (i < 0) {
1407 if (buffers[batchIdx].fenceFd >= 0) {
1408 close(buffers[batchIdx].fenceFd);
1409 }
1410 return i;
1411 }
1412 bufferSlots[batchIdx] = i;
1413
1414 IGraphicBufferProducer::QueueBufferInput input;
1415 getQueueBufferInputLocked(
1416 buffers[batchIdx].buffer, buffers[batchIdx].fenceFd, buffers[batchIdx].timestamp,
1417 &input);
1418 bufferFences[batchIdx] = input.fence;
1419 queueBufferInputs[batchIdx] = input;
1420 }
1421
1422 nsecs_t now = systemTime();
1423 status_t err = mGraphicBufferProducer->queueBuffers(queueBufferInputs, &queueBufferOutputs);
1424 mLastQueueDuration = systemTime() - now;
1425 if (err != OK) {
1426 ALOGE("%s: error queuing buffer, %d", __FUNCTION__, err);
1427 }
1428
1429
1430 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1431 onBufferQueuedLocked(bufferSlots[batchIdx], bufferFences[batchIdx],
1432 queueBufferOutputs[batchIdx]);
1433 }
1434
1435 return err;
1436 }
1437
1438 #endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
1439
1440 void Surface::querySupportedTimestampsLocked() const {
1441 // mMutex must be locked when calling this method.
1442
1443 if (mQueriedSupportedTimestamps) {
1444 return;
1445 }
1446 mQueriedSupportedTimestamps = true;
1447
1448 std::vector<FrameEvent> supportedFrameTimestamps;
1449 binder::Status status =
1450 composerServiceAIDL()->getSupportedFrameTimestamps(&supportedFrameTimestamps);
1451
1452 if (!status.isOk()) {
1453 return;
1454 }
1455
1456 for (auto sft : supportedFrameTimestamps) {
1457 if (sft == FrameEvent::DISPLAY_PRESENT) {
1458 mFrameTimestampsSupportsPresent = true;
1459 }
1460 }
1461 }
1462
1463 int Surface::query(int what, int* value) const {
1464 ATRACE_CALL();
1465 ALOGV("Surface::query");
1466 { // scope for the lock
1467 Mutex::Autolock lock(mMutex);
1468 switch (what) {
1469 case NATIVE_WINDOW_FORMAT:
1470 if (mReqFormat) {
1471 *value = static_cast<int>(mReqFormat);
1472 return NO_ERROR;
1473 }
1474 break;
1475 case NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER: {
1476 status_t err = mGraphicBufferProducer->query(what, value);
1477 if (err == NO_ERROR) {
1478 return NO_ERROR;
1479 }
1480 sp<gui::ISurfaceComposer> surfaceComposer = composerServiceAIDL();
1481 if (surfaceComposer == nullptr) {
1482 return -EPERM; // likely permissions error
1483 }
1484 // ISurfaceComposer no longer supports authenticateSurfaceTexture
1485 *value = 0;
1486 return NO_ERROR;
1487 }
1488 case NATIVE_WINDOW_CONCRETE_TYPE:
1489 *value = NATIVE_WINDOW_SURFACE;
1490 return NO_ERROR;
1491 case NATIVE_WINDOW_DEFAULT_WIDTH:
1492 *value = static_cast<int>(
1493 mUserWidth ? mUserWidth : mDefaultWidth);
1494 return NO_ERROR;
1495 case NATIVE_WINDOW_DEFAULT_HEIGHT:
1496 *value = static_cast<int>(
1497 mUserHeight ? mUserHeight : mDefaultHeight);
1498 return NO_ERROR;
1499 case NATIVE_WINDOW_TRANSFORM_HINT:
1500 *value = static_cast<int>(getTransformHint());
1501 return NO_ERROR;
1502 case NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND: {
1503 status_t err = NO_ERROR;
1504 if (!mConsumerRunningBehind) {
1505 *value = 0;
1506 } else {
1507 err = mGraphicBufferProducer->query(what, value);
1508 if (err == NO_ERROR) {
1509 mConsumerRunningBehind = *value;
1510 }
1511 }
1512 return err;
1513 }
1514 case NATIVE_WINDOW_BUFFER_AGE: {
1515 if (mBufferAge > INT32_MAX) {
1516 *value = 0;
1517 } else {
1518 *value = static_cast<int32_t>(mBufferAge);
1519 }
1520 return NO_ERROR;
1521 }
1522 case NATIVE_WINDOW_LAST_DEQUEUE_DURATION: {
1523 int64_t durationUs = mLastDequeueDuration / 1000;
1524 *value = durationUs > std::numeric_limits<int>::max() ?
1525 std::numeric_limits<int>::max() :
1526 static_cast<int>(durationUs);
1527 return NO_ERROR;
1528 }
1529 case NATIVE_WINDOW_LAST_QUEUE_DURATION: {
1530 int64_t durationUs = mLastQueueDuration / 1000;
1531 *value = durationUs > std::numeric_limits<int>::max() ?
1532 std::numeric_limits<int>::max() :
1533 static_cast<int>(durationUs);
1534 return NO_ERROR;
1535 }
1536 case NATIVE_WINDOW_FRAME_TIMESTAMPS_SUPPORTS_PRESENT: {
1537 querySupportedTimestampsLocked();
1538 *value = mFrameTimestampsSupportsPresent ? 1 : 0;
1539 return NO_ERROR;
1540 }
1541 case NATIVE_WINDOW_IS_VALID: {
1542 *value = mGraphicBufferProducer != nullptr ? 1 : 0;
1543 return NO_ERROR;
1544 }
1545 case NATIVE_WINDOW_DATASPACE: {
1546 *value = static_cast<int>(mDataSpace);
1547 return NO_ERROR;
1548 }
1549 case NATIVE_WINDOW_MAX_BUFFER_COUNT: {
1550 *value = mMaxBufferCount;
1551 return NO_ERROR;
1552 }
1553 }
1554 }
1555 return mGraphicBufferProducer->query(what, value);
1556 }
1557
1558 int Surface::perform(int operation, va_list args)
1559 {
1560 int res = NO_ERROR;
1561 switch (operation) {
1562 case NATIVE_WINDOW_CONNECT:
1563 // deprecated. must return NO_ERROR.
1564 break;
1565 case NATIVE_WINDOW_DISCONNECT:
1566 // deprecated. must return NO_ERROR.
1567 break;
1568 case NATIVE_WINDOW_SET_USAGE:
1569 res = dispatchSetUsage(args);
1570 break;
1571 case NATIVE_WINDOW_SET_CROP:
1572 res = dispatchSetCrop(args);
1573 break;
1574 case NATIVE_WINDOW_SET_BUFFER_COUNT:
1575 res = dispatchSetBufferCount(args);
1576 break;
1577 case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
1578 res = dispatchSetBuffersGeometry(args);
1579 break;
1580 case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
1581 res = dispatchSetBuffersTransform(args);
1582 break;
1583 case NATIVE_WINDOW_SET_BUFFERS_STICKY_TRANSFORM:
1584 res = dispatchSetBuffersStickyTransform(args);
1585 break;
1586 case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
1587 res = dispatchSetBuffersTimestamp(args);
1588 break;
1589 case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
1590 res = dispatchSetBuffersDimensions(args);
1591 break;
1592 case NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS:
1593 res = dispatchSetBuffersUserDimensions(args);
1594 break;
1595 case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
1596 res = dispatchSetBuffersFormat(args);
1597 break;
1598 case NATIVE_WINDOW_LOCK:
1599 res = dispatchLock(args);
1600 break;
1601 case NATIVE_WINDOW_UNLOCK_AND_POST:
1602 res = dispatchUnlockAndPost(args);
1603 break;
1604 case NATIVE_WINDOW_SET_SCALING_MODE:
1605 res = dispatchSetScalingMode(args);
1606 break;
1607 case NATIVE_WINDOW_API_CONNECT:
1608 res = dispatchConnect(args);
1609 break;
1610 case NATIVE_WINDOW_API_DISCONNECT:
1611 res = dispatchDisconnect(args);
1612 break;
1613 case NATIVE_WINDOW_SET_SIDEBAND_STREAM:
1614 res = dispatchSetSidebandStream(args);
1615 break;
1616 case NATIVE_WINDOW_SET_BUFFERS_DATASPACE:
1617 res = dispatchSetBuffersDataSpace(args);
1618 break;
1619 case NATIVE_WINDOW_SET_BUFFERS_SMPTE2086_METADATA:
1620 res = dispatchSetBuffersSmpte2086Metadata(args);
1621 break;
1622 case NATIVE_WINDOW_SET_BUFFERS_CTA861_3_METADATA:
1623 res = dispatchSetBuffersCta8613Metadata(args);
1624 break;
1625 case NATIVE_WINDOW_SET_BUFFERS_HDR10_PLUS_METADATA:
1626 res = dispatchSetBuffersHdr10PlusMetadata(args);
1627 break;
1628 case NATIVE_WINDOW_SET_SURFACE_DAMAGE:
1629 res = dispatchSetSurfaceDamage(args);
1630 break;
1631 case NATIVE_WINDOW_SET_SHARED_BUFFER_MODE:
1632 res = dispatchSetSharedBufferMode(args);
1633 break;
1634 case NATIVE_WINDOW_SET_AUTO_REFRESH:
1635 res = dispatchSetAutoRefresh(args);
1636 break;
1637 case NATIVE_WINDOW_GET_REFRESH_CYCLE_DURATION:
1638 res = dispatchGetDisplayRefreshCycleDuration(args);
1639 break;
1640 case NATIVE_WINDOW_GET_NEXT_FRAME_ID:
1641 res = dispatchGetNextFrameId(args);
1642 break;
1643 case NATIVE_WINDOW_ENABLE_FRAME_TIMESTAMPS:
1644 res = dispatchEnableFrameTimestamps(args);
1645 break;
1646 case NATIVE_WINDOW_GET_COMPOSITOR_TIMING:
1647 res = dispatchGetCompositorTiming(args);
1648 break;
1649 case NATIVE_WINDOW_GET_FRAME_TIMESTAMPS:
1650 res = dispatchGetFrameTimestamps(args);
1651 break;
1652 case NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT:
1653 res = dispatchGetWideColorSupport(args);
1654 break;
1655 case NATIVE_WINDOW_GET_HDR_SUPPORT:
1656 res = dispatchGetHdrSupport(args);
1657 break;
1658 case NATIVE_WINDOW_SET_USAGE64:
1659 res = dispatchSetUsage64(args);
1660 break;
1661 case NATIVE_WINDOW_GET_CONSUMER_USAGE64:
1662 res = dispatchGetConsumerUsage64(args);
1663 break;
1664 case NATIVE_WINDOW_SET_AUTO_PREROTATION:
1665 res = dispatchSetAutoPrerotation(args);
1666 break;
1667 case NATIVE_WINDOW_GET_LAST_DEQUEUE_START:
1668 res = dispatchGetLastDequeueStartTime(args);
1669 break;
1670 case NATIVE_WINDOW_SET_DEQUEUE_TIMEOUT:
1671 res = dispatchSetDequeueTimeout(args);
1672 break;
1673 case NATIVE_WINDOW_GET_LAST_DEQUEUE_DURATION:
1674 res = dispatchGetLastDequeueDuration(args);
1675 break;
1676 case NATIVE_WINDOW_GET_LAST_QUEUE_DURATION:
1677 res = dispatchGetLastQueueDuration(args);
1678 break;
1679 case NATIVE_WINDOW_SET_FRAME_RATE:
1680 res = dispatchSetFrameRate(args);
1681 break;
1682 case NATIVE_WINDOW_SET_CANCEL_INTERCEPTOR:
1683 res = dispatchAddCancelInterceptor(args);
1684 break;
1685 case NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR:
1686 res = dispatchAddDequeueInterceptor(args);
1687 break;
1688 case NATIVE_WINDOW_SET_PERFORM_INTERCEPTOR:
1689 res = dispatchAddPerformInterceptor(args);
1690 break;
1691 case NATIVE_WINDOW_SET_QUEUE_INTERCEPTOR:
1692 res = dispatchAddQueueInterceptor(args);
1693 break;
1694 case NATIVE_WINDOW_SET_QUERY_INTERCEPTOR:
1695 res = dispatchAddQueryInterceptor(args);
1696 break;
1697 case NATIVE_WINDOW_ALLOCATE_BUFFERS:
1698 allocateBuffers();
1699 res = NO_ERROR;
1700 break;
1701 case NATIVE_WINDOW_GET_LAST_QUEUED_BUFFER:
1702 res = dispatchGetLastQueuedBuffer(args);
1703 break;
1704 case NATIVE_WINDOW_GET_LAST_QUEUED_BUFFER2:
1705 res = dispatchGetLastQueuedBuffer2(args);
1706 break;
1707 case NATIVE_WINDOW_SET_FRAME_TIMELINE_INFO:
1708 res = dispatchSetFrameTimelineInfo(args);
1709 break;
1710 case NATIVE_WINDOW_SET_BUFFERS_ADDITIONAL_OPTIONS:
1711 res = dispatchSetAdditionalOptions(args);
1712 break;
1713 default:
1714 res = NAME_NOT_FOUND;
1715 break;
1716 }
1717 return res;
1718 }
1719
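// The dispatch* helpers below unpack va_list arguments in exactly the order, and with exactly the
// types, that the corresponding native_window_* wrappers push them; a mismatch is undefined
// behavior, so new opcodes must keep both sides in sync.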
1720 int Surface::dispatchConnect(va_list args) {
1721 int api = va_arg(args, int);
1722 return connect(api);
1723 }
1724
1725 int Surface::dispatchDisconnect(va_list args) {
1726 int api = va_arg(args, int);
1727 return disconnect(api);
1728 }
1729
1730 int Surface::dispatchSetUsage(va_list args) {
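    // NATIVE_WINDOW_SET_USAGE is the legacy 32-bit variant, so the argument is read as uint32_t
    // and widened; full 64-bit usage bits arrive via NATIVE_WINDOW_SET_USAGE64 below.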
1731 uint64_t usage = va_arg(args, uint32_t);
1732 return setUsage(usage);
1733 }
1734
1735 int Surface::dispatchSetUsage64(va_list args) {
1736 uint64_t usage = va_arg(args, uint64_t);
1737 return setUsage(usage);
1738 }
1739
1740 int Surface::dispatchSetCrop(va_list args) {
1741 android_native_rect_t const* rect = va_arg(args, android_native_rect_t*);
1742 return setCrop(reinterpret_cast<Rect const*>(rect));
1743 }
1744
1745 int Surface::dispatchSetBufferCount(va_list args) {
1746 size_t bufferCount = va_arg(args, size_t);
1747 return setBufferCount(static_cast<int32_t>(bufferCount));
1748 }
1749
1750 int Surface::dispatchSetBuffersGeometry(va_list args) {
1751 uint32_t width = va_arg(args, uint32_t);
1752 uint32_t height = va_arg(args, uint32_t);
1753 PixelFormat format = va_arg(args, PixelFormat);
1754 int err = setBuffersDimensions(width, height);
1755 if (err != 0) {
1756 return err;
1757 }
1758 return setBuffersFormat(format);
1759 }
1760
1761 int Surface::dispatchSetBuffersDimensions(va_list args) {
1762 uint32_t width = va_arg(args, uint32_t);
1763 uint32_t height = va_arg(args, uint32_t);
1764 return setBuffersDimensions(width, height);
1765 }
1766
1767 int Surface::dispatchSetBuffersUserDimensions(va_list args) {
1768 uint32_t width = va_arg(args, uint32_t);
1769 uint32_t height = va_arg(args, uint32_t);
1770 return setBuffersUserDimensions(width, height);
1771 }
1772
1773 int Surface::dispatchSetBuffersFormat(va_list args) {
1774 PixelFormat format = va_arg(args, PixelFormat);
1775 return setBuffersFormat(format);
1776 }
1777
1778 int Surface::dispatchSetScalingMode(va_list args) {
1779 int mode = va_arg(args, int);
1780 return setScalingMode(mode);
1781 }
1782
1783 int Surface::dispatchSetBuffersTransform(va_list args) {
1784 uint32_t transform = va_arg(args, uint32_t);
1785 return setBuffersTransform(transform);
1786 }
1787
1788 int Surface::dispatchSetBuffersStickyTransform(va_list args) {
1789 uint32_t transform = va_arg(args, uint32_t);
1790 return setBuffersStickyTransform(transform);
1791 }
1792
1793 int Surface::dispatchSetBuffersTimestamp(va_list args) {
1794 int64_t timestamp = va_arg(args, int64_t);
1795 return setBuffersTimestamp(timestamp);
1796 }
1797
1798 int Surface::dispatchLock(va_list args) {
1799 ANativeWindow_Buffer* outBuffer = va_arg(args, ANativeWindow_Buffer*);
1800 ARect* inOutDirtyBounds = va_arg(args, ARect*);
1801 return lock(outBuffer, inOutDirtyBounds);
1802 }
1803
1804 int Surface::dispatchUnlockAndPost(va_list args __attribute__((unused))) {
1805 return unlockAndPost();
1806 }
1807
1808 int Surface::dispatchSetSidebandStream(va_list args) {
1809 native_handle_t* sH = va_arg(args, native_handle_t*);
1810 sp<NativeHandle> sidebandHandle = NativeHandle::create(sH, false);
1811 setSidebandStream(sidebandHandle);
1812 return OK;
1813 }
1814
1815 int Surface::dispatchSetBuffersDataSpace(va_list args) {
1816 Dataspace dataspace = static_cast<Dataspace>(va_arg(args, int));
1817 return setBuffersDataSpace(dataspace);
1818 }
1819
1820 int Surface::dispatchSetBuffersSmpte2086Metadata(va_list args) {
1821 const android_smpte2086_metadata* metadata =
1822 va_arg(args, const android_smpte2086_metadata*);
1823 return setBuffersSmpte2086Metadata(metadata);
1824 }
1825
1826 int Surface::dispatchSetBuffersCta8613Metadata(va_list args) {
1827 const android_cta861_3_metadata* metadata =
1828 va_arg(args, const android_cta861_3_metadata*);
1829 return setBuffersCta8613Metadata(metadata);
1830 }
1831
1832 int Surface::dispatchSetBuffersHdr10PlusMetadata(va_list args) {
1833 const size_t size = va_arg(args, size_t);
1834 const uint8_t* metadata = va_arg(args, const uint8_t*);
1835 return setBuffersHdr10PlusMetadata(size, metadata);
1836 }
1837
1838 int Surface::dispatchSetSurfaceDamage(va_list args) {
1839 android_native_rect_t* rects = va_arg(args, android_native_rect_t*);
1840 size_t numRects = va_arg(args, size_t);
1841 setSurfaceDamage(rects, numRects);
1842 return NO_ERROR;
1843 }
1844
1845 int Surface::dispatchSetSharedBufferMode(va_list args) {
1846 bool sharedBufferMode = va_arg(args, int);
1847 return setSharedBufferMode(sharedBufferMode);
1848 }
1849
1850 int Surface::dispatchSetAutoRefresh(va_list args) {
1851 bool autoRefresh = va_arg(args, int);
1852 return setAutoRefresh(autoRefresh);
1853 }
1854
1855 int Surface::dispatchGetDisplayRefreshCycleDuration(va_list args) {
1856 nsecs_t* outRefreshDuration = va_arg(args, int64_t*);
1857 return getDisplayRefreshCycleDuration(outRefreshDuration);
1858 }
1859
1860 int Surface::dispatchGetNextFrameId(va_list args) {
1861 uint64_t* nextFrameId = va_arg(args, uint64_t*);
1862 *nextFrameId = getNextFrameNumber();
1863 return NO_ERROR;
1864 }
1865
1866 int Surface::dispatchEnableFrameTimestamps(va_list args) {
1867 bool enable = va_arg(args, int);
1868 enableFrameTimestamps(enable);
1869 return NO_ERROR;
1870 }
1871
1872 int Surface::dispatchGetCompositorTiming(va_list args) {
1873 nsecs_t* compositeDeadline = va_arg(args, int64_t*);
1874 nsecs_t* compositeInterval = va_arg(args, int64_t*);
1875 nsecs_t* compositeToPresentLatency = va_arg(args, int64_t*);
1876 return getCompositorTiming(compositeDeadline, compositeInterval,
1877 compositeToPresentLatency);
1878 }
1879
1880 int Surface::dispatchGetFrameTimestamps(va_list args) {
1881 uint64_t frameId = va_arg(args, uint64_t);
1882 nsecs_t* outRequestedPresentTime = va_arg(args, int64_t*);
1883 nsecs_t* outAcquireTime = va_arg(args, int64_t*);
1884 nsecs_t* outLatchTime = va_arg(args, int64_t*);
1885 nsecs_t* outFirstRefreshStartTime = va_arg(args, int64_t*);
1886 nsecs_t* outLastRefreshStartTime = va_arg(args, int64_t*);
1887 nsecs_t* outGpuCompositionDoneTime = va_arg(args, int64_t*);
1888 nsecs_t* outDisplayPresentTime = va_arg(args, int64_t*);
1889 nsecs_t* outDequeueReadyTime = va_arg(args, int64_t*);
1890 nsecs_t* outReleaseTime = va_arg(args, int64_t*);
1891 return getFrameTimestamps(frameId,
1892 outRequestedPresentTime, outAcquireTime, outLatchTime,
1893 outFirstRefreshStartTime, outLastRefreshStartTime,
1894 outGpuCompositionDoneTime, outDisplayPresentTime,
1895 outDequeueReadyTime, outReleaseTime);
1896 }
1897
1898 int Surface::dispatchGetWideColorSupport(va_list args) {
1899 bool* outSupport = va_arg(args, bool*);
1900 return getWideColorSupport(outSupport);
1901 }
1902
1903 int Surface::dispatchGetHdrSupport(va_list args) {
1904 bool* outSupport = va_arg(args, bool*);
1905 return getHdrSupport(outSupport);
1906 }
1907
1908 int Surface::dispatchGetConsumerUsage64(va_list args) {
1909 uint64_t* usage = va_arg(args, uint64_t*);
1910 return getConsumerUsage(usage);
1911 }
1912
1913 int Surface::dispatchSetAutoPrerotation(va_list args) {
1914 bool autoPrerotation = va_arg(args, int);
1915 return setAutoPrerotation(autoPrerotation);
1916 }
1917
1918 int Surface::dispatchGetLastDequeueStartTime(va_list args) {
1919 int64_t* lastDequeueStartTime = va_arg(args, int64_t*);
1920 *lastDequeueStartTime = mLastDequeueStartTime;
1921 return NO_ERROR;
1922 }
1923
1924 int Surface::dispatchSetDequeueTimeout(va_list args) {
1925 nsecs_t timeout = va_arg(args, int64_t);
1926 return setDequeueTimeout(timeout);
1927 }
1928
1929 int Surface::dispatchGetLastDequeueDuration(va_list args) {
1930 int64_t* lastDequeueDuration = va_arg(args, int64_t*);
1931 *lastDequeueDuration = mLastDequeueDuration;
1932 return NO_ERROR;
1933 }
1934
1935 int Surface::dispatchGetLastQueueDuration(va_list args) {
1936 int64_t* lastQueueDuration = va_arg(args, int64_t*);
1937 *lastQueueDuration = mLastQueueDuration;
1938 return NO_ERROR;
1939 }
1940
1941 int Surface::dispatchSetFrameRate(va_list args) {
1942 float frameRate = static_cast<float>(va_arg(args, double));
1943 int8_t compatibility = static_cast<int8_t>(va_arg(args, int));
1944 int8_t changeFrameRateStrategy = static_cast<int8_t>(va_arg(args, int));
1945 return setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
1946 }
1947
1948 int Surface::dispatchAddCancelInterceptor(va_list args) {
1949 ANativeWindow_cancelBufferInterceptor interceptor =
1950 va_arg(args, ANativeWindow_cancelBufferInterceptor);
1951 void* data = va_arg(args, void*);
1952 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1953 mCancelInterceptor = interceptor;
1954 mCancelInterceptorData = data;
1955 return NO_ERROR;
1956 }
1957
1958 int Surface::dispatchAddDequeueInterceptor(va_list args) {
1959 ANativeWindow_dequeueBufferInterceptor interceptor =
1960 va_arg(args, ANativeWindow_dequeueBufferInterceptor);
1961 void* data = va_arg(args, void*);
1962 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1963 mDequeueInterceptor = interceptor;
1964 mDequeueInterceptorData = data;
1965 return NO_ERROR;
1966 }
1967
1968 int Surface::dispatchAddPerformInterceptor(va_list args) {
1969 ANativeWindow_performInterceptor interceptor = va_arg(args, ANativeWindow_performInterceptor);
1970 void* data = va_arg(args, void*);
1971 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1972 mPerformInterceptor = interceptor;
1973 mPerformInterceptorData = data;
1974 return NO_ERROR;
1975 }
1976
1977 int Surface::dispatchAddQueueInterceptor(va_list args) {
1978 ANativeWindow_queueBufferInterceptor interceptor =
1979 va_arg(args, ANativeWindow_queueBufferInterceptor);
1980 void* data = va_arg(args, void*);
1981 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1982 mQueueInterceptor = interceptor;
1983 mQueueInterceptorData = data;
1984 return NO_ERROR;
1985 }
1986
1987 int Surface::dispatchAddQueryInterceptor(va_list args) {
1988 ANativeWindow_queryInterceptor interceptor = va_arg(args, ANativeWindow_queryInterceptor);
1989 void* data = va_arg(args, void*);
1990 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1991 mQueryInterceptor = interceptor;
1992 mQueryInterceptorData = data;
1993 return NO_ERROR;
1994 }
1995
1996 int Surface::dispatchGetLastQueuedBuffer(va_list args) {
1997 AHardwareBuffer** buffer = va_arg(args, AHardwareBuffer**);
1998 int* fence = va_arg(args, int*);
1999 float* matrix = va_arg(args, float*);
2000 sp<GraphicBuffer> graphicBuffer;
2001 sp<Fence> spFence;
2002
2003 int result = mGraphicBufferProducer->getLastQueuedBuffer(&graphicBuffer, &spFence, matrix);
2004
2005 if (graphicBuffer != nullptr) {
2006 *buffer = graphicBuffer->toAHardwareBuffer();
2007 AHardwareBuffer_acquire(*buffer);
2008 } else {
2009 *buffer = nullptr;
2010 }
2011
2012 if (spFence != nullptr) {
2013 *fence = spFence->dup();
2014 } else {
2015 *fence = -1;
2016 }
2017 return result;
2018 }
2019
2020 int Surface::dispatchGetLastQueuedBuffer2(va_list args) {
2021 AHardwareBuffer** buffer = va_arg(args, AHardwareBuffer**);
2022 int* fence = va_arg(args, int*);
2023 ARect* crop = va_arg(args, ARect*);
2024 uint32_t* transform = va_arg(args, uint32_t*);
2025 sp<GraphicBuffer> graphicBuffer;
2026 sp<Fence> spFence;
2027
2028 Rect r;
2029 int result =
2030 mGraphicBufferProducer->getLastQueuedBuffer(&graphicBuffer, &spFence, &r, transform);
2031
2032 if (graphicBuffer != nullptr) {
2033 *buffer = graphicBuffer->toAHardwareBuffer();
2034 AHardwareBuffer_acquire(*buffer);
2035
2036 // Avoid setting crop* unless buffer is valid (matches IGBP behavior)
2037 crop->left = r.left;
2038 crop->top = r.top;
2039 crop->right = r.right;
2040 crop->bottom = r.bottom;
2041 } else {
2042 *buffer = nullptr;
2043 }
2044
2045 if (spFence != nullptr) {
2046 *fence = spFence->dup();
2047 } else {
2048 *fence = -1;
2049 }
2050 return result;
2051 }
2052
2053 int Surface::dispatchSetFrameTimelineInfo(va_list args) {
2054 ATRACE_CALL();
2055 ALOGV("Surface::%s", __func__);
2056
2057 const auto nativeWindowFtlInfo = static_cast<ANativeWindowFrameTimelineInfo>(
2058 va_arg(args, ANativeWindowFrameTimelineInfo));
2059
2060 FrameTimelineInfo ftlInfo;
2061 ftlInfo.vsyncId = nativeWindowFtlInfo.frameTimelineVsyncId;
2062 ftlInfo.inputEventId = nativeWindowFtlInfo.inputEventId;
2063 ftlInfo.startTimeNanos = nativeWindowFtlInfo.startTimeNanos;
2064 ftlInfo.useForRefreshRateSelection = nativeWindowFtlInfo.useForRefreshRateSelection;
2065 ftlInfo.skippedFrameVsyncId = nativeWindowFtlInfo.skippedFrameVsyncId;
2066 ftlInfo.skippedFrameStartTimeNanos = nativeWindowFtlInfo.skippedFrameStartTimeNanos;
2067
2068 return setFrameTimelineInfo(nativeWindowFtlInfo.frameNumber, ftlInfo);
2069 }
2070
2071 int Surface::dispatchSetAdditionalOptions(va_list args) {
2072 ATRACE_CALL();
2073
2074 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_EXTENDEDALLOCATE)
2075 const AHardwareBufferLongOptions* opts = va_arg(args, const AHardwareBufferLongOptions*);
2076 const size_t optsSize = va_arg(args, size_t);
2077 std::vector<gui::AdditionalOptions> convertedOpts;
2078 convertedOpts.reserve(optsSize);
2079 for (size_t i = 0; i < optsSize; i++) {
2080 convertedOpts.emplace_back(opts[i].name, opts[i].value);
2081 }
2082 return setAdditionalOptions(convertedOpts);
2083 #else
2084 (void)args;
2085 return INVALID_OPERATION;
2086 #endif
2087 }
2088
2089 bool Surface::transformToDisplayInverse() const {
2090 return (mTransform & NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) ==
2091 NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
2092 }
2093
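// Connecting associates a producer API (NATIVE_WINDOW_API_*) with this Surface and pulls the
// initial defaults (size, transform hint, max buffer count) from the BufferQueue. Illustrative
// client-side sketch, assuming "win" is an ANativeWindow* backed by this Surface:
//
//     native_window_api_connect(win, NATIVE_WINDOW_API_CPU);
//     // ... dequeue/queue buffers ...
//     native_window_api_disconnect(win, NATIVE_WINDOW_API_CPU);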
2094 int Surface::connect(int api) {
2095 static sp<SurfaceListener> listener = sp<StubSurfaceListener>::make();
2096 return connect(api, listener);
2097 }
2098
2099 int Surface::connect(int api, const sp<SurfaceListener>& listener, bool reportBufferRemoval) {
2100 ATRACE_CALL();
2101 ALOGV("Surface::connect");
2102 Mutex::Autolock lock(mMutex);
2103 IGraphicBufferProducer::QueueBufferOutput output;
2104 mReportRemovedBuffers = reportBufferRemoval;
2105
2106 if (listener != nullptr) {
2107 mListenerProxy = sp<ProducerListenerProxy>::make(this, listener);
2108 }
2109
2110 int err =
2111 mGraphicBufferProducer->connect(mListenerProxy, api, mProducerControlledByApp, &output);
2112 if (err == NO_ERROR) {
2113 mDefaultWidth = output.width;
2114 mDefaultHeight = output.height;
2115 mNextFrameNumber = output.nextFrameNumber;
2116 mMaxBufferCount = output.maxBufferCount;
2117 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
2118 mIsSlotExpansionAllowed = output.isSlotExpansionAllowed;
2119 #endif
2120
2121 // Ignore transform hint if sticky transform is set or transform to display inverse flag is
2122 // set. Transform hint should be ignored if the client is expected to always submit buffers
2123 // in the same orientation.
2124 if (mStickyTransform == 0 && !transformToDisplayInverse()) {
2125 mTransformHint = output.transformHint;
2126 }
2127
2128 mConsumerRunningBehind = (output.numPendingBuffers >= 2);
2129
2130 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
2131 if (listener && listener->needsDeathNotify()) {
2132 mSurfaceDeathListener = sp<ProducerDeathListenerProxy>::make(listener);
2133 IInterface::asBinder(mGraphicBufferProducer)->linkToDeath(mSurfaceDeathListener);
2134 }
2135 #endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
2136 }
2137 if (!err && api == NATIVE_WINDOW_API_CPU) {
2138 mConnectedToCpu = true;
2139 // Clear the dirty region in case we're switching from a non-CPU API
2140 mDirtyRegion.clear();
2141 } else if (!err) {
2142 // Initialize the dirty region for tracking surface damage
2143 mDirtyRegion = Region::INVALID_REGION;
2144 }
2145
2146 return err;
2147 }
2148
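// disconnect() frees all locally cached buffer slots and, on success, resets the requested
// geometry, format, usage and transform state back to their defaults.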
2149 int Surface::disconnect(int api, IGraphicBufferProducer::DisconnectMode mode) {
2150 ATRACE_CALL();
2151 ALOGV("Surface::disconnect");
2152 Mutex::Autolock lock(mMutex);
2153 mRemovedBuffers.clear();
2154 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2155 mSharedBufferHasBeenQueued = false;
2156 freeAllBuffers();
2157 int err = mGraphicBufferProducer->disconnect(api, mode);
2158 if (!err) {
2159 mReqFormat = 0;
2160 mReqWidth = 0;
2161 mReqHeight = 0;
2162 mReqUsage = 0;
2163 mCrop.clear();
2164 mDataSpace = Dataspace::UNKNOWN;
2165 mScalingMode = NATIVE_WINDOW_SCALING_MODE_FREEZE;
2166 mTransform = 0;
2167 mStickyTransform = 0;
2168 mAutoPrerotation = false;
2169 mEnableFrameTimestamps = false;
2170 mMaxBufferCount = NUM_BUFFER_SLOTS;
2171
2172 if (api == NATIVE_WINDOW_API_CPU) {
2173 mConnectedToCpu = false;
2174 }
2175 }
2176
2177 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
2178 if (mSurfaceDeathListener != nullptr) {
2179 IInterface::asBinder(mGraphicBufferProducer)->unlinkToDeath(mSurfaceDeathListener);
2180 mSurfaceDeathListener = nullptr;
2181 }
2182 #endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_PLATFORM_API_IMPROVEMENTS)
2183
2184 return err;
2185 }
2186
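// Detaches the next free buffer from the producer and clears the matching local slot so the
// buffer is no longer tracked by this Surface; typically paired with attachBuffer() on another
// surface to migrate buffers.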
2187 int Surface::detachNextBuffer(sp<GraphicBuffer>* outBuffer,
2188 sp<Fence>* outFence) {
2189 ATRACE_CALL();
2190 ALOGV("Surface::detachNextBuffer");
2191
2192 if (outBuffer == nullptr || outFence == nullptr) {
2193 return BAD_VALUE;
2194 }
2195
2196 Mutex::Autolock lock(mMutex);
2197 if (mReportRemovedBuffers) {
2198 mRemovedBuffers.clear();
2199 }
2200
2201 sp<GraphicBuffer> buffer(nullptr);
2202 sp<Fence> fence(nullptr);
2203 status_t result = mGraphicBufferProducer->detachNextBuffer(
2204 &buffer, &fence);
2205 if (result != NO_ERROR) {
2206 return result;
2207 }
2208
2209 *outBuffer = buffer;
2210 if (fence != nullptr && fence->isValid()) {
2211 *outFence = fence;
2212 } else {
2213 *outFence = Fence::NO_FENCE;
2214 }
2215
2216 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
2217 for (int i = 0; i < (int)mSlots.size(); i++) {
2218 #else
2219 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
2220 #endif
2221 if (mSlots[i].buffer != nullptr &&
2222 mSlots[i].buffer->getId() == buffer->getId()) {
2223 if (mReportRemovedBuffers) {
2224 mRemovedBuffers.push_back(mSlots[i].buffer);
2225 }
2226 mSlots[i].buffer = nullptr;
2227 }
2228 }
2229
2230 return NO_ERROR;
2231 }
2232
2233 int Surface::isBufferOwned(const sp<GraphicBuffer>& buffer, bool* outIsOwned) const {
2234 ATRACE_CALL();
2235
2236 if (buffer == nullptr) {
2237 ALOGE("%s: Bad input, buffer was null", __FUNCTION__);
2238 return BAD_VALUE;
2239 }
2240 if (outIsOwned == nullptr) {
2241 ALOGE("%s: Bad input, output was null", __FUNCTION__);
2242 return BAD_VALUE;
2243 }
2244
2245 Mutex::Autolock lock(mMutex);
2246
2247 int slot = this->getSlotFromBufferLocked(buffer->getNativeBuffer());
2248 if (slot == BAD_VALUE) {
2249 ALOGV("%s: Buffer %" PRIu64 " is not owned", __FUNCTION__, buffer->getId());
2250 *outIsOwned = false;
2251 return NO_ERROR;
2252 } else if (slot < 0) {
2253         ALOGV("%s: Buffer %" PRIu64 " lookup failed (%d)", __FUNCTION__, buffer->getId(), slot);
2254 *outIsOwned = false;
2255 return slot;
2256 }
2257
2258 *outIsOwned = true;
2259 return NO_ERROR;
2260 }
2261
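// Attaches a caller-owned buffer into a free slot of this Surface's BufferQueue. The buffer's
// generation number is rewritten to match this surface before attaching and restored if the
// producer call fails; on success the slot is also tracked as dequeued.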
2262 int Surface::attachBuffer(ANativeWindowBuffer* buffer)
2263 {
2264 ATRACE_CALL();
2265 sp<GraphicBuffer> graphicBuffer(static_cast<GraphicBuffer*>(buffer));
2266
2267 ALOGV("Surface::attachBuffer bufferId=%" PRIu64, graphicBuffer->getId());
2268
2269 Mutex::Autolock lock(mMutex);
2270 if (mReportRemovedBuffers) {
2271 mRemovedBuffers.clear();
2272 }
2273
2274 uint32_t priorGeneration = graphicBuffer->mGenerationNumber;
2275 graphicBuffer->mGenerationNumber = mGenerationNumber;
2276 int32_t attachedSlot = -1;
2277 status_t result = mGraphicBufferProducer->attachBuffer(&attachedSlot, graphicBuffer);
2278 if (result != NO_ERROR) {
2279 ALOGE("attachBuffer: IGraphicBufferProducer call failed (%d)", result);
2280 graphicBuffer->mGenerationNumber = priorGeneration;
2281 return result;
2282 }
2283 if (mReportRemovedBuffers && (mSlots[attachedSlot].buffer != nullptr)) {
2284 mRemovedBuffers.push_back(mSlots[attachedSlot].buffer);
2285 }
2286 mSlots[attachedSlot].buffer = graphicBuffer;
2287 mDequeuedSlots.insert(attachedSlot);
2288
2289 return NO_ERROR;
2290 }
2291
2292 int Surface::setUsage(uint64_t reqUsage)
2293 {
2294 ALOGV("Surface::setUsage");
2295 Mutex::Autolock lock(mMutex);
2296 if (reqUsage != mReqUsage) {
2297 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2298 }
2299 mReqUsage = reqUsage;
2300 return OK;
2301 }
2302
2303 int Surface::setCrop(Rect const* rect)
2304 {
2305 ATRACE_CALL();
2306
2307 Rect realRect(Rect::EMPTY_RECT);
2308 if (rect == nullptr || rect->isEmpty()) {
2309 realRect.clear();
2310 } else {
2311 realRect = *rect;
2312 }
2313
2314 ALOGV("Surface::setCrop rect=[%d %d %d %d]",
2315 realRect.left, realRect.top, realRect.right, realRect.bottom);
2316
2317 Mutex::Autolock lock(mMutex);
2318 mCrop = realRect;
2319 return NO_ERROR;
2320 }
2321
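// setBufferCount() implements the legacy "total buffer count" contract on top of
// setMaxDequeuedBufferCount(): the consumer's minimum undequeued count is subtracted first.
// For example, with bufferCount == 3 and NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS == 1, the
// producer is asked for setMaxDequeuedBufferCount(2); bufferCount == 0 resets it to 1.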
2322 int Surface::setBufferCount(int bufferCount)
2323 {
2324 ATRACE_CALL();
2325 ALOGV("Surface::setBufferCount");
2326 Mutex::Autolock lock(mMutex);
2327
2328 status_t err = NO_ERROR;
2329 if (bufferCount == 0) {
2330 err = mGraphicBufferProducer->setMaxDequeuedBufferCount(1);
2331 } else {
2332 int minUndequeuedBuffers = 0;
2333 err = mGraphicBufferProducer->query(
2334 NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
2335 if (err == NO_ERROR) {
2336 err = mGraphicBufferProducer->setMaxDequeuedBufferCount(
2337 bufferCount - minUndequeuedBuffers);
2338 }
2339 }
2340
2341 ALOGE_IF(err, "IGraphicBufferProducer::setBufferCount(%d) returned %s",
2342 bufferCount, strerror(-err));
2343
2344 return err;
2345 }
2346
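// When unlimited slots are enabled, requests larger than the current slot count grow the local
// slot array and ask the producer to extend its slot count before applying the new max-dequeued
// value; otherwise the request is forwarded to the producer unchanged.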
2347 int Surface::setMaxDequeuedBufferCount(int maxDequeuedBuffers) {
2348 ATRACE_CALL();
2349 ALOGV("Surface::setMaxDequeuedBufferCount");
2350 Mutex::Autolock lock(mMutex);
2351
2352 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
2353 if (maxDequeuedBuffers > BufferQueueDefs::NUM_BUFFER_SLOTS && !mIsSlotExpansionAllowed) {
2354 return BAD_VALUE;
2355 }
2356
2357 int minUndequeuedBuffers = 0;
2358 status_t err = mGraphicBufferProducer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
2359 &minUndequeuedBuffers);
2360 if (err != OK) {
2361 ALOGE("IGraphicBufferProducer::query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS) returned %s",
2362 strerror(-err));
2363 return err;
2364 }
2365
2366 if (maxDequeuedBuffers > (int)mSlots.size()) {
2367 int newSlotCount = minUndequeuedBuffers + maxDequeuedBuffers;
2368 err = mGraphicBufferProducer->extendSlotCount(newSlotCount);
2369 if (err != OK) {
2370 ALOGE("IGraphicBufferProducer::extendSlotCount(%d) returned %s", newSlotCount,
2371 strerror(-err));
2372 return err;
2373 }
2374
2375 mSlots.resize(newSlotCount);
2376 }
2377 err = mGraphicBufferProducer->setMaxDequeuedBufferCount(maxDequeuedBuffers);
2378 #else
2379 status_t err = mGraphicBufferProducer->setMaxDequeuedBufferCount(maxDequeuedBuffers);
2380 #endif
2381 ALOGE_IF(err, "IGraphicBufferProducer::setMaxDequeuedBufferCount(%d) "
2382 "returned %s", maxDequeuedBuffers, strerror(-err));
2383
2384 return err;
2385 }
2386
2387 int Surface::setAsyncMode(bool async) {
2388 ATRACE_CALL();
2389 ALOGV("Surface::setAsyncMode");
2390 Mutex::Autolock lock(mMutex);
2391
2392 status_t err = mGraphicBufferProducer->setAsyncMode(async);
2393 ALOGE_IF(err, "IGraphicBufferProducer::setAsyncMode(%d) returned %s",
2394 async, strerror(-err));
2395
2396 return err;
2397 }
2398
2399 int Surface::setSharedBufferMode(bool sharedBufferMode) {
2400 ATRACE_CALL();
2401 ALOGV("Surface::setSharedBufferMode (%d)", sharedBufferMode);
2402 Mutex::Autolock lock(mMutex);
2403
2404 status_t err = mGraphicBufferProducer->setSharedBufferMode(
2405 sharedBufferMode);
2406 if (err == NO_ERROR) {
2407 mSharedBufferMode = sharedBufferMode;
2408 }
2409     ALOGE_IF(err, "IGraphicBufferProducer::setSharedBufferMode(%d) returned %s",
2410             sharedBufferMode, strerror(-err));
2411
2412 return err;
2413 }
2414
2415 int Surface::setAutoRefresh(bool autoRefresh) {
2416 ATRACE_CALL();
2417 ALOGV("Surface::setAutoRefresh (%d)", autoRefresh);
2418 Mutex::Autolock lock(mMutex);
2419
2420 status_t err = mGraphicBufferProducer->setAutoRefresh(autoRefresh);
2421 if (err == NO_ERROR) {
2422 mAutoRefresh = autoRefresh;
2423 }
2424 ALOGE_IF(err, "IGraphicBufferProducer::setAutoRefresh(%d) returned %s",
2425 autoRefresh, strerror(-err));
2426 return err;
2427 }
2428
2429 int Surface::setBuffersDimensions(uint32_t width, uint32_t height)
2430 {
2431 ATRACE_CALL();
2432 ALOGV("Surface::setBuffersDimensions");
2433
2434 if ((width && !height) || (!width && height))
2435 return BAD_VALUE;
2436
2437 Mutex::Autolock lock(mMutex);
2438 if (width != mReqWidth || height != mReqHeight) {
2439 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2440 }
2441 mReqWidth = width;
2442 mReqHeight = height;
2443 return NO_ERROR;
2444 }
2445
2446 int Surface::setBuffersUserDimensions(uint32_t width, uint32_t height)
2447 {
2448 ATRACE_CALL();
2449 ALOGV("Surface::setBuffersUserDimensions");
2450
2451 if ((width && !height) || (!width && height))
2452 return BAD_VALUE;
2453
2454 Mutex::Autolock lock(mMutex);
2455 if (width != mUserWidth || height != mUserHeight) {
2456 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2457 }
2458 mUserWidth = width;
2459 mUserHeight = height;
2460 return NO_ERROR;
2461 }
2462
2463 int Surface::setBuffersFormat(PixelFormat format)
2464 {
2465 ALOGV("Surface::setBuffersFormat");
2466
2467 Mutex::Autolock lock(mMutex);
2468 if (format != mReqFormat) {
2469 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2470 }
2471 mReqFormat = format;
2472 return NO_ERROR;
2473 }
2474
2475 int Surface::setScalingMode(int mode)
2476 {
2477 ATRACE_CALL();
2478 ALOGV("Surface::setScalingMode(%d)", mode);
2479
2480 switch (mode) {
2481 case NATIVE_WINDOW_SCALING_MODE_FREEZE:
2482 case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
2483 case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
2484 case NATIVE_WINDOW_SCALING_MODE_NO_SCALE_CROP:
2485 break;
2486 default:
2487 ALOGE("unknown scaling mode: %d", mode);
2488 return BAD_VALUE;
2489 }
2490
2491 Mutex::Autolock lock(mMutex);
2492 mScalingMode = mode;
2493 return NO_ERROR;
2494 }
2495
2496 int Surface::setBuffersTransform(uint32_t transform)
2497 {
2498 ATRACE_CALL();
2499 ALOGV("Surface::setBuffersTransform");
2500 Mutex::Autolock lock(mMutex);
2501     // Ensure NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY is sticky: if the client sets the flag, do not
2502     // override it until the surface is disconnected. This is a temporary workaround for the camera
2503     // stack until it switches to Buffer State Layers. Currently, if the client sets the buffer
2504     // transform, it may be overridden by the buffer producer when the producer sets its own transform.
2505 if (transformToDisplayInverse()) {
2506 transform |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
2507 }
2508 mTransform = transform;
2509 return NO_ERROR;
2510 }
2511
2512 int Surface::setBuffersStickyTransform(uint32_t transform)
2513 {
2514 ATRACE_CALL();
2515 ALOGV("Surface::setBuffersStickyTransform");
2516 Mutex::Autolock lock(mMutex);
2517 mStickyTransform = transform;
2518 return NO_ERROR;
2519 }
2520
2521 int Surface::setBuffersTimestamp(int64_t timestamp)
2522 {
2523 ALOGV("Surface::setBuffersTimestamp");
2524 Mutex::Autolock lock(mMutex);
2525 mTimestamp = timestamp;
2526 return NO_ERROR;
2527 }
2528
2529 int Surface::setBuffersDataSpace(Dataspace dataSpace)
2530 {
2531 ALOGV("Surface::setBuffersDataSpace");
2532 Mutex::Autolock lock(mMutex);
2533 mDataSpace = dataSpace;
2534 return NO_ERROR;
2535 }
2536
2537 int Surface::setBuffersSmpte2086Metadata(const android_smpte2086_metadata* metadata) {
2538 ALOGV("Surface::setBuffersSmpte2086Metadata");
2539 Mutex::Autolock lock(mMutex);
2540 mHdrMetadataIsSet |= HdrMetadata::SMPTE2086;
2541 if (metadata) {
2542 mHdrMetadata.smpte2086 = *metadata;
2543 mHdrMetadata.validTypes |= HdrMetadata::SMPTE2086;
2544 } else {
2545 mHdrMetadata.validTypes &= ~HdrMetadata::SMPTE2086;
2546 }
2547 return NO_ERROR;
2548 }
2549
2550 int Surface::setBuffersCta8613Metadata(const android_cta861_3_metadata* metadata) {
2551 ALOGV("Surface::setBuffersCta8613Metadata");
2552 Mutex::Autolock lock(mMutex);
2553 mHdrMetadataIsSet |= HdrMetadata::CTA861_3;
2554 if (metadata) {
2555 mHdrMetadata.cta8613 = *metadata;
2556 mHdrMetadata.validTypes |= HdrMetadata::CTA861_3;
2557 } else {
2558 mHdrMetadata.validTypes &= ~HdrMetadata::CTA861_3;
2559 }
2560 return NO_ERROR;
2561 }
2562
2563 int Surface::setBuffersHdr10PlusMetadata(const size_t size, const uint8_t* metadata) {
2564     ALOGV("Surface::setBuffersHdr10PlusMetadata");
2565 Mutex::Autolock lock(mMutex);
2566 mHdrMetadataIsSet |= HdrMetadata::HDR10PLUS;
2567 if (size > 0) {
2568 mHdrMetadata.hdr10plus.assign(metadata, metadata + size);
2569 mHdrMetadata.validTypes |= HdrMetadata::HDR10PLUS;
2570 } else {
2571 mHdrMetadata.validTypes &= ~HdrMetadata::HDR10PLUS;
2572 mHdrMetadata.hdr10plus.clear();
2573 }
2574 return NO_ERROR;
2575 }
2576
2577 Dataspace Surface::getBuffersDataSpace() {
2578 ALOGV("Surface::getBuffersDataSpace");
2579 Mutex::Autolock lock(mMutex);
2580 return mDataSpace;
2581 }
2582
2583 void Surface::freeAllBuffers() {
2584 if (!mDequeuedSlots.empty()) {
2585 ALOGE("%s: %zu buffers were freed while being dequeued!",
2586 __FUNCTION__, mDequeuedSlots.size());
2587 }
2588 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
2589 for (int i = 0; i < (int)mSlots.size(); i++) {
2590 #else
2591 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
2592 #endif
2593 mSlots[i].buffer = nullptr;
2594 }
2595 }
2596
2597 status_t Surface::getAndFlushBuffersFromSlots(const std::vector<int32_t>& slots,
2598 std::vector<sp<GraphicBuffer>>* outBuffers) {
2599 ALOGV("Surface::getAndFlushBuffersFromSlots");
2600 for (int32_t i : slots) {
2601 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
2602 if (i < 0 || i >= (int)mSlots.size()) {
2603 #else
2604 if (i < 0 || i >= NUM_BUFFER_SLOTS) {
2605 #endif
2606 ALOGE("%s: Invalid slotIndex: %d", __FUNCTION__, i);
2607 return BAD_VALUE;
2608 }
2609 }
2610
2611 Mutex::Autolock lock(mMutex);
2612 for (int32_t i : slots) {
2613 if (mSlots[i].buffer == nullptr) {
2614 ALOGW("%s: Discarded slot %d doesn't contain buffer!", __FUNCTION__, i);
2615 continue;
2616 }
2617 // Don't flush currently dequeued buffers
2618 if (mDequeuedSlots.count(i) > 0) {
2619 continue;
2620 }
2621 outBuffers->push_back(mSlots[i].buffer);
2622 mSlots[i].buffer = nullptr;
2623 }
2624 return OK;
2625 }
2626
2627 void Surface::setSurfaceDamage(android_native_rect_t* rects, size_t numRects) {
2628 ATRACE_CALL();
2629 ALOGV("Surface::setSurfaceDamage");
2630 Mutex::Autolock lock(mMutex);
2631
2632 if (mConnectedToCpu || numRects == 0) {
2633 mDirtyRegion = Region::INVALID_REGION;
2634 return;
2635 }
2636
2637 mDirtyRegion.clear();
2638 for (size_t r = 0; r < numRects; ++r) {
2639         // We intentionally flip top and bottom here: because the rects are
2640         // specified with a bottom-left origin, top > bottom, which would fail
2641         // validation in the Region class. We fix this up when we flip to a
2642         // top-left origin in queueBuffer.
2643 Rect rect(rects[r].left, rects[r].bottom, rects[r].right, rects[r].top);
2644 mDirtyRegion.orSelf(rect);
2645 }
2646 }
2647
2648 // ----------------------------------------------------------------------
2649 // the lock/unlock APIs must be used from the same thread
2650
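// copyBlt() copies the pixels of "reg" from the previously posted front buffer into the freshly
// dequeued back buffer, so regions the caller did not redraw this frame keep their old contents.
// The caller guarantees matching width, height and format.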
2651 static status_t copyBlt(
2652 const sp<GraphicBuffer>& dst,
2653 const sp<GraphicBuffer>& src,
2654 const Region& reg,
2655 int *dstFenceFd)
2656 {
2657 if (dst->getId() == src->getId())
2658 return OK;
2659
2660     // src and dst width, height and format must be identical; no verification
2661     // is done here.
2662 status_t err;
2663 uint8_t* src_bits = nullptr;
2664 err = src->lock(GRALLOC_USAGE_SW_READ_OFTEN, reg.bounds(),
2665 reinterpret_cast<void**>(&src_bits));
2666 ALOGE_IF(err, "error locking src buffer %s", strerror(-err));
2667
2668 uint8_t* dst_bits = nullptr;
2669 err = dst->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, reg.bounds(),
2670 reinterpret_cast<void**>(&dst_bits), *dstFenceFd);
2671 ALOGE_IF(err, "error locking dst buffer %s", strerror(-err));
2672 *dstFenceFd = -1;
2673
2674 Region::const_iterator head(reg.begin());
2675 Region::const_iterator tail(reg.end());
2676 if (head != tail && src_bits && dst_bits) {
2677 const size_t bpp = bytesPerPixel(src->format);
2678 const size_t dbpr = static_cast<uint32_t>(dst->stride) * bpp;
2679 const size_t sbpr = static_cast<uint32_t>(src->stride) * bpp;
2680
2681 while (head != tail) {
2682 const Rect& r(*head++);
2683 int32_t h = r.height();
2684 if (h <= 0) continue;
2685 size_t size = static_cast<uint32_t>(r.width()) * bpp;
2686 uint8_t const * s = src_bits +
2687 static_cast<uint32_t>(r.left + src->stride * r.top) * bpp;
2688 uint8_t * d = dst_bits +
2689 static_cast<uint32_t>(r.left + dst->stride * r.top) * bpp;
2690 if (dbpr==sbpr && size==sbpr) {
2691 size *= static_cast<size_t>(h);
2692 h = 1;
2693 }
2694 do {
2695 memcpy(d, s, size);
2696 d += dbpr;
2697 s += sbpr;
2698 } while (--h > 0);
2699 }
2700 }
2701
2702 if (src_bits)
2703 src->unlock();
2704
2705 if (dst_bits)
2706 dst->unlockAsync(dstFenceFd);
2707
2708 return err;
2709 }
2710
2711 // ----------------------------------------------------------------------------
2712
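// lock()/unlockAndPost() implement the software-rendering path: connect as NATIVE_WINDOW_API_CPU
// on first use, dequeue a back buffer, copy back any still-valid front-buffer pixels, and map the
// buffer for CPU access. A minimal illustrative client sketch (assumes a valid ANativeWindow*
// "win" backed by this Surface; these NDK calls route here through the perform/lock hooks):
//
//     ANativeWindow_Buffer buffer;
//     if (ANativeWindow_lock(win, &buffer, nullptr) == 0) {
//         // write pixels via buffer.bits, honoring buffer.stride and buffer.format
//         ANativeWindow_unlockAndPost(win);
//     }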
2713 status_t Surface::lock(
2714 ANativeWindow_Buffer* outBuffer, ARect* inOutDirtyBounds)
2715 {
2716 if (mLockedBuffer != nullptr) {
2717 ALOGE("Surface::lock failed, already locked");
2718 return INVALID_OPERATION;
2719 }
2720
2721 if (!mConnectedToCpu) {
2722 int err = Surface::connect(NATIVE_WINDOW_API_CPU);
2723 if (err) {
2724 return err;
2725 }
2726 // we're intending to do software rendering from this point
2727 setUsage(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN);
2728 }
2729
2730 ANativeWindowBuffer* out;
2731 int fenceFd = -1;
2732 status_t err = dequeueBuffer(&out, &fenceFd);
2733 ALOGE_IF(err, "dequeueBuffer failed (%s)", strerror(-err));
2734 if (err == NO_ERROR) {
2735 sp<GraphicBuffer> backBuffer(GraphicBuffer::getSelf(out));
2736 const Rect bounds(backBuffer->width, backBuffer->height);
2737
2738 Region newDirtyRegion;
2739 if (inOutDirtyBounds) {
2740 newDirtyRegion.set(static_cast<Rect const&>(*inOutDirtyBounds));
2741 newDirtyRegion.andSelf(bounds);
2742 } else {
2743 newDirtyRegion.set(bounds);
2744 }
2745
2746 // figure out if we can copy the frontbuffer back
2747 const sp<GraphicBuffer>& frontBuffer(mPostedBuffer);
2748 const bool canCopyBack = (frontBuffer != nullptr &&
2749 backBuffer->width == frontBuffer->width &&
2750 backBuffer->height == frontBuffer->height &&
2751 backBuffer->format == frontBuffer->format);
2752
2753 if (canCopyBack) {
2754 // copy the area that is invalid and not repainted this round
2755 const Region copyback(mDirtyRegion.subtract(newDirtyRegion));
2756 if (!copyback.isEmpty()) {
2757 copyBlt(backBuffer, frontBuffer, copyback, &fenceFd);
2758 }
2759 } else {
2760 // if we can't copy-back anything, modify the user's dirty
2761 // region to make sure they redraw the whole buffer
2762 newDirtyRegion.set(bounds);
2763 mDirtyRegion.clear();
2764 Mutex::Autolock lock(mMutex);
2765 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_UNLIMITED_SLOTS)
2766 for (int i = 0; i < (int)mSlots.size(); i++) {
2767 #else
2768 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
2769 #endif
2770 mSlots[i].dirtyRegion.clear();
2771 }
2772 }
2773
2775 { // scope for the lock
2776 Mutex::Autolock lock(mMutex);
2777 int backBufferSlot(getSlotFromBufferLocked(backBuffer.get()));
2778 if (backBufferSlot >= 0) {
2779 Region& dirtyRegion(mSlots[backBufferSlot].dirtyRegion);
2780 mDirtyRegion.subtract(dirtyRegion);
2781 dirtyRegion = newDirtyRegion;
2782 }
2783 }
2784
2785 mDirtyRegion.orSelf(newDirtyRegion);
2786 if (inOutDirtyBounds) {
2787 *inOutDirtyBounds = newDirtyRegion.getBounds();
2788 }
2789
2790 void* vaddr;
2791 status_t res = backBuffer->lockAsync(
2792 GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
2793 newDirtyRegion.bounds(), &vaddr, fenceFd);
2794
2795 ALOGW_IF(res, "failed locking buffer (handle = %p)",
2796 backBuffer->handle);
2797
2798 if (res != 0) {
2799 err = INVALID_OPERATION;
2800 } else {
2801 mLockedBuffer = backBuffer;
2802 outBuffer->width = backBuffer->width;
2803 outBuffer->height = backBuffer->height;
2804 outBuffer->stride = backBuffer->stride;
2805 outBuffer->format = backBuffer->format;
2806 outBuffer->bits = vaddr;
2807 }
2808 }
2809 return err;
2810 }
2811
2812 status_t Surface::unlockAndPost()
2813 {
2814 if (mLockedBuffer == nullptr) {
2815 ALOGE("Surface::unlockAndPost failed, no locked buffer");
2816 return INVALID_OPERATION;
2817 }
2818
2819 int fd = -1;
2820 status_t err = mLockedBuffer->unlockAsync(&fd);
2821 ALOGE_IF(err, "failed unlocking buffer (%p)", mLockedBuffer->handle);
2822
2823 err = queueBuffer(mLockedBuffer.get(), fd);
2824 ALOGE_IF(err, "queueBuffer (handle=%p) failed (%s)",
2825 mLockedBuffer->handle, strerror(-err));
2826
2827 mPostedBuffer = mLockedBuffer;
2828 mLockedBuffer = nullptr;
2829 return err;
2830 }
2831
2832 bool Surface::waitForNextFrame(uint64_t lastFrame, nsecs_t timeout) {
2833 Mutex::Autolock lock(mMutex);
2834 if (mLastFrameNumber > lastFrame) {
2835 return true;
2836 }
2837 return mQueueBufferCondition.waitRelative(mMutex, timeout) == OK;
2838 }
2839
2840 status_t Surface::getUniqueId(uint64_t* outId) const {
2841 Mutex::Autolock lock(mMutex);
2842 return mGraphicBufferProducer->getUniqueId(outId);
2843 }
2844
2845 int Surface::getConsumerUsage(uint64_t* outUsage) const {
2846 Mutex::Autolock lock(mMutex);
2847 return mGraphicBufferProducer->getConsumerUsage(outUsage);
2848 }
2849
2850 status_t Surface::getAndFlushRemovedBuffers(std::vector<sp<GraphicBuffer>>* out) {
2851 if (out == nullptr) {
2852 ALOGE("%s: out must not be null!", __FUNCTION__);
2853 return BAD_VALUE;
2854 }
2855
2856 Mutex::Autolock lock(mMutex);
2857 *out = mRemovedBuffers;
2858 mRemovedBuffers.clear();
2859 return OK;
2860 }
2861
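// Helper used to push a single caller-owned buffer through a Surface: it connects as
// NATIVE_WINDOW_API_CPU, temporarily overrides the dataspace, attaches and queues the buffer,
// then restores the previous dataspace and disconnects.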
2862 status_t Surface::attachAndQueueBufferWithDataspace(Surface* surface, sp<GraphicBuffer> buffer,
2863 Dataspace dataspace) {
2864 if (buffer == nullptr) {
2865 return BAD_VALUE;
2866 }
2867 int err = static_cast<ANativeWindow*>(surface)->perform(surface, NATIVE_WINDOW_API_CONNECT,
2868 NATIVE_WINDOW_API_CPU);
2869 if (err != OK) {
2870 return err;
2871 }
2872 ui::Dataspace tmpDataspace = surface->getBuffersDataSpace();
2873 err = surface->setBuffersDataSpace(dataspace);
2874 if (err != OK) {
2875 return err;
2876 }
2877 err = surface->attachBuffer(buffer->getNativeBuffer());
2878 if (err != OK) {
2879 return err;
2880 }
2881 err = static_cast<ANativeWindow*>(surface)->queueBuffer(surface, buffer->getNativeBuffer(), -1);
2882 if (err != OK) {
2883 return err;
2884 }
2885 err = surface->setBuffersDataSpace(tmpDataspace);
2886 if (err != OK) {
2887 return err;
2888 }
2889 err = surface->disconnect(NATIVE_WINDOW_API_CPU);
2890 return err;
2891 }
2892
2893 int Surface::setAutoPrerotation(bool autoPrerotation) {
2894 ATRACE_CALL();
2895 ALOGV("Surface::setAutoPrerotation (%d)", autoPrerotation);
2896 Mutex::Autolock lock(mMutex);
2897
2898 if (mAutoPrerotation == autoPrerotation) {
2899 return OK;
2900 }
2901
2902 status_t err = mGraphicBufferProducer->setAutoPrerotation(autoPrerotation);
2903 if (err == NO_ERROR) {
2904 mAutoPrerotation = autoPrerotation;
2905 }
2906 ALOGE_IF(err, "IGraphicBufferProducer::setAutoPrerotation(%d) returned %s", autoPrerotation,
2907 strerror(-err));
2908 return err;
2909 }
2910
2911 void Surface::ProducerListenerProxy::onBuffersDiscarded(const std::vector<int32_t>& slots) {
2912 ATRACE_CALL();
2913 sp<Surface> parent = mParent.promote();
2914 if (parent == nullptr) {
2915 return;
2916 }
2917
2918 std::vector<sp<GraphicBuffer>> discardedBufs;
2919 status_t res = parent->getAndFlushBuffersFromSlots(slots, &discardedBufs);
2920 if (res != OK) {
2921 ALOGE("%s: Failed to get buffers from slots: %s(%d)", __FUNCTION__,
2922 strerror(-res), res);
2923 return;
2924 }
2925
2926 mSurfaceListener->onBuffersDiscarded(discardedBufs);
2927 }
2928
2929 status_t Surface::setFrameRate(float frameRate, int8_t compatibility,
2930 int8_t changeFrameRateStrategy) {
2931 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_SETFRAMERATE)
2932 if (flags::bq_setframerate()) {
2933 status_t err = mGraphicBufferProducer->setFrameRate(frameRate, compatibility,
2934 changeFrameRateStrategy);
2935 ALOGE_IF(err, "IGraphicBufferProducer::setFrameRate(%.2f) returned %s", frameRate,
2936 strerror(-err));
2937 return err;
2938 }
2939 #else
2940 static_cast<void>(frameRate);
2941 static_cast<void>(compatibility);
2942 static_cast<void>(changeFrameRateStrategy);
2943 #endif
2944
2945     ALOGI("Surface::setFrameRate is deprecated; the setFrameRate hint is dropped because the "
2946           "destination is not SurfaceFlinger");
2947     // ISurfaceComposer no longer supports setFrameRate. Return NO_ERROR when the API is called
2948     // to avoid crashing apps, since BAD_VALUE can trigger a fatal exception in apps.
2949 return NO_ERROR;
2950 }
2951
2952 status_t Surface::setFrameTimelineInfo(uint64_t /*frameNumber*/,
2953 const FrameTimelineInfo& /*frameTimelineInfo*/) {
2954 // ISurfaceComposer no longer supports setFrameTimelineInfo
2955 return BAD_VALUE;
2956 }
2957
2958 #if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_EXTENDEDALLOCATE)
2959 status_t Surface::setAdditionalOptions(const std::vector<gui::AdditionalOptions>& options) {
2960 if (!GraphicBufferAllocator::get().supportsAdditionalOptions()) {
2961 return INVALID_OPERATION;
2962 }
2963
2964 Mutex::Autolock lock(mMutex);
2965 return mGraphicBufferProducer->setAdditionalOptions(options);
2966 }
2967 #endif
2968
2969 sp<IBinder> Surface::getSurfaceControlHandle() const {
2970 Mutex::Autolock lock(mMutex);
2971 return mSurfaceControlHandle;
2972 }
2973
2974 void Surface::destroy() {
2975 Mutex::Autolock lock(mMutex);
2976 mSurfaceControlHandle = nullptr;
2977 }
2978
2979 const char* Surface::getDebugName() {
2980 std::unique_lock lock{mNameMutex};
2981 if (mName.empty()) {
2982 mName = getConsumerName();
2983 }
2984 return mName.c_str();
2985 }
2986
2987 } // namespace android
2988