1 /*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Surface"
18 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
19 //#define LOG_NDEBUG 0
20
21 #include <gui/Surface.h>
22
23 #include <condition_variable>
24 #include <deque>
25 #include <mutex>
26 #include <thread>
27
28 #include <inttypes.h>
29
30 #include <android/native_window.h>
31
32 #include <utils/Log.h>
33 #include <utils/Trace.h>
34 #include <utils/NativeHandle.h>
35
36 #include <ui/DisplayStatInfo.h>
37 #include <ui/DynamicDisplayInfo.h>
38 #include <ui/Fence.h>
39 #include <ui/GraphicBuffer.h>
40 #include <ui/Region.h>
41
42 #include <gui/BufferItem.h>
43 #include <gui/IProducerListener.h>
44
45 #include <gui/ISurfaceComposer.h>
46 #include <gui/LayerState.h>
47 #include <private/gui/ComposerService.h>
48
49 namespace android {
50
51 using ui::Dataspace;
52
53 namespace {
54
isInterceptorRegistrationOp(int op)55 bool isInterceptorRegistrationOp(int op) {
56 return op == NATIVE_WINDOW_SET_CANCEL_INTERCEPTOR ||
57 op == NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR ||
58 op == NATIVE_WINDOW_SET_PERFORM_INTERCEPTOR ||
59 op == NATIVE_WINDOW_SET_QUEUE_INTERCEPTOR ||
60 op == NATIVE_WINDOW_SET_QUERY_INTERCEPTOR;
61 }
62
63 } // namespace
64
Surface(const sp<IGraphicBufferProducer> & bufferProducer,bool controlledByApp,const sp<IBinder> & surfaceControlHandle)65 Surface::Surface(const sp<IGraphicBufferProducer>& bufferProducer, bool controlledByApp,
66 const sp<IBinder>& surfaceControlHandle)
67 : mGraphicBufferProducer(bufferProducer),
68 mCrop(Rect::EMPTY_RECT),
69 mBufferAge(0),
70 mGenerationNumber(0),
71 mSharedBufferMode(false),
72 mAutoRefresh(false),
73 mAutoPrerotation(false),
74 mSharedBufferSlot(BufferItem::INVALID_BUFFER_SLOT),
75 mSharedBufferHasBeenQueued(false),
76 mQueriedSupportedTimestamps(false),
77 mFrameTimestampsSupportsPresent(false),
78 mEnableFrameTimestamps(false),
79 mFrameEventHistory(std::make_unique<ProducerFrameEventHistory>()) {
80 // Initialize the ANativeWindow function pointers.
81 ANativeWindow::setSwapInterval = hook_setSwapInterval;
82 ANativeWindow::dequeueBuffer = hook_dequeueBuffer;
83 ANativeWindow::cancelBuffer = hook_cancelBuffer;
84 ANativeWindow::queueBuffer = hook_queueBuffer;
85 ANativeWindow::query = hook_query;
86 ANativeWindow::perform = hook_perform;
87
88 ANativeWindow::dequeueBuffer_DEPRECATED = hook_dequeueBuffer_DEPRECATED;
89 ANativeWindow::cancelBuffer_DEPRECATED = hook_cancelBuffer_DEPRECATED;
90 ANativeWindow::lockBuffer_DEPRECATED = hook_lockBuffer_DEPRECATED;
91 ANativeWindow::queueBuffer_DEPRECATED = hook_queueBuffer_DEPRECATED;
92
93 const_cast<int&>(ANativeWindow::minSwapInterval) = 0;
94 const_cast<int&>(ANativeWindow::maxSwapInterval) = 1;
95
96 mReqWidth = 0;
97 mReqHeight = 0;
98 mReqFormat = 0;
99 mReqUsage = 0;
100 mTimestamp = NATIVE_WINDOW_TIMESTAMP_AUTO;
101 mDataSpace = Dataspace::UNKNOWN;
102 mScalingMode = NATIVE_WINDOW_SCALING_MODE_FREEZE;
103 mTransform = 0;
104 mStickyTransform = 0;
105 mDefaultWidth = 0;
106 mDefaultHeight = 0;
107 mUserWidth = 0;
108 mUserHeight = 0;
109 mTransformHint = 0;
110 mConsumerRunningBehind = false;
111 mConnectedToCpu = false;
112 mProducerControlledByApp = controlledByApp;
113 mSwapIntervalZero = false;
114 mMaxBufferCount = NUM_BUFFER_SLOTS;
115 mSurfaceControlHandle = surfaceControlHandle;
116 }
117
~Surface()118 Surface::~Surface() {
119 if (mConnectedToCpu) {
120 Surface::disconnect(NATIVE_WINDOW_API_CPU);
121 }
122 }
123
composerService() const124 sp<ISurfaceComposer> Surface::composerService() const {
125 return ComposerService::getComposerService();
126 }
127
now() const128 nsecs_t Surface::now() const {
129 return systemTime();
130 }
131
getIGraphicBufferProducer() const132 sp<IGraphicBufferProducer> Surface::getIGraphicBufferProducer() const {
133 return mGraphicBufferProducer;
134 }
135
setSidebandStream(const sp<NativeHandle> & stream)136 void Surface::setSidebandStream(const sp<NativeHandle>& stream) {
137 mGraphicBufferProducer->setSidebandStream(stream);
138 }
139
allocateBuffers()140 void Surface::allocateBuffers() {
141 uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
142 uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
143 mGraphicBufferProducer->allocateBuffers(reqWidth, reqHeight,
144 mReqFormat, mReqUsage);
145 }
146
setGenerationNumber(uint32_t generation)147 status_t Surface::setGenerationNumber(uint32_t generation) {
148 status_t result = mGraphicBufferProducer->setGenerationNumber(generation);
149 if (result == NO_ERROR) {
150 mGenerationNumber = generation;
151 }
152 return result;
153 }
154
getNextFrameNumber() const155 uint64_t Surface::getNextFrameNumber() const {
156 Mutex::Autolock lock(mMutex);
157 return mNextFrameNumber;
158 }
159
getConsumerName() const160 String8 Surface::getConsumerName() const {
161 return mGraphicBufferProducer->getConsumerName();
162 }
163
setDequeueTimeout(nsecs_t timeout)164 status_t Surface::setDequeueTimeout(nsecs_t timeout) {
165 return mGraphicBufferProducer->setDequeueTimeout(timeout);
166 }
167
getLastQueuedBuffer(sp<GraphicBuffer> * outBuffer,sp<Fence> * outFence,float outTransformMatrix[16])168 status_t Surface::getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
169 sp<Fence>* outFence, float outTransformMatrix[16]) {
170 return mGraphicBufferProducer->getLastQueuedBuffer(outBuffer, outFence,
171 outTransformMatrix);
172 }
173
getDisplayRefreshCycleDuration(nsecs_t * outRefreshDuration)174 status_t Surface::getDisplayRefreshCycleDuration(nsecs_t* outRefreshDuration) {
175 ATRACE_CALL();
176
177 DisplayStatInfo stats;
178 status_t result = composerService()->getDisplayStats(nullptr, &stats);
179 if (result != NO_ERROR) {
180 return result;
181 }
182
183 *outRefreshDuration = stats.vsyncPeriod;
184
185 return NO_ERROR;
186 }
187
enableFrameTimestamps(bool enable)188 void Surface::enableFrameTimestamps(bool enable) {
189 Mutex::Autolock lock(mMutex);
190 // If going from disabled to enabled, get the initial values for
191 // compositor and display timing.
192 if (!mEnableFrameTimestamps && enable) {
193 FrameEventHistoryDelta delta;
194 mGraphicBufferProducer->getFrameTimestamps(&delta);
195 mFrameEventHistory->applyDelta(delta);
196 }
197 mEnableFrameTimestamps = enable;
198 }
199
getCompositorTiming(nsecs_t * compositeDeadline,nsecs_t * compositeInterval,nsecs_t * compositeToPresentLatency)200 status_t Surface::getCompositorTiming(
201 nsecs_t* compositeDeadline, nsecs_t* compositeInterval,
202 nsecs_t* compositeToPresentLatency) {
203 Mutex::Autolock lock(mMutex);
204 if (!mEnableFrameTimestamps) {
205 return INVALID_OPERATION;
206 }
207
208 if (compositeDeadline != nullptr) {
209 *compositeDeadline =
210 mFrameEventHistory->getNextCompositeDeadline(now());
211 }
212 if (compositeInterval != nullptr) {
213 *compositeInterval = mFrameEventHistory->getCompositeInterval();
214 }
215 if (compositeToPresentLatency != nullptr) {
216 *compositeToPresentLatency =
217 mFrameEventHistory->getCompositeToPresentLatency();
218 }
219 return NO_ERROR;
220 }
221
checkConsumerForUpdates(const FrameEvents * e,const uint64_t lastFrameNumber,const nsecs_t * outLatchTime,const nsecs_t * outFirstRefreshStartTime,const nsecs_t * outLastRefreshStartTime,const nsecs_t * outGpuCompositionDoneTime,const nsecs_t * outDisplayPresentTime,const nsecs_t * outDequeueReadyTime,const nsecs_t * outReleaseTime)222 static bool checkConsumerForUpdates(
223 const FrameEvents* e, const uint64_t lastFrameNumber,
224 const nsecs_t* outLatchTime,
225 const nsecs_t* outFirstRefreshStartTime,
226 const nsecs_t* outLastRefreshStartTime,
227 const nsecs_t* outGpuCompositionDoneTime,
228 const nsecs_t* outDisplayPresentTime,
229 const nsecs_t* outDequeueReadyTime,
230 const nsecs_t* outReleaseTime) {
231 bool checkForLatch = (outLatchTime != nullptr) && !e->hasLatchInfo();
232 bool checkForFirstRefreshStart = (outFirstRefreshStartTime != nullptr) &&
233 !e->hasFirstRefreshStartInfo();
234 bool checkForGpuCompositionDone = (outGpuCompositionDoneTime != nullptr) &&
235 !e->hasGpuCompositionDoneInfo();
236 bool checkForDisplayPresent = (outDisplayPresentTime != nullptr) &&
237 !e->hasDisplayPresentInfo();
238
239 // LastRefreshStart, DequeueReady, and Release are never available for the
240 // last frame.
241 bool checkForLastRefreshStart = (outLastRefreshStartTime != nullptr) &&
242 !e->hasLastRefreshStartInfo() &&
243 (e->frameNumber != lastFrameNumber);
244 bool checkForDequeueReady = (outDequeueReadyTime != nullptr) &&
245 !e->hasDequeueReadyInfo() && (e->frameNumber != lastFrameNumber);
246 bool checkForRelease = (outReleaseTime != nullptr) &&
247 !e->hasReleaseInfo() && (e->frameNumber != lastFrameNumber);
248
249 // RequestedPresent and Acquire info are always available producer-side.
250 return checkForLatch || checkForFirstRefreshStart ||
251 checkForLastRefreshStart || checkForGpuCompositionDone ||
252 checkForDisplayPresent || checkForDequeueReady || checkForRelease;
253 }
254
getFrameTimestamp(nsecs_t * dst,const nsecs_t & src)255 static void getFrameTimestamp(nsecs_t *dst, const nsecs_t& src) {
256 if (dst != nullptr) {
257 // We always get valid timestamps for these eventually.
258 *dst = (src == FrameEvents::TIMESTAMP_PENDING) ?
259 NATIVE_WINDOW_TIMESTAMP_PENDING : src;
260 }
261 }
262
getFrameTimestampFence(nsecs_t * dst,const std::shared_ptr<FenceTime> & src,bool fenceShouldBeKnown)263 static void getFrameTimestampFence(nsecs_t *dst,
264 const std::shared_ptr<FenceTime>& src, bool fenceShouldBeKnown) {
265 if (dst != nullptr) {
266 if (!fenceShouldBeKnown) {
267 *dst = NATIVE_WINDOW_TIMESTAMP_PENDING;
268 return;
269 }
270
271 nsecs_t signalTime = src->getSignalTime();
272 *dst = (signalTime == Fence::SIGNAL_TIME_PENDING) ?
273 NATIVE_WINDOW_TIMESTAMP_PENDING :
274 (signalTime == Fence::SIGNAL_TIME_INVALID) ?
275 NATIVE_WINDOW_TIMESTAMP_INVALID :
276 signalTime;
277 }
278 }
279
getFrameTimestamps(uint64_t frameNumber,nsecs_t * outRequestedPresentTime,nsecs_t * outAcquireTime,nsecs_t * outLatchTime,nsecs_t * outFirstRefreshStartTime,nsecs_t * outLastRefreshStartTime,nsecs_t * outGpuCompositionDoneTime,nsecs_t * outDisplayPresentTime,nsecs_t * outDequeueReadyTime,nsecs_t * outReleaseTime)280 status_t Surface::getFrameTimestamps(uint64_t frameNumber,
281 nsecs_t* outRequestedPresentTime, nsecs_t* outAcquireTime,
282 nsecs_t* outLatchTime, nsecs_t* outFirstRefreshStartTime,
283 nsecs_t* outLastRefreshStartTime, nsecs_t* outGpuCompositionDoneTime,
284 nsecs_t* outDisplayPresentTime, nsecs_t* outDequeueReadyTime,
285 nsecs_t* outReleaseTime) {
286 ATRACE_CALL();
287
288 Mutex::Autolock lock(mMutex);
289
290 if (!mEnableFrameTimestamps) {
291 return INVALID_OPERATION;
292 }
293
294 // Verify the requested timestamps are supported.
295 querySupportedTimestampsLocked();
296 if (outDisplayPresentTime != nullptr && !mFrameTimestampsSupportsPresent) {
297 return BAD_VALUE;
298 }
299
300 FrameEvents* events = mFrameEventHistory->getFrame(frameNumber);
301 if (events == nullptr) {
302 // If the entry isn't available in the producer, it's definitely not
303 // available in the consumer.
304 return NAME_NOT_FOUND;
305 }
306
307 // Update our cache of events if the requested events are not available.
308 if (checkConsumerForUpdates(events, mLastFrameNumber,
309 outLatchTime, outFirstRefreshStartTime, outLastRefreshStartTime,
310 outGpuCompositionDoneTime, outDisplayPresentTime,
311 outDequeueReadyTime, outReleaseTime)) {
312 FrameEventHistoryDelta delta;
313 mGraphicBufferProducer->getFrameTimestamps(&delta);
314 mFrameEventHistory->applyDelta(delta);
315 events = mFrameEventHistory->getFrame(frameNumber);
316 }
317
318 if (events == nullptr) {
319 // The entry was available before the update, but was overwritten
320 // after the update. Make sure not to send the wrong frame's data.
321 return NAME_NOT_FOUND;
322 }
323
324 getFrameTimestamp(outRequestedPresentTime, events->requestedPresentTime);
325 getFrameTimestamp(outLatchTime, events->latchTime);
326 getFrameTimestamp(outFirstRefreshStartTime, events->firstRefreshStartTime);
327 getFrameTimestamp(outLastRefreshStartTime, events->lastRefreshStartTime);
328 getFrameTimestamp(outDequeueReadyTime, events->dequeueReadyTime);
329
330 getFrameTimestampFence(outAcquireTime, events->acquireFence,
331 events->hasAcquireInfo());
332 getFrameTimestampFence(outGpuCompositionDoneTime,
333 events->gpuCompositionDoneFence,
334 events->hasGpuCompositionDoneInfo());
335 getFrameTimestampFence(outDisplayPresentTime, events->displayPresentFence,
336 events->hasDisplayPresentInfo());
337 getFrameTimestampFence(outReleaseTime, events->releaseFence,
338 events->hasReleaseInfo());
339
340 return NO_ERROR;
341 }
342
getWideColorSupport(bool * supported)343 status_t Surface::getWideColorSupport(bool* supported) {
344 ATRACE_CALL();
345
346 const sp<IBinder> display = composerService()->getInternalDisplayToken();
347 if (display == nullptr) {
348 return NAME_NOT_FOUND;
349 }
350
351 *supported = false;
352 status_t error = composerService()->isWideColorDisplay(display, supported);
353 return error;
354 }
355
getHdrSupport(bool * supported)356 status_t Surface::getHdrSupport(bool* supported) {
357 ATRACE_CALL();
358
359 const sp<IBinder> display = composerService()->getInternalDisplayToken();
360 if (display == nullptr) {
361 return NAME_NOT_FOUND;
362 }
363
364 ui::DynamicDisplayInfo info;
365 if (status_t err = composerService()->getDynamicDisplayInfo(display, &info); err != NO_ERROR) {
366 return err;
367 }
368
369 *supported = !info.hdrCapabilities.getSupportedHdrTypes().empty();
370 return NO_ERROR;
371 }
372
hook_setSwapInterval(ANativeWindow * window,int interval)373 int Surface::hook_setSwapInterval(ANativeWindow* window, int interval) {
374 Surface* c = getSelf(window);
375 return c->setSwapInterval(interval);
376 }
377
hook_dequeueBuffer(ANativeWindow * window,ANativeWindowBuffer ** buffer,int * fenceFd)378 int Surface::hook_dequeueBuffer(ANativeWindow* window,
379 ANativeWindowBuffer** buffer, int* fenceFd) {
380 Surface* c = getSelf(window);
381 {
382 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
383 if (c->mDequeueInterceptor != nullptr) {
384 auto interceptor = c->mDequeueInterceptor;
385 auto data = c->mDequeueInterceptorData;
386 return interceptor(window, Surface::dequeueBufferInternal, data, buffer, fenceFd);
387 }
388 }
389 return c->dequeueBuffer(buffer, fenceFd);
390 }
391
dequeueBufferInternal(ANativeWindow * window,ANativeWindowBuffer ** buffer,int * fenceFd)392 int Surface::dequeueBufferInternal(ANativeWindow* window, ANativeWindowBuffer** buffer,
393 int* fenceFd) {
394 Surface* c = getSelf(window);
395 return c->dequeueBuffer(buffer, fenceFd);
396 }
397
hook_cancelBuffer(ANativeWindow * window,ANativeWindowBuffer * buffer,int fenceFd)398 int Surface::hook_cancelBuffer(ANativeWindow* window,
399 ANativeWindowBuffer* buffer, int fenceFd) {
400 Surface* c = getSelf(window);
401 {
402 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
403 if (c->mCancelInterceptor != nullptr) {
404 auto interceptor = c->mCancelInterceptor;
405 auto data = c->mCancelInterceptorData;
406 return interceptor(window, Surface::cancelBufferInternal, data, buffer, fenceFd);
407 }
408 }
409 return c->cancelBuffer(buffer, fenceFd);
410 }
411
cancelBufferInternal(ANativeWindow * window,ANativeWindowBuffer * buffer,int fenceFd)412 int Surface::cancelBufferInternal(ANativeWindow* window, ANativeWindowBuffer* buffer, int fenceFd) {
413 Surface* c = getSelf(window);
414 return c->cancelBuffer(buffer, fenceFd);
415 }
416
hook_queueBuffer(ANativeWindow * window,ANativeWindowBuffer * buffer,int fenceFd)417 int Surface::hook_queueBuffer(ANativeWindow* window,
418 ANativeWindowBuffer* buffer, int fenceFd) {
419 Surface* c = getSelf(window);
420 {
421 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
422 if (c->mQueueInterceptor != nullptr) {
423 auto interceptor = c->mQueueInterceptor;
424 auto data = c->mQueueInterceptorData;
425 return interceptor(window, Surface::queueBufferInternal, data, buffer, fenceFd);
426 }
427 }
428 return c->queueBuffer(buffer, fenceFd);
429 }
430
queueBufferInternal(ANativeWindow * window,ANativeWindowBuffer * buffer,int fenceFd)431 int Surface::queueBufferInternal(ANativeWindow* window, ANativeWindowBuffer* buffer, int fenceFd) {
432 Surface* c = getSelf(window);
433 return c->queueBuffer(buffer, fenceFd);
434 }
435
hook_dequeueBuffer_DEPRECATED(ANativeWindow * window,ANativeWindowBuffer ** buffer)436 int Surface::hook_dequeueBuffer_DEPRECATED(ANativeWindow* window,
437 ANativeWindowBuffer** buffer) {
438 Surface* c = getSelf(window);
439 ANativeWindowBuffer* buf;
440 int fenceFd = -1;
441 int result = c->dequeueBuffer(&buf, &fenceFd);
442 if (result != OK) {
443 return result;
444 }
445 sp<Fence> fence(new Fence(fenceFd));
446 int waitResult = fence->waitForever("dequeueBuffer_DEPRECATED");
447 if (waitResult != OK) {
448 ALOGE("dequeueBuffer_DEPRECATED: Fence::wait returned an error: %d",
449 waitResult);
450 c->cancelBuffer(buf, -1);
451 return waitResult;
452 }
453 *buffer = buf;
454 return result;
455 }
456
hook_cancelBuffer_DEPRECATED(ANativeWindow * window,ANativeWindowBuffer * buffer)457 int Surface::hook_cancelBuffer_DEPRECATED(ANativeWindow* window,
458 ANativeWindowBuffer* buffer) {
459 Surface* c = getSelf(window);
460 return c->cancelBuffer(buffer, -1);
461 }
462
hook_lockBuffer_DEPRECATED(ANativeWindow * window,ANativeWindowBuffer * buffer)463 int Surface::hook_lockBuffer_DEPRECATED(ANativeWindow* window,
464 ANativeWindowBuffer* buffer) {
465 Surface* c = getSelf(window);
466 return c->lockBuffer_DEPRECATED(buffer);
467 }
468
hook_queueBuffer_DEPRECATED(ANativeWindow * window,ANativeWindowBuffer * buffer)469 int Surface::hook_queueBuffer_DEPRECATED(ANativeWindow* window,
470 ANativeWindowBuffer* buffer) {
471 Surface* c = getSelf(window);
472 return c->queueBuffer(buffer, -1);
473 }
474
hook_perform(ANativeWindow * window,int operation,...)475 int Surface::hook_perform(ANativeWindow* window, int operation, ...) {
476 va_list args;
477 va_start(args, operation);
478 Surface* c = getSelf(window);
479 int result;
480 // Don't acquire shared ownership of the interceptor mutex if we're going to
481 // do interceptor registration, as otherwise we'll deadlock on acquiring
482 // exclusive ownership.
483 if (!isInterceptorRegistrationOp(operation)) {
484 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
485 if (c->mPerformInterceptor != nullptr) {
486 result = c->mPerformInterceptor(window, Surface::performInternal,
487 c->mPerformInterceptorData, operation, args);
488 va_end(args);
489 return result;
490 }
491 }
492 result = c->perform(operation, args);
493 va_end(args);
494 return result;
495 }
496
performInternal(ANativeWindow * window,int operation,va_list args)497 int Surface::performInternal(ANativeWindow* window, int operation, va_list args) {
498 Surface* c = getSelf(window);
499 return c->perform(operation, args);
500 }
501
hook_query(const ANativeWindow * window,int what,int * value)502 int Surface::hook_query(const ANativeWindow* window, int what, int* value) {
503 const Surface* c = getSelf(window);
504 {
505 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
506 if (c->mQueryInterceptor != nullptr) {
507 auto interceptor = c->mQueryInterceptor;
508 auto data = c->mQueryInterceptorData;
509 return interceptor(window, Surface::queryInternal, data, what, value);
510 }
511 }
512 return c->query(what, value);
513 }
514
queryInternal(const ANativeWindow * window,int what,int * value)515 int Surface::queryInternal(const ANativeWindow* window, int what, int* value) {
516 const Surface* c = getSelf(window);
517 return c->query(what, value);
518 }
519
setSwapInterval(int interval)520 int Surface::setSwapInterval(int interval) {
521 ATRACE_CALL();
522 // EGL specification states:
523 // interval is silently clamped to minimum and maximum implementation
524 // dependent values before being stored.
525
526 if (interval < minSwapInterval)
527 interval = minSwapInterval;
528
529 if (interval > maxSwapInterval)
530 interval = maxSwapInterval;
531
532 const bool wasSwapIntervalZero = mSwapIntervalZero;
533 mSwapIntervalZero = (interval == 0);
534
535 if (mSwapIntervalZero != wasSwapIntervalZero) {
536 mGraphicBufferProducer->setAsyncMode(mSwapIntervalZero);
537 }
538
539 return NO_ERROR;
540 }
541
542 class FenceMonitor {
543 public:
FenceMonitor(const char * name)544 explicit FenceMonitor(const char* name) : mName(name), mFencesQueued(0), mFencesSignaled(0) {
545 std::thread thread(&FenceMonitor::loop, this);
546 pthread_setname_np(thread.native_handle(), mName);
547 thread.detach();
548 }
549
queueFence(const sp<Fence> & fence)550 void queueFence(const sp<Fence>& fence) {
551 char message[64];
552
553 std::lock_guard<std::mutex> lock(mMutex);
554 if (fence->getSignalTime() != Fence::SIGNAL_TIME_PENDING) {
555 snprintf(message, sizeof(message), "%s fence %u has signaled", mName, mFencesQueued);
556 ATRACE_NAME(message);
557 // Need an increment on both to make the trace number correct.
558 mFencesQueued++;
559 mFencesSignaled++;
560 return;
561 }
562 snprintf(message, sizeof(message), "Trace %s fence %u", mName, mFencesQueued);
563 ATRACE_NAME(message);
564
565 mQueue.push_back(fence);
566 mCondition.notify_one();
567 mFencesQueued++;
568 ATRACE_INT(mName, int32_t(mQueue.size()));
569 }
570
571 private:
572 #pragma clang diagnostic push
573 #pragma clang diagnostic ignored "-Wmissing-noreturn"
loop()574 void loop() {
575 while (true) {
576 threadLoop();
577 }
578 }
579 #pragma clang diagnostic pop
580
threadLoop()581 void threadLoop() {
582 sp<Fence> fence;
583 uint32_t fenceNum;
584 {
585 std::unique_lock<std::mutex> lock(mMutex);
586 while (mQueue.empty()) {
587 mCondition.wait(lock);
588 }
589 fence = mQueue[0];
590 fenceNum = mFencesSignaled;
591 }
592 {
593 char message[64];
594 snprintf(message, sizeof(message), "waiting for %s %u", mName, fenceNum);
595 ATRACE_NAME(message);
596
597 status_t result = fence->waitForever(message);
598 if (result != OK) {
599 ALOGE("Error waiting for fence: %d", result);
600 }
601 }
602 {
603 std::lock_guard<std::mutex> lock(mMutex);
604 mQueue.pop_front();
605 mFencesSignaled++;
606 ATRACE_INT(mName, int32_t(mQueue.size()));
607 }
608 }
609
610 const char* mName;
611 uint32_t mFencesQueued;
612 uint32_t mFencesSignaled;
613 std::deque<sp<Fence>> mQueue;
614 std::condition_variable mCondition;
615 std::mutex mMutex;
616 };
617
getDequeueBufferInputLocked(IGraphicBufferProducer::DequeueBufferInput * dequeueInput)618 void Surface::getDequeueBufferInputLocked(
619 IGraphicBufferProducer::DequeueBufferInput* dequeueInput) {
620 LOG_ALWAYS_FATAL_IF(dequeueInput == nullptr, "input is null");
621
622 dequeueInput->width = mReqWidth ? mReqWidth : mUserWidth;
623 dequeueInput->height = mReqHeight ? mReqHeight : mUserHeight;
624
625 dequeueInput->format = mReqFormat;
626 dequeueInput->usage = mReqUsage;
627
628 dequeueInput->getTimestamps = mEnableFrameTimestamps;
629 }
630
dequeueBuffer(android_native_buffer_t ** buffer,int * fenceFd)631 int Surface::dequeueBuffer(android_native_buffer_t** buffer, int* fenceFd) {
632 ATRACE_CALL();
633 ALOGV("Surface::dequeueBuffer");
634
635 IGraphicBufferProducer::DequeueBufferInput dqInput;
636 {
637 Mutex::Autolock lock(mMutex);
638 if (mReportRemovedBuffers) {
639 mRemovedBuffers.clear();
640 }
641
642 getDequeueBufferInputLocked(&dqInput);
643
644 if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot !=
645 BufferItem::INVALID_BUFFER_SLOT) {
646 sp<GraphicBuffer>& gbuf(mSlots[mSharedBufferSlot].buffer);
647 if (gbuf != nullptr) {
648 *buffer = gbuf.get();
649 *fenceFd = -1;
650 return OK;
651 }
652 }
653 } // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffer
654
655 int buf = -1;
656 sp<Fence> fence;
657 nsecs_t startTime = systemTime();
658
659 FrameEventHistoryDelta frameTimestamps;
660 status_t result = mGraphicBufferProducer->dequeueBuffer(&buf, &fence, dqInput.width,
661 dqInput.height, dqInput.format,
662 dqInput.usage, &mBufferAge,
663 dqInput.getTimestamps ?
664 &frameTimestamps : nullptr);
665 mLastDequeueDuration = systemTime() - startTime;
666
667 if (result < 0) {
668 ALOGV("dequeueBuffer: IGraphicBufferProducer::dequeueBuffer"
669 "(%d, %d, %d, %#" PRIx64 ") failed: %d",
670 dqInput.width, dqInput.height, dqInput.format, dqInput.usage, result);
671 return result;
672 }
673
674 if (buf < 0 || buf >= NUM_BUFFER_SLOTS) {
675 ALOGE("dequeueBuffer: IGraphicBufferProducer returned invalid slot number %d", buf);
676 android_errorWriteLog(0x534e4554, "36991414"); // SafetyNet logging
677 return FAILED_TRANSACTION;
678 }
679
680 Mutex::Autolock lock(mMutex);
681
682 // Write this while holding the mutex
683 mLastDequeueStartTime = startTime;
684
685 sp<GraphicBuffer>& gbuf(mSlots[buf].buffer);
686
687 // this should never happen
688 ALOGE_IF(fence == nullptr, "Surface::dequeueBuffer: received null Fence! buf=%d", buf);
689
690 if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
691 static FenceMonitor hwcReleaseThread("HWC release");
692 hwcReleaseThread.queueFence(fence);
693 }
694
695 if (result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
696 freeAllBuffers();
697 }
698
699 if (dqInput.getTimestamps) {
700 mFrameEventHistory->applyDelta(frameTimestamps);
701 }
702
703 if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == nullptr) {
704 if (mReportRemovedBuffers && (gbuf != nullptr)) {
705 mRemovedBuffers.push_back(gbuf);
706 }
707 result = mGraphicBufferProducer->requestBuffer(buf, &gbuf);
708 if (result != NO_ERROR) {
709 ALOGE("dequeueBuffer: IGraphicBufferProducer::requestBuffer failed: %d", result);
710 mGraphicBufferProducer->cancelBuffer(buf, fence);
711 return result;
712 }
713 }
714
715 if (fence->isValid()) {
716 *fenceFd = fence->dup();
717 if (*fenceFd == -1) {
718 ALOGE("dequeueBuffer: error duping fence: %d", errno);
719 // dup() should never fail; something is badly wrong. Soldier on
720 // and hope for the best; the worst that should happen is some
721 // visible corruption that lasts until the next frame.
722 }
723 } else {
724 *fenceFd = -1;
725 }
726
727 *buffer = gbuf.get();
728
729 if (mSharedBufferMode && mAutoRefresh) {
730 mSharedBufferSlot = buf;
731 mSharedBufferHasBeenQueued = false;
732 } else if (mSharedBufferSlot == buf) {
733 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
734 mSharedBufferHasBeenQueued = false;
735 }
736
737 mDequeuedSlots.insert(buf);
738
739 return OK;
740 }
741
dequeueBuffers(std::vector<BatchBuffer> * buffers)742 int Surface::dequeueBuffers(std::vector<BatchBuffer>* buffers) {
743 using DequeueBufferInput = IGraphicBufferProducer::DequeueBufferInput;
744 using DequeueBufferOutput = IGraphicBufferProducer::DequeueBufferOutput;
745 using CancelBufferInput = IGraphicBufferProducer::CancelBufferInput;
746 using RequestBufferOutput = IGraphicBufferProducer::RequestBufferOutput;
747
748 ATRACE_CALL();
749 ALOGV("Surface::dequeueBuffers");
750
751 if (buffers->size() == 0) {
752 ALOGE("%s: must dequeue at least 1 buffer!", __FUNCTION__);
753 return BAD_VALUE;
754 }
755
756 if (mSharedBufferMode) {
757 ALOGE("%s: batch operation is not supported in shared buffer mode!",
758 __FUNCTION__);
759 return INVALID_OPERATION;
760 }
761
762 size_t numBufferRequested = buffers->size();
763 DequeueBufferInput input;
764
765 {
766 Mutex::Autolock lock(mMutex);
767 if (mReportRemovedBuffers) {
768 mRemovedBuffers.clear();
769 }
770
771 getDequeueBufferInputLocked(&input);
772 } // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffers
773
774 std::vector<DequeueBufferInput> dequeueInput(numBufferRequested, input);
775 std::vector<DequeueBufferOutput> dequeueOutput;
776
777 nsecs_t startTime = systemTime();
778
779 status_t result = mGraphicBufferProducer->dequeueBuffers(dequeueInput, &dequeueOutput);
780
781 mLastDequeueDuration = systemTime() - startTime;
782
783 if (result < 0) {
784 ALOGV("%s: IGraphicBufferProducer::dequeueBuffers"
785 "(%d, %d, %d, %#" PRIx64 ") failed: %d",
786 __FUNCTION__, input.width, input.height, input.format, input.usage, result);
787 return result;
788 }
789
790 std::vector<CancelBufferInput> cancelBufferInputs(numBufferRequested);
791 std::vector<status_t> cancelBufferOutputs;
792 for (size_t i = 0; i < numBufferRequested; i++) {
793 cancelBufferInputs[i].slot = dequeueOutput[i].slot;
794 cancelBufferInputs[i].fence = dequeueOutput[i].fence;
795 }
796
797 for (const auto& output : dequeueOutput) {
798 if (output.result < 0) {
799 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
800 ALOGV("%s: IGraphicBufferProducer::dequeueBuffers"
801 "(%d, %d, %d, %#" PRIx64 ") failed: %d",
802 __FUNCTION__, input.width, input.height, input.format, input.usage,
803 output.result);
804 return output.result;
805 }
806
807 if (output.slot < 0 || output.slot >= NUM_BUFFER_SLOTS) {
808 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
809 ALOGE("%s: IGraphicBufferProducer returned invalid slot number %d",
810 __FUNCTION__, output.slot);
811 android_errorWriteLog(0x534e4554, "36991414"); // SafetyNet logging
812 return FAILED_TRANSACTION;
813 }
814
815 if (input.getTimestamps && !output.timestamps.has_value()) {
816 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
817 ALOGE("%s: no frame timestamp returns!", __FUNCTION__);
818 return FAILED_TRANSACTION;
819 }
820
821 // this should never happen
822 ALOGE_IF(output.fence == nullptr,
823 "%s: received null Fence! slot=%d", __FUNCTION__, output.slot);
824 }
825
826 Mutex::Autolock lock(mMutex);
827
828 // Write this while holding the mutex
829 mLastDequeueStartTime = startTime;
830
831 std::vector<int32_t> requestBufferSlots;
832 requestBufferSlots.reserve(numBufferRequested);
833 // handle release all buffers and request buffers
834 for (const auto& output : dequeueOutput) {
835 if (output.result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
836 ALOGV("%s: RELEASE_ALL_BUFFERS during batch operation", __FUNCTION__);
837 freeAllBuffers();
838 break;
839 }
840 }
841
842 for (const auto& output : dequeueOutput) {
843 // Collect slots that needs requesting buffer
844 sp<GraphicBuffer>& gbuf(mSlots[output.slot].buffer);
845 if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == nullptr) {
846 if (mReportRemovedBuffers && (gbuf != nullptr)) {
847 mRemovedBuffers.push_back(gbuf);
848 }
849 requestBufferSlots.push_back(output.slot);
850 }
851 }
852
853 // Batch request Buffer
854 std::vector<RequestBufferOutput> reqBufferOutput;
855 if (requestBufferSlots.size() > 0) {
856 result = mGraphicBufferProducer->requestBuffers(requestBufferSlots, &reqBufferOutput);
857 if (result != NO_ERROR) {
858 ALOGE("%s: IGraphicBufferProducer::requestBuffers failed: %d",
859 __FUNCTION__, result);
860 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
861 return result;
862 }
863
864 // Check if we have any single failure
865 for (size_t i = 0; i < requestBufferSlots.size(); i++) {
866 if (reqBufferOutput[i].result != OK) {
867 ALOGE("%s: IGraphicBufferProducer::requestBuffers failed at %zu-th buffer, slot %d",
868 __FUNCTION__, i, requestBufferSlots[i]);
869 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
870 return reqBufferOutput[i].result;
871 }
872 }
873
874 // Fill request buffer results to mSlots
875 for (size_t i = 0; i < requestBufferSlots.size(); i++) {
876 mSlots[requestBufferSlots[i]].buffer = reqBufferOutput[i].buffer;
877 }
878 }
879
880 for (size_t batchIdx = 0; batchIdx < numBufferRequested; batchIdx++) {
881 const auto& output = dequeueOutput[batchIdx];
882 int slot = output.slot;
883 sp<GraphicBuffer>& gbuf(mSlots[slot].buffer);
884
885 if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
886 static FenceMonitor hwcReleaseThread("HWC release");
887 hwcReleaseThread.queueFence(output.fence);
888 }
889
890 if (input.getTimestamps) {
891 mFrameEventHistory->applyDelta(output.timestamps.value());
892 }
893
894 if (output.fence->isValid()) {
895 buffers->at(batchIdx).fenceFd = output.fence->dup();
896 if (buffers->at(batchIdx).fenceFd == -1) {
897 ALOGE("%s: error duping fence: %d", __FUNCTION__, errno);
898 // dup() should never fail; something is badly wrong. Soldier on
899 // and hope for the best; the worst that should happen is some
900 // visible corruption that lasts until the next frame.
901 }
902 } else {
903 buffers->at(batchIdx).fenceFd = -1;
904 }
905
906 buffers->at(batchIdx).buffer = gbuf.get();
907 mDequeuedSlots.insert(slot);
908 }
909 return OK;
910 }
911
cancelBuffer(android_native_buffer_t * buffer,int fenceFd)912 int Surface::cancelBuffer(android_native_buffer_t* buffer,
913 int fenceFd) {
914 ATRACE_CALL();
915 ALOGV("Surface::cancelBuffer");
916 Mutex::Autolock lock(mMutex);
917 int i = getSlotFromBufferLocked(buffer);
918 if (i < 0) {
919 if (fenceFd >= 0) {
920 close(fenceFd);
921 }
922 return i;
923 }
924 if (mSharedBufferSlot == i && mSharedBufferHasBeenQueued) {
925 if (fenceFd >= 0) {
926 close(fenceFd);
927 }
928 return OK;
929 }
930 sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
931 mGraphicBufferProducer->cancelBuffer(i, fence);
932
933 if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot == i) {
934 mSharedBufferHasBeenQueued = true;
935 }
936
937 mDequeuedSlots.erase(i);
938
939 return OK;
940 }
941
cancelBuffers(const std::vector<BatchBuffer> & buffers)942 int Surface::cancelBuffers(const std::vector<BatchBuffer>& buffers) {
943 using CancelBufferInput = IGraphicBufferProducer::CancelBufferInput;
944 ATRACE_CALL();
945 ALOGV("Surface::cancelBuffers");
946
947 if (mSharedBufferMode) {
948 ALOGE("%s: batch operation is not supported in shared buffer mode!",
949 __FUNCTION__);
950 return INVALID_OPERATION;
951 }
952
953 size_t numBuffers = buffers.size();
954 std::vector<CancelBufferInput> cancelBufferInputs(numBuffers);
955 std::vector<status_t> cancelBufferOutputs;
956 size_t numBuffersCancelled = 0;
957 int badSlotResult = 0;
958 for (size_t i = 0; i < numBuffers; i++) {
959 int slot = getSlotFromBufferLocked(buffers[i].buffer);
960 int fenceFd = buffers[i].fenceFd;
961 if (slot < 0) {
962 if (fenceFd >= 0) {
963 close(fenceFd);
964 }
965 ALOGE("%s: cannot find slot number for cancelled buffer", __FUNCTION__);
966 badSlotResult = slot;
967 } else {
968 sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
969 cancelBufferInputs[numBuffersCancelled].slot = slot;
970 cancelBufferInputs[numBuffersCancelled++].fence = fence;
971 }
972 }
973 cancelBufferInputs.resize(numBuffersCancelled);
974 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
975
976
977 for (size_t i = 0; i < numBuffersCancelled; i++) {
978 mDequeuedSlots.erase(cancelBufferInputs[i].slot);
979 }
980
981 if (badSlotResult != 0) {
982 return badSlotResult;
983 }
984 return OK;
985 }
986
getSlotFromBufferLocked(android_native_buffer_t * buffer) const987 int Surface::getSlotFromBufferLocked(
988 android_native_buffer_t* buffer) const {
989 if (buffer == nullptr) {
990 ALOGE("%s: input buffer is null!", __FUNCTION__);
991 return BAD_VALUE;
992 }
993
994 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
995 if (mSlots[i].buffer != nullptr &&
996 mSlots[i].buffer->handle == buffer->handle) {
997 return i;
998 }
999 }
1000 ALOGE("%s: unknown buffer: %p", __FUNCTION__, buffer->handle);
1001 return BAD_VALUE;
1002 }
1003
lockBuffer_DEPRECATED(android_native_buffer_t * buffer)1004 int Surface::lockBuffer_DEPRECATED(android_native_buffer_t* buffer __attribute__((unused))) {
1005 ALOGV("Surface::lockBuffer");
1006 Mutex::Autolock lock(mMutex);
1007 return OK;
1008 }
1009
getQueueBufferInputLocked(android_native_buffer_t * buffer,int fenceFd,nsecs_t timestamp,IGraphicBufferProducer::QueueBufferInput * out)1010 void Surface::getQueueBufferInputLocked(android_native_buffer_t* buffer, int fenceFd,
1011 nsecs_t timestamp, IGraphicBufferProducer::QueueBufferInput* out) {
1012 bool isAutoTimestamp = false;
1013
1014 if (timestamp == NATIVE_WINDOW_TIMESTAMP_AUTO) {
1015 timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
1016 isAutoTimestamp = true;
1017 ALOGV("Surface::queueBuffer making up timestamp: %.2f ms",
1018 timestamp / 1000000.0);
1019 }
1020
1021 // Make sure the crop rectangle is entirely inside the buffer.
1022 Rect crop(Rect::EMPTY_RECT);
1023 mCrop.intersect(Rect(buffer->width, buffer->height), &crop);
1024
1025 sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
1026 IGraphicBufferProducer::QueueBufferInput input(timestamp, isAutoTimestamp,
1027 static_cast<android_dataspace>(mDataSpace), crop, mScalingMode,
1028 mTransform ^ mStickyTransform, fence, mStickyTransform,
1029 mEnableFrameTimestamps);
1030
1031 // we should send HDR metadata as needed if this becomes a bottleneck
1032 input.setHdrMetadata(mHdrMetadata);
1033
1034 if (mConnectedToCpu || mDirtyRegion.bounds() == Rect::INVALID_RECT) {
1035 input.setSurfaceDamage(Region::INVALID_REGION);
1036 } else {
1037 // Here we do two things:
1038 // 1) The surface damage was specified using the OpenGL ES convention of
1039 // the origin being in the bottom-left corner. Here we flip to the
1040 // convention that the rest of the system uses (top-left corner) by
1041 // subtracting all top/bottom coordinates from the buffer height.
1042 // 2) If the buffer is coming in rotated (for example, because the EGL
1043 // implementation is reacting to the transform hint coming back from
1044 // SurfaceFlinger), the surface damage needs to be rotated the
1045 // opposite direction, since it was generated assuming an unrotated
1046 // buffer (the app doesn't know that the EGL implementation is
1047 // reacting to the transform hint behind its back). The
1048 // transformations in the switch statement below apply those
1049 // complementary rotations (e.g., if 90 degrees, rotate 270 degrees).
1050
1051 int width = buffer->width;
1052 int height = buffer->height;
1053 bool rotated90 = (mTransform ^ mStickyTransform) &
1054 NATIVE_WINDOW_TRANSFORM_ROT_90;
1055 if (rotated90) {
1056 std::swap(width, height);
1057 }
1058
1059 Region flippedRegion;
1060 for (auto rect : mDirtyRegion) {
1061 int left = rect.left;
1062 int right = rect.right;
1063 int top = height - rect.bottom; // Flip from OpenGL convention
1064 int bottom = height - rect.top; // Flip from OpenGL convention
1065 switch (mTransform ^ mStickyTransform) {
1066 case NATIVE_WINDOW_TRANSFORM_ROT_90: {
1067 // Rotate 270 degrees
1068 Rect flippedRect{top, width - right, bottom, width - left};
1069 flippedRegion.orSelf(flippedRect);
1070 break;
1071 }
1072 case NATIVE_WINDOW_TRANSFORM_ROT_180: {
1073 // Rotate 180 degrees
1074 Rect flippedRect{width - right, height - bottom,
1075 width - left, height - top};
1076 flippedRegion.orSelf(flippedRect);
1077 break;
1078 }
1079 case NATIVE_WINDOW_TRANSFORM_ROT_270: {
1080 // Rotate 90 degrees
1081 Rect flippedRect{height - bottom, left,
1082 height - top, right};
1083 flippedRegion.orSelf(flippedRect);
1084 break;
1085 }
1086 default: {
1087 Rect flippedRect{left, top, right, bottom};
1088 flippedRegion.orSelf(flippedRect);
1089 break;
1090 }
1091 }
1092 }
1093
1094 input.setSurfaceDamage(flippedRegion);
1095 }
1096 *out = input;
1097 }
1098
onBufferQueuedLocked(int slot,sp<Fence> fence,const IGraphicBufferProducer::QueueBufferOutput & output)1099 void Surface::onBufferQueuedLocked(int slot, sp<Fence> fence,
1100 const IGraphicBufferProducer::QueueBufferOutput& output) {
1101 mDequeuedSlots.erase(slot);
1102
1103 if (mEnableFrameTimestamps) {
1104 mFrameEventHistory->applyDelta(output.frameTimestamps);
1105 // Update timestamps with the local acquire fence.
1106 // The consumer doesn't send it back to prevent us from having two
1107 // file descriptors of the same fence.
1108 mFrameEventHistory->updateAcquireFence(mNextFrameNumber,
1109 std::make_shared<FenceTime>(fence));
1110
1111 // Cache timestamps of signaled fences so we can close their file
1112 // descriptors.
1113 mFrameEventHistory->updateSignalTimes();
1114 }
1115
1116 mLastFrameNumber = mNextFrameNumber;
1117
1118 mDefaultWidth = output.width;
1119 mDefaultHeight = output.height;
1120 mNextFrameNumber = output.nextFrameNumber;
1121
1122 // Ignore transform hint if sticky transform is set or transform to display inverse flag is
1123 // set.
1124 if (mStickyTransform == 0 && !transformToDisplayInverse()) {
1125 mTransformHint = output.transformHint;
1126 }
1127
1128 mConsumerRunningBehind = (output.numPendingBuffers >= 2);
1129
1130 if (!mConnectedToCpu) {
1131 // Clear surface damage back to full-buffer
1132 mDirtyRegion = Region::INVALID_REGION;
1133 }
1134
1135 if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot == slot) {
1136 mSharedBufferHasBeenQueued = true;
1137 }
1138
1139 mQueueBufferCondition.broadcast();
1140
1141 if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
1142 static FenceMonitor gpuCompletionThread("GPU completion");
1143 gpuCompletionThread.queueFence(fence);
1144 }
1145 }
1146
queueBuffer(android_native_buffer_t * buffer,int fenceFd)1147 int Surface::queueBuffer(android_native_buffer_t* buffer, int fenceFd) {
1148 ATRACE_CALL();
1149 ALOGV("Surface::queueBuffer");
1150 Mutex::Autolock lock(mMutex);
1151
1152 int i = getSlotFromBufferLocked(buffer);
1153 if (i < 0) {
1154 if (fenceFd >= 0) {
1155 close(fenceFd);
1156 }
1157 return i;
1158 }
1159 if (mSharedBufferSlot == i && mSharedBufferHasBeenQueued) {
1160 if (fenceFd >= 0) {
1161 close(fenceFd);
1162 }
1163 return OK;
1164 }
1165
1166 IGraphicBufferProducer::QueueBufferOutput output;
1167 IGraphicBufferProducer::QueueBufferInput input;
1168 getQueueBufferInputLocked(buffer, fenceFd, mTimestamp, &input);
1169 sp<Fence> fence = input.fence;
1170
1171 nsecs_t now = systemTime();
1172 status_t err = mGraphicBufferProducer->queueBuffer(i, input, &output);
1173 mLastQueueDuration = systemTime() - now;
1174 if (err != OK) {
1175 ALOGE("queueBuffer: error queuing buffer, %d", err);
1176 }
1177
1178 onBufferQueuedLocked(i, fence, output);
1179 return err;
1180 }
1181
queueBuffers(const std::vector<BatchQueuedBuffer> & buffers)1182 int Surface::queueBuffers(const std::vector<BatchQueuedBuffer>& buffers) {
1183 ATRACE_CALL();
1184 ALOGV("Surface::queueBuffers");
1185 Mutex::Autolock lock(mMutex);
1186
1187 if (mSharedBufferMode) {
1188 ALOGE("%s: batched operation is not supported in shared buffer mode", __FUNCTION__);
1189 return INVALID_OPERATION;
1190 }
1191
1192 size_t numBuffers = buffers.size();
1193 std::vector<IGraphicBufferProducer::QueueBufferInput> queueBufferInputs(numBuffers);
1194 std::vector<IGraphicBufferProducer::QueueBufferOutput> queueBufferOutputs;
1195 std::vector<int> bufferSlots(numBuffers, -1);
1196 std::vector<sp<Fence>> bufferFences(numBuffers);
1197
1198 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1199 int i = getSlotFromBufferLocked(buffers[batchIdx].buffer);
1200 if (i < 0) {
1201 if (buffers[batchIdx].fenceFd >= 0) {
1202 close(buffers[batchIdx].fenceFd);
1203 }
1204 return i;
1205 }
1206 bufferSlots[batchIdx] = i;
1207
1208 IGraphicBufferProducer::QueueBufferInput input;
1209 getQueueBufferInputLocked(
1210 buffers[batchIdx].buffer, buffers[batchIdx].fenceFd, buffers[batchIdx].timestamp,
1211 &input);
1212 bufferFences[batchIdx] = input.fence;
1213 queueBufferInputs[batchIdx] = input;
1214 }
1215
1216 nsecs_t now = systemTime();
1217 status_t err = mGraphicBufferProducer->queueBuffers(queueBufferInputs, &queueBufferOutputs);
1218 mLastQueueDuration = systemTime() - now;
1219 if (err != OK) {
1220 ALOGE("%s: error queuing buffer, %d", __FUNCTION__, err);
1221 }
1222
1223
1224 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1225 onBufferQueuedLocked(bufferSlots[batchIdx], bufferFences[batchIdx],
1226 queueBufferOutputs[batchIdx]);
1227 }
1228
1229 return err;
1230 }
1231
querySupportedTimestampsLocked() const1232 void Surface::querySupportedTimestampsLocked() const {
1233 // mMutex must be locked when calling this method.
1234
1235 if (mQueriedSupportedTimestamps) {
1236 return;
1237 }
1238 mQueriedSupportedTimestamps = true;
1239
1240 std::vector<FrameEvent> supportedFrameTimestamps;
1241 status_t err = composerService()->getSupportedFrameTimestamps(
1242 &supportedFrameTimestamps);
1243
1244 if (err != NO_ERROR) {
1245 return;
1246 }
1247
1248 for (auto sft : supportedFrameTimestamps) {
1249 if (sft == FrameEvent::DISPLAY_PRESENT) {
1250 mFrameTimestampsSupportsPresent = true;
1251 }
1252 }
1253 }
1254
query(int what,int * value) const1255 int Surface::query(int what, int* value) const {
1256 ATRACE_CALL();
1257 ALOGV("Surface::query");
1258 { // scope for the lock
1259 Mutex::Autolock lock(mMutex);
1260 switch (what) {
1261 case NATIVE_WINDOW_FORMAT:
1262 if (mReqFormat) {
1263 *value = static_cast<int>(mReqFormat);
1264 return NO_ERROR;
1265 }
1266 break;
1267 case NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER: {
1268 status_t err = mGraphicBufferProducer->query(what, value);
1269 if (err == NO_ERROR) {
1270 return NO_ERROR;
1271 }
1272 sp<ISurfaceComposer> surfaceComposer = composerService();
1273 if (surfaceComposer == nullptr) {
1274 return -EPERM; // likely permissions error
1275 }
1276 if (surfaceComposer->authenticateSurfaceTexture(mGraphicBufferProducer)) {
1277 *value = 1;
1278 } else {
1279 *value = 0;
1280 }
1281 return NO_ERROR;
1282 }
1283 case NATIVE_WINDOW_CONCRETE_TYPE:
1284 *value = NATIVE_WINDOW_SURFACE;
1285 return NO_ERROR;
1286 case NATIVE_WINDOW_DEFAULT_WIDTH:
1287 *value = static_cast<int>(
1288 mUserWidth ? mUserWidth : mDefaultWidth);
1289 return NO_ERROR;
1290 case NATIVE_WINDOW_DEFAULT_HEIGHT:
1291 *value = static_cast<int>(
1292 mUserHeight ? mUserHeight : mDefaultHeight);
1293 return NO_ERROR;
1294 case NATIVE_WINDOW_TRANSFORM_HINT:
1295 *value = static_cast<int>(getTransformHint());
1296 return NO_ERROR;
1297 case NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND: {
1298 status_t err = NO_ERROR;
1299 if (!mConsumerRunningBehind) {
1300 *value = 0;
1301 } else {
1302 err = mGraphicBufferProducer->query(what, value);
1303 if (err == NO_ERROR) {
1304 mConsumerRunningBehind = *value;
1305 }
1306 }
1307 return err;
1308 }
1309 case NATIVE_WINDOW_BUFFER_AGE: {
1310 if (mBufferAge > INT32_MAX) {
1311 *value = 0;
1312 } else {
1313 *value = static_cast<int32_t>(mBufferAge);
1314 }
1315 return NO_ERROR;
1316 }
1317 case NATIVE_WINDOW_LAST_DEQUEUE_DURATION: {
1318 int64_t durationUs = mLastDequeueDuration / 1000;
1319 *value = durationUs > std::numeric_limits<int>::max() ?
1320 std::numeric_limits<int>::max() :
1321 static_cast<int>(durationUs);
1322 return NO_ERROR;
1323 }
1324 case NATIVE_WINDOW_LAST_QUEUE_DURATION: {
1325 int64_t durationUs = mLastQueueDuration / 1000;
1326 *value = durationUs > std::numeric_limits<int>::max() ?
1327 std::numeric_limits<int>::max() :
1328 static_cast<int>(durationUs);
1329 return NO_ERROR;
1330 }
1331 case NATIVE_WINDOW_FRAME_TIMESTAMPS_SUPPORTS_PRESENT: {
1332 querySupportedTimestampsLocked();
1333 *value = mFrameTimestampsSupportsPresent ? 1 : 0;
1334 return NO_ERROR;
1335 }
1336 case NATIVE_WINDOW_IS_VALID: {
1337 *value = mGraphicBufferProducer != nullptr ? 1 : 0;
1338 return NO_ERROR;
1339 }
1340 case NATIVE_WINDOW_DATASPACE: {
1341 *value = static_cast<int>(mDataSpace);
1342 return NO_ERROR;
1343 }
1344 case NATIVE_WINDOW_MAX_BUFFER_COUNT: {
1345 *value = mMaxBufferCount;
1346 return NO_ERROR;
1347 }
1348 }
1349 }
1350 return mGraphicBufferProducer->query(what, value);
1351 }
1352
perform(int operation,va_list args)1353 int Surface::perform(int operation, va_list args)
1354 {
1355 int res = NO_ERROR;
1356 switch (operation) {
1357 case NATIVE_WINDOW_CONNECT:
1358 // deprecated. must return NO_ERROR.
1359 break;
1360 case NATIVE_WINDOW_DISCONNECT:
1361 // deprecated. must return NO_ERROR.
1362 break;
1363 case NATIVE_WINDOW_SET_USAGE:
1364 res = dispatchSetUsage(args);
1365 break;
1366 case NATIVE_WINDOW_SET_CROP:
1367 res = dispatchSetCrop(args);
1368 break;
1369 case NATIVE_WINDOW_SET_BUFFER_COUNT:
1370 res = dispatchSetBufferCount(args);
1371 break;
1372 case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
1373 res = dispatchSetBuffersGeometry(args);
1374 break;
1375 case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
1376 res = dispatchSetBuffersTransform(args);
1377 break;
1378 case NATIVE_WINDOW_SET_BUFFERS_STICKY_TRANSFORM:
1379 res = dispatchSetBuffersStickyTransform(args);
1380 break;
1381 case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
1382 res = dispatchSetBuffersTimestamp(args);
1383 break;
1384 case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
1385 res = dispatchSetBuffersDimensions(args);
1386 break;
1387 case NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS:
1388 res = dispatchSetBuffersUserDimensions(args);
1389 break;
1390 case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
1391 res = dispatchSetBuffersFormat(args);
1392 break;
1393 case NATIVE_WINDOW_LOCK:
1394 res = dispatchLock(args);
1395 break;
1396 case NATIVE_WINDOW_UNLOCK_AND_POST:
1397 res = dispatchUnlockAndPost(args);
1398 break;
1399 case NATIVE_WINDOW_SET_SCALING_MODE:
1400 res = dispatchSetScalingMode(args);
1401 break;
1402 case NATIVE_WINDOW_API_CONNECT:
1403 res = dispatchConnect(args);
1404 break;
1405 case NATIVE_WINDOW_API_DISCONNECT:
1406 res = dispatchDisconnect(args);
1407 break;
1408 case NATIVE_WINDOW_SET_SIDEBAND_STREAM:
1409 res = dispatchSetSidebandStream(args);
1410 break;
1411 case NATIVE_WINDOW_SET_BUFFERS_DATASPACE:
1412 res = dispatchSetBuffersDataSpace(args);
1413 break;
1414 case NATIVE_WINDOW_SET_BUFFERS_SMPTE2086_METADATA:
1415 res = dispatchSetBuffersSmpte2086Metadata(args);
1416 break;
1417 case NATIVE_WINDOW_SET_BUFFERS_CTA861_3_METADATA:
1418 res = dispatchSetBuffersCta8613Metadata(args);
1419 break;
1420 case NATIVE_WINDOW_SET_BUFFERS_HDR10_PLUS_METADATA:
1421 res = dispatchSetBuffersHdr10PlusMetadata(args);
1422 break;
1423 case NATIVE_WINDOW_SET_SURFACE_DAMAGE:
1424 res = dispatchSetSurfaceDamage(args);
1425 break;
1426 case NATIVE_WINDOW_SET_SHARED_BUFFER_MODE:
1427 res = dispatchSetSharedBufferMode(args);
1428 break;
1429 case NATIVE_WINDOW_SET_AUTO_REFRESH:
1430 res = dispatchSetAutoRefresh(args);
1431 break;
1432 case NATIVE_WINDOW_GET_REFRESH_CYCLE_DURATION:
1433 res = dispatchGetDisplayRefreshCycleDuration(args);
1434 break;
1435 case NATIVE_WINDOW_GET_NEXT_FRAME_ID:
1436 res = dispatchGetNextFrameId(args);
1437 break;
1438 case NATIVE_WINDOW_ENABLE_FRAME_TIMESTAMPS:
1439 res = dispatchEnableFrameTimestamps(args);
1440 break;
1441 case NATIVE_WINDOW_GET_COMPOSITOR_TIMING:
1442 res = dispatchGetCompositorTiming(args);
1443 break;
1444 case NATIVE_WINDOW_GET_FRAME_TIMESTAMPS:
1445 res = dispatchGetFrameTimestamps(args);
1446 break;
1447 case NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT:
1448 res = dispatchGetWideColorSupport(args);
1449 break;
1450 case NATIVE_WINDOW_GET_HDR_SUPPORT:
1451 res = dispatchGetHdrSupport(args);
1452 break;
1453 case NATIVE_WINDOW_SET_USAGE64:
1454 res = dispatchSetUsage64(args);
1455 break;
1456 case NATIVE_WINDOW_GET_CONSUMER_USAGE64:
1457 res = dispatchGetConsumerUsage64(args);
1458 break;
1459 case NATIVE_WINDOW_SET_AUTO_PREROTATION:
1460 res = dispatchSetAutoPrerotation(args);
1461 break;
1462 case NATIVE_WINDOW_GET_LAST_DEQUEUE_START:
1463 res = dispatchGetLastDequeueStartTime(args);
1464 break;
1465 case NATIVE_WINDOW_SET_DEQUEUE_TIMEOUT:
1466 res = dispatchSetDequeueTimeout(args);
1467 break;
1468 case NATIVE_WINDOW_GET_LAST_DEQUEUE_DURATION:
1469 res = dispatchGetLastDequeueDuration(args);
1470 break;
1471 case NATIVE_WINDOW_GET_LAST_QUEUE_DURATION:
1472 res = dispatchGetLastQueueDuration(args);
1473 break;
1474 case NATIVE_WINDOW_SET_FRAME_RATE:
1475 res = dispatchSetFrameRate(args);
1476 break;
1477 case NATIVE_WINDOW_SET_CANCEL_INTERCEPTOR:
1478 res = dispatchAddCancelInterceptor(args);
1479 break;
1480 case NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR:
1481 res = dispatchAddDequeueInterceptor(args);
1482 break;
1483 case NATIVE_WINDOW_SET_PERFORM_INTERCEPTOR:
1484 res = dispatchAddPerformInterceptor(args);
1485 break;
1486 case NATIVE_WINDOW_SET_QUEUE_INTERCEPTOR:
1487 res = dispatchAddQueueInterceptor(args);
1488 break;
1489 case NATIVE_WINDOW_SET_QUERY_INTERCEPTOR:
1490 res = dispatchAddQueryInterceptor(args);
1491 break;
1492 case NATIVE_WINDOW_ALLOCATE_BUFFERS:
1493 allocateBuffers();
1494 res = NO_ERROR;
1495 break;
1496 case NATIVE_WINDOW_GET_LAST_QUEUED_BUFFER:
1497 res = dispatchGetLastQueuedBuffer(args);
1498 break;
1499 case NATIVE_WINDOW_GET_LAST_QUEUED_BUFFER2:
1500 res = dispatchGetLastQueuedBuffer2(args);
1501 break;
1502 case NATIVE_WINDOW_SET_FRAME_TIMELINE_INFO:
1503 res = dispatchSetFrameTimelineInfo(args);
1504 break;
1505 default:
1506 res = NAME_NOT_FOUND;
1507 break;
1508 }
1509 return res;
1510 }
1511
dispatchConnect(va_list args)1512 int Surface::dispatchConnect(va_list args) {
1513 int api = va_arg(args, int);
1514 return connect(api);
1515 }
1516
dispatchDisconnect(va_list args)1517 int Surface::dispatchDisconnect(va_list args) {
1518 int api = va_arg(args, int);
1519 return disconnect(api);
1520 }
1521
dispatchSetUsage(va_list args)1522 int Surface::dispatchSetUsage(va_list args) {
1523 uint64_t usage = va_arg(args, uint32_t);
1524 return setUsage(usage);
1525 }
1526
dispatchSetUsage64(va_list args)1527 int Surface::dispatchSetUsage64(va_list args) {
1528 uint64_t usage = va_arg(args, uint64_t);
1529 return setUsage(usage);
1530 }
1531
dispatchSetCrop(va_list args)1532 int Surface::dispatchSetCrop(va_list args) {
1533 android_native_rect_t const* rect = va_arg(args, android_native_rect_t*);
1534 return setCrop(reinterpret_cast<Rect const*>(rect));
1535 }
1536
dispatchSetBufferCount(va_list args)1537 int Surface::dispatchSetBufferCount(va_list args) {
1538 size_t bufferCount = va_arg(args, size_t);
1539 return setBufferCount(static_cast<int32_t>(bufferCount));
1540 }
1541
dispatchSetBuffersGeometry(va_list args)1542 int Surface::dispatchSetBuffersGeometry(va_list args) {
1543 uint32_t width = va_arg(args, uint32_t);
1544 uint32_t height = va_arg(args, uint32_t);
1545 PixelFormat format = va_arg(args, PixelFormat);
1546 int err = setBuffersDimensions(width, height);
1547 if (err != 0) {
1548 return err;
1549 }
1550 return setBuffersFormat(format);
1551 }
1552
dispatchSetBuffersDimensions(va_list args)1553 int Surface::dispatchSetBuffersDimensions(va_list args) {
1554 uint32_t width = va_arg(args, uint32_t);
1555 uint32_t height = va_arg(args, uint32_t);
1556 return setBuffersDimensions(width, height);
1557 }
1558
dispatchSetBuffersUserDimensions(va_list args)1559 int Surface::dispatchSetBuffersUserDimensions(va_list args) {
1560 uint32_t width = va_arg(args, uint32_t);
1561 uint32_t height = va_arg(args, uint32_t);
1562 return setBuffersUserDimensions(width, height);
1563 }
1564
dispatchSetBuffersFormat(va_list args)1565 int Surface::dispatchSetBuffersFormat(va_list args) {
1566 PixelFormat format = va_arg(args, PixelFormat);
1567 return setBuffersFormat(format);
1568 }
1569
dispatchSetScalingMode(va_list args)1570 int Surface::dispatchSetScalingMode(va_list args) {
1571 int mode = va_arg(args, int);
1572 return setScalingMode(mode);
1573 }
1574
dispatchSetBuffersTransform(va_list args)1575 int Surface::dispatchSetBuffersTransform(va_list args) {
1576 uint32_t transform = va_arg(args, uint32_t);
1577 return setBuffersTransform(transform);
1578 }
1579
dispatchSetBuffersStickyTransform(va_list args)1580 int Surface::dispatchSetBuffersStickyTransform(va_list args) {
1581 uint32_t transform = va_arg(args, uint32_t);
1582 return setBuffersStickyTransform(transform);
1583 }
1584
dispatchSetBuffersTimestamp(va_list args)1585 int Surface::dispatchSetBuffersTimestamp(va_list args) {
1586 int64_t timestamp = va_arg(args, int64_t);
1587 return setBuffersTimestamp(timestamp);
1588 }
1589
dispatchLock(va_list args)1590 int Surface::dispatchLock(va_list args) {
1591 ANativeWindow_Buffer* outBuffer = va_arg(args, ANativeWindow_Buffer*);
1592 ARect* inOutDirtyBounds = va_arg(args, ARect*);
1593 return lock(outBuffer, inOutDirtyBounds);
1594 }
1595
dispatchUnlockAndPost(va_list args)1596 int Surface::dispatchUnlockAndPost(va_list args __attribute__((unused))) {
1597 return unlockAndPost();
1598 }
1599
dispatchSetSidebandStream(va_list args)1600 int Surface::dispatchSetSidebandStream(va_list args) {
1601 native_handle_t* sH = va_arg(args, native_handle_t*);
1602 sp<NativeHandle> sidebandHandle = NativeHandle::create(sH, false);
1603 setSidebandStream(sidebandHandle);
1604 return OK;
1605 }
1606
dispatchSetBuffersDataSpace(va_list args)1607 int Surface::dispatchSetBuffersDataSpace(va_list args) {
1608 Dataspace dataspace = static_cast<Dataspace>(va_arg(args, int));
1609 return setBuffersDataSpace(dataspace);
1610 }
1611
dispatchSetBuffersSmpte2086Metadata(va_list args)1612 int Surface::dispatchSetBuffersSmpte2086Metadata(va_list args) {
1613 const android_smpte2086_metadata* metadata =
1614 va_arg(args, const android_smpte2086_metadata*);
1615 return setBuffersSmpte2086Metadata(metadata);
1616 }
1617
dispatchSetBuffersCta8613Metadata(va_list args)1618 int Surface::dispatchSetBuffersCta8613Metadata(va_list args) {
1619 const android_cta861_3_metadata* metadata =
1620 va_arg(args, const android_cta861_3_metadata*);
1621 return setBuffersCta8613Metadata(metadata);
1622 }
1623
dispatchSetBuffersHdr10PlusMetadata(va_list args)1624 int Surface::dispatchSetBuffersHdr10PlusMetadata(va_list args) {
1625 const size_t size = va_arg(args, size_t);
1626 const uint8_t* metadata = va_arg(args, const uint8_t*);
1627 return setBuffersHdr10PlusMetadata(size, metadata);
1628 }
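// Illustrative sketch (values are made up, op names are assumed to match the
// dispatch functions above): HDR metadata is attached via perform() and
// latched into mHdrMetadata by the setters further below:
//
//     android_smpte2086_metadata smpte{};
//     smpte.maxLuminance = 1000.0f;
//     window->perform(window, NATIVE_WINDOW_SET_BUFFERS_SMPTE2086_METADATA,
//                     &smpte);
//     window->perform(window, NATIVE_WINDOW_SET_BUFFERS_HDR10_PLUS_METADATA,
//                     size_t(0), nullptr);  // size 0 clears the HDR10+ blob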
1629
dispatchSetSurfaceDamage(va_list args)1630 int Surface::dispatchSetSurfaceDamage(va_list args) {
1631 android_native_rect_t* rects = va_arg(args, android_native_rect_t*);
1632 size_t numRects = va_arg(args, size_t);
1633 setSurfaceDamage(rects, numRects);
1634 return NO_ERROR;
1635 }
1636
dispatchSetSharedBufferMode(va_list args)1637 int Surface::dispatchSetSharedBufferMode(va_list args) {
1638 bool sharedBufferMode = va_arg(args, int);
1639 return setSharedBufferMode(sharedBufferMode);
1640 }
1641
dispatchSetAutoRefresh(va_list args)1642 int Surface::dispatchSetAutoRefresh(va_list args) {
1643 bool autoRefresh = va_arg(args, int);
1644 return setAutoRefresh(autoRefresh);
1645 }
1646
dispatchGetDisplayRefreshCycleDuration(va_list args)1647 int Surface::dispatchGetDisplayRefreshCycleDuration(va_list args) {
1648 nsecs_t* outRefreshDuration = va_arg(args, int64_t*);
1649 return getDisplayRefreshCycleDuration(outRefreshDuration);
1650 }
1651
dispatchGetNextFrameId(va_list args)1652 int Surface::dispatchGetNextFrameId(va_list args) {
1653 uint64_t* nextFrameId = va_arg(args, uint64_t*);
1654 *nextFrameId = getNextFrameNumber();
1655 return NO_ERROR;
1656 }
1657
dispatchEnableFrameTimestamps(va_list args)1658 int Surface::dispatchEnableFrameTimestamps(va_list args) {
1659 bool enable = va_arg(args, int);
1660 enableFrameTimestamps(enable);
1661 return NO_ERROR;
1662 }
1663
dispatchGetCompositorTiming(va_list args)1664 int Surface::dispatchGetCompositorTiming(va_list args) {
1665 nsecs_t* compositeDeadline = va_arg(args, int64_t*);
1666 nsecs_t* compositeInterval = va_arg(args, int64_t*);
1667 nsecs_t* compositeToPresentLatency = va_arg(args, int64_t*);
1668 return getCompositorTiming(compositeDeadline, compositeInterval,
1669 compositeToPresentLatency);
1670 }
1671
dispatchGetFrameTimestamps(va_list args)1672 int Surface::dispatchGetFrameTimestamps(va_list args) {
1673 uint64_t frameId = va_arg(args, uint64_t);
1674 nsecs_t* outRequestedPresentTime = va_arg(args, int64_t*);
1675 nsecs_t* outAcquireTime = va_arg(args, int64_t*);
1676 nsecs_t* outLatchTime = va_arg(args, int64_t*);
1677 nsecs_t* outFirstRefreshStartTime = va_arg(args, int64_t*);
1678 nsecs_t* outLastRefreshStartTime = va_arg(args, int64_t*);
1679 nsecs_t* outGpuCompositionDoneTime = va_arg(args, int64_t*);
1680 nsecs_t* outDisplayPresentTime = va_arg(args, int64_t*);
1681 nsecs_t* outDequeueReadyTime = va_arg(args, int64_t*);
1682 nsecs_t* outReleaseTime = va_arg(args, int64_t*);
1683 return getFrameTimestamps(frameId,
1684 outRequestedPresentTime, outAcquireTime, outLatchTime,
1685 outFirstRefreshStartTime, outLastRefreshStartTime,
1686 outGpuCompositionDoneTime, outDisplayPresentTime,
1687 outDequeueReadyTime, outReleaseTime);
1688 }
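// Illustrative sketch (assuming the corresponding NATIVE_WINDOW_* ops): frame
// timestamps have to be enabled before they can be queried:
//
//     window->perform(window, NATIVE_WINDOW_ENABLE_FRAME_TIMESTAMPS, 1);
//     uint64_t frameId = 0;
//     window->perform(window, NATIVE_WINDOW_GET_NEXT_FRAME_ID, &frameId);
//     // ... queue the frame, then later:
//     int64_t requestedPresent, acquire, latch, firstRefresh, lastRefresh,
//             gpuDone, present, dequeueReady, release;
//     window->perform(window, NATIVE_WINDOW_GET_FRAME_TIMESTAMPS, frameId,
//                     &requestedPresent, &acquire, &latch, &firstRefresh,
//                     &lastRefresh, &gpuDone, &present, &dequeueReady,
//                     &release);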
1689
dispatchGetWideColorSupport(va_list args)1690 int Surface::dispatchGetWideColorSupport(va_list args) {
1691 bool* outSupport = va_arg(args, bool*);
1692 return getWideColorSupport(outSupport);
1693 }
1694
dispatchGetHdrSupport(va_list args)1695 int Surface::dispatchGetHdrSupport(va_list args) {
1696 bool* outSupport = va_arg(args, bool*);
1697 return getHdrSupport(outSupport);
1698 }
1699
dispatchGetConsumerUsage64(va_list args)1700 int Surface::dispatchGetConsumerUsage64(va_list args) {
1701 uint64_t* usage = va_arg(args, uint64_t*);
1702 return getConsumerUsage(usage);
1703 }
1704
dispatchSetAutoPrerotation(va_list args)1705 int Surface::dispatchSetAutoPrerotation(va_list args) {
1706 bool autoPrerotation = va_arg(args, int);
1707 return setAutoPrerotation(autoPrerotation);
1708 }
1709
dispatchGetLastDequeueStartTime(va_list args)1710 int Surface::dispatchGetLastDequeueStartTime(va_list args) {
1711 int64_t* lastDequeueStartTime = va_arg(args, int64_t*);
1712 *lastDequeueStartTime = mLastDequeueStartTime;
1713 return NO_ERROR;
1714 }
1715
dispatchSetDequeueTimeout(va_list args)1716 int Surface::dispatchSetDequeueTimeout(va_list args) {
1717 nsecs_t timeout = va_arg(args, int64_t);
1718 return setDequeueTimeout(timeout);
1719 }
1720
dispatchGetLastDequeueDuration(va_list args)1721 int Surface::dispatchGetLastDequeueDuration(va_list args) {
1722 int64_t* lastDequeueDuration = va_arg(args, int64_t*);
1723 *lastDequeueDuration = mLastDequeueDuration;
1724 return NO_ERROR;
1725 }
1726
dispatchGetLastQueueDuration(va_list args)1727 int Surface::dispatchGetLastQueueDuration(va_list args) {
1728 int64_t* lastQueueDuration = va_arg(args, int64_t*);
1729 *lastQueueDuration = mLastQueueDuration;
1730 return NO_ERROR;
1731 }
1732
dispatchSetFrameRate(va_list args)1733 int Surface::dispatchSetFrameRate(va_list args) {
1734 float frameRate = static_cast<float>(va_arg(args, double));
1735 int8_t compatibility = static_cast<int8_t>(va_arg(args, int));
1736 int8_t changeFrameRateStrategy = static_cast<int8_t>(va_arg(args, int));
1737 return setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
1738 }
1739
dispatchAddCancelInterceptor(va_list args)1740 int Surface::dispatchAddCancelInterceptor(va_list args) {
1741 ANativeWindow_cancelBufferInterceptor interceptor =
1742 va_arg(args, ANativeWindow_cancelBufferInterceptor);
1743 void* data = va_arg(args, void*);
1744 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1745 mCancelInterceptor = interceptor;
1746 mCancelInterceptorData = data;
1747 return NO_ERROR;
1748 }
1749
dispatchAddDequeueInterceptor(va_list args)1750 int Surface::dispatchAddDequeueInterceptor(va_list args) {
1751 ANativeWindow_dequeueBufferInterceptor interceptor =
1752 va_arg(args, ANativeWindow_dequeueBufferInterceptor);
1753 void* data = va_arg(args, void*);
1754 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1755 mDequeueInterceptor = interceptor;
1756 mDequeueInterceptorData = data;
1757 return NO_ERROR;
1758 }
1759
dispatchAddPerformInterceptor(va_list args)1760 int Surface::dispatchAddPerformInterceptor(va_list args) {
1761 ANativeWindow_performInterceptor interceptor = va_arg(args, ANativeWindow_performInterceptor);
1762 void* data = va_arg(args, void*);
1763 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1764 mPerformInterceptor = interceptor;
1765 mPerformInterceptorData = data;
1766 return NO_ERROR;
1767 }
1768
dispatchAddQueueInterceptor(va_list args)1769 int Surface::dispatchAddQueueInterceptor(va_list args) {
1770 ANativeWindow_queueBufferInterceptor interceptor =
1771 va_arg(args, ANativeWindow_queueBufferInterceptor);
1772 void* data = va_arg(args, void*);
1773 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1774 mQueueInterceptor = interceptor;
1775 mQueueInterceptorData = data;
1776 return NO_ERROR;
1777 }
1778
dispatchAddQueryInterceptor(va_list args)1779 int Surface::dispatchAddQueryInterceptor(va_list args) {
1780 ANativeWindow_queryInterceptor interceptor = va_arg(args, ANativeWindow_queryInterceptor);
1781 void* data = va_arg(args, void*);
1782 std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
1783 mQueryInterceptor = interceptor;
1784 mQueryInterceptorData = data;
1785 return NO_ERROR;
1786 }
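// Note: the interceptor registration ops above replace the per-window hooks
// guarded by mInterceptorMutex (registering nullptr effectively clears them).
// Illustrative sketch (the interceptor callback itself is caller-provided):
//
//     ANativeWindow_queueBufferInterceptor interceptor = /* caller-provided */;
//     void* context = /* caller-provided */;
//     window->perform(window, NATIVE_WINDOW_SET_QUEUE_INTERCEPTOR,
//                     interceptor, context);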
1787
dispatchGetLastQueuedBuffer(va_list args)1788 int Surface::dispatchGetLastQueuedBuffer(va_list args) {
1789 AHardwareBuffer** buffer = va_arg(args, AHardwareBuffer**);
1790 int* fence = va_arg(args, int*);
1791 float* matrix = va_arg(args, float*);
1792 sp<GraphicBuffer> graphicBuffer;
1793 sp<Fence> spFence;
1794
1795 int result = mGraphicBufferProducer->getLastQueuedBuffer(&graphicBuffer, &spFence, matrix);
1796
1797 if (graphicBuffer != nullptr) {
1798 *buffer = graphicBuffer->toAHardwareBuffer();
1799 AHardwareBuffer_acquire(*buffer);
1800 } else {
1801 *buffer = nullptr;
1802 }
1803
1804 if (spFence != nullptr) {
1805 *fence = spFence->dup();
1806 } else {
1807 *fence = -1;
1808 }
1809 return result;
1810 }
1811
dispatchGetLastQueuedBuffer2(va_list args)1812 int Surface::dispatchGetLastQueuedBuffer2(va_list args) {
1813 AHardwareBuffer** buffer = va_arg(args, AHardwareBuffer**);
1814 int* fence = va_arg(args, int*);
1815 ARect* crop = va_arg(args, ARect*);
1816 uint32_t* transform = va_arg(args, uint32_t*);
1817 sp<GraphicBuffer> graphicBuffer;
1818 sp<Fence> spFence;
1819
1820 Rect r;
1821 int result =
1822 mGraphicBufferProducer->getLastQueuedBuffer(&graphicBuffer, &spFence, &r, transform);
1823
1824 if (graphicBuffer != nullptr) {
1825 *buffer = graphicBuffer->toAHardwareBuffer();
1826 AHardwareBuffer_acquire(*buffer);
1827
1828 // Avoid setting crop* unless buffer is valid (matches IGBP behavior)
1829 crop->left = r.left;
1830 crop->top = r.top;
1831 crop->right = r.right;
1832 crop->bottom = r.bottom;
1833 } else {
1834 *buffer = nullptr;
1835 }
1836
1837 if (spFence != nullptr) {
1838 *fence = spFence->dup();
1839 } else {
1840 *fence = -1;
1841 }
1842 return result;
1843 }
1844
dispatchSetFrameTimelineInfo(va_list args)1845 int Surface::dispatchSetFrameTimelineInfo(va_list args) {
1846 ATRACE_CALL();
1847 auto frameTimelineVsyncId = static_cast<int64_t>(va_arg(args, int64_t));
1848 auto inputEventId = static_cast<int32_t>(va_arg(args, int32_t));
1849
1850 ALOGV("Surface::%s", __func__);
1851 return setFrameTimelineInfo({frameTimelineVsyncId, inputEventId});
1852 }
1853
transformToDisplayInverse() const1854 bool Surface::transformToDisplayInverse() const {
1855 return (mTransform & NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) ==
1856 NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
1857 }
1858
connect(int api)1859 int Surface::connect(int api) {
1860 static sp<IProducerListener> listener = new StubProducerListener();
1861 return connect(api, listener);
1862 }
1863
connect(int api,const sp<IProducerListener> & listener)1864 int Surface::connect(int api, const sp<IProducerListener>& listener) {
1865 return connect(api, listener, false);
1866 }
1867
connect(int api,bool reportBufferRemoval,const sp<SurfaceListener> & sListener)1868 int Surface::connect(
1869 int api, bool reportBufferRemoval, const sp<SurfaceListener>& sListener) {
1870 if (sListener != nullptr) {
1871 mListenerProxy = new ProducerListenerProxy(this, sListener);
1872 }
1873 return connect(api, mListenerProxy, reportBufferRemoval);
1874 }
1875
connect(int api,const sp<IProducerListener> & listener,bool reportBufferRemoval)1876 int Surface::connect(
1877 int api, const sp<IProducerListener>& listener, bool reportBufferRemoval) {
1878 ATRACE_CALL();
1879 ALOGV("Surface::connect");
1880 Mutex::Autolock lock(mMutex);
1881 IGraphicBufferProducer::QueueBufferOutput output;
1882 mReportRemovedBuffers = reportBufferRemoval;
1883 int err = mGraphicBufferProducer->connect(listener, api, mProducerControlledByApp, &output);
1884 if (err == NO_ERROR) {
1885 mDefaultWidth = output.width;
1886 mDefaultHeight = output.height;
1887 mNextFrameNumber = output.nextFrameNumber;
1888 mMaxBufferCount = output.maxBufferCount;
1889
1890 // Ignore transform hint if sticky transform is set or transform to display inverse flag is
1891 // set. Transform hint should be ignored if the client is expected to always submit buffers
1892 // in the same orientation.
1893 if (mStickyTransform == 0 && !transformToDisplayInverse()) {
1894 mTransformHint = output.transformHint;
1895 }
1896
1897 mConsumerRunningBehind = (output.numPendingBuffers >= 2);
1898 }
1899 if (!err && api == NATIVE_WINDOW_API_CPU) {
1900 mConnectedToCpu = true;
1901 // Clear the dirty region in case we're switching from a non-CPU API
1902 mDirtyRegion.clear();
1903 } else if (!err) {
1904 // Initialize the dirty region for tracking surface damage
1905 mDirtyRegion = Region::INVALID_REGION;
1906 }
1907
1908 return err;
1909 }
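// Illustrative sketch of a typical connect/disconnect pairing (listener and
// API are examples only):
//
//     sp<Surface> s = /* obtained from SurfaceControl or elsewhere */;
//     if (s->connect(NATIVE_WINDOW_API_EGL, new StubProducerListener()) ==
//             NO_ERROR) {
//         // ... dequeue/queue buffers ...
//         s->disconnect(NATIVE_WINDOW_API_EGL);
//     }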
1910
1911
disconnect(int api,IGraphicBufferProducer::DisconnectMode mode)1912 int Surface::disconnect(int api, IGraphicBufferProducer::DisconnectMode mode) {
1913 ATRACE_CALL();
1914 ALOGV("Surface::disconnect");
1915 Mutex::Autolock lock(mMutex);
1916 mRemovedBuffers.clear();
1917 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
1918 mSharedBufferHasBeenQueued = false;
1919 freeAllBuffers();
1920 int err = mGraphicBufferProducer->disconnect(api, mode);
1921 if (!err) {
1922 mReqFormat = 0;
1923 mReqWidth = 0;
1924 mReqHeight = 0;
1925 mReqUsage = 0;
1926 mCrop.clear();
1927 mScalingMode = NATIVE_WINDOW_SCALING_MODE_FREEZE;
1928 mTransform = 0;
1929 mStickyTransform = 0;
1930 mAutoPrerotation = false;
1931 mEnableFrameTimestamps = false;
1932 mMaxBufferCount = NUM_BUFFER_SLOTS;
1933
1934 if (api == NATIVE_WINDOW_API_CPU) {
1935 mConnectedToCpu = false;
1936 }
1937 }
1938 return err;
1939 }
1940
detachNextBuffer(sp<GraphicBuffer> * outBuffer,sp<Fence> * outFence)1941 int Surface::detachNextBuffer(sp<GraphicBuffer>* outBuffer,
1942 sp<Fence>* outFence) {
1943 ATRACE_CALL();
1944 ALOGV("Surface::detachNextBuffer");
1945
1946 if (outBuffer == nullptr || outFence == nullptr) {
1947 return BAD_VALUE;
1948 }
1949
1950 Mutex::Autolock lock(mMutex);
1951 if (mReportRemovedBuffers) {
1952 mRemovedBuffers.clear();
1953 }
1954
1955 sp<GraphicBuffer> buffer(nullptr);
1956 sp<Fence> fence(nullptr);
1957 status_t result = mGraphicBufferProducer->detachNextBuffer(
1958 &buffer, &fence);
1959 if (result != NO_ERROR) {
1960 return result;
1961 }
1962
1963 *outBuffer = buffer;
1964 if (fence != nullptr && fence->isValid()) {
1965 *outFence = fence;
1966 } else {
1967 *outFence = Fence::NO_FENCE;
1968 }
1969
1970 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
1971 if (mSlots[i].buffer != nullptr &&
1972 mSlots[i].buffer->getId() == buffer->getId()) {
1973 if (mReportRemovedBuffers) {
1974 mRemovedBuffers.push_back(mSlots[i].buffer);
1975 }
1976 mSlots[i].buffer = nullptr;
1977 }
1978 }
1979
1980 return NO_ERROR;
1981 }
1982
attachBuffer(ANativeWindowBuffer * buffer)1983 int Surface::attachBuffer(ANativeWindowBuffer* buffer)
1984 {
1985 ATRACE_CALL();
1986 ALOGV("Surface::attachBuffer");
1987
1988 Mutex::Autolock lock(mMutex);
1989 if (mReportRemovedBuffers) {
1990 mRemovedBuffers.clear();
1991 }
1992
1993 sp<GraphicBuffer> graphicBuffer(static_cast<GraphicBuffer*>(buffer));
1994 uint32_t priorGeneration = graphicBuffer->mGenerationNumber;
1995 graphicBuffer->mGenerationNumber = mGenerationNumber;
1996 int32_t attachedSlot = -1;
1997 status_t result = mGraphicBufferProducer->attachBuffer(&attachedSlot, graphicBuffer);
1998 if (result != NO_ERROR) {
1999 ALOGE("attachBuffer: IGraphicBufferProducer call failed (%d)", result);
2000 graphicBuffer->mGenerationNumber = priorGeneration;
2001 return result;
2002 }
2003 if (mReportRemovedBuffers && (mSlots[attachedSlot].buffer != nullptr)) {
2004 mRemovedBuffers.push_back(mSlots[attachedSlot].buffer);
2005 }
2006 mSlots[attachedSlot].buffer = graphicBuffer;
2007 mDequeuedSlots.insert(attachedSlot);
2008
2009 return NO_ERROR;
2010 }
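// Illustrative sketch: detachNextBuffer() and attachBuffer() can be used
// together to migrate a buffer from one Surface to another (both surfaces are
// assumed to be connected and compatible):
//
//     sp<GraphicBuffer> buffer;
//     sp<Fence> fence;
//     if (source->detachNextBuffer(&buffer, &fence) == NO_ERROR) {
//         target->attachBuffer(buffer->getNativeBuffer());
//     }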
2011
setUsage(uint64_t reqUsage)2012 int Surface::setUsage(uint64_t reqUsage)
2013 {
2014 ALOGV("Surface::setUsage");
2015 Mutex::Autolock lock(mMutex);
2016 if (reqUsage != mReqUsage) {
2017 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2018 }
2019 mReqUsage = reqUsage;
2020 return OK;
2021 }
2022
setCrop(Rect const * rect)2023 int Surface::setCrop(Rect const* rect)
2024 {
2025 ATRACE_CALL();
2026
2027 Rect realRect(Rect::EMPTY_RECT);
2028 if (rect == nullptr || rect->isEmpty()) {
2029 realRect.clear();
2030 } else {
2031 realRect = *rect;
2032 }
2033
2034 ALOGV("Surface::setCrop rect=[%d %d %d %d]",
2035 realRect.left, realRect.top, realRect.right, realRect.bottom);
2036
2037 Mutex::Autolock lock(mMutex);
2038 mCrop = realRect;
2039 return NO_ERROR;
2040 }
2041
setBufferCount(int bufferCount)2042 int Surface::setBufferCount(int bufferCount)
2043 {
2044 ATRACE_CALL();
2045 ALOGV("Surface::setBufferCount");
2046 Mutex::Autolock lock(mMutex);
2047
2048 status_t err = NO_ERROR;
2049 if (bufferCount == 0) {
2050 err = mGraphicBufferProducer->setMaxDequeuedBufferCount(1);
2051 } else {
2052 int minUndequeuedBuffers = 0;
2053 err = mGraphicBufferProducer->query(
2054 NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
2055 if (err == NO_ERROR) {
2056 err = mGraphicBufferProducer->setMaxDequeuedBufferCount(
2057 bufferCount - minUndequeuedBuffers);
2058 }
2059 }
2060
2061     ALOGE_IF(err, "Surface::setBufferCount(%d): setMaxDequeuedBufferCount returned %s",
2062             bufferCount, strerror(-err));
2063
2064 return err;
2065 }
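// Example of the arithmetic in setBufferCount(): if the consumer reports
// NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS == 2, then setBufferCount(5) translates
// to setMaxDequeuedBufferCount(3); setBufferCount(0) restores the default of
// one dequeued buffer.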
2066
setMaxDequeuedBufferCount(int maxDequeuedBuffers)2067 int Surface::setMaxDequeuedBufferCount(int maxDequeuedBuffers) {
2068 ATRACE_CALL();
2069 ALOGV("Surface::setMaxDequeuedBufferCount");
2070 Mutex::Autolock lock(mMutex);
2071
2072 status_t err = mGraphicBufferProducer->setMaxDequeuedBufferCount(
2073 maxDequeuedBuffers);
2074 ALOGE_IF(err, "IGraphicBufferProducer::setMaxDequeuedBufferCount(%d) "
2075 "returned %s", maxDequeuedBuffers, strerror(-err));
2076
2077 return err;
2078 }
2079
setAsyncMode(bool async)2080 int Surface::setAsyncMode(bool async) {
2081 ATRACE_CALL();
2082 ALOGV("Surface::setAsyncMode");
2083 Mutex::Autolock lock(mMutex);
2084
2085 status_t err = mGraphicBufferProducer->setAsyncMode(async);
2086 ALOGE_IF(err, "IGraphicBufferProducer::setAsyncMode(%d) returned %s",
2087 async, strerror(-err));
2088
2089 return err;
2090 }
2091
setSharedBufferMode(bool sharedBufferMode)2092 int Surface::setSharedBufferMode(bool sharedBufferMode) {
2093 ATRACE_CALL();
2094 ALOGV("Surface::setSharedBufferMode (%d)", sharedBufferMode);
2095 Mutex::Autolock lock(mMutex);
2096
2097 status_t err = mGraphicBufferProducer->setSharedBufferMode(
2098 sharedBufferMode);
2099 if (err == NO_ERROR) {
2100 mSharedBufferMode = sharedBufferMode;
2101 }
2102     ALOGE_IF(err, "IGraphicBufferProducer::setSharedBufferMode(%d) returned "
2103             "%s", sharedBufferMode, strerror(-err));
2104
2105 return err;
2106 }
2107
setAutoRefresh(bool autoRefresh)2108 int Surface::setAutoRefresh(bool autoRefresh) {
2109 ATRACE_CALL();
2110 ALOGV("Surface::setAutoRefresh (%d)", autoRefresh);
2111 Mutex::Autolock lock(mMutex);
2112
2113 status_t err = mGraphicBufferProducer->setAutoRefresh(autoRefresh);
2114 if (err == NO_ERROR) {
2115 mAutoRefresh = autoRefresh;
2116 }
2117 ALOGE_IF(err, "IGraphicBufferProducer::setAutoRefresh(%d) returned %s",
2118 autoRefresh, strerror(-err));
2119 return err;
2120 }
2121
setBuffersDimensions(uint32_t width,uint32_t height)2122 int Surface::setBuffersDimensions(uint32_t width, uint32_t height)
2123 {
2124 ATRACE_CALL();
2125 ALOGV("Surface::setBuffersDimensions");
2126
2127 if ((width && !height) || (!width && height))
2128 return BAD_VALUE;
2129
2130 Mutex::Autolock lock(mMutex);
2131 if (width != mReqWidth || height != mReqHeight) {
2132 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2133 }
2134 mReqWidth = width;
2135 mReqHeight = height;
2136 return NO_ERROR;
2137 }
2138
setBuffersUserDimensions(uint32_t width,uint32_t height)2139 int Surface::setBuffersUserDimensions(uint32_t width, uint32_t height)
2140 {
2141 ATRACE_CALL();
2142 ALOGV("Surface::setBuffersUserDimensions");
2143
2144 if ((width && !height) || (!width && height))
2145 return BAD_VALUE;
2146
2147 Mutex::Autolock lock(mMutex);
2148 if (width != mUserWidth || height != mUserHeight) {
2149 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2150 }
2151 mUserWidth = width;
2152 mUserHeight = height;
2153 return NO_ERROR;
2154 }
2155
setBuffersFormat(PixelFormat format)2156 int Surface::setBuffersFormat(PixelFormat format)
2157 {
2158 ALOGV("Surface::setBuffersFormat");
2159
2160 Mutex::Autolock lock(mMutex);
2161 if (format != mReqFormat) {
2162 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
2163 }
2164 mReqFormat = format;
2165 return NO_ERROR;
2166 }
2167
setScalingMode(int mode)2168 int Surface::setScalingMode(int mode)
2169 {
2170 ATRACE_CALL();
2171 ALOGV("Surface::setScalingMode(%d)", mode);
2172
2173 switch (mode) {
2174 case NATIVE_WINDOW_SCALING_MODE_FREEZE:
2175 case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
2176 case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
2177 case NATIVE_WINDOW_SCALING_MODE_NO_SCALE_CROP:
2178 break;
2179 default:
2180 ALOGE("unknown scaling mode: %d", mode);
2181 return BAD_VALUE;
2182 }
2183
2184 Mutex::Autolock lock(mMutex);
2185 mScalingMode = mode;
2186 return NO_ERROR;
2187 }
2188
setBuffersTransform(uint32_t transform)2189 int Surface::setBuffersTransform(uint32_t transform)
2190 {
2191 ATRACE_CALL();
2192 ALOGV("Surface::setBuffersTransform");
2193 Mutex::Autolock lock(mMutex);
2194     // Ensure NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY is sticky. If the client sets the flag, do not
2195     // override it until the surface is disconnected. This is a temporary workaround for camera
2196     // until it switches to using Buffer State Layers. Currently, if the client sets the buffer
2197     // transform, it may be overridden when the producer sets the buffer transform.
2198 if (transformToDisplayInverse()) {
2199 transform |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
2200 }
2201 mTransform = transform;
2202 return NO_ERROR;
2203 }
2204
setBuffersStickyTransform(uint32_t transform)2205 int Surface::setBuffersStickyTransform(uint32_t transform)
2206 {
2207 ATRACE_CALL();
2208 ALOGV("Surface::setBuffersStickyTransform");
2209 Mutex::Autolock lock(mMutex);
2210 mStickyTransform = transform;
2211 return NO_ERROR;
2212 }
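// Illustrative sketch: the regular transform applies to subsequently queued
// buffers, while the sticky transform is (roughly) combined with the
// per-buffer transform on the consumer side, e.g.:
//
//     surface->setBuffersTransform(NATIVE_WINDOW_TRANSFORM_ROT_90);
//     surface->setBuffersStickyTransform(NATIVE_WINDOW_TRANSFORM_FLIP_H);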
2213
setBuffersTimestamp(int64_t timestamp)2214 int Surface::setBuffersTimestamp(int64_t timestamp)
2215 {
2216 ALOGV("Surface::setBuffersTimestamp");
2217 Mutex::Autolock lock(mMutex);
2218 mTimestamp = timestamp;
2219 return NO_ERROR;
2220 }
2221
setBuffersDataSpace(Dataspace dataSpace)2222 int Surface::setBuffersDataSpace(Dataspace dataSpace)
2223 {
2224 ALOGV("Surface::setBuffersDataSpace");
2225 Mutex::Autolock lock(mMutex);
2226 mDataSpace = dataSpace;
2227 return NO_ERROR;
2228 }
2229
setBuffersSmpte2086Metadata(const android_smpte2086_metadata * metadata)2230 int Surface::setBuffersSmpte2086Metadata(const android_smpte2086_metadata* metadata) {
2231 ALOGV("Surface::setBuffersSmpte2086Metadata");
2232 Mutex::Autolock lock(mMutex);
2233 if (metadata) {
2234 mHdrMetadata.smpte2086 = *metadata;
2235 mHdrMetadata.validTypes |= HdrMetadata::SMPTE2086;
2236 } else {
2237 mHdrMetadata.validTypes &= ~HdrMetadata::SMPTE2086;
2238 }
2239 return NO_ERROR;
2240 }
2241
setBuffersCta8613Metadata(const android_cta861_3_metadata * metadata)2242 int Surface::setBuffersCta8613Metadata(const android_cta861_3_metadata* metadata) {
2243 ALOGV("Surface::setBuffersCta8613Metadata");
2244 Mutex::Autolock lock(mMutex);
2245 if (metadata) {
2246 mHdrMetadata.cta8613 = *metadata;
2247 mHdrMetadata.validTypes |= HdrMetadata::CTA861_3;
2248 } else {
2249 mHdrMetadata.validTypes &= ~HdrMetadata::CTA861_3;
2250 }
2251 return NO_ERROR;
2252 }
2253
setBuffersHdr10PlusMetadata(const size_t size,const uint8_t * metadata)2254 int Surface::setBuffersHdr10PlusMetadata(const size_t size, const uint8_t* metadata) {
2255     ALOGV("Surface::setBuffersHdr10PlusMetadata");
2256 Mutex::Autolock lock(mMutex);
2257 if (size > 0) {
2258 mHdrMetadata.hdr10plus.assign(metadata, metadata + size);
2259 mHdrMetadata.validTypes |= HdrMetadata::HDR10PLUS;
2260 } else {
2261 mHdrMetadata.validTypes &= ~HdrMetadata::HDR10PLUS;
2262 mHdrMetadata.hdr10plus.clear();
2263 }
2264 return NO_ERROR;
2265 }
2266
getBuffersDataSpace()2267 Dataspace Surface::getBuffersDataSpace() {
2268 ALOGV("Surface::getBuffersDataSpace");
2269 Mutex::Autolock lock(mMutex);
2270 return mDataSpace;
2271 }
2272
freeAllBuffers()2273 void Surface::freeAllBuffers() {
2274 if (!mDequeuedSlots.empty()) {
2275 ALOGE("%s: %zu buffers were freed while being dequeued!",
2276 __FUNCTION__, mDequeuedSlots.size());
2277 }
2278 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
2279 mSlots[i].buffer = nullptr;
2280 }
2281 }
2282
getAndFlushBuffersFromSlots(const std::vector<int32_t> & slots,std::vector<sp<GraphicBuffer>> * outBuffers)2283 status_t Surface::getAndFlushBuffersFromSlots(const std::vector<int32_t>& slots,
2284 std::vector<sp<GraphicBuffer>>* outBuffers) {
2285 ALOGV("Surface::getAndFlushBuffersFromSlots");
2286 for (int32_t i : slots) {
2287 if (i < 0 || i >= NUM_BUFFER_SLOTS) {
2288 ALOGE("%s: Invalid slotIndex: %d", __FUNCTION__, i);
2289 return BAD_VALUE;
2290 }
2291 }
2292
2293 Mutex::Autolock lock(mMutex);
2294 for (int32_t i : slots) {
2295 if (mSlots[i].buffer == nullptr) {
2296 ALOGW("%s: Discarded slot %d doesn't contain buffer!", __FUNCTION__, i);
2297 continue;
2298 }
2299 // Don't flush currently dequeued buffers
2300 if (mDequeuedSlots.count(i) > 0) {
2301 continue;
2302 }
2303 outBuffers->push_back(mSlots[i].buffer);
2304 mSlots[i].buffer = nullptr;
2305 }
2306 return OK;
2307 }
2308
setSurfaceDamage(android_native_rect_t * rects,size_t numRects)2309 void Surface::setSurfaceDamage(android_native_rect_t* rects, size_t numRects) {
2310 ATRACE_CALL();
2311 ALOGV("Surface::setSurfaceDamage");
2312 Mutex::Autolock lock(mMutex);
2313
2314 if (mConnectedToCpu || numRects == 0) {
2315 mDirtyRegion = Region::INVALID_REGION;
2316 return;
2317 }
2318
2319 mDirtyRegion.clear();
2320 for (size_t r = 0; r < numRects; ++r) {
2321         // We intentionally flip top and bottom here: because they're
2322         // specified with a bottom-left origin, top > bottom, which fails
2323         // validation in the Region class. We will fix this up when we flip to a
2324         // top-left origin in queueBuffer.
2325 Rect rect(rects[r].left, rects[r].bottom, rects[r].right, rects[r].top);
2326 mDirtyRegion.orSelf(rect);
2327 }
2328 }
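// Illustrative sketch: surface damage is supplied in buffer coordinates with a
// bottom-left origin (hence the top/bottom flip above), e.g. assuming the
// <system/window.h> helper:
//
//     android_native_rect_t damage = { /*left*/ 0, /*top*/ 32,
//                                      /*right*/ 64, /*bottom*/ 0 };
//     native_window_set_surface_damage(window, &damage, 1);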
2329
2330 // ----------------------------------------------------------------------
2331 // the lock/unlock APIs must be used from the same thread
2332
copyBlt(const sp<GraphicBuffer> & dst,const sp<GraphicBuffer> & src,const Region & reg,int * dstFenceFd)2333 static status_t copyBlt(
2334 const sp<GraphicBuffer>& dst,
2335 const sp<GraphicBuffer>& src,
2336 const Region& reg,
2337 int *dstFenceFd)
2338 {
2339 if (dst->getId() == src->getId())
2340 return OK;
2341
2342     // src and dst width, height and format must be identical; no verification
2343     // is done here.
2344 status_t err;
2345 uint8_t* src_bits = nullptr;
2346 err = src->lock(GRALLOC_USAGE_SW_READ_OFTEN, reg.bounds(),
2347 reinterpret_cast<void**>(&src_bits));
2348 ALOGE_IF(err, "error locking src buffer %s", strerror(-err));
2349
2350 uint8_t* dst_bits = nullptr;
2351 err = dst->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, reg.bounds(),
2352 reinterpret_cast<void**>(&dst_bits), *dstFenceFd);
2353 ALOGE_IF(err, "error locking dst buffer %s", strerror(-err));
2354 *dstFenceFd = -1;
2355
2356 Region::const_iterator head(reg.begin());
2357 Region::const_iterator tail(reg.end());
2358 if (head != tail && src_bits && dst_bits) {
2359 const size_t bpp = bytesPerPixel(src->format);
2360 const size_t dbpr = static_cast<uint32_t>(dst->stride) * bpp;
2361 const size_t sbpr = static_cast<uint32_t>(src->stride) * bpp;
2362
2363 while (head != tail) {
2364 const Rect& r(*head++);
2365 int32_t h = r.height();
2366 if (h <= 0) continue;
2367 size_t size = static_cast<uint32_t>(r.width()) * bpp;
2368 uint8_t const * s = src_bits +
2369 static_cast<uint32_t>(r.left + src->stride * r.top) * bpp;
2370 uint8_t * d = dst_bits +
2371 static_cast<uint32_t>(r.left + dst->stride * r.top) * bpp;
2372 if (dbpr==sbpr && size==sbpr) {
2373 size *= static_cast<size_t>(h);
2374 h = 1;
2375 }
2376 do {
2377 memcpy(d, s, size);
2378 d += dbpr;
2379 s += sbpr;
2380 } while (--h > 0);
2381 }
2382 }
2383
2384 if (src_bits)
2385 src->unlock();
2386
2387 if (dst_bits)
2388 dst->unlockAsync(dstFenceFd);
2389
2390 return err;
2391 }
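// Note on copyBlt(): when the destination and source row pitches match and the
// rect spans a full row (size == sbpr), the per-row loop above collapses into a
// single memcpy covering all h rows; otherwise each row is copied individually
// at the correct stride offsets.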
2392
2393 // ----------------------------------------------------------------------------
2394
lock(ANativeWindow_Buffer * outBuffer,ARect * inOutDirtyBounds)2395 status_t Surface::lock(
2396 ANativeWindow_Buffer* outBuffer, ARect* inOutDirtyBounds)
2397 {
2398 if (mLockedBuffer != nullptr) {
2399 ALOGE("Surface::lock failed, already locked");
2400 return INVALID_OPERATION;
2401 }
2402
2403 if (!mConnectedToCpu) {
2404 int err = Surface::connect(NATIVE_WINDOW_API_CPU);
2405 if (err) {
2406 return err;
2407 }
2408 // we're intending to do software rendering from this point
2409 setUsage(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN);
2410 }
2411
2412 ANativeWindowBuffer* out;
2413 int fenceFd = -1;
2414 status_t err = dequeueBuffer(&out, &fenceFd);
2415 ALOGE_IF(err, "dequeueBuffer failed (%s)", strerror(-err));
2416 if (err == NO_ERROR) {
2417 sp<GraphicBuffer> backBuffer(GraphicBuffer::getSelf(out));
2418 const Rect bounds(backBuffer->width, backBuffer->height);
2419
2420 Region newDirtyRegion;
2421 if (inOutDirtyBounds) {
2422 newDirtyRegion.set(static_cast<Rect const&>(*inOutDirtyBounds));
2423 newDirtyRegion.andSelf(bounds);
2424 } else {
2425 newDirtyRegion.set(bounds);
2426 }
2427
2428 // figure out if we can copy the frontbuffer back
2429 const sp<GraphicBuffer>& frontBuffer(mPostedBuffer);
2430 const bool canCopyBack = (frontBuffer != nullptr &&
2431 backBuffer->width == frontBuffer->width &&
2432 backBuffer->height == frontBuffer->height &&
2433 backBuffer->format == frontBuffer->format);
2434
2435 if (canCopyBack) {
2436 // copy the area that is invalid and not repainted this round
2437 const Region copyback(mDirtyRegion.subtract(newDirtyRegion));
2438 if (!copyback.isEmpty()) {
2439 copyBlt(backBuffer, frontBuffer, copyback, &fenceFd);
2440 }
2441 } else {
2442 // if we can't copy-back anything, modify the user's dirty
2443 // region to make sure they redraw the whole buffer
2444 newDirtyRegion.set(bounds);
2445 mDirtyRegion.clear();
2446 Mutex::Autolock lock(mMutex);
2447 for (size_t i=0 ; i<NUM_BUFFER_SLOTS ; i++) {
2448 mSlots[i].dirtyRegion.clear();
2449 }
2450 }
2451
2452
2453 { // scope for the lock
2454 Mutex::Autolock lock(mMutex);
2455 int backBufferSlot(getSlotFromBufferLocked(backBuffer.get()));
2456 if (backBufferSlot >= 0) {
2457 Region& dirtyRegion(mSlots[backBufferSlot].dirtyRegion);
2458 mDirtyRegion.subtract(dirtyRegion);
2459 dirtyRegion = newDirtyRegion;
2460 }
2461 }
2462
2463 mDirtyRegion.orSelf(newDirtyRegion);
2464 if (inOutDirtyBounds) {
2465 *inOutDirtyBounds = newDirtyRegion.getBounds();
2466 }
2467
2468 void* vaddr;
2469 status_t res = backBuffer->lockAsync(
2470 GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
2471 newDirtyRegion.bounds(), &vaddr, fenceFd);
2472
2473 ALOGW_IF(res, "failed locking buffer (handle = %p)",
2474 backBuffer->handle);
2475
2476 if (res != 0) {
2477 err = INVALID_OPERATION;
2478 } else {
2479 mLockedBuffer = backBuffer;
2480 outBuffer->width = backBuffer->width;
2481 outBuffer->height = backBuffer->height;
2482 outBuffer->stride = backBuffer->stride;
2483 outBuffer->format = backBuffer->format;
2484 outBuffer->bits = vaddr;
2485 }
2486 }
2487 return err;
2488 }
2489
unlockAndPost()2490 status_t Surface::unlockAndPost()
2491 {
2492 if (mLockedBuffer == nullptr) {
2493 ALOGE("Surface::unlockAndPost failed, no locked buffer");
2494 return INVALID_OPERATION;
2495 }
2496
2497 int fd = -1;
2498 status_t err = mLockedBuffer->unlockAsync(&fd);
2499 ALOGE_IF(err, "failed unlocking buffer (%p)", mLockedBuffer->handle);
2500
2501 err = queueBuffer(mLockedBuffer.get(), fd);
2502 ALOGE_IF(err, "queueBuffer (handle=%p) failed (%s)",
2503 mLockedBuffer->handle, strerror(-err));
2504
2505 mPostedBuffer = mLockedBuffer;
2506 mLockedBuffer = nullptr;
2507 return err;
2508 }
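// Illustrative sketch of the software-rendering path implemented by lock() and
// unlockAndPost() above (pixel writes are the caller's responsibility):
//
//     ANativeWindow_Buffer info;
//     ARect dirty = {0, 0, 64, 64};
//     if (surface->lock(&info, &dirty) == NO_ERROR) {
//         // write pixels into info.bits, one row every info.stride pixels,
//         // in the format reported by info.format
//         surface->unlockAndPost();
//     }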
2509
waitForNextFrame(uint64_t lastFrame,nsecs_t timeout)2510 bool Surface::waitForNextFrame(uint64_t lastFrame, nsecs_t timeout) {
2511 Mutex::Autolock lock(mMutex);
2512 if (mNextFrameNumber > lastFrame) {
2513 return true;
2514 }
2515 return mQueueBufferCondition.waitRelative(mMutex, timeout) == OK;
2516 }
2517
getUniqueId(uint64_t * outId) const2518 status_t Surface::getUniqueId(uint64_t* outId) const {
2519 Mutex::Autolock lock(mMutex);
2520 return mGraphicBufferProducer->getUniqueId(outId);
2521 }
2522
getConsumerUsage(uint64_t * outUsage) const2523 int Surface::getConsumerUsage(uint64_t* outUsage) const {
2524 Mutex::Autolock lock(mMutex);
2525 return mGraphicBufferProducer->getConsumerUsage(outUsage);
2526 }
2527
getAndFlushRemovedBuffers(std::vector<sp<GraphicBuffer>> * out)2528 status_t Surface::getAndFlushRemovedBuffers(std::vector<sp<GraphicBuffer>>* out) {
2529 if (out == nullptr) {
2530 ALOGE("%s: out must not be null!", __FUNCTION__);
2531 return BAD_VALUE;
2532 }
2533
2534 Mutex::Autolock lock(mMutex);
2535 *out = mRemovedBuffers;
2536 mRemovedBuffers.clear();
2537 return OK;
2538 }
2539
attachAndQueueBufferWithDataspace(Surface * surface,sp<GraphicBuffer> buffer,Dataspace dataspace)2540 status_t Surface::attachAndQueueBufferWithDataspace(Surface* surface, sp<GraphicBuffer> buffer,
2541 Dataspace dataspace) {
2542 if (buffer == nullptr) {
2543 return BAD_VALUE;
2544 }
2545 int err = static_cast<ANativeWindow*>(surface)->perform(surface, NATIVE_WINDOW_API_CONNECT,
2546 NATIVE_WINDOW_API_CPU);
2547 if (err != OK) {
2548 return err;
2549 }
2550 ui::Dataspace tmpDataspace = surface->getBuffersDataSpace();
2551 err = surface->setBuffersDataSpace(dataspace);
2552 if (err != OK) {
2553 return err;
2554 }
2555 err = surface->attachBuffer(buffer->getNativeBuffer());
2556 if (err != OK) {
2557 return err;
2558 }
2559 err = static_cast<ANativeWindow*>(surface)->queueBuffer(surface, buffer->getNativeBuffer(), -1);
2560 if (err != OK) {
2561 return err;
2562 }
2563 err = surface->setBuffersDataSpace(tmpDataspace);
2564 if (err != OK) {
2565 return err;
2566 }
2567 err = surface->disconnect(NATIVE_WINDOW_API_CPU);
2568 return err;
2569 }
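// Illustrative usage of the helper above (buffer allocation parameters and the
// dataspace are examples only):
//
//     sp<GraphicBuffer> buffer = new GraphicBuffer(
//             320, 240, PIXEL_FORMAT_RGBA_8888,
//             GraphicBuffer::USAGE_SW_WRITE_OFTEN);
//     Surface::attachAndQueueBufferWithDataspace(surface.get(), buffer,
//                                                ui::Dataspace::V0_SRGB);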
2570
setAutoPrerotation(bool autoPrerotation)2571 int Surface::setAutoPrerotation(bool autoPrerotation) {
2572 ATRACE_CALL();
2573 ALOGV("Surface::setAutoPrerotation (%d)", autoPrerotation);
2574 Mutex::Autolock lock(mMutex);
2575
2576 if (mAutoPrerotation == autoPrerotation) {
2577 return OK;
2578 }
2579
2580 status_t err = mGraphicBufferProducer->setAutoPrerotation(autoPrerotation);
2581 if (err == NO_ERROR) {
2582 mAutoPrerotation = autoPrerotation;
2583 }
2584 ALOGE_IF(err, "IGraphicBufferProducer::setAutoPrerotation(%d) returned %s", autoPrerotation,
2585 strerror(-err));
2586 return err;
2587 }
2588
onBuffersDiscarded(const std::vector<int32_t> & slots)2589 void Surface::ProducerListenerProxy::onBuffersDiscarded(const std::vector<int32_t>& slots) {
2590 ATRACE_CALL();
2591 sp<Surface> parent = mParent.promote();
2592 if (parent == nullptr) {
2593 return;
2594 }
2595
2596 std::vector<sp<GraphicBuffer>> discardedBufs;
2597 status_t res = parent->getAndFlushBuffersFromSlots(slots, &discardedBufs);
2598 if (res != OK) {
2599 ALOGE("%s: Failed to get buffers from slots: %s(%d)", __FUNCTION__,
2600 strerror(-res), res);
2601 return;
2602 }
2603
2604 mSurfaceListener->onBuffersDiscarded(discardedBufs);
2605 }
2606
setFrameRate(float frameRate,int8_t compatibility,int8_t changeFrameRateStrategy)2607 status_t Surface::setFrameRate(float frameRate, int8_t compatibility,
2608 int8_t changeFrameRateStrategy) {
2609 ATRACE_CALL();
2610 ALOGV("Surface::setFrameRate");
2611
2612 if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
2613 "Surface::setFrameRate")) {
2614 return BAD_VALUE;
2615 }
2616
2617 return composerService()->setFrameRate(mGraphicBufferProducer, frameRate, compatibility,
2618 changeFrameRateStrategy);
2619 }
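// Illustrative sketch (constants assumed from <android/native_window.h>;
// availability depends on API level):
//
//     surface->setFrameRate(30.0f,
//             ANATIVEWINDOW_FRAME_RATE_COMPATIBILITY_DEFAULT,
//             ANATIVEWINDOW_CHANGE_FRAME_RATE_ONLY_IF_SEAMLESS);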
2620
setFrameTimelineInfo(const FrameTimelineInfo & frameTimelineInfo)2621 status_t Surface::setFrameTimelineInfo(const FrameTimelineInfo& frameTimelineInfo) {
2622 return composerService()->setFrameTimelineInfo(mGraphicBufferProducer, frameTimelineInfo);
2623 }
2624
2625 } // namespace android
2626