1 /*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Surface"
18 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
19 //#define LOG_NDEBUG 0
20
21 #include <gui/Surface.h>
22
23 #include <condition_variable>
24 #include <deque>
25 #include <mutex>
26 #include <thread>
27
28 #include <inttypes.h>
29
30 #include <android/gui/DisplayStatInfo.h>
31 #include <android/native_window.h>
32
33 #include <utils/Log.h>
34 #include <utils/Trace.h>
35 #include <utils/NativeHandle.h>
36
37 #include <ui/DynamicDisplayInfo.h>
38 #include <ui/Fence.h>
39 #include <ui/GraphicBuffer.h>
40 #include <ui/Region.h>
41
42 #include <gui/BufferItem.h>
43 #include <gui/IProducerListener.h>
44
45 #include <gui/ISurfaceComposer.h>
46 #include <gui/LayerState.h>
47 #include <private/gui/ComposerService.h>
48 #include <private/gui/ComposerServiceAIDL.h>
49
50 namespace android {
51
52 using ui::Dataspace;
53
54 namespace {
55
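// Opcodes that (un)register an ANativeWindow interceptor via perform(). hook_perform()
// must not take shared ownership of mInterceptorMutex for these, since the registration
// path acquires it exclusively (see hook_perform below).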
56 bool isInterceptorRegistrationOp(int op) {
57 return op == NATIVE_WINDOW_SET_CANCEL_INTERCEPTOR ||
58 op == NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR ||
59 op == NATIVE_WINDOW_SET_PERFORM_INTERCEPTOR ||
60 op == NATIVE_WINDOW_SET_QUEUE_INTERCEPTOR ||
61 op == NATIVE_WINDOW_SET_QUERY_INTERCEPTOR;
62 }
63
64 } // namespace
65
66 Surface::Surface(const sp<IGraphicBufferProducer>& bufferProducer, bool controlledByApp,
67 const sp<IBinder>& surfaceControlHandle)
68 : mGraphicBufferProducer(bufferProducer),
69 mCrop(Rect::EMPTY_RECT),
70 mBufferAge(0),
71 mGenerationNumber(0),
72 mSharedBufferMode(false),
73 mAutoRefresh(false),
74 mAutoPrerotation(false),
75 mSharedBufferSlot(BufferItem::INVALID_BUFFER_SLOT),
76 mSharedBufferHasBeenQueued(false),
77 mQueriedSupportedTimestamps(false),
78 mFrameTimestampsSupportsPresent(false),
79 mEnableFrameTimestamps(false),
80 mFrameEventHistory(std::make_unique<ProducerFrameEventHistory>()) {
81 // Initialize the ANativeWindow function pointers.
82 ANativeWindow::setSwapInterval = hook_setSwapInterval;
83 ANativeWindow::dequeueBuffer = hook_dequeueBuffer;
84 ANativeWindow::cancelBuffer = hook_cancelBuffer;
85 ANativeWindow::queueBuffer = hook_queueBuffer;
86 ANativeWindow::query = hook_query;
87 ANativeWindow::perform = hook_perform;
88
89 ANativeWindow::dequeueBuffer_DEPRECATED = hook_dequeueBuffer_DEPRECATED;
90 ANativeWindow::cancelBuffer_DEPRECATED = hook_cancelBuffer_DEPRECATED;
91 ANativeWindow::lockBuffer_DEPRECATED = hook_lockBuffer_DEPRECATED;
92 ANativeWindow::queueBuffer_DEPRECATED = hook_queueBuffer_DEPRECATED;
93
94 const_cast<int&>(ANativeWindow::minSwapInterval) = 0;
95 const_cast<int&>(ANativeWindow::maxSwapInterval) = 1;
96
97 mReqWidth = 0;
98 mReqHeight = 0;
99 mReqFormat = 0;
100 mReqUsage = 0;
101 mTimestamp = NATIVE_WINDOW_TIMESTAMP_AUTO;
102 mDataSpace = Dataspace::UNKNOWN;
103 mScalingMode = NATIVE_WINDOW_SCALING_MODE_FREEZE;
104 mTransform = 0;
105 mStickyTransform = 0;
106 mDefaultWidth = 0;
107 mDefaultHeight = 0;
108 mUserWidth = 0;
109 mUserHeight = 0;
110 mTransformHint = 0;
111 mConsumerRunningBehind = false;
112 mConnectedToCpu = false;
113 mProducerControlledByApp = controlledByApp;
114 mSwapIntervalZero = false;
115 mMaxBufferCount = NUM_BUFFER_SLOTS;
116 mSurfaceControlHandle = surfaceControlHandle;
117 }
118
119 Surface::~Surface() {
120 if (mConnectedToCpu) {
121 Surface::disconnect(NATIVE_WINDOW_API_CPU);
122 }
123 }
124
125 sp<ISurfaceComposer> Surface::composerService() const {
126 return ComposerService::getComposerService();
127 }
128
129 sp<gui::ISurfaceComposer> Surface::composerServiceAIDL() const {
130 return ComposerServiceAIDL::getComposerService();
131 }
132
133 nsecs_t Surface::now() const {
134 return systemTime();
135 }
136
137 sp<IGraphicBufferProducer> Surface::getIGraphicBufferProducer() const {
138 return mGraphicBufferProducer;
139 }
140
141 void Surface::setSidebandStream(const sp<NativeHandle>& stream) {
142 mGraphicBufferProducer->setSidebandStream(stream);
143 }
144
145 void Surface::allocateBuffers() {
146 uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
147 uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
148 mGraphicBufferProducer->allocateBuffers(reqWidth, reqHeight,
149 mReqFormat, mReqUsage);
150 }
151
152 status_t Surface::setGenerationNumber(uint32_t generation) {
153 status_t result = mGraphicBufferProducer->setGenerationNumber(generation);
154 if (result == NO_ERROR) {
155 mGenerationNumber = generation;
156 }
157 return result;
158 }
159
160 uint64_t Surface::getNextFrameNumber() const {
161 Mutex::Autolock lock(mMutex);
162 return mNextFrameNumber;
163 }
164
165 String8 Surface::getConsumerName() const {
166 return mGraphicBufferProducer->getConsumerName();
167 }
168
169 status_t Surface::setDequeueTimeout(nsecs_t timeout) {
170 return mGraphicBufferProducer->setDequeueTimeout(timeout);
171 }
172
173 status_t Surface::getLastQueuedBuffer(sp<GraphicBuffer>* outBuffer,
174 sp<Fence>* outFence, float outTransformMatrix[16]) {
175 return mGraphicBufferProducer->getLastQueuedBuffer(outBuffer, outFence,
176 outTransformMatrix);
177 }
178
179 status_t Surface::getDisplayRefreshCycleDuration(nsecs_t* outRefreshDuration) {
180 ATRACE_CALL();
181
182 gui::DisplayStatInfo stats;
183 binder::Status status = composerServiceAIDL()->getDisplayStats(nullptr, &stats);
184 if (!status.isOk()) {
185 return status.transactionError();
186 }
187
188 *outRefreshDuration = stats.vsyncPeriod;
189
190 return NO_ERROR;
191 }
192
193 void Surface::enableFrameTimestamps(bool enable) {
194 Mutex::Autolock lock(mMutex);
195 // If going from disabled to enabled, get the initial values for
196 // compositor and display timing.
197 if (!mEnableFrameTimestamps && enable) {
198 FrameEventHistoryDelta delta;
199 mGraphicBufferProducer->getFrameTimestamps(&delta);
200 mFrameEventHistory->applyDelta(delta);
201 }
202 mEnableFrameTimestamps = enable;
203 }
204
205 status_t Surface::getCompositorTiming(
206 nsecs_t* compositeDeadline, nsecs_t* compositeInterval,
207 nsecs_t* compositeToPresentLatency) {
208 Mutex::Autolock lock(mMutex);
209 if (!mEnableFrameTimestamps) {
210 return INVALID_OPERATION;
211 }
212
213 if (compositeDeadline != nullptr) {
214 *compositeDeadline =
215 mFrameEventHistory->getNextCompositeDeadline(now());
216 }
217 if (compositeInterval != nullptr) {
218 *compositeInterval = mFrameEventHistory->getCompositeInterval();
219 }
220 if (compositeToPresentLatency != nullptr) {
221 *compositeToPresentLatency =
222 mFrameEventHistory->getCompositeToPresentLatency();
223 }
224 return NO_ERROR;
225 }
226
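// Returns true if any timestamp the caller asked for is consumer-side data that is
// still missing from the cached FrameEvents entry, meaning a fresh
// getFrameTimestamps() round-trip to the producer is required.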
227 static bool checkConsumerForUpdates(
228 const FrameEvents* e, const uint64_t lastFrameNumber,
229 const nsecs_t* outLatchTime,
230 const nsecs_t* outFirstRefreshStartTime,
231 const nsecs_t* outLastRefreshStartTime,
232 const nsecs_t* outGpuCompositionDoneTime,
233 const nsecs_t* outDisplayPresentTime,
234 const nsecs_t* outDequeueReadyTime,
235 const nsecs_t* outReleaseTime) {
236 bool checkForLatch = (outLatchTime != nullptr) && !e->hasLatchInfo();
237 bool checkForFirstRefreshStart = (outFirstRefreshStartTime != nullptr) &&
238 !e->hasFirstRefreshStartInfo();
239 bool checkForGpuCompositionDone = (outGpuCompositionDoneTime != nullptr) &&
240 !e->hasGpuCompositionDoneInfo();
241 bool checkForDisplayPresent = (outDisplayPresentTime != nullptr) &&
242 !e->hasDisplayPresentInfo();
243
244 // LastRefreshStart, DequeueReady, and Release are never available for the
245 // last frame.
246 bool checkForLastRefreshStart = (outLastRefreshStartTime != nullptr) &&
247 !e->hasLastRefreshStartInfo() &&
248 (e->frameNumber != lastFrameNumber);
249 bool checkForDequeueReady = (outDequeueReadyTime != nullptr) &&
250 !e->hasDequeueReadyInfo() && (e->frameNumber != lastFrameNumber);
251 bool checkForRelease = (outReleaseTime != nullptr) &&
252 !e->hasReleaseInfo() && (e->frameNumber != lastFrameNumber);
253
254 // RequestedPresent and Acquire info are always available producer-side.
255 return checkForLatch || checkForFirstRefreshStart ||
256 checkForLastRefreshStart || checkForGpuCompositionDone ||
257 checkForDisplayPresent || checkForDequeueReady || checkForRelease;
258 }
259
260 static void getFrameTimestamp(nsecs_t *dst, const nsecs_t& src) {
261 if (dst != nullptr) {
262 // We always get valid timestamps for these eventually.
263 *dst = (src == FrameEvents::TIMESTAMP_PENDING) ?
264 NATIVE_WINDOW_TIMESTAMP_PENDING : src;
265 }
266 }
267
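// Converts a fence's signal time to the NATIVE_WINDOW_TIMESTAMP_* convention:
// PENDING while the fence is unknown or not yet signaled, INVALID for invalid fences.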
268 static void getFrameTimestampFence(nsecs_t *dst,
269 const std::shared_ptr<FenceTime>& src, bool fenceShouldBeKnown) {
270 if (dst != nullptr) {
271 if (!fenceShouldBeKnown) {
272 *dst = NATIVE_WINDOW_TIMESTAMP_PENDING;
273 return;
274 }
275
276 nsecs_t signalTime = src->getSignalTime();
277 *dst = (signalTime == Fence::SIGNAL_TIME_PENDING) ?
278 NATIVE_WINDOW_TIMESTAMP_PENDING :
279 (signalTime == Fence::SIGNAL_TIME_INVALID) ?
280 NATIVE_WINDOW_TIMESTAMP_INVALID :
281 signalTime;
282 }
283 }
284
285 status_t Surface::getFrameTimestamps(uint64_t frameNumber,
286 nsecs_t* outRequestedPresentTime, nsecs_t* outAcquireTime,
287 nsecs_t* outLatchTime, nsecs_t* outFirstRefreshStartTime,
288 nsecs_t* outLastRefreshStartTime, nsecs_t* outGpuCompositionDoneTime,
289 nsecs_t* outDisplayPresentTime, nsecs_t* outDequeueReadyTime,
290 nsecs_t* outReleaseTime) {
291 ATRACE_CALL();
292
293 Mutex::Autolock lock(mMutex);
294
295 if (!mEnableFrameTimestamps) {
296 return INVALID_OPERATION;
297 }
298
299 // Verify the requested timestamps are supported.
300 querySupportedTimestampsLocked();
301 if (outDisplayPresentTime != nullptr && !mFrameTimestampsSupportsPresent) {
302 return BAD_VALUE;
303 }
304
305 FrameEvents* events = mFrameEventHistory->getFrame(frameNumber);
306 if (events == nullptr) {
307 // If the entry isn't available in the producer, it's definitely not
308 // available in the consumer.
309 return NAME_NOT_FOUND;
310 }
311
312 // Update our cache of events if the requested events are not available.
313 if (checkConsumerForUpdates(events, mLastFrameNumber,
314 outLatchTime, outFirstRefreshStartTime, outLastRefreshStartTime,
315 outGpuCompositionDoneTime, outDisplayPresentTime,
316 outDequeueReadyTime, outReleaseTime)) {
317 FrameEventHistoryDelta delta;
318 mGraphicBufferProducer->getFrameTimestamps(&delta);
319 mFrameEventHistory->applyDelta(delta);
320 events = mFrameEventHistory->getFrame(frameNumber);
321 }
322
323 if (events == nullptr) {
324 // The entry was available before the update, but was overwritten
325 // after the update. Make sure not to send the wrong frame's data.
326 return NAME_NOT_FOUND;
327 }
328
329 getFrameTimestamp(outRequestedPresentTime, events->requestedPresentTime);
330 getFrameTimestamp(outLatchTime, events->latchTime);
331 getFrameTimestamp(outFirstRefreshStartTime, events->firstRefreshStartTime);
332 getFrameTimestamp(outLastRefreshStartTime, events->lastRefreshStartTime);
333 getFrameTimestamp(outDequeueReadyTime, events->dequeueReadyTime);
334
335 getFrameTimestampFence(outAcquireTime, events->acquireFence,
336 events->hasAcquireInfo());
337 getFrameTimestampFence(outGpuCompositionDoneTime,
338 events->gpuCompositionDoneFence,
339 events->hasGpuCompositionDoneInfo());
340 getFrameTimestampFence(outDisplayPresentTime, events->displayPresentFence,
341 events->hasDisplayPresentInfo());
342 getFrameTimestampFence(outReleaseTime, events->releaseFence,
343 events->hasReleaseInfo());
344
345 return NO_ERROR;
346 }
347
348 status_t Surface::getWideColorSupport(bool* supported) {
349 ATRACE_CALL();
350
351 const sp<IBinder> display = ComposerServiceAIDL::getInstance().getInternalDisplayToken();
352 if (display == nullptr) {
353 return NAME_NOT_FOUND;
354 }
355
356 *supported = false;
357 binder::Status status = composerServiceAIDL()->isWideColorDisplay(display, supported);
358 return status.transactionError();
359 }
360
361 status_t Surface::getHdrSupport(bool* supported) {
362 ATRACE_CALL();
363
364 const sp<IBinder> display = ComposerServiceAIDL::getInstance().getInternalDisplayToken();
365 if (display == nullptr) {
366 return NAME_NOT_FOUND;
367 }
368
369 ui::DynamicDisplayInfo info;
370 if (status_t err = composerService()->getDynamicDisplayInfo(display, &info); err != NO_ERROR) {
371 return err;
372 }
373
374 *supported = !info.hdrCapabilities.getSupportedHdrTypes().empty();
375 return NO_ERROR;
376 }
377
378 int Surface::hook_setSwapInterval(ANativeWindow* window, int interval) {
379 Surface* c = getSelf(window);
380 return c->setSwapInterval(interval);
381 }
382
383 int Surface::hook_dequeueBuffer(ANativeWindow* window,
384 ANativeWindowBuffer** buffer, int* fenceFd) {
385 Surface* c = getSelf(window);
386 {
387 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
388 if (c->mDequeueInterceptor != nullptr) {
389 auto interceptor = c->mDequeueInterceptor;
390 auto data = c->mDequeueInterceptorData;
391 return interceptor(window, Surface::dequeueBufferInternal, data, buffer, fenceFd);
392 }
393 }
394 return c->dequeueBuffer(buffer, fenceFd);
395 }
396
397 int Surface::dequeueBufferInternal(ANativeWindow* window, ANativeWindowBuffer** buffer,
398 int* fenceFd) {
399 Surface* c = getSelf(window);
400 return c->dequeueBuffer(buffer, fenceFd);
401 }
402
403 int Surface::hook_cancelBuffer(ANativeWindow* window,
404 ANativeWindowBuffer* buffer, int fenceFd) {
405 Surface* c = getSelf(window);
406 {
407 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
408 if (c->mCancelInterceptor != nullptr) {
409 auto interceptor = c->mCancelInterceptor;
410 auto data = c->mCancelInterceptorData;
411 return interceptor(window, Surface::cancelBufferInternal, data, buffer, fenceFd);
412 }
413 }
414 return c->cancelBuffer(buffer, fenceFd);
415 }
416
417 int Surface::cancelBufferInternal(ANativeWindow* window, ANativeWindowBuffer* buffer, int fenceFd) {
418 Surface* c = getSelf(window);
419 return c->cancelBuffer(buffer, fenceFd);
420 }
421
422 int Surface::hook_queueBuffer(ANativeWindow* window,
423 ANativeWindowBuffer* buffer, int fenceFd) {
424 Surface* c = getSelf(window);
425 {
426 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
427 if (c->mQueueInterceptor != nullptr) {
428 auto interceptor = c->mQueueInterceptor;
429 auto data = c->mQueueInterceptorData;
430 return interceptor(window, Surface::queueBufferInternal, data, buffer, fenceFd);
431 }
432 }
433 return c->queueBuffer(buffer, fenceFd);
434 }
435
436 int Surface::queueBufferInternal(ANativeWindow* window, ANativeWindowBuffer* buffer, int fenceFd) {
437 Surface* c = getSelf(window);
438 return c->queueBuffer(buffer, fenceFd);
439 }
440
441 int Surface::hook_dequeueBuffer_DEPRECATED(ANativeWindow* window,
442 ANativeWindowBuffer** buffer) {
443 Surface* c = getSelf(window);
444 ANativeWindowBuffer* buf;
445 int fenceFd = -1;
446 int result = c->dequeueBuffer(&buf, &fenceFd);
447 if (result != OK) {
448 return result;
449 }
450 sp<Fence> fence(new Fence(fenceFd));
451 int waitResult = fence->waitForever("dequeueBuffer_DEPRECATED");
452 if (waitResult != OK) {
453 ALOGE("dequeueBuffer_DEPRECATED: Fence::wait returned an error: %d",
454 waitResult);
455 c->cancelBuffer(buf, -1);
456 return waitResult;
457 }
458 *buffer = buf;
459 return result;
460 }
461
462 int Surface::hook_cancelBuffer_DEPRECATED(ANativeWindow* window,
463 ANativeWindowBuffer* buffer) {
464 Surface* c = getSelf(window);
465 return c->cancelBuffer(buffer, -1);
466 }
467
468 int Surface::hook_lockBuffer_DEPRECATED(ANativeWindow* window,
469 ANativeWindowBuffer* buffer) {
470 Surface* c = getSelf(window);
471 return c->lockBuffer_DEPRECATED(buffer);
472 }
473
474 int Surface::hook_queueBuffer_DEPRECATED(ANativeWindow* window,
475 ANativeWindowBuffer* buffer) {
476 Surface* c = getSelf(window);
477 return c->queueBuffer(buffer, -1);
478 }
479
480 int Surface::hook_perform(ANativeWindow* window, int operation, ...) {
481 va_list args;
482 va_start(args, operation);
483 Surface* c = getSelf(window);
484 int result;
485 // Don't acquire shared ownership of the interceptor mutex if we're going to
486 // do interceptor registration, as otherwise we'll deadlock on acquiring
487 // exclusive ownership.
488 if (!isInterceptorRegistrationOp(operation)) {
489 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
490 if (c->mPerformInterceptor != nullptr) {
491 result = c->mPerformInterceptor(window, Surface::performInternal,
492 c->mPerformInterceptorData, operation, args);
493 va_end(args);
494 return result;
495 }
496 }
497 result = c->perform(operation, args);
498 va_end(args);
499 return result;
500 }
501
502 int Surface::performInternal(ANativeWindow* window, int operation, va_list args) {
503 Surface* c = getSelf(window);
504 return c->perform(operation, args);
505 }
506
507 int Surface::hook_query(const ANativeWindow* window, int what, int* value) {
508 const Surface* c = getSelf(window);
509 {
510 std::shared_lock<std::shared_mutex> lock(c->mInterceptorMutex);
511 if (c->mQueryInterceptor != nullptr) {
512 auto interceptor = c->mQueryInterceptor;
513 auto data = c->mQueryInterceptorData;
514 return interceptor(window, Surface::queryInternal, data, what, value);
515 }
516 }
517 return c->query(what, value);
518 }
519
520 int Surface::queryInternal(const ANativeWindow* window, int what, int* value) {
521 const Surface* c = getSelf(window);
522 return c->query(what, value);
523 }
524
525 int Surface::setSwapInterval(int interval) {
526 ATRACE_CALL();
527 // EGL specification states:
528 // interval is silently clamped to minimum and maximum implementation
529 // dependent values before being stored.
530
531 if (interval < minSwapInterval)
532 interval = minSwapInterval;
533
534 if (interval > maxSwapInterval)
535 interval = maxSwapInterval;
536
537 const bool wasSwapIntervalZero = mSwapIntervalZero;
538 mSwapIntervalZero = (interval == 0);
539
540 if (mSwapIntervalZero != wasSwapIntervalZero) {
541 mGraphicBufferProducer->setAsyncMode(mSwapIntervalZero);
542 }
543
544 return NO_ERROR;
545 }
546
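// FenceMonitor waits on queued fences from a detached thread and emits trace events,
// so that when ATRACE_TAG_GRAPHICS is enabled systrace shows how many fences of a
// given kind (e.g. "HWC release", "GPU completion") are still pending.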
547 class FenceMonitor {
548 public:
549 explicit FenceMonitor(const char* name) : mName(name), mFencesQueued(0), mFencesSignaled(0) {
550 std::thread thread(&FenceMonitor::loop, this);
551 pthread_setname_np(thread.native_handle(), mName);
552 thread.detach();
553 }
554
555 void queueFence(const sp<Fence>& fence) {
556 char message[64];
557
558 std::lock_guard<std::mutex> lock(mMutex);
559 if (fence->getSignalTime() != Fence::SIGNAL_TIME_PENDING) {
560 snprintf(message, sizeof(message), "%s fence %u has signaled", mName, mFencesQueued);
561 ATRACE_NAME(message);
562 // Need an increment on both to make the trace number correct.
563 mFencesQueued++;
564 mFencesSignaled++;
565 return;
566 }
567 snprintf(message, sizeof(message), "Trace %s fence %u", mName, mFencesQueued);
568 ATRACE_NAME(message);
569
570 mQueue.push_back(fence);
571 mCondition.notify_one();
572 mFencesQueued++;
573 ATRACE_INT(mName, int32_t(mQueue.size()));
574 }
575
576 private:
577 #pragma clang diagnostic push
578 #pragma clang diagnostic ignored "-Wmissing-noreturn"
579 void loop() {
580 while (true) {
581 threadLoop();
582 }
583 }
584 #pragma clang diagnostic pop
585
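// Waits for the oldest queued fence to signal, then updates the pending-fence counter.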
586 void threadLoop() {
587 sp<Fence> fence;
588 uint32_t fenceNum;
589 {
590 std::unique_lock<std::mutex> lock(mMutex);
591 while (mQueue.empty()) {
592 mCondition.wait(lock);
593 }
594 fence = mQueue[0];
595 fenceNum = mFencesSignaled;
596 }
597 {
598 char message[64];
599 snprintf(message, sizeof(message), "waiting for %s %u", mName, fenceNum);
600 ATRACE_NAME(message);
601
602 status_t result = fence->waitForever(message);
603 if (result != OK) {
604 ALOGE("Error waiting for fence: %d", result);
605 }
606 }
607 {
608 std::lock_guard<std::mutex> lock(mMutex);
609 mQueue.pop_front();
610 mFencesSignaled++;
611 ATRACE_INT(mName, int32_t(mQueue.size()));
612 }
613 }
614
615 const char* mName;
616 uint32_t mFencesQueued;
617 uint32_t mFencesSignaled;
618 std::deque<sp<Fence>> mQueue;
619 std::condition_variable mCondition;
620 std::mutex mMutex;
621 };
622
623 void Surface::getDequeueBufferInputLocked(
624 IGraphicBufferProducer::DequeueBufferInput* dequeueInput) {
625 LOG_ALWAYS_FATAL_IF(dequeueInput == nullptr, "input is null");
626
627 dequeueInput->width = mReqWidth ? mReqWidth : mUserWidth;
628 dequeueInput->height = mReqHeight ? mReqHeight : mUserHeight;
629
630 dequeueInput->format = mReqFormat;
631 dequeueInput->usage = mReqUsage;
632
633 dequeueInput->getTimestamps = mEnableFrameTimestamps;
634 }
635
636 int Surface::dequeueBuffer(android_native_buffer_t** buffer, int* fenceFd) {
637 ATRACE_CALL();
638 ALOGV("Surface::dequeueBuffer");
639
640 IGraphicBufferProducer::DequeueBufferInput dqInput;
641 {
642 Mutex::Autolock lock(mMutex);
643 if (mReportRemovedBuffers) {
644 mRemovedBuffers.clear();
645 }
646
647 getDequeueBufferInputLocked(&dqInput);
648
649 if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot !=
650 BufferItem::INVALID_BUFFER_SLOT) {
651 sp<GraphicBuffer>& gbuf(mSlots[mSharedBufferSlot].buffer);
652 if (gbuf != nullptr) {
653 *buffer = gbuf.get();
654 *fenceFd = -1;
655 return OK;
656 }
657 }
658 } // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffer
659
660 int buf = -1;
661 sp<Fence> fence;
662 nsecs_t startTime = systemTime();
663
664 FrameEventHistoryDelta frameTimestamps;
665 status_t result = mGraphicBufferProducer->dequeueBuffer(&buf, &fence, dqInput.width,
666 dqInput.height, dqInput.format,
667 dqInput.usage, &mBufferAge,
668 dqInput.getTimestamps ?
669 &frameTimestamps : nullptr);
670 mLastDequeueDuration = systemTime() - startTime;
671
672 if (result < 0) {
673 ALOGV("dequeueBuffer: IGraphicBufferProducer::dequeueBuffer"
674 "(%d, %d, %d, %#" PRIx64 ") failed: %d",
675 dqInput.width, dqInput.height, dqInput.format, dqInput.usage, result);
676 return result;
677 }
678
679 if (buf < 0 || buf >= NUM_BUFFER_SLOTS) {
680 ALOGE("dequeueBuffer: IGraphicBufferProducer returned invalid slot number %d", buf);
681 android_errorWriteLog(0x534e4554, "36991414"); // SafetyNet logging
682 return FAILED_TRANSACTION;
683 }
684
685 Mutex::Autolock lock(mMutex);
686
687 // Write this while holding the mutex
688 mLastDequeueStartTime = startTime;
689
690 sp<GraphicBuffer>& gbuf(mSlots[buf].buffer);
691
692 // this should never happen
693 ALOGE_IF(fence == nullptr, "Surface::dequeueBuffer: received null Fence! buf=%d", buf);
694
695 if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
696 static FenceMonitor hwcReleaseThread("HWC release");
697 hwcReleaseThread.queueFence(fence);
698 }
699
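// RELEASE_ALL_BUFFERS means the producer dropped all of its slots (for example after a
// buffer count or geometry change), so drop our cached GraphicBuffer references too;
// any slot still in use is re-requested below once its cached buffer is null.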
700 if (result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
701 freeAllBuffers();
702 }
703
704 if (dqInput.getTimestamps) {
705 mFrameEventHistory->applyDelta(frameTimestamps);
706 }
707
708 if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == nullptr) {
709 if (mReportRemovedBuffers && (gbuf != nullptr)) {
710 mRemovedBuffers.push_back(gbuf);
711 }
712 result = mGraphicBufferProducer->requestBuffer(buf, &gbuf);
713 if (result != NO_ERROR) {
714 ALOGE("dequeueBuffer: IGraphicBufferProducer::requestBuffer failed: %d", result);
715 mGraphicBufferProducer->cancelBuffer(buf, fence);
716 return result;
717 }
718 }
719
720 if (fence->isValid()) {
721 *fenceFd = fence->dup();
722 if (*fenceFd == -1) {
723 ALOGE("dequeueBuffer: error duping fence: %d", errno);
724 // dup() should never fail; something is badly wrong. Soldier on
725 // and hope for the best; the worst that should happen is some
726 // visible corruption that lasts until the next frame.
727 }
728 } else {
729 *fenceFd = -1;
730 }
731
732 *buffer = gbuf.get();
733
734 if (mSharedBufferMode && mAutoRefresh) {
735 mSharedBufferSlot = buf;
736 mSharedBufferHasBeenQueued = false;
737 } else if (mSharedBufferSlot == buf) {
738 mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
739 mSharedBufferHasBeenQueued = false;
740 }
741
742 mDequeuedSlots.insert(buf);
743
744 return OK;
745 }
746
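// Batched variant of dequeueBuffer(): dequeues buffers->size() buffers with a single
// binder call and cancels the whole batch if any individual dequeue, slot validation,
// or requestBuffers step fails. Not supported in shared buffer mode.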
747 int Surface::dequeueBuffers(std::vector<BatchBuffer>* buffers) {
748 using DequeueBufferInput = IGraphicBufferProducer::DequeueBufferInput;
749 using DequeueBufferOutput = IGraphicBufferProducer::DequeueBufferOutput;
750 using CancelBufferInput = IGraphicBufferProducer::CancelBufferInput;
751 using RequestBufferOutput = IGraphicBufferProducer::RequestBufferOutput;
752
753 ATRACE_CALL();
754 ALOGV("Surface::dequeueBuffers");
755
756 if (buffers->size() == 0) {
757 ALOGE("%s: must dequeue at least 1 buffer!", __FUNCTION__);
758 return BAD_VALUE;
759 }
760
761 if (mSharedBufferMode) {
762 ALOGE("%s: batch operation is not supported in shared buffer mode!",
763 __FUNCTION__);
764 return INVALID_OPERATION;
765 }
766
767 size_t numBufferRequested = buffers->size();
768 DequeueBufferInput input;
769
770 {
771 Mutex::Autolock lock(mMutex);
772 if (mReportRemovedBuffers) {
773 mRemovedBuffers.clear();
774 }
775
776 getDequeueBufferInputLocked(&input);
777 } // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffers
778
779 std::vector<DequeueBufferInput> dequeueInput(numBufferRequested, input);
780 std::vector<DequeueBufferOutput> dequeueOutput;
781
782 nsecs_t startTime = systemTime();
783
784 status_t result = mGraphicBufferProducer->dequeueBuffers(dequeueInput, &dequeueOutput);
785
786 mLastDequeueDuration = systemTime() - startTime;
787
788 if (result < 0) {
789 ALOGV("%s: IGraphicBufferProducer::dequeueBuffers"
790 "(%d, %d, %d, %#" PRIx64 ") failed: %d",
791 __FUNCTION__, input.width, input.height, input.format, input.usage, result);
792 return result;
793 }
794
795 std::vector<CancelBufferInput> cancelBufferInputs(numBufferRequested);
796 std::vector<status_t> cancelBufferOutputs;
797 for (size_t i = 0; i < numBufferRequested; i++) {
798 cancelBufferInputs[i].slot = dequeueOutput[i].slot;
799 cancelBufferInputs[i].fence = dequeueOutput[i].fence;
800 }
801
802 for (const auto& output : dequeueOutput) {
803 if (output.result < 0) {
804 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
805 ALOGV("%s: IGraphicBufferProducer::dequeueBuffers"
806 "(%d, %d, %d, %#" PRIx64 ") failed: %d",
807 __FUNCTION__, input.width, input.height, input.format, input.usage,
808 output.result);
809 return output.result;
810 }
811
812 if (output.slot < 0 || output.slot >= NUM_BUFFER_SLOTS) {
813 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
814 ALOGE("%s: IGraphicBufferProducer returned invalid slot number %d",
815 __FUNCTION__, output.slot);
816 android_errorWriteLog(0x534e4554, "36991414"); // SafetyNet logging
817 return FAILED_TRANSACTION;
818 }
819
820 if (input.getTimestamps && !output.timestamps.has_value()) {
821 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
822 ALOGE("%s: no frame timestamps returned!", __FUNCTION__);
823 return FAILED_TRANSACTION;
824 }
825
826 // this should never happen
827 ALOGE_IF(output.fence == nullptr,
828 "%s: received null Fence! slot=%d", __FUNCTION__, output.slot);
829 }
830
831 Mutex::Autolock lock(mMutex);
832
833 // Write this while holding the mutex
834 mLastDequeueStartTime = startTime;
835
836 std::vector<int32_t> requestBufferSlots;
837 requestBufferSlots.reserve(numBufferRequested);
838 // Handle RELEASE_ALL_BUFFERS, then request buffers for the slots that need reallocation
839 for (const auto& output : dequeueOutput) {
840 if (output.result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
841 ALOGV("%s: RELEASE_ALL_BUFFERS during batch operation", __FUNCTION__);
842 freeAllBuffers();
843 break;
844 }
845 }
846
847 for (const auto& output : dequeueOutput) {
848 // Collect slots that need a buffer request
849 sp<GraphicBuffer>& gbuf(mSlots[output.slot].buffer);
850 if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == nullptr) {
851 if (mReportRemovedBuffers && (gbuf != nullptr)) {
852 mRemovedBuffers.push_back(gbuf);
853 }
854 requestBufferSlots.push_back(output.slot);
855 }
856 }
857
858 // Batch request Buffer
859 std::vector<RequestBufferOutput> reqBufferOutput;
860 if (requestBufferSlots.size() > 0) {
861 result = mGraphicBufferProducer->requestBuffers(requestBufferSlots, &reqBufferOutput);
862 if (result != NO_ERROR) {
863 ALOGE("%s: IGraphicBufferProducer::requestBuffers failed: %d",
864 __FUNCTION__, result);
865 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
866 return result;
867 }
868
869 // Check if we have any single failure
870 for (size_t i = 0; i < requestBufferSlots.size(); i++) {
871 if (reqBufferOutput[i].result != OK) {
872 ALOGE("%s: IGraphicBufferProducer::requestBuffers failed at %zu-th buffer, slot %d",
873 __FUNCTION__, i, requestBufferSlots[i]);
874 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
875 return reqBufferOutput[i].result;
876 }
877 }
878
879 // Fill request buffer results to mSlots
880 for (size_t i = 0; i < requestBufferSlots.size(); i++) {
881 mSlots[requestBufferSlots[i]].buffer = reqBufferOutput[i].buffer;
882 }
883 }
884
885 for (size_t batchIdx = 0; batchIdx < numBufferRequested; batchIdx++) {
886 const auto& output = dequeueOutput[batchIdx];
887 int slot = output.slot;
888 sp<GraphicBuffer>& gbuf(mSlots[slot].buffer);
889
890 if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
891 static FenceMonitor hwcReleaseThread("HWC release");
892 hwcReleaseThread.queueFence(output.fence);
893 }
894
895 if (input.getTimestamps) {
896 mFrameEventHistory->applyDelta(output.timestamps.value());
897 }
898
899 if (output.fence->isValid()) {
900 buffers->at(batchIdx).fenceFd = output.fence->dup();
901 if (buffers->at(batchIdx).fenceFd == -1) {
902 ALOGE("%s: error duping fence: %d", __FUNCTION__, errno);
903 // dup() should never fail; something is badly wrong. Soldier on
904 // and hope for the best; the worst that should happen is some
905 // visible corruption that lasts until the next frame.
906 }
907 } else {
908 buffers->at(batchIdx).fenceFd = -1;
909 }
910
911 buffers->at(batchIdx).buffer = gbuf.get();
912 mDequeuedSlots.insert(slot);
913 }
914 return OK;
915 }
916
917 int Surface::cancelBuffer(android_native_buffer_t* buffer,
918 int fenceFd) {
919 ATRACE_CALL();
920 ALOGV("Surface::cancelBuffer");
921 Mutex::Autolock lock(mMutex);
922 int i = getSlotFromBufferLocked(buffer);
923 if (i < 0) {
924 if (fenceFd >= 0) {
925 close(fenceFd);
926 }
927 return i;
928 }
929 if (mSharedBufferSlot == i && mSharedBufferHasBeenQueued) {
930 if (fenceFd >= 0) {
931 close(fenceFd);
932 }
933 return OK;
934 }
935 sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
936 mGraphicBufferProducer->cancelBuffer(i, fence);
937
938 if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot == i) {
939 mSharedBufferHasBeenQueued = true;
940 }
941
942 mDequeuedSlots.erase(i);
943
944 return OK;
945 }
946
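// Batched variant of cancelBuffer(). Buffers whose slot cannot be resolved are skipped
// (their fence fds are closed) and the bad-slot error is reported after the remaining
// buffers have been cancelled.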
947 int Surface::cancelBuffers(const std::vector<BatchBuffer>& buffers) {
948 using CancelBufferInput = IGraphicBufferProducer::CancelBufferInput;
949 ATRACE_CALL();
950 ALOGV("Surface::cancelBuffers");
951
952 if (mSharedBufferMode) {
953 ALOGE("%s: batch operation is not supported in shared buffer mode!",
954 __FUNCTION__);
955 return INVALID_OPERATION;
956 }
957
958 size_t numBuffers = buffers.size();
959 std::vector<CancelBufferInput> cancelBufferInputs(numBuffers);
960 std::vector<status_t> cancelBufferOutputs;
961 size_t numBuffersCancelled = 0;
962 int badSlotResult = 0;
963 for (size_t i = 0; i < numBuffers; i++) {
964 int slot = getSlotFromBufferLocked(buffers[i].buffer);
965 int fenceFd = buffers[i].fenceFd;
966 if (slot < 0) {
967 if (fenceFd >= 0) {
968 close(fenceFd);
969 }
970 ALOGE("%s: cannot find slot number for cancelled buffer", __FUNCTION__);
971 badSlotResult = slot;
972 } else {
973 sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
974 cancelBufferInputs[numBuffersCancelled].slot = slot;
975 cancelBufferInputs[numBuffersCancelled++].fence = fence;
976 }
977 }
978 cancelBufferInputs.resize(numBuffersCancelled);
979 mGraphicBufferProducer->cancelBuffers(cancelBufferInputs, &cancelBufferOutputs);
980
981
982 for (size_t i = 0; i < numBuffersCancelled; i++) {
983 mDequeuedSlots.erase(cancelBufferInputs[i].slot);
984 }
985
986 if (badSlotResult != 0) {
987 return badSlotResult;
988 }
989 return OK;
990 }
991
992 int Surface::getSlotFromBufferLocked(
993 android_native_buffer_t* buffer) const {
994 if (buffer == nullptr) {
995 ALOGE("%s: input buffer is null!", __FUNCTION__);
996 return BAD_VALUE;
997 }
998
999 for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
1000 if (mSlots[i].buffer != nullptr &&
1001 mSlots[i].buffer->handle == buffer->handle) {
1002 return i;
1003 }
1004 }
1005 ALOGE("%s: unknown buffer: %p", __FUNCTION__, buffer->handle);
1006 return BAD_VALUE;
1007 }
1008
1009 int Surface::lockBuffer_DEPRECATED(android_native_buffer_t* buffer __attribute__((unused))) {
1010 ALOGV("Surface::lockBuffer");
1011 Mutex::Autolock lock(mMutex);
1012 return OK;
1013 }
1014
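// Builds the QueueBufferInput for queueBuffer()/queueBuffers() from the current
// per-Surface state: timestamp (auto-generated if requested), dataspace, crop, scaling
// mode, transform, sticky transform, HDR metadata, acquire fence and surface damage.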
1015 void Surface::getQueueBufferInputLocked(android_native_buffer_t* buffer, int fenceFd,
1016 nsecs_t timestamp, IGraphicBufferProducer::QueueBufferInput* out) {
1017 bool isAutoTimestamp = false;
1018
1019 if (timestamp == NATIVE_WINDOW_TIMESTAMP_AUTO) {
1020 timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
1021 isAutoTimestamp = true;
1022 ALOGV("Surface::queueBuffer making up timestamp: %.2f ms",
1023 timestamp / 1000000.0);
1024 }
1025
1026 // Make sure the crop rectangle is entirely inside the buffer.
1027 Rect crop(Rect::EMPTY_RECT);
1028 mCrop.intersect(Rect(buffer->width, buffer->height), &crop);
1029
1030 sp<Fence> fence(fenceFd >= 0 ? new Fence(fenceFd) : Fence::NO_FENCE);
1031 IGraphicBufferProducer::QueueBufferInput input(timestamp, isAutoTimestamp,
1032 static_cast<android_dataspace>(mDataSpace), crop, mScalingMode,
1033 mTransform ^ mStickyTransform, fence, mStickyTransform,
1034 mEnableFrameTimestamps);
1035
1036 // we should send HDR metadata as needed if this becomes a bottleneck
1037 input.setHdrMetadata(mHdrMetadata);
1038
1039 if (mConnectedToCpu || mDirtyRegion.bounds() == Rect::INVALID_RECT) {
1040 input.setSurfaceDamage(Region::INVALID_REGION);
1041 } else {
1042 // Here we do two things:
1043 // 1) The surface damage was specified using the OpenGL ES convention of
1044 // the origin being in the bottom-left corner. Here we flip to the
1045 // convention that the rest of the system uses (top-left corner) by
1046 // subtracting all top/bottom coordinates from the buffer height.
1047 // 2) If the buffer is coming in rotated (for example, because the EGL
1048 // implementation is reacting to the transform hint coming back from
1049 // SurfaceFlinger), the surface damage needs to be rotated the
1050 // opposite direction, since it was generated assuming an unrotated
1051 // buffer (the app doesn't know that the EGL implementation is
1052 // reacting to the transform hint behind its back). The
1053 // transformations in the switch statement below apply those
1054 // complementary rotations (e.g., if 90 degrees, rotate 270 degrees).
1055
1056 int width = buffer->width;
1057 int height = buffer->height;
1058 bool rotated90 = (mTransform ^ mStickyTransform) &
1059 NATIVE_WINDOW_TRANSFORM_ROT_90;
1060 if (rotated90) {
1061 std::swap(width, height);
1062 }
1063
1064 Region flippedRegion;
1065 for (auto rect : mDirtyRegion) {
1066 int left = rect.left;
1067 int right = rect.right;
1068 int top = height - rect.bottom; // Flip from OpenGL convention
1069 int bottom = height - rect.top; // Flip from OpenGL convention
1070 switch (mTransform ^ mStickyTransform) {
1071 case NATIVE_WINDOW_TRANSFORM_ROT_90: {
1072 // Rotate 270 degrees
1073 Rect flippedRect{top, width - right, bottom, width - left};
1074 flippedRegion.orSelf(flippedRect);
1075 break;
1076 }
1077 case NATIVE_WINDOW_TRANSFORM_ROT_180: {
1078 // Rotate 180 degrees
1079 Rect flippedRect{width - right, height - bottom,
1080 width - left, height - top};
1081 flippedRegion.orSelf(flippedRect);
1082 break;
1083 }
1084 case NATIVE_WINDOW_TRANSFORM_ROT_270: {
1085 // Rotate 90 degrees
1086 Rect flippedRect{height - bottom, left,
1087 height - top, right};
1088 flippedRegion.orSelf(flippedRect);
1089 break;
1090 }
1091 default: {
1092 Rect flippedRect{left, top, right, bottom};
1093 flippedRegion.orSelf(flippedRect);
1094 break;
1095 }
1096 }
1097 }
1098
1099 input.setSurfaceDamage(flippedRegion);
1100 }
1101 *out = input;
1102 }
1103
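// Mirrors the dataspace and HDR metadata carried in the QueueBufferInput into the
// buffer's gralloc metadata, so consumers reading gralloc metadata see values that
// match what was queued.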
1104 void Surface::applyGrallocMetadataLocked(
1105 android_native_buffer_t* buffer,
1106 const IGraphicBufferProducer::QueueBufferInput& queueBufferInput) {
1107 ATRACE_CALL();
1108 auto& mapper = GraphicBufferMapper::get();
1109 mapper.setDataspace(buffer->handle, static_cast<ui::Dataspace>(queueBufferInput.dataSpace));
1110 mapper.setSmpte2086(buffer->handle, queueBufferInput.getHdrMetadata().getSmpte2086());
1111 mapper.setCta861_3(buffer->handle, queueBufferInput.getHdrMetadata().getCta8613());
1112 mapper.setSmpte2094_40(buffer->handle, queueBufferInput.getHdrMetadata().getHdr10Plus());
1113 }
1114
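// Shared bookkeeping after a buffer (or one entry of a batch) has been queued: update
// the frame event history, cached default dimensions, next frame number, transform
// hint, surface damage and shared-buffer state, and wake anyone waiting on
// mQueueBufferCondition.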
1115 void Surface::onBufferQueuedLocked(int slot, sp<Fence> fence,
1116 const IGraphicBufferProducer::QueueBufferOutput& output) {
1117 mDequeuedSlots.erase(slot);
1118
1119 if (mEnableFrameTimestamps) {
1120 mFrameEventHistory->applyDelta(output.frameTimestamps);
1121 // Update timestamps with the local acquire fence.
1122 // The consumer doesn't send it back to prevent us from having two
1123 // file descriptors of the same fence.
1124 mFrameEventHistory->updateAcquireFence(mNextFrameNumber,
1125 std::make_shared<FenceTime>(fence));
1126
1127 // Cache timestamps of signaled fences so we can close their file
1128 // descriptors.
1129 mFrameEventHistory->updateSignalTimes();
1130 }
1131
1132 mLastFrameNumber = mNextFrameNumber;
1133
1134 mDefaultWidth = output.width;
1135 mDefaultHeight = output.height;
1136 mNextFrameNumber = output.nextFrameNumber;
1137
1138 // Ignore transform hint if sticky transform is set or transform to display inverse flag is
1139 // set.
1140 if (mStickyTransform == 0 && !transformToDisplayInverse()) {
1141 mTransformHint = output.transformHint;
1142 }
1143
1144 mConsumerRunningBehind = (output.numPendingBuffers >= 2);
1145
1146 if (!mConnectedToCpu) {
1147 // Clear surface damage back to full-buffer
1148 mDirtyRegion = Region::INVALID_REGION;
1149 }
1150
1151 if (mSharedBufferMode && mAutoRefresh && mSharedBufferSlot == slot) {
1152 mSharedBufferHasBeenQueued = true;
1153 }
1154
1155 mQueueBufferCondition.broadcast();
1156
1157 if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
1158 static FenceMonitor gpuCompletionThread("GPU completion");
1159 gpuCompletionThread.queueFence(fence);
1160 }
1161 }
1162
1163 int Surface::queueBuffer(android_native_buffer_t* buffer, int fenceFd) {
1164 ATRACE_CALL();
1165 ALOGV("Surface::queueBuffer");
1166 Mutex::Autolock lock(mMutex);
1167
1168 int i = getSlotFromBufferLocked(buffer);
1169 if (i < 0) {
1170 if (fenceFd >= 0) {
1171 close(fenceFd);
1172 }
1173 return i;
1174 }
1175 if (mSharedBufferSlot == i && mSharedBufferHasBeenQueued) {
1176 if (fenceFd >= 0) {
1177 close(fenceFd);
1178 }
1179 return OK;
1180 }
1181
1182 IGraphicBufferProducer::QueueBufferOutput output;
1183 IGraphicBufferProducer::QueueBufferInput input;
1184 getQueueBufferInputLocked(buffer, fenceFd, mTimestamp, &input);
1185 applyGrallocMetadataLocked(buffer, input);
1186 sp<Fence> fence = input.fence;
1187
1188 nsecs_t now = systemTime();
1189
1190 status_t err = mGraphicBufferProducer->queueBuffer(i, input, &output);
1191 mLastQueueDuration = systemTime() - now;
1192 if (err != OK) {
1193 ALOGE("queueBuffer: error queuing buffer, %d", err);
1194 }
1195
1196 onBufferQueuedLocked(i, fence, output);
1197 return err;
1198 }
1199
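// Batched variant of queueBuffer(): builds one QueueBufferInput per buffer, queues them
// with a single binder call, then runs the usual post-queue bookkeeping for each entry.
// Not supported in shared buffer mode.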
1200 int Surface::queueBuffers(const std::vector<BatchQueuedBuffer>& buffers) {
1201 ATRACE_CALL();
1202 ALOGV("Surface::queueBuffers");
1203 Mutex::Autolock lock(mMutex);
1204
1205 if (mSharedBufferMode) {
1206 ALOGE("%s: batched operation is not supported in shared buffer mode", __FUNCTION__);
1207 return INVALID_OPERATION;
1208 }
1209
1210 size_t numBuffers = buffers.size();
1211 std::vector<IGraphicBufferProducer::QueueBufferInput> queueBufferInputs(numBuffers);
1212 std::vector<IGraphicBufferProducer::QueueBufferOutput> queueBufferOutputs;
1213 std::vector<int> bufferSlots(numBuffers, -1);
1214 std::vector<sp<Fence>> bufferFences(numBuffers);
1215
1216 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1217 int i = getSlotFromBufferLocked(buffers[batchIdx].buffer);
1218 if (i < 0) {
1219 if (buffers[batchIdx].fenceFd >= 0) {
1220 close(buffers[batchIdx].fenceFd);
1221 }
1222 return i;
1223 }
1224 bufferSlots[batchIdx] = i;
1225
1226 IGraphicBufferProducer::QueueBufferInput input;
1227 getQueueBufferInputLocked(
1228 buffers[batchIdx].buffer, buffers[batchIdx].fenceFd, buffers[batchIdx].timestamp,
1229 &input);
1230 bufferFences[batchIdx] = input.fence;
1231 queueBufferInputs[batchIdx] = input;
1232 }
1233
1234 nsecs_t now = systemTime();
1235 status_t err = mGraphicBufferProducer->queueBuffers(queueBufferInputs, &queueBufferOutputs);
1236 mLastQueueDuration = systemTime() - now;
1237 if (err != OK) {
1238 ALOGE("%s: error queuing buffer, %d", __FUNCTION__, err);
1239 }
1240
1241
1242 for (size_t batchIdx = 0; batchIdx < numBuffers; batchIdx++) {
1243 onBufferQueuedLocked(bufferSlots[batchIdx], bufferFences[batchIdx],
1244 queueBufferOutputs[batchIdx]);
1245 }
1246
1247 return err;
1248 }
1249
1250 void Surface::querySupportedTimestampsLocked() const {
1251 // mMutex must be locked when calling this method.
1252
1253 if (mQueriedSupportedTimestamps) {
1254 return;
1255 }
1256 mQueriedSupportedTimestamps = true;
1257
1258 std::vector<FrameEvent> supportedFrameTimestamps;
1259 status_t err = composerService()->getSupportedFrameTimestamps(
1260 &supportedFrameTimestamps);
1261
1262 if (err != NO_ERROR) {
1263 return;
1264 }
1265
1266 for (auto sft : supportedFrameTimestamps) {
1267 if (sft == FrameEvent::DISPLAY_PRESENT) {
1268 mFrameTimestampsSupportsPresent = true;
1269 }
1270 }
1271 }
1272
1273 int Surface::query(int what, int* value) const {
1274 ATRACE_CALL();
1275 ALOGV("Surface::query");
1276 { // scope for the lock
1277 Mutex::Autolock lock(mMutex);
1278 switch (what) {
1279 case NATIVE_WINDOW_FORMAT:
1280 if (mReqFormat) {
1281 *value = static_cast<int>(mReqFormat);
1282 return NO_ERROR;
1283 }
1284 break;
1285 case NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER: {
1286 status_t err = mGraphicBufferProducer->query(what, value);
1287 if (err == NO_ERROR) {
1288 return NO_ERROR;
1289 }
1290 sp<ISurfaceComposer> surfaceComposer = composerService();
1291 if (surfaceComposer == nullptr) {
1292 return -EPERM; // likely permissions error
1293 }
1294 if (surfaceComposer->authenticateSurfaceTexture(mGraphicBufferProducer)) {
1295 *value = 1;
1296 } else {
1297 *value = 0;
1298 }
1299 return NO_ERROR;
1300 }
1301 case NATIVE_WINDOW_CONCRETE_TYPE:
1302 *value = NATIVE_WINDOW_SURFACE;
1303 return NO_ERROR;
1304 case NATIVE_WINDOW_DEFAULT_WIDTH:
1305 *value = static_cast<int>(
1306 mUserWidth ? mUserWidth : mDefaultWidth);
1307 return NO_ERROR;
1308 case NATIVE_WINDOW_DEFAULT_HEIGHT:
1309 *value = static_cast<int>(
1310 mUserHeight ? mUserHeight : mDefaultHeight);
1311 return NO_ERROR;
1312 case NATIVE_WINDOW_TRANSFORM_HINT:
1313 *value = static_cast<int>(getTransformHint());
1314 return NO_ERROR;
1315 case NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND: {
1316 status_t err = NO_ERROR;
1317 if (!mConsumerRunningBehind) {
1318 *value = 0;
1319 } else {
1320 err = mGraphicBufferProducer->query(what, value);
1321 if (err == NO_ERROR) {
1322 mConsumerRunningBehind = *value;
1323 }
1324 }
1325 return err;
1326 }
1327 case NATIVE_WINDOW_BUFFER_AGE: {
1328 if (mBufferAge > INT32_MAX) {
1329 *value = 0;
1330 } else {
1331 *value = static_cast<int32_t>(mBufferAge);
1332 }
1333 return NO_ERROR;
1334 }
1335 case NATIVE_WINDOW_LAST_DEQUEUE_DURATION: {
1336 int64_t durationUs = mLastDequeueDuration / 1000;
1337 *value = durationUs > std::numeric_limits<int>::max() ?
1338 std::numeric_limits<int>::max() :
1339 static_cast<int>(durationUs);
1340 return NO_ERROR;
1341 }
1342 case NATIVE_WINDOW_LAST_QUEUE_DURATION: {
1343 int64_t durationUs = mLastQueueDuration / 1000;
1344 *value = durationUs > std::numeric_limits<int>::max() ?
1345 std::numeric_limits<int>::max() :
1346 static_cast<int>(durationUs);
1347 return NO_ERROR;
1348 }
1349 case NATIVE_WINDOW_FRAME_TIMESTAMPS_SUPPORTS_PRESENT: {
1350 querySupportedTimestampsLocked();
1351 *value = mFrameTimestampsSupportsPresent ? 1 : 0;
1352 return NO_ERROR;
1353 }
1354 case NATIVE_WINDOW_IS_VALID: {
1355 *value = mGraphicBufferProducer != nullptr ? 1 : 0;
1356 return NO_ERROR;
1357 }
1358 case NATIVE_WINDOW_DATASPACE: {
1359 *value = static_cast<int>(mDataSpace);
1360 return NO_ERROR;
1361 }
1362 case NATIVE_WINDOW_MAX_BUFFER_COUNT: {
1363 *value = mMaxBufferCount;
1364 return NO_ERROR;
1365 }
1366 }
1367 }
1368 return mGraphicBufferProducer->query(what, value);
1369 }
1370
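// ANativeWindow perform() entry point: decodes the opcode and forwards the va_list to
// the matching dispatch helper below.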
1371 int Surface::perform(int operation, va_list args)
1372 {
1373 int res = NO_ERROR;
1374 switch (operation) {
1375 case NATIVE_WINDOW_CONNECT:
1376 // deprecated. must return NO_ERROR.
1377 break;
1378 case NATIVE_WINDOW_DISCONNECT:
1379 // deprecated. must return NO_ERROR.
1380 break;
1381 case NATIVE_WINDOW_SET_USAGE:
1382 res = dispatchSetUsage(args);
1383 break;
1384 case NATIVE_WINDOW_SET_CROP:
1385 res = dispatchSetCrop(args);
1386 break;
1387 case NATIVE_WINDOW_SET_BUFFER_COUNT:
1388 res = dispatchSetBufferCount(args);
1389 break;
1390 case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
1391 res = dispatchSetBuffersGeometry(args);
1392 break;
1393 case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
1394 res = dispatchSetBuffersTransform(args);
1395 break;
1396 case NATIVE_WINDOW_SET_BUFFERS_STICKY_TRANSFORM:
1397 res = dispatchSetBuffersStickyTransform(args);
1398 break;
1399 case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
1400 res = dispatchSetBuffersTimestamp(args);
1401 break;
1402 case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
1403 res = dispatchSetBuffersDimensions(args);
1404 break;
1405 case NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS:
1406 res = dispatchSetBuffersUserDimensions(args);
1407 break;
1408 case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
1409 res = dispatchSetBuffersFormat(args);
1410 break;
1411 case NATIVE_WINDOW_LOCK:
1412 res = dispatchLock(args);
1413 break;
1414 case NATIVE_WINDOW_UNLOCK_AND_POST:
1415 res = dispatchUnlockAndPost(args);
1416 break;
1417 case NATIVE_WINDOW_SET_SCALING_MODE:
1418 res = dispatchSetScalingMode(args);
1419 break;
1420 case NATIVE_WINDOW_API_CONNECT:
1421 res = dispatchConnect(args);
1422 break;
1423 case NATIVE_WINDOW_API_DISCONNECT:
1424 res = dispatchDisconnect(args);
1425 break;
1426 case NATIVE_WINDOW_SET_SIDEBAND_STREAM:
1427 res = dispatchSetSidebandStream(args);
1428 break;
1429 case NATIVE_WINDOW_SET_BUFFERS_DATASPACE:
1430 res = dispatchSetBuffersDataSpace(args);
1431 break;
1432 case NATIVE_WINDOW_SET_BUFFERS_SMPTE2086_METADATA:
1433 res = dispatchSetBuffersSmpte2086Metadata(args);
1434 break;
1435 case NATIVE_WINDOW_SET_BUFFERS_CTA861_3_METADATA:
1436 res = dispatchSetBuffersCta8613Metadata(args);
1437 break;
1438 case NATIVE_WINDOW_SET_BUFFERS_HDR10_PLUS_METADATA:
1439 res = dispatchSetBuffersHdr10PlusMetadata(args);
1440 break;
1441 case NATIVE_WINDOW_SET_SURFACE_DAMAGE:
1442 res = dispatchSetSurfaceDamage(args);
1443 break;
1444 case NATIVE_WINDOW_SET_SHARED_BUFFER_MODE:
1445 res = dispatchSetSharedBufferMode(args);
1446 break;
1447 case NATIVE_WINDOW_SET_AUTO_REFRESH:
1448 res = dispatchSetAutoRefresh(args);
1449 break;
1450 case NATIVE_WINDOW_GET_REFRESH_CYCLE_DURATION:
1451 res = dispatchGetDisplayRefreshCycleDuration(args);
1452 break;
1453 case NATIVE_WINDOW_GET_NEXT_FRAME_ID:
1454 res = dispatchGetNextFrameId(args);
1455 break;
1456 case NATIVE_WINDOW_ENABLE_FRAME_TIMESTAMPS:
1457 res = dispatchEnableFrameTimestamps(args);
1458 break;
1459 case NATIVE_WINDOW_GET_COMPOSITOR_TIMING:
1460 res = dispatchGetCompositorTiming(args);
1461 break;
1462 case NATIVE_WINDOW_GET_FRAME_TIMESTAMPS:
1463 res = dispatchGetFrameTimestamps(args);
1464 break;
1465 case NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT:
1466 res = dispatchGetWideColorSupport(args);
1467 break;
1468 case NATIVE_WINDOW_GET_HDR_SUPPORT:
1469 res = dispatchGetHdrSupport(args);
1470 break;
1471 case NATIVE_WINDOW_SET_USAGE64:
1472 res = dispatchSetUsage64(args);
1473 break;
1474 case NATIVE_WINDOW_GET_CONSUMER_USAGE64:
1475 res = dispatchGetConsumerUsage64(args);
1476 break;
1477 case NATIVE_WINDOW_SET_AUTO_PREROTATION:
1478 res = dispatchSetAutoPrerotation(args);
1479 break;
1480 case NATIVE_WINDOW_GET_LAST_DEQUEUE_START:
1481 res = dispatchGetLastDequeueStartTime(args);
1482 break;
1483 case NATIVE_WINDOW_SET_DEQUEUE_TIMEOUT:
1484 res = dispatchSetDequeueTimeout(args);
1485 break;
1486 case NATIVE_WINDOW_GET_LAST_DEQUEUE_DURATION:
1487 res = dispatchGetLastDequeueDuration(args);
1488 break;
1489 case NATIVE_WINDOW_GET_LAST_QUEUE_DURATION:
1490 res = dispatchGetLastQueueDuration(args);
1491 break;
1492 case NATIVE_WINDOW_SET_FRAME_RATE:
1493 res = dispatchSetFrameRate(args);
1494 break;
1495 case NATIVE_WINDOW_SET_CANCEL_INTERCEPTOR:
1496 res = dispatchAddCancelInterceptor(args);
1497 break;
1498 case NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR:
1499 res = dispatchAddDequeueInterceptor(args);
1500 break;
1501 case NATIVE_WINDOW_SET_PERFORM_INTERCEPTOR:
1502 res = dispatchAddPerformInterceptor(args);
1503 break;
1504 case NATIVE_WINDOW_SET_QUEUE_INTERCEPTOR:
1505 res = dispatchAddQueueInterceptor(args);
1506 break;
1507 case NATIVE_WINDOW_SET_QUERY_INTERCEPTOR:
1508 res = dispatchAddQueryInterceptor(args);
1509 break;
1510 case NATIVE_WINDOW_ALLOCATE_BUFFERS:
1511 allocateBuffers();
1512 res = NO_ERROR;
1513 break;
1514 case NATIVE_WINDOW_GET_LAST_QUEUED_BUFFER:
1515 res = dispatchGetLastQueuedBuffer(args);
1516 break;
1517 case NATIVE_WINDOW_GET_LAST_QUEUED_BUFFER2:
1518 res = dispatchGetLastQueuedBuffer2(args);
1519 break;
1520 case NATIVE_WINDOW_SET_FRAME_TIMELINE_INFO:
1521 res = dispatchSetFrameTimelineInfo(args);
1522 break;
1523 default:
1524 res = NAME_NOT_FOUND;
1525 break;
1526 }
1527 return res;
1528 }
1529
1530 int Surface::dispatchConnect(va_list args) {
1531 int api = va_arg(args, int);
1532 return connect(api);
1533 }
1534
1535 int Surface::dispatchDisconnect(va_list args) {
1536 int api = va_arg(args, int);
1537 return disconnect(api);
1538 }
1539
1540 int Surface::dispatchSetUsage(va_list args) {
1541 uint64_t usage = va_arg(args, uint32_t);
1542 return setUsage(usage);
1543 }
1544
1545 int Surface::dispatchSetUsage64(va_list args) {
1546 uint64_t usage = va_arg(args, uint64_t);
1547 return setUsage(usage);
1548 }
1549
1550 int Surface::dispatchSetCrop(va_list args) {
1551 android_native_rect_t const* rect = va_arg(args, android_native_rect_t*);
1552 return setCrop(reinterpret_cast<Rect const*>(rect));
1553 }
1554
1555 int Surface::dispatchSetBufferCount(va_list args) {
1556 size_t bufferCount = va_arg(args, size_t);
1557 return setBufferCount(static_cast<int32_t>(bufferCount));
1558 }
1559
1560 int Surface::dispatchSetBuffersGeometry(va_list args) {
1561 uint32_t width = va_arg(args, uint32_t);
1562 uint32_t height = va_arg(args, uint32_t);
1563 PixelFormat format = va_arg(args, PixelFormat);
1564 int err = setBuffersDimensions(width, height);
1565 if (err != 0) {
1566 return err;
1567 }
1568 return setBuffersFormat(format);
1569 }
1570
1571 int Surface::dispatchSetBuffersDimensions(va_list args) {
1572 uint32_t width = va_arg(args, uint32_t);
1573 uint32_t height = va_arg(args, uint32_t);
1574 return setBuffersDimensions(width, height);
1575 }
1576
1577 int Surface::dispatchSetBuffersUserDimensions(va_list args) {
1578 uint32_t width = va_arg(args, uint32_t);
1579 uint32_t height = va_arg(args, uint32_t);
1580 return setBuffersUserDimensions(width, height);
1581 }
1582
1583 int Surface::dispatchSetBuffersFormat(va_list args) {
1584 PixelFormat format = va_arg(args, PixelFormat);
1585 return setBuffersFormat(format);
1586 }
1587
dispatchSetScalingMode(va_list args)1588 int Surface::dispatchSetScalingMode(va_list args) {
1589 int mode = va_arg(args, int);
1590 return setScalingMode(mode);
1591 }
1592
dispatchSetBuffersTransform(va_list args)1593 int Surface::dispatchSetBuffersTransform(va_list args) {
1594 uint32_t transform = va_arg(args, uint32_t);
1595 return setBuffersTransform(transform);
1596 }
1597
dispatchSetBuffersStickyTransform(va_list args)1598 int Surface::dispatchSetBuffersStickyTransform(va_list args) {
1599 uint32_t transform = va_arg(args, uint32_t);
1600 return setBuffersStickyTransform(transform);
1601 }
1602
dispatchSetBuffersTimestamp(va_list args)1603 int Surface::dispatchSetBuffersTimestamp(va_list args) {
1604 int64_t timestamp = va_arg(args, int64_t);
1605 return setBuffersTimestamp(timestamp);
1606 }
1607
dispatchLock(va_list args)1608 int Surface::dispatchLock(va_list args) {
1609 ANativeWindow_Buffer* outBuffer = va_arg(args, ANativeWindow_Buffer*);
1610 ARect* inOutDirtyBounds = va_arg(args, ARect*);
1611 return lock(outBuffer, inOutDirtyBounds);
1612 }
1613
dispatchUnlockAndPost(va_list args)1614 int Surface::dispatchUnlockAndPost(va_list args __attribute__((unused))) {
1615 return unlockAndPost();
1616 }
1617
dispatchSetSidebandStream(va_list args)1618 int Surface::dispatchSetSidebandStream(va_list args) {
1619 native_handle_t* sH = va_arg(args, native_handle_t*);
1620 sp<NativeHandle> sidebandHandle = NativeHandle::create(sH, false);
1621 setSidebandStream(sidebandHandle);
1622 return OK;
1623 }
1624
dispatchSetBuffersDataSpace(va_list args)1625 int Surface::dispatchSetBuffersDataSpace(va_list args) {
1626 Dataspace dataspace = static_cast<Dataspace>(va_arg(args, int));
1627 return setBuffersDataSpace(dataspace);
1628 }
1629
dispatchSetBuffersSmpte2086Metadata(va_list args)1630 int Surface::dispatchSetBuffersSmpte2086Metadata(va_list args) {
1631 const android_smpte2086_metadata* metadata =
1632 va_arg(args, const android_smpte2086_metadata*);
1633 return setBuffersSmpte2086Metadata(metadata);
1634 }
1635
dispatchSetBuffersCta8613Metadata(va_list args)1636 int Surface::dispatchSetBuffersCta8613Metadata(va_list args) {
1637 const android_cta861_3_metadata* metadata =
1638 va_arg(args, const android_cta861_3_metadata*);
1639 return setBuffersCta8613Metadata(metadata);
1640 }
1641
dispatchSetBuffersHdr10PlusMetadata(va_list args)1642 int Surface::dispatchSetBuffersHdr10PlusMetadata(va_list args) {
1643 const size_t size = va_arg(args, size_t);
1644 const uint8_t* metadata = va_arg(args, const uint8_t*);
1645 return setBuffersHdr10PlusMetadata(size, metadata);
1646 }
1647
dispatchSetSurfaceDamage(va_list args)1648 int Surface::dispatchSetSurfaceDamage(va_list args) {
1649 android_native_rect_t* rects = va_arg(args, android_native_rect_t*);
1650 size_t numRects = va_arg(args, size_t);
1651 setSurfaceDamage(rects, numRects);
1652 return NO_ERROR;
1653 }
1654
dispatchSetSharedBufferMode(va_list args)1655 int Surface::dispatchSetSharedBufferMode(va_list args) {
1656 bool sharedBufferMode = va_arg(args, int);
1657 return setSharedBufferMode(sharedBufferMode);
1658 }
1659
dispatchSetAutoRefresh(va_list args)1660 int Surface::dispatchSetAutoRefresh(va_list args) {
1661 bool autoRefresh = va_arg(args, int);
1662 return setAutoRefresh(autoRefresh);
1663 }
1664
dispatchGetDisplayRefreshCycleDuration(va_list args)1665 int Surface::dispatchGetDisplayRefreshCycleDuration(va_list args) {
1666 nsecs_t* outRefreshDuration = va_arg(args, int64_t*);
1667 return getDisplayRefreshCycleDuration(outRefreshDuration);
1668 }
1669
dispatchGetNextFrameId(va_list args)1670 int Surface::dispatchGetNextFrameId(va_list args) {
1671 uint64_t* nextFrameId = va_arg(args, uint64_t*);
1672 *nextFrameId = getNextFrameNumber();
1673 return NO_ERROR;
1674 }
1675
dispatchEnableFrameTimestamps(va_list args)1676 int Surface::dispatchEnableFrameTimestamps(va_list args) {
1677 bool enable = va_arg(args, int);
1678 enableFrameTimestamps(enable);
1679 return NO_ERROR;
1680 }
1681
dispatchGetCompositorTiming(va_list args)1682 int Surface::dispatchGetCompositorTiming(va_list args) {
1683 nsecs_t* compositeDeadline = va_arg(args, int64_t*);
1684 nsecs_t* compositeInterval = va_arg(args, int64_t*);
1685 nsecs_t* compositeToPresentLatency = va_arg(args, int64_t*);
1686 return getCompositorTiming(compositeDeadline, compositeInterval,
1687 compositeToPresentLatency);
1688 }
1689
dispatchGetFrameTimestamps(va_list args)1690 int Surface::dispatchGetFrameTimestamps(va_list args) {
1691 uint64_t frameId = va_arg(args, uint64_t);
1692 nsecs_t* outRequestedPresentTime = va_arg(args, int64_t*);
1693 nsecs_t* outAcquireTime = va_arg(args, int64_t*);
1694 nsecs_t* outLatchTime = va_arg(args, int64_t*);
1695 nsecs_t* outFirstRefreshStartTime = va_arg(args, int64_t*);
1696 nsecs_t* outLastRefreshStartTime = va_arg(args, int64_t*);
1697 nsecs_t* outGpuCompositionDoneTime = va_arg(args, int64_t*);
1698 nsecs_t* outDisplayPresentTime = va_arg(args, int64_t*);
1699 nsecs_t* outDequeueReadyTime = va_arg(args, int64_t*);
1700 nsecs_t* outReleaseTime = va_arg(args, int64_t*);
1701 return getFrameTimestamps(frameId,
1702 outRequestedPresentTime, outAcquireTime, outLatchTime,
1703 outFirstRefreshStartTime, outLastRefreshStartTime,
1704 outGpuCompositionDoneTime, outDisplayPresentTime,
1705 outDequeueReadyTime, outReleaseTime);
1706 }
1707
dispatchGetWideColorSupport(va_list args)1708 int Surface::dispatchGetWideColorSupport(va_list args) {
1709 bool* outSupport = va_arg(args, bool*);
1710 return getWideColorSupport(outSupport);
1711 }
1712
dispatchGetHdrSupport(va_list args)1713 int Surface::dispatchGetHdrSupport(va_list args) {
1714 bool* outSupport = va_arg(args, bool*);
1715 return getHdrSupport(outSupport);
1716 }
1717
dispatchGetConsumerUsage64(va_list args)1718 int Surface::dispatchGetConsumerUsage64(va_list args) {
1719 uint64_t* usage = va_arg(args, uint64_t*);
1720 return getConsumerUsage(usage);
1721 }
1722
dispatchSetAutoPrerotation(va_list args)1723 int Surface::dispatchSetAutoPrerotation(va_list args) {
1724 bool autoPrerotation = va_arg(args, int);
1725 return setAutoPrerotation(autoPrerotation);
1726 }
1727
dispatchGetLastDequeueStartTime(va_list args)1728 int Surface::dispatchGetLastDequeueStartTime(va_list args) {
1729 int64_t* lastDequeueStartTime = va_arg(args, int64_t*);
1730 *lastDequeueStartTime = mLastDequeueStartTime;
1731 return NO_ERROR;
1732 }
1733
dispatchSetDequeueTimeout(va_list args)1734 int Surface::dispatchSetDequeueTimeout(va_list args) {
1735 nsecs_t timeout = va_arg(args, int64_t);
1736 return setDequeueTimeout(timeout);
1737 }
1738
dispatchGetLastDequeueDuration(va_list args)1739 int Surface::dispatchGetLastDequeueDuration(va_list args) {
1740 int64_t* lastDequeueDuration = va_arg(args, int64_t*);
1741 *lastDequeueDuration = mLastDequeueDuration;
1742 return NO_ERROR;
1743 }
1744
dispatchGetLastQueueDuration(va_list args)1745 int Surface::dispatchGetLastQueueDuration(va_list args) {
1746 int64_t* lastQueueDuration = va_arg(args, int64_t*);
1747 *lastQueueDuration = mLastQueueDuration;
1748 return NO_ERROR;
1749 }
1750
dispatchSetFrameRate(va_list args)1751 int Surface::dispatchSetFrameRate(va_list args) {
1752 float frameRate = static_cast<float>(va_arg(args, double));
1753 int8_t compatibility = static_cast<int8_t>(va_arg(args, int));
1754 int8_t changeFrameRateStrategy = static_cast<int8_t>(va_arg(args, int));
1755 return setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
1756 }
1757
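// Note: the float frame rate and int8_t arguments above are read back as
// double and int because varargs undergo the default argument promotions;
// the casts simply undo that promotion.
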
int Surface::dispatchAddCancelInterceptor(va_list args) {
    ANativeWindow_cancelBufferInterceptor interceptor =
            va_arg(args, ANativeWindow_cancelBufferInterceptor);
    void* data = va_arg(args, void*);
    std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
    mCancelInterceptor = interceptor;
    mCancelInterceptorData = data;
    return NO_ERROR;
}

int Surface::dispatchAddDequeueInterceptor(va_list args) {
    ANativeWindow_dequeueBufferInterceptor interceptor =
            va_arg(args, ANativeWindow_dequeueBufferInterceptor);
    void* data = va_arg(args, void*);
    std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
    mDequeueInterceptor = interceptor;
    mDequeueInterceptorData = data;
    return NO_ERROR;
}

int Surface::dispatchAddPerformInterceptor(va_list args) {
    ANativeWindow_performInterceptor interceptor = va_arg(args, ANativeWindow_performInterceptor);
    void* data = va_arg(args, void*);
    std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
    mPerformInterceptor = interceptor;
    mPerformInterceptorData = data;
    return NO_ERROR;
}

int Surface::dispatchAddQueueInterceptor(va_list args) {
    ANativeWindow_queueBufferInterceptor interceptor =
            va_arg(args, ANativeWindow_queueBufferInterceptor);
    void* data = va_arg(args, void*);
    std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
    mQueueInterceptor = interceptor;
    mQueueInterceptorData = data;
    return NO_ERROR;
}

int Surface::dispatchAddQueryInterceptor(va_list args) {
    ANativeWindow_queryInterceptor interceptor = va_arg(args, ANativeWindow_queryInterceptor);
    void* data = va_arg(args, void*);
    std::lock_guard<std::shared_mutex> lock(mInterceptorMutex);
    mQueryInterceptor = interceptor;
    mQueryInterceptorData = data;
    return NO_ERROR;
}

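// The five registration ops above install per-Surface interceptors that wrap
// the corresponding ANativeWindow entry points (see isInterceptorRegistrationOp).
// Registration takes mInterceptorMutex exclusively so it does not race with
// hooked calls already in flight. As a rough sketch of how a caller might
// install one (the helper name is assumed to come from libnativewindow and is
// shown for illustration only):
//
//     // Presumed equivalent to window->perform(window,
//     //     NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR, myInterceptor, myData)
//     ANativeWindow_setDequeueBufferInterceptor(window, myInterceptor, myData);
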
int Surface::dispatchGetLastQueuedBuffer(va_list args) {
    AHardwareBuffer** buffer = va_arg(args, AHardwareBuffer**);
    int* fence = va_arg(args, int*);
    float* matrix = va_arg(args, float*);
    sp<GraphicBuffer> graphicBuffer;
    sp<Fence> spFence;

    int result = mGraphicBufferProducer->getLastQueuedBuffer(&graphicBuffer, &spFence, matrix);

    if (graphicBuffer != nullptr) {
        *buffer = graphicBuffer->toAHardwareBuffer();
        AHardwareBuffer_acquire(*buffer);
    } else {
        *buffer = nullptr;
    }

    if (spFence != nullptr) {
        *fence = spFence->dup();
    } else {
        *fence = -1;
    }
    return result;
}

int Surface::dispatchGetLastQueuedBuffer2(va_list args) {
    AHardwareBuffer** buffer = va_arg(args, AHardwareBuffer**);
    int* fence = va_arg(args, int*);
    ARect* crop = va_arg(args, ARect*);
    uint32_t* transform = va_arg(args, uint32_t*);
    sp<GraphicBuffer> graphicBuffer;
    sp<Fence> spFence;

    Rect r;
    int result =
            mGraphicBufferProducer->getLastQueuedBuffer(&graphicBuffer, &spFence, &r, transform);

    if (graphicBuffer != nullptr) {
        *buffer = graphicBuffer->toAHardwareBuffer();
        AHardwareBuffer_acquire(*buffer);

        // Avoid setting crop* unless buffer is valid (matches IGBP behavior)
        crop->left = r.left;
        crop->top = r.top;
        crop->right = r.right;
        crop->bottom = r.bottom;
    } else {
        *buffer = nullptr;
    }

    if (spFence != nullptr) {
        *fence = spFence->dup();
    } else {
        *fence = -1;
    }
    return result;
}

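// For both getLastQueuedBuffer variants above, ownership transfers to the
// caller: the returned AHardwareBuffer has had AHardwareBuffer_acquire()
// called on it (so the caller should eventually release it), and the returned
// fence fd, when not -1, is a dup() that the caller must close.
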
int Surface::dispatchSetFrameTimelineInfo(va_list args) {
    ATRACE_CALL();
    auto frameNumber = static_cast<uint64_t>(va_arg(args, uint64_t));
    auto frameTimelineVsyncId = static_cast<int64_t>(va_arg(args, int64_t));
    auto inputEventId = static_cast<int32_t>(va_arg(args, int32_t));
    auto startTimeNanos = static_cast<int64_t>(va_arg(args, int64_t));

    ALOGV("Surface::%s", __func__);
    return setFrameTimelineInfo(frameNumber, {frameTimelineVsyncId, inputEventId, startTimeNanos});
}

bool Surface::transformToDisplayInverse() const {
    return (mTransform & NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) ==
            NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
}

int Surface::connect(int api) {
    static sp<IProducerListener> listener = new StubProducerListener();
    return connect(api, listener);
}

int Surface::connect(int api, const sp<IProducerListener>& listener) {
    return connect(api, listener, false);
}

int Surface::connect(
        int api, bool reportBufferRemoval, const sp<SurfaceListener>& sListener) {
    if (sListener != nullptr) {
        mListenerProxy = new ProducerListenerProxy(this, sListener);
    }
    return connect(api, mListenerProxy, reportBufferRemoval);
}

int Surface::connect(
        int api, const sp<IProducerListener>& listener, bool reportBufferRemoval) {
    ATRACE_CALL();
    ALOGV("Surface::connect");
    Mutex::Autolock lock(mMutex);
    IGraphicBufferProducer::QueueBufferOutput output;
    mReportRemovedBuffers = reportBufferRemoval;
    int err = mGraphicBufferProducer->connect(listener, api, mProducerControlledByApp, &output);
    if (err == NO_ERROR) {
        mDefaultWidth = output.width;
        mDefaultHeight = output.height;
        mNextFrameNumber = output.nextFrameNumber;
        mMaxBufferCount = output.maxBufferCount;

        // Ignore transform hint if sticky transform is set or transform to display inverse flag is
        // set. Transform hint should be ignored if the client is expected to always submit buffers
        // in the same orientation.
        if (mStickyTransform == 0 && !transformToDisplayInverse()) {
            mTransformHint = output.transformHint;
        }

        mConsumerRunningBehind = (output.numPendingBuffers >= 2);
    }
    if (!err && api == NATIVE_WINDOW_API_CPU) {
        mConnectedToCpu = true;
        // Clear the dirty region in case we're switching from a non-CPU API
        mDirtyRegion.clear();
    } else if (!err) {
        // Initialize the dirty region for tracking surface damage
        mDirtyRegion = Region::INVALID_REGION;
    }

    return err;
}

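// On a successful connect, QueueBufferOutput seeds the cached defaults
// (mDefaultWidth/mDefaultHeight, mNextFrameNumber, mMaxBufferCount) and,
// when applicable, the transform hint; CPU connections additionally start
// with an empty dirty region so lock()/unlockAndPost() damage tracking begins
// from scratch.
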
int Surface::disconnect(int api, IGraphicBufferProducer::DisconnectMode mode) {
    ATRACE_CALL();
    ALOGV("Surface::disconnect");
    Mutex::Autolock lock(mMutex);
    mRemovedBuffers.clear();
    mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
    mSharedBufferHasBeenQueued = false;
    freeAllBuffers();
    int err = mGraphicBufferProducer->disconnect(api, mode);
    if (!err) {
        mReqFormat = 0;
        mReqWidth = 0;
        mReqHeight = 0;
        mReqUsage = 0;
        mCrop.clear();
        mScalingMode = NATIVE_WINDOW_SCALING_MODE_FREEZE;
        mTransform = 0;
        mStickyTransform = 0;
        mAutoPrerotation = false;
        mEnableFrameTimestamps = false;
        mMaxBufferCount = NUM_BUFFER_SLOTS;

        if (api == NATIVE_WINDOW_API_CPU) {
            mConnectedToCpu = false;
        }
    }
    return err;
}

int Surface::detachNextBuffer(sp<GraphicBuffer>* outBuffer,
        sp<Fence>* outFence) {
    ATRACE_CALL();
    ALOGV("Surface::detachNextBuffer");

    if (outBuffer == nullptr || outFence == nullptr) {
        return BAD_VALUE;
    }

    Mutex::Autolock lock(mMutex);
    if (mReportRemovedBuffers) {
        mRemovedBuffers.clear();
    }

    sp<GraphicBuffer> buffer(nullptr);
    sp<Fence> fence(nullptr);
    status_t result = mGraphicBufferProducer->detachNextBuffer(
            &buffer, &fence);
    if (result != NO_ERROR) {
        return result;
    }

    *outBuffer = buffer;
    if (fence != nullptr && fence->isValid()) {
        *outFence = fence;
    } else {
        *outFence = Fence::NO_FENCE;
    }

    for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
        if (mSlots[i].buffer != nullptr &&
                mSlots[i].buffer->getId() == buffer->getId()) {
            if (mReportRemovedBuffers) {
                mRemovedBuffers.push_back(mSlots[i].buffer);
            }
            mSlots[i].buffer = nullptr;
        }
    }

    return NO_ERROR;
}

int Surface::attachBuffer(ANativeWindowBuffer* buffer)
{
    ATRACE_CALL();
    ALOGV("Surface::attachBuffer");

    Mutex::Autolock lock(mMutex);
    if (mReportRemovedBuffers) {
        mRemovedBuffers.clear();
    }

    sp<GraphicBuffer> graphicBuffer(static_cast<GraphicBuffer*>(buffer));
    uint32_t priorGeneration = graphicBuffer->mGenerationNumber;
    graphicBuffer->mGenerationNumber = mGenerationNumber;
    int32_t attachedSlot = -1;
    status_t result = mGraphicBufferProducer->attachBuffer(&attachedSlot, graphicBuffer);
    if (result != NO_ERROR) {
        ALOGE("attachBuffer: IGraphicBufferProducer call failed (%d)", result);
        graphicBuffer->mGenerationNumber = priorGeneration;
        return result;
    }
    if (mReportRemovedBuffers && (mSlots[attachedSlot].buffer != nullptr)) {
        mRemovedBuffers.push_back(mSlots[attachedSlot].buffer);
    }
    mSlots[attachedSlot].buffer = graphicBuffer;
    mDequeuedSlots.insert(attachedSlot);

    return NO_ERROR;
}

int Surface::setUsage(uint64_t reqUsage)
{
    ALOGV("Surface::setUsage");
    Mutex::Autolock lock(mMutex);
    if (reqUsage != mReqUsage) {
        mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
    }
    mReqUsage = reqUsage;
    return OK;
}

int Surface::setCrop(Rect const* rect)
{
    ATRACE_CALL();

    Rect realRect(Rect::EMPTY_RECT);
    if (rect == nullptr || rect->isEmpty()) {
        realRect.clear();
    } else {
        realRect = *rect;
    }

    ALOGV("Surface::setCrop rect=[%d %d %d %d]",
            realRect.left, realRect.top, realRect.right, realRect.bottom);

    Mutex::Autolock lock(mMutex);
    mCrop = realRect;
    return NO_ERROR;
}

int Surface::setBufferCount(int bufferCount)
{
    ATRACE_CALL();
    ALOGV("Surface::setBufferCount");
    Mutex::Autolock lock(mMutex);

    status_t err = NO_ERROR;
    if (bufferCount == 0) {
        err = mGraphicBufferProducer->setMaxDequeuedBufferCount(1);
    } else {
        int minUndequeuedBuffers = 0;
        err = mGraphicBufferProducer->query(
                NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
        if (err == NO_ERROR) {
            err = mGraphicBufferProducer->setMaxDequeuedBufferCount(
                    bufferCount - minUndequeuedBuffers);
        }
    }

    ALOGE_IF(err, "IGraphicBufferProducer::setBufferCount(%d) returned %s",
            bufferCount, strerror(-err));

    return err;
}

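// setBufferCount() is expressed in terms of the newer max-dequeued-buffer API:
// the requested total is split into the buffers the consumer keeps
// (NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS) and those the producer may hold. For
// example, with a requested count of 3 and minUndequeuedBuffers == 1, the
// producer is allowed at most 2 dequeued buffers at once.
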
int Surface::setMaxDequeuedBufferCount(int maxDequeuedBuffers) {
    ATRACE_CALL();
    ALOGV("Surface::setMaxDequeuedBufferCount");
    Mutex::Autolock lock(mMutex);

    status_t err = mGraphicBufferProducer->setMaxDequeuedBufferCount(
            maxDequeuedBuffers);
    ALOGE_IF(err, "IGraphicBufferProducer::setMaxDequeuedBufferCount(%d) "
            "returned %s", maxDequeuedBuffers, strerror(-err));

    return err;
}

int Surface::setAsyncMode(bool async) {
    ATRACE_CALL();
    ALOGV("Surface::setAsyncMode");
    Mutex::Autolock lock(mMutex);

    status_t err = mGraphicBufferProducer->setAsyncMode(async);
    ALOGE_IF(err, "IGraphicBufferProducer::setAsyncMode(%d) returned %s",
            async, strerror(-err));

    return err;
}

int Surface::setSharedBufferMode(bool sharedBufferMode) {
    ATRACE_CALL();
    ALOGV("Surface::setSharedBufferMode (%d)", sharedBufferMode);
    Mutex::Autolock lock(mMutex);

    status_t err = mGraphicBufferProducer->setSharedBufferMode(
            sharedBufferMode);
    if (err == NO_ERROR) {
        mSharedBufferMode = sharedBufferMode;
    }
    ALOGE_IF(err, "IGraphicBufferProducer::setSharedBufferMode(%d) returned "
            "%s", sharedBufferMode, strerror(-err));

    return err;
}

int Surface::setAutoRefresh(bool autoRefresh) {
    ATRACE_CALL();
    ALOGV("Surface::setAutoRefresh (%d)", autoRefresh);
    Mutex::Autolock lock(mMutex);

    status_t err = mGraphicBufferProducer->setAutoRefresh(autoRefresh);
    if (err == NO_ERROR) {
        mAutoRefresh = autoRefresh;
    }
    ALOGE_IF(err, "IGraphicBufferProducer::setAutoRefresh(%d) returned %s",
            autoRefresh, strerror(-err));
    return err;
}

int Surface::setBuffersDimensions(uint32_t width, uint32_t height)
{
    ATRACE_CALL();
    ALOGV("Surface::setBuffersDimensions");

    if ((width && !height) || (!width && height))
        return BAD_VALUE;

    Mutex::Autolock lock(mMutex);
    if (width != mReqWidth || height != mReqHeight) {
        mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
    }
    mReqWidth = width;
    mReqHeight = height;
    return NO_ERROR;
}

int Surface::setBuffersUserDimensions(uint32_t width, uint32_t height)
{
    ATRACE_CALL();
    ALOGV("Surface::setBuffersUserDimensions");

    if ((width && !height) || (!width && height))
        return BAD_VALUE;

    Mutex::Autolock lock(mMutex);
    if (width != mUserWidth || height != mUserHeight) {
        mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
    }
    mUserWidth = width;
    mUserHeight = height;
    return NO_ERROR;
}

int Surface::setBuffersFormat(PixelFormat format)
{
    ALOGV("Surface::setBuffersFormat");

    Mutex::Autolock lock(mMutex);
    if (format != mReqFormat) {
        mSharedBufferSlot = BufferItem::INVALID_BUFFER_SLOT;
    }
    mReqFormat = format;
    return NO_ERROR;
}

int Surface::setScalingMode(int mode)
{
    ATRACE_CALL();
    ALOGV("Surface::setScalingMode(%d)", mode);

    switch (mode) {
        case NATIVE_WINDOW_SCALING_MODE_FREEZE:
        case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
        case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
        case NATIVE_WINDOW_SCALING_MODE_NO_SCALE_CROP:
            break;
        default:
            ALOGE("unknown scaling mode: %d", mode);
            return BAD_VALUE;
    }

    Mutex::Autolock lock(mMutex);
    mScalingMode = mode;
    return NO_ERROR;
}

int Surface::setBuffersTransform(uint32_t transform)
{
    ATRACE_CALL();
    ALOGV("Surface::setBuffersTransform");
    Mutex::Autolock lock(mMutex);
    // Ensure NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY is sticky. If the client sets the flag, do not
    // override it until the surface is disconnected. This is a temporary workaround for camera
    // until they switch to using Buffer State Layers. Currently, if the client sets the buffer
    // transform, it may be overridden by the buffer producer when the producer sets the buffer
    // transform.
    if (transformToDisplayInverse()) {
        transform |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;
    }
    mTransform = transform;
    return NO_ERROR;
}

int Surface::setBuffersStickyTransform(uint32_t transform)
{
    ATRACE_CALL();
    ALOGV("Surface::setBuffersStickyTransform");
    Mutex::Autolock lock(mMutex);
    mStickyTransform = transform;
    return NO_ERROR;
}

int Surface::setBuffersTimestamp(int64_t timestamp)
{
    ALOGV("Surface::setBuffersTimestamp");
    Mutex::Autolock lock(mMutex);
    mTimestamp = timestamp;
    return NO_ERROR;
}

int Surface::setBuffersDataSpace(Dataspace dataSpace)
{
    ALOGV("Surface::setBuffersDataSpace");
    Mutex::Autolock lock(mMutex);
    mDataSpace = dataSpace;
    return NO_ERROR;
}

int Surface::setBuffersSmpte2086Metadata(const android_smpte2086_metadata* metadata) {
    ALOGV("Surface::setBuffersSmpte2086Metadata");
    Mutex::Autolock lock(mMutex);
    if (metadata) {
        mHdrMetadata.smpte2086 = *metadata;
        mHdrMetadata.validTypes |= HdrMetadata::SMPTE2086;
    } else {
        mHdrMetadata.validTypes &= ~HdrMetadata::SMPTE2086;
    }
    return NO_ERROR;
}

int Surface::setBuffersCta8613Metadata(const android_cta861_3_metadata* metadata) {
    ALOGV("Surface::setBuffersCta8613Metadata");
    Mutex::Autolock lock(mMutex);
    if (metadata) {
        mHdrMetadata.cta8613 = *metadata;
        mHdrMetadata.validTypes |= HdrMetadata::CTA861_3;
    } else {
        mHdrMetadata.validTypes &= ~HdrMetadata::CTA861_3;
    }
    return NO_ERROR;
}

int Surface::setBuffersHdr10PlusMetadata(const size_t size, const uint8_t* metadata) {
    ALOGV("Surface::setBuffersHdr10PlusMetadata");
    Mutex::Autolock lock(mMutex);
    if (size > 0) {
        mHdrMetadata.hdr10plus.assign(metadata, metadata + size);
        mHdrMetadata.validTypes |= HdrMetadata::HDR10PLUS;
    } else {
        mHdrMetadata.validTypes &= ~HdrMetadata::HDR10PLUS;
        mHdrMetadata.hdr10plus.clear();
    }
    return NO_ERROR;
}

Dataspace Surface::getBuffersDataSpace() {
    ALOGV("Surface::getBuffersDataSpace");
    Mutex::Autolock lock(mMutex);
    return mDataSpace;
}

void Surface::freeAllBuffers() {
    if (!mDequeuedSlots.empty()) {
        ALOGE("%s: %zu buffers were freed while being dequeued!",
                __FUNCTION__, mDequeuedSlots.size());
    }
    for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
        mSlots[i].buffer = nullptr;
    }
}

status_t Surface::getAndFlushBuffersFromSlots(const std::vector<int32_t>& slots,
        std::vector<sp<GraphicBuffer>>* outBuffers) {
    ALOGV("Surface::getAndFlushBuffersFromSlots");
    for (int32_t i : slots) {
        if (i < 0 || i >= NUM_BUFFER_SLOTS) {
            ALOGE("%s: Invalid slotIndex: %d", __FUNCTION__, i);
            return BAD_VALUE;
        }
    }

    Mutex::Autolock lock(mMutex);
    for (int32_t i : slots) {
        if (mSlots[i].buffer == nullptr) {
            ALOGW("%s: Discarded slot %d doesn't contain buffer!", __FUNCTION__, i);
            continue;
        }
        // Don't flush currently dequeued buffers
        if (mDequeuedSlots.count(i) > 0) {
            continue;
        }
        outBuffers->push_back(mSlots[i].buffer);
        mSlots[i].buffer = nullptr;
    }
    return OK;
}

void Surface::setSurfaceDamage(android_native_rect_t* rects, size_t numRects) {
    ATRACE_CALL();
    ALOGV("Surface::setSurfaceDamage");
    Mutex::Autolock lock(mMutex);

    if (mConnectedToCpu || numRects == 0) {
        mDirtyRegion = Region::INVALID_REGION;
        return;
    }

    mDirtyRegion.clear();
    for (size_t r = 0; r < numRects; ++r) {
        // We intentionally flip top and bottom here, because the rects are
        // specified with a bottom-left origin, so top > bottom, which fails
        // validation in the Region class. We will fix this up when we flip to
        // a top-left origin in queueBuffer.
        Rect rect(rects[r].left, rects[r].bottom, rects[r].right, rects[r].top);
        mDirtyRegion.orSelf(rect);
    }
}

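// Example of the flip above: a damage rect passed in with a bottom-left origin
// as {left=0, top=100, right=50, bottom=0} is stored as Rect(0, 0, 50, 100),
// which is a valid top-left-origin rect for the Region class.
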
// ----------------------------------------------------------------------
// the lock/unlock APIs must be used from the same thread

static status_t copyBlt(
        const sp<GraphicBuffer>& dst,
        const sp<GraphicBuffer>& src,
        const Region& reg,
        int *dstFenceFd)
{
    if (dst->getId() == src->getId())
        return OK;

    // src and dst width, height and format must be identical. No verification
    // is done here.
    status_t err;
    uint8_t* src_bits = nullptr;
    err = src->lock(GRALLOC_USAGE_SW_READ_OFTEN, reg.bounds(),
            reinterpret_cast<void**>(&src_bits));
    ALOGE_IF(err, "error locking src buffer %s", strerror(-err));

    uint8_t* dst_bits = nullptr;
    err = dst->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, reg.bounds(),
            reinterpret_cast<void**>(&dst_bits), *dstFenceFd);
    ALOGE_IF(err, "error locking dst buffer %s", strerror(-err));
    *dstFenceFd = -1;

    Region::const_iterator head(reg.begin());
    Region::const_iterator tail(reg.end());
    if (head != tail && src_bits && dst_bits) {
        const size_t bpp = bytesPerPixel(src->format);
        const size_t dbpr = static_cast<uint32_t>(dst->stride) * bpp;
        const size_t sbpr = static_cast<uint32_t>(src->stride) * bpp;

        while (head != tail) {
            const Rect& r(*head++);
            int32_t h = r.height();
            if (h <= 0) continue;
            size_t size = static_cast<uint32_t>(r.width()) * bpp;
            uint8_t const * s = src_bits +
                    static_cast<uint32_t>(r.left + src->stride * r.top) * bpp;
            uint8_t * d = dst_bits +
                    static_cast<uint32_t>(r.left + dst->stride * r.top) * bpp;
            if (dbpr == sbpr && size == sbpr) {
                size *= static_cast<size_t>(h);
                h = 1;
            }
            do {
                memcpy(d, s, size);
                d += dbpr;
                s += sbpr;
            } while (--h > 0);
        }
    }

    if (src_bits)
        src->unlock();

    if (dst_bits)
        dst->unlockAsync(dstFenceFd);

    return err;
}

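// copyBlt() is used by lock() below to copy regions that were damaged in a
// previous frame but are not being redrawn this frame from the last posted
// front buffer into the newly dequeued back buffer, row by row per dirty rect.
// When the strides match and a rect spans full rows, the per-row memcpy
// collapses into a single copy of the whole rect.
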
// ----------------------------------------------------------------------------

status_t Surface::lock(
        ANativeWindow_Buffer* outBuffer, ARect* inOutDirtyBounds)
{
    if (mLockedBuffer != nullptr) {
        ALOGE("Surface::lock failed, already locked");
        return INVALID_OPERATION;
    }

    if (!mConnectedToCpu) {
        int err = Surface::connect(NATIVE_WINDOW_API_CPU);
        if (err) {
            return err;
        }
        // we're intending to do software rendering from this point
        setUsage(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN);
    }

    ANativeWindowBuffer* out;
    int fenceFd = -1;
    status_t err = dequeueBuffer(&out, &fenceFd);
    ALOGE_IF(err, "dequeueBuffer failed (%s)", strerror(-err));
    if (err == NO_ERROR) {
        sp<GraphicBuffer> backBuffer(GraphicBuffer::getSelf(out));
        const Rect bounds(backBuffer->width, backBuffer->height);

        Region newDirtyRegion;
        if (inOutDirtyBounds) {
            newDirtyRegion.set(static_cast<Rect const&>(*inOutDirtyBounds));
            newDirtyRegion.andSelf(bounds);
        } else {
            newDirtyRegion.set(bounds);
        }

        // figure out if we can copy the frontbuffer back
        const sp<GraphicBuffer>& frontBuffer(mPostedBuffer);
        const bool canCopyBack = (frontBuffer != nullptr &&
                backBuffer->width == frontBuffer->width &&
                backBuffer->height == frontBuffer->height &&
                backBuffer->format == frontBuffer->format);

        if (canCopyBack) {
            // copy the area that is invalid and not repainted this round
            const Region copyback(mDirtyRegion.subtract(newDirtyRegion));
            if (!copyback.isEmpty()) {
                copyBlt(backBuffer, frontBuffer, copyback, &fenceFd);
            }
        } else {
            // if we can't copy-back anything, modify the user's dirty
            // region to make sure they redraw the whole buffer
            newDirtyRegion.set(bounds);
            mDirtyRegion.clear();
            Mutex::Autolock lock(mMutex);
            for (size_t i = 0; i < NUM_BUFFER_SLOTS; i++) {
                mSlots[i].dirtyRegion.clear();
            }
        }

        { // scope for the lock
            Mutex::Autolock lock(mMutex);
            int backBufferSlot(getSlotFromBufferLocked(backBuffer.get()));
            if (backBufferSlot >= 0) {
                Region& dirtyRegion(mSlots[backBufferSlot].dirtyRegion);
                mDirtyRegion.subtract(dirtyRegion);
                dirtyRegion = newDirtyRegion;
            }
        }

        mDirtyRegion.orSelf(newDirtyRegion);
        if (inOutDirtyBounds) {
            *inOutDirtyBounds = newDirtyRegion.getBounds();
        }

        void* vaddr;
        status_t res = backBuffer->lockAsync(
                GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
                newDirtyRegion.bounds(), &vaddr, fenceFd);

        ALOGW_IF(res, "failed locking buffer (handle = %p)",
                backBuffer->handle);

        if (res != 0) {
            err = INVALID_OPERATION;
        } else {
            mLockedBuffer = backBuffer;
            outBuffer->width = backBuffer->width;
            outBuffer->height = backBuffer->height;
            outBuffer->stride = backBuffer->stride;
            outBuffer->format = backBuffer->format;
            outBuffer->bits = vaddr;
        }
    }
    return err;
}

status_t Surface::unlockAndPost()
{
    if (mLockedBuffer == nullptr) {
        ALOGE("Surface::unlockAndPost failed, no locked buffer");
        return INVALID_OPERATION;
    }

    int fd = -1;
    status_t err = mLockedBuffer->unlockAsync(&fd);
    ALOGE_IF(err, "failed unlocking buffer (%p)", mLockedBuffer->handle);

    err = queueBuffer(mLockedBuffer.get(), fd);
    ALOGE_IF(err, "queueBuffer (handle=%p) failed (%s)",
            mLockedBuffer->handle, strerror(-err));

    mPostedBuffer = mLockedBuffer;
    mLockedBuffer = nullptr;
    return err;
}

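// A minimal software-rendering sketch using the pair above (error handling
// omitted; assumes `surface` is an sp<Surface> that can be CPU-locked):
//
//     ANativeWindow_Buffer buffer;
//     ARect dirty = {0, 0, 64, 64};           // region the caller will redraw
//     if (surface->lock(&buffer, &dirty) == NO_ERROR) {
//         // write pixels into buffer.bits, honoring buffer.stride and
//         // buffer.format, within the returned dirty bounds
//         surface->unlockAndPost();           // queues the buffer for display
//     }
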
bool Surface::waitForNextFrame(uint64_t lastFrame, nsecs_t timeout) {
    Mutex::Autolock lock(mMutex);
    if (mNextFrameNumber > lastFrame) {
        return true;
    }
    return mQueueBufferCondition.waitRelative(mMutex, timeout) == OK;
}

status_t Surface::getUniqueId(uint64_t* outId) const {
    Mutex::Autolock lock(mMutex);
    return mGraphicBufferProducer->getUniqueId(outId);
}

int Surface::getConsumerUsage(uint64_t* outUsage) const {
    Mutex::Autolock lock(mMutex);
    return mGraphicBufferProducer->getConsumerUsage(outUsage);
}

status_t Surface::getAndFlushRemovedBuffers(std::vector<sp<GraphicBuffer>>* out) {
    if (out == nullptr) {
        ALOGE("%s: out must not be null!", __FUNCTION__);
        return BAD_VALUE;
    }

    Mutex::Autolock lock(mMutex);
    *out = mRemovedBuffers;
    mRemovedBuffers.clear();
    return OK;
}

status_t Surface::attachAndQueueBufferWithDataspace(Surface* surface, sp<GraphicBuffer> buffer,
        Dataspace dataspace) {
    if (buffer == nullptr) {
        return BAD_VALUE;
    }
    int err = static_cast<ANativeWindow*>(surface)->perform(surface, NATIVE_WINDOW_API_CONNECT,
            NATIVE_WINDOW_API_CPU);
    if (err != OK) {
        return err;
    }
    ui::Dataspace tmpDataspace = surface->getBuffersDataSpace();
    err = surface->setBuffersDataSpace(dataspace);
    if (err != OK) {
        return err;
    }
    err = surface->attachBuffer(buffer->getNativeBuffer());
    if (err != OK) {
        return err;
    }
    err = static_cast<ANativeWindow*>(surface)->queueBuffer(surface, buffer->getNativeBuffer(), -1);
    if (err != OK) {
        return err;
    }
    err = surface->setBuffersDataSpace(tmpDataspace);
    if (err != OK) {
        return err;
    }
    err = surface->disconnect(NATIVE_WINDOW_API_CPU);
    return err;
}

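// attachAndQueueBufferWithDataspace() pushes an externally allocated buffer
// through the surface in one shot: connect as NATIVE_WINDOW_API_CPU, save and
// override the dataspace, attach and queue the buffer (with no acquire fence),
// then restore the original dataspace and disconnect. Note that the early
// returns on intermediate errors leave the surface connected and skip the
// dataspace restore.
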
int Surface::setAutoPrerotation(bool autoPrerotation) {
    ATRACE_CALL();
    ALOGV("Surface::setAutoPrerotation (%d)", autoPrerotation);
    Mutex::Autolock lock(mMutex);

    if (mAutoPrerotation == autoPrerotation) {
        return OK;
    }

    status_t err = mGraphicBufferProducer->setAutoPrerotation(autoPrerotation);
    if (err == NO_ERROR) {
        mAutoPrerotation = autoPrerotation;
    }
    ALOGE_IF(err, "IGraphicBufferProducer::setAutoPrerotation(%d) returned %s", autoPrerotation,
            strerror(-err));
    return err;
}

void Surface::ProducerListenerProxy::onBuffersDiscarded(const std::vector<int32_t>& slots) {
    ATRACE_CALL();
    sp<Surface> parent = mParent.promote();
    if (parent == nullptr) {
        return;
    }

    std::vector<sp<GraphicBuffer>> discardedBufs;
    status_t res = parent->getAndFlushBuffersFromSlots(slots, &discardedBufs);
    if (res != OK) {
        ALOGE("%s: Failed to get buffers from slots: %s(%d)", __FUNCTION__,
                strerror(-res), res);
        return;
    }

    mSurfaceListener->onBuffersDiscarded(discardedBufs);
}

status_t Surface::setFrameRate(float frameRate, int8_t compatibility,
        int8_t changeFrameRateStrategy) {
    ATRACE_CALL();
    ALOGV("Surface::setFrameRate");

    if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
                           "Surface::setFrameRate")) {
        return BAD_VALUE;
    }

    return composerService()->setFrameRate(mGraphicBufferProducer, frameRate, compatibility,
                                           changeFrameRateStrategy);
}

status_t Surface::setFrameTimelineInfo(uint64_t /*frameNumber*/,
                                       const FrameTimelineInfo& frameTimelineInfo) {
    return composerService()->setFrameTimelineInfo(mGraphicBufferProducer, frameTimelineInfo);
}

sp<IBinder> Surface::getSurfaceControlHandle() const {
    Mutex::Autolock lock(mMutex);
    return mSurfaceControlHandle;
}

void Surface::destroy() {
    Mutex::Autolock lock(mMutex);
    mSurfaceControlHandle = nullptr;
}

} // namespace android