/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// TODO(b/129481165): remove the #pragma below and fix conversion issues
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wconversion"

//#define LOG_NDEBUG 0
#undef LOG_TAG
#define LOG_TAG "BufferLayer"
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include "BufferLayer.h"

#include <compositionengine/CompositionEngine.h>
#include <compositionengine/LayerFECompositionState.h>
#include <compositionengine/OutputLayer.h>
#include <compositionengine/impl/OutputLayerCompositionState.h>
#include <cutils/compiler.h>
#include <cutils/native_handle.h>
#include <cutils/properties.h>
#include <gui/BufferItem.h>
#include <gui/BufferQueue.h>
#include <gui/GLConsumer.h>
#include <gui/LayerDebugInfo.h>
#include <gui/Surface.h>
#include <renderengine/RenderEngine.h>
#include <ui/DebugUtils.h>
#include <utils/Errors.h>
#include <utils/Log.h>
#include <utils/NativeHandle.h>
#include <utils/StopWatch.h>
#include <utils/Trace.h>

#include <cmath>
#include <cstdlib>
#include <mutex>
#include <sstream>

#include "Colorizer.h"
#include "DisplayDevice.h"
#include "FrameTracer/FrameTracer.h"
#include "LayerRejecter.h"
#include "TimeStats/TimeStats.h"

namespace android {

using gui::WindowInfo;

static constexpr float defaultMaxLuminance = 1000.0;

BufferLayer::BufferLayer(const LayerCreationArgs& args)
      : Layer(args),
        mTextureName(args.textureName),
        mCompositionState{mFlinger->getCompositionEngine().createLayerFECompositionState()} {
    ALOGV("Creating Layer %s", getDebugName());

    mPremultipliedAlpha = !(args.flags & ISurfaceComposerClient::eNonPremultiplied);

    mPotentialCursor = args.flags & ISurfaceComposerClient::eCursorWindow;
    mProtectedByApp = args.flags & ISurfaceComposerClient::eProtectedByApp;
}

BufferLayer::~BufferLayer() {
    if (!isClone()) {
        // The original layer and the clone layer share the same texture. Therefore, only one of
        // the layers, in this case the original layer, needs to handle the deletion. The original
        // layer and the clone should be removed at the same time so there shouldn't be any issue
        // with the clone layer trying to use the deleted texture.
        mFlinger->deleteTextureAsync(mTextureName);
    }
    const int32_t layerId = getSequence();
    mFlinger->mTimeStats->onDestroy(layerId);
    mFlinger->mFrameTracer->onDestroy(layerId);
}

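// Adopts the latched buffer's surface damage as this layer's damage region, unless
// mForceFullDamage is set, in which case the whole layer is treated as damaged.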
void BufferLayer::useSurfaceDamage() {
    if (mFlinger->mForceFullDamage) {
        surfaceDamageRegion = Region::INVALID_REGION;
    } else {
        surfaceDamageRegion = mBufferInfo.mSurfaceDamage;
    }
}

void BufferLayer::useEmptyDamage() {
    surfaceDamageRegion.clear();
}

bool BufferLayer::isOpaque(const Layer::State& s) const {
    // if we don't have a buffer or sidebandStream yet, we're translucent regardless of the
    // layer's opaque flag.
    if ((mSidebandStream == nullptr) && (mBufferInfo.mBuffer == nullptr)) {
        return false;
    }

    // if the layer has the opaque flag, then we're always opaque,
    // otherwise we use the current buffer's format.
    return ((s.flags & layer_state_t::eLayerOpaque) != 0) || getOpacityForFormat(getPixelFormat());
}

bool BufferLayer::canReceiveInput() const {
    return !isHiddenByPolicy() && (mBufferInfo.mBuffer == nullptr || getAlpha() > 0.0f);
}

bool BufferLayer::isVisible() const {
    return !isHiddenByPolicy() && getAlpha() > 0.0f &&
            (mBufferInfo.mBuffer != nullptr || mSidebandStream != nullptr);
}

bool BufferLayer::isFixedSize() const {
    return getEffectiveScalingMode() != NATIVE_WINDOW_SCALING_MODE_FREEZE;
}

bool BufferLayer::usesSourceCrop() const {
    return true;
}

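// Builds the inverse of the given NATIVE_WINDOW_TRANSFORM_* orientation flags as a 4x4
// matrix, used below to undo the display rotation when computing the texture transform.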
static constexpr mat4 inverseOrientation(uint32_t transform) {
    const mat4 flipH(-1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1);
    const mat4 flipV(1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1);
    const mat4 rot90(0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1);
    mat4 tr;

    if (transform & NATIVE_WINDOW_TRANSFORM_ROT_90) {
        tr = tr * rot90;
    }
    if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_H) {
        tr = tr * flipH;
    }
    if (transform & NATIVE_WINDOW_TRANSFORM_FLIP_V) {
        tr = tr * flipV;
    }
    return inverse(tr);
}

std::optional<compositionengine::LayerFE::LayerSettings> BufferLayer::prepareClientComposition(
        compositionengine::LayerFE::ClientCompositionTargetSettings& targetSettings) {
    ATRACE_CALL();

    std::optional<compositionengine::LayerFE::LayerSettings> result =
            Layer::prepareClientComposition(targetSettings);
    if (!result) {
        return result;
    }

    if (CC_UNLIKELY(mBufferInfo.mBuffer == nullptr) && mSidebandStream != nullptr) {
        // For the SurfaceView of a TV sideband stream there is no active buffer
        // in the BufferQueue, but we still need to return the LayerSettings.
        return result;
    }
    const bool blackOutLayer = (isProtected() && !targetSettings.supportsProtectedContent) ||
            ((isSecure() || isProtected()) && !targetSettings.isSecure);
    const bool bufferCanBeUsedAsHwTexture =
            mBufferInfo.mBuffer->getUsage() & GraphicBuffer::USAGE_HW_TEXTURE;
    compositionengine::LayerFE::LayerSettings& layer = *result;
    if (blackOutLayer || !bufferCanBeUsedAsHwTexture) {
        ALOGE_IF(!bufferCanBeUsedAsHwTexture, "%s is blacked out as buffer is not gpu readable",
                 mName.c_str());
        prepareClearClientComposition(layer, true /* blackout */);
        return layer;
    }

    const State& s(getDrawingState());
    layer.source.buffer.buffer = mBufferInfo.mBuffer;
    layer.source.buffer.isOpaque = isOpaque(s);
    layer.source.buffer.fence = mBufferInfo.mFence;
    layer.source.buffer.textureName = mTextureName;
    layer.source.buffer.usePremultipliedAlpha = getPremultipledAlpha();
    layer.source.buffer.isY410BT2020 = isHdrY410();
    bool hasSmpte2086 = mBufferInfo.mHdrMetadata.validTypes & HdrMetadata::SMPTE2086;
    bool hasCta861_3 = mBufferInfo.mHdrMetadata.validTypes & HdrMetadata::CTA861_3;
    float maxLuminance = 0.f;
    if (hasSmpte2086 && hasCta861_3) {
        maxLuminance = std::min(mBufferInfo.mHdrMetadata.smpte2086.maxLuminance,
                                mBufferInfo.mHdrMetadata.cta8613.maxContentLightLevel);
    } else if (hasSmpte2086) {
        maxLuminance = mBufferInfo.mHdrMetadata.smpte2086.maxLuminance;
    } else if (hasCta861_3) {
        maxLuminance = mBufferInfo.mHdrMetadata.cta8613.maxContentLightLevel;
    } else {
        switch (layer.sourceDataspace & HAL_DATASPACE_TRANSFER_MASK) {
            case HAL_DATASPACE_TRANSFER_ST2084:
            case HAL_DATASPACE_TRANSFER_HLG:
                // Behavior-match previous releases for HDR content
                maxLuminance = defaultMaxLuminance;
                break;
        }
    }
    layer.source.buffer.maxLuminanceNits = maxLuminance;
    layer.frameNumber = mCurrentFrameNumber;
    layer.bufferId = mBufferInfo.mBuffer ? mBufferInfo.mBuffer->getId() : 0;

    const bool useFiltering =
            targetSettings.needsFiltering || mNeedsFiltering || bufferNeedsFiltering();

    // Query the texture matrix given our current filtering mode.
    float textureMatrix[16];
    getDrawingTransformMatrix(useFiltering, textureMatrix);

    if (getTransformToDisplayInverse()) {
        /*
         * the code below applies the primary display's inverse transform to
         * the texture transform
         */
        uint32_t transform = DisplayDevice::getPrimaryDisplayRotationFlags();
        mat4 tr = inverseOrientation(transform);

        /**
         * TODO(b/36727915): This is basically a hack.
         *
         * Ensure that regardless of the parent transformation,
         * this buffer is always transformed from native display
         * orientation to display orientation. For example, in the case
         * of a camera where the buffer remains in native orientation,
         * we want the pixels to always be upright.
         */
        sp<Layer> p = mDrawingParent.promote();
        if (p != nullptr) {
            const auto parentTransform = p->getTransform();
            tr = tr * inverseOrientation(parentTransform.getOrientation());
        }

        // and finally apply it to the original texture matrix
        const mat4 texTransform(mat4(static_cast<const float*>(textureMatrix)) * tr);
        memcpy(textureMatrix, texTransform.asArray(), sizeof(textureMatrix));
    }

    const Rect win{getBounds()};
    float bufferWidth = getBufferSize(s).getWidth();
    float bufferHeight = getBufferSize(s).getHeight();

    // BufferStateLayers can have a "buffer size" of [0, 0, -1, -1] when no display frame has
    // been set and there are no parent layer bounds. In that case the scale is meaningless, so
    // ignore it.
    if (!getBufferSize(s).isValid()) {
        bufferWidth = float(win.right) - float(win.left);
        bufferHeight = float(win.bottom) - float(win.top);
    }

    const float scaleHeight = (float(win.bottom) - float(win.top)) / bufferHeight;
    const float scaleWidth = (float(win.right) - float(win.left)) / bufferWidth;
    const float translateY = float(win.top) / bufferHeight;
    const float translateX = float(win.left) / bufferWidth;

    // Flip y-coordinates because GLConsumer expects OpenGL convention.
    mat4 tr = mat4::translate(vec4(.5, .5, 0, 1)) * mat4::scale(vec4(1, -1, 1, 1)) *
            mat4::translate(vec4(-.5, -.5, 0, 1)) *
            mat4::translate(vec4(translateX, translateY, 0, 1)) *
            mat4::scale(vec4(scaleWidth, scaleHeight, 1.0, 1.0));

    layer.source.buffer.useTextureFiltering = useFiltering;
    layer.source.buffer.textureTransform = mat4(static_cast<const float*>(textureMatrix)) * tr;

    return layer;
}

bool BufferLayer::isHdrY410() const {
    // pixel format is HDR Y410 masquerading as RGBA_1010102
    return (mBufferInfo.mDataspace == ui::Dataspace::BT2020_ITU_PQ &&
            mBufferInfo.mApi == NATIVE_WINDOW_API_MEDIA &&
            mBufferInfo.mPixelFormat == HAL_PIXEL_FORMAT_RGBA_1010102);
}

sp<compositionengine::LayerFE> BufferLayer::getCompositionEngineLayerFE() const {
    return asLayerFE();
}

compositionengine::LayerFECompositionState* BufferLayer::editCompositionState() {
    return mCompositionState.get();
}

const compositionengine::LayerFECompositionState* BufferLayer::getCompositionState() const {
    return mCompositionState.get();
}

void BufferLayer::preparePerFrameCompositionState() {
    Layer::preparePerFrameCompositionState();

    // Sideband layers
    auto* compositionState = editCompositionState();
    if (compositionState->sidebandStream.get() && !compositionState->sidebandStreamHasFrame) {
        compositionState->compositionType =
                aidl::android::hardware::graphics::composer3::Composition::SIDEBAND;
        return;
    } else if ((mDrawingState.flags & layer_state_t::eLayerIsDisplayDecoration) != 0) {
        compositionState->compositionType =
                aidl::android::hardware::graphics::composer3::Composition::DISPLAY_DECORATION;
    } else {
        // Normal buffer layers
        compositionState->hdrMetadata = mBufferInfo.mHdrMetadata;
        compositionState->compositionType = mPotentialCursor
                ? aidl::android::hardware::graphics::composer3::Composition::CURSOR
                : aidl::android::hardware::graphics::composer3::Composition::DEVICE;
    }

    compositionState->buffer = getBuffer();
    compositionState->bufferSlot = (mBufferInfo.mBufferSlot == BufferQueue::INVALID_BUFFER_SLOT)
            ? 0
            : mBufferInfo.mBufferSlot;
    compositionState->acquireFence = mBufferInfo.mFence;
    compositionState->frameNumber = mBufferInfo.mFrameNumber;
    compositionState->sidebandStreamHasFrame = false;
}

bool BufferLayer::onPreComposition(nsecs_t) {
    return hasReadyFrame();
}
namespace {
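// Converts a layer's frame rate vote into the payload format expected by TimeStats.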
TimeStats::SetFrameRateVote frameRateToSetFrameRateVotePayload(Layer::FrameRate frameRate) {
    using FrameRateCompatibility = TimeStats::SetFrameRateVote::FrameRateCompatibility;
    using Seamlessness = TimeStats::SetFrameRateVote::Seamlessness;
    const auto frameRateCompatibility = [frameRate] {
        switch (frameRate.type) {
            case Layer::FrameRateCompatibility::Default:
                return FrameRateCompatibility::Default;
            case Layer::FrameRateCompatibility::ExactOrMultiple:
                return FrameRateCompatibility::ExactOrMultiple;
            default:
                return FrameRateCompatibility::Undefined;
        }
    }();

    const auto seamlessness = [frameRate] {
        switch (frameRate.seamlessness) {
            case scheduler::Seamlessness::OnlySeamless:
                return Seamlessness::ShouldBeSeamless;
            case scheduler::Seamlessness::SeamedAndSeamless:
                return Seamlessness::NotRequired;
            default:
                return Seamlessness::Undefined;
        }
    }();

    return TimeStats::SetFrameRateVote{.frameRate = frameRate.rate.getValue(),
                                       .frameRateCompatibility = frameRateCompatibility,
                                       .seamlessness = seamlessness};
}
} // namespace

void BufferLayer::onPostComposition(const DisplayDevice* display,
                                    const std::shared_ptr<FenceTime>& glDoneFence,
                                    const std::shared_ptr<FenceTime>& presentFence,
                                    const CompositorTiming& compositorTiming) {
    // mFrameLatencyNeeded is true when a new frame was latched for the
    // composition.
    if (!mBufferInfo.mFrameLatencyNeeded) return;

    // Update mFrameEventHistory.
    finalizeFrameEventHistory(glDoneFence, compositorTiming);

    // Update mFrameTracker.
    nsecs_t desiredPresentTime = mBufferInfo.mDesiredPresentTime;
    mFrameTracker.setDesiredPresentTime(desiredPresentTime);

    const int32_t layerId = getSequence();
    mFlinger->mTimeStats->setDesiredTime(layerId, mCurrentFrameNumber, desiredPresentTime);

    const auto outputLayer = findOutputLayerForDisplay(display);
    if (outputLayer && outputLayer->requiresClientComposition()) {
        nsecs_t clientCompositionTimestamp = outputLayer->getState().clientCompositionTimestamp;
        mFlinger->mFrameTracer->traceTimestamp(layerId, getCurrentBufferId(), mCurrentFrameNumber,
                                               clientCompositionTimestamp,
                                               FrameTracer::FrameEvent::FALLBACK_COMPOSITION);
        // Update the SurfaceFrames in the drawing state
        if (mDrawingState.bufferSurfaceFrameTX) {
            mDrawingState.bufferSurfaceFrameTX->setGpuComposition();
        }
        for (auto& [token, surfaceFrame] : mDrawingState.bufferlessSurfaceFramesTX) {
            surfaceFrame->setGpuComposition();
        }
    }

    std::shared_ptr<FenceTime> frameReadyFence = mBufferInfo.mFenceTime;
    if (frameReadyFence->isValid()) {
        mFrameTracker.setFrameReadyFence(std::move(frameReadyFence));
    } else {
        // There was no fence for this frame, so assume that it was ready
        // to be presented at the desired present time.
        mFrameTracker.setFrameReadyTime(desiredPresentTime);
    }

    if (display) {
        const Fps refreshRate = display->refreshRateConfigs().getActiveMode()->getFps();
        const std::optional<Fps> renderRate =
                mFlinger->mScheduler->getFrameRateOverride(getOwnerUid());

        const auto vote = frameRateToSetFrameRateVotePayload(mDrawingState.frameRate);
        const auto gameMode = getGameMode();

        if (presentFence->isValid()) {
            mFlinger->mTimeStats->setPresentFence(layerId, mCurrentFrameNumber, presentFence,
                                                  refreshRate, renderRate, vote, gameMode);
            mFlinger->mFrameTracer->traceFence(layerId, getCurrentBufferId(), mCurrentFrameNumber,
                                               presentFence,
                                               FrameTracer::FrameEvent::PRESENT_FENCE);
            mFrameTracker.setActualPresentFence(std::shared_ptr<FenceTime>(presentFence));
        } else if (const auto displayId = PhysicalDisplayId::tryCast(display->getId());
                   displayId && mFlinger->getHwComposer().isConnected(*displayId)) {
            // The HWC doesn't support present fences, so use the refresh
            // timestamp instead.
            const nsecs_t actualPresentTime = display->getRefreshTimestamp();
            mFlinger->mTimeStats->setPresentTime(layerId, mCurrentFrameNumber, actualPresentTime,
                                                 refreshRate, renderRate, vote, gameMode);
            mFlinger->mFrameTracer->traceTimestamp(layerId, getCurrentBufferId(),
                                                   mCurrentFrameNumber, actualPresentTime,
                                                   FrameTracer::FrameEvent::PRESENT_FENCE);
            mFrameTracker.setActualPresentTime(actualPresentTime);
        }
    }

    mFrameTracker.advanceFrame();
    mBufferInfo.mFrameLatencyNeeded = false;
}

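// Caches per-buffer metadata (currently just the pixel format) and marks the newly latched
// frame as needing frame-latency bookkeeping in onPostComposition.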
void BufferLayer::gatherBufferInfo() {
    mBufferInfo.mPixelFormat =
            !mBufferInfo.mBuffer ? PIXEL_FORMAT_NONE : mBufferInfo.mBuffer->getPixelFormat();
    mBufferInfo.mFrameLatencyNeeded = true;
}

bool BufferLayer::shouldPresentNow(nsecs_t expectedPresentTime) const {
    // If this is not a valid vsync for the layer's uid, return and try again later
    const bool isVsyncValidForUid =
            mFlinger->mScheduler->isVsyncValid(expectedPresentTime, mOwnerUid);
    if (!isVsyncValidForUid) {
        ATRACE_NAME("!isVsyncValidForUid");
        return false;
    }

    // AutoRefresh layers and sideband streams should always be presented
    if (getSidebandStreamChanged() || getAutoRefresh()) {
        return true;
    }

    // If this layer doesn't have a frame it shouldn't be presented
    if (!hasFrameUpdate()) {
        return false;
    }

    // Defer to the derived class to decide whether the next buffer is due for
    // presentation.
    return isBufferDue(expectedPresentTime);
}

bool BufferLayer::latchBuffer(bool& recomputeVisibleRegions, nsecs_t latchTime,
                              nsecs_t expectedPresentTime) {
    ATRACE_CALL();

    bool refreshRequired = latchSidebandStream(recomputeVisibleRegions);

    if (refreshRequired) {
        return refreshRequired;
    }

    // If the head buffer's acquire fence hasn't signaled yet, return and
    // try again later
    if (!fenceHasSignaled()) {
        ATRACE_NAME("!fenceHasSignaled()");
        mFlinger->onLayerUpdate();
        return false;
    }

    // Capture the old state of the layer for comparisons later
    const State& s(getDrawingState());
    const bool oldOpacity = isOpaque(s);

    BufferInfo oldBufferInfo = mBufferInfo;

    status_t err = updateTexImage(recomputeVisibleRegions, latchTime, expectedPresentTime);
    if (err != NO_ERROR) {
        return false;
    }

    err = updateActiveBuffer();
    if (err != NO_ERROR) {
        return false;
    }

    err = updateFrameNumber();
    if (err != NO_ERROR) {
        return false;
    }

    gatherBufferInfo();

    if (oldBufferInfo.mBuffer == nullptr) {
        // the first time we receive a buffer, we need to trigger a
        // geometry invalidation.
        recomputeVisibleRegions = true;
    }

    if ((mBufferInfo.mCrop != oldBufferInfo.mCrop) ||
        (mBufferInfo.mTransform != oldBufferInfo.mTransform) ||
        (mBufferInfo.mScaleMode != oldBufferInfo.mScaleMode) ||
        (mBufferInfo.mTransformToDisplayInverse != oldBufferInfo.mTransformToDisplayInverse)) {
        recomputeVisibleRegions = true;
    }

    if (oldBufferInfo.mBuffer != nullptr) {
        uint32_t bufWidth = mBufferInfo.mBuffer->getWidth();
        uint32_t bufHeight = mBufferInfo.mBuffer->getHeight();
        if (bufWidth != oldBufferInfo.mBuffer->getWidth() ||
            bufHeight != oldBufferInfo.mBuffer->getHeight()) {
            recomputeVisibleRegions = true;
        }
    }

    if (oldOpacity != isOpaque(s)) {
        recomputeVisibleRegions = true;
    }

    return true;
}

bool BufferLayer::hasReadyFrame() const {
    return hasFrameUpdate() || getSidebandStreamChanged() || getAutoRefresh();
}

uint32_t BufferLayer::getEffectiveScalingMode() const {
    return mBufferInfo.mScaleMode;
}

bool BufferLayer::isProtected() const {
    return (mBufferInfo.mBuffer != nullptr) &&
            (mBufferInfo.mBuffer->getUsage() & GRALLOC_USAGE_PROTECTED);
}

// As documented in the libhardware header, formats in the range
// 0x100 - 0x1FF are specific to the HAL implementation and are
// known to have no alpha channel.
// TODO: move the definition of the device-specific range into
// hardware.h instead of using hard-coded values here.
#define HARDWARE_IS_DEVICE_FORMAT(f) ((f) >= 0x100 && (f) <= 0x1FF)

bool BufferLayer::getOpacityForFormat(PixelFormat format) {
    if (HARDWARE_IS_DEVICE_FORMAT(format)) {
        return true;
    }
    switch (format) {
        case PIXEL_FORMAT_RGBA_8888:
        case PIXEL_FORMAT_BGRA_8888:
        case PIXEL_FORMAT_RGBA_FP16:
        case PIXEL_FORMAT_RGBA_1010102:
        case PIXEL_FORMAT_R_8:
            return false;
    }
    // in all other cases, we have no blending (also for unknown formats)
    return true;
}

bool BufferLayer::needsFiltering(const DisplayDevice* display) const {
    const auto outputLayer = findOutputLayerForDisplay(display);
    if (outputLayer == nullptr) {
        return false;
    }

    // We need filtering if the sourceCrop rectangle size does not match the
    // displayframe rectangle size (not a 1:1 render)
    const auto& compositionState = outputLayer->getState();
    const auto displayFrame = compositionState.displayFrame;
    const auto sourceCrop = compositionState.sourceCrop;
    return sourceCrop.getHeight() != displayFrame.getHeight() ||
            sourceCrop.getWidth() != displayFrame.getWidth();
}

bool BufferLayer::needsFilteringForScreenshots(const DisplayDevice* display,
                                               const ui::Transform& inverseParentTransform) const {
    const auto outputLayer = findOutputLayerForDisplay(display);
    if (outputLayer == nullptr) {
        return false;
    }

    // We need filtering if the sourceCrop rectangle size does not match the
    // viewport rectangle size (not a 1:1 render)
    const auto& compositionState = outputLayer->getState();
    const ui::Transform& displayTransform = display->getTransform();
    const ui::Transform inverseTransform = inverseParentTransform * displayTransform.inverse();
    // Undo the transformation of the displayFrame so that we're back into
    // layer-stack space.
    const Rect frame = inverseTransform.transform(compositionState.displayFrame);
    const FloatRect sourceCrop = compositionState.sourceCrop;

    int32_t frameHeight = frame.getHeight();
    int32_t frameWidth = frame.getWidth();
    // If the display transform had a rotational component then undo the
    // rotation so that the orientation matches the source crop.
    if (displayTransform.getOrientation() & ui::Transform::ROT_90) {
        std::swap(frameHeight, frameWidth);
    }
    return sourceCrop.getHeight() != frameHeight || sourceCrop.getWidth() != frameWidth;
}

Rect BufferLayer::getBufferSize(const State& s) const {
    // If we have a sideband stream, or we are scaling the buffer then return the layer size since
    // we cannot determine the buffer size.
    if ((s.sidebandStream != nullptr) ||
        (getEffectiveScalingMode() != NATIVE_WINDOW_SCALING_MODE_FREEZE)) {
        return Rect(getActiveWidth(s), getActiveHeight(s));
    }

    if (mBufferInfo.mBuffer == nullptr) {
        return Rect::INVALID_RECT;
    }

    uint32_t bufWidth = mBufferInfo.mBuffer->getWidth();
    uint32_t bufHeight = mBufferInfo.mBuffer->getHeight();

    // Undo any transformations on the buffer and return the result.
    if (mBufferInfo.mTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }

    if (getTransformToDisplayInverse()) {
        uint32_t invTransform = DisplayDevice::getPrimaryDisplayRotationFlags();
        if (invTransform & ui::Transform::ROT_90) {
            std::swap(bufWidth, bufHeight);
        }
    }

    return Rect(bufWidth, bufHeight);
}

FloatRect BufferLayer::computeSourceBounds(const FloatRect& parentBounds) const {
    const State& s(getDrawingState());

    // If we have a sideband stream, or we are scaling the buffer then return the layer size since
    // we cannot determine the buffer size.
    if ((s.sidebandStream != nullptr) ||
        (getEffectiveScalingMode() != NATIVE_WINDOW_SCALING_MODE_FREEZE)) {
        return FloatRect(0, 0, getActiveWidth(s), getActiveHeight(s));
    }

    if (mBufferInfo.mBuffer == nullptr) {
        return parentBounds;
    }

    uint32_t bufWidth = mBufferInfo.mBuffer->getWidth();
    uint32_t bufHeight = mBufferInfo.mBuffer->getHeight();

    // Undo any transformations on the buffer and return the result.
    if (mBufferInfo.mTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }

    if (getTransformToDisplayInverse()) {
        uint32_t invTransform = DisplayDevice::getPrimaryDisplayRotationFlags();
        if (invTransform & ui::Transform::ROT_90) {
            std::swap(bufWidth, bufHeight);
        }
    }

    return FloatRect(0, 0, bufWidth, bufHeight);
}

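// Latches a ready frame, if there is one, and then releases any pending buffer.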
void BufferLayer::latchAndReleaseBuffer() {
    if (hasReadyFrame()) {
        bool ignored = false;
        latchBuffer(ignored, systemTime(), 0 /* expectedPresentTime */);
    }
    releasePendingBuffer(systemTime());
}

PixelFormat BufferLayer::getPixelFormat() const {
    return mBufferInfo.mPixelFormat;
}

bool BufferLayer::getTransformToDisplayInverse() const {
    return mBufferInfo.mTransformToDisplayInverse;
}

Rect BufferLayer::getBufferCrop() const {
    // this is the crop rectangle that applies to the buffer
    // itself (as opposed to the window)
    if (!mBufferInfo.mCrop.isEmpty()) {
        // if the buffer crop is defined, we use that
        return mBufferInfo.mCrop;
    } else if (mBufferInfo.mBuffer != nullptr) {
        // otherwise we use the whole buffer
        return mBufferInfo.mBuffer->getBounds();
    } else {
        // if we don't have a buffer yet, we use an empty/invalid crop
        return Rect();
    }
}

uint32_t BufferLayer::getBufferTransform() const {
    return mBufferInfo.mTransform;
}

ui::Dataspace BufferLayer::getDataSpace() const {
    return mBufferInfo.mDataspace;
}

ui::Dataspace BufferLayer::translateDataspace(ui::Dataspace dataspace) {
    ui::Dataspace updatedDataspace = dataspace;
    // translate legacy dataspaces to modern dataspaces
    switch (dataspace) {
        case ui::Dataspace::SRGB:
            updatedDataspace = ui::Dataspace::V0_SRGB;
            break;
        case ui::Dataspace::SRGB_LINEAR:
            updatedDataspace = ui::Dataspace::V0_SRGB_LINEAR;
            break;
        case ui::Dataspace::JFIF:
            updatedDataspace = ui::Dataspace::V0_JFIF;
            break;
        case ui::Dataspace::BT601_625:
            updatedDataspace = ui::Dataspace::V0_BT601_625;
            break;
        case ui::Dataspace::BT601_525:
            updatedDataspace = ui::Dataspace::V0_BT601_525;
            break;
        case ui::Dataspace::BT709:
            updatedDataspace = ui::Dataspace::V0_BT709;
            break;
        default:
            break;
    }

    return updatedDataspace;
}

sp<GraphicBuffer> BufferLayer::getBuffer() const {
    return mBufferInfo.mBuffer ? mBufferInfo.mBuffer->getBuffer() : nullptr;
}

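// Computes the 4x4 texture-coordinate transform for the current buffer, crop and transform
// via GLConsumer::computeTransformMatrix.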
void BufferLayer::getDrawingTransformMatrix(bool filteringEnabled, float outMatrix[16]) {
    GLConsumer::computeTransformMatrix(outMatrix,
                                       mBufferInfo.mBuffer ? mBufferInfo.mBuffer->getBuffer()
                                                           : nullptr,
                                       mBufferInfo.mCrop, mBufferInfo.mTransform, filteringEnabled);
}

void BufferLayer::setInitialValuesForClone(const sp<Layer>& clonedFrom) {
    Layer::setInitialValuesForClone(clonedFrom);

    sp<BufferLayer> bufferClonedFrom = static_cast<BufferLayer*>(clonedFrom.get());
    mPremultipliedAlpha = bufferClonedFrom->mPremultipliedAlpha;
    mPotentialCursor = bufferClonedFrom->mPotentialCursor;
    mProtectedByApp = bufferClonedFrom->mProtectedByApp;

    updateCloneBufferInfo();
}

void BufferLayer::updateCloneBufferInfo() {
    if (!isClone() || !isClonedFromAlive()) {
        return;
    }

    sp<BufferLayer> clonedFrom = static_cast<BufferLayer*>(getClonedFrom().get());
    mBufferInfo = clonedFrom->mBufferInfo;
    mSidebandStream = clonedFrom->mSidebandStream;
    surfaceDamageRegion = clonedFrom->surfaceDamageRegion;
    mCurrentFrameNumber = clonedFrom->mCurrentFrameNumber.load();
    mPreviousFrameNumber = clonedFrom->mPreviousFrameNumber;

    // After the buffer info is updated, the drawingState from the real layer needs to be copied
    // into the clone. This is because some properties of drawingState can change when latchBuffer
    // is called. However, copying the drawingState would also overwrite the cloned layer's
    // relatives and touchableRegionCrop. Therefore, temporarily store the relatives so they can
    // be restored in the cloned drawingState afterwards.
    wp<Layer> tmpZOrderRelativeOf = mDrawingState.zOrderRelativeOf;
    SortedVector<wp<Layer>> tmpZOrderRelatives = mDrawingState.zOrderRelatives;
    wp<Layer> tmpTouchableRegionCrop = mDrawingState.touchableRegionCrop;
    WindowInfo tmpInputInfo = mDrawingState.inputInfo;

    cloneDrawingState(clonedFrom.get());

    mDrawingState.touchableRegionCrop = tmpTouchableRegionCrop;
    mDrawingState.zOrderRelativeOf = tmpZOrderRelativeOf;
    mDrawingState.zOrderRelatives = tmpZOrderRelatives;
    mDrawingState.inputInfo = tmpInputInfo;
}

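// Prefer a fixed transform hint if one has been set on the layer; otherwise fall back to
// the hint derived from the display's orientation.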
void BufferLayer::setTransformHint(ui::Transform::RotationFlags displayTransformHint) {
    mTransformHint = getFixedTransformHint();
    if (mTransformHint == ui::Transform::ROT_INVALID) {
        mTransformHint = displayTransformHint;
    }
}

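// A buffer that is scaled to the layer bounds (any scaling mode other than FREEZE) needs
// filtering regardless of the output it is composed onto.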
bool BufferLayer::bufferNeedsFiltering() const {
    return isFixedSize();
}

const std::shared_ptr<renderengine::ExternalTexture>& BufferLayer::getExternalTexture() const {
    return mBufferInfo.mBuffer;
}

} // namespace android

#if defined(__gl_h_)
#error "don't include gl/gl.h in this file"
#endif

#if defined(__gl2_h_)
#error "don't include gl2/gl2.h in this file"
#endif

// TODO(b/129481165): remove the #pragma below and fix conversion issues
#pragma clang diagnostic pop // ignored "-Wconversion"