/*
 * Copyright 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "Planner"
// #define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include <android-base/properties.h>
#include <compositionengine/impl/planner/Flattener.h>
#include <compositionengine/impl/planner/LayerState.h>

#include <gui/TraceUtils.h>

using time_point = std::chrono::steady_clock::time_point;
using namespace std::chrono_literals;

namespace android::compositionengine::impl::planner {

namespace {
// Returns true if the underlying layer stack is the same modulo state that is expected to differ
// between frames, such as the specific buffers; false otherwise.
bool isSameStack(const std::vector<const LayerState*>& incomingLayers,
                 const std::vector<CachedSet>& cachedSets) {
    std::vector<const LayerState*> existingLayers;
    for (auto& cachedSet : cachedSets) {
        for (auto& layer : cachedSet.getConstituentLayers()) {
            existingLayers.push_back(layer.getState());
        }
    }

    if (incomingLayers.size() != existingLayers.size()) {
        return false;
    }

    for (size_t i = 0; i < incomingLayers.size(); i++) {
        // Checking the IDs here is very strict, but we do this as otherwise we may mistakenly try
        // to access destroyed OutputLayers later on.
        if (incomingLayers[i]->getId() != existingLayers[i]->getId() ||
            incomingLayers[i]->getDifferingFields(*(existingLayers[i])) != LayerStateField::None) {
            return false;
        }
    }
    return true;
}

} // namespace

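// Illustrative per-frame usage (a sketch only; in practice this class is driven by the
// surrounding planner code, and the variable names below are placeholders):
//
//   planner::Flattener flattener(renderEngine, tunables);
//   // ... once per frame ...
//   hash = flattener.flattenLayers(layerStates, geometryHash, now);
//   flattener.renderCachedSets(outputState, renderDeadline, deviceHandlesColorTransform);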
Flattener::Flattener(renderengine::RenderEngine& renderEngine, const Tunables& tunables)
      : mRenderEngine(renderEngine), mTunables(tunables), mTexturePool(mRenderEngine) {}

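// Attempts to flatten the given layer stack. If the geometry changed since the last frame, all
// caching state is reset and the incoming hash is returned unchanged. Otherwise the incoming
// layers are merged into the tracked cached sets; once those sets were already populated, a new
// cached set may be scheduled and the returned hash is recomputed over the merged sets.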
NonBufferHash Flattener::flattenLayers(const std::vector<const LayerState*>& layers,
                                       NonBufferHash hash, time_point now) {
    ATRACE_CALL();
    const size_t unflattenedDisplayCost = calculateDisplayCost(layers);
    mUnflattenedDisplayCost += unflattenedDisplayCost;

    // We invalidate the layer cache if:
    // 1. We're not tracking any layers, or
    // 2. The last seen hashed geometry changed between frames, or
    // 3. A stricter equality check demonstrates that the layer stack really did change, since the
    //    hashed geometry does not guarantee uniqueness.
    if (mCurrentGeometry != hash || (!mLayers.empty() && !isSameStack(layers, mLayers))) {
        resetActivities(hash, now);
        mFlattenedDisplayCost += unflattenedDisplayCost;
        return hash;
    }

    ++mInitialLayerCounts[layers.size()];

    // Only call buildCachedSets if these layers were already stored in mLayers. Otherwise (i.e.
    // when mergeWithCachedSets returns false) the cached sets were just created with timestamp
    // `now`, so buildCachedSets would never find any inactive runs.
    const bool alreadyHadCachedSets = mergeWithCachedSets(layers, now);

    ++mFinalLayerCounts[mLayers.size()];

    if (alreadyHadCachedSets) {
        buildCachedSets(now);
        hash = computeLayersHash();
    }

    return hash;
}

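// Renders the in-flight cached set, if there is one. Rendering is skipped when the set already
// has a rendered buffer, or when render scheduling is enabled and rendering now would likely
// overrun the next frame's deadline.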
void Flattener::renderCachedSets(
        const OutputCompositionState& outputState,
        std::optional<std::chrono::steady_clock::time_point> renderDeadline,
        bool deviceHandlesColorTransform) {
    ATRACE_CALL();

    if (!mNewCachedSet) {
        return;
    }

    // Nothing to do if the cached set already has a rendered buffer.
    if (mNewCachedSet->hasRenderedBuffer()) {
        ATRACE_NAME("mNewCachedSet->hasRenderedBuffer()");
        return;
    }

    const auto now = std::chrono::steady_clock::now();

    // If we have a render deadline, and the flattener is configured to skip rendering if we don't
    // have enough time, then we skip rendering the cached set if we think that we'll steal too
    // much time from the next frame.
    if (renderDeadline && mTunables.mRenderScheduling) {
        if (const auto estimatedRenderFinish =
                    now + mTunables.mRenderScheduling->cachedSetRenderDuration;
            estimatedRenderFinish > *renderDeadline) {
            mNewCachedSet->incrementSkipCount();

            if (mNewCachedSet->getSkipCount() <=
                mTunables.mRenderScheduling->maxDeferRenderAttempts) {
                // The duration count is 64-bit, so cast explicitly for the format string.
                ATRACE_FORMAT("DeadlinePassed: exceeded deadline by: %lld us",
                              static_cast<long long>(
                                      std::chrono::duration_cast<std::chrono::microseconds>(
                                              estimatedRenderFinish - *renderDeadline)
                                              .count()));
                return;
            } else {
                ATRACE_NAME("DeadlinePassed: exceeded max skips");
            }
        }
    }

    mNewCachedSet->render(mRenderEngine, mTexturePool, outputState, deviceHandlesColorTransform);
}

void Flattener::dumpLayers(std::string& result) const {
    result.append("  Current layers:");
    for (const CachedSet& layer : mLayers) {
        result.append("\n");
        layer.dump(result);
    }
}

void Flattener::dump(std::string& result) const {
    const auto now = std::chrono::steady_clock::now();

    base::StringAppendF(&result, "Flattener state:\n");

    result.append("\n  Statistics:\n");

    result.append("    Display cost (in screen-size buffers):\n");
    const size_t displayArea = static_cast<size_t>(mDisplaySize.width * mDisplaySize.height);
    base::StringAppendF(&result, "      Unflattened: %.2f\n",
                        static_cast<float>(mUnflattenedDisplayCost) / displayArea);
    base::StringAppendF(&result, "      Flattened: %.2f\n",
                        static_cast<float>(mFlattenedDisplayCost) / displayArea);

    const auto compareLayerCounts = [](const std::pair<size_t, size_t>& left,
                                       const std::pair<size_t, size_t>& right) {
        return left.first < right.first;
    };

    const size_t maxLayerCount = mInitialLayerCounts.empty()
            ? 0u
            : std::max_element(mInitialLayerCounts.cbegin(), mInitialLayerCounts.cend(),
                               compareLayerCounts)
                      ->first;

    result.append("\n    Initial counts:\n");
    // Inclusive upper bound, so that the largest bucket is also reported.
    for (size_t count = 1; count <= maxLayerCount; ++count) {
        size_t initial = mInitialLayerCounts.count(count) > 0 ? mInitialLayerCounts.at(count) : 0;
        base::StringAppendF(&result, "      %2zu: %zu\n", count, initial);
    }

    result.append("\n    Final counts:\n");
    for (size_t count = 1; count <= maxLayerCount; ++count) {
        size_t final = mFinalLayerCounts.count(count) > 0 ? mFinalLayerCounts.at(count) : 0;
        base::StringAppendF(&result, "      %2zu: %zu\n", count, final);
    }

    base::StringAppendF(&result, "\n    Cached sets created: %zu\n", mCachedSetCreationCount);
    base::StringAppendF(&result, "    Cost: %.2f\n",
                        static_cast<float>(mCachedSetCreationCost) / displayArea);

    const auto lastUpdate =
            std::chrono::duration_cast<std::chrono::milliseconds>(now - mLastGeometryUpdate);
    base::StringAppendF(&result, "\n  Current hash %016zx, last update %sago\n\n", mCurrentGeometry,
                        durationString(lastUpdate).c_str());

    dumpLayers(result);

    base::StringAppendF(&result, "\n");
    mTexturePool.dump(result);
}

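// Returns the per-frame composition cost of the given layers, measured in pixels read or written.
// dump() later normalizes this by the display area, i.e. reports it in units of screen-size
// buffers.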
size_t Flattener::calculateDisplayCost(const std::vector<const LayerState*>& layers) const {
    Region coveredRegion;
    size_t displayCost = 0;
    bool hasClientComposition = false;

    for (const LayerState* layer : layers) {
        coveredRegion.orSelf(layer->getDisplayFrame());

        // Regardless of composition type, we always have to read each input once
        displayCost += static_cast<size_t>(layer->getDisplayFrame().width() *
                                           layer->getDisplayFrame().height());

        hasClientComposition |= layer->getCompositionType() ==
                aidl::android::hardware::graphics::composer3::Composition::CLIENT;
    }

    if (hasClientComposition) {
        // If there is client composition, the client target buffer has to be both written by the
        // GPU and read by the DPU, so we pay its cost twice
        displayCost += 2 *
                static_cast<size_t>(coveredRegion.bounds().width() *
                                    coveredRegion.bounds().height());
    }

    return displayCost;
}

void Flattener::resetActivities(NonBufferHash hash, time_point now) {
    ALOGV("[%s]", __func__);

    mCurrentGeometry = hash;
    mLastGeometryUpdate = now;

    for (const CachedSet& cachedSet : mLayers) {
        if (cachedSet.getLayerCount() > 1) {
            ++mInvalidatedCachedSetAges[cachedSet.getAge()];
        }
    }

    mLayers.clear();

    if (mNewCachedSet) {
        ++mInvalidatedCachedSetAges[mNewCachedSet->getAge()];
        mNewCachedSet = std::nullopt;
    }
}

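// Combines the non-buffer hashes of all cached sets in z-order. The combination is
// order-sensitive, so the result identifies both which sets are present and how they are stacked.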
NonBufferHash Flattener::computeLayersHash() const {
    size_t hash = 0;
    for (const auto& layer : mLayers) {
        android::hashCombineSingleHashed(hash, layer.getNonBufferHash());
    }
    return hash;
}

// Only called if the geometry matches the last frame. Returns true if mLayers was already
// populated with these layers, i.e. on the second and following calls with the same geometry.
bool Flattener::mergeWithCachedSets(const std::vector<const LayerState*>& layers, time_point now) {
    ATRACE_CALL();
    std::vector<CachedSet> merged;

    if (mLayers.empty()) {
        merged.reserve(layers.size());
        for (const LayerState* layer : layers) {
            merged.emplace_back(layer, now);
            mFlattenedDisplayCost += merged.back().getDisplayCost();
        }
        mLayers = std::move(merged);
        return false;
    }

    // The compiler should strip out the following no-op loops when ALOGV is off
    ALOGV("[%s] Incoming layers:", __func__);
    for (const LayerState* layer : layers) {
        ALOGV("%s", layer->getName().c_str());
    }

    ALOGV("[%s] Current layers:", __func__);
    for (const CachedSet& layer : mLayers) {
        const auto dumper = [&] {
            std::string dump;
            layer.dump(dump);
            return dump;
        };
        ALOGV("%s", dumper().c_str());
    }

    auto currentLayerIter = mLayers.begin();
    auto incomingLayerIter = layers.begin();

    // If not null, this represents the layer that is blurring the layer before
    // currentLayerIter. The blurring was stored in the override buffer, so the
    // layer that requests the blur no longer needs to do any blurring.
    compositionengine::OutputLayer* priorBlurLayer = nullptr;

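    // Walk the incoming layers and the existing cached sets in parallel. For each existing set:
    // reuse it if its buffers have not changed, decompose it back into single-layer sets if it
    // has multiple layers and received a buffer update, or refresh its age otherwise. If the
    // in-flight mNewCachedSet lines up with the next incoming layers and has a ready buffer, it
    // replaces the sets it was built from.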
    while (incomingLayerIter != layers.end()) {
        if (mNewCachedSet &&
            mNewCachedSet->getFirstLayer().getState()->getId() == (*incomingLayerIter)->getId()) {
            if (mNewCachedSet->hasBufferUpdate()) {
                ALOGV("[%s] Dropping new cached set", __func__);
                ++mInvalidatedCachedSetAges[0];
                mNewCachedSet = std::nullopt;
            } else if (mNewCachedSet->hasReadyBuffer()) {
                ALOGV("[%s] Found ready buffer", __func__);
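                // Point every constituent layer's override state at the cached set's single
                // flattened buffer, so that composition consumes one buffer in place of the
                // individual layers it replaces.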
                size_t skipCount = mNewCachedSet->getLayerCount();
                while (skipCount != 0) {
                    auto* peekThroughLayer = mNewCachedSet->getHolePunchLayer();
                    const size_t layerCount = currentLayerIter->getLayerCount();
                    for (size_t i = 0; i < layerCount; ++i) {
                        bool disableBlur = priorBlurLayer &&
                                priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                        OutputLayer::CompositionState& state =
                                (*incomingLayerIter)->getOutputLayer()->editState();
                        state.overrideInfo = {
                                .buffer = mNewCachedSet->getBuffer(),
                                .acquireFence = mNewCachedSet->getDrawFence(),
                                .displayFrame = mNewCachedSet->getTextureBounds(),
                                .dataspace = mNewCachedSet->getOutputDataspace(),
                                .displaySpace = mNewCachedSet->getOutputSpace(),
                                .damageRegion = Region::INVALID_REGION,
                                .visibleRegion = mNewCachedSet->getVisibleRegion(),
                                .peekThroughLayer = peekThroughLayer,
                                .disableBackgroundBlur = disableBlur,
                        };
                        ++incomingLayerIter;
                    }

                    if (currentLayerIter->getLayerCount() > 1) {
                        ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
                    }
                    ++currentLayerIter;

                    skipCount -= layerCount;
                }
                priorBlurLayer = mNewCachedSet->getBlurLayer();
                merged.emplace_back(std::move(*mNewCachedSet));
                mNewCachedSet = std::nullopt;
                continue;
            }
        }

        if (!currentLayerIter->hasBufferUpdate()) {
            currentLayerIter->incrementAge();
            merged.emplace_back(*currentLayerIter);

            // Skip the incoming layers corresponding to this valid current layer
            const size_t layerCount = currentLayerIter->getLayerCount();
            auto* peekThroughLayer = currentLayerIter->getHolePunchLayer();
            for (size_t i = 0; i < layerCount; ++i) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo = {
                        .buffer = currentLayerIter->getBuffer(),
                        .acquireFence = currentLayerIter->getDrawFence(),
                        .displayFrame = currentLayerIter->getTextureBounds(),
                        .dataspace = currentLayerIter->getOutputDataspace(),
                        .displaySpace = currentLayerIter->getOutputSpace(),
                        .damageRegion = Region(),
                        .visibleRegion = currentLayerIter->getVisibleRegion(),
                        .peekThroughLayer = peekThroughLayer,
                        .disableBackgroundBlur = disableBlur,
                };
                ++incomingLayerIter;
            }
        } else if (currentLayerIter->getLayerCount() > 1) {
            // Break the current layer into its constituent layers
            ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
            for (CachedSet& layer : currentLayerIter->decompose()) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo.disableBackgroundBlur = disableBlur;
                layer.updateAge(now);
                merged.emplace_back(layer);
                ++incomingLayerIter;
            }
        } else {
            bool disableBlur =
                    priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
            OutputLayer::CompositionState& state =
                    (*incomingLayerIter)->getOutputLayer()->editState();
            state.overrideInfo.disableBackgroundBlur = disableBlur;
            currentLayerIter->updateAge(now);
            merged.emplace_back(*currentLayerIter);
            ++incomingLayerIter;
        }
        priorBlurLayer = currentLayerIter->getBlurLayer();
        ++currentLayerIter;
    }

    for (const CachedSet& layer : merged) {
        mFlattenedDisplayCost += layer.getDisplayCost();
    }

    mLayers = std::move(merged);
    return true;
}

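// Finds runs of candidate cached sets, where a run is a contiguous sequence of cached sets in
// z-order that are all inactive (no recent buffer updates, or effectively static due to a very
// low frame rate) and can therefore be flattened into a single cached set. The first active set
// following a run is recorded as its hole punch candidate.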
std::vector<Flattener::Run> Flattener::findCandidateRuns(time_point now) const {
    ATRACE_CALL();
    std::vector<Run> runs;
    bool isPartOfRun = false;
    Run::Builder builder;
    bool firstLayer = true;
    bool runHasFirstLayer = false;

    for (auto currentSet = mLayers.cbegin(); currentSet != mLayers.cend(); ++currentSet) {
        bool layerIsInactive = now - currentSet->getLastUpdate() > mTunables.mActiveLayerTimeout;
        const bool layerHasBlur = currentSet->hasBlurBehind();

        // Layers should also be considered inactive whenever their framerate is lower than 1fps.
        if (!layerIsInactive && currentSet->getLayerCount() == kNumLayersFpsConsideration) {
            auto layerFps = currentSet->getFirstLayer().getState()->getFps();
            if (layerFps > 0 && layerFps <= kFpsActiveThreshold) {
                ATRACE_FORMAT("layer is considered inactive due to low FPS [%s] %f",
                              currentSet->getFirstLayer().getName().c_str(), layerFps);
                layerIsInactive = true;
            }
        }

        if (layerIsInactive && (firstLayer || runHasFirstLayer || !layerHasBlur) &&
            !currentSet->hasUnsupportedDataspace()) {
            if (isPartOfRun) {
                builder.increment();
            } else {
                builder.init(currentSet);
                if (firstLayer) {
                    runHasFirstLayer = true;
                }
                isPartOfRun = true;
            }
        } else if (isPartOfRun) {
            builder.setHolePunchCandidate(&(*currentSet));

            // If we're here then this blur layer recently had a buffer update, meaning that it
            // contains exactly one layer. Blur radius is currently part of layer stack geometry,
            // so we're also guaranteed that the background blur radius hasn't changed for at
            // least as long as this new inactive cached set has existed.
            if (runHasFirstLayer && layerHasBlur &&
                currentSet->getFirstLayer().getBackgroundBlurRadius() > 0) {
                builder.setBlurringLayer(&(*currentSet));
            }
            if (auto run = builder.validateAndBuild(); run) {
                runs.push_back(*run);
            }

            runHasFirstLayer = false;
            builder.reset();
            isPartOfRun = false;
        }

        firstLayer = false;
    }

    // If we're in the middle of a run at the end, we still need to validate and build it.
    if (isPartOfRun) {
        if (auto run = builder.validateAndBuild(); run) {
            runs.push_back(*run);
        }
    }

    ALOGV("[%s] Found %zu candidate runs", __func__, runs.size());

    return runs;
}

std::optional<Flattener::Run> Flattener::findBestRun(std::vector<Flattener::Run>& runs) const {
    if (runs.empty()) {
        return std::nullopt;
    }

    // TODO (b/181192467): Choose the best run, instead of just the first.
    return runs[0];
}

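// Builds at most one new cached set per frame: picks the best candidate run, seeds mNewCachedSet
// from the run's constituent sets, and attaches blur and hole punch metadata. The actual GPU
// render of the set happens later, in renderCachedSets.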
void Flattener::buildCachedSets(time_point now) {
    ATRACE_CALL();
    if (mLayers.empty()) {
        ALOGV("[%s] No layers found, returning", __func__);
        return;
    }

    // Don't try to build a new cached set if we already have a new one in progress
    if (mNewCachedSet) {
        return;
    }

    for (const CachedSet& layer : mLayers) {
        // TODO (b/191997217): make it less aggressive, and sync with findCandidateRuns
        if (layer.hasProtectedLayers()) {
            ATRACE_NAME("layer->hasProtectedLayers()");
            return;
        }
    }

    for (const CachedSet& layer : mLayers) {
        if (layer.hasSolidColorLayers()) {
            ATRACE_NAME("layer->hasSolidColorLayers()");
            return;
        }
    }

    std::vector<Run> runs = findCandidateRuns(now);

    std::optional<Run> bestRun = findBestRun(runs);

    if (!bestRun) {
        return;
    }

    mNewCachedSet.emplace(*bestRun->getStart());
    mNewCachedSet->setLastUpdate(now);
    auto currentSet = bestRun->getStart();
    while (mNewCachedSet->getLayerCount() < bestRun->getLayerLength()) {
        ++currentSet;
        mNewCachedSet->append(*currentSet);
    }

    if (bestRun->getBlurringLayer()) {
        mNewCachedSet->addBackgroundBlurLayer(*bestRun->getBlurringLayer());
    }

    if (mTunables.mEnableHolePunch && bestRun->getHolePunchCandidate() &&
        bestRun->getHolePunchCandidate()->requiresHolePunch()) {
        // Add the pip layer to mNewCachedSet, but in a special way - it should
        // replace the buffer with a clear round rect.
        mNewCachedSet->addHolePunchLayerIfFeasible(*bestRun->getHolePunchCandidate(),
                                                   bestRun->getStart() == mLayers.cbegin());
    }

    // TODO(b/181192467): Actually compute new LayerState vector and corresponding hash for each
    // run and feedback into the predictor

    ++mCachedSetCreationCount;
    mCachedSetCreationCost += mNewCachedSet->getCreationCost();

    // Note: the compiler should strip the following no-op statements when ALOGV is off
    const auto dumper = [&] {
        std::string setDump;
        mNewCachedSet->dump(setDump);
        return setDump;
    };
    ALOGV("[%s] Added new cached set:\n%s", __func__, dumper().c_str());
}

} // namespace android::compositionengine::impl::planner