/*
 * Copyright 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "Planner"
// #define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include <android-base/properties.h>
#include <compositionengine/impl/planner/Flattener.h>
#include <compositionengine/impl/planner/LayerState.h>

#include <gui/TraceUtils.h>

using time_point = std::chrono::steady_clock::time_point;
using namespace std::chrono_literals;

namespace android::compositionengine::impl::planner {

namespace {

// Returns true if the underlying layer stacks are the same, modulo state that is
// expected to differ between frames (such as the specific buffers); false otherwise.
bool isSameStack(const std::vector<const LayerState*>& incomingLayers,
                 const std::vector<CachedSet>& cachedSets) {
    std::vector<const LayerState*> existingLayers;
    for (auto& cachedSet : cachedSets) {
        for (auto& layer : cachedSet.getConstituentLayers()) {
            existingLayers.push_back(layer.getState());
        }
    }

    if (incomingLayers.size() != existingLayers.size()) {
        return false;
    }

    for (size_t i = 0; i < incomingLayers.size(); i++) {
        // Checking the IDs here is very strict, but we do this as otherwise we may mistakenly try
        // to access destroyed OutputLayers later on.
        if (incomingLayers[i]->getId() != existingLayers[i]->getId() ||
            incomingLayers[i]->getDifferingFields(*(existingLayers[i])) != LayerStateField::None) {
            return false;
        }
    }
    return true;
}

} // namespace

Flattener::Flattener(
        renderengine::RenderEngine& renderEngine, bool enableHolePunch,
        std::optional<CachedSetRenderSchedulingTunables> cachedSetRenderSchedulingTunables)
      : mRenderEngine(renderEngine),
        mEnableHolePunch(enableHolePunch),
        mCachedSetRenderSchedulingTunables(cachedSetRenderSchedulingTunables),
        mTexturePool(mRenderEngine) {
    const int timeoutInMs =
            base::GetIntProperty(std::string("debug.sf.layer_caching_active_layer_timeout_ms"), 0);
    if (timeoutInMs != 0) {
        mActiveLayerTimeout = std::chrono::milliseconds(timeoutInMs);
    }
}

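// Runs one frame's layers through the flattening pipeline: invalidates the cache when
// the geometry changed, otherwise merges the incoming layers into the tracked cached
// sets, opportunistically builds a new cached set, and returns a hash describing the
// resulting layer stack.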
NonBufferHash Flattener::flattenLayers(const std::vector<const LayerState*>& layers,
                                       NonBufferHash hash, time_point now) {
    ATRACE_CALL();
    const size_t unflattenedDisplayCost = calculateDisplayCost(layers);
    mUnflattenedDisplayCost += unflattenedDisplayCost;

    // We invalidate the layer cache if:
    // 1. We're not tracking any layers, or
    // 2. The last seen hashed geometry changed between frames, or
    // 3. A stricter equality check demonstrates that the layer stack really did change, since the
    // hashed geometry does not guarantee uniqueness.
    if (mCurrentGeometry != hash || (!mLayers.empty() && !isSameStack(layers, mLayers))) {
        resetActivities(hash, now);
        mFlattenedDisplayCost += unflattenedDisplayCost;
        return hash;
    }

    ++mInitialLayerCounts[layers.size()];

    // Only call buildCachedSets if these layers were already stored in mLayers.
    // Otherwise (i.e. mergeWithCachedSets returns false), mLayers was just
    // populated and every set's last update time is now, so buildCachedSets
    // would never find any inactive runs.
    const bool alreadyHadCachedSets = mergeWithCachedSets(layers, now);

    ++mFinalLayerCounts[mLayers.size()];

    if (alreadyHadCachedSets) {
        buildCachedSets(now);
        hash = computeLayersHash();
    }

    return hash;
}

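// Renders the pending cached set, if any. When scheduling tunables and a deadline are
// provided, rendering is deferred if it would likely overrun the deadline, but only up
// to maxDeferRenderAttempts skips.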
void Flattener::renderCachedSets(
        const OutputCompositionState& outputState,
        std::optional<std::chrono::steady_clock::time_point> renderDeadline) {
    ATRACE_CALL();

    if (!mNewCachedSet) {
        return;
    }

    // If the cached set has already rendered a valid buffer, there is nothing to do
    if (mNewCachedSet->hasRenderedBuffer()) {
        ATRACE_NAME("mNewCachedSet->hasRenderedBuffer()");
        return;
    }

    const auto now = std::chrono::steady_clock::now();

    // If we have a render deadline, and the flattener is configured to skip rendering if we don't
    // have enough time, then we skip rendering the cached set if we think that we'll steal too much
    // time from the next frame.
    if (renderDeadline && mCachedSetRenderSchedulingTunables) {
        if (const auto estimatedRenderFinish =
                    now + mCachedSetRenderSchedulingTunables->cachedSetRenderDuration;
            estimatedRenderFinish > *renderDeadline) {
            mNewCachedSet->incrementSkipCount();

            if (mNewCachedSet->getSkipCount() <=
                mCachedSetRenderSchedulingTunables->maxDeferRenderAttempts) {
                ATRACE_FORMAT("DeadlinePassed: exceeded deadline by: %lld us",
                              static_cast<long long>(
                                      std::chrono::duration_cast<std::chrono::microseconds>(
                                              estimatedRenderFinish - *renderDeadline)
                                              .count()));
                return;
            } else {
                ATRACE_NAME("DeadlinePassed: exceeded max skips");
            }
        }
    }

    mNewCachedSet->render(mRenderEngine, mTexturePool, outputState);
}

void Flattener::dumpLayers(std::string& result) const {
    result.append(" Current layers:");
    for (const CachedSet& layer : mLayers) {
        result.append("\n");
        layer.dump(result);
    }
}

void Flattener::dump(std::string& result) const {
    const auto now = std::chrono::steady_clock::now();

    base::StringAppendF(&result, "Flattener state:\n");

    result.append("\n Statistics:\n");

    result.append(" Display cost (in screen-size buffers):\n");
    const size_t displayArea = static_cast<size_t>(mDisplaySize.width * mDisplaySize.height);
    base::StringAppendF(&result, " Unflattened: %.2f\n",
                        static_cast<float>(mUnflattenedDisplayCost) / displayArea);
    base::StringAppendF(&result, " Flattened: %.2f\n",
                        static_cast<float>(mFlattenedDisplayCost) / displayArea);

    const auto compareLayerCounts = [](const std::pair<size_t, size_t>& left,
                                       const std::pair<size_t, size_t>& right) {
        return left.first < right.first;
    };

    const size_t maxLayerCount = mInitialLayerCounts.empty()
            ? 0u
            : std::max_element(mInitialLayerCounts.cbegin(), mInitialLayerCounts.cend(),
                               compareLayerCounts)
                      ->first;

    result.append("\n Initial counts:\n");
    for (size_t count = 1; count <= maxLayerCount; ++count) {
        size_t initial = mInitialLayerCounts.count(count) > 0 ? mInitialLayerCounts.at(count) : 0;
        base::StringAppendF(&result, " % 2zd: %zd\n", count, initial);
    }

    result.append("\n Final counts:\n");
    for (size_t count = 1; count <= maxLayerCount; ++count) {
        size_t final = mFinalLayerCounts.count(count) > 0 ? mFinalLayerCounts.at(count) : 0;
        base::StringAppendF(&result, " % 2zd: %zd\n", count, final);
    }

    base::StringAppendF(&result, "\n Cached sets created: %zd\n", mCachedSetCreationCount);
    base::StringAppendF(&result, " Cost: %.2f\n",
                        static_cast<float>(mCachedSetCreationCost) / displayArea);

    const auto lastUpdate =
            std::chrono::duration_cast<std::chrono::milliseconds>(now - mLastGeometryUpdate);
    base::StringAppendF(&result, "\n Current hash %016zx, last update %sago\n\n", mCurrentGeometry,
                        durationString(lastUpdate).c_str());

    dumpLayers(result);
}

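// Estimates a frame's display cost in pixels: every input layer is read once
// regardless of composition type, and if any layer uses client (GPU) composition, the
// client target is both written by the GPU and read by the DPU, so the bounds of the
// covered region are charged twice.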
size_t Flattener::calculateDisplayCost(const std::vector<const LayerState*>& layers) const {
    Region coveredRegion;
    size_t displayCost = 0;
    bool hasClientComposition = false;

    for (const LayerState* layer : layers) {
        coveredRegion.orSelf(layer->getDisplayFrame());

        // Regardless of composition type, we always have to read each input once
        displayCost += static_cast<size_t>(layer->getDisplayFrame().width() *
                                           layer->getDisplayFrame().height());

        hasClientComposition |= layer->getCompositionType() == hal::Composition::CLIENT;
    }

    if (hasClientComposition) {
        // If there is client composition, the client target buffer has to be both written by the
        // GPU and read by the DPU, so we pay its cost twice
        displayCost += 2 *
                static_cast<size_t>(coveredRegion.bounds().width() *
                                    coveredRegion.bounds().height());
    }

    return displayCost;
}

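// Invalidates the cache: records the ages of the multi-layer cached sets being thrown
// away, clears all tracked layers, and restarts tracking from the given geometry hash.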
void Flattener::resetActivities(NonBufferHash hash, time_point now) {
    ALOGV("[%s]", __func__);

    mCurrentGeometry = hash;
    mLastGeometryUpdate = now;

    for (const CachedSet& cachedSet : mLayers) {
        if (cachedSet.getLayerCount() > 1) {
            ++mInvalidatedCachedSetAges[cachedSet.getAge()];
        }
    }

    mLayers.clear();

    if (mNewCachedSet) {
        ++mInvalidatedCachedSetAges[mNewCachedSet->getAge()];
        mNewCachedSet = std::nullopt;
    }
}

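// Combines each cached set's non-buffer hash, so the resulting stack hash ignores
// buffer contents and changes only when non-buffer state changes.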
NonBufferHash Flattener::computeLayersHash() const {
    size_t hash = 0;
    for (const auto& layer : mLayers) {
        android::hashCombineSingleHashed(hash, layer.getNonBufferHash());
    }
    return hash;
}

// Only called if the geometry matches the last frame. Returns true if mLayers
// was already populated with these layers, i.e. on the second and following
// calls with the same geometry.
bool Flattener::mergeWithCachedSets(const std::vector<const LayerState*>& layers, time_point now) {
    ATRACE_CALL();
    std::vector<CachedSet> merged;

    if (mLayers.empty()) {
        merged.reserve(layers.size());
        for (const LayerState* layer : layers) {
            merged.emplace_back(layer, now);
            mFlattenedDisplayCost += merged.back().getDisplayCost();
        }
        mLayers = std::move(merged);
        return false;
    }

    // the compiler should strip out the following no-op loops when ALOGV is off
    ALOGV("[%s] Incoming layers:", __func__);
    for (const LayerState* layer : layers) {
        ALOGV("%s", layer->getName().c_str());
    }

    ALOGV("[%s] Current layers:", __func__);
    for (const CachedSet& layer : mLayers) {
        const auto dumper = [&] {
            std::string dump;
            layer.dump(dump);
            return dump;
        };
        ALOGV("%s", dumper().c_str());
    }

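    // Walk the incoming and current layer lists in lockstep. Each current cached set
    // is either reused as-is (no buffer update), replaced by the newly rendered
    // mNewCachedSet, decomposed back into its constituent layers (buffer update on a
    // multi-layer set), or refreshed in place (buffer update on a single layer).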
    auto currentLayerIter = mLayers.begin();
    auto incomingLayerIter = layers.begin();

    // If not null, this represents the layer that is blurring the layer before
    // currentLayerIter. The blurring was stored in the override buffer, so the
    // layer that requests the blur no longer needs to do any blurring.
    compositionengine::OutputLayer* priorBlurLayer = nullptr;

    while (incomingLayerIter != layers.end()) {
        if (mNewCachedSet &&
            mNewCachedSet->getFirstLayer().getState()->getId() == (*incomingLayerIter)->getId()) {
            if (mNewCachedSet->hasBufferUpdate()) {
                ALOGV("[%s] Dropping new cached set", __func__);
                ++mInvalidatedCachedSetAges[0];
                mNewCachedSet = std::nullopt;
            } else if (mNewCachedSet->hasReadyBuffer()) {
                ALOGV("[%s] Found ready buffer", __func__);
                size_t skipCount = mNewCachedSet->getLayerCount();
                while (skipCount != 0) {
                    auto* peekThroughLayer = mNewCachedSet->getHolePunchLayer();
                    const size_t layerCount = currentLayerIter->getLayerCount();
                    for (size_t i = 0; i < layerCount; ++i) {
                        bool disableBlur = priorBlurLayer &&
                                priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                        OutputLayer::CompositionState& state =
                                (*incomingLayerIter)->getOutputLayer()->editState();
                        state.overrideInfo = {
                                .buffer = mNewCachedSet->getBuffer(),
                                .acquireFence = mNewCachedSet->getDrawFence(),
                                .displayFrame = mNewCachedSet->getTextureBounds(),
                                .dataspace = mNewCachedSet->getOutputDataspace(),
                                .displaySpace = mNewCachedSet->getOutputSpace(),
                                .damageRegion = Region::INVALID_REGION,
                                .visibleRegion = mNewCachedSet->getVisibleRegion(),
                                .peekThroughLayer = peekThroughLayer,
                                .disableBackgroundBlur = disableBlur,
                        };
                        ++incomingLayerIter;
                    }

                    if (currentLayerIter->getLayerCount() > 1) {
                        ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
                    }
                    ++currentLayerIter;

                    skipCount -= layerCount;
                }
                priorBlurLayer = mNewCachedSet->getBlurLayer();
                merged.emplace_back(std::move(*mNewCachedSet));
                mNewCachedSet = std::nullopt;
                continue;
            }
        }

        if (!currentLayerIter->hasBufferUpdate()) {
            currentLayerIter->incrementAge();
            merged.emplace_back(*currentLayerIter);

            // Skip the incoming layers corresponding to this valid current layer
            const size_t layerCount = currentLayerIter->getLayerCount();
            auto* peekThroughLayer = currentLayerIter->getHolePunchLayer();
            for (size_t i = 0; i < layerCount; ++i) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo = {
                        .buffer = currentLayerIter->getBuffer(),
                        .acquireFence = currentLayerIter->getDrawFence(),
                        .displayFrame = currentLayerIter->getTextureBounds(),
                        .dataspace = currentLayerIter->getOutputDataspace(),
                        .displaySpace = currentLayerIter->getOutputSpace(),
                        .damageRegion = Region(),
                        .visibleRegion = currentLayerIter->getVisibleRegion(),
                        .peekThroughLayer = peekThroughLayer,
                        .disableBackgroundBlur = disableBlur,
                };
                ++incomingLayerIter;
            }
        } else if (currentLayerIter->getLayerCount() > 1) {
            // Break the current layer into its constituent layers
            ++mInvalidatedCachedSetAges[currentLayerIter->getAge()];
            for (CachedSet& layer : currentLayerIter->decompose()) {
                bool disableBlur =
                        priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
                OutputLayer::CompositionState& state =
                        (*incomingLayerIter)->getOutputLayer()->editState();
                state.overrideInfo.disableBackgroundBlur = disableBlur;
                layer.updateAge(now);
                merged.emplace_back(layer);
                ++incomingLayerIter;
            }
        } else {
            bool disableBlur =
                    priorBlurLayer && priorBlurLayer == (*incomingLayerIter)->getOutputLayer();
            OutputLayer::CompositionState& state =
                    (*incomingLayerIter)->getOutputLayer()->editState();
            state.overrideInfo.disableBackgroundBlur = disableBlur;
            currentLayerIter->updateAge(now);
            merged.emplace_back(*currentLayerIter);
            ++incomingLayerIter;
        }
        priorBlurLayer = currentLayerIter->getBlurLayer();
        ++currentLayerIter;
    }

    for (const CachedSet& layer : merged) {
        mFlattenedDisplayCost += layer.getDisplayCost();
    }

    mLayers = std::move(merged);
    return true;
}

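// Scans mLayers for candidate "runs": contiguous sequences of inactive cached sets
// (no buffer update within mActiveLayerTimeout) that could be flattened into a single
// cached buffer. A run cannot start with a bufferless layer, may include a blur-behind
// layer only when the run starts at the first layer of the stack, and excludes sets
// with unsupported dataspaces. The set that ends a run is recorded as its hole-punch
// candidate and, when applicable, as its blurring layer.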
std::vector<Flattener::Run> Flattener::findCandidateRuns(time_point now) const {
    ATRACE_CALL();
    std::vector<Run> runs;
    bool isPartOfRun = false;
    Run::Builder builder;
    bool firstLayer = true;
    bool runHasFirstLayer = false;

    for (auto currentSet = mLayers.cbegin(); currentSet != mLayers.cend(); ++currentSet) {
        const bool layerIsInactive = now - currentSet->getLastUpdate() > mActiveLayerTimeout;
        const bool layerHasBlur = currentSet->hasBlurBehind();
        if (layerIsInactive && (firstLayer || runHasFirstLayer || !layerHasBlur) &&
            !currentSet->hasUnsupportedDataspace()) {
            if (isPartOfRun) {
                builder.append(currentSet->getLayerCount());
            } else {
                // Runs can't start with a non-buffer layer
                if (currentSet->getFirstLayer().getBuffer() == nullptr) {
                    ALOGV("[%s] Skipping initial non-buffer layer", __func__);
                } else {
                    builder.init(currentSet);
                    if (firstLayer) {
                        runHasFirstLayer = true;
                    }
                    isPartOfRun = true;
                }
            }
        } else if (isPartOfRun) {
            builder.setHolePunchCandidate(&(*currentSet));

            // If we're here then this blur layer recently had an active buffer updating, meaning
            // that there is exactly one layer. Blur radius currently is part of layer stack
            // geometry, so we're also guaranteed that the background blur radius hasn't changed for
            // at least as long as this new inactive cached set.
            if (runHasFirstLayer && layerHasBlur &&
                currentSet->getFirstLayer().getBackgroundBlurRadius() > 0) {
                builder.setBlurringLayer(&(*currentSet));
            }
            if (auto run = builder.validateAndBuild(); run) {
                runs.push_back(*run);
            }

            runHasFirstLayer = false;
            builder.reset();
            isPartOfRun = false;
        }

        firstLayer = false;
    }

    // If we're in the middle of a run at the end, we still need to validate and build it.
    if (isPartOfRun) {
        if (auto run = builder.validateAndBuild(); run) {
            runs.push_back(*run);
        }
    }

    ALOGV("[%s] Found %zu candidate runs", __func__, runs.size());

    return runs;
}

std::optional<Flattener::Run> Flattener::findBestRun(std::vector<Flattener::Run>& runs) const {
    if (runs.empty()) {
        return std::nullopt;
    }

    // TODO (b/181192467): Choose the best run, instead of just the first.
    return runs[0];
}

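// Picks the best candidate run and stages it as mNewCachedSet, appending the run's
// constituent sets and recording any blurring and hole-punch layers. The actual GPU
// render is deferred to renderCachedSets. Bails out if a new set is already in
// progress or if any layer is protected.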
void Flattener::buildCachedSets(time_point now) {
    ATRACE_CALL();
    if (mLayers.empty()) {
        ALOGV("[%s] No layers found, returning", __func__);
        return;
    }

    // Don't try to build a new cached set if we already have a new one in progress
    if (mNewCachedSet) {
        return;
    }

    for (const CachedSet& layer : mLayers) {
        // TODO (b/191997217): make it less aggressive, and sync with findCandidateRuns
        if (layer.hasProtectedLayers()) {
            ATRACE_NAME("layer->hasProtectedLayers()");
            return;
        }
    }

    std::vector<Run> runs = findCandidateRuns(now);

    std::optional<Run> bestRun = findBestRun(runs);

    if (!bestRun) {
        return;
    }

    mNewCachedSet.emplace(*bestRun->getStart());
    mNewCachedSet->setLastUpdate(now);
    auto currentSet = bestRun->getStart();
    while (mNewCachedSet->getLayerCount() < bestRun->getLayerLength()) {
        ++currentSet;
        mNewCachedSet->append(*currentSet);
    }

    if (bestRun->getBlurringLayer()) {
        mNewCachedSet->addBackgroundBlurLayer(*bestRun->getBlurringLayer());
    }

    if (mEnableHolePunch && bestRun->getHolePunchCandidate() &&
        bestRun->getHolePunchCandidate()->requiresHolePunch()) {
        // Add the pip layer to mNewCachedSet, but in a special way - it should
        // replace the buffer with a clear round rect.
        mNewCachedSet->addHolePunchLayerIfFeasible(*bestRun->getHolePunchCandidate(),
                                                   bestRun->getStart() == mLayers.cbegin());
    }

    // TODO(b/181192467): Actually compute new LayerState vector and corresponding hash for each run
    // and feedback into the predictor

    ++mCachedSetCreationCount;
    mCachedSetCreationCost += mNewCachedSet->getCreationCost();

    // Note: the compiler should strip the following no-op statements when ALOGV is off
    const auto dumper = [&] {
        std::string setDump;
        mNewCachedSet->dump(setDump);
        return setDump;
    };
    ALOGV("[%s] Added new cached set:\n%s", __func__, dumper().c_str());
}

} // namespace android::compositionengine::impl::planner