/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "CanvasContext.h"

#include <apex/window.h>
#include <fcntl.h>

#ifdef __ANDROID__
#include <gui/ITransactionCompletedListener.h>
#include <gui/SurfaceComposerClient.h>
#endif

#include <gui/TraceUtils.h>
#include <strings.h>
#include <sys/stat.h>
#include <ui/Fence.h>

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>

#include "../Properties.h"
#include "AnimationContext.h"
#include "Frame.h"
#include "LayerUpdateQueue.h"
#include "Properties.h"
#include "RenderThread.h"
#include "hwui/Canvas.h"
#include "pipeline/skia/SkiaCpuPipeline.h"
#include "pipeline/skia/SkiaGpuPipeline.h"
#include "pipeline/skia/SkiaOpenGLPipeline.h"
#include "pipeline/skia/SkiaVulkanPipeline.h"
#include "thread/CommonPool.h"
#include "utils/GLUtils.h"
#include "utils/TimeUtils.h"

#define LOG_FRAMETIME_MMA 0

#if LOG_FRAMETIME_MMA
static float sBenchMma = 0;
static int sFrameCount = 0;
static const float NANOS_PER_MILLIS_F = 1000000.0f;
#endif

namespace android {
namespace uirenderer {
namespace renderthread {

namespace {
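// RAII helper that records which CanvasContext is currently active on the render
// thread (set for the duration of draw() and buildLayer()), so static helpers such
// as CanvasContext::shouldDither() can look it up.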
class ScopedActiveContext {
public:
    ScopedActiveContext(CanvasContext* context) { sActiveContext = context; }

    ~ScopedActiveContext() { sActiveContext = nullptr; }

    static CanvasContext* getActiveContext() { return sActiveContext; }

private:
    static CanvasContext* sActiveContext;
};

CanvasContext* ScopedActiveContext::sActiveContext = nullptr;
} /* namespace */

CanvasContext* CanvasContext::create(RenderThread& thread, bool translucent,
                                     RenderNode* rootRenderNode, IContextFactory* contextFactory,
                                     pid_t uiThreadId, pid_t renderThreadId) {
    auto renderType = Properties::getRenderPipelineType();

    switch (renderType) {
        case RenderPipelineType::SkiaGL:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaOpenGLPipeline>(thread),
                                     uiThreadId, renderThreadId);
        case RenderPipelineType::SkiaVulkan:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaVulkanPipeline>(thread),
                                     uiThreadId, renderThreadId);
#ifndef __ANDROID__
        case RenderPipelineType::SkiaCpu:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaCpuPipeline>(thread),
                                     uiThreadId, renderThreadId);
#endif
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
            break;
    }
    return nullptr;
}

void CanvasContext::invokeFunctor(const RenderThread& thread, Functor* functor) {
    ATRACE_CALL();
    auto renderType = Properties::getRenderPipelineType();
    switch (renderType) {
        case RenderPipelineType::SkiaGL:
            skiapipeline::SkiaOpenGLPipeline::invokeFunctor(thread, functor);
            break;
        case RenderPipelineType::SkiaVulkan:
            skiapipeline::SkiaVulkanPipeline::invokeFunctor(thread, functor);
            break;
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
            break;
    }
}

void CanvasContext::prepareToDraw(const RenderThread& thread, Bitmap* bitmap) {
    skiapipeline::SkiaGpuPipeline::prepareToDraw(thread, bitmap);
}

CanvasContext::CanvasContext(RenderThread& thread, bool translucent, RenderNode* rootRenderNode,
                             IContextFactory* contextFactory,
                             std::unique_ptr<IRenderPipeline> renderPipeline, pid_t uiThreadId,
                             pid_t renderThreadId)
        : mRenderThread(thread)
        , mGenerationID(0)
        , mOpaque(!translucent)
        , mAnimationContext(contextFactory->createAnimationContext(mRenderThread.timeLord()))
        , mJankTracker(&thread.globalProfileData())
        , mProfiler(mJankTracker.frames(), thread.timeLord().frameIntervalNanos())
        , mContentDrawBounds(0, 0, 0, 0)
        , mRenderPipeline(std::move(renderPipeline))
        , mHintSessionWrapper(std::make_shared<HintSessionWrapper>(uiThreadId, renderThreadId)) {
    mRenderThread.cacheManager().registerCanvasContext(this);
    mRenderThread.renderState().registerContextCallback(this);
    rootRenderNode->makeRoot();
    mRenderNodes.emplace_back(rootRenderNode);
    mProfiler.setDensity(DeviceInfo::getDensity());
}

CanvasContext::~CanvasContext() {
    destroy();
    for (auto& node : mRenderNodes) {
        node->clearRoot();
    }
    mRenderNodes.clear();
    mRenderThread.cacheManager().unregisterCanvasContext(this);
    mRenderThread.renderState().removeContextCallback(this);
    mHintSessionWrapper->destroy();
}

void CanvasContext::addRenderNode(RenderNode* node, bool placeFront) {
    int pos = placeFront ? 0 : static_cast<int>(mRenderNodes.size());
    node->makeRoot();
    mRenderNodes.emplace(mRenderNodes.begin() + pos, node);
}

void CanvasContext::removeRenderNode(RenderNode* node) {
    node->clearRoot();
    mRenderNodes.erase(std::remove(mRenderNodes.begin(), mRenderNodes.end(), node),
                       mRenderNodes.end());
}

void CanvasContext::destroy() {
    stopDrawing();
    setHardwareBuffer(nullptr);
    setSurface(nullptr);
#ifdef __ANDROID__
    setSurfaceControl(nullptr);
#endif
    freePrefetchedLayers();
    destroyHardwareResources();
    mAnimationContext->destroy();
    mRenderThread.cacheManager().onContextStopped(this);
    mHintSessionWrapper->delayedDestroy(mRenderThread, 2_s, mHintSessionWrapper);
}

static void setBufferCount(ANativeWindow* window) {
    int query_value;
    int err = window->query(window, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &query_value);
    if (err != 0 || query_value < 0) {
        ALOGE("window->query failed: %s (%d) value=%d", strerror(-err), err, query_value);
        return;
    }
    auto min_undequeued_buffers = static_cast<uint32_t>(query_value);

    // We only need to set min_undequeued + 2 because the renderahead amount was already
    // factored into the query for min_undequeued
    int bufferCount = min_undequeued_buffers + 2;
    native_window_set_buffer_count(window, bufferCount);
}

void CanvasContext::setHardwareBuffer(AHardwareBuffer* buffer) {
#ifdef __ANDROID__
    if (mHardwareBuffer) {
        AHardwareBuffer_release(mHardwareBuffer);
        mHardwareBuffer = nullptr;
    }

    if (buffer) {
        AHardwareBuffer_acquire(buffer);
        mHardwareBuffer = buffer;
    }
    mRenderPipeline->setHardwareBuffer(mHardwareBuffer);
#endif
}

void CanvasContext::setSurface(ANativeWindow* window, bool enableTimeout) {
    ATRACE_CALL();

    startHintSession();
    if (window) {
        mNativeSurface = std::make_unique<ReliableSurface>(window);
        mNativeSurface->init();
        if (enableTimeout) {
            // TODO: Fix error handling & re-shorten timeout
            ANativeWindow_setDequeueTimeout(window, 4000_ms);
        }
    } else {
        mNativeSurface = nullptr;
    }
    setupPipelineSurface();
}

#ifdef __ANDROID__
sp<SurfaceControl> CanvasContext::getSurfaceControl() const {
    return mSurfaceControl;
}
#endif

void CanvasContext::setSurfaceControl(sp<SurfaceControl> surfaceControl) {
#ifdef __ANDROID__
    if (surfaceControl == mSurfaceControl) return;

    if (surfaceControl == nullptr) {
        setASurfaceTransactionCallback(nullptr);
        setPrepareSurfaceControlForWebviewCallback(nullptr);
    }

    if (mSurfaceControl != nullptr) {
        TransactionCompletedListener::getInstance()->removeSurfaceStatsListener(
                this, reinterpret_cast<void*>(onSurfaceStatsAvailable));
    }

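    // Bump the generation id so surface-stats callbacks registered against the
    // previous SurfaceControl can be distinguished from those for the new one.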
    mSurfaceControl = std::move(surfaceControl);
    mSurfaceControlGenerationId++;
    mExpectSurfaceStats = mSurfaceControl != nullptr;
    if (mExpectSurfaceStats) {
        SurfaceStatsCallback callback = [generationId = mSurfaceControlGenerationId](
                                                void* callback_context, nsecs_t, const sp<Fence>&,
                                                const SurfaceStats& surfaceStats) {
            onSurfaceStatsAvailable(callback_context, generationId, surfaceStats);
        };
        TransactionCompletedListener::getInstance()->addSurfaceStatsListener(
                this, reinterpret_cast<void*>(onSurfaceStatsAvailable), mSurfaceControl, callback);
    }
#endif
}

void CanvasContext::setupPipelineSurface() {
    bool hasSurface = mRenderPipeline->setSurface(
            mNativeSurface ? mNativeSurface->getNativeWindow() : nullptr, mSwapBehavior);

    if (mNativeSurface && !mNativeSurface->didSetExtraBuffers()) {
        setBufferCount(mNativeSurface->getNativeWindow());
    }

    mFrameNumber = 0;

    if (mNativeSurface != nullptr && hasSurface) {
        mHaveNewSurface = true;
        mSwapHistory.clear();
        // Enable frame stats after the surface has been bound to the appropriate graphics API.
        // Order is important when the new and old surfaces are the same, because the old surface
        // has its frame stats disabled automatically.
        native_window_enable_frame_timestamps(mNativeSurface->getNativeWindow(), true);
        native_window_set_scaling_mode(mNativeSurface->getNativeWindow(),
                                       NATIVE_WINDOW_SCALING_MODE_FREEZE);
    } else {
        mRenderThread.removeFrameCallback(this);
        mGenerationID++;
    }
}

void CanvasContext::setSwapBehavior(SwapBehavior swapBehavior) {
    mSwapBehavior = swapBehavior;
}

bool CanvasContext::pauseSurface() {
    mGenerationID++;
    return mRenderThread.removeFrameCallback(this);
}

void CanvasContext::setStopped(bool stopped) {
    if (mStopped != stopped) {
        mStopped = stopped;
        if (mStopped) {
            mGenerationID++;
            mRenderThread.removeFrameCallback(this);
            mRenderPipeline->onStop();
            mRenderThread.cacheManager().onContextStopped(this);
        } else if (mIsDirty && hasOutputTarget()) {
            mRenderThread.postFrameCallback(this);
        }
    }
}

void CanvasContext::allocateBuffers() {
    if (mNativeSurface && Properties::isDrawingEnabled()) {
        ANativeWindow_tryAllocateBuffers(mNativeSurface->getNativeWindow());
    }
}

void CanvasContext::setLightAlpha(uint8_t ambientShadowAlpha, uint8_t spotShadowAlpha) {
    mLightInfo.ambientShadowAlpha = ambientShadowAlpha;
    mLightInfo.spotShadowAlpha = spotShadowAlpha;
}

void CanvasContext::setLightGeometry(const Vector3& lightCenter, float lightRadius) {
    mLightGeometry.center = lightCenter;
    mLightGeometry.radius = lightRadius;
}

void CanvasContext::setOpaque(bool opaque) {
    mOpaque = opaque;
}

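// Returns the desired HDR/SDR ratio for the new color mode: the configured 8-bit headroom
// cap for ColorMode::Hdr, 10x for ColorMode::Hdr10, and 1.0 for everything else.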
float CanvasContext::setColorMode(ColorMode mode) {
    if (mode != mColorMode) {
        mColorMode = mode;
        mRenderPipeline->setSurfaceColorProperties(mode);
        setupPipelineSurface();
    }
    switch (mColorMode) {
        case ColorMode::Hdr:
            return Properties::maxHdrHeadroomOn8bit;
        case ColorMode::Hdr10:
            return 10.f;
        default:
            return 1.f;
    }
}

float CanvasContext::targetSdrHdrRatio() const {
    if (mColorMode == ColorMode::Hdr || mColorMode == ColorMode::Hdr10) {
        return mTargetSdrHdrRatio;
    } else {
        return 1.f;
    }
}

void CanvasContext::setTargetSdrHdrRatio(float ratio) {
    if (mTargetSdrHdrRatio == ratio) return;

    mTargetSdrHdrRatio = ratio;
    mRenderPipeline->setTargetSdrHdrRatio(ratio);
    // We don't actually have a new surface, but we need to behave as if we do. Specifically we
    // need to ensure all buffers in the swapchain are fully re-rendered, as any partial updates
    // to them will result in mixed target white points, which looks really bad and flickery.
    mHaveNewSurface = true;
}

bool CanvasContext::makeCurrent() {
    if (mStopped) return false;

    auto result = mRenderPipeline->makeCurrent();
    switch (result) {
        case MakeCurrentResult::AlreadyCurrent:
            return true;
        case MakeCurrentResult::Failed:
            mHaveNewSurface = true;
            setSurface(nullptr);
            return false;
        case MakeCurrentResult::Succeeded:
            mHaveNewSurface = true;
            return true;
        default:
            LOG_ALWAYS_FATAL("unexpected result %d from IRenderPipeline::makeCurrent",
                             (int32_t)result);
    }

    return true;
}

static std::optional<SkippedFrameReason> wasSkipped(FrameInfo* info) {
    if (info) return info->getSkippedFrameReason();
    return std::nullopt;
}

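// Heuristic: the swap chain is treated as "stuffed" when every swap in the recent
// history saw a slow dequeue or queue time and there was no multi-frame gap between
// swaps, i.e. we are producing frames faster than the consumer is releasing buffers.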
bool CanvasContext::isSwapChainStuffed() {
    static const auto SLOW_THRESHOLD = 6_ms;

    if (mSwapHistory.size() != mSwapHistory.capacity()) {
        // We want at least 3 frames of history before attempting to
        // guess if the queue is stuffed
        return false;
    }
    nsecs_t frameInterval = mRenderThread.timeLord().frameIntervalNanos();
    auto& swapA = mSwapHistory[0];

    // Was there a happy queue & dequeue time? If so, don't
    // consider it stuffed
    if (swapA.dequeueDuration < SLOW_THRESHOLD && swapA.queueDuration < SLOW_THRESHOLD) {
        return false;
    }

    for (size_t i = 1; i < mSwapHistory.size(); i++) {
        auto& swapB = mSwapHistory[i];

        // If there's a multi-frameInterval gap we effectively already dropped a frame,
        // so consider the queue healthy.
        if (std::abs(swapA.swapCompletedTime - swapB.swapCompletedTime) > frameInterval * 3) {
            return false;
        }

        // Was there a happy queue & dequeue time? If so, don't
        // consider it stuffed
        if (swapB.dequeueDuration < SLOW_THRESHOLD && swapB.queueDuration < SLOW_THRESHOLD) {
            return false;
        }

        swapA = swapB;
    }

    // All signs point to a stuffed swap chain
    ATRACE_NAME("swap chain stuffed");
    return true;
}

void CanvasContext::prepareTree(TreeInfo& info, int64_t* uiFrameInfo, int64_t syncQueued,
                                RenderNode* target) {
    mRenderThread.removeFrameCallback(this);

    // Make sure we have a valid device info
    if (!DeviceInfo::get()->hasMaxTextureSize()) {
        (void)mRenderThread.requireGrContext();
    }

    // If the previous frame was dropped we don't need to hold onto it, so
    // just keep using the previous frame's structure instead
    const auto reason = wasSkipped(mCurrentFrameInfo);
    if (reason.has_value()) {
        // Use the oldest skipped frame in case we skip more than a single frame
        if (!mSkippedFrameInfo) {
            switch (*reason) {
                case SkippedFrameReason::AlreadyDrawn:
                case SkippedFrameReason::NoBuffer:
                case SkippedFrameReason::NoOutputTarget:
                    mSkippedFrameInfo.emplace();
                    mSkippedFrameInfo->vsyncId =
                            mCurrentFrameInfo->get(FrameInfoIndex::FrameTimelineVsyncId);
                    mSkippedFrameInfo->startTime =
                            mCurrentFrameInfo->get(FrameInfoIndex::FrameStartTime);
                    break;
                case SkippedFrameReason::DrawingOff:
                case SkippedFrameReason::ContextIsStopped:
                case SkippedFrameReason::NothingToDraw:
                    // Do not report those as skipped frames as there was no frame expected to be
                    // drawn
                    break;
            }
        }
    } else {
        mCurrentFrameInfo = mJankTracker.startFrame();
        mSkippedFrameInfo.reset();
    }

    mCurrentFrameInfo->importUiThreadInfo(uiFrameInfo);
    mCurrentFrameInfo->set(FrameInfoIndex::SyncQueued) = syncQueued;
    mCurrentFrameInfo->markSyncStart();

    info.damageAccumulator = &mDamageAccumulator;
    info.layerUpdateQueue = &mLayerUpdateQueue;
    info.damageGenerationId = mDamageId++;
    info.out.skippedFrameReason = std::nullopt;

    mAnimationContext->startFrame(info.mode);
    for (const sp<RenderNode>& node : mRenderNodes) {
        // Only the primary target node will be drawn full - all other nodes would get drawn in
        // real time mode. In case of a window, the primary node is the window content and the other
        // node(s) are non client / filler nodes.
        info.mode = (node.get() == target ? TreeInfo::MODE_FULL : TreeInfo::MODE_RT_ONLY);
        node->prepareTree(info);
        GL_CHECKPOINT(MODERATE);
    }
    mAnimationContext->runRemainingAnimations(info);
    GL_CHECKPOINT(MODERATE);

    freePrefetchedLayers();
    GL_CHECKPOINT(MODERATE);

    mIsDirty = true;

    if (CC_UNLIKELY(!hasOutputTarget())) {
        info.out.skippedFrameReason = SkippedFrameReason::NoOutputTarget;
        mCurrentFrameInfo->setSkippedFrameReason(*info.out.skippedFrameReason);
        return;
    }

    if (CC_LIKELY(mSwapHistory.size() && !info.forceDrawFrame)) {
        nsecs_t latestVsync = mRenderThread.timeLord().latestVsync();
        SwapHistory& lastSwap = mSwapHistory.back();
        nsecs_t vsyncDelta = std::abs(lastSwap.vsyncTime - latestVsync);
        // The slight fudge-factor is to deal with cases where
        // the vsync was estimated due to being slow handling the signal.
        // See the logic in TimeLord#computeFrameTimeNanos or in
        // Choreographer.java for details on when this happens
        if (vsyncDelta < 2_ms) {
            // Already drew for this vsync pulse, UI draw request missed
            // the deadline for RT animations
            info.out.skippedFrameReason = SkippedFrameReason::AlreadyDrawn;
        }
    } else {
        info.out.skippedFrameReason = std::nullopt;
    }

    // TODO: Do we need to abort out if the backdrop is added but not ready? Should that even
    // be an allowable combination?
    if (mRenderNodes.size() > 2 && !mRenderNodes[1]->isRenderable()) {
        info.out.skippedFrameReason = SkippedFrameReason::NothingToDraw;
    }

    if (!info.out.skippedFrameReason) {
        int err = mNativeSurface->reserveNext();
        if (err != OK) {
            info.out.skippedFrameReason = SkippedFrameReason::NoBuffer;
            mCurrentFrameInfo->setSkippedFrameReason(*info.out.skippedFrameReason);
            ALOGW("reserveNext failed, error = %d (%s)", err, strerror(-err));
            if (err != TIMED_OUT) {
                // A timed out surface can still recover, but assume others are permanently dead.
                setSurface(nullptr);
                return;
            }
        }
    } else {
        mCurrentFrameInfo->setSkippedFrameReason(*info.out.skippedFrameReason);
    }

    bool postedFrameCallback = false;
    if (info.out.hasAnimations || info.out.skippedFrameReason) {
        if (CC_UNLIKELY(!Properties::enableRTAnimations)) {
            info.out.requiresUiRedraw = true;
        }
        if (!info.out.requiresUiRedraw) {
            // If animationsNeedsRedraw is set don't bother posting for an RT anim
            // as we will just end up fighting the UI thread.
            mRenderThread.postFrameCallback(this);
            postedFrameCallback = true;
        }
    }

    if (!postedFrameCallback &&
        info.out.animatedImageDelay != TreeInfo::Out::kNoAnimatedImageDelay) {
        // Subtract the time of one frame so it can be displayed on time.
        const nsecs_t kFrameTime = mRenderThread.timeLord().frameIntervalNanos();
        if (info.out.animatedImageDelay <= kFrameTime) {
            mRenderThread.postFrameCallback(this);
        } else {
            const auto delay = info.out.animatedImageDelay - kFrameTime;
            int genId = mGenerationID;
            mRenderThread.queue().postDelayed(delay, [this, genId]() {
                if (mGenerationID == genId) {
                    mRenderThread.postFrameCallback(this);
                }
            });
        }
    }
}

void CanvasContext::stopDrawing() {
    mRenderThread.removeFrameCallback(this);
    mAnimationContext->pauseAnimators();
    mGenerationID++;
}

void CanvasContext::notifyFramePending() {
    ATRACE_CALL();
    mRenderThread.pushBackFrameCallback(this);
    sendLoadResetHint();
}

Frame CanvasContext::getFrame() {
    if (mHardwareBuffer != nullptr) {
        return {mBufferParams.getLogicalWidth(), mBufferParams.getLogicalHeight(), 0};
    } else {
        return mRenderPipeline->getFrame();
    }
}

void CanvasContext::draw(bool solelyTextureViewUpdates) {
#ifdef __ANDROID__
    if (auto grContext = getGrContext()) {
        if (grContext->abandoned()) {
            if (grContext->isDeviceLost()) {
                LOG_ALWAYS_FATAL("Lost GPU device unexpectedly");
                return;
            }
            LOG_ALWAYS_FATAL("GrContext is abandoned at start of CanvasContext::draw");
            return;
        }
    }
#endif
    SkRect dirty;
    mDamageAccumulator.finish(&dirty);

    // reset syncDelayDuration each time we draw
    nsecs_t syncDelayDuration = mSyncDelayDuration;
    nsecs_t idleDuration = mIdleDuration;
    mSyncDelayDuration = 0;
    mIdleDuration = 0;

    const auto skippedFrameReason = [&]() -> std::optional<SkippedFrameReason> {
        if (!Properties::isDrawingEnabled()) {
            return SkippedFrameReason::DrawingOff;
        }

        if (dirty.isEmpty() && Properties::skipEmptyFrames && !surfaceRequiresRedraw()) {
            return SkippedFrameReason::NothingToDraw;
        }

        return std::nullopt;
    }();
    if (skippedFrameReason) {
        mCurrentFrameInfo->setSkippedFrameReason(*skippedFrameReason);

#ifdef __ANDROID__
        if (auto grContext = getGrContext()) {
            // Submit to ensure that any texture uploads complete and Skia can
            // free its staging buffers.
            grContext->flushAndSubmit();
        }
#endif

        // Notify the callbacks, even if there's nothing to draw so they aren't waiting
        // indefinitely
        waitOnFences();
        for (auto& func : mFrameCommitCallbacks) {
            std::invoke(func, false /* didProduceBuffer */);
        }
        mFrameCommitCallbacks.clear();
        return;
    }

    ScopedActiveContext activeContext(this);
    mCurrentFrameInfo->set(FrameInfoIndex::FrameInterval) =
            mRenderThread.timeLord().frameIntervalNanos();

    mCurrentFrameInfo->markIssueDrawCommandsStart();

    Frame frame = getFrame();

    SkRect windowDirty = computeDirtyRect(frame, &dirty);

    ATRACE_FORMAT("Drawing " RECT_STRING, SK_RECT_ARGS(dirty));

    IRenderPipeline::DrawResult drawResult;
    {
        // FrameInfoVisualizer accesses the frame events, which cannot be mutated mid-draw
        // or it can lead to memory corruption.
        drawResult = mRenderPipeline->draw(
                frame, windowDirty, dirty, mLightGeometry, &mLayerUpdateQueue, mContentDrawBounds,
                mOpaque, mLightInfo, mRenderNodes, &(profiler()), mBufferParams, profilerLock());
    }

    uint64_t frameCompleteNr = getFrameNumber();

    waitOnFences();

    if (mNativeSurface) {
        // TODO(b/165985262): measure performance impact
        const auto vsyncId = mCurrentFrameInfo->get(FrameInfoIndex::FrameTimelineVsyncId);
        if (vsyncId != UiFrameInfoBuilder::INVALID_VSYNC_ID) {
            const auto inputEventId =
                    static_cast<int32_t>(mCurrentFrameInfo->get(FrameInfoIndex::InputEventId));
            ATRACE_FORMAT(
                    "frameTimelineInfo(frameNumber=%llu, vsyncId=%lld, inputEventId=0x%" PRIx32 ")",
                    frameCompleteNr, vsyncId, inputEventId);
            const ANativeWindowFrameTimelineInfo ftl = {
                    .frameNumber = frameCompleteNr,
                    .frameTimelineVsyncId = vsyncId,
                    .inputEventId = inputEventId,
                    .startTimeNanos = mCurrentFrameInfo->get(FrameInfoIndex::FrameStartTime),
                    .useForRefreshRateSelection = solelyTextureViewUpdates,
                    .skippedFrameVsyncId = mSkippedFrameInfo ? mSkippedFrameInfo->vsyncId
                                                             : UiFrameInfoBuilder::INVALID_VSYNC_ID,
                    .skippedFrameStartTimeNanos =
                            mSkippedFrameInfo ? mSkippedFrameInfo->startTime : 0,
            };
            native_window_set_frame_timeline_info(mNativeSurface->getNativeWindow(), ftl);
        }
    }

    bool requireSwap = false;
    bool didDraw = false;

    int error = OK;
    bool didSwap = mRenderPipeline->swapBuffers(frame, drawResult, windowDirty, mCurrentFrameInfo,
                                                &requireSwap);

    mCurrentFrameInfo->set(FrameInfoIndex::CommandSubmissionCompleted) = std::max(
            drawResult.commandSubmissionTime, mCurrentFrameInfo->get(FrameInfoIndex::SwapBuffers));

    mIsDirty = false;

    if (requireSwap) {
        didDraw = true;
        // Handle any swapchain errors
        error = mNativeSurface->getAndClearError();
        if (error == TIMED_OUT) {
            // Try again
            mRenderThread.postFrameCallback(this);
            // But since this frame didn't happen, we need to mark full damage in the swap
            // history
            didDraw = false;

        } else if (error != OK || !didSwap) {
            // Unknown error, abandon the surface
            setSurface(nullptr);
            didDraw = false;
        }

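        // Record this swap in the history ring buffer; if the frame was dropped,
        // record full-window damage so the next frame repaints everything.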
        SwapHistory& swap = mSwapHistory.next();
        if (didDraw) {
            swap.damage = windowDirty;
        } else {
            float max = static_cast<float>(INT_MAX);
            swap.damage = SkRect::MakeWH(max, max);
        }
        swap.swapCompletedTime = systemTime(SYSTEM_TIME_MONOTONIC);
        swap.vsyncTime = mRenderThread.timeLord().latestVsync();
        if (didDraw) {
            nsecs_t dequeueStart =
                    ANativeWindow_getLastDequeueStartTime(mNativeSurface->getNativeWindow());
            if (dequeueStart < mCurrentFrameInfo->get(FrameInfoIndex::SyncStart)) {
                // Ignoring dequeue duration as it happened prior to frame render start
                // and thus is not part of the frame.
                swap.dequeueDuration = 0;
            } else {
                swap.dequeueDuration =
                        ANativeWindow_getLastDequeueDuration(mNativeSurface->getNativeWindow());
            }
            swap.queueDuration =
                    ANativeWindow_getLastQueueDuration(mNativeSurface->getNativeWindow());
        } else {
            swap.dequeueDuration = 0;
            swap.queueDuration = 0;
        }
        mCurrentFrameInfo->set(FrameInfoIndex::DequeueBufferDuration) = swap.dequeueDuration;
        mCurrentFrameInfo->set(FrameInfoIndex::QueueBufferDuration) = swap.queueDuration;
        mHaveNewSurface = false;
        mFrameNumber = 0;
    } else {
        mCurrentFrameInfo->set(FrameInfoIndex::DequeueBufferDuration) = 0;
        mCurrentFrameInfo->set(FrameInfoIndex::QueueBufferDuration) = 0;
    }

    mCurrentFrameInfo->markSwapBuffersCompleted();

#if LOG_FRAMETIME_MMA
    float thisFrame = mCurrentFrameInfo->duration(FrameInfoIndex::IssueDrawCommandsStart,
                                                  FrameInfoIndex::FrameCompleted) /
                      NANOS_PER_MILLIS_F;
    if (sFrameCount) {
        sBenchMma = ((9 * sBenchMma) + thisFrame) / 10;
    } else {
        sBenchMma = thisFrame;
    }
    if (++sFrameCount == 10) {
        sFrameCount = 1;
        ALOGD("Average frame time: %.4f", sBenchMma);
    }
#endif

    if (didSwap) {
        for (auto& func : mFrameCommitCallbacks) {
            std::invoke(func, true /* didProduceBuffer */);
        }
        mFrameCommitCallbacks.clear();
    }

    if (requireSwap) {
        if (mExpectSurfaceStats) {
            reportMetricsWithPresentTime();
            { // acquire lock
                std::lock_guard lock(mLastFrameMetricsInfosMutex);
                FrameMetricsInfo& next = mLastFrameMetricsInfos.next();
                next.frameInfo = mCurrentFrameInfo;
                next.frameNumber = frameCompleteNr;
                next.surfaceId = mSurfaceControlGenerationId;
            } // release lock
        } else {
            mCurrentFrameInfo->markFrameCompleted();
            mCurrentFrameInfo->set(FrameInfoIndex::GpuCompleted)
                    = mCurrentFrameInfo->get(FrameInfoIndex::FrameCompleted);
            std::scoped_lock lock(mFrameInfoMutex);
            mJankTracker.finishFrame(*mCurrentFrameInfo, mFrameMetricsReporter, frameCompleteNr,
                                     mSurfaceControlGenerationId);
        }
    }

    int64_t intendedVsync = mCurrentFrameInfo->get(FrameInfoIndex::IntendedVsync);
    int64_t frameDeadline = mCurrentFrameInfo->get(FrameInfoIndex::FrameDeadline);
    int64_t dequeueBufferDuration = mCurrentFrameInfo->get(FrameInfoIndex::DequeueBufferDuration);

    if (Properties::calcWorkloadOrigDeadline()) {
        // Uses the unmodified frame deadline in calculating workload target duration
        mHintSessionWrapper->updateTargetWorkDuration(
                mCurrentFrameInfo->get(FrameInfoIndex::WorkloadTarget));
    } else {
        mHintSessionWrapper->updateTargetWorkDuration(frameDeadline - intendedVsync);
    }

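    // Report an estimate of this frame's CPU work duration to the hint session,
    // subtracting buffer-dequeue, sync-delay, and idle time from the total.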
    if (didDraw) {
        int64_t frameStartTime = mCurrentFrameInfo->get(FrameInfoIndex::FrameStartTime);
        int64_t frameDuration = systemTime(SYSTEM_TIME_MONOTONIC) - frameStartTime;
        int64_t actualDuration = frameDuration -
                                 (std::min(syncDelayDuration, mLastDequeueBufferDuration)) -
                                 dequeueBufferDuration - idleDuration;
        mHintSessionWrapper->reportActualWorkDuration(actualDuration);
        mHintSessionWrapper->setActiveFunctorThreads(
                WebViewFunctorManager::instance().getRenderingThreadsForActiveFunctors());
    }

    mLastDequeueBufferDuration = dequeueBufferDuration;

    mRenderThread.cacheManager().onFrameCompleted();
    return;
}

void CanvasContext::reportMetricsWithPresentTime() {
    { // acquire lock
        std::scoped_lock lock(mFrameInfoMutex);
        if (mFrameMetricsReporter == nullptr) {
            return;
        }
    } // release lock
    if (mNativeSurface == nullptr) {
        return;
    }
    ATRACE_CALL();
    FrameInfo* forthBehind;
    int64_t frameNumber;
    int32_t surfaceControlId;

    { // acquire lock
        std::scoped_lock lock(mLastFrameMetricsInfosMutex);
        if (mLastFrameMetricsInfos.size() != mLastFrameMetricsInfos.capacity()) {
            // Not enough frames yet
            return;
        }
        auto frameMetricsInfo = mLastFrameMetricsInfos.front();
        forthBehind = frameMetricsInfo.frameInfo;
        frameNumber = frameMetricsInfo.frameNumber;
        surfaceControlId = frameMetricsInfo.surfaceId;
    } // release lock

    nsecs_t presentTime = 0;
    native_window_get_frame_timestamps(
            mNativeSurface->getNativeWindow(), frameNumber, nullptr /*outRequestedPresentTime*/,
            nullptr /*outAcquireTime*/, nullptr /*outLatchTime*/,
            nullptr /*outFirstRefreshStartTime*/, nullptr /*outLastRefreshStartTime*/,
            nullptr /*outGpuCompositionDoneTime*/, &presentTime, nullptr /*outDequeueReadyTime*/,
            nullptr /*outReleaseTime*/);

    forthBehind->set(FrameInfoIndex::DisplayPresentTime) = presentTime;
    { // acquire lock
        std::scoped_lock lock(mFrameInfoMutex);
        if (mFrameMetricsReporter != nullptr) {
            mFrameMetricsReporter->reportFrameMetrics(forthBehind->data(), true /*hasPresentTime*/,
                                                      frameNumber, surfaceControlId);
        }
    } // release lock
}

void CanvasContext::addFrameMetricsObserver(sp<FrameMetricsObserver>&& observer) {
    std::scoped_lock lock(mFrameInfoMutex);
    if (mFrameMetricsReporter.get() == nullptr) {
        mFrameMetricsReporter.reset(new FrameMetricsReporter());
    }

    // We want to make sure we aren't reporting frames that have already been queued by the
    // BufferQueueProducer on the render thread but are still pending the callback to report
    // their frame metrics.
    uint64_t nextFrameNumber = getFrameNumber();
    observer->reportMetricsFrom(nextFrameNumber, mSurfaceControlGenerationId);
    mFrameMetricsReporter->addObserver(std::move(observer));
}

void CanvasContext::removeFrameMetricsObserver(const sp<FrameMetricsObserver>& observer) {
    std::scoped_lock lock(mFrameInfoMutex);
    if (mFrameMetricsReporter.get() != nullptr) {
        mFrameMetricsReporter->removeObserver(observer);
        if (!mFrameMetricsReporter->hasObservers()) {
            mFrameMetricsReporter.reset(nullptr);
        }
    }
}

FrameInfo* CanvasContext::getFrameInfoFromLastFew(uint64_t frameNumber, uint32_t surfaceControlId) {
    std::scoped_lock lock(mLastFrameMetricsInfosMutex);
    for (size_t i = 0; i < mLastFrameMetricsInfos.size(); i++) {
        if (mLastFrameMetricsInfos[i].frameNumber == frameNumber &&
            mLastFrameMetricsInfos[i].surfaceId == surfaceControlId) {
            return mLastFrameMetricsInfos[i].frameInfo;
        }
    }

    return nullptr;
}

void CanvasContext::onSurfaceStatsAvailable(void* context, int32_t surfaceControlId,
                                            const SurfaceStats& stats) {
#ifdef __ANDROID__
    auto* instance = static_cast<CanvasContext*>(context);

    nsecs_t gpuCompleteTime = -1L;
    if (const auto* fence = std::get_if<sp<Fence>>(&stats.acquireTimeOrFence)) {
        // We got a fence instead of the acquire time due to latching unsignaled.
        // Ideally the client could just get the acquire time directly from
        // the fence instead of calling this function which needs to block.
        (*fence)->waitForever("acquireFence");
        gpuCompleteTime = (*fence)->getSignalTime();
    } else {
        gpuCompleteTime = std::get<int64_t>(stats.acquireTimeOrFence);
    }

    if (gpuCompleteTime == Fence::SIGNAL_TIME_PENDING) {
        gpuCompleteTime = -1;
    }

    uint64_t frameNumber = stats.eventStats.frameNumber;

    FrameInfo* frameInfo = instance->getFrameInfoFromLastFew(frameNumber, surfaceControlId);

    if (frameInfo != nullptr) {
        std::scoped_lock lock(instance->mFrameInfoMutex);
        frameInfo->set(FrameInfoIndex::FrameCompleted) = std::max(gpuCompleteTime,
                frameInfo->get(FrameInfoIndex::SwapBuffersCompleted));
        frameInfo->set(FrameInfoIndex::GpuCompleted) = std::max(
                gpuCompleteTime, frameInfo->get(FrameInfoIndex::CommandSubmissionCompleted));
        instance->mJankTracker.finishFrame(*frameInfo, instance->mFrameMetricsReporter, frameNumber,
                                           surfaceControlId);
    }
#endif
}

// Called by choreographer to do an RT-driven animation
void CanvasContext::doFrame() {
    if (!mRenderPipeline->isSurfaceReady()) return;
    mIdleDuration =
            systemTime(SYSTEM_TIME_MONOTONIC) - mRenderThread.timeLord().computeFrameTimeNanos();
    prepareAndDraw(nullptr);
}

SkISize CanvasContext::getNextFrameSize() const {
    static constexpr SkISize defaultFrameSize = {INT32_MAX, INT32_MAX};
    if (mNativeSurface == nullptr) {
        return defaultFrameSize;
    }
    ANativeWindow* anw = mNativeSurface->getNativeWindow();

    SkISize size;
    size.fWidth = ANativeWindow_getWidth(anw);
    size.fHeight = ANativeWindow_getHeight(anw);
    mRenderThread.cacheManager().notifyNextFrameSize(size.fWidth, size.fHeight);
    return size;
}

const SkM44& CanvasContext::getPixelSnapMatrix() const {
    return mRenderPipeline->getPixelSnapMatrix();
}

void CanvasContext::prepareAndDraw(RenderNode* node) {
    int64_t vsyncId = mRenderThread.timeLord().lastVsyncId();
    ATRACE_FORMAT("%s %" PRId64, __func__, vsyncId);

    nsecs_t vsync = mRenderThread.timeLord().computeFrameTimeNanos();
    int64_t frameDeadline = mRenderThread.timeLord().lastFrameDeadline();
    int64_t frameInterval = mRenderThread.timeLord().frameIntervalNanos();
    int64_t frameInfo[UI_THREAD_FRAME_INFO_SIZE];
    UiFrameInfoBuilder(frameInfo)
            .addFlag(FrameInfoFlags::RTAnimation)
            .setVsync(vsync, vsync, vsyncId, frameDeadline, frameInterval);

    TreeInfo info(TreeInfo::MODE_RT_ONLY, *this);
    prepareTree(info, frameInfo, systemTime(SYSTEM_TIME_MONOTONIC), node);
    if (!info.out.skippedFrameReason) {
        draw(info.out.solelyTextureViewUpdates);
    } else {
        // wait on fences so tasks don't overlap next frame
        waitOnFences();
    }
}

void CanvasContext::markLayerInUse(RenderNode* node) {
    if (mPrefetchedLayers.erase(node)) {
        node->decStrong(nullptr);
    }
}

void CanvasContext::freePrefetchedLayers() {
    if (mPrefetchedLayers.size()) {
        for (auto& node : mPrefetchedLayers) {
            ALOGW("Incorrectly called buildLayer on View: %s, destroying layer...",
                  node->getName());
            node->destroyLayers();
            node->decStrong(nullptr);
        }
        mPrefetchedLayers.clear();
    }
}

void CanvasContext::buildLayer(RenderNode* node) {
    ATRACE_CALL();
    if (!mRenderPipeline->isContextReady()) return;

    // buildLayer() will leave the tree in an unknown state, so we must stop drawing
    stopDrawing();

    ScopedActiveContext activeContext(this);
    TreeInfo info(TreeInfo::MODE_FULL, *this);
    info.damageAccumulator = &mDamageAccumulator;
    info.layerUpdateQueue = &mLayerUpdateQueue;
    info.runAnimations = false;
    node->prepareTree(info);
    SkRect ignore;
    mDamageAccumulator.finish(&ignore);
    // Tickle the GENERIC property on node to mark it as dirty for damaging
    // purposes when the frame is actually drawn
    node->setPropertyFieldsDirty(RenderNode::GENERIC);

    mRenderPipeline->renderLayers(mLightGeometry, &mLayerUpdateQueue, mOpaque, mLightInfo);

    node->incStrong(nullptr);
    mPrefetchedLayers.insert(node);
}

void CanvasContext::destroyHardwareResources() {
    stopDrawing();
    if (mRenderPipeline->isContextReady()) {
        freePrefetchedLayers();
        for (const sp<RenderNode>& node : mRenderNodes) {
            node->destroyHardwareResources();
        }
        mRenderPipeline->onDestroyHardwareResources();
    }
}

void CanvasContext::onContextDestroyed() {
    // We don't want to destroyHardwareResources as that will invalidate display lists which
    // the client may not be expecting. Instead just purge all scratch resources
    if (mRenderPipeline->isContextReady()) {
        freePrefetchedLayers();
        for (const sp<RenderNode>& node : mRenderNodes) {
            node->destroyLayers();
        }
        mRenderPipeline->onDestroyHardwareResources();
    }
}

DeferredLayerUpdater* CanvasContext::createTextureLayer() {
    return mRenderPipeline->createTextureLayer();
}

void CanvasContext::dumpFrames(int fd) {
    mJankTracker.dumpStats(fd);
    mJankTracker.dumpFrames(fd);
}

void CanvasContext::resetFrameStats() {
    mJankTracker.reset();
}

void CanvasContext::setName(const std::string&& name) {
    mJankTracker.setDescription(JankTrackerType::Window, std::move(name));
}

void CanvasContext::waitOnFences() {
    if (mFrameFences.size()) {
        ATRACE_CALL();
        for (auto& fence : mFrameFences) {
            fence.get();
        }
        mFrameFences.clear();
    }
}

void CanvasContext::enqueueFrameWork(std::function<void()>&& func) {
    mFrameFences.push_back(CommonPool::async(std::move(func)));
}

uint64_t CanvasContext::getFrameNumber() {
    // mFrameNumber is reset to 0 when the surface changes or we swap buffers
    if (mFrameNumber == 0 && mNativeSurface.get()) {
        mFrameNumber = ANativeWindow_getNextFrameId(mNativeSurface->getNativeWindow());
    }
    return mFrameNumber;
}

bool CanvasContext::surfaceRequiresRedraw() {
    if (!mNativeSurface) return false;
    if (mHaveNewSurface) return true;

    ANativeWindow* anw = mNativeSurface->getNativeWindow();
    const int width = ANativeWindow_getWidth(anw);
    const int height = ANativeWindow_getHeight(anw);

    return width != mLastFrameWidth || height != mLastFrameHeight;
}

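// Given the app-reported damage in *dirty, returns the window area that changed this frame
// and expands *dirty to also cover damage from previous frames that the current buffer,
// based on its age, has not yet seen.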
SkRect CanvasContext::computeDirtyRect(const Frame& frame, SkRect* dirty) {
    if (frame.width() != mLastFrameWidth || frame.height() != mLastFrameHeight) {
        // can't rely on prior content of window if viewport size changes
        dirty->setEmpty();
        mLastFrameWidth = frame.width();
        mLastFrameHeight = frame.height();
    } else if (mHaveNewSurface || frame.bufferAge() == 0) {
        // New surface needs a full draw
        dirty->setEmpty();
    } else {
        if (!dirty->isEmpty() && !dirty->intersect(SkRect::MakeIWH(frame.width(), frame.height()))) {
            ALOGW("Dirty " RECT_STRING " doesn't intersect with 0 0 %d %d ?", SK_RECT_ARGS(*dirty),
                  frame.width(), frame.height());
            dirty->setEmpty();
        }
        profiler().unionDirty(dirty);
    }

    if (dirty->isEmpty()) {
        dirty->setIWH(frame.width(), frame.height());
        return *dirty;
    }

    // At this point dirty is the area of the window to update. However,
    // the area of the frame we need to repaint is potentially different, so
    // stash the screen area for later
    SkRect windowDirty(*dirty);

    // If the buffer age is 0 we do a full-screen repaint (handled above)
    // If the buffer age is 1 the buffer contents are the same as they were
    // last frame so there's nothing to union() against
    // Therefore we only care about the > 1 case.
    if (frame.bufferAge() > 1) {
        if (frame.bufferAge() > (int)mSwapHistory.size()) {
            // We don't have enough history to handle this old of a buffer
            // Just do a full-draw
            dirty->setIWH(frame.width(), frame.height());
        } else {
            // At this point we haven't yet added the latest frame to the damage
            // history (that happens below), so union in the damage from the frames
            // this buffer hasn't seen yet.
            for (int i = mSwapHistory.size() - 1;
                 i > ((int)mSwapHistory.size()) - frame.bufferAge(); i--) {
                dirty->join(mSwapHistory[i].damage);
            }
        }
    }

    return windowDirty;
}

CanvasContext* CanvasContext::getActiveContext() {
    return ScopedActiveContext::getActiveContext();
}

bool CanvasContext::mergeTransaction(ASurfaceTransaction* transaction,
                                     const sp<SurfaceControl>& control) {
    if (!mASurfaceTransactionCallback) return false;
    return std::invoke(mASurfaceTransactionCallback, reinterpret_cast<int64_t>(transaction),
                       reinterpret_cast<int64_t>(control.get()), getFrameNumber());
}

void CanvasContext::prepareSurfaceControlForWebview() {
    if (mPrepareSurfaceControlForWebviewCallback) {
        std::invoke(mPrepareSurfaceControlForWebviewCallback);
    }
}

void CanvasContext::sendLoadResetHint() {
    mHintSessionWrapper->sendLoadResetHint();
}

void CanvasContext::sendLoadIncreaseHint() {
    mHintSessionWrapper->sendLoadIncreaseHint();
}

void CanvasContext::setSyncDelayDuration(nsecs_t duration) {
    mSyncDelayDuration = duration;
}

void CanvasContext::startHintSession() {
    mHintSessionWrapper->init();
}

bool CanvasContext::shouldDither() {
    CanvasContext* self = getActiveContext();
    if (!self) return false;
    return self->mColorMode != ColorMode::Default;
}

void CanvasContext::visitAllRenderNodes(std::function<void(const RenderNode&)> func) const {
    for (auto node : mRenderNodes) {
        node->visit(func);
    }
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */