/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "CanvasContext.h"

#include <apex/window.h>
#include <fcntl.h>
#include <strings.h>
#include <sys/stat.h>

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>

#include <gui/TraceUtils.h>
#include "../Properties.h"
#include "AnimationContext.h"
#include "Frame.h"
#include "LayerUpdateQueue.h"
#include "Properties.h"
#include "RenderThread.h"
#include "hwui/Canvas.h"
#include "pipeline/skia/SkiaOpenGLPipeline.h"
#include "pipeline/skia/SkiaPipeline.h"
#include "pipeline/skia/SkiaVulkanPipeline.h"
#include "thread/CommonPool.h"
#include "utils/GLUtils.h"
#include "utils/TimeUtils.h"

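// These levels mirror the TRIM_MEMORY_COMPLETE (80) and TRIM_MEMORY_UI_HIDDEN (20) constants
// defined by android.content.ComponentCallbacks2 on the Java side; trimMemory() below compares
// the level it receives against them.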
#define TRIM_MEMORY_COMPLETE 80
#define TRIM_MEMORY_UI_HIDDEN 20

#define LOG_FRAMETIME_MMA 0

#if LOG_FRAMETIME_MMA
static float sBenchMma = 0;
static int sFrameCount = 0;
static const float NANOS_PER_MILLIS_F = 1000000.0f;
#endif

namespace android {
namespace uirenderer {
namespace renderthread {

namespace {
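// RAII helper tracking which CanvasContext is currently drawing on the RenderThread.
// draw() installs the context for its duration so that code running during the frame
// (see CanvasContext::getActiveContext() near the end of this file) can reach it without
// threading a pointer through every call.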
class ScopedActiveContext {
public:
    ScopedActiveContext(CanvasContext* context) { sActiveContext = context; }

    ~ScopedActiveContext() { sActiveContext = nullptr; }

    static CanvasContext* getActiveContext() { return sActiveContext; }

private:
    static CanvasContext* sActiveContext;
};

CanvasContext* ScopedActiveContext::sActiveContext = nullptr;
} /* namespace */

CanvasContext* CanvasContext::create(RenderThread& thread, bool translucent,
                                     RenderNode* rootRenderNode, IContextFactory* contextFactory) {
    auto renderType = Properties::getRenderPipelineType();

    switch (renderType) {
        case RenderPipelineType::SkiaGL:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaOpenGLPipeline>(thread));
        case RenderPipelineType::SkiaVulkan:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaVulkanPipeline>(thread));
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
            break;
    }
    return nullptr;
}

void CanvasContext::invokeFunctor(const RenderThread& thread, Functor* functor) {
    ATRACE_CALL();
    auto renderType = Properties::getRenderPipelineType();
    switch (renderType) {
        case RenderPipelineType::SkiaGL:
            skiapipeline::SkiaOpenGLPipeline::invokeFunctor(thread, functor);
            break;
        case RenderPipelineType::SkiaVulkan:
            skiapipeline::SkiaVulkanPipeline::invokeFunctor(thread, functor);
            break;
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
            break;
    }
}

void CanvasContext::prepareToDraw(const RenderThread& thread, Bitmap* bitmap) {
    skiapipeline::SkiaPipeline::prepareToDraw(thread, bitmap);
}

CanvasContext::CanvasContext(RenderThread& thread, bool translucent, RenderNode* rootRenderNode,
                             IContextFactory* contextFactory,
                             std::unique_ptr<IRenderPipeline> renderPipeline)
        : mRenderThread(thread)
        , mGenerationID(0)
        , mOpaque(!translucent)
        , mAnimationContext(contextFactory->createAnimationContext(mRenderThread.timeLord()))
        , mJankTracker(&thread.globalProfileData())
        , mProfiler(mJankTracker.frames(), thread.timeLord().frameIntervalNanos())
        , mContentDrawBounds(0, 0, 0, 0)
        , mRenderPipeline(std::move(renderPipeline)) {
    rootRenderNode->makeRoot();
    mRenderNodes.emplace_back(rootRenderNode);
    mProfiler.setDensity(DeviceInfo::getDensity());
}

CanvasContext::~CanvasContext() {
    destroy();
    for (auto& node : mRenderNodes) {
        node->clearRoot();
    }
    mRenderNodes.clear();
}

void CanvasContext::addRenderNode(RenderNode* node, bool placeFront) {
    int pos = placeFront ? 0 : static_cast<int>(mRenderNodes.size());
    node->makeRoot();
    mRenderNodes.emplace(mRenderNodes.begin() + pos, node);
}

void CanvasContext::removeRenderNode(RenderNode* node) {
    node->clearRoot();
    mRenderNodes.erase(std::remove(mRenderNodes.begin(), mRenderNodes.end(), node),
                       mRenderNodes.end());
}

void CanvasContext::destroy() {
    stopDrawing();
    setSurface(nullptr);
    setSurfaceControl(nullptr);
    freePrefetchedLayers();
    destroyHardwareResources();
    mAnimationContext->destroy();
}

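// Queries how many undequeued buffers the window needs and sizes the buffer queue to that plus
// two extra (roughly: one buffer being rendered into and one queued for presentation). When
// render-ahead is in use it is already reflected in NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, so it
// does not need to be added again here (see the comment inside the function).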
static void setBufferCount(ANativeWindow* window) {
    int query_value;
    int err = window->query(window, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &query_value);
    if (err != 0 || query_value < 0) {
        ALOGE("window->query failed: %s (%d) value=%d", strerror(-err), err, query_value);
        return;
    }
    auto min_undequeued_buffers = static_cast<uint32_t>(query_value);

    // We only need to set min_undequeued + 2 because the renderahead amount was already
    // factored into the query for min_undequeued.
    int bufferCount = min_undequeued_buffers + 2;
    native_window_set_buffer_count(window, bufferCount);
}

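// Binds (or clears) the ANativeWindow this context renders into. The raw window is wrapped in a
// ReliableSurface so dequeue/queue errors can be intercepted (see getAndClearError() in draw()),
// and an optional dequeue timeout keeps a stalled buffer queue from blocking the RenderThread
// indefinitely.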
void CanvasContext::setSurface(ANativeWindow* window, bool enableTimeout) {
    ATRACE_CALL();

    if (window) {
        mNativeSurface = std::make_unique<ReliableSurface>(window);
        mNativeSurface->init();
        if (enableTimeout) {
            // TODO: Fix error handling & re-shorten timeout
            ANativeWindow_setDequeueTimeout(window, 4000_ms);
        }
    } else {
        mNativeSurface = nullptr;
    }
    setupPipelineSurface();
}

void CanvasContext::setSurfaceControl(ASurfaceControl* surfaceControl) {
    if (surfaceControl == mSurfaceControl) return;

    auto funcs = mRenderThread.getASurfaceControlFunctions();

    if (surfaceControl == nullptr) {
        setASurfaceTransactionCallback(nullptr);
        setPrepareSurfaceControlForWebviewCallback(nullptr);
    }

    if (mSurfaceControl != nullptr) {
        funcs.unregisterListenerFunc(this, &onSurfaceStatsAvailable);
        funcs.releaseFunc(mSurfaceControl);
    }
    mSurfaceControl = surfaceControl;
    mSurfaceControlGenerationId++;
    mExpectSurfaceStats = surfaceControl != nullptr;
    if (mSurfaceControl != nullptr) {
        funcs.acquireFunc(mSurfaceControl);
        funcs.registerListenerFunc(surfaceControl, this, &onSurfaceStatsAvailable);
    }
}

void CanvasContext::setupPipelineSurface() {
    bool hasSurface = mRenderPipeline->setSurface(
            mNativeSurface ? mNativeSurface->getNativeWindow() : nullptr, mSwapBehavior);

    if (mNativeSurface && !mNativeSurface->didSetExtraBuffers()) {
        setBufferCount(mNativeSurface->getNativeWindow());
    }

    mFrameNumber = -1;

    if (mNativeSurface != nullptr && hasSurface) {
        mHaveNewSurface = true;
        mSwapHistory.clear();
        // Enable frame stats after the surface has been bound to the appropriate graphics API.
        // Order is important when new and old surfaces are the same, because the old surface has
        // its frame stats disabled automatically.
        native_window_enable_frame_timestamps(mNativeSurface->getNativeWindow(), true);
    } else {
        mRenderThread.removeFrameCallback(this);
        mGenerationID++;
    }
}

void CanvasContext::setSwapBehavior(SwapBehavior swapBehavior) {
    mSwapBehavior = swapBehavior;
}

bool CanvasContext::pauseSurface() {
    mGenerationID++;
    return mRenderThread.removeFrameCallback(this);
}

void CanvasContext::setStopped(bool stopped) {
    if (mStopped != stopped) {
        mStopped = stopped;
        if (mStopped) {
            mGenerationID++;
            mRenderThread.removeFrameCallback(this);
            mRenderPipeline->onStop();
        } else if (mIsDirty && hasSurface()) {
            mRenderThread.postFrameCallback(this);
        }
    }
}

void CanvasContext::allocateBuffers() {
    if (mNativeSurface) {
        ANativeWindow_tryAllocateBuffers(mNativeSurface->getNativeWindow());
    }
}

void CanvasContext::setLightAlpha(uint8_t ambientShadowAlpha, uint8_t spotShadowAlpha) {
    mLightInfo.ambientShadowAlpha = ambientShadowAlpha;
    mLightInfo.spotShadowAlpha = spotShadowAlpha;
}

void CanvasContext::setLightGeometry(const Vector3& lightCenter, float lightRadius) {
    mLightGeometry.center = lightCenter;
    mLightGeometry.radius = lightRadius;
}

void CanvasContext::setOpaque(bool opaque) {
    mOpaque = opaque;
}

void CanvasContext::setColorMode(ColorMode mode) {
    mRenderPipeline->setSurfaceColorProperties(mode);
    setupPipelineSurface();
}

bool CanvasContext::makeCurrent() {
    if (mStopped) return false;

    auto result = mRenderPipeline->makeCurrent();
    switch (result) {
        case MakeCurrentResult::AlreadyCurrent:
            return true;
        case MakeCurrentResult::Failed:
            mHaveNewSurface = true;
            setSurface(nullptr);
            return false;
        case MakeCurrentResult::Succeeded:
            mHaveNewSurface = true;
            return true;
        default:
            LOG_ALWAYS_FATAL("unexpected result %d from IRenderPipeline::makeCurrent",
                             (int32_t)result);
    }

    return true;
}

static bool wasSkipped(FrameInfo* info) {
    return info && ((*info)[FrameInfoIndex::Flags] & FrameInfoFlags::SkippedFrame);
}

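// Heuristic for detecting a backed-up buffer queue: once the swap history window is full, the
// chain is considered "stuffed" only if every recorded frame had a slow dequeue or queue time
// and the frames were produced back to back (a multi-vsync gap means a frame was effectively
// already dropped, so the queue is treated as healthy).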
bool CanvasContext::isSwapChainStuffed() {
    static const auto SLOW_THRESHOLD = 6_ms;

    if (mSwapHistory.size() != mSwapHistory.capacity()) {
        // We want at least 3 frames of history before attempting to
        // guess if the queue is stuffed
        return false;
    }
    nsecs_t frameInterval = mRenderThread.timeLord().frameIntervalNanos();
    auto& swapA = mSwapHistory[0];

    // Was there a happy queue & dequeue time? If so, don't
    // consider it stuffed
    if (swapA.dequeueDuration < SLOW_THRESHOLD && swapA.queueDuration < SLOW_THRESHOLD) {
        return false;
    }

    for (size_t i = 1; i < mSwapHistory.size(); i++) {
        auto& swapB = mSwapHistory[i];

        // If there's a multi-frameInterval gap we effectively already dropped a frame,
        // so consider the queue healthy.
        if (std::abs(swapA.swapCompletedTime - swapB.swapCompletedTime) > frameInterval * 3) {
            return false;
        }

        // Was there a happy queue & dequeue time? If so, don't
        // consider it stuffed
        if (swapB.dequeueDuration < SLOW_THRESHOLD && swapB.queueDuration < SLOW_THRESHOLD) {
            return false;
        }

        swapA = swapB;
    }

    // All signs point to a stuffed swap chain
    ATRACE_NAME("swap chain stuffed");
    return true;
}

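// Sync stage of a frame: imports the UI thread's frame info, runs the animation and render-node
// prepare passes, then decides whether this frame can actually be drawn. The frame is skipped if
// there is no surface, if a frame was already drawn for this vsync pulse, if a backdrop node is
// present but not yet renderable, or if the window cannot reserve a buffer to draw into.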
void CanvasContext::prepareTree(TreeInfo& info, int64_t* uiFrameInfo, int64_t syncQueued,
                                RenderNode* target) {
    mRenderThread.removeFrameCallback(this);

    // If the previous frame was dropped we don't need to hold onto it, so
    // just keep using the previous frame's structure instead
    if (!wasSkipped(mCurrentFrameInfo)) {
        mCurrentFrameInfo = mJankTracker.startFrame();
    }

    mCurrentFrameInfo->importUiThreadInfo(uiFrameInfo);
    mCurrentFrameInfo->set(FrameInfoIndex::SyncQueued) = syncQueued;
    mCurrentFrameInfo->markSyncStart();

    info.damageAccumulator = &mDamageAccumulator;
    info.layerUpdateQueue = &mLayerUpdateQueue;
    info.damageGenerationId = mDamageId++;
    info.out.canDrawThisFrame = true;

    mAnimationContext->startFrame(info.mode);
    for (const sp<RenderNode>& node : mRenderNodes) {
        // Only the primary target node will be drawn in full - all other nodes get drawn in
        // real time mode. In case of a window, the primary node is the window content and the
        // other node(s) are non client / filler nodes.
        info.mode = (node.get() == target ? TreeInfo::MODE_FULL : TreeInfo::MODE_RT_ONLY);
        node->prepareTree(info);
        GL_CHECKPOINT(MODERATE);
    }
    mAnimationContext->runRemainingAnimations(info);
    GL_CHECKPOINT(MODERATE);

    freePrefetchedLayers();
    GL_CHECKPOINT(MODERATE);

    mIsDirty = true;

    if (CC_UNLIKELY(!hasSurface())) {
        mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
        info.out.canDrawThisFrame = false;
        return;
    }

    if (CC_LIKELY(mSwapHistory.size() && !Properties::forceDrawFrame)) {
        nsecs_t latestVsync = mRenderThread.timeLord().latestVsync();
        SwapHistory& lastSwap = mSwapHistory.back();
        nsecs_t vsyncDelta = std::abs(lastSwap.vsyncTime - latestVsync);
        // The slight fudge-factor is to deal with cases where
        // the vsync was estimated due to being slow handling the signal.
        // See the logic in TimeLord#computeFrameTimeNanos or in
        // Choreographer.java for details on when this happens
        if (vsyncDelta < 2_ms) {
            // Already drew for this vsync pulse, UI draw request missed
            // the deadline for RT animations
            info.out.canDrawThisFrame = false;
        }
    } else {
        info.out.canDrawThisFrame = true;
    }

    // TODO: Do we need to abort out if the backdrop is added but not ready? Should that even
    // be an allowable combination?
    if (mRenderNodes.size() > 2 && !mRenderNodes[1]->isRenderable()) {
        info.out.canDrawThisFrame = false;
    }

    if (info.out.canDrawThisFrame) {
        int err = mNativeSurface->reserveNext();
        if (err != OK) {
            mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
            info.out.canDrawThisFrame = false;
            ALOGW("reserveNext failed, error = %d (%s)", err, strerror(-err));
            if (err != TIMED_OUT) {
                // A timed out surface can still recover, but assume others are permanently dead.
                setSurface(nullptr);
                return;
            }
        }
    } else {
        mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
    }

    bool postedFrameCallback = false;
    if (info.out.hasAnimations || !info.out.canDrawThisFrame) {
        if (CC_UNLIKELY(!Properties::enableRTAnimations)) {
            info.out.requiresUiRedraw = true;
        }
        if (!info.out.requiresUiRedraw) {
            // If animationsNeedsRedraw is set don't bother posting for an RT anim
            // as we will just end up fighting the UI thread.
            mRenderThread.postFrameCallback(this);
            postedFrameCallback = true;
        }
    }

    if (!postedFrameCallback &&
        info.out.animatedImageDelay != TreeInfo::Out::kNoAnimatedImageDelay) {
        // Subtract the time of one frame so it can be displayed on time.
        const nsecs_t kFrameTime = mRenderThread.timeLord().frameIntervalNanos();
        if (info.out.animatedImageDelay <= kFrameTime) {
            mRenderThread.postFrameCallback(this);
        } else {
            const auto delay = info.out.animatedImageDelay - kFrameTime;
            int genId = mGenerationID;
            mRenderThread.queue().postDelayed(delay, [this, genId]() {
                if (mGenerationID == genId) {
                    mRenderThread.postFrameCallback(this);
                }
            });
        }
    }
}

void CanvasContext::stopDrawing() {
    cleanupResources();
    mRenderThread.removeFrameCallback(this);
    mAnimationContext->pauseAnimators();
    mGenerationID++;
}

void CanvasContext::notifyFramePending() {
    ATRACE_CALL();
    mRenderThread.pushBackFrameCallback(this);
}

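// Draw stage of a frame: resolves the accumulated damage, skips truly empty frames, hands the
// dirty rect to the render pipeline, swaps buffers, and records the swap in mSwapHistory for the
// buffer-age and stuffed-swap-chain heuristics above. Returns the frame's dequeue-buffer
// duration, presumably so the caller can account for time spent blocked on the buffer queue.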
nsecs_t CanvasContext::draw() {
    if (auto grContext = getGrContext()) {
        if (grContext->abandoned()) {
            LOG_ALWAYS_FATAL("GrContext is abandoned/device lost at start of CanvasContext::draw");
            return 0;
        }
    }
    SkRect dirty;
    mDamageAccumulator.finish(&dirty);

    if (dirty.isEmpty() && Properties::skipEmptyFrames && !surfaceRequiresRedraw()) {
        mCurrentFrameInfo->addFlag(FrameInfoFlags::SkippedFrame);
        if (auto grContext = getGrContext()) {
            // Submit to ensure that any texture uploads complete and Skia can
            // free its staging buffers.
            grContext->flushAndSubmit();
        }

        // Notify the callbacks, even if there's nothing to draw so they aren't waiting
        // indefinitely
        waitOnFences();
        for (auto& func : mFrameCompleteCallbacks) {
            std::invoke(func, mFrameNumber);
        }
        mFrameCompleteCallbacks.clear();
        return 0;
    }

    ScopedActiveContext activeContext(this);
    mCurrentFrameInfo->set(FrameInfoIndex::FrameInterval) =
            mRenderThread.timeLord().frameIntervalNanos();

    mCurrentFrameInfo->markIssueDrawCommandsStart();

    Frame frame = mRenderPipeline->getFrame();
    SkRect windowDirty = computeDirtyRect(frame, &dirty);

    bool drew = mRenderPipeline->draw(frame, windowDirty, dirty, mLightGeometry, &mLayerUpdateQueue,
                                      mContentDrawBounds, mOpaque, mLightInfo, mRenderNodes,
                                      &(profiler()));

    int64_t frameCompleteNr = getFrameNumber();

    waitOnFences();

    if (mNativeSurface) {
        // TODO(b/165985262): measure performance impact
        const auto vsyncId = mCurrentFrameInfo->get(FrameInfoIndex::FrameTimelineVsyncId);
        if (vsyncId != UiFrameInfoBuilder::INVALID_VSYNC_ID) {
            const auto inputEventId =
                    static_cast<int32_t>(mCurrentFrameInfo->get(FrameInfoIndex::InputEventId));
            native_window_set_frame_timeline_info(mNativeSurface->getNativeWindow(), vsyncId,
                                                  inputEventId);
        }
    }

    bool requireSwap = false;
    int error = OK;
    bool didSwap =
            mRenderPipeline->swapBuffers(frame, drew, windowDirty, mCurrentFrameInfo, &requireSwap);

    mIsDirty = false;

    if (requireSwap) {
        bool didDraw = true;
        // Handle any swapchain errors
        error = mNativeSurface->getAndClearError();
        if (error == TIMED_OUT) {
            // Try again
            mRenderThread.postFrameCallback(this);
            // But since this frame didn't happen, we need to mark full damage in the swap
            // history
            didDraw = false;
        } else if (error != OK || !didSwap) {
            // Unknown error, abandon the surface
            setSurface(nullptr);
            didDraw = false;
        }

        SwapHistory& swap = mSwapHistory.next();
        if (didDraw) {
            swap.damage = windowDirty;
        } else {
            float max = static_cast<float>(INT_MAX);
            swap.damage = SkRect::MakeWH(max, max);
        }
        swap.swapCompletedTime = systemTime(SYSTEM_TIME_MONOTONIC);
        swap.vsyncTime = mRenderThread.timeLord().latestVsync();
        if (didDraw) {
            nsecs_t dequeueStart =
                    ANativeWindow_getLastDequeueStartTime(mNativeSurface->getNativeWindow());
            if (dequeueStart < mCurrentFrameInfo->get(FrameInfoIndex::SyncStart)) {
                // Ignoring dequeue duration as it happened prior to frame render start
                // and thus is not part of the frame.
                swap.dequeueDuration = 0;
            } else {
                swap.dequeueDuration =
                        ANativeWindow_getLastDequeueDuration(mNativeSurface->getNativeWindow());
            }
            swap.queueDuration =
                    ANativeWindow_getLastQueueDuration(mNativeSurface->getNativeWindow());
        } else {
            swap.dequeueDuration = 0;
            swap.queueDuration = 0;
        }
        mCurrentFrameInfo->set(FrameInfoIndex::DequeueBufferDuration) = swap.dequeueDuration;
        mCurrentFrameInfo->set(FrameInfoIndex::QueueBufferDuration) = swap.queueDuration;
        mHaveNewSurface = false;
        mFrameNumber = -1;
    } else {
        mCurrentFrameInfo->set(FrameInfoIndex::DequeueBufferDuration) = 0;
        mCurrentFrameInfo->set(FrameInfoIndex::QueueBufferDuration) = 0;
    }

    mCurrentFrameInfo->markSwapBuffersCompleted();

#if LOG_FRAMETIME_MMA
    float thisFrame = mCurrentFrameInfo->duration(FrameInfoIndex::IssueDrawCommandsStart,
                                                  FrameInfoIndex::FrameCompleted) /
                      NANOS_PER_MILLIS_F;
    if (sFrameCount) {
        sBenchMma = ((9 * sBenchMma) + thisFrame) / 10;
    } else {
        sBenchMma = thisFrame;
    }
    if (++sFrameCount == 10) {
        sFrameCount = 1;
        ALOGD("Average frame time: %.4f", sBenchMma);
    }
#endif

    if (didSwap) {
        for (auto& func : mFrameCompleteCallbacks) {
            std::invoke(func, frameCompleteNr);
        }
        mFrameCompleteCallbacks.clear();
    }

    if (requireSwap) {
        if (mExpectSurfaceStats) {
            reportMetricsWithPresentTime();
            std::lock_guard lock(mLast4FrameInfosMutex);
            std::pair<FrameInfo*, int64_t>& next = mLast4FrameInfos.next();
            next.first = mCurrentFrameInfo;
            next.second = frameCompleteNr;
        } else {
            mCurrentFrameInfo->markFrameCompleted();
            mCurrentFrameInfo->set(FrameInfoIndex::GpuCompleted) =
                    mCurrentFrameInfo->get(FrameInfoIndex::FrameCompleted);
            std::scoped_lock lock(mFrameMetricsReporterMutex);
            mJankTracker.finishFrame(*mCurrentFrameInfo, mFrameMetricsReporter);
        }
    }

    cleanupResources();
    mRenderThread.cacheManager().onFrameCompleted();
    return mCurrentFrameInfo->get(FrameInfoIndex::DequeueBufferDuration);
}

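// Called at the end of draw() and from stopDrawing(). Once the jank tracker's ring buffer of
// frames is full, it computes a cleanup age (how long ago the oldest tracked frame completed, at
// least 10 seconds) and passes it to CacheManager::performDeferredCleanup().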
void CanvasContext::cleanupResources() {
    auto& tracker = mJankTracker.frames();
    auto size = tracker.size();
    auto capacity = tracker.capacity();
    if (size == capacity) {
        nsecs_t nowNanos = systemTime(SYSTEM_TIME_MONOTONIC);
        nsecs_t frameCompleteNanos = tracker[0].get(FrameInfoIndex::FrameCompleted);
        nsecs_t frameDiffNanos = nowNanos - frameCompleteNanos;
        nsecs_t cleanupMillis = ns2ms(std::max(frameDiffNanos, 10_s));
        mRenderThread.cacheManager().performDeferredCleanup(cleanupMillis);
    }
}

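// Frame metrics that include a display present time are reported a few frames late: once
// mLast4FrameInfos is full, the oldest entry (four frames back) is looked up, its present time
// is read via native_window_get_frame_timestamps(), and the result is forwarded to the
// FrameMetricsReporter.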
void CanvasContext::reportMetricsWithPresentTime() {
    {  // acquire lock
        std::scoped_lock lock(mFrameMetricsReporterMutex);
        if (mFrameMetricsReporter == nullptr) {
            return;
        }
    }  // release lock
    if (mNativeSurface == nullptr) {
        return;
    }
    ATRACE_CALL();
    FrameInfo* forthBehind;
    int64_t frameNumber;
    {  // acquire lock
        std::scoped_lock lock(mLast4FrameInfosMutex);
        if (mLast4FrameInfos.size() != mLast4FrameInfos.capacity()) {
            // Not enough frames yet
            return;
        }
        // Surface object keeps stats for the last 8 frames.
        std::tie(forthBehind, frameNumber) = mLast4FrameInfos.front();
    }  // release lock

    nsecs_t presentTime = 0;
    native_window_get_frame_timestamps(
            mNativeSurface->getNativeWindow(), frameNumber, nullptr /*outRequestedPresentTime*/,
            nullptr /*outAcquireTime*/, nullptr /*outLatchTime*/,
            nullptr /*outFirstRefreshStartTime*/, nullptr /*outLastRefreshStartTime*/,
            nullptr /*outGpuCompositionDoneTime*/, &presentTime, nullptr /*outDequeueReadyTime*/,
            nullptr /*outReleaseTime*/);

    forthBehind->set(FrameInfoIndex::DisplayPresentTime) = presentTime;
    {  // acquire lock
        std::scoped_lock lock(mFrameMetricsReporterMutex);
        if (mFrameMetricsReporter != nullptr) {
            mFrameMetricsReporter->reportFrameMetrics(forthBehind->data(), true /*hasPresentTime*/);
        }
    }  // release lock
}

FrameInfo* CanvasContext::getFrameInfoFromLast4(uint64_t frameNumber) {
    std::scoped_lock lock(mLast4FrameInfosMutex);
    for (size_t i = 0; i < mLast4FrameInfos.size(); i++) {
        if (mLast4FrameInfos[i].second == frameNumber) {
            return mLast4FrameInfos[i].first;
        }
    }
    return nullptr;
}

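// Callback registered with the ASurfaceControl in setSurfaceControl(), invoked when surface
// stats for a presented frame become available. The acquire time is treated as the GPU-completed
// timestamp, matched back to the FrameInfo recorded for that frame number, and the completed
// frame is then fed to the jank tracker.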
void CanvasContext::onSurfaceStatsAvailable(void* context, ASurfaceControl* control,
                                            ASurfaceControlStats* stats) {
    CanvasContext* instance = static_cast<CanvasContext*>(context);

    const ASurfaceControlFunctions& functions =
            instance->mRenderThread.getASurfaceControlFunctions();

    nsecs_t gpuCompleteTime = functions.getAcquireTimeFunc(stats);
    uint64_t frameNumber = functions.getFrameNumberFunc(stats);

    FrameInfo* frameInfo = instance->getFrameInfoFromLast4(frameNumber);

    if (frameInfo != nullptr) {
        frameInfo->set(FrameInfoIndex::FrameCompleted) =
                std::max(gpuCompleteTime, frameInfo->get(FrameInfoIndex::SwapBuffersCompleted));
        frameInfo->set(FrameInfoIndex::GpuCompleted) = gpuCompleteTime;
        std::scoped_lock lock(instance->mFrameMetricsReporterMutex);
        instance->mJankTracker.finishFrame(*frameInfo, instance->mFrameMetricsReporter);
    }
}

// Called by choreographer to do an RT-driven animation
void CanvasContext::doFrame() {
    if (!mRenderPipeline->isSurfaceReady()) return;
    prepareAndDraw(nullptr);
}

SkISize CanvasContext::getNextFrameSize() const {
    static constexpr SkISize defaultFrameSize = {INT32_MAX, INT32_MAX};
    if (mNativeSurface == nullptr) {
        return defaultFrameSize;
    }
    ANativeWindow* anw = mNativeSurface->getNativeWindow();

    SkISize size;
    size.fWidth = ANativeWindow_getWidth(anw);
    size.fHeight = ANativeWindow_getHeight(anw);
    return size;
}

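// Builds a synthetic UI-thread frame info block flagged as an RT-driven animation frame, then
// runs the same prepare/draw sequence a normal frame would. Used by doFrame() above when the
// choreographer drives animations directly on the RenderThread.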
void CanvasContext::prepareAndDraw(RenderNode* node) {
    ATRACE_CALL();

    nsecs_t vsync = mRenderThread.timeLord().computeFrameTimeNanos();
    int64_t vsyncId = mRenderThread.timeLord().lastVsyncId();
    int64_t frameDeadline = mRenderThread.timeLord().lastFrameDeadline();
    int64_t frameInterval = mRenderThread.timeLord().frameIntervalNanos();
    int64_t frameInfo[UI_THREAD_FRAME_INFO_SIZE];
    UiFrameInfoBuilder(frameInfo)
            .addFlag(FrameInfoFlags::RTAnimation)
            .setVsync(vsync, vsync, vsyncId, frameDeadline, frameInterval);

    TreeInfo info(TreeInfo::MODE_RT_ONLY, *this);
    prepareTree(info, frameInfo, systemTime(SYSTEM_TIME_MONOTONIC), node);
    if (info.out.canDrawThisFrame) {
        draw();
    } else {
        // wait on fences so tasks don't overlap next frame
        waitOnFences();
    }
}

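// Prefetched layer bookkeeping: buildLayer() below renders a node's layer ahead of time and
// holds a strong reference on the node in mPrefetchedLayers. markLayerInUse() drops that
// reference once the node is actually used in a frame; any entries still in the set when the
// next frame is prepared were built for views that never drew, so freePrefetchedLayers()
// destroys them and logs a warning.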
void CanvasContext::markLayerInUse(RenderNode* node) {
    if (mPrefetchedLayers.erase(node)) {
        node->decStrong(nullptr);
    }
}

void CanvasContext::freePrefetchedLayers() {
    if (mPrefetchedLayers.size()) {
        for (auto& node : mPrefetchedLayers) {
            ALOGW("Incorrectly called buildLayer on View: %s, destroying layer...",
                  node->getName());
            node->destroyLayers();
            node->decStrong(nullptr);
        }
        mPrefetchedLayers.clear();
    }
}

void CanvasContext::buildLayer(RenderNode* node) {
    ATRACE_CALL();
    if (!mRenderPipeline->isContextReady()) return;

    // buildLayer() will leave the tree in an unknown state, so we must stop drawing
    stopDrawing();

    TreeInfo info(TreeInfo::MODE_FULL, *this);
    info.damageAccumulator = &mDamageAccumulator;
    info.layerUpdateQueue = &mLayerUpdateQueue;
    info.runAnimations = false;
    node->prepareTree(info);
    SkRect ignore;
    mDamageAccumulator.finish(&ignore);
    // Tickle the GENERIC property on node to mark it as dirty for damaging
    // purposes when the frame is actually drawn
    node->setPropertyFieldsDirty(RenderNode::GENERIC);

    mRenderPipeline->renderLayers(mLightGeometry, &mLayerUpdateQueue, mOpaque, mLightInfo);

    node->incStrong(nullptr);
    mPrefetchedLayers.insert(node);
}

void CanvasContext::destroyHardwareResources() {
    stopDrawing();
    if (mRenderPipeline->isContextReady()) {
        freePrefetchedLayers();
        for (const sp<RenderNode>& node : mRenderNodes) {
            node->destroyHardwareResources();
        }
        mRenderPipeline->onDestroyHardwareResources();
    }
}

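// Responds to the process-level trim-memory signal. At TRIM_MEMORY_COMPLETE the entire rendering
// context is torn down; at TRIM_MEMORY_UI_HIDDEN only the caches are trimmed.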
void CanvasContext::trimMemory(RenderThread& thread, int level) {
    ATRACE_CALL();
    if (!thread.getGrContext()) return;
    if (level >= TRIM_MEMORY_COMPLETE) {
        thread.cacheManager().trimMemory(CacheManager::TrimMemoryMode::Complete);
        thread.destroyRenderingContext();
    } else if (level >= TRIM_MEMORY_UI_HIDDEN) {
        thread.cacheManager().trimMemory(CacheManager::TrimMemoryMode::UiHidden);
    }
}

DeferredLayerUpdater* CanvasContext::createTextureLayer() {
    return mRenderPipeline->createTextureLayer();
}

void CanvasContext::dumpFrames(int fd) {
    mJankTracker.dumpStats(fd);
    mJankTracker.dumpFrames(fd);
}

void CanvasContext::resetFrameStats() {
    mJankTracker.reset();
}

void CanvasContext::setName(const std::string&& name) {
    mJankTracker.setDescription(JankTrackerType::Window, std::move(name));
}

void CanvasContext::waitOnFences() {
    if (mFrameFences.size()) {
        ATRACE_CALL();
        for (auto& fence : mFrameFences) {
            fence.get();
        }
        mFrameFences.clear();
    }
}

void CanvasContext::enqueueFrameWork(std::function<void()>&& func) {
    mFrameFences.push_back(CommonPool::async(std::move(func)));
}

int64_t CanvasContext::getFrameNumber() {
    // mFrameNumber is reset to -1 when the surface changes or we swap buffers
    if (mFrameNumber == -1 && mNativeSurface.get()) {
        mFrameNumber = ANativeWindow_getNextFrameId(mNativeSurface->getNativeWindow());
    }
    return mFrameNumber;
}

bool CanvasContext::surfaceRequiresRedraw() {
    if (!mNativeSurface) return false;
    if (mHaveNewSurface) return true;

    ANativeWindow* anw = mNativeSurface->getNativeWindow();
    const int width = ANativeWindow_getWidth(anw);
    const int height = ANativeWindow_getHeight(anw);

    return width != mLastFrameWidth || height != mLastFrameHeight;
}

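// Computes the region that actually needs repainting for this frame using the buffer age
// reported by the swap chain (EGL_EXT_buffer_age-style). The accumulated damage covers what
// changed since the last frame we produced; if the buffer being rendered into is older than
// that, the damage of the intervening frames from mSwapHistory is unioned in as well. The
// returned rect is the damage in window coordinates, which can be smaller than the area
// repainted in the buffer.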
SkRect CanvasContext::computeDirtyRect(const Frame& frame, SkRect* dirty) {
    if (frame.width() != mLastFrameWidth || frame.height() != mLastFrameHeight) {
        // Can't rely on prior content of the window if the viewport size changes.
        dirty->setEmpty();
        mLastFrameWidth = frame.width();
        mLastFrameHeight = frame.height();
    } else if (mHaveNewSurface || frame.bufferAge() == 0) {
        // New surface needs a full draw
        dirty->setEmpty();
    } else {
        if (!dirty->isEmpty() &&
            !dirty->intersect(SkRect::MakeIWH(frame.width(), frame.height()))) {
            ALOGW("Dirty " RECT_STRING " doesn't intersect with 0 0 %d %d ?", SK_RECT_ARGS(*dirty),
                  frame.width(), frame.height());
            dirty->setEmpty();
        }
        profiler().unionDirty(dirty);
    }

    if (dirty->isEmpty()) {
        dirty->setIWH(frame.width(), frame.height());
    }

    // At this point dirty is the area of the window to update. However,
    // the area of the frame we need to repaint is potentially different, so
    // stash the screen area for later
    SkRect windowDirty(*dirty);

    // If the buffer age is 0 we do a full-screen repaint (handled above)
    // If the buffer age is 1 the buffer contents are the same as they were
    // last frame so there's nothing to union() against
    // Therefore we only care about the > 1 case.
    if (frame.bufferAge() > 1) {
        if (frame.bufferAge() > (int)mSwapHistory.size()) {
            // We don't have enough history to handle this old of a buffer
            // Just do a full-draw
            dirty->setIWH(frame.width(), frame.height());
        } else {
            // At this point we haven't yet added the latest frame to the damage
            // history, so we need to union in the damage from the previous
            // bufferAge - 1 frames.
            for (int i = mSwapHistory.size() - 1;
                 i > ((int)mSwapHistory.size()) - frame.bufferAge(); i--) {
                dirty->join(mSwapHistory[i].damage);
            }
        }
    }

    return windowDirty;
}

CanvasContext* CanvasContext::getActiveContext() {
    return ScopedActiveContext::getActiveContext();
}

bool CanvasContext::mergeTransaction(ASurfaceTransaction* transaction, ASurfaceControl* control) {
    if (!mASurfaceTransactionCallback) return false;
    return std::invoke(mASurfaceTransactionCallback, reinterpret_cast<int64_t>(transaction),
                       reinterpret_cast<int64_t>(control), getFrameNumber());
}

void CanvasContext::prepareSurfaceControlForWebview() {
    if (mPrepareSurfaceControlForWebviewCallback) {
        std::invoke(mPrepareSurfaceControlForWebviewCallback);
    }
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */