/*
 * Copyright 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "TimeStats"
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include <android-base/stringprintf.h>
#include <log/log.h>
#include <timestatsatomsproto/TimeStatsAtomsProtoHeader.h>
#include <utils/String8.h>
#include <utils/Timers.h>
#include <utils/Trace.h>

#include <algorithm>
#include <chrono>
#include <cmath>
#include <unordered_map>

#include "TimeStats.h"
#include "timestatsproto/TimeStatsHelper.h"

namespace android {

namespace impl {

namespace {
FrameTimingHistogram histogramToProto(const std::unordered_map<int32_t, int32_t>& histogram,
                                      size_t maxPulledHistogramBuckets) {
    auto buckets = std::vector<std::pair<int32_t, int32_t>>(histogram.begin(), histogram.end());
    std::sort(buckets.begin(), buckets.end(),
              [](std::pair<int32_t, int32_t>& left, std::pair<int32_t, int32_t>& right) {
                  return left.second > right.second;
              });

    FrameTimingHistogram histogramProto;
    int histogramSize = 0;
    for (const auto& bucket : buckets) {
        if (++histogramSize > maxPulledHistogramBuckets) {
            break;
        }
        histogramProto.add_time_millis_buckets((int32_t)bucket.first);
        histogramProto.add_frame_counts((int64_t)bucket.second);
    }
    return histogramProto;
}

SurfaceflingerStatsLayerInfo_GameMode gameModeToProto(GameMode gameMode) {
    switch (gameMode) {
        case GameMode::Unsupported:
            return SurfaceflingerStatsLayerInfo::GAME_MODE_UNSUPPORTED;
        case GameMode::Standard:
            return SurfaceflingerStatsLayerInfo::GAME_MODE_STANDARD;
        case GameMode::Performance:
            return SurfaceflingerStatsLayerInfo::GAME_MODE_PERFORMANCE;
        case GameMode::Battery:
            return SurfaceflingerStatsLayerInfo::GAME_MODE_BATTERY;
        case GameMode::Custom:
            return SurfaceflingerStatsLayerInfo::GAME_MODE_CUSTOM;
        default:
            return SurfaceflingerStatsLayerInfo::GAME_MODE_UNSPECIFIED;
    }
}

SurfaceflingerStatsLayerInfo_SetFrameRateVote frameRateVoteToProto(
        const TimeStats::SetFrameRateVote& setFrameRateVote) {
    using FrameRateCompatibilityEnum =
            SurfaceflingerStatsLayerInfo::SetFrameRateVote::FrameRateCompatibility;
    using SeamlessnessEnum = SurfaceflingerStatsLayerInfo::SetFrameRateVote::Seamlessness;

    SurfaceflingerStatsLayerInfo_SetFrameRateVote proto;
    proto.set_frame_rate(setFrameRateVote.frameRate);
    proto.set_frame_rate_compatibility(
            static_cast<FrameRateCompatibilityEnum>(setFrameRateVote.frameRateCompatibility));
    proto.set_seamlessness(static_cast<SeamlessnessEnum>(setFrameRateVote.seamlessness));
    return proto;
}
} // namespace

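// Serializes the accumulated global stats into a SurfaceflingerStatsGlobalInfoWrapper,
// emitting one atom per {refresh rate, render rate} bucket pair, then clears the
// global data so the next pull starts from a fresh interval.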
bool TimeStats::populateGlobalAtom(std::vector<uint8_t>* pulledData) {
    std::lock_guard<std::mutex> lock(mMutex);

    if (mTimeStats.statsStartLegacy == 0) {
        return false;
    }
    flushPowerTimeLocked();
    SurfaceflingerStatsGlobalInfoWrapper atomList;
    for (const auto& globalSlice : mTimeStats.stats) {
        SurfaceflingerStatsGlobalInfo* atom = atomList.add_atom();
        atom->set_total_frames(mTimeStats.totalFramesLegacy);
        atom->set_missed_frames(mTimeStats.missedFramesLegacy);
        atom->set_client_composition_frames(mTimeStats.clientCompositionFramesLegacy);
        atom->set_display_on_millis(mTimeStats.displayOnTimeLegacy);
        atom->set_animation_millis(mTimeStats.presentToPresentLegacy.totalTime());
        atom->set_event_connection_count(mTimeStats.displayEventConnectionsCountLegacy);
        *atom->mutable_frame_duration() =
                histogramToProto(mTimeStats.frameDurationLegacy.hist, mMaxPulledHistogramBuckets);
        *atom->mutable_render_engine_timing() =
                histogramToProto(mTimeStats.renderEngineTimingLegacy.hist,
                                 mMaxPulledHistogramBuckets);
        atom->set_total_timeline_frames(globalSlice.second.jankPayload.totalFrames);
        atom->set_total_janky_frames(globalSlice.second.jankPayload.totalJankyFrames);
        atom->set_total_janky_frames_with_long_cpu(globalSlice.second.jankPayload.totalSFLongCpu);
        atom->set_total_janky_frames_with_long_gpu(globalSlice.second.jankPayload.totalSFLongGpu);
        atom->set_total_janky_frames_sf_unattributed(
                globalSlice.second.jankPayload.totalSFUnattributed);
        atom->set_total_janky_frames_app_unattributed(
                globalSlice.second.jankPayload.totalAppUnattributed);
        atom->set_total_janky_frames_sf_scheduling(
                globalSlice.second.jankPayload.totalSFScheduling);
        atom->set_total_jank_frames_sf_prediction_error(
                globalSlice.second.jankPayload.totalSFPredictionError);
        atom->set_total_jank_frames_app_buffer_stuffing(
                globalSlice.second.jankPayload.totalAppBufferStuffing);
        atom->set_display_refresh_rate_bucket(globalSlice.first.displayRefreshRateBucket);
        *atom->mutable_sf_deadline_misses() =
                histogramToProto(globalSlice.second.displayDeadlineDeltas.hist,
                                 mMaxPulledHistogramBuckets);
        *atom->mutable_sf_prediction_errors() =
                histogramToProto(globalSlice.second.displayPresentDeltas.hist,
                                 mMaxPulledHistogramBuckets);
        atom->set_render_rate_bucket(globalSlice.first.renderRateBucket);
    }

    // Always clear data.
    clearGlobalLocked();

    pulledData->resize(atomList.ByteSizeLong());
    return atomList.SerializeToArray(pulledData->data(), atomList.ByteSizeLong());
}

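// Serializes per-layer stats into a SurfaceflingerStatsLayerInfoWrapper, keeping
// at most mMaxPulledLayers layers ordered by descending total frame count, then
// clears the layer data.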
bool TimeStats::populateLayerAtom(std::vector<uint8_t>* pulledData) {
    std::lock_guard<std::mutex> lock(mMutex);

    std::vector<TimeStatsHelper::TimeStatsLayer*> dumpStats;
    uint32_t numLayers = 0;
    for (const auto& globalSlice : mTimeStats.stats) {
        numLayers += globalSlice.second.stats.size();
    }

    dumpStats.reserve(numLayers);

    for (auto& globalSlice : mTimeStats.stats) {
        for (auto& layerSlice : globalSlice.second.stats) {
            dumpStats.push_back(&layerSlice.second);
        }
    }

    std::sort(dumpStats.begin(), dumpStats.end(),
              [](TimeStatsHelper::TimeStatsLayer const* l,
                 TimeStatsHelper::TimeStatsLayer const* r) {
                  return l->totalFrames > r->totalFrames;
              });

    if (mMaxPulledLayers < dumpStats.size()) {
        dumpStats.resize(mMaxPulledLayers);
    }

    SurfaceflingerStatsLayerInfoWrapper atomList;
    for (auto& layer : dumpStats) {
        SurfaceflingerStatsLayerInfo* atom = atomList.add_atom();
        atom->set_layer_name(layer->layerName);
        atom->set_total_frames(layer->totalFrames);
        atom->set_dropped_frames(layer->droppedFrames);
        const auto& present2PresentHist = layer->deltas.find("present2present");
        if (present2PresentHist != layer->deltas.cend()) {
            *atom->mutable_present_to_present() =
                    histogramToProto(present2PresentHist->second.hist, mMaxPulledHistogramBuckets);
        }
        const auto& present2PresentDeltaHist = layer->deltas.find("present2presentDelta");
        if (present2PresentDeltaHist != layer->deltas.cend()) {
            *atom->mutable_present_to_present_delta() =
                    histogramToProto(present2PresentDeltaHist->second.hist,
                                     mMaxPulledHistogramBuckets);
        }
        const auto& post2presentHist = layer->deltas.find("post2present");
        if (post2presentHist != layer->deltas.cend()) {
            *atom->mutable_post_to_present() =
                    histogramToProto(post2presentHist->second.hist, mMaxPulledHistogramBuckets);
        }
        const auto& acquire2presentHist = layer->deltas.find("acquire2present");
        if (acquire2presentHist != layer->deltas.cend()) {
            *atom->mutable_acquire_to_present() =
                    histogramToProto(acquire2presentHist->second.hist, mMaxPulledHistogramBuckets);
        }
        const auto& latch2presentHist = layer->deltas.find("latch2present");
        if (latch2presentHist != layer->deltas.cend()) {
            *atom->mutable_latch_to_present() =
                    histogramToProto(latch2presentHist->second.hist, mMaxPulledHistogramBuckets);
        }
        const auto& desired2presentHist = layer->deltas.find("desired2present");
        if (desired2presentHist != layer->deltas.cend()) {
            *atom->mutable_desired_to_present() =
                    histogramToProto(desired2presentHist->second.hist, mMaxPulledHistogramBuckets);
        }
        const auto& post2acquireHist = layer->deltas.find("post2acquire");
        if (post2acquireHist != layer->deltas.cend()) {
            *atom->mutable_post_to_acquire() =
                    histogramToProto(post2acquireHist->second.hist, mMaxPulledHistogramBuckets);
        }

        atom->set_late_acquire_frames(layer->lateAcquireFrames);
        atom->set_bad_desired_present_frames(layer->badDesiredPresentFrames);
        atom->set_uid(layer->uid);
        atom->set_total_timeline_frames(layer->jankPayload.totalFrames);
        atom->set_total_janky_frames(layer->jankPayload.totalJankyFrames);
        atom->set_total_janky_frames_with_long_cpu(layer->jankPayload.totalSFLongCpu);
        atom->set_total_janky_frames_with_long_gpu(layer->jankPayload.totalSFLongGpu);
        atom->set_total_janky_frames_sf_unattributed(layer->jankPayload.totalSFUnattributed);
        atom->set_total_janky_frames_app_unattributed(layer->jankPayload.totalAppUnattributed);
        atom->set_total_janky_frames_sf_scheduling(layer->jankPayload.totalSFScheduling);
        atom->set_total_jank_frames_sf_prediction_error(layer->jankPayload.totalSFPredictionError);
        atom->set_total_jank_frames_app_buffer_stuffing(layer->jankPayload.totalAppBufferStuffing);
        atom->set_display_refresh_rate_bucket(layer->displayRefreshRateBucket);
        atom->set_render_rate_bucket(layer->renderRateBucket);
        *atom->mutable_set_frame_rate_vote() = frameRateVoteToProto(layer->setFrameRateVote);
        *atom->mutable_app_deadline_misses() =
                histogramToProto(layer->deltas["appDeadlineDeltas"].hist,
                                 mMaxPulledHistogramBuckets);
        atom->set_game_mode(gameModeToProto(layer->gameMode));
    }

    // Always clear data.
    clearLayersLocked();

    pulledData->resize(atomList.ByteSizeLong());
    return atomList.SerializeToArray(pulledData->data(), atomList.ByteSizeLong());
}

TimeStats::TimeStats() : TimeStats(std::nullopt, std::nullopt) {}

TimeStats::TimeStats(std::optional<size_t> maxPulledLayers,
                     std::optional<size_t> maxPulledHistogramBuckets) {
    if (maxPulledLayers) {
        mMaxPulledLayers = *maxPulledLayers;
    }

    if (maxPulledHistogramBuckets) {
        mMaxPulledHistogramBuckets = *maxPulledHistogramBuckets;
    }
}

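// Entry point for statsd pulls: dispatches to the matching populate function
// based on the atom ID, then enables stats collection (a no-op if already enabled).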
bool TimeStats::onPullAtom(const int atomId, std::vector<uint8_t>* pulledData) {
    bool success = false;
    if (atomId == 10062) { // SURFACEFLINGER_STATS_GLOBAL_INFO
        success = populateGlobalAtom(pulledData);
    } else if (atomId == 10063) { // SURFACEFLINGER_STATS_LAYER_INFO
        success = populateLayerAtom(pulledData);
    }

    // Enable timestats now. The first full pull for a given build is expected to
    // contain little or no data, since stats are only enabled after the first
    // pull (global or layer) has completed.
    enable();
    return success;
}

void TimeStats::parseArgs(bool asProto, const Vector<String16>& args, std::string& result) {
    ATRACE_CALL();

    std::unordered_map<std::string, int32_t> argsMap;
    for (size_t index = 0; index < args.size(); ++index) {
        argsMap[std::string(String8(args[index]).c_str())] = index;
    }

    if (argsMap.count("-disable")) {
        disable();
    }

    if (argsMap.count("-dump")) {
        std::optional<uint32_t> maxLayers = std::nullopt;
        auto iter = argsMap.find("-maxlayers");
        if (iter != argsMap.end() && iter->second + 1 < static_cast<int32_t>(args.size())) {
            int64_t value = strtol(String8(args[iter->second + 1]).c_str(), nullptr, 10);
            value = std::clamp(value, int64_t(0), int64_t(UINT32_MAX));
            maxLayers = static_cast<uint32_t>(value);
        }

        dump(asProto, maxLayers, result);
    }

    if (argsMap.count("-clear")) {
        clearAll();
    }

    if (argsMap.count("-enable")) {
        enable();
    }
}

std::string TimeStats::miniDump() {
    ATRACE_CALL();

    std::string result = "TimeStats miniDump:\n";
    std::lock_guard<std::mutex> lock(mMutex);
    android::base::StringAppendF(&result, "Number of layers currently being tracked is %zu\n",
                                 mTimeStatsTracker.size());
    android::base::StringAppendF(&result, "Number of layers in the stats pool is %zu\n",
                                 mTimeStats.stats.size());
    return result;
}

void TimeStats::incrementTotalFrames() {
    if (!mEnabled.load()) return;

    ATRACE_CALL();

    std::lock_guard<std::mutex> lock(mMutex);
    mTimeStats.totalFramesLegacy++;
}

void TimeStats::incrementMissedFrames() {
    if (!mEnabled.load()) return;

    ATRACE_CALL();

    std::lock_guard<std::mutex> lock(mMutex);
    mTimeStats.missedFramesLegacy++;
}

void TimeStats::pushCompositionStrategyState(const TimeStats::ClientCompositionRecord& record) {
    if (!mEnabled.load() || !record.hasInterestingData()) {
        return;
    }

    ATRACE_CALL();

    std::lock_guard<std::mutex> lock(mMutex);
    if (record.changed) mTimeStats.compositionStrategyChangesLegacy++;
    if (record.hadClientComposition) mTimeStats.clientCompositionFramesLegacy++;
    if (record.reused) mTimeStats.clientCompositionReusedFramesLegacy++;
    if (record.predicted) mTimeStats.compositionStrategyPredictedLegacy++;
    if (record.predictionSucceeded) mTimeStats.compositionStrategyPredictionSucceededLegacy++;
}

void TimeStats::incrementRefreshRateSwitches() {
    if (!mEnabled.load()) return;

    ATRACE_CALL();

    std::lock_guard<std::mutex> lock(mMutex);
    mTimeStats.refreshRateSwitchesLegacy++;
}

void TimeStats::recordDisplayEventConnectionCount(int32_t count) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();

    std::lock_guard<std::mutex> lock(mMutex);
    mTimeStats.displayEventConnectionsCountLegacy =
            std::max(mTimeStats.displayEventConnectionsCountLegacy, count);
}

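// Converts a duration in nanoseconds to whole milliseconds, clamped to the
// int32_t range.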
static int32_t toMs(nsecs_t nanos) {
    int64_t millis =
            std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::nanoseconds(nanos))
                    .count();
    millis = std::clamp(millis, int64_t(INT32_MIN), int64_t(INT32_MAX));
    return static_cast<int32_t>(millis);
}

static int32_t msBetween(nsecs_t start, nsecs_t end) {
    return toMs(end - start);
}

void TimeStats::recordFrameDuration(nsecs_t startTime, nsecs_t endTime) {
    if (!mEnabled.load()) return;

    std::lock_guard<std::mutex> lock(mMutex);
    if (mPowerTime.powerMode == PowerMode::ON) {
        mTimeStats.frameDurationLegacy.insert(msBetween(startTime, endTime));
    }
}

void TimeStats::recordRenderEngineDuration(nsecs_t startTime, nsecs_t endTime) {
    if (!mEnabled.load()) return;

    std::lock_guard<std::mutex> lock(mMutex);
    if (mGlobalRecord.renderEngineDurations.size() == MAX_NUM_TIME_RECORDS) {
        ALOGE("RenderEngineTimes are already at its maximum size[%zu]", MAX_NUM_TIME_RECORDS);
        mGlobalRecord.renderEngineDurations.pop_front();
    }
    mGlobalRecord.renderEngineDurations.push_back({startTime, endTime});
}

void TimeStats::recordRenderEngineDuration(nsecs_t startTime,
                                           const std::shared_ptr<FenceTime>& endTime) {
    if (!mEnabled.load()) return;

    std::lock_guard<std::mutex> lock(mMutex);
    if (mGlobalRecord.renderEngineDurations.size() == MAX_NUM_TIME_RECORDS) {
        ALOGE("RenderEngineTimes are already at its maximum size[%zu]", MAX_NUM_TIME_RECORDS);
        mGlobalRecord.renderEngineDurations.pop_front();
    }
    mGlobalRecord.renderEngineDurations.push_back({startTime, endTime});
}

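// Returns false while the present fence has not been received or while either
// the acquire or present fence is still pending; otherwise copies any signaled
// fence times into the frame time and returns true.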
bool TimeStats::recordReadyLocked(int32_t layerId, TimeRecord* timeRecord) {
    if (!timeRecord->ready) {
        ALOGV("[%d]-[%" PRIu64 "]-presentFence is still not received", layerId,
              timeRecord->frameTime.frameNumber);
        return false;
    }

    if (timeRecord->acquireFence != nullptr) {
        if (timeRecord->acquireFence->getSignalTime() == Fence::SIGNAL_TIME_PENDING) {
            return false;
        }
        if (timeRecord->acquireFence->getSignalTime() != Fence::SIGNAL_TIME_INVALID) {
            timeRecord->frameTime.acquireTime = timeRecord->acquireFence->getSignalTime();
            timeRecord->acquireFence = nullptr;
        } else {
            ALOGV("[%d]-[%" PRIu64 "]-acquireFence signal time is invalid", layerId,
                  timeRecord->frameTime.frameNumber);
        }
    }

    if (timeRecord->presentFence != nullptr) {
        if (timeRecord->presentFence->getSignalTime() == Fence::SIGNAL_TIME_PENDING) {
            return false;
        }
        if (timeRecord->presentFence->getSignalTime() != Fence::SIGNAL_TIME_INVALID) {
            timeRecord->frameTime.presentTime = timeRecord->presentFence->getSignalTime();
            timeRecord->presentFence = nullptr;
        } else {
            ALOGV("[%d]-[%" PRIu64 "]-presentFence signal time invalid", layerId,
                  timeRecord->frameTime.frameNumber);
        }
    }

    return true;
}

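// Rounds fps to the nearest multiple of bucketWidth so that similar rates are
// aggregated together; e.g. with a bucket width of 30, an 88 fps rate maps to
// the 90 fps bucket.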
static int32_t clampToNearestBucket(Fps fps, size_t bucketWidth) {
    return std::round(fps.getValue() / bucketWidth) * bucketWidth;
}

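// Drains every time record whose fences have signaled, folding the measured
// post/acquire/latch/desired/present-to-present deltas into the per-layer
// histograms keyed by {refresh rate bucket, render rate bucket, uid, layer name,
// game mode}.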
void TimeStats::flushAvailableRecordsToStatsLocked(int32_t layerId, Fps displayRefreshRate,
                                                   std::optional<Fps> renderRate,
                                                   SetFrameRateVote frameRateVote,
                                                   GameMode gameMode) {
    ATRACE_CALL();
    ALOGV("[%d]-flushAvailableRecordsToStatsLocked", layerId);

    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    TimeRecord& prevTimeRecord = layerRecord.prevTimeRecord;
    std::optional<int32_t>& prevPresentToPresentMs = layerRecord.prevPresentToPresentMs;
    std::deque<TimeRecord>& timeRecords = layerRecord.timeRecords;
    const int32_t refreshRateBucket =
            clampToNearestBucket(displayRefreshRate, REFRESH_RATE_BUCKET_WIDTH);
    const int32_t renderRateBucket =
            clampToNearestBucket(renderRate ? *renderRate : displayRefreshRate,
                                 RENDER_RATE_BUCKET_WIDTH);
    while (!timeRecords.empty()) {
        if (!recordReadyLocked(layerId, &timeRecords[0])) break;
        ALOGV("[%d]-[%" PRIu64 "]-presentFenceTime[%" PRId64 "]", layerId,
              timeRecords[0].frameTime.frameNumber, timeRecords[0].frameTime.presentTime);

        if (prevTimeRecord.ready) {
            uid_t uid = layerRecord.uid;
            const std::string& layerName = layerRecord.layerName;
            TimeStatsHelper::TimelineStatsKey timelineKey = {refreshRateBucket, renderRateBucket};
            if (!mTimeStats.stats.count(timelineKey)) {
                mTimeStats.stats[timelineKey].key = timelineKey;
            }

            TimeStatsHelper::TimelineStats& displayStats = mTimeStats.stats[timelineKey];

            TimeStatsHelper::LayerStatsKey layerKey = {uid, layerName, gameMode};
            if (!displayStats.stats.count(layerKey)) {
                displayStats.stats[layerKey].displayRefreshRateBucket = refreshRateBucket;
                displayStats.stats[layerKey].renderRateBucket = renderRateBucket;
                displayStats.stats[layerKey].uid = uid;
                displayStats.stats[layerKey].layerName = layerName;
                displayStats.stats[layerKey].gameMode = gameMode;
            }
            if (frameRateVote.frameRate > 0.0f) {
                displayStats.stats[layerKey].setFrameRateVote = frameRateVote;
            }
            TimeStatsHelper::TimeStatsLayer& timeStatsLayer = displayStats.stats[layerKey];
            timeStatsLayer.totalFrames++;
            timeStatsLayer.droppedFrames += layerRecord.droppedFrames;
            timeStatsLayer.lateAcquireFrames += layerRecord.lateAcquireFrames;
            timeStatsLayer.badDesiredPresentFrames += layerRecord.badDesiredPresentFrames;

            layerRecord.droppedFrames = 0;
            layerRecord.lateAcquireFrames = 0;
            layerRecord.badDesiredPresentFrames = 0;

            const int32_t postToAcquireMs = msBetween(timeRecords[0].frameTime.postTime,
                                                      timeRecords[0].frameTime.acquireTime);
            ALOGV("[%d]-[%" PRIu64 "]-post2acquire[%d]", layerId,
                  timeRecords[0].frameTime.frameNumber, postToAcquireMs);
            timeStatsLayer.deltas["post2acquire"].insert(postToAcquireMs);

            const int32_t postToPresentMs = msBetween(timeRecords[0].frameTime.postTime,
                                                      timeRecords[0].frameTime.presentTime);
            ALOGV("[%d]-[%" PRIu64 "]-post2present[%d]", layerId,
                  timeRecords[0].frameTime.frameNumber, postToPresentMs);
            timeStatsLayer.deltas["post2present"].insert(postToPresentMs);

            const int32_t acquireToPresentMs = msBetween(timeRecords[0].frameTime.acquireTime,
                                                         timeRecords[0].frameTime.presentTime);
            ALOGV("[%d]-[%" PRIu64 "]-acquire2present[%d]", layerId,
                  timeRecords[0].frameTime.frameNumber, acquireToPresentMs);
            timeStatsLayer.deltas["acquire2present"].insert(acquireToPresentMs);

            const int32_t latchToPresentMs = msBetween(timeRecords[0].frameTime.latchTime,
                                                       timeRecords[0].frameTime.presentTime);
            ALOGV("[%d]-[%" PRIu64 "]-latch2present[%d]", layerId,
                  timeRecords[0].frameTime.frameNumber, latchToPresentMs);
            timeStatsLayer.deltas["latch2present"].insert(latchToPresentMs);

            const int32_t desiredToPresentMs = msBetween(timeRecords[0].frameTime.desiredTime,
                                                         timeRecords[0].frameTime.presentTime);
            ALOGV("[%d]-[%" PRIu64 "]-desired2present[%d]", layerId,
                  timeRecords[0].frameTime.frameNumber, desiredToPresentMs);
            timeStatsLayer.deltas["desired2present"].insert(desiredToPresentMs);

            const int32_t presentToPresentMs = msBetween(prevTimeRecord.frameTime.presentTime,
                                                         timeRecords[0].frameTime.presentTime);
            ALOGV("[%d]-[%" PRIu64 "]-present2present[%d]", layerId,
                  timeRecords[0].frameTime.frameNumber, presentToPresentMs);
            timeStatsLayer.deltas["present2present"].insert(presentToPresentMs);
            if (prevPresentToPresentMs) {
                const int32_t presentToPresentDeltaMs =
                        std::abs(presentToPresentMs - *prevPresentToPresentMs);
                timeStatsLayer.deltas["present2presentDelta"].insert(presentToPresentDeltaMs);
            }
            prevPresentToPresentMs = presentToPresentMs;
        }
        prevTimeRecord = timeRecords[0];
        timeRecords.pop_front();
        layerRecord.waitData--;
    }
}

static constexpr const char* kPopupWindowPrefix = "PopupWindow";
static const size_t kMinLenLayerName = std::strlen(kPopupWindowPrefix);

// Avoid tracking the "PopupWindow:<random hash>#<number>" layers
static bool layerNameIsValid(const std::string& layerName) {
    return layerName.length() >= kMinLenLayerName &&
            layerName.compare(0, kMinLenLayerName, kPopupWindowPrefix) != 0;
}

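// Returns true if aggregated stats already exist for this {uid, layer name,
// game mode} key, or if there is still room for a new aggregation entry.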
bool TimeStats::canAddNewAggregatedStats(uid_t uid, const std::string& layerName,
                                         GameMode gameMode) {
    uint32_t layerRecords = 0;
    for (const auto& record : mTimeStats.stats) {
        if (record.second.stats.count({uid, layerName, gameMode}) > 0) {
            return true;
        }

        layerRecords += record.second.stats.size();
    }

    return layerRecords < MAX_NUM_LAYER_STATS;
}

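// Starts tracking a new frame for the layer: records the queue (post) time and
// seeds the latch/acquire/desired times with postTime until real values arrive.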
void TimeStats::setPostTime(int32_t layerId, uint64_t frameNumber, const std::string& layerName,
                            uid_t uid, nsecs_t postTime, GameMode gameMode) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-[%" PRIu64 "]-[%s]-PostTime[%" PRId64 "]", layerId, frameNumber, layerName.c_str(),
          postTime);

    std::lock_guard<std::mutex> lock(mMutex);
    if (!canAddNewAggregatedStats(uid, layerName, gameMode)) {
        return;
    }
    if (!mTimeStatsTracker.count(layerId) && mTimeStatsTracker.size() < MAX_NUM_LAYER_RECORDS &&
        layerNameIsValid(layerName)) {
        mTimeStatsTracker[layerId].uid = uid;
        mTimeStatsTracker[layerId].layerName = layerName;
        mTimeStatsTracker[layerId].gameMode = gameMode;
    }
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    if (layerRecord.timeRecords.size() == MAX_NUM_TIME_RECORDS) {
        ALOGE("[%d]-[%s]-timeRecords is at its maximum size[%zu]. Ignore this when unittesting.",
              layerId, layerRecord.layerName.c_str(), MAX_NUM_TIME_RECORDS);
        mTimeStatsTracker.erase(layerId);
        return;
    }
    // For most media content, the acquireFence is invalid because the buffer is
    // already ready at the queueBuffer stage. In that case, acquireTime defaults
    // to postTime.
    TimeRecord timeRecord = {
            .frameTime =
                    {
                            .frameNumber = frameNumber,
                            .postTime = postTime,
                            .latchTime = postTime,
                            .acquireTime = postTime,
                            .desiredTime = postTime,
                    },
    };
    layerRecord.timeRecords.push_back(timeRecord);
    if (layerRecord.waitData < 0 ||
        layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
        layerRecord.waitData = layerRecord.timeRecords.size() - 1;
}

void TimeStats::setLatchTime(int32_t layerId, uint64_t frameNumber, nsecs_t latchTime) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-[%" PRIu64 "]-LatchTime[%" PRId64 "]", layerId, frameNumber, latchTime);

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    if (layerRecord.waitData < 0 ||
        layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
        return;
    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
    if (timeRecord.frameTime.frameNumber == frameNumber) {
        timeRecord.frameTime.latchTime = latchTime;
    }
}

void TimeStats::incrementLatchSkipped(int32_t layerId, LatchSkipReason reason) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-LatchSkipped-Reason[%d]", layerId,
          static_cast<std::underlying_type<LatchSkipReason>::type>(reason));

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];

    switch (reason) {
        case LatchSkipReason::LateAcquire:
            layerRecord.lateAcquireFrames++;
            break;
    }
}

void TimeStats::incrementBadDesiredPresent(int32_t layerId) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-BadDesiredPresent", layerId);

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    layerRecord.badDesiredPresentFrames++;
}

void TimeStats::setDesiredTime(int32_t layerId, uint64_t frameNumber, nsecs_t desiredTime) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-[%" PRIu64 "]-DesiredTime[%" PRId64 "]", layerId, frameNumber, desiredTime);

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    if (layerRecord.waitData < 0 ||
        layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
        return;
    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
    if (timeRecord.frameTime.frameNumber == frameNumber) {
        timeRecord.frameTime.desiredTime = desiredTime;
    }
}

void TimeStats::setAcquireTime(int32_t layerId, uint64_t frameNumber, nsecs_t acquireTime) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-[%" PRIu64 "]-AcquireTime[%" PRId64 "]", layerId, frameNumber, acquireTime);

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    if (layerRecord.waitData < 0 ||
        layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
        return;
    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
    if (timeRecord.frameTime.frameNumber == frameNumber) {
        timeRecord.frameTime.acquireTime = acquireTime;
    }
}

void TimeStats::setAcquireFence(int32_t layerId, uint64_t frameNumber,
                                const std::shared_ptr<FenceTime>& acquireFence) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-[%" PRIu64 "]-AcquireFenceTime[%" PRId64 "]", layerId, frameNumber,
          acquireFence->getSignalTime());

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    if (layerRecord.waitData < 0 ||
        layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
        return;
    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
    if (timeRecord.frameTime.frameNumber == frameNumber) {
        timeRecord.acquireFence = acquireFence;
    }
}

void TimeStats::setPresentTime(int32_t layerId, uint64_t frameNumber, nsecs_t presentTime,
                               Fps displayRefreshRate, std::optional<Fps> renderRate,
                               SetFrameRateVote frameRateVote, GameMode gameMode) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-[%" PRIu64 "]-PresentTime[%" PRId64 "]", layerId, frameNumber, presentTime);

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    if (layerRecord.waitData < 0 ||
        layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
        return;
    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
    if (timeRecord.frameTime.frameNumber == frameNumber) {
        timeRecord.frameTime.presentTime = presentTime;
        timeRecord.ready = true;
        layerRecord.waitData++;
    }

    flushAvailableRecordsToStatsLocked(layerId, displayRefreshRate, renderRate, frameRateVote,
                                       gameMode);
}

void TimeStats::setPresentFence(int32_t layerId, uint64_t frameNumber,
                                const std::shared_ptr<FenceTime>& presentFence,
                                Fps displayRefreshRate, std::optional<Fps> renderRate,
                                SetFrameRateVote frameRateVote, GameMode gameMode) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-[%" PRIu64 "]-PresentFenceTime[%" PRId64 "]", layerId, frameNumber,
          presentFence->getSignalTime());

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    if (layerRecord.waitData < 0 ||
        layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
        return;
    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
    if (timeRecord.frameTime.frameNumber == frameNumber) {
        timeRecord.presentFence = presentFence;
        timeRecord.ready = true;
        layerRecord.waitData++;
    }

    flushAvailableRecordsToStatsLocked(layerId, displayRefreshRate, renderRate, frameRateVote,
                                       gameMode);
}

static const constexpr int32_t kValidJankyReason = JankType::DisplayHAL |
        JankType::SurfaceFlingerCpuDeadlineMissed | JankType::SurfaceFlingerGpuDeadlineMissed |
        JankType::AppDeadlineMissed | JankType::PredictionError |
        JankType::SurfaceFlingerScheduling;

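// Increments the jank counters on a TimelineStats or TimeStatsLayer payload
// according to the bitmask of jank reasons reported for a frame.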
template <class T>
static void updateJankPayload(T& t, int32_t reasons) {
    t.jankPayload.totalFrames++;

    if (reasons & kValidJankyReason) {
        t.jankPayload.totalJankyFrames++;
        if ((reasons & JankType::SurfaceFlingerCpuDeadlineMissed) != 0) {
            t.jankPayload.totalSFLongCpu++;
        }
        if ((reasons & JankType::SurfaceFlingerGpuDeadlineMissed) != 0) {
            t.jankPayload.totalSFLongGpu++;
        }
        if ((reasons & JankType::DisplayHAL) != 0) {
            t.jankPayload.totalSFUnattributed++;
        }
        if ((reasons & JankType::AppDeadlineMissed) != 0) {
            t.jankPayload.totalAppUnattributed++;
        }
        if ((reasons & JankType::PredictionError) != 0) {
            t.jankPayload.totalSFPredictionError++;
        }
        if ((reasons & JankType::SurfaceFlingerScheduling) != 0) {
            t.jankPayload.totalSFScheduling++;
        }
    }

    // We want to track BufferStuffing separately as it can provide info on latency issues
    if (reasons & JankType::BufferStuffing) {
        t.jankPayload.totalAppBufferStuffing++;
    }
}

void TimeStats::incrementJankyFrames(const JankyFramesInfo& info) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    std::lock_guard<std::mutex> lock(mMutex);

    // Only update layer stats if we're already tracking the layer in TimeStats.
    // Otherwise, continue tracking the statistic but use a default layer name instead.
    // As an implementation detail, we do this because this method is expected to be
    // called from FrameTimeline, whose jank classification includes transaction jank
    // that occurs without a buffer. But, in general those layer names are not suitable as
    // aggregation keys: e.g., it's normal and expected for Window Manager to include the hash code
    // for an animation leash. So while we can show that jank in dumpsys, aggregating based on the
    // layer blows up the stats size, so as a workaround drop those stats. This assumes that
    // TimeStats will flush the first present fence for a layer *before* FrameTimeline does so that
    // the first jank record is not dropped.

    static const std::string kDefaultLayerName = "none";
    constexpr GameMode kDefaultGameMode = GameMode::Unsupported;

    const int32_t refreshRateBucket =
            clampToNearestBucket(info.refreshRate, REFRESH_RATE_BUCKET_WIDTH);
    const int32_t renderRateBucket =
            clampToNearestBucket(info.renderRate ? *info.renderRate : info.refreshRate,
                                 RENDER_RATE_BUCKET_WIDTH);
    const TimeStatsHelper::TimelineStatsKey timelineKey = {refreshRateBucket, renderRateBucket};

    if (!mTimeStats.stats.count(timelineKey)) {
        mTimeStats.stats[timelineKey].key = timelineKey;
    }

    TimeStatsHelper::TimelineStats& timelineStats = mTimeStats.stats[timelineKey];

    updateJankPayload<TimeStatsHelper::TimelineStats>(timelineStats, info.reasons);

    TimeStatsHelper::LayerStatsKey layerKey = {info.uid, info.layerName, info.gameMode};
    if (!timelineStats.stats.count(layerKey)) {
        layerKey = {info.uid, kDefaultLayerName, kDefaultGameMode};
        timelineStats.stats[layerKey].displayRefreshRateBucket = refreshRateBucket;
        timelineStats.stats[layerKey].renderRateBucket = renderRateBucket;
        timelineStats.stats[layerKey].uid = info.uid;
        timelineStats.stats[layerKey].layerName = kDefaultLayerName;
        timelineStats.stats[layerKey].gameMode = kDefaultGameMode;
    }

    TimeStatsHelper::TimeStatsLayer& timeStatsLayer = timelineStats.stats[layerKey];
    updateJankPayload<TimeStatsHelper::TimeStatsLayer>(timeStatsLayer, info.reasons);

    if (info.reasons & kValidJankyReason) {
        // TimeStats Histograms only retain positive values, so we don't need to check if these
        // deadlines were really missed if we know that the frame had jank, since deadlines
        // that were met will be dropped.
        timelineStats.displayDeadlineDeltas.insert(toMs(info.displayDeadlineDelta));
        timelineStats.displayPresentDeltas.insert(toMs(info.displayPresentJitter));
        timeStatsLayer.deltas["appDeadlineDeltas"].insert(toMs(info.appDeadlineDelta));
    }
}

void TimeStats::onDestroy(int32_t layerId) {
    ATRACE_CALL();
    ALOGV("[%d]-onDestroy", layerId);
    std::lock_guard<std::mutex> lock(mMutex);
    mTimeStatsTracker.erase(layerId);
}

void TimeStats::removeTimeRecord(int32_t layerId, uint64_t frameNumber) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    ALOGV("[%d]-[%" PRIu64 "]-removeTimeRecord", layerId, frameNumber);

    std::lock_guard<std::mutex> lock(mMutex);
    if (!mTimeStatsTracker.count(layerId)) return;
    LayerRecord& layerRecord = mTimeStatsTracker[layerId];
    size_t removeAt = 0;
    for (const TimeRecord& record : layerRecord.timeRecords) {
        if (record.frameTime.frameNumber == frameNumber) break;
        removeAt++;
    }
    if (removeAt == layerRecord.timeRecords.size()) return;
    layerRecord.timeRecords.erase(layerRecord.timeRecords.begin() + removeAt);
    if (layerRecord.waitData > static_cast<int32_t>(removeAt)) {
        layerRecord.waitData--;
    }
    layerRecord.droppedFrames++;
}

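// Accumulates display-on time (in milliseconds) since the last flush while the
// display was in PowerMode::ON, then resets the reference timestamp.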
void TimeStats::flushPowerTimeLocked() {
    if (!mEnabled.load()) return;

    nsecs_t curTime = systemTime();
    // elapsedTime is in milliseconds.
    int64_t elapsedTime = (curTime - mPowerTime.prevTime) / 1000000;

    switch (mPowerTime.powerMode) {
        case PowerMode::ON:
            mTimeStats.displayOnTimeLegacy += elapsedTime;
            break;
        case PowerMode::OFF:
        case PowerMode::DOZE:
        case PowerMode::DOZE_SUSPEND:
        case PowerMode::ON_SUSPEND:
        default:
            break;
    }

    mPowerTime.prevTime = curTime;
}

void TimeStats::setPowerMode(PowerMode powerMode) {
    if (!mEnabled.load()) {
        std::lock_guard<std::mutex> lock(mMutex);
        mPowerTime.powerMode = powerMode;
        return;
    }

    std::lock_guard<std::mutex> lock(mMutex);
    if (powerMode == mPowerTime.powerMode) return;

    flushPowerTimeLocked();
    mPowerTime.powerMode = powerMode;
}

void TimeStats::recordRefreshRate(uint32_t fps, nsecs_t duration) {
    std::lock_guard<std::mutex> lock(mMutex);
    if (mTimeStats.refreshRateStatsLegacy.count(fps)) {
        mTimeStats.refreshRateStatsLegacy[fps] += duration;
    } else {
        mTimeStats.refreshRateStatsLegacy.insert({fps, duration});
    }
}

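// Drains signaled global present fences and finished RenderEngine durations into
// the legacy present-to-present and RenderEngine timing histograms.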
void TimeStats::flushAvailableGlobalRecordsToStatsLocked() {
    ATRACE_CALL();

    while (!mGlobalRecord.presentFences.empty()) {
        const nsecs_t curPresentTime = mGlobalRecord.presentFences.front()->getSignalTime();
        if (curPresentTime == Fence::SIGNAL_TIME_PENDING) break;

        if (curPresentTime == Fence::SIGNAL_TIME_INVALID) {
            ALOGE("GlobalPresentFence is invalid!");
            mGlobalRecord.prevPresentTime = 0;
            mGlobalRecord.presentFences.pop_front();
            continue;
        }

        ALOGV("GlobalPresentFenceTime[%" PRId64 "]",
              mGlobalRecord.presentFences.front()->getSignalTime());

        if (mGlobalRecord.prevPresentTime != 0) {
            const int32_t presentToPresentMs =
                    msBetween(mGlobalRecord.prevPresentTime, curPresentTime);
            ALOGV("Global present2present[%d] prev[%" PRId64 "] curr[%" PRId64 "]",
                  presentToPresentMs, mGlobalRecord.prevPresentTime, curPresentTime);
            mTimeStats.presentToPresentLegacy.insert(presentToPresentMs);
        }

        mGlobalRecord.prevPresentTime = curPresentTime;
        mGlobalRecord.presentFences.pop_front();
    }
    while (!mGlobalRecord.renderEngineDurations.empty()) {
        const auto duration = mGlobalRecord.renderEngineDurations.front();
        const auto& endTime = duration.endTime;

        nsecs_t endNs = -1;

        if (auto val = std::get_if<nsecs_t>(&endTime)) {
            endNs = *val;
        } else {
            endNs = std::get<std::shared_ptr<FenceTime>>(endTime)->getSignalTime();
        }

        if (endNs == Fence::SIGNAL_TIME_PENDING) break;

        if (endNs < 0) {
            ALOGE("RenderEngineTiming is invalid!");
            mGlobalRecord.renderEngineDurations.pop_front();
            continue;
        }

        const int32_t renderEngineMs = msBetween(duration.startTime, endNs);
        mTimeStats.renderEngineTimingLegacy.insert(renderEngineMs);

        mGlobalRecord.renderEngineDurations.pop_front();
    }
}

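// Queues a display present fence for the global stats. Fences are only recorded
// while the display is in PowerMode::ON; otherwise any pending records are
// flushed and the queue is reset.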
void TimeStats::setPresentFenceGlobal(const std::shared_ptr<FenceTime>& presentFence) {
    if (!mEnabled.load()) return;

    ATRACE_CALL();
    std::lock_guard<std::mutex> lock(mMutex);
    if (presentFence == nullptr || !presentFence->isValid()) {
        mGlobalRecord.prevPresentTime = 0;
        return;
    }

    if (mPowerTime.powerMode != PowerMode::ON) {
        // Try flushing the last present fence on PowerMode::ON.
        flushAvailableGlobalRecordsToStatsLocked();
        mGlobalRecord.presentFences.clear();
        mGlobalRecord.prevPresentTime = 0;
        return;
    }

    if (mGlobalRecord.presentFences.size() == MAX_NUM_TIME_RECORDS) {
        // The front presentFence must be trapped in pending status in this
        // case. Try dequeuing the front one to recover.
        ALOGE("GlobalPresentFences is already at its maximum size[%zu]", MAX_NUM_TIME_RECORDS);
        mGlobalRecord.prevPresentTime = 0;
        mGlobalRecord.presentFences.pop_front();
    }

    mGlobalRecord.presentFences.emplace_back(presentFence);
    flushAvailableGlobalRecordsToStatsLocked();
}

void TimeStats::enable() {
    if (mEnabled.load()) return;

    ATRACE_CALL();

    std::lock_guard<std::mutex> lock(mMutex);
    mEnabled.store(true);
    mTimeStats.statsStartLegacy = static_cast<int64_t>(std::time(0));
    mPowerTime.prevTime = systemTime();
    ALOGD("Enabled");
}

void TimeStats::disable() {
    if (!mEnabled.load()) return;

    ATRACE_CALL();

    std::lock_guard<std::mutex> lock(mMutex);
    flushPowerTimeLocked();
    mEnabled.store(false);
    mTimeStats.statsEndLegacy = static_cast<int64_t>(std::time(0));
    ALOGD("Disabled");
}

void TimeStats::clearAll() {
    std::lock_guard<std::mutex> lock(mMutex);
    mTimeStats.stats.clear();
    clearGlobalLocked();
    clearLayersLocked();
}

void TimeStats::clearGlobalLocked() {
    ATRACE_CALL();

    mTimeStats.statsStartLegacy = (mEnabled.load() ? static_cast<int64_t>(std::time(0)) : 0);
    mTimeStats.statsEndLegacy = 0;
    mTimeStats.totalFramesLegacy = 0;
    mTimeStats.missedFramesLegacy = 0;
    mTimeStats.clientCompositionFramesLegacy = 0;
    mTimeStats.clientCompositionReusedFramesLegacy = 0;
    mTimeStats.compositionStrategyChangesLegacy = 0;
    mTimeStats.compositionStrategyPredictedLegacy = 0;
    mTimeStats.compositionStrategyPredictionSucceededLegacy = 0;
    mTimeStats.refreshRateSwitchesLegacy = 0;
    mTimeStats.displayEventConnectionsCountLegacy = 0;
    mTimeStats.displayOnTimeLegacy = 0;
    mTimeStats.presentToPresentLegacy.hist.clear();
    mTimeStats.frameDurationLegacy.hist.clear();
    mTimeStats.renderEngineTimingLegacy.hist.clear();
    mTimeStats.refreshRateStatsLegacy.clear();
    mPowerTime.prevTime = systemTime();
    for (auto& globalRecord : mTimeStats.stats) {
        globalRecord.second.clearGlobals();
    }
    mGlobalRecord.prevPresentTime = 0;
    mGlobalRecord.presentFences.clear();
    ALOGD("Cleared global stats");
}

void TimeStats::clearLayersLocked() {
    ATRACE_CALL();

    mTimeStatsTracker.clear();

    for (auto& globalRecord : mTimeStats.stats) {
        globalRecord.second.stats.clear();
    }
    ALOGD("Cleared layer stats");
}

bool TimeStats::isEnabled() {
    return mEnabled.load();
}

void TimeStats::dump(bool asProto, std::optional<uint32_t> maxLayers, std::string& result) {
    ATRACE_CALL();

    std::lock_guard<std::mutex> lock(mMutex);
    if (mTimeStats.statsStartLegacy == 0) {
        return;
    }

    mTimeStats.statsEndLegacy = static_cast<int64_t>(std::time(0));

    flushPowerTimeLocked();

    if (asProto) {
        ALOGD("Dumping TimeStats as proto");
        SFTimeStatsGlobalProto timeStatsProto = mTimeStats.toProto(maxLayers);
        result.append(timeStatsProto.SerializeAsString());
    } else {
        ALOGD("Dumping TimeStats as text");
        result.append(mTimeStats.toString(maxLayers));
        result.append("\n");
    }
}

} // namespace impl

} // namespace android