/*
 * Copyright 2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "LatencyAggregatorWithHistograms"
#include "../InputDeviceMetricsSource.h"
#include "InputDispatcher.h"

#include <inttypes.h>
#include <log/log_event_list.h>
#include <statslog.h>

#include <android-base/logging.h>
#include <android-base/stringprintf.h>
#include <input/Input.h>
#include <log/log.h>
#include <server_configurable_flags/get_flags.h>

using android::base::StringPrintf;
using std::chrono_literals::operator""ms;

namespace {

// Convert the provided nanoseconds into hundreds of microseconds.
// Use hundreds of microseconds (as opposed to microseconds) to preserve space.
static inline int64_t ns2hus(nsecs_t nanos) {
    return ns2us(nanos) / 100;
}

// Category (=namespace) name for the input settings that are applied at boot time
static const char* INPUT_NATIVE_BOOT = "input_native_boot";
// Feature flag name for the threshold of end-to-end touch latency that triggers a
// SlowEventReported atom to be pushed
static const char* SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS =
        "slow_event_min_reporting_latency_millis";
// Feature flag name for the minimum delay before reporting a slow event after having just reported
// a slow event. This helps limit the amount of data sent to the server
static const char* SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS =
        "slow_event_min_reporting_interval_millis";

// If an event has end-to-end latency > 200 ms, it will get reported as a slow event.
std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY = 200ms;
// If we receive two slow events less than 1 min apart, we will only report 1 of them.
std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL = 60000ms;

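// Reads the slow-event reporting threshold from the server-configurable flag, falling back to the
// default value when the flag is not set.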
static std::chrono::milliseconds getSlowEventMinReportingLatency() {
    std::string millis = server_configurable_flags::
            GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS,
                                      std::to_string(
                                              DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY.count()));
    return std::chrono::milliseconds(std::stoi(millis));
}

static std::chrono::milliseconds getSlowEventMinReportingInterval() {
    std::string millis = server_configurable_flags::
            GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS,
                                      std::to_string(
                                              DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL.count()));
    return std::chrono::milliseconds(std::stoi(millis));
}

} // namespace

namespace android::inputdispatcher {

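// Converts a LatencyStageIndex to the corresponding latency_stage value of the
// InputEventLatencyReported atom. Unrecognized stages map to UNKNOWN_LATENCY_STAGE.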
int32_t LatencyStageIndexToAtomEnum(LatencyStageIndex latencyStageIndex) {
    switch (latencyStageIndex) {
        case LatencyStageIndex::EVENT_TO_READ:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__EVENT_TO_READ;
        case LatencyStageIndex::READ_TO_DELIVER:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__READ_TO_DELIVER;
        case LatencyStageIndex::DELIVER_TO_CONSUME:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__DELIVER_TO_CONSUME;
        case LatencyStageIndex::CONSUME_TO_FINISH:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__CONSUME_TO_FINISH;
        case LatencyStageIndex::CONSUME_TO_GPU_COMPLETE:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__CONSUME_TO_GPU_COMPLETE;
        case LatencyStageIndex::GPU_COMPLETE_TO_PRESENT:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__GPU_COMPLETE_TO_PRESENT;
        case LatencyStageIndex::END_TO_END:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__END_TO_END;
        default:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__UNKNOWN_LATENCY_STAGE;
    }
}

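// Converts an InputEventActionType to the corresponding input_event_type value of the
// InputEventLatencyReported atom.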
int32_t InputEventTypeEnumToAtomEnum(InputEventActionType inputEventActionType) {
    switch (inputEventActionType) {
        case InputEventActionType::UNKNOWN_INPUT_EVENT:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__UNKNOWN_INPUT_EVENT;
        case InputEventActionType::MOTION_ACTION_DOWN:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_DOWN;
        case InputEventActionType::MOTION_ACTION_MOVE:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_MOVE;
        case InputEventActionType::MOTION_ACTION_UP:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_UP;
        case InputEventActionType::MOTION_ACTION_HOVER_MOVE:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_HOVER_MOVE;
        case InputEventActionType::MOTION_ACTION_SCROLL:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_SCROLL;
        case InputEventActionType::KEY:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__KEY;
    }
}

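// Entry point for a completed input event timeline: updates the per-stage latency histograms and
// checks whether the event should be reported as a slow event.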
void LatencyAggregatorWithHistograms::processTimeline(const InputEventTimeline& timeline) {
    processStatistics(timeline);
    processSlowEvent(timeline);
}

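// Adds a single latency sample, converted to hundreds of microseconds, to the histogram for the
// given identifier and latency stage, if that identifier is currently being tracked.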
void LatencyAggregatorWithHistograms::addSampleToHistogram(
        const InputEventLatencyIdentifier& identifier, LatencyStageIndex latencyStageIndex,
        nsecs_t latency) {
    // Only record positive values for the statistics
    if (latency > 0) {
        auto it = mHistograms.find(identifier);
        if (it != mHistograms.end()) {
            it->second[static_cast<size_t>(latencyStageIndex)].addSample(ns2hus(latency));
        }
    }
}

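// Accumulates per-stage latency samples for the event into the histograms keyed by
// (vendorId, productId, sources, inputEventActionType).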
void LatencyAggregatorWithHistograms::processStatistics(const InputEventTimeline& timeline) {
    // Only gather data for Down, Move, Up and Scroll motion events and Key events
    if (!(timeline.inputEventActionType == InputEventActionType::MOTION_ACTION_DOWN ||
          timeline.inputEventActionType == InputEventActionType::MOTION_ACTION_MOVE ||
          timeline.inputEventActionType == InputEventActionType::MOTION_ACTION_UP ||
          timeline.inputEventActionType == InputEventActionType::MOTION_ACTION_SCROLL ||
          timeline.inputEventActionType == InputEventActionType::KEY))
        return;

    // Don't collect data for unidentified devices. This situation can occur for the first few input
    // events produced when an input device is first connected
    if (timeline.vendorId == 0xFFFF && timeline.productId == 0xFFFF) return;

    InputEventLatencyIdentifier identifier = {timeline.vendorId, timeline.productId,
                                              timeline.sources, timeline.inputEventActionType};
    // Check if there's a value in the mHistograms map associated with the identifier.
    // If not, add an array with 7 empty histograms as an entry
    if (mHistograms.count(identifier) == 0) {
        if (static_cast<int32_t>(timeline.inputEventActionType) - 1 < 0) {
            LOG(FATAL) << "Action index is smaller than 0. Action type: "
                       << ftl::enum_string(timeline.inputEventActionType);
            return;
        }
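        // Convert the action to a zero-based index into the per-action bin size tables; values
        // outside the valid range are rejected by the checks above and below.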
        size_t actionIndex =
                static_cast<size_t>(static_cast<int32_t>(timeline.inputEventActionType) - 1);
        if (actionIndex >= NUM_INPUT_EVENT_TYPES) {
            LOG(FATAL) << "Action index greater than the number of input event types. Action Type: "
                       << ftl::enum_string(timeline.inputEventActionType)
                       << "; Action Type Index: " << actionIndex;
            return;
        }

        std::array<Histogram, 7> histograms =
                {Histogram(allBinSizes[binSizesMappings[0][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[1][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[2][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[3][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[4][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[5][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[6][actionIndex]])};
        mHistograms.insert({identifier, histograms});
    }

    // Process common ones first
    const nsecs_t eventToRead = timeline.readTime - timeline.eventTime;
    addSampleToHistogram(identifier, LatencyStageIndex::EVENT_TO_READ, eventToRead);

    // Now process per-connection ones
    for (const auto& [connectionToken, connectionTimeline] : timeline.connectionTimelines) {
        if (!connectionTimeline.isComplete()) {
            continue;
        }
        const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime;
        const nsecs_t deliverToConsume =
                connectionTimeline.consumeTime - connectionTimeline.deliveryTime;
        const nsecs_t consumeToFinish =
                connectionTimeline.finishTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompletedTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME];
        const nsecs_t presentTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME];
        const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime;
        const nsecs_t endToEnd = presentTime - timeline.eventTime;

        addSampleToHistogram(identifier, LatencyStageIndex::READ_TO_DELIVER, readToDeliver);
        addSampleToHistogram(identifier, LatencyStageIndex::DELIVER_TO_CONSUME, deliverToConsume);
        addSampleToHistogram(identifier, LatencyStageIndex::CONSUME_TO_FINISH, consumeToFinish);
        addSampleToHistogram(identifier, LatencyStageIndex::CONSUME_TO_GPU_COMPLETE,
                             consumeToGpuComplete);
        addSampleToHistogram(identifier, LatencyStageIndex::GPU_COMPLETE_TO_PRESENT,
                             gpuCompleteToPresent);
        addSampleToHistogram(identifier, LatencyStageIndex::END_TO_END, endToEnd);
    }
}

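// Writes one InputEventLatencyReported atom per tracked identifier and latency stage, then clears
// the accumulated histograms.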
void LatencyAggregatorWithHistograms::pushLatencyStatistics() {
    for (auto& [id, histograms] : mHistograms) {
        auto [vendorId, productId, sources, action] = id;
        for (size_t latencyStageIndex = static_cast<size_t>(LatencyStageIndex::EVENT_TO_READ);
             latencyStageIndex < static_cast<size_t>(LatencyStageIndex::SIZE);
             ++latencyStageIndex) {
            // Convert sources set to vector for atom logging:
            std::vector<int32_t> sourcesVector = {};
            for (auto& elem : sources) {
                sourcesVector.push_back(static_cast<int32_t>(elem));
            }

            // Convert histogram bin counts array to vector for atom logging:
            std::array arr = histograms[latencyStageIndex].getBinCounts();
            std::vector<int32_t> binCountsVector(arr.begin(), arr.end());

            if (static_cast<int32_t>(action) - 1 < 0) {
                ALOGW("Action index is smaller than 0. Action type: %s",
                      ftl::enum_string(action).c_str());
                continue;
            }
            size_t actionIndex = static_cast<size_t>(static_cast<int32_t>(action) - 1);
            if (actionIndex >= NUM_INPUT_EVENT_TYPES) {
                ALOGW("Action index greater than the number of input event types. Action Type: %s; "
                      "Action Type Index: %zu",
                      ftl::enum_string(action).c_str(), actionIndex);
                continue;
            }

            stats_write(android::util::INPUT_EVENT_LATENCY_REPORTED, vendorId, productId,
                        sourcesVector, InputEventTypeEnumToAtomEnum(action),
                        LatencyStageIndexToAtomEnum(
                                static_cast<LatencyStageIndex>(latencyStageIndex)),
                        histogramVersions[latencyStageIndex][actionIndex], binCountsVector);
        }
    }
    mHistograms.clear();
}

// TODO (b/270049345): For now, we just copied the code from LatencyAggregator to populate the old
// atom, but eventually we should migrate this to use the new SlowEventReported atom
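// Pushes a SLOW_INPUT_EVENT_REPORTED atom when an event's end-to-end latency exceeds the
// configured threshold, rate-limited to at most one report per reporting interval.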
void LatencyAggregatorWithHistograms::processSlowEvent(const InputEventTimeline& timeline) {
    static const std::chrono::duration sSlowEventThreshold = getSlowEventMinReportingLatency();
    static const std::chrono::duration sSlowEventReportingInterval =
            getSlowEventMinReportingInterval();
    for (const auto& [token, connectionTimeline] : timeline.connectionTimelines) {
        if (!connectionTimeline.isComplete()) {
            continue;
        }
        mNumEventsSinceLastSlowEventReport++;
        const nsecs_t presentTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME];
        const std::chrono::nanoseconds endToEndLatency =
                std::chrono::nanoseconds(presentTime - timeline.eventTime);
        if (endToEndLatency < sSlowEventThreshold) {
            continue;
        }
        // This is a slow event. Before we report it, check if we are reporting too often
        const std::chrono::duration elapsedSinceLastReport =
                std::chrono::nanoseconds(timeline.eventTime - mLastSlowEventTime);
        if (elapsedSinceLastReport < sSlowEventReportingInterval) {
            mNumSkippedSlowEvents++;
            continue;
        }

        const nsecs_t eventToRead = timeline.readTime - timeline.eventTime;
        const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime;
        const nsecs_t deliverToConsume =
                connectionTimeline.consumeTime - connectionTimeline.deliveryTime;
        const nsecs_t consumeToFinish =
                connectionTimeline.finishTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompletedTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME];
        const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime;

        android::util::stats_write(android::util::SLOW_INPUT_EVENT_REPORTED,
                                   timeline.inputEventActionType ==
                                           InputEventActionType::MOTION_ACTION_DOWN,
                                   static_cast<int32_t>(ns2us(eventToRead)),
                                   static_cast<int32_t>(ns2us(readToDeliver)),
                                   static_cast<int32_t>(ns2us(deliverToConsume)),
                                   static_cast<int32_t>(ns2us(consumeToFinish)),
                                   static_cast<int32_t>(ns2us(consumeToGpuComplete)),
                                   static_cast<int32_t>(ns2us(gpuCompleteToPresent)),
                                   static_cast<int32_t>(ns2us(endToEndLatency.count())),
                                   static_cast<int32_t>(mNumEventsSinceLastSlowEventReport),
                                   static_cast<int32_t>(mNumSkippedSlowEvents));
        mNumEventsSinceLastSlowEventReport = 0;
        mNumSkippedSlowEvents = 0;
        mLastSlowEventTime = timeline.readTime;
    }
}

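// Dumps the accumulated histograms (one row of bin counts per latency stage) and the slow-event
// bookkeeping, prefixing each line with |prefix|.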
std::string LatencyAggregatorWithHistograms::dump(const char* prefix) const {
    std::string statisticsStr = StringPrintf("%s Histograms:\n", prefix);
    for (const auto& [id, histograms] : mHistograms) {
        auto [vendorId, productId, sources, action] = id;

        std::string identifierStr =
                StringPrintf("%s  Identifier: vendor %d, product %d, sources: {", prefix, vendorId,
                             productId);
        bool firstSource = true;
        for (const auto& source : sources) {
            if (!firstSource) {
                identifierStr += ", ";
            }
            identifierStr += StringPrintf("%d", static_cast<int32_t>(source));
            firstSource = false;
        }
        identifierStr += StringPrintf("}, action: %d\n", static_cast<int32_t>(action));

        std::string histogramsStr;
        for (size_t stageIndex = 0; stageIndex < static_cast<size_t>(LatencyStageIndex::SIZE);
             stageIndex++) {
            const auto& histogram = histograms[stageIndex];
            const std::array<int, NUM_BINS>& binCounts = histogram.getBinCounts();

            histogramsStr += StringPrintf("%s   %zu: ", prefix, stageIndex);
            histogramsStr += StringPrintf("%d", binCounts[0]);
            for (size_t bin = 1; bin < NUM_BINS; bin++) {
                histogramsStr += StringPrintf(", %d", binCounts[bin]);
            }
            histogramsStr += StringPrintf("\n");
        }
        statisticsStr += identifierStr + histogramsStr;
    }

    return StringPrintf("%sLatencyAggregatorWithHistograms:\n", prefix) + statisticsStr +
            StringPrintf("%s  mLastSlowEventTime=%" PRId64 "\n", prefix, mLastSlowEventTime) +
            StringPrintf("%s  mNumEventsSinceLastSlowEventReport = %zu\n", prefix,
                         mNumEventsSinceLastSlowEventReport) +
            StringPrintf("%s  mNumSkippedSlowEvents = %zu\n", prefix, mNumSkippedSlowEvents);
}

} // namespace android::inputdispatcher