/**
 * Copyright (c) 2020, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "carwatchdogd"

#include "IoPerfCollection.h"

#include <WatchdogProperties.sysprop.h>
#include <android-base/file.h>
#include <android-base/stringprintf.h>
#include <log/log.h>

#include <inttypes.h>

#include <ctime>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace android {
namespace automotive {
namespace watchdog {

using ::android::wp;
using ::android::base::Error;
using ::android::base::Result;
using ::android::base::StringAppendF;
using ::android::base::StringPrintf;
using ::android::base::WriteStringToFd;

namespace {

const int32_t kDefaultTopNStatsPerCategory = 10;
const int32_t kDefaultTopNStatsPerSubcategory = 5;

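// Returns |numer| as a percentage of |denom|, guarding against division by
// zero. For example (illustrative values), percentage(25, 200) returns 12.5
// and percentage(5, 0) returns 0.0 rather than dividing by zero.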
double percentage(uint64_t numer, uint64_t denom) {
    return denom == 0 ? 0.0 : (static_cast<double>(numer) / static_cast<double>(denom)) * 100.0;
}

struct UidProcessStats {
    struct ProcessInfo {
        std::string comm = "";
        uint64_t count = 0;
    };
    uint64_t uid = 0;
    uint32_t ioBlockedTasksCnt = 0;
    uint32_t totalTasksCnt = 0;
    uint64_t majorFaults = 0;
    std::vector<ProcessInfo> topNIoBlockedProcesses = {};
    std::vector<ProcessInfo> topNMajorFaultProcesses = {};
};

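// Aggregates per-process stats into per-UID stats. Each UID entry maintains
// two fixed-size, descending-ordered lists (top N I/O blocked processes and
// top N major fault processes) with a single insertion pass per process: walk
// the list, insert before the first entry with a smaller count, and drop the
// displaced tail entry. For example (illustrative counts), inserting a
// process with 4 I/O blocked tasks into a list with counts [5, 3, 1] yields
// [5, 4, 3].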
std::unique_ptr<std::unordered_map<uid_t, UidProcessStats>> getUidProcessStats(
        const std::vector<ProcessStats>& processStats, int topNStatsPerSubCategory) {
    std::unique_ptr<std::unordered_map<uid_t, UidProcessStats>> uidProcessStats(
            new std::unordered_map<uid_t, UidProcessStats>());
    for (const auto& stats : processStats) {
        if (stats.uid < 0) {
            continue;
        }
        uid_t uid = static_cast<uid_t>(stats.uid);
        if (uidProcessStats->find(uid) == uidProcessStats->end()) {
            (*uidProcessStats)[uid] = UidProcessStats{
                    .uid = uid,
                    .topNIoBlockedProcesses = std::vector<
                            UidProcessStats::ProcessInfo>(topNStatsPerSubCategory,
                                                          UidProcessStats::ProcessInfo{}),
                    .topNMajorFaultProcesses = std::vector<
                            UidProcessStats::ProcessInfo>(topNStatsPerSubCategory,
                                                          UidProcessStats::ProcessInfo{}),
            };
        }
        auto& curUidProcessStats = (*uidProcessStats)[uid];
        // The top-level process stats contain the aggregated major page fault
        // count, which persists across thread creation/termination. Thus, use
        // the value from this field.
        curUidProcessStats.majorFaults += stats.process.majorFaults;
        curUidProcessStats.totalTasksCnt += stats.threads.size();
        // The process state is the same as the main thread's state. Thus, to
        // avoid double counting, ignore the process state.
        uint32_t ioBlockedTasksCnt = 0;
        for (const auto& threadStat : stats.threads) {
            ioBlockedTasksCnt += threadStat.second.state == "D" ? 1 : 0;
        }
        curUidProcessStats.ioBlockedTasksCnt += ioBlockedTasksCnt;
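        // Insert this process into the UID's top N lists: walk the
        // descending-ordered list, insert before the first entry with a
        // smaller count, and evict the smallest (last) entry to keep the list
        // at a fixed size.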
        for (auto it = curUidProcessStats.topNIoBlockedProcesses.begin();
             it != curUidProcessStats.topNIoBlockedProcesses.end(); ++it) {
            if (it->count < ioBlockedTasksCnt) {
                curUidProcessStats.topNIoBlockedProcesses
                        .emplace(it,
                                 UidProcessStats::ProcessInfo{
                                         .comm = stats.process.comm,
                                         .count = ioBlockedTasksCnt,
                                 });
                curUidProcessStats.topNIoBlockedProcesses.pop_back();
                break;
            }
        }
        for (auto it = curUidProcessStats.topNMajorFaultProcesses.begin();
             it != curUidProcessStats.topNMajorFaultProcesses.end(); ++it) {
            if (it->count < stats.process.majorFaults) {
                curUidProcessStats.topNMajorFaultProcesses
                        .emplace(it,
                                 UidProcessStats::ProcessInfo{
                                         .comm = stats.process.comm,
                                         .count = stats.process.majorFaults,
                                 });
                curUidProcessStats.topNMajorFaultProcesses.pop_back();
                break;
            }
        }
    }
    return uidProcessStats;
}

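// Verifies that all three data collectors are non-null, combining the error
// messages for any missing collectors into a single failure result.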
Result<void> checkDataCollectors(const wp<UidIoStats>& uidIoStats, const wp<ProcStat>& procStat,
                                 const wp<ProcPidStat>& procPidStat) {
    if (uidIoStats != nullptr && procStat != nullptr && procPidStat != nullptr) {
        return {};
    }
    std::string error;
    if (uidIoStats == nullptr) {
        error = "Per-UID I/O stats collector must not be null";
    }
    if (procStat == nullptr) {
        StringAppendF(&error, "%s%s", error.empty() ? "" : ", ",
                      "Proc stats collector must not be null");
    }
    if (procPidStat == nullptr) {
        StringAppendF(&error, "%s%s", error.empty() ? "" : ", ",
                      "Per-process stats collector must not be null");
    }

    return Error() << "Invalid data collectors: " << error;
}

}  // namespace

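// Formats the top N reads and writes as CSV-style rows under a header line.
// A row might look like (hypothetical values):
// 10, com.example.app, 1024, 12.50%, 3, 10.00%, 512, 6.25%, 1, 5.00%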
std::string toString(const UidIoPerfData& data) {
    std::string buffer;
    if (data.topNReads.size() > 0) {
        StringAppendF(&buffer, "\nTop N Reads:\n%s\n", std::string(12, '-').c_str());
        StringAppendF(&buffer,
                      "Android User ID, Package Name, Foreground Bytes, Foreground Bytes %%, "
                      "Foreground Fsync, Foreground Fsync %%, Background Bytes, "
                      "Background Bytes %%, Background Fsync, Background Fsync %%\n");
    }
    for (const auto& stat : data.topNReads) {
        StringAppendF(&buffer, "%" PRIu32 ", %s", stat.userId, stat.packageName.c_str());
        for (int i = 0; i < UID_STATES; ++i) {
            StringAppendF(&buffer, ", %" PRIi64 ", %.2f%%, %" PRIi64 ", %.2f%%", stat.bytes[i],
                          percentage(stat.bytes[i], data.total[READ_BYTES][i]), stat.fsync[i],
                          percentage(stat.fsync[i], data.total[FSYNC_COUNT][i]));
        }
        StringAppendF(&buffer, "\n");
    }
    if (data.topNWrites.size() > 0) {
        StringAppendF(&buffer, "\nTop N Writes:\n%s\n", std::string(13, '-').c_str());
        StringAppendF(&buffer,
                      "Android User ID, Package Name, Foreground Bytes, Foreground Bytes %%, "
                      "Foreground Fsync, Foreground Fsync %%, Background Bytes, "
                      "Background Bytes %%, Background Fsync, Background Fsync %%\n");
    }
    for (const auto& stat : data.topNWrites) {
        StringAppendF(&buffer, "%" PRIu32 ", %s", stat.userId, stat.packageName.c_str());
        for (int i = 0; i < UID_STATES; ++i) {
            StringAppendF(&buffer, ", %" PRIi64 ", %.2f%%, %" PRIi64 ", %.2f%%", stat.bytes[i],
                          percentage(stat.bytes[i], data.total[WRITE_BYTES][i]), stat.fsync[i],
                          percentage(stat.fsync[i], data.total[FSYNC_COUNT][i]));
        }
        StringAppendF(&buffer, "\n");
    }
    return buffer;
}

std::string toString(const SystemIoPerfData& data) {
    std::string buffer;
    StringAppendF(&buffer, "CPU I/O wait time/percent: %" PRIu64 " / %.2f%%\n", data.cpuIoWaitTime,
                  percentage(data.cpuIoWaitTime, data.totalCpuTime));
    StringAppendF(&buffer, "Number of I/O blocked processes/percent: %" PRIu32 " / %.2f%%\n",
                  data.ioBlockedProcessesCnt,
                  percentage(data.ioBlockedProcessesCnt, data.totalProcessesCnt));
    return buffer;
}

std::string toString(const ProcessIoPerfData& data) {
    std::string buffer;
    StringAppendF(&buffer, "Number of major page faults since last collection: %" PRIu64 "\n",
                  data.totalMajorFaults);
    StringAppendF(&buffer,
                  "Percentage of change in major page faults since last collection: %.2f%%\n",
                  data.majorFaultsPercentChange);
    if (data.topNMajorFaultUids.size() > 0) {
        StringAppendF(&buffer, "\nTop N major page faults:\n%s\n", std::string(24, '-').c_str());
        StringAppendF(&buffer,
                      "Android User ID, Package Name, Number of major page faults, "
                      "Percentage of total major page faults\n");
        StringAppendF(&buffer,
                      "\tCommand, Number of major page faults, Percentage of UID's major page "
                      "faults\n");
    }
    for (const auto& uidStats : data.topNMajorFaultUids) {
        StringAppendF(&buffer, "%" PRIu32 ", %s, %" PRIu64 ", %.2f%%\n", uidStats.userId,
                      uidStats.packageName.c_str(), uidStats.count,
                      percentage(uidStats.count, data.totalMajorFaults));
        for (const auto& procStats : uidStats.topNProcesses) {
            StringAppendF(&buffer, "\t%s, %" PRIu64 ", %.2f%%\n", procStats.comm.c_str(),
                          procStats.count, percentage(procStats.count, uidStats.count));
        }
    }
    if (data.topNIoBlockedUids.size() > 0) {
        StringAppendF(&buffer, "\nTop N I/O waiting UIDs:\n%s\n", std::string(23, '-').c_str());
        StringAppendF(&buffer,
                      "Android User ID, Package Name, Number of owned tasks waiting for I/O, "
                      "Percentage of owned tasks waiting for I/O\n");
        StringAppendF(&buffer,
                      "\tCommand, Number of I/O waiting tasks, Percentage of UID's tasks waiting "
                      "for I/O\n");
    }
    for (size_t i = 0; i < data.topNIoBlockedUids.size(); ++i) {
        const auto& uidStats = data.topNIoBlockedUids[i];
        StringAppendF(&buffer, "%" PRIu32 ", %s, %" PRIu64 ", %.2f%%\n", uidStats.userId,
                      uidStats.packageName.c_str(), uidStats.count,
                      percentage(uidStats.count, data.topNIoBlockedUidsTotalTaskCnt[i]));
        for (const auto& procStats : uidStats.topNProcesses) {
            StringAppendF(&buffer, "\t%s, %" PRIu64 ", %.2f%%\n", procStats.comm.c_str(),
                          procStats.count, percentage(procStats.count, uidStats.count));
        }
    }
    return buffer;
}

std::string toString(const IoPerfRecord& record) {
    std::string buffer;
    StringAppendF(&buffer, "%s%s%s", toString(record.systemIoPerfData).c_str(),
                  toString(record.processIoPerfData).c_str(),
                  toString(record.uidIoPerfData).c_str());
    return buffer;
}

std::string toString(const CollectionInfo& collectionInfo) {
    if (collectionInfo.records.empty()) {
        return kEmptyCollectionMessage;
    }
    std::string buffer;
    double duration =
            difftime(collectionInfo.records.back().time, collectionInfo.records.front().time);
    StringAppendF(&buffer, "Collection duration: %.f seconds\nNumber of collections: %zu\n",
                  duration, collectionInfo.records.size());

    for (size_t i = 0; i < collectionInfo.records.size(); ++i) {
        const auto& record = collectionInfo.records[i];
        std::stringstream timestamp;
        timestamp << std::put_time(std::localtime(&record.time), "%c %Z");
        StringAppendF(&buffer, "\nCollection %zu: <%s>\n%s\n%s", i, timestamp.str().c_str(),
                      std::string(45, '=').c_str(), toString(record).c_str());
    }
    return buffer;
}

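// Reads the top N and periodic-buffer limits from system properties (the
// |ro.carwatchdog.*| properties referenced in the comments below), falling
// back to the compiled-in defaults when a property is unset. The non-zero
// check on the limits doubles as a guard against repeated initialization.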
Result<void> IoPerfCollection::init() {
    Mutex::Autolock lock(mMutex);
    if (mTopNStatsPerCategory != 0 || mTopNStatsPerSubcategory != 0) {
        return Error() << "Cannot initialize " << name() << " more than once";
    }
    mTopNStatsPerCategory = static_cast<int>(
            sysprop::topNStatsPerCategory().value_or(kDefaultTopNStatsPerCategory));
    mTopNStatsPerSubcategory = static_cast<int>(
            sysprop::topNStatsPerSubcategory().value_or(kDefaultTopNStatsPerSubcategory));
    size_t periodicCollectionBufferSize = static_cast<size_t>(
            sysprop::periodicCollectionBufferSize().value_or(kDefaultPeriodicCollectionBufferSize));
    mBoottimeCollection = {
            .maxCacheSize = std::numeric_limits<std::size_t>::max(),
            .records = {},
    };
    mPeriodicCollection = {
            .maxCacheSize = periodicCollectionBufferSize,
            .records = {},
    };
    mCustomCollection = {
            .maxCacheSize = std::numeric_limits<std::size_t>::max(),
            .records = {},
    };
    return {};
}

void IoPerfCollection::terminate() {
    Mutex::Autolock lock(mMutex);

    ALOGW("Terminating %s", name().c_str());

    mBoottimeCollection.records.clear();
    mBoottimeCollection = {};

    mPeriodicCollection.records.clear();
    mPeriodicCollection = {};

    mCustomCollection.records.clear();
    mCustomCollection = {};
}

Result<void> IoPerfCollection::onDump(int fd) {
    Mutex::Autolock lock(mMutex);
    if (!WriteStringToFd(StringPrintf("%s\nBoot-time I/O performance report:\n%s\n",
                                      std::string(75, '-').c_str(), std::string(33, '=').c_str()),
                         fd) ||
        !WriteStringToFd(toString(mBoottimeCollection), fd) ||
        !WriteStringToFd(StringPrintf("%s\nLast N minutes I/O performance report:\n%s\n",
                                      std::string(75, '-').c_str(), std::string(38, '=').c_str()),
                         fd) ||
        !WriteStringToFd(toString(mPeriodicCollection), fd)) {
        return Error(FAILED_TRANSACTION)
                << "Failed to dump the boot-time and periodic collection reports.";
    }
    return {};
}

Result<void> IoPerfCollection::onCustomCollectionDump(int fd) {
    if (fd == -1) {
        // The custom collection has ended, so clear the cached records.
        mCustomCollection.records.clear();
        mCustomCollection = {
                .maxCacheSize = std::numeric_limits<std::size_t>::max(),
                .records = {},
        };
        return {};
    }

    if (!WriteStringToFd(StringPrintf("%s\nCustom I/O performance data report:\n%s\n",
                                      std::string(75, '-').c_str(), std::string(75, '-').c_str()),
                         fd) ||
        !WriteStringToFd(toString(mCustomCollection), fd)) {
        return Error(FAILED_TRANSACTION) << "Failed to write custom I/O collection report.";
    }

    return {};
}

Result<void> IoPerfCollection::onBoottimeCollection(time_t time, const wp<UidIoStats>& uidIoStats,
                                                    const wp<ProcStat>& procStat,
                                                    const wp<ProcPidStat>& procPidStat) {
    auto result = checkDataCollectors(uidIoStats, procStat, procPidStat);
    if (!result.ok()) {
        return result;
    }
    Mutex::Autolock lock(mMutex);
    return processLocked(time, std::unordered_set<std::string>(), uidIoStats, procStat, procPidStat,
                         &mBoottimeCollection);
}

Result<void> IoPerfCollection::onPeriodicCollection(time_t time,
                                                    [[maybe_unused]] SystemState systemState,
                                                    const wp<UidIoStats>& uidIoStats,
                                                    const wp<ProcStat>& procStat,
                                                    const wp<ProcPidStat>& procPidStat) {
    auto result = checkDataCollectors(uidIoStats, procStat, procPidStat);
    if (!result.ok()) {
        return result;
    }
    Mutex::Autolock lock(mMutex);
    return processLocked(time, std::unordered_set<std::string>(), uidIoStats, procStat, procPidStat,
                         &mPeriodicCollection);
}

Result<void> IoPerfCollection::onCustomCollection(
        time_t time, [[maybe_unused]] SystemState systemState,
        const std::unordered_set<std::string>& filterPackages, const wp<UidIoStats>& uidIoStats,
        const wp<ProcStat>& procStat, const wp<ProcPidStat>& procPidStat) {
    auto result = checkDataCollectors(uidIoStats, procStat, procPidStat);
    if (!result.ok()) {
        return result;
    }
    Mutex::Autolock lock(mMutex);
    return processLocked(time, filterPackages, uidIoStats, procStat, procPidStat,
                         &mCustomCollection);
}

Result<void> IoPerfCollection::processLocked(time_t time,
                                             const std::unordered_set<std::string>& filterPackages,
                                             const wp<UidIoStats>& uidIoStats,
                                             const wp<ProcStat>& procStat,
                                             const wp<ProcPidStat>& procPidStat,
                                             CollectionInfo* collectionInfo) {
    if (collectionInfo->maxCacheSize == 0) {
        return Error() << "Maximum cache size cannot be 0";
    }
    IoPerfRecord record{
            .time = time,
    };
    processSystemIoPerfData(procStat, &record.systemIoPerfData);
    processProcessIoPerfDataLocked(filterPackages, procPidStat, &record.processIoPerfData);
    processUidIoPerfData(filterPackages, uidIoStats, &record.uidIoPerfData);
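    // Evict the oldest record once the cache is full so that |records|
    // behaves as a bounded FIFO buffer of at most |maxCacheSize| entries.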
    if (collectionInfo->records.size() >= collectionInfo->maxCacheSize) {
        collectionInfo->records.erase(collectionInfo->records.begin());  // Erase the oldest record.
    }
    collectionInfo->records.emplace_back(record);
    return {};
}

void IoPerfCollection::processUidIoPerfData(const std::unordered_set<std::string>& filterPackages,
                                            const wp<UidIoStats>& uidIoStats,
                                            UidIoPerfData* uidIoPerfData) const {
    const std::unordered_map<uid_t, UidIoUsage>& usages = uidIoStats.promote()->deltaStats();

    // Fetch only the top N reads and writes from the usage records.
    UidIoUsage tempUsage = {};
    std::vector<const UidIoUsage*> topNReads(mTopNStatsPerCategory, &tempUsage);
    std::vector<const UidIoUsage*> topNWrites(mTopNStatsPerCategory, &tempUsage);
    std::vector<uid_t> uids;

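    // |topNReads| and |topNWrites| start pre-filled with pointers to a zeroed
    // placeholder so the loops below can treat them as fixed-size,
    // descending-ordered lists. When |filterPackages| is non-empty, the tail
    // is not evicted on insertion, so no candidate package is dropped before
    // the filtering pass further below.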
    for (const auto& uIt : usages) {
        const UidIoUsage& curUsage = uIt.second;
        uids.push_back(curUsage.uid);
        uidIoPerfData->total[READ_BYTES][FOREGROUND] +=
                curUsage.ios.metrics[READ_BYTES][FOREGROUND];
        uidIoPerfData->total[READ_BYTES][BACKGROUND] +=
                curUsage.ios.metrics[READ_BYTES][BACKGROUND];
        uidIoPerfData->total[WRITE_BYTES][FOREGROUND] +=
                curUsage.ios.metrics[WRITE_BYTES][FOREGROUND];
        uidIoPerfData->total[WRITE_BYTES][BACKGROUND] +=
                curUsage.ios.metrics[WRITE_BYTES][BACKGROUND];
        uidIoPerfData->total[FSYNC_COUNT][FOREGROUND] +=
                curUsage.ios.metrics[FSYNC_COUNT][FOREGROUND];
        uidIoPerfData->total[FSYNC_COUNT][BACKGROUND] +=
                curUsage.ios.metrics[FSYNC_COUNT][BACKGROUND];

        for (auto it = topNReads.begin(); it != topNReads.end(); ++it) {
            const UidIoUsage* curRead = *it;
            if (curRead->ios.sumReadBytes() < curUsage.ios.sumReadBytes()) {
                topNReads.emplace(it, &curUsage);
                if (filterPackages.empty()) {
                    topNReads.pop_back();
                }
                break;
            }
        }
        for (auto it = topNWrites.begin(); it != topNWrites.end(); ++it) {
            const UidIoUsage* curWrite = *it;
            if (curWrite->ios.sumWriteBytes() < curUsage.ios.sumWriteBytes()) {
                topNWrites.emplace(it, &curUsage);
                if (filterPackages.empty()) {
                    topNWrites.pop_back();
                }
                break;
            }
        }
    }

    const auto& uidToPackageNameMapping = mPackageInfoResolver->getPackageNamesForUids(uids);

    // Convert the top N I/O usage to UidIoPerfData.
    for (const auto& usage : topNReads) {
        if (usage->ios.isZero()) {
            // End of non-zero usage records. This case occurs when the number of UIDs with active
            // I/O operations is < |ro.carwatchdog.top_n_stats_per_category|.
            break;
        }
        UidIoPerfData::Stats stats = {
                .userId = multiuser_get_user_id(usage->uid),
                .packageName = std::to_string(usage->uid),
                .bytes = {usage->ios.metrics[READ_BYTES][FOREGROUND],
                          usage->ios.metrics[READ_BYTES][BACKGROUND]},
                .fsync = {usage->ios.metrics[FSYNC_COUNT][FOREGROUND],
                          usage->ios.metrics[FSYNC_COUNT][BACKGROUND]},
        };
        if (uidToPackageNameMapping.find(usage->uid) != uidToPackageNameMapping.end()) {
            stats.packageName = uidToPackageNameMapping.at(usage->uid);
        }
        if (!filterPackages.empty() &&
            filterPackages.find(stats.packageName) == filterPackages.end()) {
            continue;
        }
        uidIoPerfData->topNReads.emplace_back(stats);
    }

    for (const auto& usage : topNWrites) {
        if (usage->ios.isZero()) {
            // End of non-zero usage records. This case occurs when the number of UIDs with active
            // I/O operations is < |ro.carwatchdog.top_n_stats_per_category|.
            break;
        }
        UidIoPerfData::Stats stats = {
                .userId = multiuser_get_user_id(usage->uid),
                .packageName = std::to_string(usage->uid),
                .bytes = {usage->ios.metrics[WRITE_BYTES][FOREGROUND],
                          usage->ios.metrics[WRITE_BYTES][BACKGROUND]},
                .fsync = {usage->ios.metrics[FSYNC_COUNT][FOREGROUND],
                          usage->ios.metrics[FSYNC_COUNT][BACKGROUND]},
        };
        if (uidToPackageNameMapping.find(usage->uid) != uidToPackageNameMapping.end()) {
            stats.packageName = uidToPackageNameMapping.at(usage->uid);
        }
        if (!filterPackages.empty() &&
            filterPackages.find(stats.packageName) == filterPackages.end()) {
            continue;
        }
        uidIoPerfData->topNWrites.emplace_back(stats);
    }
}

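// Copies the system-wide deltas reported by the proc stats collector; the
// matching toString() renders the I/O wait time and I/O blocked process count
// as percentages of their respective totals.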
void IoPerfCollection::processSystemIoPerfData(const wp<ProcStat>& procStat,
                                               SystemIoPerfData* systemIoPerfData) const {
    const ProcStatInfo& procStatInfo = procStat.promote()->deltaStats();
    systemIoPerfData->cpuIoWaitTime = procStatInfo.cpuStats.ioWaitTime;
    systemIoPerfData->totalCpuTime = procStatInfo.totalCpuTime();
    systemIoPerfData->ioBlockedProcessesCnt = procStatInfo.ioBlockedProcessesCnt;
    systemIoPerfData->totalProcessesCnt = procStatInfo.totalProcessesCnt();
}

void IoPerfCollection::processProcessIoPerfDataLocked(
        const std::unordered_set<std::string>& filterPackages, const wp<ProcPidStat>& procPidStat,
        ProcessIoPerfData* processIoPerfData) {
    const std::vector<ProcessStats>& processStats = procPidStat.promote()->deltaStats();

    const auto& uidProcessStats = getUidProcessStats(processStats, mTopNStatsPerSubcategory);
    std::vector<uid_t> uids;
    // Fetch only the top N I/O blocked UIDs and UIDs with most major page faults.
    UidProcessStats temp = {};
    std::vector<const UidProcessStats*> topNIoBlockedUids(mTopNStatsPerCategory, &temp);
    std::vector<const UidProcessStats*> topNMajorFaultUids(mTopNStatsPerCategory, &temp);
    processIoPerfData->totalMajorFaults = 0;
    for (const auto& it : *uidProcessStats) {
        const UidProcessStats& curStats = it.second;
        uids.push_back(curStats.uid);
        processIoPerfData->totalMajorFaults += curStats.majorFaults;
        for (auto it = topNIoBlockedUids.begin(); it != topNIoBlockedUids.end(); ++it) {
            const UidProcessStats* topStats = *it;
            if (topStats->ioBlockedTasksCnt < curStats.ioBlockedTasksCnt) {
                topNIoBlockedUids.emplace(it, &curStats);
                if (filterPackages.empty()) {
                    topNIoBlockedUids.pop_back();
                }
                break;
            }
        }
        for (auto it = topNMajorFaultUids.begin(); it != topNMajorFaultUids.end(); ++it) {
            const UidProcessStats* topStats = *it;
            if (topStats->majorFaults < curStats.majorFaults) {
                topNMajorFaultUids.emplace(it, &curStats);
                if (filterPackages.empty()) {
                    topNMajorFaultUids.pop_back();
                }
                break;
            }
        }
    }

    const auto& uidToPackageNameMapping = mPackageInfoResolver->getPackageNamesForUids(uids);

    // Convert the top N UID process stats to ProcessIoPerfData.
    for (const auto& it : topNIoBlockedUids) {
        if (it->ioBlockedTasksCnt == 0) {
            // End of non-zero elements. This case occurs when the number of UIDs with I/O blocked
            // processes is < |ro.carwatchdog.top_n_stats_per_category|.
            break;
        }
        ProcessIoPerfData::UidStats stats = {
                .userId = multiuser_get_user_id(it->uid),
                .packageName = std::to_string(it->uid),
                .count = it->ioBlockedTasksCnt,
        };
        if (uidToPackageNameMapping.find(it->uid) != uidToPackageNameMapping.end()) {
            stats.packageName = uidToPackageNameMapping.at(it->uid);
        }
        if (!filterPackages.empty() &&
            filterPackages.find(stats.packageName) == filterPackages.end()) {
            continue;
        }
        for (const auto& pIt : it->topNIoBlockedProcesses) {
            if (pIt.count == 0) {
                break;
            }
            stats.topNProcesses.emplace_back(
                    ProcessIoPerfData::UidStats::ProcessStats{pIt.comm, pIt.count});
        }
        processIoPerfData->topNIoBlockedUids.emplace_back(stats);
        processIoPerfData->topNIoBlockedUidsTotalTaskCnt.emplace_back(it->totalTasksCnt);
    }
    for (const auto& it : topNMajorFaultUids) {
        if (it->majorFaults == 0) {
            // End of non-zero elements. This case occurs when the number of UIDs with major faults
            // is < |ro.carwatchdog.top_n_stats_per_category|.
            break;
        }
        ProcessIoPerfData::UidStats stats = {
                .userId = multiuser_get_user_id(it->uid),
                .packageName = std::to_string(it->uid),
                .count = it->majorFaults,
        };
        if (uidToPackageNameMapping.find(it->uid) != uidToPackageNameMapping.end()) {
            stats.packageName = uidToPackageNameMapping.at(it->uid);
        }
        if (!filterPackages.empty() &&
            filterPackages.find(stats.packageName) == filterPackages.end()) {
            continue;
        }
        for (const auto& pIt : it->topNMajorFaultProcesses) {
            if (pIt.count == 0) {
                break;
            }
            stats.topNProcesses.emplace_back(
                    ProcessIoPerfData::UidStats::ProcessStats{pIt.comm, pIt.count});
        }
        processIoPerfData->topNMajorFaultUids.emplace_back(stats);
    }
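    // Percent change relative to the previous collection. For example
    // (illustrative values): 1000 major faults in the last collection and
    // 1200 now is a +20% change. The first collection reports 0 because there
    // is no baseline to compare against.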
    if (mLastMajorFaults == 0) {
        processIoPerfData->majorFaultsPercentChange = 0;
    } else {
        int64_t increase = processIoPerfData->totalMajorFaults - mLastMajorFaults;
        processIoPerfData->majorFaultsPercentChange =
                (static_cast<double>(increase) / static_cast<double>(mLastMajorFaults)) * 100.0;
    }
    mLastMajorFaults = processIoPerfData->totalMajorFaults;
}

}  // namespace watchdog
}  // namespace automotive
}  // namespace android