/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <bpf_timeinstate.h>

#include <sys/sysinfo.h>

#include <pthread.h>
#include <semaphore.h>
#include <time.h>
#include <unistd.h>

#include <numeric>
#include <unordered_map>
#include <vector>

#include <gtest/gtest.h>

#include <android-base/properties.h>
#include <android-base/unique_fd.h>
#include <bpf/BpfMap.h>
#include <cputimeinstate.h>
#include <cutils/android_filesystem_config.h>
#include <libbpf.h>

namespace android {
namespace bpf {

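// Time constants used as sanity bounds on the values reported by the BPF maps.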
static constexpr uint64_t NSEC_PER_SEC = 1000000000;
static constexpr uint64_t NSEC_PER_YEAR = NSEC_PER_SEC * 60 * 60 * 24 * 365;

// Declare the busy-loop accumulator globally and mark it used so the compiler
// cannot optimize the loop in useCpu() away.
static long sum __attribute__((used)) = 0;

using std::vector;

class TimeInStateTest : public testing::Test {
  protected:
    TimeInStateTest() {}

    void SetUp() {
        if (!isTrackingUidTimesSupported() ||
            !android::base::GetBoolProperty("sys.init.perf_lsm_hooks", false)) {
            GTEST_SKIP();
        }
    }
};

TEST_F(TimeInStateTest, TotalTimeInState) {
    auto times = getTotalCpuFreqTimes();
    ASSERT_TRUE(times.has_value());
    EXPECT_FALSE(times->empty());
}

TEST_F(TimeInStateTest, SingleUidTimeInState) {
    auto times = getUidCpuFreqTimes(0);
    ASSERT_TRUE(times.has_value());
    EXPECT_FALSE(times->empty());
}

TEST_F(TimeInStateTest, SingleUidConcurrentTimes) {
    auto concurrentTimes = getUidConcurrentTimes(0);
    ASSERT_TRUE(concurrentTimes.has_value());
    ASSERT_FALSE(concurrentTimes->active.empty());
    ASSERT_FALSE(concurrentTimes->policy.empty());

    uint64_t policyEntries = 0;
    for (const auto &policyTimeVec : concurrentTimes->policy) policyEntries += policyTimeVec.size();
    ASSERT_EQ(concurrentTimes->active.size(), policyEntries);
}

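// Check the internal consistency of a single concurrent_time_t entry: the
// per-policy concurrency times must fully account for the globally-active
// concurrency times, whether the levels are matched up from the lowest
// concurrency level (first loop) or from the highest (second loop).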
static void TestConcurrentTimesConsistent(const struct concurrent_time_t &concurrentTime) {
    size_t maxPolicyCpus = 0;
    for (const auto &vec : concurrentTime.policy) {
        maxPolicyCpus = std::max(maxPolicyCpus, vec.size());
    }
    uint64_t policySum = 0;
    for (size_t i = 0; i < maxPolicyCpus; ++i) {
        for (const auto &vec : concurrentTime.policy) {
            if (i < vec.size()) policySum += vec[i];
        }
        ASSERT_LE(concurrentTime.active[i], policySum);
        policySum -= concurrentTime.active[i];
    }
    policySum = 0;
    for (size_t i = 0; i < concurrentTime.active.size(); ++i) {
        for (const auto &vec : concurrentTime.policy) {
            if (i < vec.size()) policySum += vec[vec.size() - 1 - i];
        }
        auto activeSum = concurrentTime.active[concurrentTime.active.size() - 1 - i];
        // This check is slightly flaky because we may read a map entry in the middle of an update
        // when active times have been updated but policy times have not. This happens infrequently
        // and can be distinguished from more serious bugs by re-running the test: if the underlying
        // data itself is inconsistent, the test will fail every time.
        ASSERT_LE(activeSum, policySum);
        policySum -= activeSum;
    }
}

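// Check that a UID's time-in-state and concurrent-times entries agree: for
// each policy, the two totals may differ only by the time of an in-flight
// update (at most ~1s), and the total active time must equal the total
// policy time.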
static void TestUidTimesConsistent(const std::vector<std::vector<uint64_t>> &timeInState,
                                   const struct concurrent_time_t &concurrentTime) {
    ASSERT_NO_FATAL_FAILURE(TestConcurrentTimesConsistent(concurrentTime));
    ASSERT_EQ(timeInState.size(), concurrentTime.policy.size());
    uint64_t policySum = 0;
    for (uint32_t i = 0; i < timeInState.size(); ++i) {
        uint64_t tisSum =
                std::accumulate(timeInState[i].begin(), timeInState[i].end(), (uint64_t)0);
        uint64_t concurrentSum = std::accumulate(concurrentTime.policy[i].begin(),
                                                 concurrentTime.policy[i].end(), (uint64_t)0);
        if (tisSum < concurrentSum)
            ASSERT_LE(concurrentSum - tisSum, NSEC_PER_SEC);
        else
            ASSERT_LE(tisSum - concurrentSum, NSEC_PER_SEC);
        policySum += concurrentSum;
    }
    uint64_t activeSum = std::accumulate(concurrentTime.active.begin(), concurrentTime.active.end(),
                                         (uint64_t)0);
    EXPECT_EQ(activeSum, policySum);
}

TEST_F(TimeInStateTest, SingleUidTimesConsistent) {
    auto times = getUidCpuFreqTimes(0);
    ASSERT_TRUE(times.has_value());

    auto concurrentTimes = getUidConcurrentTimes(0);
    ASSERT_TRUE(concurrentTimes.has_value());

    ASSERT_NO_FATAL_FAILURE(TestUidTimesConsistent(*times, *concurrentTimes));
}

TEST_F(TimeInStateTest, AllUidTimeInState) {
    uint64_t zero = 0;
    auto maps = {getUidsCpuFreqTimes(), getUidsUpdatedCpuFreqTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());
        ASSERT_FALSE(map->empty());

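        // Every entry should have the same dimensions: one vector per policy,
        // one element per supported frequency.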
        vector<size_t> sizes;
        auto firstEntry = map->begin()->second;
        for (const auto &subEntry : firstEntry) sizes.emplace_back(subEntry.size());

        for (const auto &vec : *map) {
            ASSERT_EQ(vec.second.size(), sizes.size());
            for (size_t i = 0; i < vec.second.size(); ++i) ASSERT_EQ(vec.second[i].size(), sizes[i]);
        }
    }
}

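// Check two successive snapshots of one UID's times: each individual time
// must be monotonically non-decreasing, and the total growth between the two
// reads should stay under one second.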
void TestCheckUpdate(const std::vector<std::vector<uint64_t>> &before,
                     const std::vector<std::vector<uint64_t>> &after) {
    ASSERT_EQ(before.size(), after.size());
    uint64_t sumBefore = 0, sumAfter = 0;
    for (size_t i = 0; i < before.size(); ++i) {
        ASSERT_EQ(before[i].size(), after[i].size());
        for (size_t j = 0; j < before[i].size(); ++j) {
            // Times should never decrease
            ASSERT_LE(before[i][j], after[i][j]);
        }
        sumBefore += std::accumulate(before[i].begin(), before[i].end(), (uint64_t)0);
        sumAfter += std::accumulate(after[i].begin(), after[i].end(), (uint64_t)0);
    }
    ASSERT_LE(sumBefore, sumAfter);
    ASSERT_LE(sumAfter - sumBefore, NSEC_PER_SEC);
}

TEST_F(TimeInStateTest, AllUidUpdatedTimeInState) {
    uint64_t lastUpdate = 0;
    auto map1 = getUidsUpdatedCpuFreqTimes(&lastUpdate);
    ASSERT_TRUE(map1.has_value());
    ASSERT_FALSE(map1->empty());
    ASSERT_NE(lastUpdate, (uint64_t)0);
    uint64_t oldLastUpdate = lastUpdate;

    // Sleep briefly to trigger a context switch, ensuring we see at least one update.
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = 1000000;
    nanosleep(&ts, NULL);

    auto map2 = getUidsUpdatedCpuFreqTimes(&lastUpdate);
    ASSERT_TRUE(map2.has_value());
    ASSERT_FALSE(map2->empty());
    ASSERT_NE(lastUpdate, oldLastUpdate);

    // Not every UID runs during a 1ms interval, so the second (updated-only)
    // map should exclude at least one UID seen in the full map.
    bool someUidsExcluded = false;
    for (const auto &[uid, v] : *map1) {
        if (map2->find(uid) == map2->end()) {
            someUidsExcluded = true;
            break;
        }
    }
    ASSERT_TRUE(someUidsExcluded);

    // Every UID that did update must already have been present, with times
    // that only moved forward.
    for (const auto &[uid, newTimes] : *map2) {
        ASSERT_NE(map1->find(uid), map1->end());
        ASSERT_NO_FATAL_FAILURE(TestCheckUpdate((*map1)[uid], newTimes));
    }
}

TEST_F(TimeInStateTest, TotalAndAllUidTimeInStateConsistent) {
    auto allUid = getUidsCpuFreqTimes();
    auto total = getTotalCpuFreqTimes();

    ASSERT_TRUE(allUid.has_value() && total.has_value());

    // Check the number of policies.
    ASSERT_EQ(allUid->at(0).size(), total->size());

    for (uint32_t policyIdx = 0; policyIdx < total->size(); ++policyIdx) {
        std::vector<uint64_t> totalTimes = total->at(policyIdx);
        uint32_t totalFreqsCount = totalTimes.size();
        std::vector<uint64_t> allUidTimes(totalFreqsCount, 0);
        for (auto const &[uid, uidTimes] : *allUid) {
            if (uid == AID_SDK_SANDBOX) continue;
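            // A UID may report more frequency buckets than the total map;
            // fold any extras into the last bucket.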
            for (uint32_t freqIdx = 0; freqIdx < uidTimes[policyIdx].size(); ++freqIdx) {
                allUidTimes[std::min(freqIdx, totalFreqsCount - 1)] += uidTimes[policyIdx][freqIdx];
            }
        }

        for (uint32_t freqIdx = 0; freqIdx < totalFreqsCount; ++freqIdx) {
            ASSERT_LE(allUidTimes[freqIdx], totalTimes[freqIdx]);
        }
    }
}

TEST_F(TimeInStateTest, SingleAndAllUidTimeInStateConsistent) {
    uint64_t zero = 0;
    auto maps = {getUidsCpuFreqTimes(), getUidsUpdatedCpuFreqTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());
        ASSERT_FALSE(map->empty());

        for (const auto &kv : *map) {
            uint32_t uid = kv.first;
            auto times1 = kv.second;
            auto times2 = getUidCpuFreqTimes(uid);
            ASSERT_TRUE(times2.has_value());

            ASSERT_EQ(times1.size(), times2->size());
            for (uint32_t i = 0; i < times1.size(); ++i) {
                ASSERT_EQ(times1[i].size(), (*times2)[i].size());
                for (uint32_t j = 0; j < times1[i].size(); ++j) {
                    ASSERT_LE((*times2)[i][j] - times1[i][j], NSEC_PER_SEC);
                }
            }
        }
    }
}

TEST_F(TimeInStateTest, AllUidConcurrentTimes) {
    uint64_t zero = 0;
    auto maps = {getUidsConcurrentTimes(), getUidsUpdatedConcurrentTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());
        ASSERT_FALSE(map->empty());

        auto firstEntry = map->begin()->second;
        for (const auto &kv : *map) {
            ASSERT_EQ(kv.second.active.size(), firstEntry.active.size());
            ASSERT_EQ(kv.second.policy.size(), firstEntry.policy.size());
            for (size_t i = 0; i < kv.second.policy.size(); ++i) {
                ASSERT_EQ(kv.second.policy[i].size(), firstEntry.policy[i].size());
            }
        }
    }
}

TEST_F(TimeInStateTest, AllUidUpdatedConcurrentTimes) {
    uint64_t lastUpdate = 0;
    auto map1 = getUidsUpdatedConcurrentTimes(&lastUpdate);
    ASSERT_TRUE(map1.has_value());
    ASSERT_FALSE(map1->empty());
    ASSERT_NE(lastUpdate, (uint64_t)0);

    // Sleep briefly to trigger a context switch, ensuring we see at least one update.
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = 1000000;
    nanosleep(&ts, NULL);

    uint64_t oldLastUpdate = lastUpdate;
    auto map2 = getUidsUpdatedConcurrentTimes(&lastUpdate);
    ASSERT_TRUE(map2.has_value());
    ASSERT_FALSE(map2->empty());
    ASSERT_NE(lastUpdate, oldLastUpdate);

    // Not every UID runs during a 1ms interval, so the second (updated-only)
    // map should exclude at least one UID seen in the full map.
    bool someUidsExcluded = false;
    for (const auto &[uid, v] : *map1) {
        if (map2->find(uid) == map2->end()) {
            someUidsExcluded = true;
            break;
        }
    }
    ASSERT_TRUE(someUidsExcluded);

    for (const auto &[uid, newTimes] : *map2) {
        ASSERT_NE(map1->find(uid), map1->end());
        ASSERT_NO_FATAL_FAILURE(TestCheckUpdate({(*map1)[uid].active}, {newTimes.active}));
        ASSERT_NO_FATAL_FAILURE(TestCheckUpdate((*map1)[uid].policy, newTimes.policy));
    }
}

TEST_F(TimeInStateTest, SingleAndAllUidConcurrentTimesConsistent) {
    uint64_t zero = 0;
    auto maps = {getUidsConcurrentTimes(), getUidsUpdatedConcurrentTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());
        for (const auto &kv : *map) {
            uint32_t uid = kv.first;
            auto times1 = kv.second;
            auto times2 = getUidConcurrentTimes(uid);
            ASSERT_TRUE(times2.has_value());
            for (uint32_t i = 0; i < times1.active.size(); ++i) {
                ASSERT_LE(times2->active[i] - times1.active[i], NSEC_PER_SEC);
            }
            for (uint32_t i = 0; i < times1.policy.size(); ++i) {
                for (uint32_t j = 0; j < times1.policy[i].size(); ++j) {
                    ASSERT_LE(times2->policy[i][j] - times1.policy[i][j], NSEC_PER_SEC);
                }
            }
        }
    }
}

void TestCheckDelta(uint64_t before, uint64_t after) {
    // Times should never decrease
    ASSERT_LE(before, after);
    // The UID can't have run for more than ~1s of wall time on each CPU;
    // allow 2x slack for timing jitter between the two reads.
    ASSERT_LE(after - before, NSEC_PER_SEC * 2 * get_nprocs_conf());
}

TEST_F(TimeInStateTest, TotalTimeInStateMonotonic) {
    auto before = getTotalCpuFreqTimes();
    ASSERT_TRUE(before.has_value());
    sleep(1);
    auto after = getTotalCpuFreqTimes();
    ASSERT_TRUE(after.has_value());

    for (uint32_t policyIdx = 0; policyIdx < after->size(); ++policyIdx) {
        auto timesBefore = before->at(policyIdx);
        auto timesAfter = after->at(policyIdx);
        for (uint32_t freqIdx = 0; freqIdx < timesAfter.size(); ++freqIdx) {
            ASSERT_NO_FATAL_FAILURE(TestCheckDelta(timesBefore[freqIdx], timesAfter[freqIdx]));
        }
    }
}

TEST_F(TimeInStateTest, AllUidTimeInStateMonotonic) {
    auto map1 = getUidsCpuFreqTimes();
    ASSERT_TRUE(map1.has_value());
    sleep(1);
    auto map2 = getUidsCpuFreqTimes();
    ASSERT_TRUE(map2.has_value());

    for (const auto &kv : *map1) {
        uint32_t uid = kv.first;
        auto times = kv.second;
        ASSERT_NE(map2->find(uid), map2->end());
        for (uint32_t policy = 0; policy < times.size(); ++policy) {
            for (uint32_t freqIdx = 0; freqIdx < times[policy].size(); ++freqIdx) {
                auto before = times[policy][freqIdx];
                auto after = (*map2)[uid][policy][freqIdx];
                ASSERT_NO_FATAL_FAILURE(TestCheckDelta(before, after));
            }
        }
    }
}

TEST_F(TimeInStateTest, AllUidConcurrentTimesMonotonic) {
    auto map1 = getUidsConcurrentTimes();
    ASSERT_TRUE(map1.has_value());
    ASSERT_FALSE(map1->empty());
    sleep(1);
    auto map2 = getUidsConcurrentTimes();
    ASSERT_TRUE(map2.has_value());
    ASSERT_FALSE(map2->empty());

    for (const auto &kv : *map1) {
        uint32_t uid = kv.first;
        auto times = kv.second;
        ASSERT_NE(map2->find(uid), map2->end());
        for (uint32_t i = 0; i < times.active.size(); ++i) {
            auto before = times.active[i];
            auto after = (*map2)[uid].active[i];
            ASSERT_NO_FATAL_FAILURE(TestCheckDelta(before, after));
        }
        for (uint32_t policy = 0; policy < times.policy.size(); ++policy) {
            for (uint32_t idx = 0; idx < times.policy[policy].size(); ++idx) {
                auto before = times.policy[policy][idx];
                auto after = (*map2)[uid].policy[policy][idx];
                ASSERT_NO_FATAL_FAILURE(TestCheckDelta(before, after));
            }
        }
    }
}

TEST_F(TimeInStateTest, AllUidTimeInStateSanityCheck) {
    uint64_t zero = 0;
    auto maps = {getUidsCpuFreqTimes(), getUidsUpdatedCpuFreqTimes(&zero)};
    for (const auto &map : maps) {
        ASSERT_TRUE(map.has_value());

        bool foundLargeValue = false;
        for (const auto &kv : *map) {
            for (const auto &timeVec : kv.second) {
                for (const auto &time : timeVec) {
                    ASSERT_LE(time, NSEC_PER_YEAR);
                    if (time > UINT32_MAX) foundLargeValue = true;
                }
            }
        }
        // UINT32_MAX nanoseconds is less than 5 seconds, so if every part of our pipeline is using
        // uint64_t as expected, we should have some times higher than that.
        ASSERT_TRUE(foundLargeValue);
    }
}

TEST_F(TimeInStateTest, AllUidConcurrentTimesSanityCheck) {
    uint64_t zero = 0;
    auto maps = {getUidsConcurrentTimes(), getUidsUpdatedConcurrentTimes(&zero)};
    for (const auto &concurrentMap : maps) {
        ASSERT_TRUE(concurrentMap);

        bool activeFoundLargeValue = false;
        bool policyFoundLargeValue = false;
        for (const auto &kv : *concurrentMap) {
            for (const auto &time : kv.second.active) {
                ASSERT_LE(time, NSEC_PER_YEAR);
                if (time > UINT32_MAX) activeFoundLargeValue = true;
            }
            for (const auto &policyTimeVec : kv.second.policy) {
                for (const auto &time : policyTimeVec) {
                    ASSERT_LE(time, NSEC_PER_YEAR);
                    if (time > UINT32_MAX) policyFoundLargeValue = true;
                }
            }
        }
        // UINT32_MAX nanoseconds is less than 5 seconds, so if every part of our pipeline is using
        // uint64_t as expected, we should have some times higher than that.
        ASSERT_TRUE(activeFoundLargeValue);
        ASSERT_TRUE(policyFoundLargeValue);
    }
}

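// Writing a map entry with an out-of-range bucket index should make the
// reader fail outright rather than return partial or garbage data.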
TEST_F(TimeInStateTest, AllUidConcurrentTimesFailsOnInvalidBucket) {
    uint32_t uid = 0;
    {
        // Find an unused UID
        auto map = getUidsConcurrentTimes();
        ASSERT_TRUE(map.has_value());
        ASSERT_FALSE(map->empty());
        for (const auto &kv : *map) uid = std::max(uid, kv.first);
        ++uid;
    }
    android::base::unique_fd fd{
            bpf_obj_get(BPF_FS_PATH "map_timeInState_uid_concurrent_times_map")};
    ASSERT_GE(fd, 0);
    uint32_t nCpus = get_nprocs_conf();
    uint32_t maxBucket = (nCpus - 1) / CPUS_PER_ENTRY;
    // Use a bucket index one past the largest valid bucket for this CPU count.
    time_key_t key = {.uid = uid, .bucket = maxBucket + 1};
    std::vector<concurrent_val_t> vals(nCpus);
    ASSERT_FALSE(writeToMapEntry(fd, &key, vals.data(), BPF_NOEXIST));
    EXPECT_FALSE(getUidsConcurrentTimes().has_value());
    ASSERT_FALSE(deleteMapEntry(fd, &key));
}

TEST_F(TimeInStateTest, AllUidTimesConsistent) {
    auto tisMap = getUidsCpuFreqTimes();
    ASSERT_TRUE(tisMap.has_value());

    auto concurrentMap = getUidsConcurrentTimes();
    ASSERT_TRUE(concurrentMap.has_value());

    ASSERT_EQ(tisMap->size(), concurrentMap->size());
    for (const auto &kv : *tisMap) {
        uint32_t uid = kv.first;
        auto times = kv.second;
        ASSERT_NE(concurrentMap->find(uid), concurrentMap->end());

        auto concurrentTimes = (*concurrentMap)[uid];
        ASSERT_NO_FATAL_FAILURE(TestUidTimesConsistent(times, concurrentTimes));
    }
}

TEST_F(TimeInStateTest, RemoveUid) {
    uint32_t uid = 0;
    {
        // Find an unused UID
        auto times = getUidsCpuFreqTimes();
        ASSERT_TRUE(times.has_value());
        ASSERT_FALSE(times->empty());
        for (const auto &kv : *times) uid = std::max(uid, kv.first);
        ++uid;
    }
    {
        // Add a map entry for our fake UID by copying a real map entry
        android::base::unique_fd fd{
                bpf_obj_get(BPF_FS_PATH "map_timeInState_uid_time_in_state_map")};
        ASSERT_GE(fd, 0);
        time_key_t k;
        ASSERT_FALSE(getFirstMapKey(fd, &k));
        std::vector<tis_val_t> vals(get_nprocs_conf());
        ASSERT_FALSE(findMapEntry(fd, &k, vals.data()));
        uint32_t copiedUid = k.uid;
        k.uid = uid;
        ASSERT_FALSE(writeToMapEntry(fd, &k, vals.data(), BPF_NOEXIST));

        android::base::unique_fd fd2{
                bpf_obj_get(BPF_FS_PATH "map_timeInState_uid_concurrent_times_map")};
        ASSERT_GE(fd2, 0);
        k.uid = copiedUid;
        k.bucket = 0;
        std::vector<concurrent_val_t> cvals(get_nprocs_conf());
        ASSERT_FALSE(findMapEntry(fd2, &k, cvals.data()));
        k.uid = uid;
        ASSERT_FALSE(writeToMapEntry(fd2, &k, cvals.data(), BPF_NOEXIST));
    }
    auto times = getUidCpuFreqTimes(uid);
    ASSERT_TRUE(times.has_value());
    ASSERT_FALSE(times->empty());

    auto concurrentTimes = getUidConcurrentTimes(0);
    ASSERT_TRUE(concurrentTimes.has_value());
    ASSERT_FALSE(concurrentTimes->active.empty());
    ASSERT_FALSE(concurrentTimes->policy.empty());

    uint64_t sum = 0;
    for (size_t i = 0; i < times->size(); ++i) {
        for (auto x : (*times)[i]) sum += x;
    }
    ASSERT_GT(sum, (uint64_t)0);

    uint64_t activeSum = 0;
    for (size_t i = 0; i < concurrentTimes->active.size(); ++i) {
        activeSum += concurrentTimes->active[i];
    }
    ASSERT_GT(activeSum, (uint64_t)0);

    ASSERT_TRUE(clearUidTimes(uid));

    auto allTimes = getUidsCpuFreqTimes();
    ASSERT_TRUE(allTimes.has_value());
    ASSERT_FALSE(allTimes->empty());
    ASSERT_EQ(allTimes->find(uid), allTimes->end());

    auto allConcurrentTimes = getUidsConcurrentTimes();
    ASSERT_TRUE(allConcurrentTimes.has_value());
    ASSERT_FALSE(allConcurrentTimes->empty());
    ASSERT_EQ(allConcurrentTimes->find(uid), allConcurrentTimes->end());
}

TEST_F(TimeInStateTest, GetCpuFreqs) {
    auto freqs = getCpuFreqs();
    ASSERT_TRUE(freqs.has_value());

    auto times = getUidCpuFreqTimes(0);
    ASSERT_TRUE(times.has_value());

    ASSERT_EQ(freqs->size(), times->size());
    for (size_t i = 0; i < freqs->size(); ++i) EXPECT_EQ((*freqs)[i].size(), (*times)[i].size());
}

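// Returns the current CLOCK_MONOTONIC time in nanoseconds.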
uint64_t timeNanos() {
    struct timespec spec;
    clock_gettime(CLOCK_MONOTONIC, &spec);
    return (uint64_t)spec.tv_sec * NSEC_PER_SEC + spec.tv_nsec;
}

// Keep the CPU busy with some number crunching. The result is accumulated
// into the global `sum` so the loop has an observable side effect.
void useCpu() {
    sum = 0;
    for (int i = 0; i < 100000; i++) {
        sum *= i;
    }
}

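// Semaphores used to ping-pong execution between the main test thread and
// testThread, guaranteeing that both threads get scheduled and accumulate
// CPU time.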
sem_t pingsem, pongsem;

void *testThread(void *) {
    for (int i = 0; i < 10; i++) {
        sem_wait(&pingsem);
        useCpu();
        sem_post(&pongsem);
    }
    return nullptr;
}

TEST_F(TimeInStateTest, GetAggregatedTaskCpuFreqTimes) {
    uint64_t startTimeNs = timeNanos();

    sem_init(&pingsem, 0, 1);
    sem_init(&pongsem, 0, 0);

    pthread_t thread;
    ASSERT_EQ(pthread_create(&thread, NULL, &testThread, NULL), 0);

    // This process may have been running for some time, so when we start tracking
    // CPU time, the very first switch may include the accumulated time.
    // Yield the remainder of this timeslice to the newly created thread.
    sem_wait(&pongsem);
    sem_post(&pingsem);

    pid_t tgid = getpid();
    startTrackingProcessCpuTimes(tgid);

    pid_t tid = pthread_gettid_np(thread);
    startAggregatingTaskCpuTimes(tid, 42);

    // Play ping-pong with the other thread to ensure that both threads get
    // some CPU time.
    for (int i = 0; i < 9; i++) {
        sem_wait(&pongsem);
        useCpu();
        sem_post(&pingsem);
    }

    pthread_join(thread, NULL);

    std::optional<std::unordered_map<uint16_t, std::vector<std::vector<uint64_t>>>> optionalMap =
            getAggregatedTaskCpuFreqTimes(tgid, {0, 42});
    ASSERT_TRUE(optionalMap);

    std::unordered_map<uint16_t, std::vector<std::vector<uint64_t>>> map = *optionalMap;
    ASSERT_EQ(map.size(), 2u);

    uint64_t testDurationNs = timeNanos() - startTimeNs;
    for (auto pair : map) {
        uint16_t aggregationKey = pair.first;
        ASSERT_TRUE(aggregationKey == 0 || aggregationKey == 42);

        std::vector<std::vector<uint64_t>> timesInState = pair.second;
        uint64_t totalCpuTime = 0;
        for (size_t i = 0; i < timesInState.size(); i++) {
            for (size_t j = 0; j < timesInState[i].size(); j++) {
                totalCpuTime += timesInState[i][j];
            }
        }
        ASSERT_GT(totalCpuTime, 0ul);
        ASSERT_LE(totalCpuTime, testDurationNs);
    }
}

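// Runs on a new thread: switches to the given UID, then sleeps briefly so the
// BPF program observes at least one context switch under that UID.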
void *forceSwitchWithUid(void *uidPtr) {
    if (!uidPtr) return nullptr;
    setuid(*(uint32_t *)uidPtr);

    // Sleep briefly to trigger a context switch, ensuring we see at least one update.
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = 1000000;
    nanosleep(&ts, NULL);
    return nullptr;
}

TEST_F(TimeInStateTest, SdkSandboxUid) {
    // Find an unused app UID and its corresponding SDK sandbox uid.
    uint32_t appUid = AID_APP_START, sandboxUid;
    {
        auto times = getUidsCpuFreqTimes();
        ASSERT_TRUE(times.has_value());
        ASSERT_FALSE(times->empty());
        // Skip non-app UIDs; the map is unordered, so we must scan all entries
        // to find the largest in-use app UID.
        for (const auto &kv : *times) {
            if (kv.first > AID_APP_END) continue;
            appUid = std::max(appUid, kv.first);
        }
        appUid++;
        // Each app UID's SDK sandbox UID sits at a fixed offset in the SDK
        // sandbox process range.
        sandboxUid = appUid + (AID_SDK_SANDBOX_PROCESS_START - AID_APP_START);
    }

    // Create a thread to run with the fake sandbox uid.
    pthread_t thread;
    ASSERT_EQ(pthread_create(&thread, NULL, &forceSwitchWithUid, &sandboxUid), 0);
    pthread_join(thread, NULL);

    // Confirm we recorded stats for appUid and AID_SDK_SANDBOX but not sandboxUid
    auto allTimes = getUidsCpuFreqTimes();
    ASSERT_TRUE(allTimes.has_value());
    ASSERT_FALSE(allTimes->empty());
    ASSERT_NE(allTimes->find(appUid), allTimes->end());
    ASSERT_NE(allTimes->find(AID_SDK_SANDBOX), allTimes->end());
    ASSERT_EQ(allTimes->find(sandboxUid), allTimes->end());

    auto allConcurrentTimes = getUidsConcurrentTimes();
    ASSERT_TRUE(allConcurrentTimes.has_value());
    ASSERT_FALSE(allConcurrentTimes->empty());
    ASSERT_NE(allConcurrentTimes->find(appUid), allConcurrentTimes->end());
    ASSERT_NE(allConcurrentTimes->find(AID_SDK_SANDBOX), allConcurrentTimes->end());
    ASSERT_EQ(allConcurrentTimes->find(sandboxUid), allConcurrentTimes->end());

    ASSERT_TRUE(clearUidTimes(appUid));
}

} // namespace bpf
} // namespace android