/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <malloc.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <algorithm>
#include <stack>
#include <string>
#include <unordered_map>
#include <vector>

#include <android-base/file.h>
#include <android-base/strings.h>
#include <benchmark/benchmark.h>

#include "Alloc.h"
#include "File.h"
#include "Utils.h"

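// Holds a preprocessed allocation trace: the decoded trace entries plus a
// scratch array of live pointers that the replay indexes into.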
struct TraceDataType {
  AllocEntry* entries = nullptr;
  size_t num_entries = 0;
  void** ptrs = nullptr;
  size_t num_ptrs = 0;
};

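// Returns a slot in the pointer array, reusing a freed index when one is
// available and growing the maximum index otherwise.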
static size_t GetIndex(std::stack<size_t>& free_indices, size_t* max_index) {
  if (free_indices.empty()) {
    return (*max_index)++;
  }
  size_t index = free_indices.top();
  free_indices.pop();
  return index;
}

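// Releases any allocations still live at the end of a replay pass so the next
// iteration starts from a clean slate.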
static void FreePtrs(TraceDataType* trace_data) {
  for (size_t i = 0; i < trace_data->num_ptrs; i++) {
    void* ptr = trace_data->ptrs[i];
    if (ptr != nullptr) {
      free(ptr);
      trace_data->ptrs[i] = nullptr;
    }
  }
}

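// Unmaps the pointer array and frees the decoded entries of a cached trace.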
static void FreeTraceData(TraceDataType* trace_data) {
  if (trace_data->ptrs == nullptr) {
    return;
  }

  munmap(trace_data->ptrs, sizeof(void*) * trace_data->num_ptrs);
  FreeEntries(trace_data->entries, trace_data->num_entries);
}

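// Decodes a trace file and rewrites its pointer values into indices that the
// replay can use directly. The most recently loaded trace is cached so that
// repeated benchmark runs over the same file avoid re-parsing it.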
static void GetTraceData(const std::string& filename, TraceDataType* trace_data) {
  // Only keep the last trace encountered cached.
  static std::string cached_filename;
  static TraceDataType cached_trace_data;
  if (cached_filename == filename) {
    *trace_data = cached_trace_data;
    return;
  } else {
    FreeTraceData(&cached_trace_data);
  }

  cached_filename = filename;
  GetUnwindInfo(filename.c_str(), &trace_data->entries, &trace_data->num_entries);

  // This loop converts the ptr field into an index into the ptrs array.
  // Creating this index allows the trace run to quickly store or retrieve the
  // allocation.
  // For free, the ptr field will be index + one, where a zero represents
  // a free(nullptr) call.
  // For realloc, the old_pointer field will be index + one, where a zero
  // represents a realloc(nullptr, XX) call.
  trace_data->num_ptrs = 0;
  std::stack<size_t> free_indices;
  std::unordered_map<uint64_t, size_t> ptr_to_index;
  for (size_t i = 0; i < trace_data->num_entries; i++) {
    AllocEntry* entry = &trace_data->entries[i];
    switch (entry->type) {
      case MALLOC:
      case CALLOC:
      case MEMALIGN: {
        size_t idx = GetIndex(free_indices, &trace_data->num_ptrs);
        ptr_to_index[entry->ptr] = idx;
        entry->ptr = idx;
        break;
      }
      case REALLOC: {
        if (entry->u.old_ptr != 0) {
          auto idx_entry = ptr_to_index.find(entry->u.old_ptr);
          if (idx_entry == ptr_to_index.end()) {
            errx(1, "File Error: Failed to find realloc pointer %" PRIx64, entry->u.old_ptr);
          }
          size_t old_pointer_idx = idx_entry->second;
          free_indices.push(old_pointer_idx);
          ptr_to_index.erase(idx_entry);
          entry->u.old_ptr = old_pointer_idx + 1;
        }
        size_t idx = GetIndex(free_indices, &trace_data->num_ptrs);
        ptr_to_index[entry->ptr] = idx;
        entry->ptr = idx;
        break;
      }
      case FREE:
        if (entry->ptr != 0) {
          auto idx_entry = ptr_to_index.find(entry->ptr);
          if (idx_entry == ptr_to_index.end()) {
            errx(1, "File Error: Unable to find free pointer %" PRIx64, entry->ptr);
          }
          free_indices.push(idx_entry->second);
          entry->ptr = idx_entry->second + 1;
          ptr_to_index.erase(idx_entry);
        }
        break;
      case THREAD_DONE:
        break;
    }
  }
  void* map = mmap(nullptr, sizeof(void*) * trace_data->num_ptrs, PROT_READ | PROT_WRITE,
                   MAP_ANON | MAP_PRIVATE, -1, 0);
  if (map == MAP_FAILED) {
    err(1, "mmap failed");
  }
  trace_data->ptrs = reinterpret_cast<void**>(map);

  cached_trace_data = *trace_data;
}

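// Replays a preprocessed trace on the current thread, timing only the
// allocator calls themselves (plus touching each allocation so its pages are
// resident) and reporting the total as the iteration time.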
static void RunTrace(benchmark::State& state, TraceDataType* trace_data) {
  int pagesize = getpagesize();
  uint64_t total_ns = 0;
  uint64_t start_ns;
  void** ptrs = trace_data->ptrs;
  for (size_t i = 0; i < trace_data->num_entries; i++) {
    void* ptr;
    const AllocEntry& entry = trace_data->entries[i];
    switch (entry.type) {
      case MALLOC:
        start_ns = Nanotime();
        ptr = malloc(entry.size);
        if (ptr == nullptr) {
          errx(1, "malloc returned nullptr");
        }
        MakeAllocationResident(ptr, entry.size, pagesize);
        total_ns += Nanotime() - start_ns;

        if (ptrs[entry.ptr] != nullptr) {
          errx(1, "Internal Error: malloc pointer being replaced is not nullptr");
        }
        ptrs[entry.ptr] = ptr;
        break;

      case CALLOC:
        start_ns = Nanotime();
        ptr = calloc(entry.u.n_elements, entry.size);
        if (ptr == nullptr) {
          errx(1, "calloc returned nullptr");
        }
        MakeAllocationResident(ptr, entry.size, pagesize);
        total_ns += Nanotime() - start_ns;

        if (ptrs[entry.ptr] != nullptr) {
          errx(1, "Internal Error: calloc pointer being replaced is not nullptr");
        }
        ptrs[entry.ptr] = ptr;
        break;

      case MEMALIGN:
        start_ns = Nanotime();
        ptr = memalign(entry.u.align, entry.size);
        if (ptr == nullptr) {
          errx(1, "memalign returned nullptr");
        }
        MakeAllocationResident(ptr, entry.size, pagesize);
        total_ns += Nanotime() - start_ns;

        if (ptrs[entry.ptr] != nullptr) {
          errx(1, "Internal Error: memalign pointer being replaced is not nullptr");
        }
        ptrs[entry.ptr] = ptr;
        break;

      case REALLOC:
        start_ns = Nanotime();
        if (entry.u.old_ptr == 0) {
          ptr = realloc(nullptr, entry.size);
        } else {
          ptr = realloc(ptrs[entry.u.old_ptr - 1], entry.size);
          ptrs[entry.u.old_ptr - 1] = nullptr;
        }
        if (entry.size > 0) {
          if (ptr == nullptr) {
            errx(1, "realloc returned nullptr");
          }
          MakeAllocationResident(ptr, entry.size, pagesize);
        }
        total_ns += Nanotime() - start_ns;

        if (ptrs[entry.ptr] != nullptr) {
          errx(1, "Internal Error: realloc pointer being replaced is not nullptr");
        }
        ptrs[entry.ptr] = ptr;
        break;

      case FREE:
        if (entry.ptr != 0) {
          ptr = ptrs[entry.ptr - 1];
          ptrs[entry.ptr - 1] = nullptr;
        } else {
          ptr = nullptr;
        }
        start_ns = Nanotime();
        free(ptr);
        total_ns += Nanotime() - start_ns;
        break;

      case THREAD_DONE:
        break;
    }
  }
  state.SetIterationTime(total_ns / 1000000000.0);

  FreePtrs(trace_data);
}

// Run a trace as if all of the allocations occurred in a single thread.
// This is not completely realistic, but it is a possible worst case that
// could happen in an app.
static void BenchmarkTrace(benchmark::State& state, const char* filename, bool enable_decay_time) {
#if defined(__BIONIC__)
  if (enable_decay_time) {
    mallopt(M_DECAY_TIME, 1);
  } else {
    mallopt(M_DECAY_TIME, 0);
  }
#endif
  std::string full_filename(android::base::GetExecutableDirectory() + "/traces/" + filename);

  TraceDataType trace_data;
  GetTraceData(full_filename, &trace_data);

  for (auto _ : state) {
    RunTrace(state, &trace_data);
  }

  // Don't free trace_data; it is cached. The last set of trace data is
  // intentionally leaked.
}

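// Options shared by every trace benchmark: report the manually measured
// allocation time in microseconds, run each benchmark for at least 15 seconds,
// repeat it four times, and only report the aggregate statistics.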
#define BENCH_OPTIONS                   \
  UseManualTime()                       \
      ->Unit(benchmark::kMicrosecond)   \
      ->MinTime(15.0)                   \
      ->Repetitions(4)                  \
      ->ReportAggregatesOnly(true)

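// Each captured app trace gets two benchmarks: the default run with decay time
// enabled and, on bionic only, a _no_decay variant that disables M_DECAY_TIME.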
static void BM_angry_birds2(benchmark::State& state) {
  BenchmarkTrace(state, "angry_birds2.zip", true);
}
BENCHMARK(BM_angry_birds2)->BENCH_OPTIONS;

#if defined(__BIONIC__)
static void BM_angry_birds2_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "angry_birds2.zip", false);
}
BENCHMARK(BM_angry_birds2_no_decay)->BENCH_OPTIONS;
#endif

static void BM_camera(benchmark::State& state) {
  BenchmarkTrace(state, "camera.zip", true);
}
BENCHMARK(BM_camera)->BENCH_OPTIONS;

#if defined(__BIONIC__)
static void BM_camera_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "camera.zip", false);
}
BENCHMARK(BM_camera_no_decay)->BENCH_OPTIONS;
#endif

static void BM_candy_crush_saga(benchmark::State& state) {
  BenchmarkTrace(state, "candy_crush_saga.zip", true);
}
BENCHMARK(BM_candy_crush_saga)->BENCH_OPTIONS;

#if defined(__BIONIC__)
static void BM_candy_crush_saga_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "candy_crush_saga.zip", false);
}
BENCHMARK(BM_candy_crush_saga_no_decay)->BENCH_OPTIONS;
#endif

void BM_gmail(benchmark::State& state) {
  BenchmarkTrace(state, "gmail.zip", true);
}
BENCHMARK(BM_gmail)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_gmail_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "gmail.zip", false);
}
BENCHMARK(BM_gmail_no_decay)->BENCH_OPTIONS;
#endif

void BM_maps(benchmark::State& state) {
  BenchmarkTrace(state, "maps.zip", true);
}
BENCHMARK(BM_maps)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_maps_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "maps.zip", false);
}
BENCHMARK(BM_maps_no_decay)->BENCH_OPTIONS;
#endif

void BM_photos(benchmark::State& state) {
  BenchmarkTrace(state, "photos.zip", true);
}
BENCHMARK(BM_photos)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_photos_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "photos.zip", false);
}
BENCHMARK(BM_photos_no_decay)->BENCH_OPTIONS;
#endif

void BM_pubg(benchmark::State& state) {
  BenchmarkTrace(state, "pubg.zip", true);
}
BENCHMARK(BM_pubg)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_pubg_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "pubg.zip", false);
}
BENCHMARK(BM_pubg_no_decay)->BENCH_OPTIONS;
#endif

void BM_surfaceflinger(benchmark::State& state) {
  BenchmarkTrace(state, "surfaceflinger.zip", true);
}
BENCHMARK(BM_surfaceflinger)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_surfaceflinger_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "surfaceflinger.zip", false);
}
BENCHMARK(BM_surfaceflinger_no_decay)->BENCH_OPTIONS;
#endif

void BM_system_server(benchmark::State& state) {
  BenchmarkTrace(state, "system_server.zip", true);
}
BENCHMARK(BM_system_server)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_system_server_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "system_server.zip", false);
}
BENCHMARK(BM_system_server_no_decay)->BENCH_OPTIONS;
#endif

void BM_systemui(benchmark::State& state) {
  BenchmarkTrace(state, "systemui.zip", true);
}
BENCHMARK(BM_systemui)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_systemui_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "systemui.zip", false);
}
BENCHMARK(BM_systemui_no_decay)->BENCH_OPTIONS;
#endif

void BM_youtube(benchmark::State& state) {
  BenchmarkTrace(state, "youtube.zip", true);
}
BENCHMARK(BM_youtube)->BENCH_OPTIONS;

#if defined(__BIONIC__)
void BM_youtube_no_decay(benchmark::State& state) {
  BenchmarkTrace(state, "youtube.zip", false);
}
BENCHMARK(BM_youtube_no_decay)->BENCH_OPTIONS;
#endif

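// The benchmark binary accepts an extra --cpu=N option, which pins the process
// to the given core before running so results are not skewed by the scheduler
// migrating the benchmark between cores.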
int main(int argc, char** argv) {
  std::vector<char*> args;
  args.push_back(argv[0]);

  // Look for the --cpu=XX option.
  for (int i = 1; i < argc; i++) {
    if (strncmp(argv[i], "--cpu=", 6) == 0) {
      char* endptr;
      int cpu = strtol(&argv[i][6], &endptr, 10);
      if (argv[i][6] == '\0' || endptr == nullptr || *endptr != '\0') {
        printf("Invalid format of --cpu option, '%s' must be an integer value.\n", argv[i] + 6);
        return 1;
      }
      cpu_set_t cpuset;
      CPU_ZERO(&cpuset);
      CPU_SET(cpu, &cpuset);
      if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
        if (errno == EINVAL) {
          printf("Invalid cpu %d\n", cpu);
          return 1;
        }
        perror("sched_setaffinity failed");
        return 1;
      }
      printf("Locking to cpu %d\n", cpu);
    } else {
      args.push_back(argv[i]);
    }
  }

  argc = args.size();
  ::benchmark::Initialize(&argc, args.data());
  if (::benchmark::ReportUnrecognizedArguments(argc, args.data())) return 1;
  ::benchmark::RunSpecifiedBenchmarks();
}