/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <benchmark/benchmark.h>

#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <optional>
#include <string>
#include <string_view>
#include <vector>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/file_utils.h"
#include "perfetto/ext/base/string_utils.h"
#include "perfetto/ext/base/string_view.h"
#include "perfetto/trace_processor/basic_types.h"
#include "src/base/test/utils.h"
#include "src/trace_processor/containers/string_pool.h"
#include "src/trace_processor/db/column/types.h"
#include "src/trace_processor/db/table.h"
#include "src/trace_processor/tables/metadata_tables_py.h"
#include "src/trace_processor/tables/profiler_tables_py.h"
#include "src/trace_processor/tables/slice_tables_py.h"
#include "src/trace_processor/tables/track_tables_py.h"

namespace perfetto::trace_processor {
namespace {

using SliceTable = tables::SliceTable;
using ThreadTrackTable = tables::ThreadTrackTable;
using ExpectedFrameTimelineSliceTable = tables::ExpectedFrameTimelineSliceTable;
using RawTable = tables::RawTable;
using FtraceEventTable = tables::FtraceEventTable;
using HeapGraphObjectTable = tables::HeapGraphObjectTable;

// `SELECT * FROM SLICE` on android_monitor_contention_trace.at
constexpr std::string_view kSliceTable =
    "test/data/slice_table_for_benchmarks.csv";

// `SELECT * FROM expected_frame_timeline_slice` on
// android_monitor_contention_trace.at
constexpr std::string_view kExpectedFrameTimelineTable =
    "test/data/expected_frame_timeline_for_benchmarks.csv";

// `SELECT id, cpu FROM raw` on chrome_android_systrace.pftrace.
constexpr std::string_view kRawTable = "test/data/raw_cpu_for_benchmarks.csv";

// `SELECT id, cpu FROM ftrace_event` on chrome_android_systrace.pftrace.
constexpr std::string_view kFtraceEventTable =
    "test/data/ftrace_event_cpu_for_benchmarks.csv";

// `SELECT id, upid, reference_set_id FROM heap_graph_object` on
constexpr std::string_view kHeapGraphObjectTable =
    "test/data/heap_pgraph_object_for_benchmarks_query.csv";

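// Splits a CSV line on commas, ignoring commas that appear inside
// double-quoted fields. Quote characters are kept in the output fields;
// StripAndIntern() removes them before interning.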
std::vector<std::string> SplitCSVLine(const std::string& line) {
  std::vector<std::string> output;
  uint32_t start = 0;
  bool in_string = false;

  for (uint32_t i = 0; i < line.size(); ++i) {
    if (!in_string && line[i] == ',') {
      output.emplace_back(&line[start], i - start);
      start = i + 1;
      continue;
    }
    if (line[i] == '"')
      in_string = !in_string;
  }

  if (start < line.size())
    output.emplace_back(&line[start], line.size() - start);

  return output;
}

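// Reads a benchmark CSV from the test data directory and returns it as one
// string per line (including the header row). Skips the benchmark with an
// error if the file is missing or empty.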
std::vector<std::string> ReadCSV(benchmark::State& state,
                                 std::string_view file_name) {
  std::string table_csv;
  perfetto::base::ReadFile(
      perfetto::base::GetTestDataPath(std::string(file_name)), &table_csv);
  if (table_csv.empty()) {
    state.SkipWithError(
        ("Benchmark CSV missing or empty: " + std::string(file_name) +
         ". Make sure the Perfetto test data has been downloaded into "
         "test/data.")
            .c_str());
    return {};
  }
  PERFETTO_CHECK(!table_csv.empty());
  return base::SplitString(table_csv, "\n");
}

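// Strips a leading and trailing double quote from a CSV field and interns the
// result in the string pool.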
StringPool::Id StripAndIntern(StringPool& pool, const std::string& data) {
  std::string res = base::StripSuffix(base::StripPrefix(data, "\""), "\"");
  return pool.InternString(base::StringView(res));
}

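// Parses one row of the slice CSV (kSliceTable) into a SliceTable::Row.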
SliceTable::Row GetSliceTableRow(const std::string& string_row,
                                 StringPool& pool) {
  std::vector<std::string> row_vec = SplitCSVLine(string_row);
  SliceTable::Row row;
  PERFETTO_CHECK(row_vec.size() >= 14);
  row.ts = *base::StringToInt64(row_vec[2]);
  row.dur = *base::StringToInt64(row_vec[3]);
  row.track_id = ThreadTrackTable::Id(*base::StringToUInt32(row_vec[4]));
  row.category = StripAndIntern(pool, row_vec[5]);
  row.name = StripAndIntern(pool, row_vec[6]);
  row.depth = *base::StringToUInt32(row_vec[7]);
  row.stack_id = *base::StringToInt32(row_vec[8]);
  row.parent_stack_id = *base::StringToInt32(row_vec[9]);
  row.parent_id = base::StringToUInt32(row_vec[10]).has_value()
                      ? std::make_optional<SliceTable::Id>(
                            *base::StringToUInt32(row_vec[10]))
                      : std::nullopt;
  row.arg_set_id = *base::StringToUInt32(row_vec[11]);
  row.thread_ts = base::StringToInt64(row_vec[12]);
  row.thread_dur = base::StringToInt64(row_vec[13]);
  return row;
}

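// Owns a StringPool and a SliceTable populated from kSliceTable. The first
// CSV row is the header and is skipped.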
struct SliceTableForBenchmark {
  explicit SliceTableForBenchmark(benchmark::State& state) : table_{&pool_} {
    std::vector<std::string> rows_strings = ReadCSV(state, kSliceTable);

    for (size_t i = 1; i < rows_strings.size(); ++i) {
      table_.Insert(GetSliceTableRow(rows_strings[i], pool_));
    }
  }

  StringPool pool_;
  SliceTable table_;
};

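// Builds an ExpectedFrameTimelineSliceTable on top of its parent SliceTable.
// Slice rows from kSliceTable are inserted into the parent as padding so that
// each expected-frame row ends up with the id recorded in its CSV row.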
struct ExpectedFrameTimelineTableForBenchmark {
  explicit ExpectedFrameTimelineTableForBenchmark(benchmark::State& state)
      : table_{&pool_, &parent_} {
    std::vector<std::string> table_rows_as_string =
        ReadCSV(state, kExpectedFrameTimelineTable);
    std::vector<std::string> parent_rows_as_string =
        ReadCSV(state, kSliceTable);

    uint32_t cur_idx = 0;
    for (size_t i = 1; i < table_rows_as_string.size(); ++i, ++cur_idx) {
      std::vector<std::string> row_vec = SplitCSVLine(table_rows_as_string[i]);

      uint32_t idx = *base::StringToUInt32(row_vec[0]);
      while (cur_idx < idx) {
        parent_.Insert(
            GetSliceTableRow(parent_rows_as_string[cur_idx + 1], pool_));
        cur_idx++;
      }

      ExpectedFrameTimelineSliceTable::Row row;
      row.ts = *base::StringToInt64(row_vec[2]);
      row.dur = *base::StringToInt64(row_vec[3]);
      row.track_id = ThreadTrackTable::Id(*base::StringToUInt32(row_vec[4]));
      row.depth = *base::StringToUInt32(row_vec[7]);
      row.stack_id = *base::StringToInt32(row_vec[8]);
      row.parent_stack_id = *base::StringToInt32(row_vec[9]);
      row.parent_id = base::StringToUInt32(row_vec[11]).has_value()
                          ? std::make_optional<SliceTable::Id>(
                                *base::StringToUInt32(row_vec[11]))
                          : std::nullopt;
      row.arg_set_id = *base::StringToUInt32(row_vec[11]);
      row.thread_ts = base::StringToInt64(row_vec[12]);
      row.thread_dur = base::StringToInt64(row_vec[13]);
      table_.Insert(row);
    }
  }
  StringPool pool_;
  SliceTable parent_{&pool_};
  ExpectedFrameTimelineSliceTable table_;
};

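// Builds an FtraceEventTable on top of its parent RawTable. Rows from the raw
// CSV are inserted into the parent as padding so that each ftrace_event row
// ends up with the id recorded in its CSV row.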
struct FtraceEventTableForBenchmark {
  explicit FtraceEventTableForBenchmark(benchmark::State& state) {
    std::vector<std::string> raw_rows = ReadCSV(state, kRawTable);
    std::vector<std::string> ftrace_event_rows =
        ReadCSV(state, kFtraceEventTable);

    uint32_t cur_idx = 0;
    for (size_t i = 1; i < ftrace_event_rows.size(); ++i, cur_idx++) {
      std::vector<std::string> row_vec = SplitCSVLine(ftrace_event_rows[i]);
      uint32_t idx = *base::StringToUInt32(row_vec[0]);
      while (cur_idx < idx) {
        std::vector<std::string> raw_row = SplitCSVLine(raw_rows[cur_idx + 1]);
        RawTable::Row r;
        r.ucpu = tables::CpuTable::Id(*base::StringToUInt32(raw_row[1]));
        raw_.Insert(r);
        cur_idx++;
      }
      FtraceEventTable::Row row;
      row.ucpu = tables::CpuTable::Id(*base::StringToUInt32(row_vec[1]));
      table_.Insert(row);
    }
  }

  StringPool pool_;
  RawTable raw_{&pool_};
  tables::FtraceEventTable table_{&pool_, &raw_};
};

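// Builds a HeapGraphObjectTable from the heap graph CSV. reference_set_id
// stays null for rows whose CSV field does not parse as an integer; the
// dense-null benchmarks below filter on this column.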
struct HeapGraphObjectTableForBenchmark {
  explicit HeapGraphObjectTableForBenchmark(benchmark::State& state) {
    std::vector<std::string> table_rows_as_string =
        ReadCSV(state, kHeapGraphObjectTable);

    for (size_t i = 1; i < table_rows_as_string.size(); ++i) {
      std::vector<std::string> row_vec = SplitCSVLine(table_rows_as_string[i]);

      HeapGraphObjectTable::Row row;
      row.upid = *base::StringToUInt32(row_vec[1]);
      row.reference_set_id = base::StringToUInt32(row_vec[2]);
      table_.Insert(row);
    }
  }
  StringPool pool_;
  HeapGraphObjectTable table_{&pool_};
};

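// Filters the slice table with the given constraints on every iteration.
// "s/row" reports seconds per input row and "s/out" seconds per output row
// (both are inverted iteration-invariant rates).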
void BenchmarkSliceTableFilter(benchmark::State& state,
                               SliceTableForBenchmark& table,
                               std::initializer_list<Constraint> c) {
  Query q;
  q.constraints = c;
  for (auto _ : state) {
    benchmark::DoNotOptimize(table.table_.QueryToRowMap(q));
  }
  state.counters["s/row"] =
      benchmark::Counter(static_cast<double>(table.table_.row_count()),
                         benchmark::Counter::kIsIterationInvariantRate |
                             benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}

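// Sorts the slice table with the given ordering on every iteration and
// reports seconds per input row.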
void BenchmarkSliceTableSort(benchmark::State& state,
                             SliceTableForBenchmark& table,
                             std::initializer_list<Order> ob) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(table.table_.Sort(ob));
  }
  state.counters["s/row"] =
      benchmark::Counter(static_cast<double>(table.table_.row_count()),
                         benchmark::Counter::kIsIterationInvariantRate |
                             benchmark::Counter::kInvert);
}

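// Runs the given query against the expected frame timeline table on every
// iteration; reports seconds per input row and per output row.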
void BenchmarkExpectedFrameTableQuery(
    benchmark::State& state,
    ExpectedFrameTimelineTableForBenchmark& table,
    Query q) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(table.table_.QueryToRowMap(q));
  }
  state.counters["s/row"] =
      benchmark::Counter(static_cast<double>(table.table_.row_count()),
                         benchmark::Counter::kIsIterationInvariantRate |
                             benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}

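// Runs the given query against the ftrace_event table on every iteration;
// reports seconds per input row and per output row.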
void BenchmarkFtraceEventTableQuery(benchmark::State& state,
                                    FtraceEventTableForBenchmark& table,
                                    Query q) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(table.table_.QueryToRowMap(q));
  }
  state.counters["s/row"] =
      benchmark::Counter(static_cast<double>(table.table_.row_count()),
                         benchmark::Counter::kIsIterationInvariantRate |
                             benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}

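// Sorts the ftrace_event table with the given ordering on every iteration and
// reports seconds per input row.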
void BenchmarkFtraceEventTableSort(benchmark::State& state,
                                   FtraceEventTableForBenchmark& table,
                                   std::initializer_list<Order> ob) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(table.table_.Sort(ob));
  }
  state.counters["s/row"] =
      benchmark::Counter(static_cast<double>(table.table_.row_count()),
                         benchmark::Counter::kIsIterationInvariantRate |
                             benchmark::Counter::kInvert);
}

void BM_QESliceTableTrackIdEq(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(state, table, {table.table_.track_id().eq(1213)});
}
BENCHMARK(BM_QESliceTableTrackIdEq);

void BM_QESliceTableParentIdIsNotNull(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(state, table,
                            {table.table_.parent_id().is_not_null()});
}
BENCHMARK(BM_QESliceTableParentIdIsNotNull);

void BM_QESliceTableParentIdEq(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(state, table, {table.table_.parent_id().eq(26711)});
}
BENCHMARK(BM_QESliceTableParentIdEq);

void BM_QESliceTableNameEq(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(
      state, table,
      {table.table_.name().eq("MarkFromReadBarrierWithMeasurements")});
}
BENCHMARK(BM_QESliceTableNameEq);

void BM_QESliceTableNameGlobNoStars(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(
      state, table,
      {table.table_.name().glob("MarkFromReadBarrierWithMeasurements")});
}
BENCHMARK(BM_QESliceTableNameGlobNoStars);

void BM_QESliceTableNameGlob(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(
      state, table, {table.table_.name().glob("HIDL::IMapper::unlock::*")});
}
BENCHMARK(BM_QESliceTableNameGlob);

void BM_QESliceTableNameRegex(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(state, table,
                            {table.table_.name().regex(".*Pool.*")});
}
BENCHMARK(BM_QESliceTableNameRegex);

void BM_QESliceTableSorted(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(state, table,
                            {table.table_.ts().gt(1738923505854),
                             table.table_.ts().lt(1738950140556)});
}
BENCHMARK(BM_QESliceTableSorted);

void BM_QEFilterWithSparseSelector(benchmark::State& state) {
  ExpectedFrameTimelineTableForBenchmark table(state);
  Query q;
  q.constraints = {table.table_.track_id().eq(1445)};
  BenchmarkExpectedFrameTableQuery(state, table, q);
}
BENCHMARK(BM_QEFilterWithSparseSelector);

void BM_QEFilterWithDenseSelector(benchmark::State& state) {
  FtraceEventTableForBenchmark table(state);
  Query q;
  q.constraints = {table.table_.ucpu().eq(4)};
  BenchmarkFtraceEventTableQuery(state, table, q);
}
BENCHMARK(BM_QEFilterWithDenseSelector);

void BM_QESliceEventFilterId(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(state, table, {table.table_.id().eq(500)});
}
BENCHMARK(BM_QESliceEventFilterId);

void BM_QEFtraceEventFilterId(benchmark::State& state) {
  FtraceEventTableForBenchmark table(state);
  Query q;
  q.constraints = {table.table_.id().eq(500)};
  BenchmarkFtraceEventTableQuery(state, table, q);
}

BENCHMARK(BM_QEFtraceEventFilterId);

void BM_QESliceTableTsAndTrackId(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(
      state, table,
      {table.table_.ts().ge(1738923505854), table.table_.ts().le(1738950140556),
       table.table_.track_id().eq(1422)});
}
BENCHMARK(BM_QESliceTableTsAndTrackId);

void BM_QEFilterOneElement(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(
      state, table,
      {table.table_.id().eq(11732), table.table_.track_id().eq(1422)});
}
BENCHMARK(BM_QEFilterOneElement);

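// The "arrangement" benchmarks below query a copy of the slice table returned
// by Sort(), so row lookups go through the sorted row map instead of reading
// storage in order.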
void BM_QEFilterWithArrangement(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  Order order{table.table_.dur().index_in_table(), false};
  Table slice_sorted_with_duration = table.table_.Sort({order});

  Constraint c{table.table_.track_id().index_in_table(), FilterOp::kGt,
               SqlValue::Long(10)};
  Query q;
  q.constraints = {c};
  for (auto _ : state) {
    benchmark::DoNotOptimize(slice_sorted_with_duration.QueryToRowMap(q));
  }
  state.counters["s/row"] = benchmark::Counter(
      static_cast<double>(slice_sorted_with_duration.row_count()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}
BENCHMARK(BM_QEFilterWithArrangement);

void BM_QEDenseNullFilter(benchmark::State& state) {
  HeapGraphObjectTableForBenchmark table(state);
  Constraint c{table.table_.reference_set_id().index_in_table(), FilterOp::kGt,
               SqlValue::Long(1000)};
  Query q;
  q.constraints = {c};
  for (auto _ : state) {
    benchmark::DoNotOptimize(table.table_.QueryToRowMap(q));
  }
  state.counters["s/row"] =
      benchmark::Counter(static_cast<double>(table.table_.row_count()),
                         benchmark::Counter::kIsIterationInvariantRate |
                             benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}
BENCHMARK(BM_QEDenseNullFilter);

void BM_QEDenseNullFilterIsNull(benchmark::State& state) {
  HeapGraphObjectTableForBenchmark table(state);
  Constraint c{table.table_.reference_set_id().index_in_table(),
               FilterOp::kIsNull, SqlValue()};
  Query q;
  q.constraints = {c};
  for (auto _ : state) {
    benchmark::DoNotOptimize(table.table_.QueryToRowMap(q));
  }
  state.counters["s/row"] =
      benchmark::Counter(static_cast<double>(table.table_.row_count()),
                         benchmark::Counter::kIsIterationInvariantRate |
                             benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}
BENCHMARK(BM_QEDenseNullFilterIsNull);

void BM_QEIdColumnWithIntAsDouble(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  Constraint c{table.table_.track_id().index_in_table(), FilterOp::kEq,
               SqlValue::Double(100)};
  BenchmarkSliceTableFilter(state, table, {c});
}
BENCHMARK(BM_QEIdColumnWithIntAsDouble);

void BM_QEIdColumnWithDouble(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  Constraint c{table.table_.track_id().index_in_table(), FilterOp::kEq,
               SqlValue::Double(100.5)};
  BenchmarkSliceTableFilter(state, table, {c});
}
BENCHMARK(BM_QEIdColumnWithDouble);

void BM_QEFilterOrderedArrangement(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  Order order{table.table_.dur().index_in_table(), false};
  Table slice_sorted_with_duration = table.table_.Sort({order});

  Constraint c{table.table_.dur().index_in_table(), FilterOp::kGt,
               SqlValue::Long(10)};
  Query q;
  q.constraints = {c};
  for (auto _ : state) {
    benchmark::DoNotOptimize(slice_sorted_with_duration.QueryToRowMap(q));
  }
  state.counters["s/row"] = benchmark::Counter(
      static_cast<double>(slice_sorted_with_duration.row_count()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}
BENCHMARK(BM_QEFilterOrderedArrangement);

void BM_QEFilterNullOrderedArrangement(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  Order order{table.table_.parent_id().index_in_table(), false};
  Table slice_sorted_with_parent_id = table.table_.Sort({order});

  Constraint c{table.table_.parent_id().index_in_table(), FilterOp::kGt,
               SqlValue::Long(26091)};
  Query q;
  q.constraints = {c};
  for (auto _ : state) {
    benchmark::DoNotOptimize(slice_sorted_with_parent_id.QueryToRowMap(q));
  }
  state.counters["s/row"] = benchmark::Counter(
      static_cast<double>(slice_sorted_with_parent_id.row_count()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}
BENCHMARK(BM_QEFilterNullOrderedArrangement);

void BM_QESliceFilterIndexSearchOneElement(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(
      state, table,
      {table.table_.track_id().eq(1422), table.table_.id().eq(11732)});
}
BENCHMARK(BM_QESliceFilterIndexSearchOneElement);

void BM_QESliceFilterIndexSearch(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableFilter(state, table,
                            {table.table_.track_id().eq(1422),
                             table.table_.name().eq("notifyFramePending")});
}
BENCHMARK(BM_QESliceFilterIndexSearch);

void BM_QESliceSortNumericAsc(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableSort(state, table, {table.table_.track_id().ascending()});
}
BENCHMARK(BM_QESliceSortNumericAsc);

void BM_QESliceSortNullNumericAsc(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  BenchmarkSliceTableSort(state, table, {table.table_.parent_id().ascending()});
}
BENCHMARK(BM_QESliceSortNullNumericAsc);

void BM_QEFtraceEventSortSelectorNumericAsc(benchmark::State& state) {
  FtraceEventTableForBenchmark table(state);
  BenchmarkFtraceEventTableSort(state, table,
                                {table.table_.ucpu().ascending()});
}
BENCHMARK(BM_QEFtraceEventSortSelectorNumericAsc);

void BM_QEFtraceEventSortSelectorNumericDesc(benchmark::State& state) {
  FtraceEventTableForBenchmark table(state);
  BenchmarkFtraceEventTableSort(state, table,
                                {table.table_.ucpu().descending()});
}
BENCHMARK(BM_QEFtraceEventSortSelectorNumericDesc);

void BM_QEDistinctWithSparseSelector(benchmark::State& state) {
  ExpectedFrameTimelineTableForBenchmark table(state);
  Query q;
  q.order_type = Query::OrderType::kDistinct;
  q.orders = {table.table_.track_id().descending()};
  BenchmarkExpectedFrameTableQuery(state, table, q);
}
BENCHMARK(BM_QEDistinctWithSparseSelector);

void BM_QEDistinctWithDenseSelector(benchmark::State& state) {
  FtraceEventTableForBenchmark table(state);
  Query q;
  q.order_type = Query::OrderType::kDistinct;
  q.orders = {table.table_.ucpu().descending()};
  BenchmarkFtraceEventTableQuery(state, table, q);
}
BENCHMARK(BM_QEDistinctWithDenseSelector);

void BM_QEDistinctSortedWithSparseSelector(benchmark::State& state) {
  ExpectedFrameTimelineTableForBenchmark table(state);
  Query q;
  q.order_type = Query::OrderType::kDistinctAndSort;
  q.orders = {table.table_.track_id().descending()};
  BenchmarkExpectedFrameTableQuery(state, table, q);
}
BENCHMARK(BM_QEDistinctSortedWithSparseSelector);

void BM_QEDistinctSortedWithDenseSelector(benchmark::State& state) {
  FtraceEventTableForBenchmark table(state);
  Query q;
  q.order_type = Query::OrderType::kDistinctAndSort;
  q.orders = {table.table_.ucpu().descending()};
  BenchmarkFtraceEventTableQuery(state, table, q);
}
BENCHMARK(BM_QEDistinctSortedWithDenseSelector);

void BM_QEDistinctWithArrangement(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  Order order{table.table_.dur().index_in_table(), false};
  Table slice_sorted_with_duration = table.table_.Sort({order});

  Query q;
  q.order_type = Query::OrderType::kDistinct;
  q.orders = {table.table_.track_id().descending()};

  for (auto _ : state) {
    benchmark::DoNotOptimize(slice_sorted_with_duration.QueryToRowMap(q));
  }
  state.counters["s/row"] = benchmark::Counter(
      static_cast<double>(slice_sorted_with_duration.row_count()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}
BENCHMARK(BM_QEDistinctWithArrangement);

void BM_QEDistinctSortedWithArrangement(benchmark::State& state) {
  SliceTableForBenchmark table(state);
  Order order{table.table_.dur().index_in_table(), false};
  Table slice_sorted_with_duration = table.table_.Sort({order});

  Query q;
  q.order_type = Query::OrderType::kDistinctAndSort;
  q.orders = {table.table_.track_id().descending()};

  for (auto _ : state) {
    benchmark::DoNotOptimize(slice_sorted_with_duration.QueryToRowMap(q));
  }
  state.counters["s/row"] = benchmark::Counter(
      static_cast<double>(slice_sorted_with_duration.row_count()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
  state.counters["s/out"] = benchmark::Counter(
      static_cast<double>(table.table_.QueryToRowMap(q).size()),
      benchmark::Counter::kIsIterationInvariantRate |
          benchmark::Counter::kInvert);
}
BENCHMARK(BM_QEDistinctSortedWithArrangement);

void BM_QEOffsetLimit(benchmark::State& state) {
  FtraceEventTableForBenchmark table(state);
  Query q;
  q.limit = 10;
  q.offset = 100;
  BenchmarkFtraceEventTableQuery(state, table, q);
}
BENCHMARK(BM_QEOffsetLimit);

void BM_QEMax(benchmark::State& state) {
  FtraceEventTableForBenchmark table(state);
  Query q;
  q.limit = 1;
  q.orders = {table.table_.utid().descending()};
  BenchmarkFtraceEventTableQuery(state, table, q);
}
BENCHMARK(BM_QEMax);

}  // namespace
}  // namespace perfetto::trace_processor