1 /* 2 * Copyright (C) 2017 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #ifndef SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_ 18 #define SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_ 19 20 #include <array> 21 #include <deque> 22 #include <map> 23 #include <string> 24 #include <unordered_map> 25 #include <utility> 26 #include <vector> 27 28 #include "perfetto/base/logging.h" 29 #include "perfetto/base/time.h" 30 #include "perfetto/ext/base/hash.h" 31 #include "perfetto/ext/base/optional.h" 32 #include "perfetto/ext/base/string_view.h" 33 #include "perfetto/ext/base/utils.h" 34 #include "perfetto/trace_processor/basic_types.h" 35 #include "perfetto/trace_processor/status.h" 36 #include "src/trace_processor/containers/string_pool.h" 37 #include "src/trace_processor/storage/metadata.h" 38 #include "src/trace_processor/storage/stats.h" 39 #include "src/trace_processor/tables/android_tables.h" 40 #include "src/trace_processor/tables/counter_tables.h" 41 #include "src/trace_processor/tables/flow_tables.h" 42 #include "src/trace_processor/tables/memory_tables.h" 43 #include "src/trace_processor/tables/metadata_tables.h" 44 #include "src/trace_processor/tables/profiler_tables.h" 45 #include "src/trace_processor/tables/slice_tables.h" 46 #include "src/trace_processor/tables/track_tables.h" 47 #include "src/trace_processor/types/variadic.h" 48 49 namespace perfetto { 50 namespace trace_processor { 51 52 // UniquePid 
is an offset into |unique_processes_|. This is necessary because 53 // Unix pids are reused and thus not guaranteed to be unique over a long 54 // period of time. 55 using UniquePid = uint32_t; 56 57 // UniqueTid is an offset into |unique_threads_|. Necessary because tids can 58 // be reused. 59 using UniqueTid = uint32_t; 60 61 // StringId is an offset into |string_pool_|. 62 using StringId = StringPool::Id; 63 static const StringId kNullStringId = StringId::Null(); 64 65 using ArgSetId = uint32_t; 66 static const ArgSetId kInvalidArgSetId = 0; 67 68 using TrackId = tables::TrackTable::Id; 69 70 using CounterId = tables::CounterTable::Id; 71 72 using SliceId = tables::SliceTable::Id; 73 74 using InstantId = tables::InstantTable::Id; 75 76 using SchedId = tables::SchedSliceTable::Id; 77 78 using MappingId = tables::StackProfileMappingTable::Id; 79 80 using FrameId = tables::StackProfileFrameTable::Id; 81 82 using SymbolId = tables::SymbolTable::Id; 83 84 using CallsiteId = tables::StackProfileCallsiteTable::Id; 85 86 using MetadataId = tables::MetadataTable::Id; 87 88 using RawId = tables::RawTable::Id; 89 90 using FlamegraphId = tables::ExperimentalFlamegraphNodesTable::Id; 91 92 using VulkanAllocId = tables::VulkanMemoryAllocationsTable::Id; 93 94 using ProcessMemorySnapshotId = tables::ProcessMemorySnapshotTable::Id; 95 96 using SnapshotNodeId = tables::MemorySnapshotNodeTable::Id; 97 98 // TODO(lalitm): this is a temporary hack while migrating the counters table and 99 // will be removed when the migration is complete. 100 static const TrackId kInvalidTrackId = 101 TrackId(std::numeric_limits<TrackId>::max()); 102 103 enum class RefType { 104 kRefNoRef = 0, 105 kRefUtid = 1, 106 kRefCpuId = 2, 107 kRefIrq = 3, 108 kRefSoftIrq = 4, 109 kRefUpid = 5, 110 kRefGpuId = 6, 111 kRefTrack = 7, 112 kRefMax 113 }; 114 115 const std::vector<NullTermStringView>& GetRefTypeStringMap(); 116 117 // Stores a data inside a trace file in a columnar form. 
This makes it efficient 118 // to read or search across a single field of the trace (e.g. all the thread 119 // names for a given CPU). 120 class TraceStorage { 121 public: 122 TraceStorage(const Config& = Config()); 123 124 virtual ~TraceStorage(); 125 126 class VirtualTrackSlices { 127 public: AddVirtualTrackSlice(SliceId slice_id,int64_t thread_timestamp_ns,int64_t thread_duration_ns,int64_t thread_instruction_count,int64_t thread_instruction_delta)128 inline uint32_t AddVirtualTrackSlice(SliceId slice_id, 129 int64_t thread_timestamp_ns, 130 int64_t thread_duration_ns, 131 int64_t thread_instruction_count, 132 int64_t thread_instruction_delta) { 133 slice_ids_.emplace_back(slice_id); 134 thread_timestamp_ns_.emplace_back(thread_timestamp_ns); 135 thread_duration_ns_.emplace_back(thread_duration_ns); 136 thread_instruction_counts_.emplace_back(thread_instruction_count); 137 thread_instruction_deltas_.emplace_back(thread_instruction_delta); 138 return slice_count() - 1; 139 } 140 slice_count()141 uint32_t slice_count() const { 142 return static_cast<uint32_t>(slice_ids_.size()); 143 } 144 slice_ids()145 const std::deque<SliceId>& slice_ids() const { return slice_ids_; } thread_timestamp_ns()146 const std::deque<int64_t>& thread_timestamp_ns() const { 147 return thread_timestamp_ns_; 148 } thread_duration_ns()149 const std::deque<int64_t>& thread_duration_ns() const { 150 return thread_duration_ns_; 151 } thread_instruction_counts()152 const std::deque<int64_t>& thread_instruction_counts() const { 153 return thread_instruction_counts_; 154 } thread_instruction_deltas()155 const std::deque<int64_t>& thread_instruction_deltas() const { 156 return thread_instruction_deltas_; 157 } 158 FindRowForSliceId(SliceId slice_id)159 base::Optional<uint32_t> FindRowForSliceId(SliceId slice_id) const { 160 auto it = 161 std::lower_bound(slice_ids().begin(), slice_ids().end(), slice_id); 162 if (it != slice_ids().end() && *it == slice_id) { 163 return 
static_cast<uint32_t>(std::distance(slice_ids().begin(), it)); 164 } 165 return base::nullopt; 166 } 167 UpdateThreadDeltasForSliceId(SliceId slice_id,int64_t end_thread_timestamp_ns,int64_t end_thread_instruction_count)168 void UpdateThreadDeltasForSliceId(SliceId slice_id, 169 int64_t end_thread_timestamp_ns, 170 int64_t end_thread_instruction_count) { 171 auto opt_row = FindRowForSliceId(slice_id); 172 if (!opt_row) 173 return; 174 uint32_t row = *opt_row; 175 int64_t begin_ns = thread_timestamp_ns_[row]; 176 thread_duration_ns_[row] = end_thread_timestamp_ns - begin_ns; 177 int64_t begin_ticount = thread_instruction_counts_[row]; 178 thread_instruction_deltas_[row] = 179 end_thread_instruction_count - begin_ticount; 180 } 181 182 private: 183 std::deque<SliceId> slice_ids_; 184 std::deque<int64_t> thread_timestamp_ns_; 185 std::deque<int64_t> thread_duration_ns_; 186 std::deque<int64_t> thread_instruction_counts_; 187 std::deque<int64_t> thread_instruction_deltas_; 188 }; 189 190 class SqlStats { 191 public: 192 static constexpr size_t kMaxLogEntries = 100; 193 uint32_t RecordQueryBegin(const std::string& query, 194 int64_t time_started); 195 void RecordQueryFirstNext(uint32_t row, int64_t time_first_next); 196 void RecordQueryEnd(uint32_t row, int64_t time_end); size()197 size_t size() const { return queries_.size(); } queries()198 const std::deque<std::string>& queries() const { return queries_; } times_started()199 const std::deque<int64_t>& times_started() const { return times_started_; } times_first_next()200 const std::deque<int64_t>& times_first_next() const { 201 return times_first_next_; 202 } times_ended()203 const std::deque<int64_t>& times_ended() const { return times_ended_; } 204 205 private: 206 uint32_t popped_queries_ = 0; 207 208 std::deque<std::string> queries_; 209 std::deque<int64_t> times_started_; 210 std::deque<int64_t> times_first_next_; 211 std::deque<int64_t> times_ended_; 212 }; 213 214 struct Stats { 215 using IndexMap = 
std::map<int, int64_t>; 216 int64_t value = 0; 217 IndexMap indexed_values; 218 }; 219 using StatsMap = std::array<Stats, stats::kNumKeys>; 220 221 // Return an unqiue identifier for the contents of each string. 222 // The string is copied internally and can be destroyed after this called. 223 // Virtual for testing. InternString(base::StringView str)224 virtual StringId InternString(base::StringView str) { 225 return string_pool_.InternString(str); 226 } 227 228 // Example usage: SetStats(stats::android_log_num_failed, 42); SetStats(size_t key,int64_t value)229 void SetStats(size_t key, int64_t value) { 230 PERFETTO_DCHECK(key < stats::kNumKeys); 231 PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle); 232 stats_[key].value = value; 233 } 234 235 // Example usage: IncrementStats(stats::android_log_num_failed, -1); 236 void IncrementStats(size_t key, int64_t increment = 1) { 237 PERFETTO_DCHECK(key < stats::kNumKeys); 238 PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle); 239 stats_[key].value += increment; 240 } 241 242 // Example usage: IncrementIndexedStats(stats::cpu_failure, 1); 243 void IncrementIndexedStats(size_t key, int index, int64_t increment = 1) { 244 PERFETTO_DCHECK(key < stats::kNumKeys); 245 PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed); 246 stats_[key].indexed_values[index] += increment; 247 } 248 249 // Example usage: SetIndexedStats(stats::cpu_failure, 1, 42); SetIndexedStats(size_t key,int index,int64_t value)250 void SetIndexedStats(size_t key, int index, int64_t value) { 251 PERFETTO_DCHECK(key < stats::kNumKeys); 252 PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed); 253 stats_[key].indexed_values[index] = value; 254 } 255 256 // Example usage: opt_cpu_failure = GetIndexedStats(stats::cpu_failure, 1); GetIndexedStats(size_t key,int index)257 base::Optional<int64_t> GetIndexedStats(size_t key, int index) { 258 PERFETTO_DCHECK(key < stats::kNumKeys); 259 PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed); 260 auto kv = 
stats_[key].indexed_values.find(index); 261 if (kv != stats_[key].indexed_values.end()) { 262 return kv->second; 263 } 264 return base::nullopt; 265 } 266 267 class ScopedStatsTracer { 268 public: ScopedStatsTracer(TraceStorage * storage,size_t key)269 ScopedStatsTracer(TraceStorage* storage, size_t key) 270 : storage_(storage), key_(key), start_ns_(base::GetWallTimeNs()) {} 271 ~ScopedStatsTracer()272 ~ScopedStatsTracer() { 273 if (!storage_) 274 return; 275 auto delta_ns = base::GetWallTimeNs() - start_ns_; 276 storage_->IncrementStats(key_, delta_ns.count()); 277 } 278 ScopedStatsTracer(ScopedStatsTracer && other)279 ScopedStatsTracer(ScopedStatsTracer&& other) noexcept { MoveImpl(&other); } 280 281 ScopedStatsTracer& operator=(ScopedStatsTracer&& other) { 282 MoveImpl(&other); 283 return *this; 284 } 285 286 private: 287 ScopedStatsTracer(const ScopedStatsTracer&) = delete; 288 ScopedStatsTracer& operator=(const ScopedStatsTracer&) = delete; 289 MoveImpl(ScopedStatsTracer * other)290 void MoveImpl(ScopedStatsTracer* other) { 291 storage_ = other->storage_; 292 key_ = other->key_; 293 start_ns_ = other->start_ns_; 294 other->storage_ = nullptr; 295 } 296 297 TraceStorage* storage_; 298 size_t key_; 299 base::TimeNanos start_ns_; 300 }; 301 TraceExecutionTimeIntoStats(size_t key)302 ScopedStatsTracer TraceExecutionTimeIntoStats(size_t key) { 303 return ScopedStatsTracer(this, key); 304 } 305 306 // Reading methods. 307 // Virtual for testing. 
GetString(StringId id)308 virtual NullTermStringView GetString(StringId id) const { 309 return string_pool_.Get(id); 310 } 311 thread_table()312 const tables::ThreadTable& thread_table() const { return thread_table_; } mutable_thread_table()313 tables::ThreadTable* mutable_thread_table() { return &thread_table_; } 314 process_table()315 const tables::ProcessTable& process_table() const { return process_table_; } mutable_process_table()316 tables::ProcessTable* mutable_process_table() { return &process_table_; } 317 track_table()318 const tables::TrackTable& track_table() const { return track_table_; } mutable_track_table()319 tables::TrackTable* mutable_track_table() { return &track_table_; } 320 process_track_table()321 const tables::ProcessTrackTable& process_track_table() const { 322 return process_track_table_; 323 } mutable_process_track_table()324 tables::ProcessTrackTable* mutable_process_track_table() { 325 return &process_track_table_; 326 } 327 thread_track_table()328 const tables::ThreadTrackTable& thread_track_table() const { 329 return thread_track_table_; 330 } mutable_thread_track_table()331 tables::ThreadTrackTable* mutable_thread_track_table() { 332 return &thread_track_table_; 333 } 334 counter_track_table()335 const tables::CounterTrackTable& counter_track_table() const { 336 return counter_track_table_; 337 } mutable_counter_track_table()338 tables::CounterTrackTable* mutable_counter_track_table() { 339 return &counter_track_table_; 340 } 341 thread_counter_track_table()342 const tables::ThreadCounterTrackTable& thread_counter_track_table() const { 343 return thread_counter_track_table_; 344 } mutable_thread_counter_track_table()345 tables::ThreadCounterTrackTable* mutable_thread_counter_track_table() { 346 return &thread_counter_track_table_; 347 } 348 process_counter_track_table()349 const tables::ProcessCounterTrackTable& process_counter_track_table() const { 350 return process_counter_track_table_; 351 } 
mutable_process_counter_track_table()352 tables::ProcessCounterTrackTable* mutable_process_counter_track_table() { 353 return &process_counter_track_table_; 354 } 355 cpu_counter_track_table()356 const tables::CpuCounterTrackTable& cpu_counter_track_table() const { 357 return cpu_counter_track_table_; 358 } mutable_cpu_counter_track_table()359 tables::CpuCounterTrackTable* mutable_cpu_counter_track_table() { 360 return &cpu_counter_track_table_; 361 } 362 irq_counter_track_table()363 const tables::IrqCounterTrackTable& irq_counter_track_table() const { 364 return irq_counter_track_table_; 365 } mutable_irq_counter_track_table()366 tables::IrqCounterTrackTable* mutable_irq_counter_track_table() { 367 return &irq_counter_track_table_; 368 } 369 softirq_counter_track_table()370 const tables::SoftirqCounterTrackTable& softirq_counter_track_table() const { 371 return softirq_counter_track_table_; 372 } mutable_softirq_counter_track_table()373 tables::SoftirqCounterTrackTable* mutable_softirq_counter_track_table() { 374 return &softirq_counter_track_table_; 375 } 376 gpu_counter_track_table()377 const tables::GpuCounterTrackTable& gpu_counter_track_table() const { 378 return gpu_counter_track_table_; 379 } mutable_gpu_counter_track_table()380 tables::GpuCounterTrackTable* mutable_gpu_counter_track_table() { 381 return &gpu_counter_track_table_; 382 } 383 gpu_counter_group_table()384 const tables::GpuCounterGroupTable& gpu_counter_group_table() const { 385 return gpu_counter_group_table_; 386 } mutable_gpu_counter_group_table()387 tables::GpuCounterGroupTable* mutable_gpu_counter_group_table() { 388 return &gpu_counter_group_table_; 389 } 390 perf_counter_track_table()391 const tables::PerfCounterTrackTable& perf_counter_track_table() const { 392 return perf_counter_track_table_; 393 } mutable_perf_counter_track_table()394 tables::PerfCounterTrackTable* mutable_perf_counter_track_table() { 395 return &perf_counter_track_table_; 396 } 397 sched_slice_table()398 const 
tables::SchedSliceTable& sched_slice_table() const { 399 return sched_slice_table_; 400 } mutable_sched_slice_table()401 tables::SchedSliceTable* mutable_sched_slice_table() { 402 return &sched_slice_table_; 403 } 404 slice_table()405 const tables::SliceTable& slice_table() const { return slice_table_; } mutable_slice_table()406 tables::SliceTable* mutable_slice_table() { return &slice_table_; } 407 flow_table()408 const tables::FlowTable& flow_table() const { return flow_table_; } mutable_flow_table()409 tables::FlowTable* mutable_flow_table() { return &flow_table_; } 410 thread_slice_table()411 const tables::ThreadSliceTable& thread_slice_table() const { 412 return thread_slice_table_; 413 } mutable_thread_slice_table()414 tables::ThreadSliceTable* mutable_thread_slice_table() { 415 return &thread_slice_table_; 416 } 417 virtual_track_slices()418 const VirtualTrackSlices& virtual_track_slices() const { 419 return virtual_track_slices_; 420 } mutable_virtual_track_slices()421 VirtualTrackSlices* mutable_virtual_track_slices() { 422 return &virtual_track_slices_; 423 } 424 gpu_slice_table()425 const tables::GpuSliceTable& gpu_slice_table() const { 426 return gpu_slice_table_; 427 } mutable_gpu_slice_table()428 tables::GpuSliceTable* mutable_gpu_slice_table() { return &gpu_slice_table_; } 429 counter_table()430 const tables::CounterTable& counter_table() const { return counter_table_; } mutable_counter_table()431 tables::CounterTable* mutable_counter_table() { return &counter_table_; } 432 sql_stats()433 const SqlStats& sql_stats() const { return sql_stats_; } mutable_sql_stats()434 SqlStats* mutable_sql_stats() { return &sql_stats_; } 435 instant_table()436 const tables::InstantTable& instant_table() const { return instant_table_; } mutable_instant_table()437 tables::InstantTable* mutable_instant_table() { return &instant_table_; } 438 android_log_table()439 const tables::AndroidLogTable& android_log_table() const { 440 return android_log_table_; 441 } 
mutable_android_log_table()442 tables::AndroidLogTable* mutable_android_log_table() { 443 return &android_log_table_; 444 } 445 stats()446 const StatsMap& stats() const { return stats_; } 447 metadata_table()448 const tables::MetadataTable& metadata_table() const { 449 return metadata_table_; 450 } mutable_metadata_table()451 tables::MetadataTable* mutable_metadata_table() { return &metadata_table_; } 452 clock_snapshot_table()453 const tables::ClockSnapshotTable& clock_snapshot_table() const { 454 return clock_snapshot_table_; 455 } mutable_clock_snapshot_table()456 tables::ClockSnapshotTable* mutable_clock_snapshot_table() { 457 return &clock_snapshot_table_; 458 } 459 arg_table()460 const tables::ArgTable& arg_table() const { return arg_table_; } mutable_arg_table()461 tables::ArgTable* mutable_arg_table() { return &arg_table_; } 462 raw_table()463 const tables::RawTable& raw_table() const { return raw_table_; } mutable_raw_table()464 tables::RawTable* mutable_raw_table() { return &raw_table_; } 465 cpu_table()466 const tables::CpuTable& cpu_table() const { return cpu_table_; } mutable_cpu_table()467 tables::CpuTable* mutable_cpu_table() { return &cpu_table_; } 468 cpu_freq_table()469 const tables::CpuFreqTable& cpu_freq_table() const { return cpu_freq_table_; } mutable_cpu_freq_table()470 tables::CpuFreqTable* mutable_cpu_freq_table() { return &cpu_freq_table_; } 471 stack_profile_mapping_table()472 const tables::StackProfileMappingTable& stack_profile_mapping_table() const { 473 return stack_profile_mapping_table_; 474 } mutable_stack_profile_mapping_table()475 tables::StackProfileMappingTable* mutable_stack_profile_mapping_table() { 476 return &stack_profile_mapping_table_; 477 } 478 stack_profile_frame_table()479 const tables::StackProfileFrameTable& stack_profile_frame_table() const { 480 return stack_profile_frame_table_; 481 } mutable_stack_profile_frame_table()482 tables::StackProfileFrameTable* mutable_stack_profile_frame_table() { 483 return 
&stack_profile_frame_table_; 484 } 485 stack_profile_callsite_table()486 const tables::StackProfileCallsiteTable& stack_profile_callsite_table() 487 const { 488 return stack_profile_callsite_table_; 489 } mutable_stack_profile_callsite_table()490 tables::StackProfileCallsiteTable* mutable_stack_profile_callsite_table() { 491 return &stack_profile_callsite_table_; 492 } 493 heap_profile_allocation_table()494 const tables::HeapProfileAllocationTable& heap_profile_allocation_table() 495 const { 496 return heap_profile_allocation_table_; 497 } mutable_heap_profile_allocation_table()498 tables::HeapProfileAllocationTable* mutable_heap_profile_allocation_table() { 499 return &heap_profile_allocation_table_; 500 } 501 package_list_table()502 const tables::PackageListTable& package_list_table() const { 503 return package_list_table_; 504 } mutable_package_list_table()505 tables::PackageListTable* mutable_package_list_table() { 506 return &package_list_table_; 507 } 508 profiler_smaps_table()509 const tables::ProfilerSmapsTable& profiler_smaps_table() const { 510 return profiler_smaps_table_; 511 } mutable_profiler_smaps_table()512 tables::ProfilerSmapsTable* mutable_profiler_smaps_table() { 513 return &profiler_smaps_table_; 514 } 515 stack_sample_table()516 const tables::StackSampleTable& stack_sample_table() const { 517 return stack_sample_table_; 518 } mutable_stack_sample_table()519 tables::StackSampleTable* mutable_stack_sample_table() { 520 return &stack_sample_table_; 521 } 522 cpu_profile_stack_sample_table()523 const tables::CpuProfileStackSampleTable& cpu_profile_stack_sample_table() 524 const { 525 return cpu_profile_stack_sample_table_; 526 } mutable_cpu_profile_stack_sample_table()527 tables::CpuProfileStackSampleTable* mutable_cpu_profile_stack_sample_table() { 528 return &cpu_profile_stack_sample_table_; 529 } 530 perf_sample_table()531 const tables::PerfSampleTable& perf_sample_table() const { 532 return perf_sample_table_; 533 } 
mutable_perf_sample_table()534 tables::PerfSampleTable* mutable_perf_sample_table() { 535 return &perf_sample_table_; 536 } 537 symbol_table()538 const tables::SymbolTable& symbol_table() const { return symbol_table_; } 539 mutable_symbol_table()540 tables::SymbolTable* mutable_symbol_table() { return &symbol_table_; } 541 heap_graph_object_table()542 const tables::HeapGraphObjectTable& heap_graph_object_table() const { 543 return heap_graph_object_table_; 544 } 545 mutable_heap_graph_object_table()546 tables::HeapGraphObjectTable* mutable_heap_graph_object_table() { 547 return &heap_graph_object_table_; 548 } heap_graph_class_table()549 const tables::HeapGraphClassTable& heap_graph_class_table() const { 550 return heap_graph_class_table_; 551 } 552 mutable_heap_graph_class_table()553 tables::HeapGraphClassTable* mutable_heap_graph_class_table() { 554 return &heap_graph_class_table_; 555 } 556 heap_graph_reference_table()557 const tables::HeapGraphReferenceTable& heap_graph_reference_table() const { 558 return heap_graph_reference_table_; 559 } 560 mutable_heap_graph_reference_table()561 tables::HeapGraphReferenceTable* mutable_heap_graph_reference_table() { 562 return &heap_graph_reference_table_; 563 } 564 gpu_track_table()565 const tables::GpuTrackTable& gpu_track_table() const { 566 return gpu_track_table_; 567 } mutable_gpu_track_table()568 tables::GpuTrackTable* mutable_gpu_track_table() { return &gpu_track_table_; } 569 vulkan_memory_allocations_table()570 const tables::VulkanMemoryAllocationsTable& vulkan_memory_allocations_table() 571 const { 572 return vulkan_memory_allocations_table_; 573 } 574 575 tables::VulkanMemoryAllocationsTable* mutable_vulkan_memory_allocations_table()576 mutable_vulkan_memory_allocations_table() { 577 return &vulkan_memory_allocations_table_; 578 } 579 graphics_frame_slice_table()580 const tables::GraphicsFrameSliceTable& graphics_frame_slice_table() const { 581 return graphics_frame_slice_table_; 582 } 583 
mutable_graphics_frame_slice_table()584 tables::GraphicsFrameSliceTable* mutable_graphics_frame_slice_table() { 585 return &graphics_frame_slice_table_; 586 } 587 memory_snapshot_table()588 const tables::MemorySnapshotTable& memory_snapshot_table() const { 589 return memory_snapshot_table_; 590 } mutable_memory_snapshot_table()591 tables::MemorySnapshotTable* mutable_memory_snapshot_table() { 592 return &memory_snapshot_table_; 593 } 594 process_memory_snapshot_table()595 const tables::ProcessMemorySnapshotTable& process_memory_snapshot_table() 596 const { 597 return process_memory_snapshot_table_; 598 } mutable_process_memory_snapshot_table()599 tables::ProcessMemorySnapshotTable* mutable_process_memory_snapshot_table() { 600 return &process_memory_snapshot_table_; 601 } 602 memory_snapshot_node_table()603 const tables::MemorySnapshotNodeTable& memory_snapshot_node_table() const { 604 return memory_snapshot_node_table_; 605 } mutable_memory_snapshot_node_table()606 tables::MemorySnapshotNodeTable* mutable_memory_snapshot_node_table() { 607 return &memory_snapshot_node_table_; 608 } 609 memory_snapshot_edge_table()610 const tables::MemorySnapshotEdgeTable& memory_snapshot_edge_table() const { 611 return memory_snapshot_edge_table_; 612 } mutable_memory_snapshot_edge_table()613 tables::MemorySnapshotEdgeTable* mutable_memory_snapshot_edge_table() { 614 return &memory_snapshot_edge_table_; 615 } 616 617 const tables::ExpectedFrameTimelineSliceTable& expected_frame_timeline_slice_table()618 expected_frame_timeline_slice_table() const { 619 return expected_frame_timeline_slice_table_; 620 } 621 622 tables::ExpectedFrameTimelineSliceTable* mutable_expected_frame_timeline_slice_table()623 mutable_expected_frame_timeline_slice_table() { 624 return &expected_frame_timeline_slice_table_; 625 } 626 627 const tables::ActualFrameTimelineSliceTable& actual_frame_timeline_slice_table()628 actual_frame_timeline_slice_table() const { 629 return actual_frame_timeline_slice_table_; 
630 } 631 632 tables::ActualFrameTimelineSliceTable* mutable_actual_frame_timeline_slice_table()633 mutable_actual_frame_timeline_slice_table() { 634 return &actual_frame_timeline_slice_table_; 635 } 636 string_pool()637 const StringPool& string_pool() const { return string_pool_; } mutable_string_pool()638 StringPool* mutable_string_pool() { return &string_pool_; } 639 640 // Number of interned strings in the pool. Includes the empty string w/ ID=0. string_count()641 size_t string_count() const { return string_pool_.size(); } 642 643 // Start / end ts (in nanoseconds) across the parsed trace events. 644 // Returns (0, 0) if the trace is empty. 645 std::pair<int64_t, int64_t> GetTraceTimestampBoundsNs() const; 646 ExtractArg(uint32_t arg_set_id,const char * key,base::Optional<Variadic> * result)647 util::Status ExtractArg(uint32_t arg_set_id, 648 const char* key, 649 base::Optional<Variadic>* result) { 650 const auto& args = arg_table(); 651 RowMap filtered = args.FilterToRowMap( 652 {args.arg_set_id().eq(arg_set_id), args.key().eq(key)}); 653 if (filtered.empty()) { 654 *result = base::nullopt; 655 return util::OkStatus(); 656 } 657 if (filtered.size() > 1) { 658 return util::ErrStatus( 659 "EXTRACT_ARG: received multiple args matching arg set id and key"); 660 } 661 uint32_t idx = filtered.Get(0); 662 *result = GetArgValue(idx); 663 return util::OkStatus(); 664 } 665 GetArgValue(uint32_t row)666 Variadic GetArgValue(uint32_t row) const { 667 Variadic v; 668 v.type = *GetVariadicTypeForId(arg_table_.value_type()[row]); 669 670 // Force initialization of union to stop GCC complaining. 
671 v.int_value = 0; 672 673 switch (v.type) { 674 case Variadic::Type::kBool: 675 v.bool_value = static_cast<bool>(*arg_table_.int_value()[row]); 676 break; 677 case Variadic::Type::kInt: 678 v.int_value = *arg_table_.int_value()[row]; 679 break; 680 case Variadic::Type::kUint: 681 v.uint_value = static_cast<uint64_t>(*arg_table_.int_value()[row]); 682 break; 683 case Variadic::Type::kString: { 684 auto opt_value = arg_table_.string_value()[row]; 685 v.string_value = opt_value ? *opt_value : kNullStringId; 686 break; 687 } 688 case Variadic::Type::kPointer: 689 v.pointer_value = static_cast<uint64_t>(*arg_table_.int_value()[row]); 690 break; 691 case Variadic::Type::kReal: 692 v.real_value = *arg_table_.real_value()[row]; 693 break; 694 case Variadic::Type::kJson: { 695 auto opt_value = arg_table_.string_value()[row]; 696 v.json_value = opt_value ? *opt_value : kNullStringId; 697 break; 698 } 699 case Variadic::Type::kNull: 700 break; 701 } 702 return v; 703 } 704 GetIdForVariadicType(Variadic::Type type)705 StringId GetIdForVariadicType(Variadic::Type type) const { 706 return variadic_type_ids_[type]; 707 } 708 GetVariadicTypeForId(StringId id)709 base::Optional<Variadic::Type> GetVariadicTypeForId(StringId id) const { 710 auto it = 711 std::find(variadic_type_ids_.begin(), variadic_type_ids_.end(), id); 712 if (it == variadic_type_ids_.end()) 713 return base::nullopt; 714 715 int64_t idx = std::distance(variadic_type_ids_.begin(), it); 716 return static_cast<Variadic::Type>(idx); 717 } 718 719 private: 720 using StringHash = uint64_t; 721 722 TraceStorage(const TraceStorage&) = delete; 723 TraceStorage& operator=(const TraceStorage&) = delete; 724 725 TraceStorage(TraceStorage&&) = delete; 726 TraceStorage& operator=(TraceStorage&&) = delete; 727 728 // One entry for each unique string in the trace. 729 StringPool string_pool_; 730 731 // Stats about parsing the trace. 732 StatsMap stats_{}; 733 734 // Extra data extracted from the trace. 
  // Includes:
  // * metadata from chrome and benchmarking infrastructure
  // * descriptions of android packages
  tables::MetadataTable metadata_table_{&string_pool_, nullptr};

  // Contains data from all the clock snapshots in the trace.
  tables::ClockSnapshotTable clock_snapshot_table_{&string_pool_, nullptr};

  // Metadata for tracks. |track_table_| is the root table; the tables below
  // it are sub-types which pass |&track_table_| as their parent table.
  tables::TrackTable track_table_{&string_pool_, nullptr};
  tables::GpuTrackTable gpu_track_table_{&string_pool_, &track_table_};
  tables::ProcessTrackTable process_track_table_{&string_pool_, &track_table_};
  tables::ThreadTrackTable thread_track_table_{&string_pool_, &track_table_};

  // Track tables for counter events. |counter_track_table_| is itself a
  // sub-type of |track_table_| and is the parent of the specialized counter
  // track tables below.
  tables::CounterTrackTable counter_track_table_{&string_pool_, &track_table_};
  tables::ThreadCounterTrackTable thread_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::ProcessCounterTrackTable process_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::CpuCounterTrackTable cpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::IrqCounterTrackTable irq_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::SoftirqCounterTrackTable softirq_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::GpuCounterTrackTable gpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  // NOTE: unlike the other gpu counter tables, the group table is a root
  // table (nullptr parent), not a sub-type of |counter_track_table_|.
  tables::GpuCounterGroupTable gpu_counter_group_table_{&string_pool_, nullptr};
  tables::PerfCounterTrackTable perf_counter_track_table_{
      &string_pool_, &counter_track_table_};

  // Args for all other tables.
  tables::ArgTable arg_table_{&string_pool_, nullptr};

  // Information about all the threads and processes in the trace.
  tables::ThreadTable thread_table_{&string_pool_, nullptr};
  tables::ProcessTable process_table_{&string_pool_, nullptr};

  // Slices coming from userspace events (e.g. Chromium TRACE_EVENT macros).
  tables::SliceTable slice_table_{&string_pool_, nullptr};

  // Flow events from userspace events (e.g. Chromium TRACE_EVENT macros).
  tables::FlowTable flow_table_{&string_pool_, nullptr};

  // Slices from CPU scheduling data.
  tables::SchedSliceTable sched_slice_table_{&string_pool_, nullptr};

  // Additional attributes for threads slices (sub-type of NestableSlices).
  tables::ThreadSliceTable thread_slice_table_{&string_pool_, &slice_table_};

  // Additional attributes for virtual track slices (sub-type of
  // NestableSlices).
  VirtualTrackSlices virtual_track_slices_;

  // Additional attributes for gpu track slices (sub-type of
  // NestableSlices).
  tables::GpuSliceTable gpu_slice_table_{&string_pool_, &slice_table_};

  // The values from the Counter events from the trace. This includes CPU
  // frequency events as well systrace trace_marker counter events.
  tables::CounterTable counter_table_{&string_pool_, nullptr};

  // Bookkeeping for SQL queries run against this storage (see SqlStats).
  SqlStats sql_stats_;

  // These are instantaneous events in the trace. They have no duration
  // and do not have a value that make sense to track over time.
  // e.g. signal events
  tables::InstantTable instant_table_{&string_pool_, nullptr};

  // Raw events are every ftrace event in the trace. The raw event includes
  // the timestamp and the pid. The args for the raw event will be in the
  // args table. This table can be used to generate a text version of the
  // trace.
  tables::RawTable raw_table_{&string_pool_, nullptr};

  tables::CpuTable cpu_table_{&string_pool_, nullptr};

  tables::CpuFreqTable cpu_freq_table_{&string_pool_, nullptr};

  // Events from the Android log (logcat) buffers.
  tables::AndroidLogTable android_log_table_{&string_pool_, nullptr};

  // Stack-profiling tables. The mapping/frame/callsite Row types have
  // std::hash specializations at the bottom of this header, which suggests
  // their rows are deduplicated via hash-map lookups during ingestion.
  tables::StackProfileMappingTable stack_profile_mapping_table_{&string_pool_,
                                                                nullptr};
  tables::StackProfileFrameTable stack_profile_frame_table_{&string_pool_,
                                                            nullptr};
  tables::StackProfileCallsiteTable stack_profile_callsite_table_{&string_pool_,
                                                                  nullptr};
  tables::StackSampleTable stack_sample_table_{&string_pool_, nullptr};
  tables::HeapProfileAllocationTable heap_profile_allocation_table_{
      &string_pool_, nullptr};
  tables::CpuProfileStackSampleTable cpu_profile_stack_sample_table_{
      &string_pool_, &stack_sample_table_};
  tables::PerfSampleTable perf_sample_table_{&string_pool_, nullptr};
  tables::PackageListTable package_list_table_{&string_pool_, nullptr};
  tables::ProfilerSmapsTable profiler_smaps_table_{&string_pool_, nullptr};

  // Symbol tables (mappings from frames to symbol names)
  tables::SymbolTable symbol_table_{&string_pool_, nullptr};
  // Heap graph tables: objects, their classes and the references between
  // them.
  tables::HeapGraphObjectTable heap_graph_object_table_{&string_pool_, nullptr};
  tables::HeapGraphClassTable heap_graph_class_table_{&string_pool_, nullptr};
  tables::HeapGraphReferenceTable heap_graph_reference_table_{&string_pool_,
                                                              nullptr};

  tables::VulkanMemoryAllocationsTable vulkan_memory_allocations_table_{
      &string_pool_, nullptr};

  // Graphics frame slices (sub-type of NestableSlices).
  tables::GraphicsFrameSliceTable graphics_frame_slice_table_{&string_pool_,
                                                              &slice_table_};

  // Metadata for memory snapshot.
  tables::MemorySnapshotTable memory_snapshot_table_{&string_pool_, nullptr};
  tables::ProcessMemorySnapshotTable process_memory_snapshot_table_{
      &string_pool_, nullptr};
  tables::MemorySnapshotNodeTable memory_snapshot_node_table_{&string_pool_,
                                                              nullptr};
  tables::MemorySnapshotEdgeTable memory_snapshot_edge_table_{&string_pool_,
                                                              nullptr};

  // FrameTimeline tables
  tables::ExpectedFrameTimelineSliceTable expected_frame_timeline_slice_table_{
      &string_pool_, &slice_table_};
  tables::ActualFrameTimelineSliceTable actual_frame_timeline_slice_table_{
      &string_pool_, &slice_table_};

  // The below array allow us to map between enums and their string
  // representations.
  std::array<StringId, Variadic::kMaxType + 1> variadic_type_ids_;
};

}  // namespace trace_processor
}  // namespace perfetto

// The std::hash specializations below let the strongly-typed id wrappers and
// the interned profiling Row types be used as keys in std:: unordered
// containers.
//
// Hashing a BaseId delegates to std::hash<uint32_t> on its |value| member.
template <>
struct std::hash<::perfetto::trace_processor::BaseId> {
  using argument_type = ::perfetto::trace_processor::BaseId;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<uint32_t>{}(r.value);
  }
};

// The typed ids simply inherit the BaseId hash above.
template <>
struct std::hash<::perfetto::trace_processor::TrackId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::MappingId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::CallsiteId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::FrameId>
    : std::hash<::perfetto::trace_processor::BaseId> {};

// Hash for a stack-profile frame row over (name, mapping, rel_pc).
//
// NOTE(review): these Row hashes combine fields with XOR, which is symmetric
// (equal or swapped field hashes cancel/collide). That is tolerable for a
// dedup key but is not a general-purpose hash combiner; if collision rates
// ever matter, switch to a mix-style combiner (e.g. hash_combine).
template <>
struct std::hash<
    ::perfetto::trace_processor::tables::StackProfileFrameTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileFrameTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.name) ^
           std::hash<::perfetto::base::Optional<
               ::perfetto::trace_processor::MappingId>>{}(r.mapping) ^
           std::hash<int64_t>{}(r.rel_pc);
  }
};

// Hash for a stack-profile callsite row over (depth, parent_id, frame_id).
// |parent_id| is optional: the root callsite has no parent.
template <>
struct std::hash<
    ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<int64_t>{}(r.depth) ^
           std::hash<::perfetto::base::Optional<
               ::perfetto::trace_processor::CallsiteId>>{}(r.parent_id) ^
           std::hash<::perfetto::trace_processor::FrameId>{}(r.frame_id);
  }
};

// Hash for a stack-profile mapping row over all of its identity fields
// (build_id, offsets, address range, load bias and name).
template <>
struct std::hash<
    ::perfetto::trace_processor::tables::StackProfileMappingTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileMappingTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.build_id) ^
           std::hash<int64_t>{}(r.exact_offset) ^
           std::hash<int64_t>{}(r.start_offset) ^
           std::hash<int64_t>{}(r.start) ^ std::hash<int64_t>{}(r.end) ^
           std::hash<int64_t>{}(r.load_bias) ^
           std::hash<::perfetto::trace_processor::StringId>{}(r.name);
  }
};

#endif  // SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_