/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_
#define SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_

#include <algorithm>
#include <array>
#include <deque>
#include <limits>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "perfetto/base/logging.h"
#include "perfetto/base/time.h"
#include "perfetto/ext/base/hash.h"
#include "perfetto/ext/base/optional.h"
#include "perfetto/ext/base/string_view.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/trace_processor/basic_types.h"
#include "perfetto/trace_processor/status.h"
#include "src/trace_processor/containers/string_pool.h"
#include "src/trace_processor/storage/metadata.h"
#include "src/trace_processor/storage/stats.h"
#include "src/trace_processor/tables/android_tables.h"
#include "src/trace_processor/tables/counter_tables.h"
#include "src/trace_processor/tables/flow_tables.h"
#include "src/trace_processor/tables/memory_tables.h"
#include "src/trace_processor/tables/metadata_tables.h"
#include "src/trace_processor/tables/profiler_tables.h"
#include "src/trace_processor/tables/slice_tables.h"
#include "src/trace_processor/tables/track_tables.h"
#include "src/trace_processor/types/variadic.h"

namespace perfetto {
namespace trace_processor {

// UniquePid is an offset into |unique_processes_|. This is necessary because
// Unix pids are reused and thus not guaranteed to be unique over a long
// period of time.
using UniquePid = uint32_t;

// UniqueTid is an offset into |unique_threads_|. Necessary because tids can
// be reused.
using UniqueTid = uint32_t;

// StringId is an offset into |string_pool_|.
using StringId = StringPool::Id;
static const StringId kNullStringId = StringId::Null();
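
// Illustrative round trip (editorial example; the call sites are assumed and
// not part of this header):
//   StringId id = storage->InternString(base::StringView("thread_name"));
//   NullTermStringView name = storage->GetString(id);  // Same bytes back.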

using ArgSetId = uint32_t;
static const ArgSetId kInvalidArgSetId = 0;

using TrackId = tables::TrackTable::Id;

using CounterId = tables::CounterTable::Id;

using SliceId = tables::SliceTable::Id;

using InstantId = tables::InstantTable::Id;

using SchedId = tables::SchedSliceTable::Id;

using MappingId = tables::StackProfileMappingTable::Id;

using FrameId = tables::StackProfileFrameTable::Id;

using SymbolId = tables::SymbolTable::Id;

using CallsiteId = tables::StackProfileCallsiteTable::Id;

using MetadataId = tables::MetadataTable::Id;

using RawId = tables::RawTable::Id;

using FlamegraphId = tables::ExperimentalFlamegraphNodesTable::Id;

using VulkanAllocId = tables::VulkanMemoryAllocationsTable::Id;

using ProcessMemorySnapshotId = tables::ProcessMemorySnapshotTable::Id;

using SnapshotNodeId = tables::MemorySnapshotNodeTable::Id;

// TODO(lalitm): this is a temporary hack while migrating the counters table
// and will be removed when the migration is complete.
static const TrackId kInvalidTrackId =
    TrackId(std::numeric_limits<TrackId>::max());

enum class RefType {
  kRefNoRef = 0,
  kRefUtid = 1,
  kRefCpuId = 2,
  kRefIrq = 3,
  kRefSoftIrq = 4,
  kRefUpid = 5,
  kRefGpuId = 6,
  kRefTrack = 7,
  kRefMax
};

const std::vector<NullTermStringView>& GetRefTypeStringMap();

// Stores data from a trace file in columnar form. This makes it efficient
// to read or search across a single field of the trace (e.g. all the thread
// names for a given CPU).
class TraceStorage {
 public:
  TraceStorage(const Config& = Config());

  virtual ~TraceStorage();

  class VirtualTrackSlices {
   public:
    inline uint32_t AddVirtualTrackSlice(SliceId slice_id,
                                         int64_t thread_timestamp_ns,
                                         int64_t thread_duration_ns,
                                         int64_t thread_instruction_count,
                                         int64_t thread_instruction_delta) {
      slice_ids_.emplace_back(slice_id);
      thread_timestamp_ns_.emplace_back(thread_timestamp_ns);
      thread_duration_ns_.emplace_back(thread_duration_ns);
      thread_instruction_counts_.emplace_back(thread_instruction_count);
      thread_instruction_deltas_.emplace_back(thread_instruction_delta);
      return slice_count() - 1;
    }

    uint32_t slice_count() const {
      return static_cast<uint32_t>(slice_ids_.size());
    }

    const std::deque<SliceId>& slice_ids() const { return slice_ids_; }
    const std::deque<int64_t>& thread_timestamp_ns() const {
      return thread_timestamp_ns_;
    }
    const std::deque<int64_t>& thread_duration_ns() const {
      return thread_duration_ns_;
    }
    const std::deque<int64_t>& thread_instruction_counts() const {
      return thread_instruction_counts_;
    }
    const std::deque<int64_t>& thread_instruction_deltas() const {
      return thread_instruction_deltas_;
    }

    // Note: the binary search below assumes |slice_ids_| is sorted, i.e. that
    // slices are added in increasing slice id order.
    base::Optional<uint32_t> FindRowForSliceId(SliceId slice_id) const {
      auto it =
          std::lower_bound(slice_ids().begin(), slice_ids().end(), slice_id);
      if (it != slice_ids().end() && *it == slice_id) {
        return static_cast<uint32_t>(std::distance(slice_ids().begin(), it));
      }
      return base::nullopt;
    }

    void UpdateThreadDeltasForSliceId(SliceId slice_id,
                                      int64_t end_thread_timestamp_ns,
                                      int64_t end_thread_instruction_count) {
      auto opt_row = FindRowForSliceId(slice_id);
      if (!opt_row)
        return;
      uint32_t row = *opt_row;
      int64_t begin_ns = thread_timestamp_ns_[row];
      thread_duration_ns_[row] = end_thread_timestamp_ns - begin_ns;
      int64_t begin_ticount = thread_instruction_counts_[row];
      thread_instruction_deltas_[row] =
          end_thread_instruction_count - begin_ticount;
    }

   private:
    std::deque<SliceId> slice_ids_;
    std::deque<int64_t> thread_timestamp_ns_;
    std::deque<int64_t> thread_duration_ns_;
    std::deque<int64_t> thread_instruction_counts_;
    std::deque<int64_t> thread_instruction_deltas_;
  };

  class SqlStats {
   public:
    static constexpr size_t kMaxLogEntries = 100;
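    // Illustrative call sequence for the Record* methods below (editorial
    // example; the returned row ties the three records for one query
    // together):
    //   uint32_t row = RecordQueryBegin(sql, time_queued, time_started);
    //   RecordQueryFirstNext(row, time_first_next);
    //   RecordQueryEnd(row, time_end);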
    uint32_t RecordQueryBegin(const std::string& query,
                              int64_t time_queued,
                              int64_t time_started);
    void RecordQueryFirstNext(uint32_t row, int64_t time_first_next);
    void RecordQueryEnd(uint32_t row, int64_t time_end);
    size_t size() const { return queries_.size(); }
    const std::deque<std::string>& queries() const { return queries_; }
    const std::deque<int64_t>& times_queued() const { return times_queued_; }
    const std::deque<int64_t>& times_started() const { return times_started_; }
    const std::deque<int64_t>& times_first_next() const {
      return times_first_next_;
    }
    const std::deque<int64_t>& times_ended() const { return times_ended_; }

   private:
    uint32_t popped_queries_ = 0;

    std::deque<std::string> queries_;
    std::deque<int64_t> times_queued_;
    std::deque<int64_t> times_started_;
    std::deque<int64_t> times_first_next_;
    std::deque<int64_t> times_ended_;
  };

  struct Stats {
    using IndexMap = std::map<int, int64_t>;
    int64_t value = 0;
    IndexMap indexed_values;
  };
  using StatsMap = std::array<Stats, stats::kNumKeys>;

  // Returns a unique identifier for the contents of each string.
  // The string is copied internally and can be destroyed after this is called.
  // Virtual for testing.
  virtual StringId InternString(base::StringView str) {
    return string_pool_.InternString(str);
  }

  // Example usage: SetStats(stats::android_log_num_failed, 42);
  void SetStats(size_t key, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value = value;
  }

  // Example usage: IncrementStats(stats::android_log_num_failed, -1);
  void IncrementStats(size_t key, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value += increment;
  }

  // Example usage: IncrementIndexedStats(stats::cpu_failure, 1);
  void IncrementIndexedStats(size_t key, int index, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] += increment;
  }

  // Example usage: SetIndexedStats(stats::cpu_failure, 1, 42);
  void SetIndexedStats(size_t key, int index, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] = value;
  }

  class ScopedStatsTracer {
   public:
    ScopedStatsTracer(TraceStorage* storage, size_t key)
        : storage_(storage), key_(key), start_ns_(base::GetWallTimeNs()) {}

    ~ScopedStatsTracer() {
      if (!storage_)
        return;
      auto delta_ns = base::GetWallTimeNs() - start_ns_;
      storage_->IncrementStats(key_, delta_ns.count());
    }

    ScopedStatsTracer(ScopedStatsTracer&& other) noexcept { MoveImpl(&other); }

    ScopedStatsTracer& operator=(ScopedStatsTracer&& other) {
      MoveImpl(&other);
      return *this;
    }

   private:
    ScopedStatsTracer(const ScopedStatsTracer&) = delete;
    ScopedStatsTracer& operator=(const ScopedStatsTracer&) = delete;

    // Transfers the pending measurement and clears |other| so that the
    // moved-from tracer's destructor does not also record the elapsed time.
    void MoveImpl(ScopedStatsTracer* other) {
      storage_ = other->storage_;
      key_ = other->key_;
      start_ns_ = other->start_ns_;
      other->storage_ = nullptr;
    }

    TraceStorage* storage_;
    size_t key_;
    base::TimeNanos start_ns_;
  };
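
  // Illustrative usage of TraceExecutionTimeIntoStats() below (editorial
  // example; stats::some_key and DoWork() are hypothetical):
  //   {
  //     auto tracer = storage->TraceExecutionTimeIntoStats(stats::some_key);
  //     DoWork();
  //   }  // Elapsed wall time is added to the stat when |tracer| goes away.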
  ScopedStatsTracer TraceExecutionTimeIntoStats(size_t key) {
    return ScopedStatsTracer(this, key);
  }

  // Reading methods.
  // Virtual for testing.
  virtual NullTermStringView GetString(StringId id) const {
    return string_pool_.Get(id);
  }

  const tables::ThreadTable& thread_table() const { return thread_table_; }
  tables::ThreadTable* mutable_thread_table() { return &thread_table_; }

  const tables::ProcessTable& process_table() const { return process_table_; }
  tables::ProcessTable* mutable_process_table() { return &process_table_; }

  const tables::TrackTable& track_table() const { return track_table_; }
  tables::TrackTable* mutable_track_table() { return &track_table_; }

  const tables::ProcessTrackTable& process_track_table() const {
    return process_track_table_;
  }
  tables::ProcessTrackTable* mutable_process_track_table() {
    return &process_track_table_;
  }

  const tables::ThreadTrackTable& thread_track_table() const {
    return thread_track_table_;
  }
  tables::ThreadTrackTable* mutable_thread_track_table() {
    return &thread_track_table_;
  }

  const tables::CounterTrackTable& counter_track_table() const {
    return counter_track_table_;
  }
  tables::CounterTrackTable* mutable_counter_track_table() {
    return &counter_track_table_;
  }

  const tables::ThreadCounterTrackTable& thread_counter_track_table() const {
    return thread_counter_track_table_;
  }
  tables::ThreadCounterTrackTable* mutable_thread_counter_track_table() {
    return &thread_counter_track_table_;
  }

  const tables::ProcessCounterTrackTable& process_counter_track_table() const {
    return process_counter_track_table_;
  }
  tables::ProcessCounterTrackTable* mutable_process_counter_track_table() {
    return &process_counter_track_table_;
  }

  const tables::CpuCounterTrackTable& cpu_counter_track_table() const {
    return cpu_counter_track_table_;
  }
  tables::CpuCounterTrackTable* mutable_cpu_counter_track_table() {
    return &cpu_counter_track_table_;
  }

  const tables::IrqCounterTrackTable& irq_counter_track_table() const {
    return irq_counter_track_table_;
  }
  tables::IrqCounterTrackTable* mutable_irq_counter_track_table() {
    return &irq_counter_track_table_;
  }

  const tables::SoftirqCounterTrackTable& softirq_counter_track_table() const {
    return softirq_counter_track_table_;
  }
  tables::SoftirqCounterTrackTable* mutable_softirq_counter_track_table() {
    return &softirq_counter_track_table_;
  }

  const tables::GpuCounterTrackTable& gpu_counter_track_table() const {
    return gpu_counter_track_table_;
  }
  tables::GpuCounterTrackTable* mutable_gpu_counter_track_table() {
    return &gpu_counter_track_table_;
  }

  const tables::GpuCounterGroupTable& gpu_counter_group_table() const {
    return gpu_counter_group_table_;
  }
  tables::GpuCounterGroupTable* mutable_gpu_counter_group_table() {
    return &gpu_counter_group_table_;
  }

  const tables::PerfCounterTrackTable& perf_counter_track_table() const {
    return perf_counter_track_table_;
  }
  tables::PerfCounterTrackTable* mutable_perf_counter_track_table() {
    return &perf_counter_track_table_;
  }

  const tables::SchedSliceTable& sched_slice_table() const {
    return sched_slice_table_;
  }
  tables::SchedSliceTable* mutable_sched_slice_table() {
    return &sched_slice_table_;
  }

  const tables::SliceTable& slice_table() const { return slice_table_; }
  tables::SliceTable* mutable_slice_table() { return &slice_table_; }

  const tables::FlowTable& flow_table() const { return flow_table_; }
  tables::FlowTable* mutable_flow_table() { return &flow_table_; }

  const tables::ThreadSliceTable& thread_slice_table() const {
    return thread_slice_table_;
  }
  tables::ThreadSliceTable* mutable_thread_slice_table() {
    return &thread_slice_table_;
  }

  const VirtualTrackSlices& virtual_track_slices() const {
    return virtual_track_slices_;
  }
  VirtualTrackSlices* mutable_virtual_track_slices() {
    return &virtual_track_slices_;
  }

  const tables::GpuSliceTable& gpu_slice_table() const {
    return gpu_slice_table_;
  }
  tables::GpuSliceTable* mutable_gpu_slice_table() { return &gpu_slice_table_; }

  const tables::CounterTable& counter_table() const { return counter_table_; }
  tables::CounterTable* mutable_counter_table() { return &counter_table_; }

  const SqlStats& sql_stats() const { return sql_stats_; }
  SqlStats* mutable_sql_stats() { return &sql_stats_; }

  const tables::InstantTable& instant_table() const { return instant_table_; }
  tables::InstantTable* mutable_instant_table() { return &instant_table_; }

  const tables::AndroidLogTable& android_log_table() const {
    return android_log_table_;
  }
  tables::AndroidLogTable* mutable_android_log_table() {
    return &android_log_table_;
  }

  const StatsMap& stats() const { return stats_; }

  const tables::MetadataTable& metadata_table() const {
    return metadata_table_;
  }
  tables::MetadataTable* mutable_metadata_table() { return &metadata_table_; }

  const tables::ClockSnapshotTable& clock_snapshot_table() const {
    return clock_snapshot_table_;
  }
  tables::ClockSnapshotTable* mutable_clock_snapshot_table() {
    return &clock_snapshot_table_;
  }

  const tables::ArgTable& arg_table() const { return arg_table_; }
  tables::ArgTable* mutable_arg_table() { return &arg_table_; }

  const tables::RawTable& raw_table() const { return raw_table_; }
  tables::RawTable* mutable_raw_table() { return &raw_table_; }

  const tables::CpuTable& cpu_table() const { return cpu_table_; }
  tables::CpuTable* mutable_cpu_table() { return &cpu_table_; }

  const tables::CpuFreqTable& cpu_freq_table() const {
    return cpu_freq_table_;
  }
  tables::CpuFreqTable* mutable_cpu_freq_table() { return &cpu_freq_table_; }

  const tables::StackProfileMappingTable& stack_profile_mapping_table() const {
    return stack_profile_mapping_table_;
  }
  tables::StackProfileMappingTable* mutable_stack_profile_mapping_table() {
    return &stack_profile_mapping_table_;
  }

  const tables::StackProfileFrameTable& stack_profile_frame_table() const {
    return stack_profile_frame_table_;
  }
  tables::StackProfileFrameTable* mutable_stack_profile_frame_table() {
    return &stack_profile_frame_table_;
  }

  const tables::StackProfileCallsiteTable& stack_profile_callsite_table()
      const {
    return stack_profile_callsite_table_;
  }
  tables::StackProfileCallsiteTable* mutable_stack_profile_callsite_table() {
    return &stack_profile_callsite_table_;
  }

  const tables::HeapProfileAllocationTable& heap_profile_allocation_table()
      const {
    return heap_profile_allocation_table_;
  }
  tables::HeapProfileAllocationTable* mutable_heap_profile_allocation_table() {
    return &heap_profile_allocation_table_;
  }

  const tables::PackageListTable& package_list_table() const {
    return package_list_table_;
  }
  tables::PackageListTable* mutable_package_list_table() {
    return &package_list_table_;
  }

  const tables::ProfilerSmapsTable& profiler_smaps_table() const {
    return profiler_smaps_table_;
  }
  tables::ProfilerSmapsTable* mutable_profiler_smaps_table() {
    return &profiler_smaps_table_;
  }

  const tables::StackSampleTable& stack_sample_table() const {
    return stack_sample_table_;
  }
  tables::StackSampleTable* mutable_stack_sample_table() {
    return &stack_sample_table_;
  }

  const tables::CpuProfileStackSampleTable& cpu_profile_stack_sample_table()
      const {
    return cpu_profile_stack_sample_table_;
  }
  tables::CpuProfileStackSampleTable* mutable_cpu_profile_stack_sample_table() {
    return &cpu_profile_stack_sample_table_;
  }

  const tables::PerfSampleTable& perf_sample_table() const {
    return perf_sample_table_;
  }
  tables::PerfSampleTable* mutable_perf_sample_table() {
    return &perf_sample_table_;
  }

  const tables::SymbolTable& symbol_table() const { return symbol_table_; }

  tables::SymbolTable* mutable_symbol_table() { return &symbol_table_; }

  const tables::HeapGraphObjectTable& heap_graph_object_table() const {
    return heap_graph_object_table_;
  }

  tables::HeapGraphObjectTable* mutable_heap_graph_object_table() {
    return &heap_graph_object_table_;
  }

  const tables::HeapGraphClassTable& heap_graph_class_table() const {
    return heap_graph_class_table_;
  }

  tables::HeapGraphClassTable* mutable_heap_graph_class_table() {
    return &heap_graph_class_table_;
  }

  const tables::HeapGraphReferenceTable& heap_graph_reference_table() const {
    return heap_graph_reference_table_;
  }

  tables::HeapGraphReferenceTable* mutable_heap_graph_reference_table() {
    return &heap_graph_reference_table_;
  }

  const tables::GpuTrackTable& gpu_track_table() const {
    return gpu_track_table_;
  }
  tables::GpuTrackTable* mutable_gpu_track_table() { return &gpu_track_table_; }

  const tables::VulkanMemoryAllocationsTable& vulkan_memory_allocations_table()
      const {
    return vulkan_memory_allocations_table_;
  }

  tables::VulkanMemoryAllocationsTable*
  mutable_vulkan_memory_allocations_table() {
    return &vulkan_memory_allocations_table_;
  }

  const tables::GraphicsFrameSliceTable& graphics_frame_slice_table() const {
    return graphics_frame_slice_table_;
  }

  tables::GraphicsFrameSliceTable* mutable_graphics_frame_slice_table() {
    return &graphics_frame_slice_table_;
  }

  const tables::MemorySnapshotTable& memory_snapshot_table() const {
    return memory_snapshot_table_;
  }
  tables::MemorySnapshotTable* mutable_memory_snapshot_table() {
    return &memory_snapshot_table_;
  }

  const tables::ProcessMemorySnapshotTable& process_memory_snapshot_table()
      const {
    return process_memory_snapshot_table_;
  }
  tables::ProcessMemorySnapshotTable* mutable_process_memory_snapshot_table() {
    return &process_memory_snapshot_table_;
  }

  const tables::MemorySnapshotNodeTable& memory_snapshot_node_table() const {
    return memory_snapshot_node_table_;
  }
  tables::MemorySnapshotNodeTable* mutable_memory_snapshot_node_table() {
    return &memory_snapshot_node_table_;
  }

  const tables::MemorySnapshotEdgeTable& memory_snapshot_edge_table() const {
    return memory_snapshot_edge_table_;
  }
  tables::MemorySnapshotEdgeTable* mutable_memory_snapshot_edge_table() {
    return &memory_snapshot_edge_table_;
  }

  const tables::ExpectedFrameTimelineSliceTable&
  expected_frame_timeline_slice_table() const {
    return expected_frame_timeline_slice_table_;
  }

  tables::ExpectedFrameTimelineSliceTable*
  mutable_expected_frame_timeline_slice_table() {
    return &expected_frame_timeline_slice_table_;
  }

  const tables::ActualFrameTimelineSliceTable&
  actual_frame_timeline_slice_table() const {
    return actual_frame_timeline_slice_table_;
  }

  tables::ActualFrameTimelineSliceTable*
  mutable_actual_frame_timeline_slice_table() {
    return &actual_frame_timeline_slice_table_;
  }

  const StringPool& string_pool() const { return string_pool_; }
  StringPool* mutable_string_pool() { return &string_pool_; }

  // Number of interned strings in the pool. Includes the empty string w/ ID=0.
  size_t string_count() const { return string_pool_.size(); }

  // Start / end ts (in nanoseconds) across the parsed trace events.
  // Returns (0, 0) if the trace is empty.
  std::pair<int64_t, int64_t> GetTraceTimestampBoundsNs() const;
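
  // Illustrative usage of ExtractArg() below (editorial example; "some.key"
  // is a made-up arg key):
  //   base::Optional<Variadic> arg;
  //   util::Status status = storage.ExtractArg(set_id, "some.key", &arg);
  //   // On success, |arg| is nullopt if the key is absent; otherwise
  //   // |arg->type| says which union field of Variadic is valid.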

  util::Status ExtractArg(uint32_t arg_set_id,
                          const char* key,
                          base::Optional<Variadic>* result) {
    const auto& args = arg_table();
    RowMap filtered = args.FilterToRowMap(
        {args.arg_set_id().eq(arg_set_id), args.key().eq(key)});
    if (filtered.empty()) {
      *result = base::nullopt;
      return util::OkStatus();
    }
    if (filtered.size() > 1) {
      return util::ErrStatus(
          "EXTRACT_ARG: received multiple args matching arg set id and key");
    }
    uint32_t idx = filtered.Get(0);
    *result = GetArgValue(idx);
    return util::OkStatus();
  }

  Variadic GetArgValue(uint32_t row) const {
    Variadic v;
    v.type = *GetVariadicTypeForId(arg_table_.value_type()[row]);

    // Force initialization of union to stop GCC complaining.
    v.int_value = 0;

    switch (v.type) {
      case Variadic::Type::kBool:
        v.bool_value = static_cast<bool>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kInt:
        v.int_value = *arg_table_.int_value()[row];
        break;
      case Variadic::Type::kUint:
        v.uint_value = static_cast<uint64_t>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kString: {
        auto opt_value = arg_table_.string_value()[row];
        v.string_value = opt_value ? *opt_value : kNullStringId;
        break;
      }
      case Variadic::Type::kPointer:
        v.pointer_value = static_cast<uint64_t>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kReal:
        v.real_value = *arg_table_.real_value()[row];
        break;
      case Variadic::Type::kJson: {
        auto opt_value = arg_table_.string_value()[row];
        v.json_value = opt_value ? *opt_value : kNullStringId;
        break;
      }
    }
    return v;
  }

  StringId GetIdForVariadicType(Variadic::Type type) const {
    return variadic_type_ids_[type];
  }

  base::Optional<Variadic::Type> GetVariadicTypeForId(StringId id) const {
    auto it =
        std::find(variadic_type_ids_.begin(), variadic_type_ids_.end(), id);
    if (it == variadic_type_ids_.end())
      return base::nullopt;

    int64_t idx = std::distance(variadic_type_ids_.begin(), it);
    return static_cast<Variadic::Type>(idx);
  }

 private:
  using StringHash = uint64_t;

  TraceStorage(const TraceStorage&) = delete;
  TraceStorage& operator=(const TraceStorage&) = delete;

  TraceStorage(TraceStorage&&) = delete;
  TraceStorage& operator=(TraceStorage&&) = delete;

  // One entry for each unique string in the trace.
  StringPool string_pool_;

  // Stats about parsing the trace.
  StatsMap stats_{};

  // Extra data extracted from the trace. Includes:
  // * metadata from chrome and benchmarking infrastructure
  // * descriptions of android packages
  tables::MetadataTable metadata_table_{&string_pool_, nullptr};

  // Contains data from all the clock snapshots in the trace.
  tables::ClockSnapshotTable clock_snapshot_table_{&string_pool_, nullptr};

  // Metadata for tracks.
  tables::TrackTable track_table_{&string_pool_, nullptr};
  tables::GpuTrackTable gpu_track_table_{&string_pool_, &track_table_};
  tables::ProcessTrackTable process_track_table_{&string_pool_, &track_table_};
  tables::ThreadTrackTable thread_track_table_{&string_pool_, &track_table_};

  // Track tables for counter events.
  tables::CounterTrackTable counter_track_table_{&string_pool_, &track_table_};
  tables::ThreadCounterTrackTable thread_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::ProcessCounterTrackTable process_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::CpuCounterTrackTable cpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::IrqCounterTrackTable irq_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::SoftirqCounterTrackTable softirq_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::GpuCounterTrackTable gpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::GpuCounterGroupTable gpu_counter_group_table_{&string_pool_,
                                                        nullptr};
  tables::PerfCounterTrackTable perf_counter_track_table_{
      &string_pool_, &counter_track_table_};

  // Args for all other tables.
  tables::ArgTable arg_table_{&string_pool_, nullptr};

  // Information about all the threads and processes in the trace.
  tables::ThreadTable thread_table_{&string_pool_, nullptr};
  tables::ProcessTable process_table_{&string_pool_, nullptr};

  // Slices coming from userspace events (e.g. Chromium TRACE_EVENT macros).
  tables::SliceTable slice_table_{&string_pool_, nullptr};

  // Flow events from userspace events (e.g. Chromium TRACE_EVENT macros).
  tables::FlowTable flow_table_{&string_pool_, nullptr};

  // Slices from CPU scheduling data.
  tables::SchedSliceTable sched_slice_table_{&string_pool_, nullptr};
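
  // Editorial note: members constructed with a second argument pointing at
  // another table (e.g. {&string_pool_, &slice_table_}) are sub-tables that
  // extend that parent table's rows with extra columns (the slice sub-types
  // below); a nullptr parent marks a root table.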

  // Additional attributes for thread slices (sub-type of NestableSlices).
  tables::ThreadSliceTable thread_slice_table_{&string_pool_, &slice_table_};

  // Additional attributes for virtual track slices (sub-type of
  // NestableSlices).
  VirtualTrackSlices virtual_track_slices_;

  // Additional attributes for gpu track slices (sub-type of
  // NestableSlices).
  tables::GpuSliceTable gpu_slice_table_{&string_pool_, &slice_table_};

  // The values from the Counter events from the trace. This includes CPU
  // frequency events as well as systrace trace_marker counter events.
  tables::CounterTable counter_table_{&string_pool_, nullptr};

  SqlStats sql_stats_;

  // These are instantaneous events in the trace. They have no duration
  // and do not have a value that makes sense to track over time,
  // e.g. signal events.
  tables::InstantTable instant_table_{&string_pool_, nullptr};

  // Raw events: one row for every ftrace event in the trace. Each row includes
  // the timestamp and the pid; the args for the event are in the args table.
  // This table can be used to generate a text version of the trace.
  tables::RawTable raw_table_{&string_pool_, nullptr};

  tables::CpuTable cpu_table_{&string_pool_, nullptr};

  tables::CpuFreqTable cpu_freq_table_{&string_pool_, nullptr};

  tables::AndroidLogTable android_log_table_{&string_pool_, nullptr};

  tables::StackProfileMappingTable stack_profile_mapping_table_{&string_pool_,
                                                                nullptr};
  tables::StackProfileFrameTable stack_profile_frame_table_{&string_pool_,
                                                            nullptr};
  tables::StackProfileCallsiteTable stack_profile_callsite_table_{
      &string_pool_, nullptr};
  tables::StackSampleTable stack_sample_table_{&string_pool_, nullptr};
  tables::HeapProfileAllocationTable heap_profile_allocation_table_{
      &string_pool_, nullptr};
  tables::CpuProfileStackSampleTable cpu_profile_stack_sample_table_{
      &string_pool_, &stack_sample_table_};
  tables::PerfSampleTable perf_sample_table_{&string_pool_, nullptr};
  tables::PackageListTable package_list_table_{&string_pool_, nullptr};
  tables::ProfilerSmapsTable profiler_smaps_table_{&string_pool_, nullptr};

  // Symbol tables (mappings from frames to symbol names).
  tables::SymbolTable symbol_table_{&string_pool_, nullptr};
  tables::HeapGraphObjectTable heap_graph_object_table_{&string_pool_,
                                                        nullptr};
  tables::HeapGraphClassTable heap_graph_class_table_{&string_pool_, nullptr};
  tables::HeapGraphReferenceTable heap_graph_reference_table_{&string_pool_,
                                                              nullptr};

  tables::VulkanMemoryAllocationsTable vulkan_memory_allocations_table_{
      &string_pool_, nullptr};

  tables::GraphicsFrameSliceTable graphics_frame_slice_table_{&string_pool_,
                                                              &slice_table_};

  // Metadata for memory snapshot.
  tables::MemorySnapshotTable memory_snapshot_table_{&string_pool_, nullptr};
  tables::ProcessMemorySnapshotTable process_memory_snapshot_table_{
      &string_pool_, nullptr};
  tables::MemorySnapshotNodeTable memory_snapshot_node_table_{&string_pool_,
                                                              nullptr};
  tables::MemorySnapshotEdgeTable memory_snapshot_edge_table_{&string_pool_,
                                                              nullptr};

  // FrameTimeline tables.
  tables::ExpectedFrameTimelineSliceTable expected_frame_timeline_slice_table_{
      &string_pool_, &slice_table_};
  tables::ActualFrameTimelineSliceTable actual_frame_timeline_slice_table_{
      &string_pool_, &slice_table_};

  // The array below allows us to map between enums and their string
  // representations.
  std::array<StringId, Variadic::kMaxType + 1> variadic_type_ids_;
};

}  // namespace trace_processor
}  // namespace perfetto

template <>
struct std::hash<::perfetto::trace_processor::BaseId> {
  using argument_type = ::perfetto::trace_processor::BaseId;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<uint32_t>{}(r.value);
  }
};

template <>
struct std::hash<::perfetto::trace_processor::TrackId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::MappingId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::CallsiteId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::FrameId>
    : std::hash<::perfetto::trace_processor::BaseId> {};

template <>
struct std::hash<
    ::perfetto::trace_processor::tables::StackProfileFrameTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileFrameTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.name) ^
           std::hash<::perfetto::base::Optional<
               ::perfetto::trace_processor::MappingId>>{}(r.mapping) ^
           std::hash<int64_t>{}(r.rel_pc);
  }
};

template <>
struct std::hash<
    ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<int64_t>{}(r.depth) ^
           std::hash<::perfetto::base::Optional<
               ::perfetto::trace_processor::CallsiteId>>{}(r.parent_id) ^
           std::hash<::perfetto::trace_processor::FrameId>{}(r.frame_id);
  }
};

template <>
struct std::hash<
    ::perfetto::trace_processor::tables::StackProfileMappingTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileMappingTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.build_id) ^
           std::hash<int64_t>{}(r.exact_offset) ^
           std::hash<int64_t>{}(r.start_offset) ^
           std::hash<int64_t>{}(r.start) ^ std::hash<int64_t>{}(r.end) ^
           std::hash<int64_t>{}(r.load_bias) ^
           std::hash<::perfetto::trace_processor::StringId>{}(r.name);
  }
};

#endif  // SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_