/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_
#define SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_

#include <array>
#include <deque>
#include <map>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "perfetto/base/logging.h"
#include "perfetto/base/time.h"
#include "perfetto/ext/base/hash.h"
#include "perfetto/ext/base/string_view.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/trace_processor/basic_types.h"
#include "perfetto/trace_processor/status.h"
#include "src/trace_processor/containers/string_pool.h"
#include "src/trace_processor/storage/metadata.h"
#include "src/trace_processor/storage/stats.h"
#include "src/trace_processor/tables/android_tables_py.h"
#include "src/trace_processor/tables/counter_tables_py.h"
#include "src/trace_processor/tables/flow_tables_py.h"
#include "src/trace_processor/tables/memory_tables_py.h"
#include "src/trace_processor/tables/metadata_tables_py.h"
#include "src/trace_processor/tables/profiler_tables_py.h"
#include "src/trace_processor/tables/slice_tables_py.h"
#include "src/trace_processor/tables/trace_proto_tables_py.h"
#include "src/trace_processor/tables/track_tables_py.h"
#include "src/trace_processor/types/variadic.h"
#include "src/trace_processor/views/slice_views.h"

namespace perfetto {
namespace trace_processor {

// UniquePid is an offset into |unique_processes_|. This is necessary because
// Unix pids are reused and thus not guaranteed to be unique over a long
// period of time.
using UniquePid = uint32_t;

// UniqueTid is an offset into |unique_threads_|. Necessary because tids can
// be reused.
using UniqueTid = uint32_t;

// StringId is an offset into |string_pool_|.
using StringId = StringPool::Id;
static const StringId kNullStringId = StringId::Null();

using ArgSetId = uint32_t;
static const ArgSetId kInvalidArgSetId = 0;

using TrackId = tables::TrackTable::Id;

using CounterId = tables::CounterTable::Id;

using SliceId = tables::SliceTable::Id;

using SchedId = tables::SchedSliceTable::Id;

using MappingId = tables::StackProfileMappingTable::Id;

using FrameId = tables::StackProfileFrameTable::Id;

using SymbolId = tables::SymbolTable::Id;

using CallsiteId = tables::StackProfileCallsiteTable::Id;

using MetadataId = tables::MetadataTable::Id;

using RawId = tables::RawTable::Id;

using FlamegraphId = tables::ExperimentalFlamegraphNodesTable::Id;

using VulkanAllocId = tables::VulkanMemoryAllocationsTable::Id;

using ProcessMemorySnapshotId = tables::ProcessMemorySnapshotTable::Id;

using SnapshotNodeId = tables::MemorySnapshotNodeTable::Id;

static const TrackId kInvalidTrackId =
    TrackId(std::numeric_limits<uint32_t>::max());

enum class RefType {
  kRefNoRef = 0,
  kRefUtid = 1,
  kRefCpuId = 2,
  kRefIrq = 3,
  kRefSoftIrq = 4,
  kRefUpid = 5,
  kRefGpuId = 6,
  kRefTrack = 7,
  kRefMax
};

const std::vector<NullTermStringView>& GetRefTypeStringMap();
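
// Illustrative sketch (not part of the original header): the map returned by
// GetRefTypeStringMap() is assumed to be indexed by the RefType enum values
// above (which is what kRefMax suggests), so looking up the display name of a
// ref type might look like this:
//
//   const auto& ref_names = GetRefTypeStringMap();
//   NullTermStringView utid_name =
//       ref_names[static_cast<size_t>(RefType::kRefUtid)];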

// Stores the data from a trace file in columnar form. This makes it efficient
// to read or search across a single field of the trace (e.g. all the thread
// names for a given CPU).
class TraceStorage {
 public:
  TraceStorage(const Config& = Config());

  virtual ~TraceStorage();

  class VirtualTrackSlices {
   public:
    inline uint32_t AddVirtualTrackSlice(SliceId slice_id,
                                         int64_t thread_timestamp_ns,
                                         int64_t thread_duration_ns,
                                         int64_t thread_instruction_count,
                                         int64_t thread_instruction_delta) {
      slice_ids_.emplace_back(slice_id);
      thread_timestamp_ns_.emplace_back(thread_timestamp_ns);
      thread_duration_ns_.emplace_back(thread_duration_ns);
      thread_instruction_counts_.emplace_back(thread_instruction_count);
      thread_instruction_deltas_.emplace_back(thread_instruction_delta);
      return slice_count() - 1;
    }

    uint32_t slice_count() const {
      return static_cast<uint32_t>(slice_ids_.size());
    }

    const std::deque<SliceId>& slice_ids() const { return slice_ids_; }
    const std::deque<int64_t>& thread_timestamp_ns() const {
      return thread_timestamp_ns_;
    }
    const std::deque<int64_t>& thread_duration_ns() const {
      return thread_duration_ns_;
    }
    const std::deque<int64_t>& thread_instruction_counts() const {
      return thread_instruction_counts_;
    }
    const std::deque<int64_t>& thread_instruction_deltas() const {
      return thread_instruction_deltas_;
    }

    std::optional<uint32_t> FindRowForSliceId(SliceId slice_id) const {
      auto it =
          std::lower_bound(slice_ids().begin(), slice_ids().end(), slice_id);
      if (it != slice_ids().end() && *it == slice_id) {
        return static_cast<uint32_t>(std::distance(slice_ids().begin(), it));
      }
      return std::nullopt;
    }

    void UpdateThreadDeltasForSliceId(SliceId slice_id,
                                      int64_t end_thread_timestamp_ns,
                                      int64_t end_thread_instruction_count) {
      auto opt_row = FindRowForSliceId(slice_id);
      if (!opt_row)
        return;
      uint32_t row = *opt_row;
      int64_t begin_ns = thread_timestamp_ns_[row];
      thread_duration_ns_[row] = end_thread_timestamp_ns - begin_ns;
      int64_t begin_ticount = thread_instruction_counts_[row];
      thread_instruction_deltas_[row] =
          end_thread_instruction_count - begin_ticount;
    }

   private:
    std::deque<SliceId> slice_ids_;
    std::deque<int64_t> thread_timestamp_ns_;
    std::deque<int64_t> thread_duration_ns_;
    std::deque<int64_t> thread_instruction_counts_;
    std::deque<int64_t> thread_instruction_deltas_;
  };
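
  // Illustrative sketch (not part of the original header), assuming a
  // |storage| pointer and a SliceId |id| from the slice table: a parser
  // records the begin-side thread counters when the slice opens and patches
  // the deltas when it closes. Note FindRowForSliceId uses lower_bound, so
  // slices are expected to be appended in increasing SliceId order.
  //
  //   auto* vts = storage->mutable_virtual_track_slices();
  //   vts->AddVirtualTrackSlice(id, /*thread_timestamp_ns=*/begin_ts,
  //                             /*thread_duration_ns=*/0,
  //                             /*thread_instruction_count=*/begin_ticount,
  //                             /*thread_instruction_delta=*/0);
  //   ...
  //   vts->UpdateThreadDeltasForSliceId(id, end_ts, end_ticount);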

  class SqlStats {
   public:
    static constexpr size_t kMaxLogEntries = 100;
    uint32_t RecordQueryBegin(const std::string& query, int64_t time_started);
    void RecordQueryFirstNext(uint32_t row, int64_t time_first_next);
    void RecordQueryEnd(uint32_t row, int64_t time_end);
    size_t size() const { return queries_.size(); }
    const std::deque<std::string>& queries() const { return queries_; }
    const std::deque<int64_t>& times_started() const { return times_started_; }
    const std::deque<int64_t>& times_first_next() const {
      return times_first_next_;
    }
    const std::deque<int64_t>& times_ended() const { return times_ended_; }

   private:
    uint32_t popped_queries_ = 0;

    std::deque<std::string> queries_;
    std::deque<int64_t> times_started_;
    std::deque<int64_t> times_first_next_;
    std::deque<int64_t> times_ended_;
  };
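
  // Illustrative sketch (not part of the original header): the assumed
  // lifecycle of an SqlStats entry, given a |storage| pointer and an SQL
  // string |sql|. RecordQueryBegin returns the row that the later calls
  // refer to.
  //
  //   auto* sql_stats = storage->mutable_sql_stats();
  //   uint32_t row =
  //       sql_stats->RecordQueryBegin(sql, base::GetWallTimeNs().count());
  //   sql_stats->RecordQueryFirstNext(row, base::GetWallTimeNs().count());
  //   sql_stats->RecordQueryEnd(row, base::GetWallTimeNs().count());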

  struct Stats {
    using IndexMap = std::map<int, int64_t>;
    int64_t value = 0;
    IndexMap indexed_values;
  };
  using StatsMap = std::array<Stats, stats::kNumKeys>;

  // Returns a unique identifier for the contents of each string.
  // The string is copied internally and can be destroyed after this call
  // returns.
  // Virtual for testing.
  virtual StringId InternString(base::StringView str) {
    return string_pool_.InternString(str);
  }
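
  // Illustrative sketch (not part of the original header): interning a string
  // and reading it back through the pool, assuming a |storage| pointer.
  //
  //   StringId id = storage->InternString("com.example.app");
  //   NullTermStringView name = storage->GetString(id);  // "com.example.app"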

  // Example usage: SetStats(stats::android_log_num_failed, 42);
  void SetStats(size_t key, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value = value;
  }

  // Example usage: IncrementStats(stats::android_log_num_failed, -1);
  void IncrementStats(size_t key, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value += increment;
  }

  // Example usage: IncrementIndexedStats(stats::cpu_failure, 1);
  void IncrementIndexedStats(size_t key, int index, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] += increment;
  }

  // Example usage: SetIndexedStats(stats::cpu_failure, 1, 42);
  void SetIndexedStats(size_t key, int index, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] = value;
  }

  // Example usage: opt_cpu_failure = GetIndexedStats(stats::cpu_failure, 1);
  std::optional<int64_t> GetIndexedStats(size_t key, int index) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    auto kv = stats_[key].indexed_values.find(index);
    if (kv != stats_[key].indexed_values.end()) {
      return kv->second;
    }
    return std::nullopt;
  }

  class ScopedStatsTracer {
   public:
    ScopedStatsTracer(TraceStorage* storage, size_t key)
        : storage_(storage), key_(key), start_ns_(base::GetWallTimeNs()) {}

    ~ScopedStatsTracer() {
      if (!storage_)
        return;
      auto delta_ns = base::GetWallTimeNs() - start_ns_;
      storage_->IncrementStats(key_, delta_ns.count());
    }

    ScopedStatsTracer(ScopedStatsTracer&& other) noexcept { MoveImpl(&other); }

    ScopedStatsTracer& operator=(ScopedStatsTracer&& other) {
      MoveImpl(&other);
      return *this;
    }

   private:
    ScopedStatsTracer(const ScopedStatsTracer&) = delete;
    ScopedStatsTracer& operator=(const ScopedStatsTracer&) = delete;

    void MoveImpl(ScopedStatsTracer* other) {
      storage_ = other->storage_;
      key_ = other->key_;
      start_ns_ = other->start_ns_;
      other->storage_ = nullptr;
    }

    TraceStorage* storage_;
    size_t key_;
    base::TimeNanos start_ns_;
  };

  ScopedStatsTracer TraceExecutionTimeIntoStats(size_t key) {
    return ScopedStatsTracer(this, key);
  }
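
  // Illustrative sketch (not part of the original header): timing a block of
  // work into a stat. The tracer adds the elapsed wall-clock nanoseconds to
  // the given stat key when it goes out of scope; |stats::some_timing_key| is
  // a hypothetical key name.
  //
  //   {
  //     auto tracer =
  //         storage->TraceExecutionTimeIntoStats(stats::some_timing_key);
  //     DoExpensiveWork();
  //   }  // Elapsed ns added to the stat here.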

  // Reading methods.
  // Virtual for testing.
  virtual NullTermStringView GetString(StringId id) const {
    return string_pool_.Get(id);
  }

  // Requests the removal of unused capacity.
  // Matches the semantics of std::vector::shrink_to_fit.
  void ShrinkToFitTables() {
    // At the moment, we only bother calling ShrinkToFit on a set group
    // of tables. If we wanted to extend this to every table, we'd need to deal
    // with tracking all the tables in the storage: this is not worth doing
    // given most memory is used by these tables.
    thread_table_.ShrinkToFit();
    process_table_.ShrinkToFit();
    track_table_.ShrinkToFit();
    counter_table_.ShrinkToFit();
    slice_table_.ShrinkToFit();
    raw_table_.ShrinkToFit();
    sched_slice_table_.ShrinkToFit();
    thread_state_table_.ShrinkToFit();
    arg_table_.ShrinkToFit();
  }

  const tables::ThreadTable& thread_table() const { return thread_table_; }
  tables::ThreadTable* mutable_thread_table() { return &thread_table_; }

  const tables::ProcessTable& process_table() const { return process_table_; }
  tables::ProcessTable* mutable_process_table() { return &process_table_; }

  const tables::FiledescriptorTable& filedescriptor_table() const {
    return filedescriptor_table_;
  }
  tables::FiledescriptorTable* mutable_filedescriptor_table() {
    return &filedescriptor_table_;
  }

  const tables::TrackTable& track_table() const { return track_table_; }
  tables::TrackTable* mutable_track_table() { return &track_table_; }

  const tables::CounterTrackTable& counter_track_table() const {
    return counter_track_table_;
  }
  tables::CounterTrackTable* mutable_counter_track_table() {
    return &counter_track_table_;
  }

  const tables::CpuCounterTrackTable& cpu_counter_track_table() const {
    return cpu_counter_track_table_;
  }
  tables::CpuCounterTrackTable* mutable_cpu_counter_track_table() {
    return &cpu_counter_track_table_;
  }

  const tables::GpuCounterGroupTable& gpu_counter_group_table() const {
    return gpu_counter_group_table_;
  }
  tables::GpuCounterGroupTable* mutable_gpu_counter_group_table() {
    return &gpu_counter_group_table_;
  }

  const tables::GpuCounterTrackTable& gpu_counter_track_table() const {
    return gpu_counter_track_table_;
  }
  tables::GpuCounterTrackTable* mutable_gpu_counter_track_table() {
    return &gpu_counter_track_table_;
  }

  const tables::EnergyCounterTrackTable& energy_counter_track_table() const {
    return energy_counter_track_table_;
  }
  tables::EnergyCounterTrackTable* mutable_energy_counter_track_table() {
    return &energy_counter_track_table_;
  }

  const tables::UidCounterTrackTable& uid_counter_track_table() const {
    return uid_counter_track_table_;
  }
  tables::UidCounterTrackTable* mutable_uid_counter_track_table() {
    return &uid_counter_track_table_;
  }

  const tables::EnergyPerUidCounterTrackTable&
  energy_per_uid_counter_track_table() const {
    return energy_per_uid_counter_track_table_;
  }
  tables::EnergyPerUidCounterTrackTable*
  mutable_energy_per_uid_counter_track_table() {
    return &energy_per_uid_counter_track_table_;
  }

  const tables::IrqCounterTrackTable& irq_counter_track_table() const {
    return irq_counter_track_table_;
  }
  tables::IrqCounterTrackTable* mutable_irq_counter_track_table() {
    return &irq_counter_track_table_;
  }

  const tables::PerfCounterTrackTable& perf_counter_track_table() const {
    return perf_counter_track_table_;
  }
  tables::PerfCounterTrackTable* mutable_perf_counter_track_table() {
    return &perf_counter_track_table_;
  }

  const tables::ProcessCounterTrackTable& process_counter_track_table() const {
    return process_counter_track_table_;
  }
  tables::ProcessCounterTrackTable* mutable_process_counter_track_table() {
    return &process_counter_track_table_;
  }

  const tables::ProcessTrackTable& process_track_table() const {
    return process_track_table_;
  }
  tables::ProcessTrackTable* mutable_process_track_table() {
    return &process_track_table_;
  }

  const tables::ThreadTrackTable& thread_track_table() const {
    return thread_track_table_;
  }
  tables::ThreadTrackTable* mutable_thread_track_table() {
    return &thread_track_table_;
  }

  const tables::ThreadStateTable& thread_state_table() const {
    return thread_state_table_;
  }
  tables::ThreadStateTable* mutable_thread_state_table() {
    return &thread_state_table_;
  }

  const tables::ThreadCounterTrackTable& thread_counter_track_table() const {
    return thread_counter_track_table_;
  }
  tables::ThreadCounterTrackTable* mutable_thread_counter_track_table() {
    return &thread_counter_track_table_;
  }

  const tables::SoftirqCounterTrackTable& softirq_counter_track_table() const {
    return softirq_counter_track_table_;
  }
  tables::SoftirqCounterTrackTable* mutable_softirq_counter_track_table() {
    return &softirq_counter_track_table_;
  }

  const tables::SchedSliceTable& sched_slice_table() const {
    return sched_slice_table_;
  }
  tables::SchedSliceTable* mutable_sched_slice_table() {
    return &sched_slice_table_;
  }

  const tables::SliceTable& slice_table() const { return slice_table_; }
  tables::SliceTable* mutable_slice_table() { return &slice_table_; }

  const tables::FlowTable& flow_table() const { return flow_table_; }
  tables::FlowTable* mutable_flow_table() { return &flow_table_; }

  const VirtualTrackSlices& virtual_track_slices() const {
    return virtual_track_slices_;
  }
  VirtualTrackSlices* mutable_virtual_track_slices() {
    return &virtual_track_slices_;
  }

  const tables::GpuSliceTable& gpu_slice_table() const {
    return gpu_slice_table_;
  }
  tables::GpuSliceTable* mutable_gpu_slice_table() { return &gpu_slice_table_; }

  const tables::CounterTable& counter_table() const { return counter_table_; }
  tables::CounterTable* mutable_counter_table() { return &counter_table_; }

  const SqlStats& sql_stats() const { return sql_stats_; }
  SqlStats* mutable_sql_stats() { return &sql_stats_; }

  const tables::AndroidLogTable& android_log_table() const {
    return android_log_table_;
  }
  tables::AndroidLogTable* mutable_android_log_table() {
    return &android_log_table_;
  }

  const tables::AndroidDumpstateTable& android_dumpstate_table() const {
    return android_dumpstate_table_;
  }

  tables::AndroidDumpstateTable* mutable_android_dumpstate_table() {
    return &android_dumpstate_table_;
  }

  const StatsMap& stats() const { return stats_; }

  const tables::MetadataTable& metadata_table() const {
    return metadata_table_;
  }
  tables::MetadataTable* mutable_metadata_table() { return &metadata_table_; }

  const tables::ClockSnapshotTable& clock_snapshot_table() const {
    return clock_snapshot_table_;
  }
  tables::ClockSnapshotTable* mutable_clock_snapshot_table() {
    return &clock_snapshot_table_;
  }

  const tables::ArgTable& arg_table() const { return arg_table_; }
  tables::ArgTable* mutable_arg_table() { return &arg_table_; }

  const tables::RawTable& raw_table() const { return raw_table_; }
  tables::RawTable* mutable_raw_table() { return &raw_table_; }

  const tables::FtraceEventTable& ftrace_event_table() const {
    return ftrace_event_table_;
  }
  tables::FtraceEventTable* mutable_ftrace_event_table() {
    return &ftrace_event_table_;
  }

  const tables::CpuTable& cpu_table() const { return cpu_table_; }
  tables::CpuTable* mutable_cpu_table() { return &cpu_table_; }

  const tables::CpuFreqTable& cpu_freq_table() const { return cpu_freq_table_; }
  tables::CpuFreqTable* mutable_cpu_freq_table() { return &cpu_freq_table_; }

  const tables::StackProfileMappingTable& stack_profile_mapping_table() const {
    return stack_profile_mapping_table_;
  }
  tables::StackProfileMappingTable* mutable_stack_profile_mapping_table() {
    return &stack_profile_mapping_table_;
  }

  const tables::StackProfileFrameTable& stack_profile_frame_table() const {
    return stack_profile_frame_table_;
  }
  tables::StackProfileFrameTable* mutable_stack_profile_frame_table() {
    return &stack_profile_frame_table_;
  }

  const tables::StackProfileCallsiteTable& stack_profile_callsite_table()
      const {
    return stack_profile_callsite_table_;
  }
  tables::StackProfileCallsiteTable* mutable_stack_profile_callsite_table() {
    return &stack_profile_callsite_table_;
  }

  const tables::HeapProfileAllocationTable& heap_profile_allocation_table()
      const {
    return heap_profile_allocation_table_;
  }
  tables::HeapProfileAllocationTable* mutable_heap_profile_allocation_table() {
    return &heap_profile_allocation_table_;
  }

  const tables::PackageListTable& package_list_table() const {
    return package_list_table_;
  }
  tables::PackageListTable* mutable_package_list_table() {
    return &package_list_table_;
  }

  const tables::AndroidGameInterventionListTable&
  android_game_intervention_list_table() const {
    return android_game_intervention_list_table_;
  }
  tables::AndroidGameInterventionListTable*
  mutable_android_game_intervenion_list_table() {
    return &android_game_intervention_list_table_;
  }

  const tables::ProfilerSmapsTable& profiler_smaps_table() const {
    return profiler_smaps_table_;
  }
  tables::ProfilerSmapsTable* mutable_profiler_smaps_table() {
    return &profiler_smaps_table_;
  }

  const tables::StackSampleTable& stack_sample_table() const {
    return stack_sample_table_;
  }
  tables::StackSampleTable* mutable_stack_sample_table() {
    return &stack_sample_table_;
  }

  const tables::CpuProfileStackSampleTable& cpu_profile_stack_sample_table()
      const {
    return cpu_profile_stack_sample_table_;
  }
  tables::CpuProfileStackSampleTable* mutable_cpu_profile_stack_sample_table() {
    return &cpu_profile_stack_sample_table_;
  }

  const tables::PerfSampleTable& perf_sample_table() const {
    return perf_sample_table_;
  }
  tables::PerfSampleTable* mutable_perf_sample_table() {
    return &perf_sample_table_;
  }

  const tables::SymbolTable& symbol_table() const { return symbol_table_; }

  tables::SymbolTable* mutable_symbol_table() { return &symbol_table_; }

  const tables::HeapGraphObjectTable& heap_graph_object_table() const {
    return heap_graph_object_table_;
  }

  tables::HeapGraphObjectTable* mutable_heap_graph_object_table() {
    return &heap_graph_object_table_;
  }
  const tables::HeapGraphClassTable& heap_graph_class_table() const {
    return heap_graph_class_table_;
  }

  tables::HeapGraphClassTable* mutable_heap_graph_class_table() {
    return &heap_graph_class_table_;
  }

  const tables::HeapGraphReferenceTable& heap_graph_reference_table() const {
    return heap_graph_reference_table_;
  }

  tables::HeapGraphReferenceTable* mutable_heap_graph_reference_table() {
    return &heap_graph_reference_table_;
  }

  const tables::CpuTrackTable& cpu_track_table() const {
    return cpu_track_table_;
  }
  tables::CpuTrackTable* mutable_cpu_track_table() { return &cpu_track_table_; }

  const tables::GpuTrackTable& gpu_track_table() const {
    return gpu_track_table_;
  }
  tables::GpuTrackTable* mutable_gpu_track_table() { return &gpu_track_table_; }

  const tables::VulkanMemoryAllocationsTable& vulkan_memory_allocations_table()
      const {
    return vulkan_memory_allocations_table_;
  }

  tables::VulkanMemoryAllocationsTable*
  mutable_vulkan_memory_allocations_table() {
    return &vulkan_memory_allocations_table_;
  }

  const tables::GraphicsFrameSliceTable& graphics_frame_slice_table() const {
    return graphics_frame_slice_table_;
  }

  tables::GraphicsFrameSliceTable* mutable_graphics_frame_slice_table() {
    return &graphics_frame_slice_table_;
  }

  const tables::MemorySnapshotTable& memory_snapshot_table() const {
    return memory_snapshot_table_;
  }
  tables::MemorySnapshotTable* mutable_memory_snapshot_table() {
    return &memory_snapshot_table_;
  }

  const tables::ProcessMemorySnapshotTable& process_memory_snapshot_table()
      const {
    return process_memory_snapshot_table_;
  }
  tables::ProcessMemorySnapshotTable* mutable_process_memory_snapshot_table() {
    return &process_memory_snapshot_table_;
  }

  const tables::MemorySnapshotNodeTable& memory_snapshot_node_table() const {
    return memory_snapshot_node_table_;
  }
  tables::MemorySnapshotNodeTable* mutable_memory_snapshot_node_table() {
    return &memory_snapshot_node_table_;
  }

  const tables::MemorySnapshotEdgeTable& memory_snapshot_edge_table() const {
    return memory_snapshot_edge_table_;
  }
  tables::MemorySnapshotEdgeTable* mutable_memory_snapshot_edge_table() {
    return &memory_snapshot_edge_table_;
  }

  const tables::ExpectedFrameTimelineSliceTable&
  expected_frame_timeline_slice_table() const {
    return expected_frame_timeline_slice_table_;
  }

  tables::ExpectedFrameTimelineSliceTable*
  mutable_expected_frame_timeline_slice_table() {
    return &expected_frame_timeline_slice_table_;
  }

  const tables::ActualFrameTimelineSliceTable&
  actual_frame_timeline_slice_table() const {
    return actual_frame_timeline_slice_table_;
  }
  tables::ActualFrameTimelineSliceTable*
  mutable_actual_frame_timeline_slice_table() {
    return &actual_frame_timeline_slice_table_;
  }

  const tables::ExperimentalProtoPathTable& experimental_proto_path_table()
      const {
    return experimental_proto_path_table_;
  }
  tables::ExperimentalProtoPathTable* mutable_experimental_proto_path_table() {
    return &experimental_proto_path_table_;
  }

  const tables::ExperimentalProtoContentTable&
  experimental_proto_content_table() const {
    return experimental_proto_content_table_;
  }
  tables::ExperimentalProtoContentTable*
  mutable_experimental_proto_content_table() {
    return &experimental_proto_content_table_;
  }

  const tables::ExpMissingChromeProcTable&
  experimental_missing_chrome_processes_table() const {
    return experimental_missing_chrome_processes_table_;
  }
  tables::ExpMissingChromeProcTable*
  mutable_experimental_missing_chrome_processes_table() {
    return &experimental_missing_chrome_processes_table_;
  }

  const views::ThreadSliceView& thread_slice_view() const {
    return thread_slice_view_;
  }

  const StringPool& string_pool() const { return string_pool_; }
  StringPool* mutable_string_pool() { return &string_pool_; }

  // Number of interned strings in the pool. Includes the empty string w/ ID=0.
  size_t string_count() const { return string_pool_.size(); }

  // Start / end ts (in nanoseconds) across the parsed trace events.
  // Returns (0, 0) if the trace is empty.
  std::pair<int64_t, int64_t> GetTraceTimestampBoundsNs() const;

  util::Status ExtractArg(uint32_t arg_set_id,
                          const char* key,
                          std::optional<Variadic>* result) {
    const auto& args = arg_table();
    RowMap filtered = args.FilterToRowMap(
        {args.arg_set_id().eq(arg_set_id), args.key().eq(key)});
    if (filtered.empty()) {
      *result = std::nullopt;
      return util::OkStatus();
    }
    if (filtered.size() > 1) {
      return util::ErrStatus(
          "EXTRACT_ARG: received multiple args matching arg set id and key");
    }
    uint32_t idx = filtered.Get(0);
    *result = GetArgValue(idx);
    return util::OkStatus();
  }
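
  // Illustrative sketch (not part of the original header): reading a single
  // arg out of an arg set. |arg_set_id| and the "debug.frame_number" key are
  // hypothetical; ExtractArg returns an error if more than one row matches.
  //
  //   std::optional<Variadic> arg;
  //   util::Status status =
  //       storage->ExtractArg(arg_set_id, "debug.frame_number", &arg);
  //   if (status.ok() && arg && arg->type == Variadic::Type::kInt) {
  //     int64_t frame_number = arg->int_value;
  //   }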

  Variadic GetArgValue(uint32_t row) const {
    Variadic v;
    v.type = *GetVariadicTypeForId(arg_table_.value_type()[row]);

    // Force initialization of union to stop GCC complaining.
    v.int_value = 0;

    switch (v.type) {
      case Variadic::Type::kBool:
        v.bool_value = static_cast<bool>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kInt:
        v.int_value = *arg_table_.int_value()[row];
        break;
      case Variadic::Type::kUint:
        v.uint_value = static_cast<uint64_t>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kString: {
        auto opt_value = arg_table_.string_value()[row];
        v.string_value = opt_value ? *opt_value : kNullStringId;
        break;
      }
      case Variadic::Type::kPointer:
        v.pointer_value = static_cast<uint64_t>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kReal:
        v.real_value = *arg_table_.real_value()[row];
        break;
      case Variadic::Type::kJson: {
        auto opt_value = arg_table_.string_value()[row];
        v.json_value = opt_value ? *opt_value : kNullStringId;
        break;
      }
      case Variadic::Type::kNull:
        break;
    }
    return v;
  }

  StringId GetIdForVariadicType(Variadic::Type type) const {
    return variadic_type_ids_[type];
  }

  std::optional<Variadic::Type> GetVariadicTypeForId(StringId id) const {
    auto it =
        std::find(variadic_type_ids_.begin(), variadic_type_ids_.end(), id);
    if (it == variadic_type_ids_.end())
      return std::nullopt;

    int64_t idx = std::distance(variadic_type_ids_.begin(), it);
    return static_cast<Variadic::Type>(idx);
  }
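
  // Illustrative sketch (not part of the original header): the two helpers
  // above are inverses of each other, mapping between a Variadic::Type enum
  // value and the interned string naming that type in |variadic_type_ids_|.
  //
  //   StringId int_type_id =
  //       storage->GetIdForVariadicType(Variadic::Type::kInt);
  //   std::optional<Variadic::Type> type =
  //       storage->GetVariadicTypeForId(int_type_id);  // == kInt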

 private:
  using StringHash = uint64_t;

  TraceStorage(const TraceStorage&) = delete;
  TraceStorage& operator=(const TraceStorage&) = delete;

  TraceStorage(TraceStorage&&) = delete;
  TraceStorage& operator=(TraceStorage&&) = delete;

  // One entry for each unique string in the trace.
  StringPool string_pool_;

  // Stats about parsing the trace.
  StatsMap stats_{};

  // Extra data extracted from the trace. Includes:
  // * metadata from chrome and benchmarking infrastructure
  // * descriptions of android packages
  tables::MetadataTable metadata_table_{&string_pool_};

  // Contains data from all the clock snapshots in the trace.
  tables::ClockSnapshotTable clock_snapshot_table_{&string_pool_};

  // Metadata for tracks.
  tables::TrackTable track_table_{&string_pool_};
  tables::ThreadStateTable thread_state_table_{&string_pool_};
  tables::CpuTrackTable cpu_track_table_{&string_pool_, &track_table_};
  tables::GpuTrackTable gpu_track_table_{&string_pool_, &track_table_};
  tables::ProcessTrackTable process_track_table_{&string_pool_, &track_table_};
  tables::ThreadTrackTable thread_track_table_{&string_pool_, &track_table_};

  // Track tables for counter events.
  tables::CounterTrackTable counter_track_table_{&string_pool_, &track_table_};
  tables::ThreadCounterTrackTable thread_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::ProcessCounterTrackTable process_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::CpuCounterTrackTable cpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::IrqCounterTrackTable irq_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::SoftirqCounterTrackTable softirq_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::GpuCounterTrackTable gpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::EnergyCounterTrackTable energy_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::UidCounterTrackTable uid_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::EnergyPerUidCounterTrackTable energy_per_uid_counter_track_table_{
      &string_pool_, &uid_counter_track_table_};
  tables::GpuCounterGroupTable gpu_counter_group_table_{&string_pool_};
  tables::PerfCounterTrackTable perf_counter_track_table_{
      &string_pool_, &counter_track_table_};

  // Args for all other tables.
  tables::ArgTable arg_table_{&string_pool_};

  // Information about all the threads and processes in the trace.
  tables::ThreadTable thread_table_{&string_pool_};
  tables::ProcessTable process_table_{&string_pool_};
  tables::FiledescriptorTable filedescriptor_table_{&string_pool_};

  // Slices coming from userspace events (e.g. Chromium TRACE_EVENT macros).
  tables::SliceTable slice_table_{&string_pool_};

  // Flow events from userspace events (e.g. Chromium TRACE_EVENT macros).
  tables::FlowTable flow_table_{&string_pool_};

  // Slices from CPU scheduling data.
  tables::SchedSliceTable sched_slice_table_{&string_pool_};

  // Additional attributes for virtual track slices (sub-type of
  // NestableSlices).
  VirtualTrackSlices virtual_track_slices_;

  // Additional attributes for gpu track slices (sub-type of
  // NestableSlices).
  tables::GpuSliceTable gpu_slice_table_{&string_pool_, &slice_table_};

  // The values from the Counter events from the trace. This includes CPU
  // frequency events as well as systrace trace_marker counter events.
  tables::CounterTable counter_table_{&string_pool_};

  SqlStats sql_stats_;

  tables::RawTable raw_table_{&string_pool_};
  tables::FtraceEventTable ftrace_event_table_{&string_pool_, &raw_table_};

  tables::CpuTable cpu_table_{&string_pool_};

  tables::CpuFreqTable cpu_freq_table_{&string_pool_};

  tables::AndroidLogTable android_log_table_{&string_pool_};

  tables::AndroidDumpstateTable android_dumpstate_table_{&string_pool_};

  tables::StackProfileMappingTable stack_profile_mapping_table_{&string_pool_};
  tables::StackProfileFrameTable stack_profile_frame_table_{&string_pool_};
  tables::StackProfileCallsiteTable stack_profile_callsite_table_{
      &string_pool_};
  tables::StackSampleTable stack_sample_table_{&string_pool_};
  tables::HeapProfileAllocationTable heap_profile_allocation_table_{
      &string_pool_};
  tables::CpuProfileStackSampleTable cpu_profile_stack_sample_table_{
      &string_pool_, &stack_sample_table_};
  tables::PerfSampleTable perf_sample_table_{&string_pool_};
  tables::PackageListTable package_list_table_{&string_pool_};
  tables::AndroidGameInterventionListTable
      android_game_intervention_list_table_{&string_pool_};
  tables::ProfilerSmapsTable profiler_smaps_table_{&string_pool_};

  // Symbol tables (mappings from frames to symbol names)
  tables::SymbolTable symbol_table_{&string_pool_};
  tables::HeapGraphObjectTable heap_graph_object_table_{&string_pool_};
  tables::HeapGraphClassTable heap_graph_class_table_{&string_pool_};
  tables::HeapGraphReferenceTable heap_graph_reference_table_{&string_pool_};

  tables::VulkanMemoryAllocationsTable vulkan_memory_allocations_table_{
      &string_pool_};

  tables::GraphicsFrameSliceTable graphics_frame_slice_table_{&string_pool_,
                                                              &slice_table_};

  // Metadata for memory snapshot.
  tables::MemorySnapshotTable memory_snapshot_table_{&string_pool_};
  tables::ProcessMemorySnapshotTable process_memory_snapshot_table_{
      &string_pool_};
  tables::MemorySnapshotNodeTable memory_snapshot_node_table_{&string_pool_};
  tables::MemorySnapshotEdgeTable memory_snapshot_edge_table_{&string_pool_};

  // FrameTimeline tables
  tables::ExpectedFrameTimelineSliceTable expected_frame_timeline_slice_table_{
      &string_pool_, &slice_table_};
  tables::ActualFrameTimelineSliceTable actual_frame_timeline_slice_table_{
      &string_pool_, &slice_table_};

  tables::ExperimentalProtoPathTable experimental_proto_path_table_{
      &string_pool_};
  tables::ExperimentalProtoContentTable experimental_proto_content_table_{
      &string_pool_};

  tables::ExpMissingChromeProcTable
      experimental_missing_chrome_processes_table_{&string_pool_};

  views::ThreadSliceView thread_slice_view_{&slice_table_, &thread_track_table_,
                                            &thread_table_};

  // The below array allows us to map between enums and their string
  // representations.
  std::array<StringId, Variadic::kMaxType + 1> variadic_type_ids_;
};

}  // namespace trace_processor
}  // namespace perfetto

template <>
struct std::hash<::perfetto::trace_processor::BaseId> {
  using argument_type = ::perfetto::trace_processor::BaseId;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<uint32_t>{}(r.value);
  }
};

template <>
struct std::hash<::perfetto::trace_processor::TrackId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::MappingId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::CallsiteId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::FrameId>
    : std::hash<::perfetto::trace_processor::BaseId> {};
template <>
struct std::hash<::perfetto::trace_processor::tables::HeapGraphObjectTable::Id>
    : std::hash<::perfetto::trace_processor::BaseId> {};

template <>
struct std::hash<
    ::perfetto::trace_processor::tables::StackProfileFrameTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileFrameTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.name) ^
           std::hash<std::optional<::perfetto::trace_processor::MappingId>>{}(
               r.mapping) ^
           std::hash<int64_t>{}(r.rel_pc);
  }
};

template <>
struct std::hash<
    ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<int64_t>{}(r.depth) ^
           std::hash<std::optional<::perfetto::trace_processor::CallsiteId>>{}(
               r.parent_id) ^
           std::hash<::perfetto::trace_processor::FrameId>{}(r.frame_id);
  }
};

template <>
struct std::hash<
    ::perfetto::trace_processor::tables::StackProfileMappingTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileMappingTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.build_id) ^
           std::hash<int64_t>{}(r.exact_offset) ^
           std::hash<int64_t>{}(r.start_offset) ^
           std::hash<int64_t>{}(r.start) ^ std::hash<int64_t>{}(r.end) ^
           std::hash<int64_t>{}(r.load_bias) ^
           std::hash<::perfetto::trace_processor::StringId>{}(r.name);
  }
};

#endif  // SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_