/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_
#define SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_

#include <algorithm>
#include <array>
#include <deque>
#include <functional>
#include <limits>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "perfetto/base/logging.h"
#include "perfetto/base/time.h"
#include "perfetto/ext/base/hash.h"
#include "perfetto/ext/base/optional.h"
#include "perfetto/ext/base/string_view.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/trace_processor/basic_types.h"
#include "src/trace_processor/containers/string_pool.h"
#include "src/trace_processor/storage/metadata.h"
#include "src/trace_processor/storage/stats.h"
#include "src/trace_processor/tables/android_tables.h"
#include "src/trace_processor/tables/counter_tables.h"
#include "src/trace_processor/tables/metadata_tables.h"
#include "src/trace_processor/tables/profiler_tables.h"
#include "src/trace_processor/tables/slice_tables.h"
#include "src/trace_processor/tables/track_tables.h"
#include "src/trace_processor/types/variadic.h"

namespace perfetto {
namespace trace_processor {

// UniquePid is an offset into |unique_processes_|. This is necessary because
// Unix pids are reused and thus not guaranteed to be unique over a long
// period of time.
using UniquePid = uint32_t;

// UniqueTid is an offset into |unique_threads_|. Necessary because tids can
// be reused.
using UniqueTid = uint32_t;

// StringId is an offset into |string_pool_|.
using StringId = StringPool::Id;
static const StringId kNullStringId = StringId::Null();

using ArgSetId = uint32_t;
static const ArgSetId kInvalidArgSetId = 0;

using TrackId = tables::TrackTable::Id;

using CounterId = tables::CounterTable::Id;

using SliceId = tables::SliceTable::Id;

using InstantId = tables::InstantTable::Id;

using SchedId = tables::SchedSliceTable::Id;

using MappingId = tables::StackProfileMappingTable::Id;

using FrameId = tables::StackProfileFrameTable::Id;

using SymbolId = tables::SymbolTable::Id;

using CallsiteId = tables::StackProfileCallsiteTable::Id;

using MetadataId = tables::MetadataTable::Id;

using RawId = tables::RawTable::Id;

using FlamegraphId = tables::ExperimentalFlamegraphNodesTable::Id;

using VulkanAllocId = tables::VulkanMemoryAllocationsTable::Id;

// TODO(lalitm): this is a temporary hack while migrating the counters table and
// will be removed when the migration is complete.
static const TrackId kInvalidTrackId =
    TrackId(std::numeric_limits<uint32_t>::max());

enum class RefType {
  kRefNoRef = 0,
  kRefUtid = 1,
  kRefCpuId = 2,
  kRefIrq = 3,
  kRefSoftIrq = 4,
  kRefUpid = 5,
  kRefGpuId = 6,
  kRefTrack = 7,
  kRefMax
};

const std::vector<NullTermStringView>& GetRefTypeStringMap();

// Stores the data in a trace file in a columnar form. This makes it efficient
// to read or search across a single field of the trace (e.g. all the thread
// names for a given CPU).
class TraceStorage {
 public:
  TraceStorage(const Config& = Config());

  virtual ~TraceStorage();

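  // Extra per-thread timing columns for slices, keyed (and sorted) by slice id.
  // Illustrative usage (hypothetical values; not part of the upstream source):
  //   uint32_t row = thread_slices->AddThreadSlice(
  //       slice_id, thread_ts_ns, /*thread_duration_ns=*/-1,
  //       thread_instr_count, /*thread_instruction_delta=*/-1);
  //   // Later, once the end of the slice is known:
  //   thread_slices->UpdateThreadDeltasForSliceId(slice_id, end_thread_ts_ns,
  //                                               end_thread_instr_count);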
  class ThreadSlices {
   public:
    inline uint32_t AddThreadSlice(uint32_t slice_id,
                                   int64_t thread_timestamp_ns,
                                   int64_t thread_duration_ns,
                                   int64_t thread_instruction_count,
                                   int64_t thread_instruction_delta) {
      slice_ids_.emplace_back(slice_id);
      thread_timestamp_ns_.emplace_back(thread_timestamp_ns);
      thread_duration_ns_.emplace_back(thread_duration_ns);
      thread_instruction_counts_.emplace_back(thread_instruction_count);
      thread_instruction_deltas_.emplace_back(thread_instruction_delta);
      return slice_count() - 1;
    }

    uint32_t slice_count() const {
      return static_cast<uint32_t>(slice_ids_.size());
    }

    const std::deque<uint32_t>& slice_ids() const { return slice_ids_; }
    const std::deque<int64_t>& thread_timestamp_ns() const {
      return thread_timestamp_ns_;
    }
    const std::deque<int64_t>& thread_duration_ns() const {
      return thread_duration_ns_;
    }
    const std::deque<int64_t>& thread_instruction_counts() const {
      return thread_instruction_counts_;
    }
    const std::deque<int64_t>& thread_instruction_deltas() const {
      return thread_instruction_deltas_;
    }

    base::Optional<uint32_t> FindRowForSliceId(uint32_t slice_id) const {
      auto it =
          std::lower_bound(slice_ids().begin(), slice_ids().end(), slice_id);
      if (it != slice_ids().end() && *it == slice_id) {
        return static_cast<uint32_t>(std::distance(slice_ids().begin(), it));
      }
      return base::nullopt;
    }

    void UpdateThreadDeltasForSliceId(uint32_t slice_id,
                                      int64_t end_thread_timestamp_ns,
                                      int64_t end_thread_instruction_count) {
      uint32_t row = *FindRowForSliceId(slice_id);
      int64_t begin_ns = thread_timestamp_ns_[row];
      thread_duration_ns_[row] = end_thread_timestamp_ns - begin_ns;
      int64_t begin_ticount = thread_instruction_counts_[row];
      thread_instruction_deltas_[row] =
          end_thread_instruction_count - begin_ticount;
    }

   private:
    std::deque<uint32_t> slice_ids_;
    std::deque<int64_t> thread_timestamp_ns_;
    std::deque<int64_t> thread_duration_ns_;
    std::deque<int64_t> thread_instruction_counts_;
    std::deque<int64_t> thread_instruction_deltas_;
  };

  class VirtualTrackSlices {
   public:
    inline uint32_t AddVirtualTrackSlice(uint32_t slice_id,
                                         int64_t thread_timestamp_ns,
                                         int64_t thread_duration_ns,
                                         int64_t thread_instruction_count,
                                         int64_t thread_instruction_delta) {
      slice_ids_.emplace_back(slice_id);
      thread_timestamp_ns_.emplace_back(thread_timestamp_ns);
      thread_duration_ns_.emplace_back(thread_duration_ns);
      thread_instruction_counts_.emplace_back(thread_instruction_count);
      thread_instruction_deltas_.emplace_back(thread_instruction_delta);
      return slice_count() - 1;
    }

    uint32_t slice_count() const {
      return static_cast<uint32_t>(slice_ids_.size());
    }

    const std::deque<uint32_t>& slice_ids() const { return slice_ids_; }
    const std::deque<int64_t>& thread_timestamp_ns() const {
      return thread_timestamp_ns_;
    }
    const std::deque<int64_t>& thread_duration_ns() const {
      return thread_duration_ns_;
    }
    const std::deque<int64_t>& thread_instruction_counts() const {
      return thread_instruction_counts_;
    }
    const std::deque<int64_t>& thread_instruction_deltas() const {
      return thread_instruction_deltas_;
    }

    base::Optional<uint32_t> FindRowForSliceId(uint32_t slice_id) const {
      auto it =
          std::lower_bound(slice_ids().begin(), slice_ids().end(), slice_id);
      if (it != slice_ids().end() && *it == slice_id) {
        return static_cast<uint32_t>(std::distance(slice_ids().begin(), it));
      }
      return base::nullopt;
    }

    void UpdateThreadDeltasForSliceId(uint32_t slice_id,
                                      int64_t end_thread_timestamp_ns,
                                      int64_t end_thread_instruction_count) {
      uint32_t row = *FindRowForSliceId(slice_id);
      int64_t begin_ns = thread_timestamp_ns_[row];
      thread_duration_ns_[row] = end_thread_timestamp_ns - begin_ns;
      int64_t begin_ticount = thread_instruction_counts_[row];
      thread_instruction_deltas_[row] =
          end_thread_instruction_count - begin_ticount;
    }

   private:
    std::deque<uint32_t> slice_ids_;
    std::deque<int64_t> thread_timestamp_ns_;
    std::deque<int64_t> thread_duration_ns_;
    std::deque<int64_t> thread_instruction_counts_;
    std::deque<int64_t> thread_instruction_deltas_;
  };

  class SqlStats {
   public:
    static constexpr size_t kMaxLogEntries = 100;
    uint32_t RecordQueryBegin(const std::string& query,
                              int64_t time_queued,
                              int64_t time_started);
    void RecordQueryFirstNext(uint32_t row, int64_t time_first_next);
    void RecordQueryEnd(uint32_t row, int64_t time_end);
    size_t size() const { return queries_.size(); }
    const std::deque<std::string>& queries() const { return queries_; }
    const std::deque<int64_t>& times_queued() const { return times_queued_; }
    const std::deque<int64_t>& times_started() const { return times_started_; }
    const std::deque<int64_t>& times_first_next() const {
      return times_first_next_;
    }
    const std::deque<int64_t>& times_ended() const { return times_ended_; }

   private:
    uint32_t popped_queries_ = 0;

    std::deque<std::string> queries_;
    std::deque<int64_t> times_queued_;
    std::deque<int64_t> times_started_;
    std::deque<int64_t> times_first_next_;
    std::deque<int64_t> times_ended_;
  };

  struct Stats {
    using IndexMap = std::map<int, int64_t>;
    int64_t value = 0;
    IndexMap indexed_values;
  };
  using StatsMap = std::array<Stats, stats::kNumKeys>;

  // Returns a unique identifier for the contents of each string.
  // The string is copied internally and can be destroyed after this call.
  // Virtual for testing.
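  // Illustrative usage (hypothetical; |storage| is a TraceStorage instance):
  //   StringId id = storage.InternString(base::StringView("my_thread"));
  //   PERFETTO_DCHECK(storage.GetString(id) == "my_thread");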
  virtual StringId InternString(base::StringView str) {
    return string_pool_.InternString(str);
  }

  // Example usage: SetStats(stats::android_log_num_failed, 42);
  void SetStats(size_t key, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value = value;
  }

  // Example usage: IncrementStats(stats::android_log_num_failed, -1);
  void IncrementStats(size_t key, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kSingle);
    stats_[key].value += increment;
  }

  // Example usage: IncrementIndexedStats(stats::cpu_failure, 1);
  void IncrementIndexedStats(size_t key, int index, int64_t increment = 1) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] += increment;
  }

  // Example usage: SetIndexedStats(stats::cpu_failure, 1, 42);
  void SetIndexedStats(size_t key, int index, int64_t value) {
    PERFETTO_DCHECK(key < stats::kNumKeys);
    PERFETTO_DCHECK(stats::kTypes[key] == stats::kIndexed);
    stats_[key].indexed_values[index] = value;
  }

  class ScopedStatsTracer {
   public:
    ScopedStatsTracer(TraceStorage* storage, size_t key)
        : storage_(storage), key_(key), start_ns_(base::GetWallTimeNs()) {}

    ~ScopedStatsTracer() {
      if (!storage_)
        return;
      auto delta_ns = base::GetWallTimeNs() - start_ns_;
      storage_->IncrementStats(key_, delta_ns.count());
    }

    ScopedStatsTracer(ScopedStatsTracer&& other) noexcept { MoveImpl(&other); }

    ScopedStatsTracer& operator=(ScopedStatsTracer&& other) {
      MoveImpl(&other);
      return *this;
    }

   private:
    ScopedStatsTracer(const ScopedStatsTracer&) = delete;
    ScopedStatsTracer& operator=(const ScopedStatsTracer&) = delete;

    void MoveImpl(ScopedStatsTracer* other) {
      storage_ = other->storage_;
      key_ = other->key_;
      start_ns_ = other->start_ns_;
      other->storage_ = nullptr;
    }

    TraceStorage* storage_;
    size_t key_;
    base::TimeNanos start_ns_;
  };

  ScopedStatsTracer TraceExecutionTimeIntoStats(size_t key) {
    return ScopedStatsTracer(this, key);
  }
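
  // Illustrative usage (hypothetical stat key; pick a real stats:: entry):
  //   {
  //     auto tracer = storage->TraceExecutionTimeIntoStats(stats::some_key);
  //     // ... timed work; the elapsed wall time is added to the stat when
  //     // |tracer| goes out of scope.
  //   }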

  // Reading methods.
  // Virtual for testing.
  virtual NullTermStringView GetString(StringId id) const {
    return string_pool_.Get(id);
  }

  const tables::ThreadTable& thread_table() const { return thread_table_; }
  tables::ThreadTable* mutable_thread_table() { return &thread_table_; }

  const tables::ProcessTable& process_table() const { return process_table_; }
  tables::ProcessTable* mutable_process_table() { return &process_table_; }

  const tables::TrackTable& track_table() const { return track_table_; }
  tables::TrackTable* mutable_track_table() { return &track_table_; }

  const tables::ProcessTrackTable& process_track_table() const {
    return process_track_table_;
  }
  tables::ProcessTrackTable* mutable_process_track_table() {
    return &process_track_table_;
  }

  const tables::ThreadTrackTable& thread_track_table() const {
    return thread_track_table_;
  }
  tables::ThreadTrackTable* mutable_thread_track_table() {
    return &thread_track_table_;
  }

  const tables::CounterTrackTable& counter_track_table() const {
    return counter_track_table_;
  }
  tables::CounterTrackTable* mutable_counter_track_table() {
    return &counter_track_table_;
  }

  const tables::ThreadCounterTrackTable& thread_counter_track_table() const {
    return thread_counter_track_table_;
  }
  tables::ThreadCounterTrackTable* mutable_thread_counter_track_table() {
    return &thread_counter_track_table_;
  }

  const tables::ProcessCounterTrackTable& process_counter_track_table() const {
    return process_counter_track_table_;
  }
  tables::ProcessCounterTrackTable* mutable_process_counter_track_table() {
    return &process_counter_track_table_;
  }

  const tables::CpuCounterTrackTable& cpu_counter_track_table() const {
    return cpu_counter_track_table_;
  }
  tables::CpuCounterTrackTable* mutable_cpu_counter_track_table() {
    return &cpu_counter_track_table_;
  }

  const tables::IrqCounterTrackTable& irq_counter_track_table() const {
    return irq_counter_track_table_;
  }
  tables::IrqCounterTrackTable* mutable_irq_counter_track_table() {
    return &irq_counter_track_table_;
  }

  const tables::SoftirqCounterTrackTable& softirq_counter_track_table() const {
    return softirq_counter_track_table_;
  }
  tables::SoftirqCounterTrackTable* mutable_softirq_counter_track_table() {
    return &softirq_counter_track_table_;
  }

  const tables::GpuCounterTrackTable& gpu_counter_track_table() const {
    return gpu_counter_track_table_;
  }
  tables::GpuCounterTrackTable* mutable_gpu_counter_track_table() {
    return &gpu_counter_track_table_;
  }

  const tables::GpuCounterGroupTable& gpu_counter_group_table() const {
    return gpu_counter_group_table_;
  }
  tables::GpuCounterGroupTable* mutable_gpu_counter_group_table() {
    return &gpu_counter_group_table_;
  }

  const tables::SchedSliceTable& sched_slice_table() const {
    return sched_slice_table_;
  }
  tables::SchedSliceTable* mutable_sched_slice_table() {
    return &sched_slice_table_;
  }

  const tables::SliceTable& slice_table() const { return slice_table_; }
  tables::SliceTable* mutable_slice_table() { return &slice_table_; }

  const ThreadSlices& thread_slices() const { return thread_slices_; }
  ThreadSlices* mutable_thread_slices() { return &thread_slices_; }

  const VirtualTrackSlices& virtual_track_slices() const {
    return virtual_track_slices_;
  }
  VirtualTrackSlices* mutable_virtual_track_slices() {
    return &virtual_track_slices_;
  }

  const tables::GpuSliceTable& gpu_slice_table() const {
    return gpu_slice_table_;
  }
  tables::GpuSliceTable* mutable_gpu_slice_table() { return &gpu_slice_table_; }

  const tables::CounterTable& counter_table() const { return counter_table_; }
  tables::CounterTable* mutable_counter_table() { return &counter_table_; }

  const SqlStats& sql_stats() const { return sql_stats_; }
  SqlStats* mutable_sql_stats() { return &sql_stats_; }

  const tables::InstantTable& instant_table() const { return instant_table_; }
  tables::InstantTable* mutable_instant_table() { return &instant_table_; }

  const tables::AndroidLogTable& android_log_table() const {
    return android_log_table_;
  }
  tables::AndroidLogTable* mutable_android_log_table() {
    return &android_log_table_;
  }

  const StatsMap& stats() const { return stats_; }

  const tables::MetadataTable& metadata_table() const {
    return metadata_table_;
  }
  tables::MetadataTable* mutable_metadata_table() { return &metadata_table_; }

  const tables::ArgTable& arg_table() const { return arg_table_; }
  tables::ArgTable* mutable_arg_table() { return &arg_table_; }

  const tables::RawTable& raw_table() const { return raw_table_; }
  tables::RawTable* mutable_raw_table() { return &raw_table_; }

  const tables::CpuTable& cpu_table() const { return cpu_table_; }
  tables::CpuTable* mutable_cpu_table() { return &cpu_table_; }

  const tables::CpuFreqTable& cpu_freq_table() const { return cpu_freq_table_; }
  tables::CpuFreqTable* mutable_cpu_freq_table() { return &cpu_freq_table_; }

  const tables::StackProfileMappingTable& stack_profile_mapping_table() const {
    return stack_profile_mapping_table_;
  }
  tables::StackProfileMappingTable* mutable_stack_profile_mapping_table() {
    return &stack_profile_mapping_table_;
  }

  const tables::StackProfileFrameTable& stack_profile_frame_table() const {
    return stack_profile_frame_table_;
  }
  tables::StackProfileFrameTable* mutable_stack_profile_frame_table() {
    return &stack_profile_frame_table_;
  }

  const tables::StackProfileCallsiteTable& stack_profile_callsite_table()
      const {
    return stack_profile_callsite_table_;
  }
  tables::StackProfileCallsiteTable* mutable_stack_profile_callsite_table() {
    return &stack_profile_callsite_table_;
  }

  const tables::HeapProfileAllocationTable& heap_profile_allocation_table()
      const {
    return heap_profile_allocation_table_;
  }
  tables::HeapProfileAllocationTable* mutable_heap_profile_allocation_table() {
    return &heap_profile_allocation_table_;
  }

  const tables::PackageListTable& package_list_table() const {
    return package_list_table_;
  }
  tables::PackageListTable* mutable_package_list_table() {
    return &package_list_table_;
  }

  const tables::ProfilerSmapsTable& profiler_smaps_table() const {
    return profiler_smaps_table_;
  }
  tables::ProfilerSmapsTable* mutable_profiler_smaps_table() {
    return &profiler_smaps_table_;
  }

  const tables::CpuProfileStackSampleTable& cpu_profile_stack_sample_table()
      const {
    return cpu_profile_stack_sample_table_;
  }
  tables::CpuProfileStackSampleTable* mutable_cpu_profile_stack_sample_table() {
    return &cpu_profile_stack_sample_table_;
  }

  const tables::SymbolTable& symbol_table() const { return symbol_table_; }

  tables::SymbolTable* mutable_symbol_table() { return &symbol_table_; }

  const tables::HeapGraphObjectTable& heap_graph_object_table() const {
    return heap_graph_object_table_;
  }

  tables::HeapGraphObjectTable* mutable_heap_graph_object_table() {
    return &heap_graph_object_table_;
  }

  const tables::HeapGraphClassTable& heap_graph_class_table() const {
    return heap_graph_class_table_;
  }

  tables::HeapGraphClassTable* mutable_heap_graph_class_table() {
    return &heap_graph_class_table_;
  }

  const tables::HeapGraphReferenceTable& heap_graph_reference_table() const {
    return heap_graph_reference_table_;
  }

  tables::HeapGraphReferenceTable* mutable_heap_graph_reference_table() {
    return &heap_graph_reference_table_;
  }

  const tables::GpuTrackTable& gpu_track_table() const {
    return gpu_track_table_;
  }
  tables::GpuTrackTable* mutable_gpu_track_table() { return &gpu_track_table_; }

  const tables::VulkanMemoryAllocationsTable& vulkan_memory_allocations_table()
      const {
    return vulkan_memory_allocations_table_;
  }

  tables::VulkanMemoryAllocationsTable*
  mutable_vulkan_memory_allocations_table() {
    return &vulkan_memory_allocations_table_;
  }

  const tables::GraphicsFrameSliceTable& graphics_frame_slice_table() const {
    return graphics_frame_slice_table_;
  }

  tables::GraphicsFrameSliceTable* mutable_graphics_frame_slice_table() {
    return &graphics_frame_slice_table_;
  }

  const StringPool& string_pool() const { return string_pool_; }
  StringPool* mutable_string_pool() { return &string_pool_; }

  // Number of interned strings in the pool. Includes the empty string w/ ID=0.
  size_t string_count() const { return string_pool_.size(); }

  // Start / end ts (in nanoseconds) across the parsed trace events.
  // Returns (0, 0) if the trace is empty.
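  // Illustrative usage (hypothetical):
  //   auto bounds = storage.GetTraceTimestampBoundsNs();
  //   int64_t trace_duration_ns = bounds.second - bounds.first;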
  std::pair<int64_t, int64_t> GetTraceTimestampBoundsNs() const;

  // TODO(lalitm): remove this when we have a better home.
  std::vector<MappingId> FindMappingRow(StringId name,
                                        StringId build_id) const {
    auto it = stack_profile_mapping_index_.find(std::make_pair(name, build_id));
    if (it == stack_profile_mapping_index_.end())
      return {};
    return it->second;
  }

  // TODO(lalitm): remove this when we have a better home.
  void InsertMappingId(StringId name, StringId build_id, MappingId row) {
    auto pair = std::make_pair(name, build_id);
    stack_profile_mapping_index_[pair].emplace_back(row);
  }
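
  // Illustrative use of the mapping index above (hypothetical ids):
  //   storage->InsertMappingId(name_id, build_id, mapping_id);
  //   std::vector<MappingId> rows = storage->FindMappingRow(name_id, build_id);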

  // TODO(lalitm): remove this when we have a better home.
  std::vector<FrameId> FindFrameIds(MappingId mapping_row,
                                    uint64_t rel_pc) const {
    auto it =
        stack_profile_frame_index_.find(std::make_pair(mapping_row, rel_pc));
    if (it == stack_profile_frame_index_.end())
      return {};
    return it->second;
  }

  // TODO(lalitm): remove this when we have a better home.
  void InsertFrameRow(MappingId mapping_row, uint64_t rel_pc, FrameId row) {
    auto pair = std::make_pair(mapping_row, rel_pc);
    stack_profile_frame_index_[pair].emplace_back(row);
  }

  Variadic GetArgValue(uint32_t row) const {
    Variadic v;
    v.type = *GetVariadicTypeForId(arg_table_.value_type()[row]);

    // Force initialization of union to stop GCC complaining.
    v.int_value = 0;

    switch (v.type) {
      case Variadic::Type::kBool:
        v.bool_value = static_cast<bool>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kInt:
        v.int_value = *arg_table_.int_value()[row];
        break;
      case Variadic::Type::kUint:
        v.uint_value = static_cast<uint64_t>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kString: {
        auto opt_value = arg_table_.string_value()[row];
        v.string_value = opt_value ? *opt_value : kNullStringId;
        break;
      }
      case Variadic::Type::kPointer:
        v.pointer_value = static_cast<uint64_t>(*arg_table_.int_value()[row]);
        break;
      case Variadic::Type::kReal:
        v.real_value = *arg_table_.real_value()[row];
        break;
      case Variadic::Type::kJson: {
        auto opt_value = arg_table_.string_value()[row];
        v.json_value = opt_value ? *opt_value : kNullStringId;
        break;
      }
    }
    return v;
  }
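
  // Illustrative decoding of one arg row (hypothetical |row| into arg_table_):
  //   Variadic v = storage.GetArgValue(row);
  //   if (v.type == Variadic::Type::kInt) {
  //     int64_t int_arg = v.int_value;
  //     // ... use |int_arg| ...
  //   }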

  StringId GetIdForVariadicType(Variadic::Type type) const {
    return variadic_type_ids_[type];
  }

  base::Optional<Variadic::Type> GetVariadicTypeForId(StringId id) const {
    auto it =
        std::find(variadic_type_ids_.begin(), variadic_type_ids_.end(), id);
    if (it == variadic_type_ids_.end())
      return base::nullopt;

    int64_t idx = std::distance(variadic_type_ids_.begin(), it);
    return static_cast<Variadic::Type>(idx);
  }

 private:
  using StringHash = uint64_t;

  TraceStorage(const TraceStorage&) = delete;
  TraceStorage& operator=(const TraceStorage&) = delete;

  TraceStorage(TraceStorage&&) = delete;
  TraceStorage& operator=(TraceStorage&&) = delete;

  // TODO(lalitm): remove this when we find a better home for this.
  using MappingKey = std::pair<StringId /* name */, StringId /* build id */>;
  std::map<MappingKey, std::vector<MappingId>> stack_profile_mapping_index_;

  // TODO(lalitm): remove this when we find a better home for this.
  using FrameKey = std::pair<MappingId, uint64_t /* rel_pc */>;
  std::map<FrameKey, std::vector<FrameId>> stack_profile_frame_index_;

  // One entry for each unique string in the trace.
  StringPool string_pool_;

  // Stats about parsing the trace.
  StatsMap stats_{};

  // Extra data extracted from the trace. Includes:
  // * metadata from chrome and benchmarking infrastructure
  // * descriptions of android packages
  tables::MetadataTable metadata_table_{&string_pool_, nullptr};

  // Metadata for tracks.
  tables::TrackTable track_table_{&string_pool_, nullptr};
  tables::GpuTrackTable gpu_track_table_{&string_pool_, &track_table_};
  tables::ProcessTrackTable process_track_table_{&string_pool_, &track_table_};
  tables::ThreadTrackTable thread_track_table_{&string_pool_, &track_table_};

  // Track tables for counter events.
  tables::CounterTrackTable counter_track_table_{&string_pool_, &track_table_};
  tables::ThreadCounterTrackTable thread_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::ProcessCounterTrackTable process_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::CpuCounterTrackTable cpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::IrqCounterTrackTable irq_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::SoftirqCounterTrackTable softirq_counter_track_table_{
      &string_pool_, &counter_track_table_};
  tables::GpuCounterTrackTable gpu_counter_track_table_{&string_pool_,
                                                        &counter_track_table_};
  tables::GpuCounterGroupTable gpu_counter_group_table_{&string_pool_, nullptr};

  // Args for all other tables.
  tables::ArgTable arg_table_{&string_pool_, nullptr};

  // Information about all the threads and processes in the trace.
  tables::ThreadTable thread_table_{&string_pool_, nullptr};
  tables::ProcessTable process_table_{&string_pool_, nullptr};

  // Slices coming from userspace events (e.g. Chromium TRACE_EVENT macros).
  tables::SliceTable slice_table_{&string_pool_, nullptr};

  // Slices from CPU scheduling data.
  tables::SchedSliceTable sched_slice_table_{&string_pool_, nullptr};

  // Additional attributes for thread slices (sub-type of NestableSlices).
  ThreadSlices thread_slices_;

  // Additional attributes for virtual track slices (sub-type of
  // NestableSlices).
  VirtualTrackSlices virtual_track_slices_;

  // Additional attributes for gpu track slices (sub-type of
  // NestableSlices).
  tables::GpuSliceTable gpu_slice_table_{&string_pool_, &slice_table_};

  // The values from the Counter events from the trace. This includes CPU
  // frequency events as well as systrace trace_marker counter events.
  tables::CounterTable counter_table_{&string_pool_, nullptr};

  SqlStats sql_stats_;

  // These are instantaneous events in the trace. They have no duration
  // and do not have a value that makes sense to track over time.
  // e.g. signal events.
  tables::InstantTable instant_table_{&string_pool_, nullptr};

  // Raw events are every ftrace event in the trace. The raw event includes
  // the timestamp and the pid. The args for the raw event will be in the
  // args table. This table can be used to generate a text version of the
  // trace.
  tables::RawTable raw_table_{&string_pool_, nullptr};

  tables::CpuTable cpu_table_{&string_pool_, nullptr};

  tables::CpuFreqTable cpu_freq_table_{&string_pool_, nullptr};

  tables::AndroidLogTable android_log_table_{&string_pool_, nullptr};

  tables::StackProfileMappingTable stack_profile_mapping_table_{&string_pool_,
                                                                nullptr};
  tables::StackProfileFrameTable stack_profile_frame_table_{&string_pool_,
                                                            nullptr};
  tables::StackProfileCallsiteTable stack_profile_callsite_table_{&string_pool_,
                                                                  nullptr};
  tables::HeapProfileAllocationTable heap_profile_allocation_table_{
      &string_pool_, nullptr};
  tables::CpuProfileStackSampleTable cpu_profile_stack_sample_table_{
      &string_pool_, nullptr};
  tables::PackageListTable package_list_table_{&string_pool_, nullptr};
  tables::ProfilerSmapsTable profiler_smaps_table_{&string_pool_, nullptr};

  // Symbol tables (mappings from frames to symbol names)
  tables::SymbolTable symbol_table_{&string_pool_, nullptr};
  tables::HeapGraphObjectTable heap_graph_object_table_{&string_pool_, nullptr};
  tables::HeapGraphClassTable heap_graph_class_table_{&string_pool_, nullptr};
  tables::HeapGraphReferenceTable heap_graph_reference_table_{&string_pool_,
                                                              nullptr};

  tables::VulkanMemoryAllocationsTable vulkan_memory_allocations_table_{
      &string_pool_, nullptr};

  tables::GraphicsFrameSliceTable graphics_frame_slice_table_{&string_pool_,
                                                              &slice_table_};

  // The below array allows us to map between enums and their string
  // representations.
  std::array<StringId, Variadic::kMaxType + 1> variadic_type_ids_;
};

}  // namespace trace_processor
}  // namespace perfetto

namespace std {

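// The specializations below let the strongly-typed Id wrappers and table Row
// structs be used as keys in std::unordered_map / std::unordered_set.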
template <>
struct hash<::perfetto::trace_processor::BaseId> {
  using argument_type = ::perfetto::trace_processor::BaseId;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<uint32_t>{}(r.value);
  }
};

template <>
struct hash<::perfetto::trace_processor::TrackId>
    : hash<::perfetto::trace_processor::BaseId> {};
template <>
struct hash<::perfetto::trace_processor::MappingId>
    : hash<::perfetto::trace_processor::BaseId> {};
template <>
struct hash<::perfetto::trace_processor::CallsiteId>
    : hash<::perfetto::trace_processor::BaseId> {};
template <>
struct hash<::perfetto::trace_processor::FrameId>
    : hash<::perfetto::trace_processor::BaseId> {};

template <>
struct hash<::perfetto::trace_processor::tables::StackProfileFrameTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileFrameTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.name) ^
           std::hash<::perfetto::base::Optional<
               ::perfetto::trace_processor::MappingId>>{}(r.mapping) ^
           std::hash<int64_t>{}(r.rel_pc);
  }
};

template <>
struct hash<
    ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileCallsiteTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<int64_t>{}(r.depth) ^
           std::hash<::perfetto::base::Optional<
               ::perfetto::trace_processor::CallsiteId>>{}(r.parent_id) ^
           std::hash<::perfetto::trace_processor::FrameId>{}(r.frame_id);
  }
};

template <>
struct hash<
    ::perfetto::trace_processor::tables::StackProfileMappingTable::Row> {
  using argument_type =
      ::perfetto::trace_processor::tables::StackProfileMappingTable::Row;
  using result_type = size_t;

  result_type operator()(const argument_type& r) const {
    return std::hash<::perfetto::trace_processor::StringId>{}(r.build_id) ^
           std::hash<int64_t>{}(r.exact_offset) ^
           std::hash<int64_t>{}(r.start_offset) ^
           std::hash<int64_t>{}(r.start) ^ std::hash<int64_t>{}(r.end) ^
           std::hash<int64_t>{}(r.load_bias) ^
           std::hash<::perfetto::trace_processor::StringId>{}(r.name);
  }
};

}  // namespace std

#endif  // SRC_TRACE_PROCESSOR_STORAGE_TRACE_STORAGE_H_