/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/trace_processor/importers/proto/heap_profile_tracker.h"

#include "perfetto/base/logging.h"
#include "src/trace_processor/importers/common/process_tracker.h"
#include "src/trace_processor/types/trace_processor_context.h"

#include "protos/perfetto/trace/profiling/profile_common.pbzero.h"
#include "protos/perfetto/trace/profiling/profile_packet.pbzero.h"

namespace perfetto {
namespace trace_processor {

HeapProfileTracker::HeapProfileTracker(TraceProcessorContext* context)
    : context_(context),
      empty_(context_->storage->InternString({"", 0})),
      art_heap_(context_->storage->InternString("com.android.art")) {}

HeapProfileTracker::~HeapProfileTracker() = default;

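// Each ProfilePacket on a sequence carries an increasing index, starting at
// 0. Flag a missing-packet stat whenever the index on this sequence does not
// directly follow the previous one.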
void HeapProfileTracker::SetProfilePacketIndex(uint32_t seq_id,
                                               uint64_t index) {
  SequenceState& sequence_state = sequence_state_[seq_id];
  bool dropped_packet = false;
  // heapprofd starts counting at index = 0.
  if (!sequence_state.prev_index && index != 0) {
    dropped_packet = true;
  }

  if (sequence_state.prev_index && *sequence_state.prev_index + 1 != index) {
    dropped_packet = true;
  }

  if (dropped_packet) {
    if (sequence_state.prev_index) {
      PERFETTO_ELOG("Missing packets between %" PRIu64 " and %" PRIu64,
                    *sequence_state.prev_index, index);
    } else {
      PERFETTO_ELOG("Invalid first packet index %" PRIu64 " (!= 0)", index);
    }

    context_->storage->IncrementStats(stats::heapprofd_missing_packet);
  }
  sequence_state.prev_index = index;
}

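// Resolves the interned callstack for |alloc| and converts its cumulative
// allocation/free counters into per-packet delta rows in the
// heap_profile_allocation table.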
void HeapProfileTracker::AddAllocation(
    uint32_t seq_id,
    SequenceStackProfileTracker* sequence_stack_profile_tracker,
    const SourceAllocation& alloc,
    const SequenceStackProfileTracker::InternLookup* intern_lookup) {
  SequenceState& sequence_state = sequence_state_[seq_id];

  auto opt_callstack_id = sequence_stack_profile_tracker->FindOrInsertCallstack(
      alloc.callstack_id, intern_lookup);
  if (!opt_callstack_id)
    return;

  CallsiteId callstack_id = *opt_callstack_id;

  UniquePid upid = context_->process_tracker->GetOrCreateProcess(
      static_cast<uint32_t>(alloc.pid));

  tables::HeapProfileAllocationTable::Row alloc_row{
      alloc.timestamp,
      upid,
      alloc.heap_name,
      callstack_id,
      static_cast<int64_t>(alloc.alloc_count),
      static_cast<int64_t>(alloc.self_allocated)};

  tables::HeapProfileAllocationTable::Row free_row{
      alloc.timestamp,
      upid,
      alloc.heap_name,
      callstack_id,
      -static_cast<int64_t>(alloc.free_count),
      -static_cast<int64_t>(alloc.self_freed)};

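  // Look up the cumulative values recorded the last time this
  // (upid, callstack) pair was seen on this sequence. Zero-initialized rows
  // are inserted on first use, so the first delta equals the full cumulative
  // value.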
  auto prev_alloc_it = sequence_state.prev_alloc.find({upid, callstack_id});
  if (prev_alloc_it == sequence_state.prev_alloc.end()) {
    std::tie(prev_alloc_it, std::ignore) = sequence_state.prev_alloc.emplace(
        std::make_pair(upid, callstack_id),
        tables::HeapProfileAllocationTable::Row{});
  }

  tables::HeapProfileAllocationTable::Row& prev_alloc = prev_alloc_it->second;

  auto prev_free_it = sequence_state.prev_free.find({upid, callstack_id});
  if (prev_free_it == sequence_state.prev_free.end()) {
    std::tie(prev_free_it, std::ignore) = sequence_state.prev_free.emplace(
        std::make_pair(upid, callstack_id),
        tables::HeapProfileAllocationTable::Row{});
  }

  tables::HeapProfileAllocationTable::Row& prev_free = prev_free_it->second;

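  // The counters coming from the producer are cumulative per source callstack
  // id. Track which CallsiteIds that id has resolved to on this sequence: the
  // first time it maps to a new CallsiteId (which can happen when source ids
  // are reused across profiles after ClearIndices), snapshot the running
  // totals as a correction to be applied to the incoming counters below.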
  std::set<CallsiteId>& callstacks_for_source_callstack_id =
      sequence_state.seen_callstacks[SourceAllocationIndex{
          upid, alloc.callstack_id, alloc.heap_name}];
  bool new_callstack;
  std::tie(std::ignore, new_callstack) =
      callstacks_for_source_callstack_id.emplace(callstack_id);

  if (new_callstack) {
    sequence_state.alloc_correction[alloc.callstack_id] = prev_alloc;
    sequence_state.free_correction[alloc.callstack_id] = prev_free;
  }

  auto alloc_correction_it =
      sequence_state.alloc_correction.find(alloc.callstack_id);
  if (alloc_correction_it != sequence_state.alloc_correction.end()) {
    const auto& alloc_correction = alloc_correction_it->second;
    alloc_row.count += alloc_correction.count;
    alloc_row.size += alloc_correction.size;
  }

  auto free_correction_it =
      sequence_state.free_correction.find(alloc.callstack_id);
  if (free_correction_it != sequence_state.free_correction.end()) {
    const auto& free_correction = free_correction_it->second;
    free_row.count += free_correction.count;
    free_row.size += free_correction.size;
  }

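  // Convert the (corrected) cumulative counters into deltas against the
  // values recorded for the previous packet, so each inserted row stores only
  // the change since the last dump.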
  tables::HeapProfileAllocationTable::Row alloc_delta = alloc_row;
  tables::HeapProfileAllocationTable::Row free_delta = free_row;

  alloc_delta.count -= prev_alloc.count;
  alloc_delta.size -= prev_alloc.size;

  free_delta.count -= prev_free.count;
  free_delta.size -= prev_free.size;

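  // Cumulative allocation counters should never decrease, and the (negated)
  // free counters should never increase; if they do, the packet is malformed
  // and the row is dropped.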
  if (alloc_delta.count < 0 || alloc_delta.size < 0 || free_delta.count > 0 ||
      free_delta.size > 0) {
    PERFETTO_DLOG("Non-monotonic allocation.");
    context_->storage->IncrementIndexedStats(stats::heapprofd_malformed_packet,
                                             static_cast<int>(upid));
    return;
  }

  // Dump-at-max profiles do not have .count set.
  if (alloc_delta.count || alloc_delta.size) {
    context_->storage->mutable_heap_profile_allocation_table()->Insert(
        alloc_delta);
  }

  // ART only reports allocations, not frees. This throws off our logic, which
  // assumes that if a new object was allocated at the same address, the old
  // one must have been freed in the meantime.
  // See HeapTracker::RecordMalloc in bookkeeping.cc.
  if (alloc.heap_name != art_heap_ && (free_delta.count || free_delta.size)) {
    context_->storage->mutable_heap_profile_allocation_table()->Insert(
        free_delta);
  }

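  // Remember the corrected cumulative values so the next packet's deltas are
  // computed relative to them.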
  prev_alloc = alloc_row;
  prev_free = free_row;
}

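// Allocations are buffered per sequence and only turned into table rows in
// CommitAllocations, once the interned data needed to resolve their
// callstacks is available.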
void HeapProfileTracker::StoreAllocation(uint32_t seq_id,
                                         SourceAllocation alloc) {
  SequenceState& sequence_state = sequence_state_[seq_id];
  sequence_state.pending_allocs.emplace_back(std::move(alloc));
}

void HeapProfileTracker::CommitAllocations(
    uint32_t seq_id,
    SequenceStackProfileTracker* sequence_stack_profile_tracker,
    const SequenceStackProfileTracker::InternLookup* intern_lookup) {
  SequenceState& sequence_state = sequence_state_[seq_id];
  for (const auto& p : sequence_state.pending_allocs)
    AddAllocation(seq_id, sequence_stack_profile_tracker, p, intern_lookup);
  sequence_state.pending_allocs.clear();
}

void HeapProfileTracker::FinalizeProfile(
    uint32_t seq_id,
    SequenceStackProfileTracker* sequence_stack_profile_tracker,
    const SequenceStackProfileTracker::InternLookup* intern_lookup) {
  CommitAllocations(seq_id, sequence_stack_profile_tracker, intern_lookup);
  sequence_stack_profile_tracker->ClearIndices();
}

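// A sequence that still has buffered allocations at the end of the trace
// never had its profile finalized; surface that as a stat.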
void HeapProfileTracker::NotifyEndOfFile() {
  for (const auto& key_and_sequence_state : sequence_state_) {
    const SequenceState& sequence_state = key_and_sequence_state.second;
    if (!sequence_state.pending_allocs.empty()) {
      context_->storage->IncrementStats(stats::heapprofd_non_finalized_profile);
    }
  }
}

}  // namespace trace_processor
}  // namespace perfetto