/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/trace_processor/event_tracker.h"

#include <math.h>

#include "perfetto/base/utils.h"
#include "src/trace_processor/args_tracker.h"
#include "src/trace_processor/ftrace_descriptors.h"
#include "src/trace_processor/ftrace_utils.h"
#include "src/trace_processor/process_tracker.h"
#include "src/trace_processor/stats.h"
#include "src/trace_processor/trace_processor_context.h"

#include "perfetto/trace/ftrace/ftrace_event.pbzero.h"
#include "perfetto/trace/ftrace/sched.pbzero.h"
namespace perfetto {
namespace trace_processor {

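// Cache the interned strings for the sched_switch event name and its field
// names so they do not have to be re-interned for every sched_switch event.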
EventTracker::EventTracker(TraceProcessorContext* context)
    : context_(context) {
  auto* descriptor = GetMessageDescriptorForId(
      protos::pbzero::FtraceEvent::kSchedSwitchFieldNumber);
  PERFETTO_CHECK(descriptor->max_field_id == kSchedSwitchMaxFieldId);

  for (size_t i = 1; i <= kSchedSwitchMaxFieldId; i++) {
    sched_switch_field_ids_[i] =
        context->storage->InternString(descriptor->fields[i].name);
  }
  sched_switch_id_ = context->storage->InternString(descriptor->name);
}

EventTracker::~EventTracker() = default;

void EventTracker::PushSchedSwitch(uint32_t cpu,
                                   int64_t ts,
                                   uint32_t prev_pid,
                                   base::StringView prev_comm,
                                   int32_t prev_prio,
                                   int64_t prev_state,
                                   uint32_t next_pid,
                                   base::StringView next_comm,
                                   int32_t next_prio) {
  // At this stage all events should be globally timestamp ordered.
  if (ts < prev_timestamp_) {
    PERFETTO_ELOG("sched_switch event out of order by %.4f ms, skipping",
                  (prev_timestamp_ - ts) / 1e6);
    context_->storage->IncrementStats(stats::sched_switch_out_of_order);
    return;
  }
  prev_timestamp_ = ts;
  PERFETTO_DCHECK(cpu < base::kMaxCpus);

  auto* slices = context_->storage->mutable_slices();

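  // Look up (or create) the thread for the incoming task, updating its name
  // from |next_comm|.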
  StringId next_comm_id = context_->storage->InternString(next_comm);
  auto next_utid =
      context_->process_tracker->UpdateThreadName(next_pid, next_comm_id);

  // First use this data to close the previous slice.
  bool prev_pid_match_prev_next_pid = false;
  auto* prev_slice = &pending_sched_per_cpu_[cpu];
  size_t slice_idx = prev_slice->storage_index;
  if (slice_idx < std::numeric_limits<size_t>::max()) {
    prev_pid_match_prev_next_pid = prev_pid == prev_slice->next_pid;
    if (PERFETTO_LIKELY(prev_pid_match_prev_next_pid)) {
      int64_t duration = ts - slices->start_ns()[slice_idx];
      slices->set_duration(slice_idx, duration);

      // We store the state as a uint16 as we only consider values up to 2048
      // when unpacking the information inside; this allows savings of 48 bits
      // per slice.
      slices->set_end_state(slice_idx, ftrace_utils::TaskState(
                                           static_cast<uint16_t>(prev_state)));
    } else {
      // If the pids are not consistent, make a note of this.
      context_->storage->IncrementStats(stats::mismatched_sched_switch_tids);
    }
  }

  // We have to intern prev_comm again because our assumption that
  // this event's |prev_comm| == previous event's |next_comm| does not hold
  // if the thread changed its name while scheduled.
  StringId prev_comm_id = context_->storage->InternString(prev_comm);
  UniqueTid prev_utid =
      context_->process_tracker->UpdateThreadName(prev_pid, prev_comm_id);

  // Push the raw event - this is done as the raw ftrace event codepath does
  // not insert sched_switch.
  auto rid = context_->storage->mutable_raw_events()->AddRawEvent(
      ts, sched_switch_id_, cpu, prev_utid);

  // Note: this ordering is important. The events should be pushed in the same
  // order as the order of fields in the proto; this is used by the raw table
  // to index these events using the field ids.
  using Variadic = TraceStorage::Args::Variadic;
  using SS = protos::pbzero::SchedSwitchFtraceEvent;
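  // Helper which attaches a single sched_switch field to the raw event row,
  // keyed by the interned name of the corresponding proto field.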
  auto add_raw_arg = [this](RowId row_id, int field_num,
                            TraceStorage::Args::Variadic var) {
    StringId key = sched_switch_field_ids_[static_cast<size_t>(field_num)];
    context_->args_tracker->AddArg(row_id, key, key, var);
  };
  add_raw_arg(rid, SS::kPrevCommFieldNumber, Variadic::String(prev_comm_id));
  add_raw_arg(rid, SS::kPrevPidFieldNumber, Variadic::Integer(prev_pid));
  add_raw_arg(rid, SS::kPrevPrioFieldNumber, Variadic::Integer(prev_prio));
  add_raw_arg(rid, SS::kPrevStateFieldNumber, Variadic::Integer(prev_state));
  add_raw_arg(rid, SS::kNextCommFieldNumber, Variadic::String(next_comm_id));
  add_raw_arg(rid, SS::kNextPidFieldNumber, Variadic::Integer(next_pid));
  add_raw_arg(rid, SS::kNextPrioFieldNumber, Variadic::Integer(next_prio));

  // Add the slice for the "next" slice.
  auto next_idx = slices->AddSlice(cpu, ts, 0 /* duration */, next_utid,
                                   ftrace_utils::TaskState(), next_prio);

  // Finally, update the info for the next sched switch on this CPU.
  prev_slice->storage_index = next_idx;
  prev_slice->next_pid = next_pid;
}

RowId EventTracker::PushCounter(int64_t timestamp,
                                double value,
                                StringId name_id,
                                int64_t ref,
                                RefType ref_type,
                                bool resolve_utid_to_upid) {
  if (timestamp < prev_timestamp_) {
    PERFETTO_DLOG("counter event (ts: %" PRId64
                  ") out of order by %.4f ms, skipping",
                  timestamp, (prev_timestamp_ - timestamp) / 1e6);
    context_->storage->IncrementStats(stats::counter_events_out_of_order);
    return kInvalidRowId;
  }
  prev_timestamp_ = timestamp;

  PERFETTO_DCHECK(!resolve_utid_to_upid || ref_type == RefType::kRefUtid);

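  // When |resolve_utid_to_upid| is set, the counter should be keyed by the
  // thread's process (upid). The thread may not have been associated with a
  // process yet, so creating the definition is deferred until
  // FlushPendingEvents(); an invalid definition id is used in the meantime.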
  auto* definitions = context_->storage->mutable_counter_definitions();
  TraceStorage::CounterDefinitions::Id defn_id;
  if (resolve_utid_to_upid) {
    defn_id = TraceStorage::CounterDefinitions::kInvalidId;
  } else {
    defn_id = definitions->AddCounterDefinition(name_id, ref, ref_type);
  }

  auto* counter_values = context_->storage->mutable_counter_values();
  uint32_t idx = counter_values->AddCounterValue(defn_id, timestamp, value);
  if (resolve_utid_to_upid) {
    PendingUpidResolutionCounter pending;
    pending.row = idx;
    pending.utid = static_cast<UniqueTid>(ref);
    pending.name_id = name_id;
    pending_upid_resolution_counter_.emplace_back(pending);
  }
  return TraceStorage::CreateRowId(TableId::kCounterValues,
                                   static_cast<uint32_t>(idx));
}

RowId EventTracker::PushInstant(int64_t timestamp,
                                StringId name_id,
                                double value,
                                int64_t ref,
                                RefType ref_type,
                                bool resolve_utid_to_upid) {
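  // As with counters, instants which should be keyed by process are stored
  // with a placeholder ref of 0 and resolved to a upid in
  // FlushPendingEvents().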
  auto* instants = context_->storage->mutable_instants();
  uint32_t idx;
  if (resolve_utid_to_upid) {
    idx = instants->AddInstantEvent(timestamp, name_id, value, 0,
                                    RefType::kRefUpid);
    PendingUpidResolutionInstant pending;
    pending.row = idx;
    pending.utid = static_cast<UniqueTid>(ref);
    pending_upid_resolution_instant_.emplace_back(pending);
  } else {
    idx = instants->AddInstantEvent(timestamp, name_id, value, ref, ref_type);
  }
  return TraceStorage::CreateRowId(TableId::kInstants,
                                   static_cast<uint32_t>(idx));
}

void EventTracker::FlushPendingEvents() {
  // TODO(lalitm): the day this method is called before end of trace, don't
  // flush the sched events as they will probably be pushed in the next round
  // of ftrace events.
  int64_t end_ts = context_->storage->GetTraceTimestampBoundsNs().second;
  auto* slices = context_->storage->mutable_slices();
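  // Close any sched slice which is still open on each CPU, extending it to
  // the end of the trace and marking the task as still runnable.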
  for (const auto& pending_sched : pending_sched_per_cpu_) {
    size_t row = pending_sched.storage_index;
    if (row == std::numeric_limits<size_t>::max())
      continue;

    int64_t duration = end_ts - slices->start_ns()[row];
    slices->set_duration(row, duration);
    slices->set_end_state(
        row, ftrace_utils::TaskState(ftrace_utils::TaskState::kRunnable));
  }

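  // Resolve the deferred utid -> upid references for counters now that
  // thread/process associations are known, creating the upid-keyed counter
  // definitions.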
  for (const auto& pending_counter : pending_upid_resolution_counter_) {
    const auto& thread = context_->storage->GetThread(pending_counter.utid);
    // TODO(lalitm): having upid == 0 is probably not the correct approach here
    // but it's unclear what may be better.
    UniquePid upid = thread.upid.value_or(0);
    auto id =
        context_->storage->mutable_counter_definitions()->AddCounterDefinition(
            pending_counter.name_id, upid, RefType::kRefUpid);
    context_->storage->mutable_counter_values()->set_counter_id(
        pending_counter.row, id);
  }

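  // Do the same for pending instant events, pointing their ref at the upid.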
  for (const auto& pending_instant : pending_upid_resolution_instant_) {
    const auto& thread = context_->storage->GetThread(pending_instant.utid);
    // TODO(lalitm): having upid == 0 is probably not the correct approach here
    // but it's unclear what may be better.
    UniquePid upid = thread.upid.value_or(0);
    context_->storage->mutable_instants()->set_ref(pending_instant.row, upid);
  }

  pending_sched_per_cpu_ = {};
  pending_upid_resolution_counter_.clear();
  pending_upid_resolution_instant_.clear();
}

}  // namespace trace_processor
}  // namespace perfetto