/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17 #include "src/trace_processor/importers/common/thread_state_tracker.h"
18
19 #include <cstdint>
20 #include <optional>
21
22 #include "src/trace_processor/importers/common/cpu_tracker.h"
23 #include "src/trace_processor/importers/common/process_tracker.h"
24
25 namespace perfetto {
26 namespace trace_processor {
ThreadStateTracker(TraceProcessorContext * context)27 ThreadStateTracker::ThreadStateTracker(TraceProcessorContext* context)
28 : storage_(context->storage.get()),
29 context_(context),
30 running_string_id_(storage_->InternString("Running")),
31 runnable_string_id_(storage_->InternString("R")) {}
32 ThreadStateTracker::~ThreadStateTracker() = default;
33
PushSchedSwitchEvent(int64_t event_ts,uint32_t cpu,UniqueTid prev_utid,StringId prev_state,UniqueTid next_utid)34 void ThreadStateTracker::PushSchedSwitchEvent(int64_t event_ts,
35 uint32_t cpu,
36 UniqueTid prev_utid,
37 StringId prev_state,
38 UniqueTid next_utid) {
39 // Code related to previous utid. If the thread wasn't running before we know
40 // we lost data and should close the slice accordingly.
41 bool data_loss_cond =
42 HasPreviousRowNumbersForUtid(prev_utid) &&
43 !IsRunning(RowNumToRef(prev_row_numbers_for_thread_[prev_utid]->last_row)
44 .state());
45 ClosePendingState(event_ts, prev_utid, data_loss_cond);
46 AddOpenState(event_ts, prev_utid, prev_state);
47
48 // Code related to next utid.
49 // Due to forced migration, it is possible for the same thread to be
50 // scheduled on different CPUs at the same time.
51 // We work around this problem by truncating the previous state to the start
52 // of this state and starting the next state normally. This is why we don't
53 // check whether previous state is running/runnable. See b/186509316 for
54 // details and an example on when this happens.
55 ClosePendingState(event_ts, next_utid, false);
56 AddOpenState(event_ts, next_utid, running_string_id_, cpu);
57 }
58
PushWakingEvent(int64_t event_ts,UniqueTid utid,UniqueTid waker_utid,std::optional<uint16_t> common_flags)59 void ThreadStateTracker::PushWakingEvent(int64_t event_ts,
60 UniqueTid utid,
61 UniqueTid waker_utid,
62 std::optional<uint16_t> common_flags) {
63 // If thread has not had a sched switch event, just open a runnable state.
64 // There's no pending state to close.
65 if (!HasPreviousRowNumbersForUtid(utid)) {
66 AddOpenState(event_ts, utid, runnable_string_id_, std::nullopt, waker_utid,
67 common_flags);
68 return;
69 }
70
71 auto last_row_ref = RowNumToRef(prev_row_numbers_for_thread_[utid]->last_row);
72
73 // Occasionally, it is possible to get a waking event for a thread
74 // which is already in a runnable state. When this happens (or if the thread
75 // is running), we just ignore the waking event. See b/186509316 for details
76 // and an example on when this happens. Only blocked events can be waken up.
77 if (!IsBlocked(last_row_ref.state())) {
78 // If we receive a waking event while we are not blocked, we ignore this
79 // in the |thread_state| table but we track in the |sched_wakeup| table.
80 // The |thread_state_id| in |sched_wakeup| is the current running/runnable
81 // event.
82 std::optional<uint32_t> irq_context =
83 common_flags
84 ? std::make_optional(CommonFlagsToIrqContext(*common_flags))
85 : std::nullopt;
86 storage_->mutable_spurious_sched_wakeup_table()->Insert(
87 {event_ts, prev_row_numbers_for_thread_[utid]->last_row.row_number(),
88 irq_context, utid, waker_utid});
89 return;
90 }
91
92 // Close the sleeping state and open runnable state.
93 ClosePendingState(event_ts, utid, false);
94 AddOpenState(event_ts, utid, runnable_string_id_, std::nullopt, waker_utid,
95 common_flags);
96 }
97
PushNewTaskEvent(int64_t event_ts,UniqueTid utid,UniqueTid waker_utid)98 void ThreadStateTracker::PushNewTaskEvent(int64_t event_ts,
99 UniqueTid utid,
100 UniqueTid waker_utid) {
101 AddOpenState(event_ts, utid, runnable_string_id_, std::nullopt, waker_utid);
102 }
103
PushBlockedReason(UniqueTid utid,std::optional<bool> io_wait,std::optional<StringId> blocked_function)104 void ThreadStateTracker::PushBlockedReason(
105 UniqueTid utid,
106 std::optional<bool> io_wait,
107 std::optional<StringId> blocked_function) {
108 // Return if there is no state, as there is are no previous rows available.
109 if (!HasPreviousRowNumbersForUtid(utid))
110 return;
111
112 // Return if no previous bocked row exists.
113 auto blocked_row_number =
114 prev_row_numbers_for_thread_[utid]->last_blocked_row;
115 if (!blocked_row_number.has_value())
116 return;
117
118 auto row_reference = RowNumToRef(blocked_row_number.value());
119 if (io_wait.has_value()) {
120 row_reference.set_io_wait(*io_wait);
121 }
122 if (blocked_function.has_value()) {
123 row_reference.set_blocked_function(*blocked_function);
124 }
125 }
126
AddOpenState(int64_t ts,UniqueTid utid,StringId state,std::optional<uint16_t> cpu,std::optional<UniqueTid> waker_utid,std::optional<uint16_t> common_flags)127 void ThreadStateTracker::AddOpenState(int64_t ts,
128 UniqueTid utid,
129 StringId state,
130 std::optional<uint16_t> cpu,
131 std::optional<UniqueTid> waker_utid,
132 std::optional<uint16_t> common_flags) {
133 // Ignore the swapper utid because it corresponds to the swapper thread which
134 // doesn't make sense to insert.
135 if (utid == context_->process_tracker->swapper_utid())
136 return;
137
138 // Insert row with unfinished state
139 tables::ThreadStateTable::Row row;
140 row.ts = ts;
141 row.waker_utid = waker_utid;
142 row.dur = -1;
143 row.utid = utid;
144 row.state = state;
145 if (cpu)
146 row.ucpu = context_->cpu_tracker->GetOrCreateCpu(*cpu);
147 if (common_flags.has_value()) {
148 row.irq_context = CommonFlagsToIrqContext(*common_flags);
149 }
150
151 if (waker_utid.has_value() && HasPreviousRowNumbersForUtid(*waker_utid)) {
152 auto waker_row =
153 RowNumToRef(prev_row_numbers_for_thread_[*waker_utid]->last_row);
154
155 // We expect all wakers to be Running. But there are 2 cases where this
156 // might not be true:
157 // 1. At the start of a trace the 'waker CPU' has not yet started
158 // emitting events.
159 // 2. Data loss.
160 if (IsRunning(waker_row.state())) {
161 row.waker_id = std::make_optional(waker_row.id());
162 }
163 }
164
165 auto row_num = storage_->mutable_thread_state_table()->Insert(row).row_number;
166
167 if (utid >= prev_row_numbers_for_thread_.size()) {
168 prev_row_numbers_for_thread_.resize(utid + 1);
169 }
170
171 if (!prev_row_numbers_for_thread_[utid].has_value()) {
172 prev_row_numbers_for_thread_[utid] = RelatedRows{std::nullopt, row_num};
173 }
174
175 if (IsRunning(state)) {
176 prev_row_numbers_for_thread_[utid] = RelatedRows{std::nullopt, row_num};
177 } else if (IsBlocked(state)) {
178 prev_row_numbers_for_thread_[utid] = RelatedRows{row_num, row_num};
179 } else /* if (IsRunnable(state)) */ {
180 prev_row_numbers_for_thread_[utid]->last_row = row_num;
181 }
182 }
183
CommonFlagsToIrqContext(uint32_t common_flags)184 uint32_t ThreadStateTracker::CommonFlagsToIrqContext(uint32_t common_flags) {
185 // If common_flags contains TRACE_FLAG_HARDIRQ | TRACE_FLAG_SOFTIRQ, wakeup
186 // was emitted in interrupt context.
187 // See:
188 // https://cs.android.com/android/kernel/superproject/+/common-android-mainline:common/include/linux/trace_events.h
189 // TODO(rsavitski): we could also include TRACE_FLAG_NMI for a complete
190 // "interrupt context" meaning. But at the moment it's not necessary as this
191 // is used for sched_waking events, which are not emitted from NMI contexts.
192 return common_flags & (0x08 | 0x10) ? 1 : 0;
193 }
194
ClosePendingState(int64_t end_ts,UniqueTid utid,bool data_loss)195 void ThreadStateTracker::ClosePendingState(int64_t end_ts,
196 UniqueTid utid,
197 bool data_loss) {
198 // Discard close if there is no open state to close.
199 if (!HasPreviousRowNumbersForUtid(utid))
200 return;
201
202 auto row_ref = RowNumToRef(prev_row_numbers_for_thread_[utid]->last_row);
203
204 // Update the duration only for states without data loss.
205 if (!data_loss) {
206 row_ref.set_dur(end_ts - row_ref.ts());
207 }
208 }
209
IsRunning(StringId state)210 bool ThreadStateTracker::IsRunning(StringId state) {
211 return state == running_string_id_;
212 }
213
IsRunnable(StringId state)214 bool ThreadStateTracker::IsRunnable(StringId state) {
215 return state == runnable_string_id_;
216 }
217
IsBlocked(StringId state)218 bool ThreadStateTracker::IsBlocked(StringId state) {
219 return !(IsRunnable(state) || IsRunning(state));
220 }
221
222 } // namespace trace_processor
223 } // namespace perfetto
224