/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/trace_processor/importers/fuchsia/fuchsia_trace_tokenizer.h"

#include <cinttypes>
#include <limits>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/string_view.h"
#include "perfetto/trace_processor/trace_blob.h"
#include "src/trace_processor/importers/common/cpu_tracker.h"
#include "src/trace_processor/importers/common/process_tracker.h"
#include "src/trace_processor/importers/common/slice_tracker.h"
#include "src/trace_processor/importers/fuchsia/fuchsia_record.h"
#include "src/trace_processor/importers/fuchsia/fuchsia_trace_parser.h"
#include "src/trace_processor/importers/proto/proto_trace_reader.h"
#include "src/trace_processor/sorter/trace_sorter.h"
#include "src/trace_processor/types/task_state.h"
#include "src/trace_processor/types/trace_processor_context.h"

namespace perfetto {
namespace trace_processor {

namespace {

using fuchsia_trace_utils::ArgValue;

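// Every FXT record starts with a 64-bit header word: bits [0:3] hold the
// record type (one of the constants below) and bits [4:15] hold the record
// length in 64-bit words; see the ReadField calls in Parse() and
// ParseRecord().
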
// Record types
constexpr uint32_t kMetadata = 0;
constexpr uint32_t kInitialization = 1;
constexpr uint32_t kString = 2;
constexpr uint32_t kThread = 3;
constexpr uint32_t kEvent = 4;
constexpr uint32_t kBlob = 5;
constexpr uint32_t kKernelObject = 7;
constexpr uint32_t kSchedulerEvent = 8;

constexpr uint32_t kSchedulerEventLegacyContextSwitch = 0;
constexpr uint32_t kSchedulerEventContextSwitch = 1;
constexpr uint32_t kSchedulerEventThreadWakeup = 2;

// Metadata types
constexpr uint32_t kProviderInfo = 1;
constexpr uint32_t kProviderSection = 2;
constexpr uint32_t kProviderEvent = 3;

// Thread states
constexpr uint32_t kThreadNew = 0;
constexpr uint32_t kThreadRunning = 1;
constexpr uint32_t kThreadSuspended = 2;
constexpr uint32_t kThreadBlocked = 3;
constexpr uint32_t kThreadDying = 4;
constexpr uint32_t kThreadDead = 5;

// Zircon object types
constexpr uint32_t kZxObjTypeProcess = 1;
constexpr uint32_t kZxObjTypeThread = 2;

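// Sentinel weight identifying the idle thread in scheduler events; compared
// against the incoming/outgoing/waking weight args parsed below.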
constexpr int32_t kIdleWeight = std::numeric_limits<int32_t>::min();

} // namespace

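// The interned state strings mirror the ftrace/Linux sched notation used
// elsewhere in trace_processor ("R" runnable, "S" blocked, "T" suspended,
// etc.), so Fuchsia scheduling data can be queried the same way as Linux data.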
FuchsiaTraceTokenizer::FuchsiaTraceTokenizer(TraceProcessorContext* context)
    : context_(context),
      proto_reader_(context),
      running_string_id_(context->storage->InternString("Running")),
      runnable_string_id_(context->storage->InternString("R")),
      preempted_string_id_(context->storage->InternString("R+")),
      waking_string_id_(context->storage->InternString("W")),
      blocked_string_id_(context->storage->InternString("S")),
      suspended_string_id_(context->storage->InternString("T")),
      exit_dying_string_id_(context->storage->InternString("Z")),
      exit_dead_string_id_(context->storage->InternString("X")),
      incoming_weight_id_(context->storage->InternString("incoming_weight")),
      outgoing_weight_id_(context->storage->InternString("outgoing_weight")),
      weight_id_(context->storage->InternString("weight")),
      process_id_(context->storage->InternString("process")) {
  RegisterProvider(0, "");
}

FuchsiaTraceTokenizer::~FuchsiaTraceTokenizer() = default;

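// A minimal usage sketch (illustrative only; in trace_processor the tokenizer
// is driven by the trace reader machinery rather than called directly):
//
//   FuchsiaTraceTokenizer tokenizer(context);
//   // Chunk boundaries may split records; Parse() buffers the remainder.
//   util::Status status =
//       tokenizer.Parse(TraceBlobView(TraceBlob::CopyFrom(data, size)));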
util::Status FuchsiaTraceTokenizer::Parse(TraceBlobView blob) {
  size_t size = blob.size();

  // The relevant internal state is |leftover_bytes_|. Each call to Parse
  // should maintain the following properties, unless a fatal error occurs, in
  // which case it should return an error status and no assumptions should be
  // made about the resulting internal state:
  //
  // 1) Every byte passed to |Parse| has either been passed to |ParseRecord| or
  //    is present in |leftover_bytes_|, but not both.
  // 2) |leftover_bytes_| does not contain a complete record.
  //
  // Parse is responsible for creating the "full" |TraceBlobView|s, which own
  // the underlying data. Generally, there will be one such view. However, if
  // there is a record that started in an earlier call, then a new buffer is
  // created here to make the bytes in that record contiguous.
  //
  // Because some of the bytes in |blob| might belong to the record starting in
  // |leftover_bytes_|, we track the offset at which the following record will
  // start.
  size_t byte_offset = 0;

  // Look for a record starting with the leftover bytes.
  if (leftover_bytes_.size() + size < 8) {
    // Even with the new bytes, we can't read the header of the next record,
    // so just add the new bytes to |leftover_bytes_| and return.
    leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
                           blob.data() + size);
    return util::OkStatus();
  }
  if (!leftover_bytes_.empty()) {
    // There is a record starting from leftover bytes.
    if (leftover_bytes_.size() < 8) {
      // Header was previously incomplete, but we have enough now.
      // Copy bytes into |leftover_bytes_| so that the whole header is present,
      // and update |byte_offset| and |size| accordingly.
      size_t needed_bytes = 8 - leftover_bytes_.size();
      leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
                             blob.data() + needed_bytes);
      byte_offset += needed_bytes;
      size -= needed_bytes;
    }
    // Read the record length from the header.
    uint64_t header =
        *reinterpret_cast<const uint64_t*>(leftover_bytes_.data());
    uint32_t record_len_words =
        fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15);
    uint32_t record_len_bytes = record_len_words * sizeof(uint64_t);

    // From property (2) above, |leftover_bytes_| must have had less than a
    // full record to start with. We padded |leftover_bytes_| out to read the
    // header, so it may now be a full record (in the case that the record
    // consists of only the header word), but it still cannot have any extra
    // bytes.
    PERFETTO_DCHECK(leftover_bytes_.size() <= record_len_bytes);
    size_t missing_bytes = record_len_bytes - leftover_bytes_.size();

    if (missing_bytes <= size) {
      // We have enough bytes to complete the partial record. Create a new
      // buffer for that record.
      TraceBlob buf = TraceBlob::Allocate(record_len_bytes);
      memcpy(buf.data(), leftover_bytes_.data(), leftover_bytes_.size());
      memcpy(buf.data() + leftover_bytes_.size(), blob.data() + byte_offset,
             missing_bytes);
      byte_offset += missing_bytes;
      size -= missing_bytes;
      leftover_bytes_.clear();
      ParseRecord(TraceBlobView(std::move(buf)));
    } else {
      // There are not enough bytes for the full record. Add all the bytes we
      // have to |leftover_bytes_| and wait for more.
      leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
                             blob.data() + byte_offset + size);
      return util::OkStatus();
    }
  }

  TraceBlobView full_view = blob.slice_off(byte_offset, size);

  // |record_offset| is the number of bytes past |byte_offset| at which the
  // record under consideration starts. As a result, it must always be in the
  // range [0, size-8]. Any larger offset means we don't have enough bytes for
  // the header.
  size_t record_offset = 0;
  while (record_offset + 8 <= size) {
    uint64_t header =
        *reinterpret_cast<const uint64_t*>(full_view.data() + record_offset);
    uint32_t record_len_bytes =
        fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15) *
        sizeof(uint64_t);
    if (record_len_bytes == 0)
      return util::ErrStatus("Unexpected record of size 0");

    if (record_offset + record_len_bytes > size)
      break;

    TraceBlobView record =
        full_view.slice_off(record_offset, record_len_bytes);
    ParseRecord(std::move(record));

    record_offset += record_len_bytes;
  }

  leftover_bytes_.insert(leftover_bytes_.end(),
                         full_view.data() + record_offset,
                         full_view.data() + size);

  // Forward any perfetto trace data accumulated from kBlob records to the
  // embedded proto reader.
  TraceBlob perfetto_blob =
      TraceBlob::CopyFrom(proto_trace_data_.data(), proto_trace_data_.size());
  proto_trace_data_.clear();

  return proto_reader_.Parse(TraceBlobView(std::move(perfetto_blob)));
}

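// Maps the outgoing thread state of a context switch to the equivalent
// interned sched end-state string; unrecognized states map to kNullStringId.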
StringId FuchsiaTraceTokenizer::IdForOutgoingThreadState(uint32_t state) {
  switch (state) {
    case kThreadNew:
    case kThreadRunning:
      return runnable_string_id_;
    case kThreadBlocked:
      return blocked_string_id_;
    case kThreadSuspended:
      return suspended_string_id_;
    case kThreadDying:
      return exit_dying_string_id_;
    case kThreadDead:
      return exit_dead_string_id_;
    default:
      return kNullStringId;
  }
}

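// Handles the outgoing side of a context switch: closes the open sched slice
// and thread-state rows for |thread| with the duration since its last event,
// then opens a new state row recording the outgoing state on |cpu|.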
void FuchsiaTraceTokenizer::SwitchFrom(Thread* thread,
                                       int64_t ts,
                                       uint32_t cpu,
                                       uint32_t thread_state) {
  TraceStorage* storage = context_->storage.get();
  ProcessTracker* procs = context_->process_tracker.get();

  StringId state = IdForOutgoingThreadState(thread_state);
  UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(thread->info.tid),
                                       static_cast<uint32_t>(thread->info.pid));

  const auto duration = ts - thread->last_ts;
  thread->last_ts = ts;

  // Close the slice record if one is open for this thread.
  if (thread->last_slice_row.has_value()) {
    auto row_ref = thread->last_slice_row->ToRowReference(
        storage->mutable_sched_slice_table());
    row_ref.set_dur(duration);
    row_ref.set_end_state(state);
    thread->last_slice_row.reset();
  }

  // Close the state record if one is open for this thread.
  if (thread->last_state_row.has_value()) {
    auto row_ref = thread->last_state_row->ToRowReference(
        storage->mutable_thread_state_table());
    row_ref.set_dur(duration);
    thread->last_state_row.reset();
  }

  // Open a new state record to track the duration of the outgoing state.
  tables::ThreadStateTable::Row state_row;
  state_row.ts = ts;
  state_row.ucpu = context_->cpu_tracker->GetOrCreateCpu(cpu);
  state_row.dur = -1;
  state_row.state = state;
  state_row.utid = utid;
  auto state_row_number =
      storage->mutable_thread_state_table()->Insert(state_row).row_number;
  thread->last_state_row = state_row_number;
}

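// Handles the incoming side of a context switch: closes the open state row
// for |thread|, then opens a "Running" sched slice and state row on |cpu|.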
void FuchsiaTraceTokenizer::SwitchTo(Thread* thread,
                                     int64_t ts,
                                     uint32_t cpu,
                                     int32_t weight) {
  TraceStorage* storage = context_->storage.get();
  ProcessTracker* procs = context_->process_tracker.get();

  UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(thread->info.tid),
                                       static_cast<uint32_t>(thread->info.pid));

  const auto duration = ts - thread->last_ts;
  thread->last_ts = ts;

  // Close the state record if one is open for this thread.
  if (thread->last_state_row.has_value()) {
    auto row_ref = thread->last_state_row->ToRowReference(
        storage->mutable_thread_state_table());
    row_ref.set_dur(duration);
    thread->last_state_row.reset();
  }

  auto ucpu = context_->cpu_tracker->GetOrCreateCpu(cpu);
  // Open a new slice record for this thread.
  tables::SchedSliceTable::Row slice_row;
  slice_row.ts = ts;
  slice_row.ucpu = ucpu;
  slice_row.dur = -1;
  slice_row.utid = utid;
  slice_row.priority = weight;
  auto slice_row_number =
      storage->mutable_sched_slice_table()->Insert(slice_row).row_number;
  thread->last_slice_row = slice_row_number;

  // Open a new state record for this thread.
  tables::ThreadStateTable::Row state_row;
  state_row.ts = ts;
  state_row.ucpu = ucpu;
  state_row.dur = -1;
  state_row.state = running_string_id_;
  state_row.utid = utid;
  auto state_row_number =
      storage->mutable_thread_state_table()->Insert(state_row).row_number;
  thread->last_state_row = state_row_number;
}

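// Handles a thread wakeup: closes the open state row for |thread| and opens a
// new waking ("W") state row on |cpu|.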
void FuchsiaTraceTokenizer::Wake(Thread* thread, int64_t ts, uint32_t cpu) {
  TraceStorage* storage = context_->storage.get();
  ProcessTracker* procs = context_->process_tracker.get();

  UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(thread->info.tid),
                                       static_cast<uint32_t>(thread->info.pid));

  const auto duration = ts - thread->last_ts;
  thread->last_ts = ts;

  // Close the state record if one is open for this thread.
  if (thread->last_state_row.has_value()) {
    auto row_ref = thread->last_state_row->ToRowReference(
        storage->mutable_thread_state_table());
    row_ref.set_dur(duration);
    thread->last_state_row.reset();
  }

  // Open a new state record for this thread.
  tables::ThreadStateTable::Row state_row;
  state_row.ts = ts;
  state_row.ucpu = context_->cpu_tracker->GetOrCreateCpu(cpu);
  state_row.dur = -1;
  state_row.state = waking_string_id_;
  state_row.utid = utid;
  auto state_row_number =
      storage->mutable_thread_state_table()->Insert(state_row).row_number;
  thread->last_state_row = state_row_number;
}

// Most record types are read and recorded in |TraceStorage| here directly.
// Event records are sorted by timestamp before processing, so instead of
// recording them in |TraceStorage| they are given to |TraceSorter|. In order
// to facilitate the parsing after sorting, a small view of the provider's
// string and thread tables is passed alongside the record. See
// |FuchsiaProviderView|.
void FuchsiaTraceTokenizer::ParseRecord(TraceBlobView tbv) {
  TraceStorage* storage = context_->storage.get();
  ProcessTracker* procs = context_->process_tracker.get();
  TraceSorter* sorter = context_->sorter.get();

  fuchsia_trace_utils::RecordCursor cursor(tbv.data(), tbv.length());
  uint64_t header;
  if (!cursor.ReadUint64(&header)) {
    context_->storage->IncrementStats(stats::fuchsia_invalid_event);
    return;
  }

  uint32_t record_type = fuchsia_trace_utils::ReadField<uint32_t>(header, 0, 3);

  // All non-metadata events require current_provider_ to be set.
  if (record_type != kMetadata && current_provider_ == nullptr) {
    context_->storage->IncrementStats(stats::fuchsia_invalid_event);
    return;
  }

  // Adapters for FuchsiaTraceParser::ParseArgs.
  const auto intern_string = [this](base::StringView string) {
    return context_->storage->InternString(string);
  };
  const auto get_string = [this](uint16_t index) {
    return current_provider_->GetString(index);
  };

  switch (record_type) {
    case kMetadata: {
      uint32_t metadata_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
      switch (metadata_type) {
        case kProviderInfo: {
          uint32_t provider_id =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
          uint32_t name_len =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 52, 59);
          base::StringView name_view;
          if (!cursor.ReadInlineString(name_len, &name_view)) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
          RegisterProvider(provider_id, name_view.ToStdString());
          break;
        }
        case kProviderSection: {
          uint32_t provider_id =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
          current_provider_ = providers_[provider_id].get();
          break;
        }
        case kProviderEvent: {
          // TODO(bhamrick): Handle buffer fill events
          PERFETTO_DLOG(
              "Ignoring provider event. Events may have been dropped");
          break;
        }
      }
      break;
    }
    case kInitialization: {
      if (!cursor.ReadUint64(&current_provider_->ticks_per_second)) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }
      break;
    }
    case kString: {
      uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 30);
      if (index != 0) {
        uint32_t len = fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 46);
        base::StringView s;
        if (!cursor.ReadInlineString(len, &s)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
        StringId id = storage->InternString(s);

        current_provider_->string_table[index] = id;
      }
      break;
    }
    case kThread: {
      uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      if (index != 0) {
        FuchsiaThreadInfo tinfo;
        if (!cursor.ReadInlineThread(&tinfo)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }

        current_provider_->thread_table[index] = tinfo;
      }
      break;
    }
    case kEvent: {
      uint32_t thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 31);
      uint32_t cat_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 47);
      uint32_t name_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 48, 63);

      // Build the FuchsiaRecord for the event, i.e. extract the thread
      // information if not inline, and any non-inline strings (name, category
      // for now, arg names and string values in the future).
      FuchsiaRecord record(std::move(tbv));
      record.set_ticks_per_second(current_provider_->ticks_per_second);

      uint64_t ticks;
      if (!cursor.ReadUint64(&ticks)) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }
      int64_t ts = fuchsia_trace_utils::TicksToNs(
          ticks, current_provider_->ticks_per_second);
      if (ts < 0) {
        storage->IncrementStats(stats::fuchsia_timestamp_overflow);
        return;
      }

      if (fuchsia_trace_utils::IsInlineThread(thread_ref)) {
        // Skip over inline thread
        cursor.ReadInlineThread(nullptr);
      } else {
        record.InsertThread(thread_ref,
                            current_provider_->GetThread(thread_ref));
      }

      if (fuchsia_trace_utils::IsInlineString(cat_ref)) {
        // Skip over inline string
        cursor.ReadInlineString(cat_ref, nullptr);
      } else {
        record.InsertString(cat_ref, current_provider_->GetString(cat_ref));
      }

      if (fuchsia_trace_utils::IsInlineString(name_ref)) {
        // Skip over inline string
        cursor.ReadInlineString(name_ref, nullptr);
      } else {
        record.InsertString(name_ref, current_provider_->GetString(name_ref));
      }

      uint32_t n_args =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 23);
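      // Each argument starts with its own header word: bits [0:3] arg type,
      // bits [4:15] arg size in words, bits [16:31] arg name ref. The cursor
      // is advanced by the declared size, so unhandled arg types are skipped.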
      for (uint32_t i = 0; i < n_args; i++) {
        const size_t arg_base = cursor.WordIndex();
        uint64_t arg_header;
        if (!cursor.ReadUint64(&arg_header)) {
          storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
        uint32_t arg_type =
            fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 0, 3);
        uint32_t arg_size_words =
            fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 4, 15);
        uint32_t arg_name_ref =
            fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 16, 31);

        if (fuchsia_trace_utils::IsInlineString(arg_name_ref)) {
          // Skip over inline string
          cursor.ReadInlineString(arg_name_ref, nullptr);
        } else {
          record.InsertString(arg_name_ref,
                              current_provider_->GetString(arg_name_ref));
        }

        if (arg_type == ArgValue::ArgType::kString) {
          uint32_t arg_value_ref =
              fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 32, 47);
          if (fuchsia_trace_utils::IsInlineString(arg_value_ref)) {
            // Skip over inline string
            cursor.ReadInlineString(arg_value_ref, nullptr);
          } else {
            record.InsertString(arg_value_ref,
                                current_provider_->GetString(arg_value_ref));
          }
        }

        cursor.SetWordIndex(arg_base + arg_size_words);
      }

      sorter->PushFuchsiaRecord(ts, std::move(record));
      break;
    }
    case kBlob: {
      constexpr uint32_t kPerfettoBlob = 3;
      uint32_t blob_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 48, 55);
      if (blob_type == kPerfettoBlob) {
        FuchsiaRecord record(std::move(tbv));
        uint32_t blob_size =
            fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 46);
        uint32_t name_ref =
            fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 31);

        // We don't need the name, but we still need to parse it in case it is
        // inline.
        if (fuchsia_trace_utils::IsInlineString(name_ref)) {
          base::StringView name_view;
          if (!cursor.ReadInlineString(name_ref, &name_view)) {
            storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
        }

        // Append the blob to the embedded perfetto bytes -- we'll parse them
        // all after the main pass is done.
        if (!cursor.ReadBlob(blob_size, proto_trace_data_)) {
          storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
      }
      break;
    }
    case kKernelObject: {
      uint32_t obj_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      uint32_t name_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 39);

      uint64_t obj_id;
      if (!cursor.ReadUint64(&obj_id)) {
        storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }

      StringId name = StringId();
      if (fuchsia_trace_utils::IsInlineString(name_ref)) {
        base::StringView name_view;
        if (!cursor.ReadInlineString(name_ref, &name_view)) {
          storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
        name = storage->InternString(name_view);
      } else {
        name = current_provider_->GetString(name_ref);
      }

      switch (obj_type) {
        case kZxObjTypeProcess: {
          // Note: Fuchsia pids/tids are 64 bits but Perfetto's tables only
          // support 32 bits. This is usually not an issue except for
          // artificial koids, which have the 2^63 bit set. These are used for
          // things such as virtual threads.
          procs->SetProcessMetadata(
              static_cast<uint32_t>(obj_id), std::optional<uint32_t>(),
              base::StringView(storage->GetString(name)), base::StringView());
          break;
        }
        case kZxObjTypeThread: {
          uint32_t n_args =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 40, 43);

          auto maybe_args = FuchsiaTraceParser::ParseArgs(
              cursor, n_args, intern_string, get_string);
          if (!maybe_args.has_value()) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }

          uint64_t pid = 0;
          for (const auto& arg : *maybe_args) {
            if (arg.name == process_id_) {
              if (arg.value.Type() != ArgValue::ArgType::kKoid) {
                storage->IncrementStats(stats::fuchsia_invalid_event);
                return;
              }
              pid = arg.value.Koid();
            }
          }

          Thread& thread = GetThread(obj_id);
          thread.info.pid = pid;

          UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(obj_id),
                                               static_cast<uint32_t>(pid));
          storage->mutable_thread_table()->mutable_name()->Set(utid, name);
          break;
        }
        default: {
          PERFETTO_DLOG("Skipping Kernel Object record with type %" PRIu32,
                        obj_type);
          break;
        }
      }
      break;
    }
    case kSchedulerEvent: {
      // Context switch records come in order, so they do not need to go
      // through TraceSorter.
      uint32_t event_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 60, 63);
      switch (event_type) {
        case kSchedulerEventLegacyContextSwitch: {
          uint32_t cpu =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
          uint32_t outgoing_state =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 27);
          uint32_t outgoing_thread_ref =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 28, 35);
          int32_t outgoing_priority =
              fuchsia_trace_utils::ReadField<int32_t>(header, 44, 51);
          uint32_t incoming_thread_ref =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 36, 43);
          int32_t incoming_priority =
              fuchsia_trace_utils::ReadField<int32_t>(header, 52, 59);

          int64_t ts;
          if (!cursor.ReadTimestamp(current_provider_->ticks_per_second,
                                    &ts)) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
          if (ts == -1) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }

          FuchsiaThreadInfo outgoing_thread_info;
          if (fuchsia_trace_utils::IsInlineThread(outgoing_thread_ref)) {
            if (!cursor.ReadInlineThread(&outgoing_thread_info)) {
              context_->storage->IncrementStats(stats::fuchsia_invalid_event);
              return;
            }
          } else {
            outgoing_thread_info =
                current_provider_->GetThread(outgoing_thread_ref);
          }
          Thread& outgoing_thread = GetThread(outgoing_thread_info.tid);

          FuchsiaThreadInfo incoming_thread_info;
          if (fuchsia_trace_utils::IsInlineThread(incoming_thread_ref)) {
            if (!cursor.ReadInlineThread(&incoming_thread_info)) {
              context_->storage->IncrementStats(stats::fuchsia_invalid_event);
              return;
            }
          } else {
            incoming_thread_info =
                current_provider_->GetThread(incoming_thread_ref);
          }
          Thread& incoming_thread = GetThread(incoming_thread_info.tid);

          // Idle threads are identified by pid == 0 and priority == 0.
          const bool incoming_is_idle =
              incoming_thread.info.pid == 0 && incoming_priority == 0;
          const bool outgoing_is_idle =
              outgoing_thread.info.pid == 0 && outgoing_priority == 0;

          // Handle switching away from the currently running thread.
          if (!outgoing_is_idle) {
            SwitchFrom(&outgoing_thread, ts, cpu, outgoing_state);
          }

          // Handle switching to the new currently running thread.
          if (!incoming_is_idle) {
            SwitchTo(&incoming_thread, ts, cpu, incoming_priority);
          }
          break;
        }
        case kSchedulerEventContextSwitch: {
          const uint32_t argument_count =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
          const uint32_t cpu =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 35);
          const uint32_t outgoing_state =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 36, 39);

          int64_t ts;
          if (!cursor.ReadTimestamp(current_provider_->ticks_per_second,
                                    &ts)) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
          if (ts < 0) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }

          uint64_t outgoing_tid;
          if (!cursor.ReadUint64(&outgoing_tid)) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
          Thread& outgoing_thread = GetThread(outgoing_tid);

          uint64_t incoming_tid;
          if (!cursor.ReadUint64(&incoming_tid)) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
          Thread& incoming_thread = GetThread(incoming_tid);

          auto maybe_args = FuchsiaTraceParser::ParseArgs(
              cursor, argument_count, intern_string, get_string);
          if (!maybe_args.has_value()) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }

          int32_t incoming_weight = 0;
          int32_t outgoing_weight = 0;

          for (const auto& arg : *maybe_args) {
            if (arg.name == incoming_weight_id_) {
              if (arg.value.Type() != ArgValue::ArgType::kInt32) {
                context_->storage->IncrementStats(
                    stats::fuchsia_invalid_event);
                return;
              }
              incoming_weight = arg.value.Int32();
            } else if (arg.name == outgoing_weight_id_) {
              if (arg.value.Type() != ArgValue::ArgType::kInt32) {
                context_->storage->IncrementStats(
                    stats::fuchsia_invalid_event);
                return;
              }
              outgoing_weight = arg.value.Int32();
            }
          }

          const bool incoming_is_idle = incoming_weight == kIdleWeight;
          const bool outgoing_is_idle = outgoing_weight == kIdleWeight;

          // Handle switching away from the currently running thread.
          if (!outgoing_is_idle) {
            SwitchFrom(&outgoing_thread, ts, cpu, outgoing_state);
          }

          // Handle switching to the new currently running thread.
          if (!incoming_is_idle) {
            SwitchTo(&incoming_thread, ts, cpu, incoming_weight);
          }
          break;
        }
        case kSchedulerEventThreadWakeup: {
          const uint32_t argument_count =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
          const uint32_t cpu =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 35);

          int64_t ts;
          if (!cursor.ReadTimestamp(current_provider_->ticks_per_second,
                                    &ts)) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
          if (ts < 0) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }

          uint64_t waking_tid;
          if (!cursor.ReadUint64(&waking_tid)) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
          Thread& waking_thread = GetThread(waking_tid);

          auto maybe_args = FuchsiaTraceParser::ParseArgs(
              cursor, argument_count, intern_string, get_string);
          if (!maybe_args.has_value()) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }

          int32_t waking_weight = 0;

          for (const auto& arg : *maybe_args) {
            if (arg.name == weight_id_) {
              if (arg.value.Type() != ArgValue::ArgType::kInt32) {
                context_->storage->IncrementStats(
                    stats::fuchsia_invalid_event);
                return;
              }
              waking_weight = arg.value.Int32();
            }
          }

          const bool waking_is_idle = waking_weight == kIdleWeight;
          if (!waking_is_idle) {
            Wake(&waking_thread, ts, cpu);
          }
          break;
        }
        default:
          PERFETTO_DLOG("Skipping unknown scheduler event type %" PRIu32,
                        event_type);
          break;
      }

      break;
    }
    default: {
      PERFETTO_DLOG("Skipping record of unknown type %" PRIu32, record_type);
      break;
    }
  }
}

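// Creates (or replaces) the ProviderInfo slot for |provider_id| and makes it
// current, so subsequent string/thread refs resolve against the new provider.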
void FuchsiaTraceTokenizer::RegisterProvider(uint32_t provider_id,
                                             std::string name) {
  std::unique_ptr<ProviderInfo> provider(new ProviderInfo());
  provider->name = name;
  current_provider_ = provider.get();
  providers_[provider_id] = std::move(provider);
}

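// Nothing to flush at end of file: embedded perfetto data is forwarded at the
// end of every Parse() call, and any trailing bytes in |leftover_bytes_| can
// only form an incomplete record, which is dropped.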
void FuchsiaTraceTokenizer::NotifyEndOfFile() {}

} // namespace trace_processor
} // namespace perfetto