1 /*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "src/trace_processor/importers/fuchsia/fuchsia_trace_tokenizer.h"
18
19 #include <cstddef>
20 #include <cstdint>
21 #include <cstring>
22 #include <limits>
23 #include <memory>
24 #include <optional>
25 #include <utility>
26
27 #include "perfetto/base/logging.h"
28 #include "perfetto/base/status.h"
29 #include "perfetto/ext/base/string_view.h"
30 #include "perfetto/trace_processor/status.h"
31 #include "perfetto/trace_processor/trace_blob.h"
32 #include "perfetto/trace_processor/trace_blob_view.h"
33 #include "src/trace_processor/importers/common/cpu_tracker.h"
34 #include "src/trace_processor/importers/common/process_tracker.h"
35 #include "src/trace_processor/importers/common/slice_tracker.h"
36 #include "src/trace_processor/importers/fuchsia/fuchsia_record.h"
37 #include "src/trace_processor/importers/fuchsia/fuchsia_trace_parser.h"
38 #include "src/trace_processor/importers/fuchsia/fuchsia_trace_utils.h"
39 #include "src/trace_processor/importers/proto/proto_trace_reader.h"
40 #include "src/trace_processor/sorter/trace_sorter.h"
41 #include "src/trace_processor/storage/stats.h"
42 #include "src/trace_processor/storage/trace_storage.h"
43 #include "src/trace_processor/tables/sched_tables_py.h"
44 #include "src/trace_processor/types/trace_processor_context.h"
45
46 namespace perfetto::trace_processor {
47
namespace {

using fuchsia_trace_utils::ArgValue;

// Record/metadata/event type constants below are wire-format values from the
// Fuchsia trace format (FXT). See
// https://fuchsia.dev/fuchsia-src/reference/tracing/trace-format
// Do not change them: they must match what Fuchsia providers emit.

// Record types
constexpr uint32_t kMetadata = 0;
constexpr uint32_t kInitialization = 1;
constexpr uint32_t kString = 2;
constexpr uint32_t kThread = 3;
constexpr uint32_t kEvent = 4;
constexpr uint32_t kBlob = 5;
constexpr uint32_t kKernelObject = 7;
constexpr uint32_t kSchedulerEvent = 8;

// Scheduler event sub-types (field in the kSchedulerEvent record header).
constexpr uint32_t kSchedulerEventLegacyContextSwitch = 0;
constexpr uint32_t kSchedulerEventContextSwitch = 1;
constexpr uint32_t kSchedulerEventThreadWakeup = 2;

// Metadata types
constexpr uint32_t kProviderInfo = 1;
constexpr uint32_t kProviderSection = 2;
constexpr uint32_t kProviderEvent = 3;

// Zircon object types
constexpr uint32_t kZxObjTypeProcess = 1;
constexpr uint32_t kZxObjTypeThread = 2;

}  // namespace
76
// Provider 0 is registered eagerly with an empty name so that
// |current_provider_| is non-null even if records arrive before any
// ProviderInfo metadata record. |process_id_| caches the interned "process"
// arg name used to extract pids from kernel-object thread records.
FuchsiaTraceTokenizer::FuchsiaTraceTokenizer(TraceProcessorContext* context)
    : context_(context),
      proto_reader_(context),
      process_id_(context->storage->InternString("process")) {
  RegisterProvider(0, "");
}
83
// Out-of-line so that the unique_ptr members' destructors are instantiated
// here, where their pointees are complete types.
FuchsiaTraceTokenizer::~FuchsiaTraceTokenizer() = default;
85
Parse(TraceBlobView blob)86 base::Status FuchsiaTraceTokenizer::Parse(TraceBlobView blob) {
87 size_t size = blob.size();
88
89 // The relevant internal state is |leftover_bytes_|. Each call to Parse should
90 // maintain the following properties, unless a fatal error occurs in which
91 // case it should return false and no assumptions should be made about the
92 // resulting internal state:
93 //
94 // 1) Every byte passed to |Parse| has either been passed to |ParseRecord| or
95 // is present in |leftover_bytes_|, but not both.
96 // 2) |leftover_bytes_| does not contain a complete record.
97 //
98 // Parse is responsible for creating the "full" |TraceBlobView|s, which own
99 // the underlying data. Generally, there will be one such view. However, if
100 // there is a record that started in an earlier call, then a new buffer is
101 // created here to make the bytes in that record contiguous.
102 //
103 // Because some of the bytes in |data| might belong to the record starting in
104 // |leftover_bytes_|, we track the offset at which the following record will
105 // start.
106 size_t byte_offset = 0;
107
108 // Look for a record starting with the leftover bytes.
109 if (leftover_bytes_.size() + size < 8) {
110 // Even with the new bytes, we can't even read the header of the next
111 // record, so just add the new bytes to |leftover_bytes_| and return.
112 leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
113 blob.data() + size);
114 return base::OkStatus();
115 }
116 if (!leftover_bytes_.empty()) {
117 // There is a record starting from leftover bytes.
118 if (leftover_bytes_.size() < 8) {
119 // Header was previously incomplete, but we have enough now.
120 // Copy bytes into |leftover_bytes_| so that the whole header is present,
121 // and update |byte_offset| and |size| accordingly.
122 size_t needed_bytes = 8 - leftover_bytes_.size();
123 leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
124 blob.data() + needed_bytes);
125 byte_offset += needed_bytes;
126 size -= needed_bytes;
127 }
128 // Read the record length from the header.
129 uint64_t header =
130 *reinterpret_cast<const uint64_t*>(leftover_bytes_.data());
131 uint32_t record_len_words =
132 fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15);
133 uint32_t record_len_bytes = record_len_words * sizeof(uint64_t);
134
135 // From property (2) above, leftover_bytes_ must have had less than a full
136 // record to start with. We padded leftover_bytes_ out to read the header,
137 // so it may now be a full record (in the case that the record consists of
138 // only the header word), but it still cannot have any extra bytes.
139 PERFETTO_DCHECK(leftover_bytes_.size() <= record_len_bytes);
140 size_t missing_bytes = record_len_bytes - leftover_bytes_.size();
141
142 if (missing_bytes <= size) {
143 // We have enough bytes to complete the partial record. Create a new
144 // buffer for that record.
145 TraceBlob buf = TraceBlob::Allocate(record_len_bytes);
146 memcpy(buf.data(), leftover_bytes_.data(), leftover_bytes_.size());
147 memcpy(buf.data() + leftover_bytes_.size(), blob.data() + byte_offset,
148 missing_bytes);
149 byte_offset += missing_bytes;
150 size -= missing_bytes;
151 leftover_bytes_.clear();
152 ParseRecord(TraceBlobView(std::move(buf)));
153 } else {
154 // There are not enough bytes for the full record. Add all the bytes we
155 // have to leftover_bytes_ and wait for more.
156 leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
157 blob.data() + byte_offset + size);
158 return base::OkStatus();
159 }
160 }
161
162 TraceBlobView full_view = blob.slice_off(byte_offset, size);
163
164 // |record_offset| is a number of bytes past |byte_offset| where the record
165 // under consideration starts. As a result, it must always be in the range [0,
166 // size-8]. Any larger offset means we don't have enough bytes for the header.
167 size_t record_offset = 0;
168 while (record_offset + 8 <= size) {
169 uint64_t header =
170 *reinterpret_cast<const uint64_t*>(full_view.data() + record_offset);
171 uint32_t record_len_bytes =
172 fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15) *
173 sizeof(uint64_t);
174 if (record_len_bytes == 0)
175 return base::ErrStatus("Unexpected record of size 0");
176
177 if (record_offset + record_len_bytes > size)
178 break;
179
180 TraceBlobView record = full_view.slice_off(record_offset, record_len_bytes);
181 ParseRecord(std::move(record));
182
183 record_offset += record_len_bytes;
184 }
185
186 leftover_bytes_.insert(leftover_bytes_.end(),
187 full_view.data() + record_offset,
188 full_view.data() + size);
189
190 TraceBlob perfetto_blob =
191 TraceBlob::CopyFrom(proto_trace_data_.data(), proto_trace_data_.size());
192 proto_trace_data_.clear();
193
194 return proto_reader_.Parse(TraceBlobView(std::move(perfetto_blob)));
195 }
196
197 // Most record types are read and recorded in |TraceStorage| here directly.
198 // Event records are sorted by timestamp before processing, so instead of
199 // recording them in |TraceStorage| they are given to |TraceSorter|. In order to
200 // facilitate the parsing after sorting, a small view of the provider's string
201 // and thread tables is passed alongside the record. See |FuchsiaProviderView|.
ParseRecord(TraceBlobView tbv)202 void FuchsiaTraceTokenizer::ParseRecord(TraceBlobView tbv) {
203 TraceStorage* storage = context_->storage.get();
204 ProcessTracker* procs = context_->process_tracker.get();
205 TraceSorter* sorter = context_->sorter.get();
206
207 fuchsia_trace_utils::RecordCursor cursor(tbv.data(), tbv.length());
208 uint64_t header;
209 if (!cursor.ReadUint64(&header)) {
210 storage->IncrementStats(stats::fuchsia_record_read_error);
211 return;
212 }
213
214 auto record_type = fuchsia_trace_utils::ReadField<uint32_t>(header, 0, 3);
215
216 // All non-metadata events require current_provider_ to be set.
217 if (record_type != kMetadata && current_provider_ == nullptr) {
218 storage->IncrementStats(stats::fuchsia_invalid_event);
219 return;
220 }
221
222 // Adapters for FuchsiaTraceParser::ParseArgs.
223 const auto intern_string = [this](base::StringView string) {
224 return context_->storage->InternString(string);
225 };
226 const auto get_string = [this](uint16_t index) {
227 StringId id = current_provider_->GetString(index);
228 if (id == StringId::Null()) {
229 context_->storage->IncrementStats(stats::fuchsia_invalid_string_ref);
230 }
231 return id;
232 };
233
234 const auto insert_args = [this](uint32_t n_args,
235 fuchsia_trace_utils::RecordCursor& cursor,
236 FuchsiaRecord& record) {
237 for (uint32_t i = 0; i < n_args; i++) {
238 const size_t arg_base = cursor.WordIndex();
239 uint64_t arg_header;
240 if (!cursor.ReadUint64(&arg_header)) {
241 context_->storage->IncrementStats(stats::fuchsia_record_read_error);
242 return false;
243 }
244 auto arg_type =
245 fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 0, 3);
246 auto arg_size_words =
247 fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 4, 15);
248 auto arg_name_ref =
249 fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 16, 31);
250
251 if (fuchsia_trace_utils::IsInlineString(arg_name_ref)) {
252 // Skip over inline string
253 if (!cursor.ReadInlineString(arg_name_ref, nullptr)) {
254 context_->storage->IncrementStats(stats::fuchsia_record_read_error);
255 return false;
256 }
257 } else {
258 StringId id = current_provider_->GetString(arg_name_ref);
259 if (id == StringId::Null()) {
260 context_->storage->IncrementStats(stats::fuchsia_invalid_string_ref);
261 return false;
262 }
263 record.InsertString(arg_name_ref, id);
264 }
265
266 if (arg_type == ArgValue::ArgType::kString) {
267 auto arg_value_ref =
268 fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 32, 47);
269 if (fuchsia_trace_utils::IsInlineString(arg_value_ref)) {
270 // Skip over inline string
271 if (!cursor.ReadInlineString(arg_value_ref, nullptr)) {
272 context_->storage->IncrementStats(stats::fuchsia_record_read_error);
273 return false;
274 }
275 } else {
276 StringId id = current_provider_->GetString(arg_value_ref);
277 if (id == StringId::Null()) {
278 context_->storage->IncrementStats(
279 stats::fuchsia_invalid_string_ref);
280 return false;
281 }
282 record.InsertString(arg_value_ref, id);
283 }
284 }
285 cursor.SetWordIndex(arg_base + arg_size_words);
286 }
287
288 return true;
289 };
290
291 switch (record_type) {
292 case kMetadata: {
293 auto metadata_type =
294 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
295 switch (metadata_type) {
296 case kProviderInfo: {
297 auto provider_id =
298 fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
299 auto name_len =
300 fuchsia_trace_utils::ReadField<uint32_t>(header, 52, 59);
301 base::StringView name_view;
302 if (!cursor.ReadInlineString(name_len, &name_view)) {
303 storage->IncrementStats(stats::fuchsia_record_read_error);
304 return;
305 }
306 RegisterProvider(provider_id, name_view.ToStdString());
307 break;
308 }
309 case kProviderSection: {
310 auto provider_id =
311 fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
312 current_provider_ = providers_[provider_id].get();
313 break;
314 }
315 case kProviderEvent: {
316 // TODO(bhamrick): Handle buffer fill events
317 PERFETTO_DLOG(
318 "Ignoring provider event. Events may have been dropped");
319 break;
320 }
321 }
322 break;
323 }
324 case kInitialization: {
325 if (!cursor.ReadUint64(¤t_provider_->ticks_per_second)) {
326 storage->IncrementStats(stats::fuchsia_record_read_error);
327 return;
328 }
329 break;
330 }
331 case kString: {
332 auto index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 30);
333 if (index != 0) {
334 auto len = fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 46);
335 base::StringView s;
336 if (!cursor.ReadInlineString(len, &s)) {
337 storage->IncrementStats(stats::fuchsia_record_read_error);
338 return;
339 }
340 StringId id = storage->InternString(s);
341
342 current_provider_->string_table[index] = id;
343 }
344 break;
345 }
346 case kThread: {
347 auto index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
348 if (index != 0) {
349 FuchsiaThreadInfo tinfo;
350 if (!cursor.ReadInlineThread(&tinfo)) {
351 storage->IncrementStats(stats::fuchsia_record_read_error);
352 return;
353 }
354
355 current_provider_->thread_table[index] = tinfo;
356 }
357 break;
358 }
359 case kEvent: {
360 auto thread_ref =
361 fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 31);
362 auto cat_ref = fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 47);
363 auto name_ref = fuchsia_trace_utils::ReadField<uint32_t>(header, 48, 63);
364
365 // Build the FuchsiaRecord for the event, i.e. extract the thread
366 // information if not inline, and any non-inline strings (name, category
367 // for now, arg names and string values in the future).
368 FuchsiaRecord record(std::move(tbv));
369 record.set_ticks_per_second(current_provider_->ticks_per_second);
370
371 uint64_t ticks;
372 if (!cursor.ReadUint64(&ticks)) {
373 storage->IncrementStats(stats::fuchsia_record_read_error);
374 return;
375 }
376 int64_t ts = fuchsia_trace_utils::TicksToNs(
377 ticks, current_provider_->ticks_per_second);
378 if (ts < 0) {
379 storage->IncrementStats(stats::fuchsia_timestamp_overflow);
380 return;
381 }
382
383 if (fuchsia_trace_utils::IsInlineThread(thread_ref)) {
384 // Skip over inline thread
385 if (!cursor.ReadInlineThread(nullptr)) {
386 storage->IncrementStats(stats::fuchsia_record_read_error);
387 return;
388 }
389 } else {
390 record.InsertThread(thread_ref,
391 current_provider_->GetThread(thread_ref));
392 }
393
394 if (fuchsia_trace_utils::IsInlineString(cat_ref)) {
395 // Skip over inline string
396 if (!cursor.ReadInlineString(cat_ref, nullptr)) {
397 storage->IncrementStats(stats::fuchsia_record_read_error);
398 return;
399 }
400 } else {
401 StringId id = current_provider_->GetString(cat_ref);
402 if (id == StringId::Null()) {
403 storage->IncrementStats(stats::fuchsia_invalid_string_ref);
404 return;
405 }
406 record.InsertString(cat_ref, id);
407 }
408
409 if (fuchsia_trace_utils::IsInlineString(name_ref)) {
410 // Skip over inline string
411 if (!cursor.ReadInlineString(name_ref, nullptr)) {
412 storage->IncrementStats(stats::fuchsia_record_read_error);
413 return;
414 }
415 } else {
416 StringId id = current_provider_->GetString(name_ref);
417 if (id == StringId::Null()) {
418 storage->IncrementStats(stats::fuchsia_invalid_string_ref);
419 return;
420 }
421 record.InsertString(name_ref, id);
422 }
423
424 auto n_args = fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 23);
425 if (!insert_args(n_args, cursor, record)) {
426 return;
427 }
428 sorter->PushFuchsiaRecord(ts, std::move(record));
429 break;
430 }
431 case kBlob: {
432 constexpr uint32_t kPerfettoBlob = 3;
433 auto blob_type = fuchsia_trace_utils::ReadField<uint32_t>(header, 48, 55);
434 if (blob_type == kPerfettoBlob) {
435 FuchsiaRecord record(std::move(tbv));
436 auto blob_size =
437 fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 46);
438 auto name_ref =
439 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 31);
440
441 // We don't need the name, but we still need to parse it in case it is
442 // inline
443 if (fuchsia_trace_utils::IsInlineString(name_ref)) {
444 base::StringView name_view;
445 if (!cursor.ReadInlineString(name_ref, &name_view)) {
446 storage->IncrementStats(stats::fuchsia_record_read_error);
447 return;
448 }
449 }
450
451 // Append the Blob into the embedded perfetto bytes -- we'll parse them
452 // all after the main pass is done.
453 if (!cursor.ReadBlob(blob_size, proto_trace_data_)) {
454 storage->IncrementStats(stats::fuchsia_record_read_error);
455 return;
456 }
457 }
458 break;
459 }
460 case kKernelObject: {
461 auto obj_type = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
462 auto name_ref = fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 39);
463
464 uint64_t obj_id;
465 if (!cursor.ReadUint64(&obj_id)) {
466 storage->IncrementStats(stats::fuchsia_record_read_error);
467 return;
468 }
469
470 StringId name = StringId::Null();
471 if (fuchsia_trace_utils::IsInlineString(name_ref)) {
472 base::StringView name_view;
473 if (!cursor.ReadInlineString(name_ref, &name_view)) {
474 storage->IncrementStats(stats::fuchsia_record_read_error);
475 return;
476 }
477 name = storage->InternString(name_view);
478 } else {
479 name = current_provider_->GetString(name_ref);
480 if (name == StringId::Null()) {
481 storage->IncrementStats(stats::fuchsia_invalid_string_ref);
482 return;
483 }
484 }
485
486 switch (obj_type) {
487 case kZxObjTypeProcess: {
488 // Note: Fuchsia pid/tids are 64 bits but Perfetto's tables only
489 // support 32 bits. This is usually not an issue except for
490 // artificial koids which have the 2^63 bit set. This is used for
491 // things such as virtual threads.
492 procs->SetProcessMetadata(
493 static_cast<uint32_t>(obj_id), std::optional<uint32_t>(),
494 base::StringView(storage->GetString(name)), base::StringView());
495 break;
496 }
497 case kZxObjTypeThread: {
498 auto n_args =
499 fuchsia_trace_utils::ReadField<uint32_t>(header, 40, 43);
500
501 auto maybe_args = FuchsiaTraceParser::ParseArgs(
502 cursor, n_args, intern_string, get_string);
503 if (!maybe_args.has_value()) {
504 storage->IncrementStats(stats::fuchsia_record_read_error);
505 return;
506 }
507
508 uint64_t pid = 0;
509 for (const auto arg : *maybe_args) {
510 if (arg.name == process_id_) {
511 if (arg.value.Type() != ArgValue::ArgType::kKoid) {
512 storage->IncrementStats(stats::fuchsia_invalid_event_arg_type);
513 return;
514 }
515 pid = arg.value.Koid();
516 }
517 }
518
519 // TODO(lalitm): this is a gross hack we're adding to unblock a crash
520 // (b/383877212). This should be refactored properly out into a
521 // tracker (which is the pattern for handling this sort of thing
522 // in the rest of TP) but that is a bunch of boilerplate.
523 auto* parser = static_cast<FuchsiaTraceParser*>(
524 context_->fuchsia_record_parser.get());
525 auto& thread = parser->GetThread(obj_id);
526 thread.info.pid = pid;
527
528 UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(obj_id),
529 static_cast<uint32_t>(pid));
530 auto& tt = *storage->mutable_thread_table();
531 tt[utid].set_name(name);
532 break;
533 }
534 default: {
535 PERFETTO_DLOG("Skipping Kernel Object record with type %d", obj_type);
536 break;
537 }
538 }
539 break;
540 }
541 case kSchedulerEvent: {
542 // Context switch records come in order, so they do not need to go through
543 // TraceSorter.
544 auto event_type =
545 fuchsia_trace_utils::ReadField<uint32_t>(header, 60, 63);
546 switch (event_type) {
547 case kSchedulerEventLegacyContextSwitch: {
548 auto outgoing_thread_ref =
549 fuchsia_trace_utils::ReadField<uint32_t>(header, 28, 35);
550 auto incoming_thread_ref =
551 fuchsia_trace_utils::ReadField<uint32_t>(header, 36, 43);
552
553 FuchsiaRecord record(std::move(tbv));
554 record.set_ticks_per_second(current_provider_->ticks_per_second);
555
556 int64_t ts;
557 if (!cursor.ReadTimestamp(current_provider_->ticks_per_second, &ts)) {
558 storage->IncrementStats(stats::fuchsia_record_read_error);
559 return;
560 }
561 if (ts == -1) {
562 storage->IncrementStats(stats::fuchsia_timestamp_overflow);
563 return;
564 }
565
566 if (fuchsia_trace_utils::IsInlineThread(outgoing_thread_ref)) {
567 // Skip over inline thread
568 if (!cursor.ReadInlineThread(nullptr)) {
569 storage->IncrementStats(stats::fuchsia_record_read_error);
570 return;
571 }
572 } else {
573 record.InsertThread(
574 outgoing_thread_ref,
575 current_provider_->GetThread(outgoing_thread_ref));
576 }
577
578 if (fuchsia_trace_utils::IsInlineThread(incoming_thread_ref)) {
579 // Skip over inline thread
580 if (!cursor.ReadInlineThread(nullptr)) {
581 storage->IncrementStats(stats::fuchsia_record_read_error);
582 return;
583 }
584 } else {
585 record.InsertThread(
586 incoming_thread_ref,
587 current_provider_->GetThread(incoming_thread_ref));
588 }
589 sorter->PushFuchsiaRecord(ts, std::move(record));
590 break;
591 }
592 case kSchedulerEventContextSwitch: {
593 FuchsiaRecord record(std::move(tbv));
594 record.set_ticks_per_second(current_provider_->ticks_per_second);
595
596 int64_t ts;
597 if (!cursor.ReadTimestamp(current_provider_->ticks_per_second, &ts)) {
598 storage->IncrementStats(stats::fuchsia_record_read_error);
599 return;
600 }
601 if (ts < 0) {
602 storage->IncrementStats(stats::fuchsia_timestamp_overflow);
603 return;
604 }
605
606 // Skip outgoing tid.
607 if (!cursor.ReadUint64(nullptr)) {
608 storage->IncrementStats(stats::fuchsia_record_read_error);
609 return;
610 }
611
612 // Skip incoming tid.
613 if (!cursor.ReadUint64(nullptr)) {
614 storage->IncrementStats(stats::fuchsia_record_read_error);
615 return;
616 }
617
618 const auto n_args =
619 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
620 if (!insert_args(n_args, cursor, record)) {
621 return;
622 }
623 sorter->PushFuchsiaRecord(ts, std::move(record));
624 break;
625 }
626 case kSchedulerEventThreadWakeup: {
627 FuchsiaRecord record(std::move(tbv));
628 record.set_ticks_per_second(current_provider_->ticks_per_second);
629
630 int64_t ts;
631 if (!cursor.ReadTimestamp(current_provider_->ticks_per_second, &ts)) {
632 storage->IncrementStats(stats::fuchsia_record_read_error);
633 return;
634 }
635 if (ts < 0) {
636 storage->IncrementStats(stats::fuchsia_timestamp_overflow);
637 return;
638 }
639
640 // Skip waking tid.
641 if (!cursor.ReadUint64(nullptr)) {
642 storage->IncrementStats(stats::fuchsia_record_read_error);
643 return;
644 }
645
646 const auto n_args =
647 fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
648 if (!insert_args(n_args, cursor, record)) {
649 return;
650 }
651 sorter->PushFuchsiaRecord(ts, std::move(record));
652 break;
653 }
654 default:
655 PERFETTO_DLOG("Skipping unknown scheduler event type %d", event_type);
656 break;
657 }
658
659 break;
660 }
661 default: {
662 PERFETTO_DLOG("Skipping record of unknown type %d", record_type);
663 break;
664 }
665 }
666 }
667
RegisterProvider(uint32_t provider_id,std::string name)668 void FuchsiaTraceTokenizer::RegisterProvider(uint32_t provider_id,
669 std::string name) {
670 std::unique_ptr<ProviderInfo> provider(new ProviderInfo());
671 provider->name = name;
672 current_provider_ = provider.get();
673 providers_[provider_id] = std::move(provider);
674 }
675
// Nothing to flush at end-of-file: complete records are parsed eagerly in
// Parse(), which also forwards accumulated embedded proto bytes each call.
// Any trailing |leftover_bytes_| form an incomplete record and are discarded.
base::Status FuchsiaTraceTokenizer::NotifyEndOfFile() {
  return base::OkStatus();
}
679
680 } // namespace perfetto::trace_processor
681