/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/trace_processor/importers/fuchsia/fuchsia_trace_tokenizer.h"

#include <cinttypes>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/string_view.h"
#include "perfetto/trace_processor/trace_blob.h"
#include "src/trace_processor/importers/common/process_tracker.h"
#include "src/trace_processor/importers/common/slice_tracker.h"
#include "src/trace_processor/importers/fuchsia/fuchsia_record.h"
#include "src/trace_processor/trace_sorter.h"
#include "src/trace_processor/types/task_state.h"
#include "src/trace_processor/types/trace_processor_context.h"

namespace perfetto {
namespace trace_processor {

namespace {
// Record types
constexpr uint32_t kMetadata = 0;
constexpr uint32_t kInitialization = 1;
constexpr uint32_t kString = 2;
constexpr uint32_t kThread = 3;
constexpr uint32_t kEvent = 4;
constexpr uint32_t kKernelObject = 7;
constexpr uint32_t kContextSwitch = 8;

// Metadata types
constexpr uint32_t kProviderInfo = 1;
constexpr uint32_t kProviderSection = 2;
constexpr uint32_t kProviderEvent = 3;

// Thread states
constexpr uint32_t kThreadNew = 0;
constexpr uint32_t kThreadRunning = 1;
constexpr uint32_t kThreadSuspended = 2;
constexpr uint32_t kThreadBlocked = 3;
constexpr uint32_t kThreadDying = 4;
constexpr uint32_t kThreadDead = 5;

// Zircon object types
constexpr uint32_t kZxObjTypeProcess = 1;
constexpr uint32_t kZxObjTypeThread = 2;

// Argument types
constexpr uint32_t kArgString = 6;
constexpr uint32_t kArgKernelObject = 8;
}  // namespace
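
// Every record in the Fuchsia trace format begins with a 64-bit header word.
// The low bits are laid out the same way for every record type (the bit
// ranges below are inclusive and match the ReadField calls in this file):
//
//   bits 0-3   record type (one of the constants above)
//   bits 4-15  record length in 64-bit words, including the header itself
//
// The remaining bits are type-specific; for example, metadata records carry
// the metadata type in bits 16-19 and the provider id in bits 20-51.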
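// Provider 0 with an empty name is registered up front so that
// |current_provider_| is always valid; it acts as the default provider until
// a ProviderInfo or ProviderSection metadata record selects a real one.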
FuchsiaTraceTokenizer::FuchsiaTraceTokenizer(TraceProcessorContext* context)
    : context_(context) {
  RegisterProvider(0, "");
}

FuchsiaTraceTokenizer::~FuchsiaTraceTokenizer() = default;

util::Status FuchsiaTraceTokenizer::Parse(TraceBlobView blob) {
  size_t size = blob.size();

  // The relevant internal state is |leftover_bytes_|. Each call to Parse
  // should maintain the following properties, unless a fatal error occurs, in
  // which case it should return an error status and no assumptions should be
  // made about the resulting internal state:
  //
  // 1) Every byte passed to |Parse| has either been passed to |ParseRecord| or
  // is present in |leftover_bytes_|, but not both.
  // 2) |leftover_bytes_| does not contain a complete record.
  //
  // Parse is responsible for creating the "full" |TraceBlobView|s, which own
  // the underlying data. Generally, there will be one such view. However, if
  // there is a record that started in an earlier call, then a new buffer is
  // created here to make the bytes in that record contiguous.
  //
  // Because some of the bytes in |data| might belong to the record starting in
  // |leftover_bytes_|, we track the offset at which the following record will
  // start.
  size_t byte_offset = 0;

  // Look for a record starting with the leftover bytes.
  if (leftover_bytes_.size() + size < 8) {
    // Even with the new bytes, we can't even read the header of the next
    // record, so just add the new bytes to |leftover_bytes_| and return.
    leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
                           blob.data() + size);
    return util::OkStatus();
  }
  if (!leftover_bytes_.empty()) {
    // There is a record starting from leftover bytes.
    if (leftover_bytes_.size() < 8) {
      // Header was previously incomplete, but we have enough now.
      // Copy bytes into |leftover_bytes_| so that the whole header is present,
      // and update |byte_offset| and |size| accordingly.
      size_t needed_bytes = 8 - leftover_bytes_.size();
      leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
                             blob.data() + needed_bytes);
      byte_offset += needed_bytes;
      size -= needed_bytes;
    }
    // Read the record length from the header.
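    // |ReadField<T>(word, begin, end)| extracts the inclusive bit range
    // [begin, end] from |word|; bits 4-15 hold the 12-bit record length in
    // 64-bit words, so a single record is at most 4095 words long.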
    uint64_t header =
        *reinterpret_cast<const uint64_t*>(leftover_bytes_.data());
    uint32_t record_len_words =
        fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15);
    uint32_t record_len_bytes = record_len_words * sizeof(uint64_t);

    // From property (2) above, leftover_bytes_ must have had less than a full
    // record to start with. We padded leftover_bytes_ out to read the header,
    // so it may now be a full record (in the case that the record consists of
    // only the header word), but it still cannot have any extra bytes.
    PERFETTO_DCHECK(leftover_bytes_.size() <= record_len_bytes);
    size_t missing_bytes = record_len_bytes - leftover_bytes_.size();

    if (missing_bytes <= size) {
      // We have enough bytes to complete the partial record. Create a new
      // buffer for that record.
      TraceBlob buf = TraceBlob::Allocate(record_len_bytes);
      memcpy(buf.data(), leftover_bytes_.data(), leftover_bytes_.size());
      memcpy(buf.data() + leftover_bytes_.size(), blob.data() + byte_offset,
             missing_bytes);
      byte_offset += missing_bytes;
      size -= missing_bytes;
      leftover_bytes_.clear();
      ParseRecord(TraceBlobView(std::move(buf)));
    } else {
      // There are not enough bytes for the full record. Add all the bytes we
      // have to leftover_bytes_ and wait for more.
      leftover_bytes_.insert(leftover_bytes_.end(), blob.data() + byte_offset,
                             blob.data() + byte_offset + size);
      return util::OkStatus();
    }
  }

  TraceBlobView full_view = blob.slice_off(byte_offset, size);

  // |record_offset| is a number of bytes past |byte_offset| where the record
  // under consideration starts. As a result, it must always be in the range
  // [0, size-8]. Any larger offset means we don't have enough bytes for the
  // header.
  size_t record_offset = 0;
  while (record_offset + 8 <= size) {
    uint64_t header =
        *reinterpret_cast<const uint64_t*>(full_view.data() + record_offset);
    uint32_t record_len_bytes =
        fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15) *
        sizeof(uint64_t);
    if (record_len_bytes == 0)
      return util::ErrStatus("Unexpected record of size 0");

    if (record_offset + record_len_bytes > size)
      break;

    TraceBlobView record = full_view.slice_off(record_offset, record_len_bytes);
    ParseRecord(std::move(record));

    record_offset += record_len_bytes;
  }

  leftover_bytes_.insert(leftover_bytes_.end(),
                         full_view.data() + record_offset,
                         full_view.data() + size);
  return util::OkStatus();
}

// Most record types are read and recorded in |TraceStorage| here directly.
// Event records are sorted by timestamp before processing, so instead of
// being recorded in |TraceStorage| they are handed to |TraceSorter|. To make
// parsing after sorting possible, the referenced entries of the provider's
// string and thread tables are copied into the record. See |FuchsiaRecord|.
void FuchsiaTraceTokenizer::ParseRecord(TraceBlobView tbv) {
  TraceStorage* storage = context_->storage.get();
  ProcessTracker* procs = context_->process_tracker.get();
  TraceSorter* sorter = context_->sorter.get();

  fuchsia_trace_utils::RecordCursor cursor(tbv.data(), tbv.length());
  uint64_t header;
  if (!cursor.ReadUint64(&header)) {
    context_->storage->IncrementStats(stats::fuchsia_invalid_event);
    return;
  }

  uint32_t record_type = fuchsia_trace_utils::ReadField<uint32_t>(header, 0, 3);

  // All non-metadata events require current_provider_ to be set.
  if (record_type != kMetadata && current_provider_ == nullptr) {
    context_->storage->IncrementStats(stats::fuchsia_invalid_event);
    return;
  }

  switch (record_type) {
    case kMetadata: {
      uint32_t metadata_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
      switch (metadata_type) {
        case kProviderInfo: {
          uint32_t provider_id =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
          uint32_t name_len =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 52, 59);
          base::StringView name_view;
          if (!cursor.ReadInlineString(name_len, &name_view)) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
          RegisterProvider(provider_id, name_view.ToStdString());
          break;
        }
        case kProviderSection: {
          uint32_t provider_id =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
          current_provider_ = providers_[provider_id].get();
          break;
        }
        case kProviderEvent: {
          // TODO(bhamrick): Handle buffer fill events
          PERFETTO_DLOG(
              "Ignoring provider event. Events may have been dropped");
          break;
        }
      }
      break;
    }
    case kInitialization: {
      if (!cursor.ReadUint64(&current_provider_->ticks_per_second)) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }
      break;
    }
    case kString: {
      uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 30);
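      // Index 0 is reserved: a string ref of 0 denotes the empty string and
      // never names a string table entry, so such records are ignored.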
      if (index != 0) {
        uint32_t len = fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 46);
        base::StringView s;
        if (!cursor.ReadInlineString(len, &s)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
        StringId id = storage->InternString(s);

        current_provider_->string_table[index] = id;
      }
      break;
    }
    case kThread: {
      uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
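      // Index 0 is likewise reserved: a thread ref of 0 means the thread is
      // spelled out inline in the referencing record instead.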
      if (index != 0) {
        fuchsia_trace_utils::ThreadInfo tinfo;
        if (!cursor.ReadInlineThread(&tinfo)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }

        current_provider_->thread_table[index] = tinfo;
      }
      break;
    }
    case kEvent: {
      uint32_t thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 31);
      uint32_t cat_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 47);
      uint32_t name_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 48, 63);

      // Build the FuchsiaRecord for the event, i.e. extract the thread
      // information if not inline, and any non-inline strings (name, category
      // for now, arg names and string values in the future).
      auto record =
          std::unique_ptr<FuchsiaRecord>(new FuchsiaRecord(std::move(tbv)));
      record->set_ticks_per_second(current_provider_->ticks_per_second);

      uint64_t ticks;
      if (!cursor.ReadUint64(&ticks)) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }
      int64_t ts = fuchsia_trace_utils::TicksToNs(
          ticks, current_provider_->ticks_per_second);
      if (ts < 0) {
        storage->IncrementStats(stats::fuchsia_timestamp_overflow);
        return;
      }

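      // Only table-based references need to be resolved into |record| now,
      // before the provider tables change; inline threads and strings live in
      // the record body itself and can be re-read after sorting.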
      if (fuchsia_trace_utils::IsInlineThread(thread_ref)) {
        // Skip over inline thread
        cursor.ReadInlineThread(nullptr);
      } else {
        record->InsertThread(thread_ref,
                             current_provider_->thread_table[thread_ref]);
      }

      if (fuchsia_trace_utils::IsInlineString(cat_ref)) {
        // Skip over inline string
        cursor.ReadInlineString(cat_ref, nullptr);
      } else {
        record->InsertString(cat_ref, current_provider_->string_table[cat_ref]);
      }

      if (fuchsia_trace_utils::IsInlineString(name_ref)) {
        // Skip over inline string
        cursor.ReadInlineString(name_ref, nullptr);
      } else {
        record->InsertString(name_ref,
                             current_provider_->string_table[name_ref]);
      }

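      // Arguments are self-sized: each one starts with its own header word
      // carrying the argument type (bits 0-3), its total size in words
      // (bits 4-15) and a name reference (bits 16-31). Seeking to
      // |arg_base + arg_size_words| after each argument lets unrecognized
      // argument types be skipped safely.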
      uint32_t n_args =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 23);
      for (uint32_t i = 0; i < n_args; i++) {
        const size_t arg_base = cursor.WordIndex();
        uint64_t arg_header;
        if (!cursor.ReadUint64(&arg_header)) {
          storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
        uint32_t arg_type =
            fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 0, 3);
        uint32_t arg_size_words =
            fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 4, 15);
        uint32_t arg_name_ref =
            fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 16, 31);

        if (fuchsia_trace_utils::IsInlineString(arg_name_ref)) {
          // Skip over inline string
          cursor.ReadInlineString(arg_name_ref, nullptr);
        } else {
          record->InsertString(arg_name_ref,
                               current_provider_->string_table[arg_name_ref]);
        }

        if (arg_type == kArgString) {
          uint32_t arg_value_ref =
              fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 32, 47);
          if (fuchsia_trace_utils::IsInlineString(arg_value_ref)) {
            // Skip over inline string
            cursor.ReadInlineString(arg_value_ref, nullptr);
          } else {
            record->InsertString(
                arg_value_ref, current_provider_->string_table[arg_value_ref]);
          }
        }

        cursor.SetWordIndex(arg_base + arg_size_words);
      }

      sorter->PushFuchsiaRecord(ts, std::move(record));
      break;
    }
    case kKernelObject: {
      uint32_t obj_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      uint32_t name_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 39);

      uint64_t obj_id;
      if (!cursor.ReadUint64(&obj_id)) {
        storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }

      StringId name = StringId();
      if (fuchsia_trace_utils::IsInlineString(name_ref)) {
        base::StringView name_view;
        if (!cursor.ReadInlineString(name_ref, &name_view)) {
          storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
        name = storage->InternString(name_view);
      } else {
        name = current_provider_->string_table[name_ref];
      }

      switch (obj_type) {
        case kZxObjTypeProcess: {
          // Note: Fuchsia pids/tids are 64-bit koids, but Perfetto's tables
          // only support 32 bits. This is usually not an issue, except for
          // artificial koids, which have bit 63 set and are used for things
          // such as virtual threads.
          procs->SetProcessMetadata(
              static_cast<uint32_t>(obj_id), base::Optional<uint32_t>(),
              base::StringView(storage->GetString(name)), base::StringView());
          break;
        }
        case kZxObjTypeThread: {
          uint32_t n_args =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 40, 43);
          uint64_t pid = 0;

          // Scan for a Kernel Object argument named "process"
          for (uint32_t i = 0; i < n_args; i++) {
            const size_t arg_base = cursor.WordIndex();
            uint64_t arg_header;
            if (!cursor.ReadUint64(&arg_header)) {
              storage->IncrementStats(stats::fuchsia_invalid_event);
              return;
            }
            uint32_t arg_type =
                fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 0, 3);
            uint32_t arg_size =
                fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 4, 15);
            if (arg_type == kArgKernelObject) {
              uint32_t arg_name_ref =
                  fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 16, 31);
              base::StringView arg_name;
              if (fuchsia_trace_utils::IsInlineString(arg_name_ref)) {
                if (!cursor.ReadInlineString(arg_name_ref, &arg_name)) {
                  storage->IncrementStats(stats::fuchsia_invalid_event);
                  return;
                }
              } else {
                arg_name = storage->GetString(
                    current_provider_->string_table[arg_name_ref]);
              }

              if (arg_name == "process") {
                if (!cursor.ReadUint64(&pid)) {
                  storage->IncrementStats(stats::fuchsia_invalid_event);
                  return;
                }
              }
            }

            cursor.SetWordIndex(arg_base + arg_size);
          }

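          // Remember the koid -> pid association so that later context switch
          // records can attribute this thread to its process.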
          pid_table_[obj_id] = pid;

          UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(obj_id),
                                               static_cast<uint32_t>(pid));
          storage->mutable_thread_table()->mutable_name()->Set(utid, name);
          break;
        }
        default: {
          PERFETTO_DLOG("Skipping Kernel Object record with type %" PRIu32,
                        obj_type);
          break;
        }
      }
      break;
    }
    case kContextSwitch: {
      // Context switch records come in order, so they do not need to go
      // through TraceSorter.
      uint32_t cpu = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      uint32_t outgoing_state =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 27);
      uint32_t outgoing_thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 28, 35);
      uint32_t incoming_thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 36, 43);
      int32_t outgoing_priority =
          fuchsia_trace_utils::ReadField<int32_t>(header, 44, 51);

      int64_t ts;
      if (!cursor.ReadTimestamp(current_provider_->ticks_per_second, &ts)) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }
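      // The ticks-to-ns conversion signals overflow with a negative value
      // (the kEvent path above treats ts < 0 the same way), so reject the -1
      // sentinel here as invalid.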
      if (ts == -1) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }

      fuchsia_trace_utils::ThreadInfo outgoing_thread;
      if (fuchsia_trace_utils::IsInlineThread(outgoing_thread_ref)) {
        if (!cursor.ReadInlineThread(&outgoing_thread)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
      } else {
        outgoing_thread = current_provider_->thread_table[outgoing_thread_ref];
      }

      fuchsia_trace_utils::ThreadInfo incoming_thread;
      if (fuchsia_trace_utils::IsInlineThread(incoming_thread_ref)) {
        if (!cursor.ReadInlineThread(&incoming_thread)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
      } else {
        incoming_thread = current_provider_->thread_table[incoming_thread_ref];
      }

      // A thread with priority 0 represents an idle CPU
      if (cpu_threads_.count(cpu) != 0 && outgoing_priority != 0) {
        // TODO(bhamrick): Some early events will fail to associate with their
        // pid because the kernel object info event hasn't been processed yet.
        if (pid_table_.count(outgoing_thread.tid) > 0) {
          outgoing_thread.pid = pid_table_[outgoing_thread.tid];
        }

        UniqueTid utid =
            procs->UpdateThread(static_cast<uint32_t>(outgoing_thread.tid),
                                static_cast<uint32_t>(outgoing_thread.pid));
        RunningThread previous_thread = cpu_threads_[cpu];

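        // Translate the Zircon thread state of the outgoing thread into the
        // ftrace task-state vocabulary used by the sched tables. States with
        // no counterpart leave |end_state| invalid, which interns to a null
        // string id below.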
        ftrace_utils::TaskState end_state;
        switch (outgoing_state) {
          case kThreadNew:
          case kThreadRunning: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kRunnable);
            break;
          }
          case kThreadBlocked: {
            end_state = ftrace_utils::TaskState(
                ftrace_utils::TaskState::kInterruptibleSleep);
            break;
          }
          case kThreadSuspended: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kStopped);
            break;
          }
          case kThreadDying: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kExitZombie);
            break;
          }
          case kThreadDead: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kExitDead);
            break;
          }
          default: {
            break;
          }
        }

        auto id =
            end_state.is_valid()
                ? context_->storage->InternString(end_state.ToString().data())
                : kNullStringId;
        storage->mutable_sched_slice_table()->Insert(
            {previous_thread.start_ts, ts - previous_thread.start_ts, cpu, utid,
             id, outgoing_priority});
      }

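      // Whether or not a sched slice was emitted, the incoming thread is what
      // runs on this cpu from now on.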
      RunningThread new_running;
      new_running.info = incoming_thread;
      new_running.start_ts = ts;
      cpu_threads_[cpu] = new_running;
      break;
    }
    default: {
      PERFETTO_DLOG("Skipping record of unknown type %" PRIu32, record_type);
      break;
    }
  }
}

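// Registers a provider and makes it current: until the next ProviderInfo or
// ProviderSection metadata record switches providers, all records are
// interpreted against this provider's string and thread tables.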
void FuchsiaTraceTokenizer::RegisterProvider(uint32_t provider_id,
                                             std::string name) {
  std::unique_ptr<ProviderInfo> provider(new ProviderInfo());
  provider->name = name;
  current_provider_ = provider.get();
  providers_[provider_id] = std::move(provider);
}

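// Nothing to flush at end of file: any bytes still in |leftover_bytes_|
// belong to a record that was never completed, so they are simply dropped.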
void FuchsiaTraceTokenizer::NotifyEndOfFile() {}

}  // namespace trace_processor
}  // namespace perfetto