• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "src/trace_processor/importers/fuchsia/fuchsia_trace_tokenizer.h"
18 
19 #include <inttypes.h>
20 #include <unordered_map>
21 
22 #include "perfetto/base/logging.h"
23 #include "perfetto/ext/base/string_view.h"
24 #include "src/trace_processor/importers/common/process_tracker.h"
25 #include "src/trace_processor/importers/common/slice_tracker.h"
26 #include "src/trace_processor/importers/fuchsia/fuchsia_record.h"
27 #include "src/trace_processor/trace_sorter.h"
28 #include "src/trace_processor/types/task_state.h"
29 #include "src/trace_processor/types/trace_processor_context.h"
30 
31 namespace perfetto {
32 namespace trace_processor {
33 
namespace {
// Record types, read from bits 0-3 of each record's 64-bit header word
// (see the ReadField(header, 0, 3) call in ParseRecord).
constexpr uint32_t kMetadata = 0;
constexpr uint32_t kInitialization = 1;
constexpr uint32_t kString = 2;
constexpr uint32_t kThread = 3;
constexpr uint32_t kEvent = 4;
constexpr uint32_t kKernelObject = 7;
constexpr uint32_t kContextSwitch = 8;

// Metadata types, read from bits 16-19 of a kMetadata record's header.
constexpr uint32_t kProviderInfo = 1;
constexpr uint32_t kProviderSection = 2;
constexpr uint32_t kProviderEvent = 3;

// Thread states of the outgoing thread in a kContextSwitch record
// (bits 24-27 of the header); mapped onto ftrace_utils::TaskState values.
constexpr uint32_t kThreadNew = 0;
constexpr uint32_t kThreadRunning = 1;
constexpr uint32_t kThreadSuspended = 2;
constexpr uint32_t kThreadBlocked = 3;
constexpr uint32_t kThreadDying = 4;
constexpr uint32_t kThreadDead = 5;

// Zircon object types carried in kKernelObject records (bits 16-23).
constexpr uint32_t kZxObjTypeProcess = 1;
constexpr uint32_t kZxObjTypeThread = 2;

// Argument types (bits 0-3 of an argument header). Only the types this
// tokenizer inspects are listed; others are skipped via their size field.
constexpr uint32_t kArgString = 6;
constexpr uint32_t kArgKernelObject = 8;
}  // namespace
65 
FuchsiaTraceTokenizer::FuchsiaTraceTokenizer(TraceProcessorContext* context)
    : context_(context) {
  // Pre-register a default provider (id 0, empty name) so that
  // |current_provider_| is non-null from the start; ParseRecord drops any
  // non-metadata record seen while |current_provider_| is null.
  RegisterProvider(0, "");
}
70 
// Defaulted: all members (providers_, leftover_bytes_, tables) release their
// resources via their own destructors.
FuchsiaTraceTokenizer::~FuchsiaTraceTokenizer() = default;
72 
Parse(std::unique_ptr<uint8_t[]> data,size_t size)73 util::Status FuchsiaTraceTokenizer::Parse(std::unique_ptr<uint8_t[]> data,
74                                           size_t size) {
75   // The relevant internal state is |leftover_bytes_|. Each call to Parse should
76   // maintain the following properties, unless a fatal error occurs in which
77   // case it should return false and no assumptions should be made about the
78   // resulting internal state:
79   //
80   // 1) Every byte passed to |Parse| has either been passed to |ParseRecord| or
81   // is present in |leftover_bytes_|, but not both.
82   // 2) |leftover_bytes_| does not contain a complete record.
83   //
84   // Parse is responsible for creating the "full" |TraceBlobView|s, which own
85   // the underlying data. Generally, there will be one such view. However, if
86   // there is a record that started in an earlier call, then a new buffer is
87   // created here to make the bytes in that record contiguous.
88   //
89   // Because some of the bytes in |data| might belong to the record starting in
90   // |leftover_bytes_|, we track the offset at which the following record will
91   // start.
92   size_t byte_offset = 0;
93 
94   // Look for a record starting with the leftover bytes.
95   if (leftover_bytes_.size() + size < 8) {
96     // Even with the new bytes, we can't even read the header of the next
97     // record, so just add the new bytes to |leftover_bytes_| and return.
98     leftover_bytes_.insert(leftover_bytes_.end(), data.get() + byte_offset,
99                            data.get() + size);
100     return util::OkStatus();
101   }
102   if (!leftover_bytes_.empty()) {
103     // There is a record starting from leftover bytes.
104     if (leftover_bytes_.size() < 8) {
105       // Header was previously incomplete, but we have enough now.
106       // Copy bytes into |leftover_bytes_| so that the whole header is present,
107       // and update |byte_offset| and |size| accordingly.
108       size_t needed_bytes = 8 - leftover_bytes_.size();
109       leftover_bytes_.insert(leftover_bytes_.end(), data.get() + byte_offset,
110                              data.get() + needed_bytes);
111       byte_offset += needed_bytes;
112       size -= needed_bytes;
113     }
114     // Read the record length from the header.
115     uint64_t header =
116         *reinterpret_cast<const uint64_t*>(leftover_bytes_.data());
117     uint32_t record_len_words =
118         fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15);
119     uint32_t record_len_bytes = record_len_words * sizeof(uint64_t);
120 
121     // From property (2) above, leftover_bytes_ must have had less than a full
122     // record to start with. We padded leftover_bytes_ out to read the header,
123     // so it may now be a full record (in the case that the record consists of
124     // only the header word), but it still cannot have any extra bytes.
125     PERFETTO_DCHECK(leftover_bytes_.size() <= record_len_bytes);
126     size_t missing_bytes = record_len_bytes - leftover_bytes_.size();
127 
128     if (missing_bytes <= size) {
129       // We have enough bytes to complete the partial record. Create a new
130       // buffer for that record.
131       std::unique_ptr<uint8_t[]> buf(new uint8_t[record_len_bytes]);
132       memcpy(&buf[0], leftover_bytes_.data(), leftover_bytes_.size());
133       memcpy(&buf[leftover_bytes_.size()], &data[byte_offset], missing_bytes);
134       byte_offset += missing_bytes;
135       size -= missing_bytes;
136       leftover_bytes_.clear();
137 
138       TraceBlobView leftover_record(std::move(buf), 0, record_len_bytes);
139       ParseRecord(std::move(leftover_record));
140     } else {
141       // There are not enough bytes for the full record. Add all the bytes we
142       // have to leftover_bytes_ and wait for more.
143       leftover_bytes_.insert(leftover_bytes_.end(), data.get() + byte_offset,
144                              data.get() + byte_offset + size);
145       return util::OkStatus();
146     }
147   }
148 
149   TraceBlobView full_view(std::move(data), byte_offset, size);
150 
151   // |record_offset| is a number of bytes past |byte_offset| where the record
152   // under consideration starts. As a result, it must always be in the range [0,
153   // size-8]. Any larger offset means we don't have enough bytes for the header.
154   size_t record_offset = 0;
155   while (record_offset + 8 <= size) {
156     uint64_t header =
157         *reinterpret_cast<const uint64_t*>(full_view.data() + record_offset);
158     uint32_t record_len_bytes =
159         fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15) *
160         sizeof(uint64_t);
161     if (record_len_bytes == 0)
162       return util::ErrStatus("Unexpected record of size 0");
163 
164     if (record_offset + record_len_bytes > size)
165       break;
166 
167     TraceBlobView record =
168         full_view.slice(byte_offset + record_offset, record_len_bytes);
169     ParseRecord(std::move(record));
170 
171     record_offset += record_len_bytes;
172   }
173 
174   leftover_bytes_.insert(leftover_bytes_.end(),
175                          full_view.data() + record_offset,
176                          full_view.data() + size);
177   return util::OkStatus();
178 }
179 
// Most record types are read and recorded in |TraceStorage| here directly.
// Event records are sorted by timestamp before processing, so instead of
// recording them in |TraceStorage| they are given to |TraceSorter|. In order to
// facilitate the parsing after sorting, a small view of the provider's string
// and thread tables is passed alongside the record. See |FuchsiaProviderView|.
void FuchsiaTraceTokenizer::ParseRecord(TraceBlobView tbv) {
  TraceStorage* storage = context_->storage.get();
  ProcessTracker* procs = context_->process_tracker.get();
  TraceSorter* sorter = context_->sorter.get();

  // The cursor walks the record word-by-word; every Read* below advances it,
  // so the reads must occur in the exact order fields appear on the wire.
  fuchsia_trace_utils::RecordCursor cursor(tbv.data(), tbv.length());
  uint64_t header;
  if (!cursor.ReadUint64(&header)) {
    context_->storage->IncrementStats(stats::fuchsia_invalid_event);
    return;
  }

  // Bits 0-3 of the header word identify the record type.
  uint32_t record_type = fuchsia_trace_utils::ReadField<uint32_t>(header, 0, 3);

  // All non-metadata events require current_provider_ to be set.
  if (record_type != kMetadata && current_provider_ == nullptr) {
    context_->storage->IncrementStats(stats::fuchsia_invalid_event);
    return;
  }

  switch (record_type) {
    case kMetadata: {
      // Bits 16-19 select the metadata subtype.
      uint32_t metadata_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
      switch (metadata_type) {
        case kProviderInfo: {
          uint32_t provider_id =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
          uint32_t name_len =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 52, 59);
          base::StringView name_view;
          if (!cursor.ReadInlineString(name_len, &name_view)) {
            context_->storage->IncrementStats(stats::fuchsia_invalid_event);
            return;
          }
          // Registering a provider also makes it the current one.
          RegisterProvider(provider_id, name_view.ToStdString());
          break;
        }
        case kProviderSection: {
          // Switch subsequent records to a previously-registered provider.
          uint32_t provider_id =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
          // NOTE(review): an unknown |provider_id| default-inserts a null
          // unique_ptr here, leaving |current_provider_| null; later
          // non-metadata records are then counted as invalid by the guard
          // above. Confirm this is the intended handling.
          current_provider_ = providers_[provider_id].get();
          break;
        }
        case kProviderEvent: {
          // TODO(bhamrick): Handle buffer fill events
          PERFETTO_DLOG(
              "Ignoring provider event. Events may have been dropped");
          break;
        }
      }
      break;
    }
    case kInitialization: {
      // The payload is a single word: the provider's tick rate, used to
      // convert event timestamps to nanoseconds.
      if (!cursor.ReadUint64(&current_provider_->ticks_per_second)) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }
      break;
    }
    case kString: {
      // Interns an entry in the current provider's string table. Index 0 is
      // reserved (no table update).
      uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 30);
      if (index != 0) {
        uint32_t len = fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 46);
        base::StringView s;
        if (!cursor.ReadInlineString(len, &s)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
        StringId id = storage->InternString(s);

        current_provider_->string_table[index] = id;
      }
      break;
    }
    case kThread: {
      // Records an entry in the current provider's thread table. Index 0 is
      // reserved (no table update).
      uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      if (index != 0) {
        fuchsia_trace_utils::ThreadInfo tinfo;
        if (!cursor.ReadInlineThread(&tinfo)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }

        current_provider_->thread_table[index] = tinfo;
      }
      break;
    }
    case kEvent: {
      uint32_t thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 31);
      uint32_t cat_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 47);
      uint32_t name_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 48, 63);

      // Build the FuchsiaRecord for the event, i.e. extract the thread
      // information if not inline, and any non-inline strings (name, category
      // for now, arg names and string values in the future).
      auto record =
          std::unique_ptr<FuchsiaRecord>(new FuchsiaRecord(std::move(tbv)));
      record->set_ticks_per_second(current_provider_->ticks_per_second);

      uint64_t ticks;
      if (!cursor.ReadUint64(&ticks)) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }
      int64_t ts = fuchsia_trace_utils::TicksToNs(
          ticks, current_provider_->ticks_per_second);
      if (ts < 0) {
        storage->IncrementStats(stats::fuchsia_timestamp_overflow);
        return;
      }

      // Inline payloads stay in |tbv| for the sorted-parse stage; only
      // table-referenced entries need to be snapshotted into |record| now,
      // because the provider tables may change before the sorter drains.
      if (fuchsia_trace_utils::IsInlineThread(thread_ref)) {
        // Skip over inline thread
        cursor.ReadInlineThread(nullptr);
      } else {
        record->InsertThread(thread_ref,
                             current_provider_->thread_table[thread_ref]);
      }

      if (fuchsia_trace_utils::IsInlineString(cat_ref)) {
        // Skip over inline string
        cursor.ReadInlineString(cat_ref, nullptr);
      } else {
        record->InsertString(cat_ref, current_provider_->string_table[cat_ref]);
      }

      if (fuchsia_trace_utils::IsInlineString(name_ref)) {
        // Skip over inline string
        cursor.ReadInlineString(name_ref, nullptr);
      } else {
        record->InsertString(name_ref,
                             current_provider_->string_table[name_ref]);
      }

      // Walk the argument list, snapshotting any table-referenced names and
      // string values into |record|.
      uint32_t n_args =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 23);
      for (uint32_t i = 0; i < n_args; i++) {
        const size_t arg_base = cursor.WordIndex();
        uint64_t arg_header;
        if (!cursor.ReadUint64(&arg_header)) {
          storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
        uint32_t arg_type =
            fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 0, 3);
        uint32_t arg_size_words =
            fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 4, 15);
        uint32_t arg_name_ref =
            fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 16, 31);

        if (fuchsia_trace_utils::IsInlineString(arg_name_ref)) {
          // Skip over inline string
          cursor.ReadInlineString(arg_name_ref, nullptr);
        } else {
          record->InsertString(arg_name_ref,
                               current_provider_->string_table[arg_name_ref]);
        }

        if (arg_type == kArgString) {
          uint32_t arg_value_ref =
              fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 32, 47);
          if (fuchsia_trace_utils::IsInlineString(arg_value_ref)) {
            // Skip over inline string
            cursor.ReadInlineString(arg_value_ref, nullptr);
          } else {
            record->InsertString(
                arg_value_ref, current_provider_->string_table[arg_value_ref]);
          }
        }

        // Jump to the next argument using the declared size, regardless of
        // how much of this argument's payload was actually read.
        cursor.SetWordIndex(arg_base + arg_size_words);
      }

      // Events are parsed fully only after timestamp-sorting.
      sorter->PushFuchsiaRecord(ts, std::move(record));
      break;
    }
    case kKernelObject: {
      uint32_t obj_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      uint32_t name_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 39);

      uint64_t obj_id;
      if (!cursor.ReadUint64(&obj_id)) {
        storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }

      StringId name = StringId();
      if (fuchsia_trace_utils::IsInlineString(name_ref)) {
        base::StringView name_view;
        if (!cursor.ReadInlineString(name_ref, &name_view)) {
          storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
        name = storage->InternString(name_view);
      } else {
        name = current_provider_->string_table[name_ref];
      }

      switch (obj_type) {
        case kZxObjTypeProcess: {
          // Note: Fuchsia pid/tids are 64 bits but Perfetto's tables only
          // support 32 bits. This is usually not an issue except for
          // artificial koids which have the 2^63 bit set. This is used for
          // things such as virtual threads.
          procs->SetProcessMetadata(
              static_cast<uint32_t>(obj_id), base::Optional<uint32_t>(),
              base::StringView(storage->GetString(name)), base::StringView());
          break;
        }
        case kZxObjTypeThread: {
          uint32_t n_args =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 40, 43);
          uint64_t pid = 0;

          // Scan for a Kernel Object argument named "process"
          for (uint32_t i = 0; i < n_args; i++) {
            const size_t arg_base = cursor.WordIndex();
            uint64_t arg_header;
            if (!cursor.ReadUint64(&arg_header)) {
              storage->IncrementStats(stats::fuchsia_invalid_event);
              return;
            }
            uint32_t arg_type =
                fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 0, 3);
            uint32_t arg_size =
                fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 4, 15);
            if (arg_type == kArgKernelObject) {
              uint32_t arg_name_ref =
                  fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 16, 31);
              base::StringView arg_name;
              if (fuchsia_trace_utils::IsInlineString(arg_name_ref)) {
                if (!cursor.ReadInlineString(arg_name_ref, &arg_name)) {
                  storage->IncrementStats(stats::fuchsia_invalid_event);
                  return;
                }
              } else {
                arg_name = storage->GetString(
                    current_provider_->string_table[arg_name_ref]);
              }

              if (arg_name == "process") {
                if (!cursor.ReadUint64(&pid)) {
                  storage->IncrementStats(stats::fuchsia_invalid_event);
                  return;
                }
              }
            }

            // Advance to the next argument by its declared size.
            cursor.SetWordIndex(arg_base + arg_size);
          }

          // Remember tid -> pid so later context switch records can associate
          // threads with their process.
          pid_table_[obj_id] = pid;

          UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(obj_id),
                                               static_cast<uint32_t>(pid));
          storage->mutable_thread_table()->mutable_name()->Set(utid, name);
          break;
        }
        default: {
          // NOTE(review): %d with a uint32_t argument; %u (or PRIu32) would
          // be the matching format specifier.
          PERFETTO_DLOG("Skipping Kernel Object record with type %d", obj_type);
          break;
        }
      }
      break;
    }
    case kContextSwitch: {
      // Context switch records come in order, so they do not need to go through
      // TraceSorter.
      uint32_t cpu = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      uint32_t outgoing_state =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 27);
      uint32_t outgoing_thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 28, 35);
      uint32_t incoming_thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 36, 43);
      int32_t outgoing_priority =
          fuchsia_trace_utils::ReadField<int32_t>(header, 44, 51);

      int64_t ts;
      if (!cursor.ReadTimestamp(current_provider_->ticks_per_second, &ts)) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }
      if (ts == -1) {
        context_->storage->IncrementStats(stats::fuchsia_invalid_event);
        return;
      }

      fuchsia_trace_utils::ThreadInfo outgoing_thread;
      if (fuchsia_trace_utils::IsInlineThread(outgoing_thread_ref)) {
        if (!cursor.ReadInlineThread(&outgoing_thread)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
      } else {
        outgoing_thread = current_provider_->thread_table[outgoing_thread_ref];
      }

      fuchsia_trace_utils::ThreadInfo incoming_thread;
      if (fuchsia_trace_utils::IsInlineThread(incoming_thread_ref)) {
        if (!cursor.ReadInlineThread(&incoming_thread)) {
          context_->storage->IncrementStats(stats::fuchsia_invalid_event);
          return;
        }
      } else {
        incoming_thread = current_provider_->thread_table[incoming_thread_ref];
      }

      // A thread with priority 0 represents an idle CPU
      // Only emit a sched slice if we previously saw a thread start running
      // on this CPU (cpu_threads_ holds its start timestamp).
      if (cpu_threads_.count(cpu) != 0 && outgoing_priority != 0) {
        // TODO(bhamrick): Some early events will fail to associate with their
        // pid because the kernel object info event hasn't been processed yet.
        if (pid_table_.count(outgoing_thread.tid) > 0) {
          outgoing_thread.pid = pid_table_[outgoing_thread.tid];
        }

        UniqueTid utid =
            procs->UpdateThread(static_cast<uint32_t>(outgoing_thread.tid),
                                static_cast<uint32_t>(outgoing_thread.pid));
        RunningThread previous_thread = cpu_threads_[cpu];

        // Map the Fuchsia thread state onto the closest ftrace TaskState so
        // the sched table uses a uniform end-state vocabulary.
        ftrace_utils::TaskState end_state;
        switch (outgoing_state) {
          case kThreadNew:
          case kThreadRunning: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kRunnable);
            break;
          }
          case kThreadBlocked: {
            end_state = ftrace_utils::TaskState(
                ftrace_utils::TaskState::kInterruptibleSleep);
            break;
          }
          case kThreadSuspended: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kStopped);
            break;
          }
          case kThreadDying: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kExitZombie);
            break;
          }
          case kThreadDead: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kExitDead);
            break;
          }
          default: {
            break;
          }
        }

        // Unknown states fall through as an invalid TaskState -> null string.
        auto id =
            end_state.is_valid()
                ? context_->storage->InternString(end_state.ToString().data())
                : kNullStringId;
        storage->mutable_sched_slice_table()->Insert(
            {previous_thread.start_ts, ts - previous_thread.start_ts, cpu, utid,
             id, outgoing_priority});
      }

      // The incoming thread is now running on this CPU as of |ts|.
      RunningThread new_running;
      new_running.info = incoming_thread;
      new_running.start_ts = ts;
      cpu_threads_[cpu] = new_running;
      break;
    }
    default: {
      // NOTE(review): %d with a uint32_t argument; %u (or PRIu32) would be
      // the matching format specifier.
      PERFETTO_DLOG("Skipping record of unknown type %d", record_type);
      break;
    }
  }
}
566 
RegisterProvider(uint32_t provider_id,std::string name)567 void FuchsiaTraceTokenizer::RegisterProvider(uint32_t provider_id,
568                                              std::string name) {
569   std::unique_ptr<ProviderInfo> provider(new ProviderInfo());
570   provider->name = name;
571   current_provider_ = provider.get();
572   providers_[provider_id] = std::move(provider);
573 }
574 
// Intentionally empty: no end-of-trace finalization is performed, so any bytes
// still sitting in |leftover_bytes_| (an incomplete trailing record) are
// simply discarded.
void FuchsiaTraceTokenizer::NotifyEndOfFile() {}
576 
577 }  // namespace trace_processor
578 }  // namespace perfetto
579