/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/trace_processor/fuchsia_trace_tokenizer.h"

#include <inttypes.h>
#include <unordered_map>

#include "perfetto/base/logging.h"
#include "perfetto/base/string_view.h"
#include "src/trace_processor/ftrace_utils.h"
#include "src/trace_processor/fuchsia_provider_view.h"
#include "src/trace_processor/process_tracker.h"
#include "src/trace_processor/slice_tracker.h"
#include "src/trace_processor/trace_processor_context.h"
#include "src/trace_processor/trace_sorter.h"

namespace perfetto {
namespace trace_processor {

namespace {
// Record types
constexpr uint32_t kMetadata = 0;
constexpr uint32_t kInitialization = 1;
constexpr uint32_t kString = 2;
constexpr uint32_t kThread = 3;
constexpr uint32_t kEvent = 4;
constexpr uint32_t kKernelObject = 7;
constexpr uint32_t kContextSwitch = 8;

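// Every record begins with a 64-bit header word. As read by the parsing code
// below, bits [0,3] hold the record type and bits [4,15] hold the record
// length in 64-bit words; the remaining bits are type-specific. For example, a
// header word of 0x24 denotes an event record (type 4) that is 2 words
// (16 bytes) long.
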
// Metadata types
constexpr uint32_t kProviderInfo = 1;
constexpr uint32_t kProviderSection = 2;
constexpr uint32_t kProviderEvent = 3;

// Thread states
constexpr uint32_t kThreadNew = 0;
constexpr uint32_t kThreadRunning = 1;
constexpr uint32_t kThreadSuspended = 2;
constexpr uint32_t kThreadBlocked = 3;
constexpr uint32_t kThreadDying = 4;
constexpr uint32_t kThreadDead = 5;

// Zircon object types
constexpr uint32_t kZxObjTypeProcess = 1;
constexpr uint32_t kZxObjTypeThread = 2;

// Argument types
constexpr uint32_t kArgKernelObject = 8;
}  // namespace

FuchsiaTraceTokenizer::FuchsiaTraceTokenizer(TraceProcessorContext* context)
    : context_(context) {
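  // Provider 0 with an empty name is registered up front; RegisterProvider
  // also makes it the current provider, so |current_provider_| is valid even
  // before the first provider info record arrives.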
  RegisterProvider(0, "");
}

FuchsiaTraceTokenizer::~FuchsiaTraceTokenizer() = default;

bool FuchsiaTraceTokenizer::Parse(std::unique_ptr<uint8_t[]> data,
                                  size_t size) {
  // The relevant internal state is |leftover_bytes_|. Each call to Parse should
  // maintain the following properties, unless a fatal error occurs in which
  // case it should return false and no assumptions should be made about the
  // resulting internal state:
  //
  // 1) Every byte passed to |Parse| has either been passed to |ParseRecord| or
  // is present in |leftover_bytes_|, but not both.
  // 2) |leftover_bytes_| does not contain a complete record.
  //
  // Parse is responsible for creating the "full" |TraceBlobView|s, which own
  // the underlying data. Generally, there will be one such view. However, if
  // there is a record that started in an earlier call, then a new buffer is
  // created here to make the bytes in that record contiguous.
  //
  // Because some of the bytes in |data| might belong to the record starting in
  // |leftover_bytes_|, we track the offset at which the following record will
  // start.
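  //
  // For example, if the previous call ended with the first 24 bytes of a
  // 32-byte record, those bytes are sitting in |leftover_bytes_|; the first 8
  // bytes of |data| complete that record (stitched into a separate buffer
  // below), and |byte_offset| becomes 8 so that the main loop starts at the
  // next record boundary.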
  size_t byte_offset = 0;

  // Look for a record starting with the leftover bytes.
  if (leftover_bytes_.size() + size < 8) {
    // Even with the new bytes, we can't read the header of the next record,
    // so just add the new bytes to |leftover_bytes_| and return.
    leftover_bytes_.insert(leftover_bytes_.end(), data.get() + byte_offset,
                           data.get() + size);
    return true;
  }
  if (leftover_bytes_.size() > 0) {
    // There is a record starting from leftover bytes.
    if (leftover_bytes_.size() < 8) {
      // Header was previously incomplete, but we have enough now.
      // Copy bytes into |leftover_bytes_| so that the whole header is present,
      // and update |byte_offset| and |size| accordingly.
      size_t needed_bytes = 8 - leftover_bytes_.size();
      leftover_bytes_.insert(leftover_bytes_.end(), data.get() + byte_offset,
                             data.get() + needed_bytes);
      byte_offset += needed_bytes;
      size -= needed_bytes;
    }
    // Read the record length from the header.
    uint64_t header =
        *reinterpret_cast<const uint64_t*>(leftover_bytes_.data());
    uint32_t record_len_words =
        fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15);
    uint32_t record_len_bytes = record_len_words * sizeof(uint64_t);

    // From property (2) above, leftover_bytes_ must have had less than a full
    // record to start with. We padded leftover_bytes_ out to read the header,
    // so it may now be a full record (in the case that the record consists of
    // only the header word), but it still cannot have any extra bytes.
    PERFETTO_DCHECK(leftover_bytes_.size() <= record_len_bytes);
    size_t missing_bytes = record_len_bytes - leftover_bytes_.size();

    if (missing_bytes <= size) {
      // We have enough bytes to complete the partial record. Create a new
      // buffer for that record.
      std::unique_ptr<uint8_t[]> buf(new uint8_t[record_len_bytes]);
      memcpy(&buf[0], leftover_bytes_.data(), leftover_bytes_.size());
      memcpy(&buf[leftover_bytes_.size()], &data[byte_offset], missing_bytes);
      byte_offset += missing_bytes;
      size -= missing_bytes;
      leftover_bytes_.clear();

      TraceBlobView leftover_record(std::move(buf), 0, record_len_bytes);
      ParseRecord(std::move(leftover_record));
    } else {
      // There are not enough bytes for the full record. Add all the bytes we
      // have to leftover_bytes_ and wait for more.
      leftover_bytes_.insert(leftover_bytes_.end(), data.get() + byte_offset,
                             data.get() + byte_offset + size);
      return true;
    }
  }

  TraceBlobView full_view(std::move(data), byte_offset, size);

  // |record_offset| is a number of bytes past |byte_offset| where the record
  // under consideration starts. As a result, it must always be in the range [0,
  // size-8]. Any larger offset means we don't have enough bytes for the header.
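  //
  // For example, with size == 20 and a single 16-byte record at offset 0, the
  // loop below parses that record, and the trailing 4 bytes are saved in
  // |leftover_bytes_| for the next call.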
  size_t record_offset = 0;
  while (record_offset + 8 <= size) {
    uint64_t header =
        *reinterpret_cast<const uint64_t*>(full_view.data() + record_offset);
    uint32_t record_len_bytes =
        fuchsia_trace_utils::ReadField<uint32_t>(header, 4, 15) *
        sizeof(uint64_t);
    if (record_len_bytes == 0) {
      PERFETTO_DLOG("Unexpected record of size 0");
      return false;
    }

    if (record_offset + record_len_bytes > size)
      break;

    TraceBlobView record =
        full_view.slice(byte_offset + record_offset, record_len_bytes);
    ParseRecord(std::move(record));

    record_offset += record_len_bytes;
  }

  leftover_bytes_.insert(leftover_bytes_.end(),
                         full_view.data() + record_offset,
                         full_view.data() + size);
  return true;
}

// Most record types are read and recorded in |TraceStorage| here directly.
// Event records are sorted by timestamp before processing, so instead of
// recording them in |TraceStorage| they are given to |TraceSorter|. In order to
// facilitate the parsing after sorting, a small view of the provider's string
// and thread tables is passed alongside the record. See |FuchsiaProviderView|.
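// String and thread references inside an event are indexes into the emitting
// provider's tables, so the referenced entries are copied into the
// |FuchsiaProviderView| before the record is handed to the sorter.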
void FuchsiaTraceTokenizer::ParseRecord(TraceBlobView tbv) {
  TraceStorage* storage = context_->storage.get();
  ProcessTracker* procs = context_->process_tracker.get();
  TraceSorter* sorter = context_->sorter.get();

  const uint64_t* record = reinterpret_cast<const uint64_t*>(tbv.data());
  uint64_t header = *record;

  uint32_t record_type = fuchsia_trace_utils::ReadField<uint32_t>(header, 0, 3);
  switch (record_type) {
    case kMetadata: {
      uint32_t metadata_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 19);
      switch (metadata_type) {
        case kProviderInfo: {
          uint32_t provider_id =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
          uint32_t name_len =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 52, 59);
          std::string name(reinterpret_cast<const char*>(&record[1]), name_len);
          RegisterProvider(provider_id, name);
          break;
        }
        case kProviderSection: {
          uint32_t provider_id =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 20, 51);
          current_provider_ = providers_[provider_id].get();
          break;
        }
        case kProviderEvent: {
          // TODO(bhamrick): Handle buffer fill events
          PERFETTO_DLOG(
              "Ignoring provider event. Events may have been dropped");
          break;
        }
      }
      break;
    }
    case kInitialization: {
      current_provider_->ticks_per_second = record[1];
      break;
    }
    case kString: {
      uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 30);
      if (index != 0) {
        uint32_t len = fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 46);
        base::StringView s(reinterpret_cast<const char*>(&record[1]), len);
        StringId id = storage->InternString(s);
        current_provider_->string_table[index] = id;
      }
      break;
    }
    case kThread: {
      uint32_t index = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      if (index != 0) {
        fuchsia_trace_utils::ThreadInfo tinfo;
        tinfo.pid = record[1];
        tinfo.tid = record[2];

        current_provider_->thread_table[index] = tinfo;
      }
      break;
    }
    case kEvent: {
      uint32_t thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 31);
      uint32_t cat_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 32, 47);
      uint32_t name_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 48, 63);

      // Build the minimal FuchsiaProviderView needed by the record. This means
      // the thread information if it is not inline, and any non-inline strings
      // (name and category for now; arg names and string values in the
      // future).
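      //
      // The event payload is consumed in order below: the timestamp in ticks,
      // then an inline thread (if |thread_ref| is inline), then inline
      // category and name strings (if their refs are inline).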
      const uint64_t* current = &record[1];
      auto provider_view =
          std::unique_ptr<FuchsiaProviderView>(new FuchsiaProviderView());
      provider_view->set_ticks_per_second(current_provider_->ticks_per_second);

      uint64_t ticks = *current++;
      int64_t ts = fuchsia_trace_utils::TicksToNs(
          ticks, current_provider_->ticks_per_second);

      if (fuchsia_trace_utils::IsInlineThread(thread_ref)) {
        // Skip over inline thread
        fuchsia_trace_utils::ReadInlineThread(&current);
      } else {
        provider_view->InsertThread(
            thread_ref, current_provider_->thread_table[thread_ref]);
      }

      if (fuchsia_trace_utils::IsInlineString(cat_ref)) {
        // Skip over inline string
        fuchsia_trace_utils::ReadInlineString(&current, cat_ref);
      } else {
        provider_view->InsertString(cat_ref,
                                    current_provider_->string_table[cat_ref]);
      }

      if (fuchsia_trace_utils::IsInlineString(name_ref)) {
        // Skip over inline string
        fuchsia_trace_utils::ReadInlineString(&current, name_ref);
      } else {
        provider_view->InsertString(name_ref,
                                    current_provider_->string_table[name_ref]);
      }

      sorter->PushFuchsiaRecord(ts, std::move(tbv), std::move(provider_view));

      break;
    }
    case kKernelObject: {
      uint32_t obj_type =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      uint32_t name_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 39);

      const uint64_t* current = &record[1];
      uint64_t obj_id = *current++;

      StringId name = StringId();
      if (fuchsia_trace_utils::IsInlineString(name_ref)) {
        name = storage->InternString(
            fuchsia_trace_utils::ReadInlineString(&current, name_ref));
      } else {
        name = current_provider_->string_table[name_ref];
      }

      switch (obj_type) {
        case kZxObjTypeProcess: {
          // Note: Fuchsia pid/tids are 64 bits but Perfetto's tables only
          // support 32 bits. This is usually not an issue except for
          // artificial koids which have the 2^63 bit set. This is used for
          // things such as virtual threads.
          procs->UpdateProcess(static_cast<uint32_t>(obj_id),
                               base::Optional<uint32_t>(),
                               base::StringView(storage->GetString(name)));
          break;
        }
        case kZxObjTypeThread: {
          uint32_t n_args =
              fuchsia_trace_utils::ReadField<uint32_t>(header, 40, 43);
          uint64_t pid = 0;

          // Scan for a Kernel Object argument named "process"
          for (uint32_t i = 0; i < n_args; i++) {
            const uint64_t* arg_base = current;
            uint64_t arg_header = *current++;
            uint32_t arg_type =
                fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 0, 3);
            uint32_t arg_size =
                fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 4, 15);
            if (arg_type == kArgKernelObject) {
              uint32_t arg_name_ref =
                  fuchsia_trace_utils::ReadField<uint32_t>(arg_header, 16, 31);
              base::StringView arg_name;
              if (fuchsia_trace_utils::IsInlineString(arg_name_ref)) {
                arg_name = fuchsia_trace_utils::ReadInlineString(&current,
                                                                 arg_name_ref);
              } else {
                arg_name = storage->GetString(
                    current_provider_->string_table[arg_name_ref]);
              }

              if (arg_name == "process") {
                pid = *current++;
              }
            }

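            // |arg_size| is in 64-bit words, so this advances past the whole
            // argument from its header, including payload words that were not
            // decoded above.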
            current = arg_base + arg_size;
          }

          pid_table_[obj_id] = pid;

          UniqueTid utid = procs->UpdateThread(static_cast<uint32_t>(obj_id),
                                               static_cast<uint32_t>(pid));
          storage->GetMutableThread(utid)->name_id = name;
          break;
        }
        default: {
          PERFETTO_DLOG("Skipping Kernel Object record with type %" PRIu32,
                        obj_type);
          break;
        }
      }
      break;
    }
    case kContextSwitch: {
      // Context switch records come in order, so they do not need to go through
      // TraceSorter.
      uint32_t cpu = fuchsia_trace_utils::ReadField<uint32_t>(header, 16, 23);
      uint32_t outgoing_state =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 24, 27);
      uint32_t outgoing_thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 28, 35);
      uint32_t incoming_thread_ref =
          fuchsia_trace_utils::ReadField<uint32_t>(header, 36, 43);
      int32_t outgoing_priority =
          fuchsia_trace_utils::ReadField<int32_t>(header, 44, 51);

      uint64_t ticks = record[1];
      int64_t ts = fuchsia_trace_utils::TicksToNs(
          ticks, current_provider_->ticks_per_second);

      const uint64_t* current = &record[2];

      fuchsia_trace_utils::ThreadInfo outgoing_thread;
      if (fuchsia_trace_utils::IsInlineThread(outgoing_thread_ref)) {
        outgoing_thread = fuchsia_trace_utils::ReadInlineThread(&current);
      } else {
        outgoing_thread = current_provider_->thread_table[outgoing_thread_ref];
      }

      fuchsia_trace_utils::ThreadInfo incoming_thread;
      if (fuchsia_trace_utils::IsInlineThread(incoming_thread_ref)) {
        incoming_thread = fuchsia_trace_utils::ReadInlineThread(&current);
      } else {
        incoming_thread = current_provider_->thread_table[incoming_thread_ref];
      }

      // A thread with priority 0 represents an idle CPU
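      // The first switch seen on a CPU only records the incoming thread; no
      // slice is emitted for it because the previous thread's start timestamp
      // is unknown.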
      if (cpu_threads_.count(cpu) != 0 && outgoing_priority != 0) {
        // TODO(bhamrick): Some early events will fail to associate with their
        // pid because the kernel object info event hasn't been processed yet.
        if (pid_table_.count(outgoing_thread.tid) > 0) {
          outgoing_thread.pid = pid_table_[outgoing_thread.tid];
        }

        UniqueTid utid =
            procs->UpdateThread(static_cast<uint32_t>(outgoing_thread.tid),
                                static_cast<uint32_t>(outgoing_thread.pid));
        RunningThread previous_thread = cpu_threads_[cpu];

        ftrace_utils::TaskState end_state;
        switch (outgoing_state) {
          case kThreadNew:
          case kThreadRunning: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kRunnable);
            break;
          }
          case kThreadBlocked: {
            end_state = ftrace_utils::TaskState(
                ftrace_utils::TaskState::kInterruptibleSleep);
            break;
          }
          case kThreadSuspended: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kStopped);
            break;
          }
          case kThreadDying: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kExitZombie);
            break;
          }
          case kThreadDead: {
            end_state =
                ftrace_utils::TaskState(ftrace_utils::TaskState::kExitDead);
            break;
          }
          default: { break; }
        }

        storage->mutable_slices()->AddSlice(cpu, previous_thread.start_ts,
                                            ts - previous_thread.start_ts, utid,
                                            end_state, outgoing_priority);
      }

      RunningThread new_running;
      new_running.info = incoming_thread;
      new_running.start_ts = ts;
      cpu_threads_[cpu] = new_running;
      break;
    }
    default: {
      PERFETTO_DLOG("Skipping record of unknown type %" PRIu32, record_type);
      break;
    }
  }
}

void FuchsiaTraceTokenizer::RegisterProvider(uint32_t provider_id,
                                             std::string name) {
  std::unique_ptr<ProviderInfo> provider(new ProviderInfo());
  provider->name = name;
  current_provider_ = provider.get();
  providers_[provider_id] = std::move(provider);
}

}  // namespace trace_processor
}  // namespace perfetto