1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "perfetto/ext/trace_processor/export_json.h"
18 #include "src/trace_processor/export_json.h"
19 
20 #include <inttypes.h>
21 #include <stdio.h>
22 
23 #include <algorithm>
24 #include <cmath>
25 #include <cstring>
26 #include <deque>
27 #include <limits>
28 
29 #include "perfetto/base/build_config.h"
30 #include "perfetto/ext/base/string_splitter.h"
31 #include "perfetto/ext/base/string_utils.h"
32 #include "src/trace_processor/importers/json/json_utils.h"
33 #include "src/trace_processor/storage/metadata.h"
34 #include "src/trace_processor/storage/trace_storage.h"
35 #include "src/trace_processor/trace_processor_storage_impl.h"
36 #include "src/trace_processor/types/trace_processor_context.h"
37 
38 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
39 #include <json/reader.h>
40 #include <json/writer.h>
41 #endif
42 
43 namespace perfetto {
44 namespace trace_processor {
45 namespace json {
46 
47 namespace {
48 
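// Appends the serialized JSON output to a FILE*, flushing it on destruction.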
49 class FileWriter : public OutputWriter {
50  public:
51   FileWriter(FILE* file) : file_(file) {}
52   ~FileWriter() override { fflush(file_); }
53 
54   util::Status AppendString(const std::string& s) override {
55     size_t written =
56         fwrite(s.data(), sizeof(std::string::value_type), s.size(), file_);
57     if (written != s.size())
58       return util::ErrStatus("Error writing to file: %d", ferror(file_));
59     return util::OkStatus();
60   }
61 
62  private:
63   FILE* file_;
64 };
65 
66 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
67 using IndexMap = perfetto::trace_processor::TraceStorage::Stats::IndexMap;
68 
69 const char kLegacyEventArgsKey[] = "legacy_event";
70 const char kLegacyEventPassthroughUtidKey[] = "passthrough_utid";
71 const char kLegacyEventCategoryKey[] = "category";
72 const char kLegacyEventNameKey[] = "name";
73 const char kLegacyEventPhaseKey[] = "phase";
74 const char kLegacyEventDurationNsKey[] = "duration_ns";
75 const char kLegacyEventThreadTimestampNsKey[] = "thread_timestamp_ns";
76 const char kLegacyEventThreadDurationNsKey[] = "thread_duration_ns";
77 const char kLegacyEventThreadInstructionCountKey[] = "thread_instruction_count";
78 const char kLegacyEventThreadInstructionDeltaKey[] = "thread_instruction_delta";
79 const char kLegacyEventUseAsyncTtsKey[] = "use_async_tts";
80 const char kLegacyEventUnscopedIdKey[] = "unscoped_id";
81 const char kLegacyEventGlobalIdKey[] = "global_id";
82 const char kLegacyEventLocalIdKey[] = "local_id";
83 const char kLegacyEventIdScopeKey[] = "id_scope";
84 const char kLegacyEventBindIdKey[] = "bind_id";
85 const char kLegacyEventBindToEnclosingKey[] = "bind_to_enclosing";
86 const char kLegacyEventFlowDirectionKey[] = "flow_direction";
87 const char kFlowDirectionValueIn[] = "in";
88 const char kFlowDirectionValueOut[] = "out";
89 const char kFlowDirectionValueInout[] = "inout";
90 const char kStrippedArgument[] = "__stripped__";
91 
92 const char* GetNonNullString(const TraceStorage* storage, StringId id) {
93   return id == kNullStringId ? "" : storage->GetString(id).c_str();
94 }
95 
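// Formats |x| as a "0x"-prefixed hexadecimal string, as used for JSON id-like
// fields and pointer values.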
96 std::string PrintUint64(uint64_t x) {
97   char hex_str[19];
98   sprintf(hex_str, "0x%" PRIx64, x);
99   return hex_str;
100 }
101 
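// Copies legacy flow-event information (bind_id, bind_to_enclosing,
// flow_direction) from |legacy_args| into the corresponding JSON trace fields
// ("bind_id", "bp", "flow_in"/"flow_out") on |event|.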
102 void ConvertLegacyFlowEventArgs(const Json::Value& legacy_args,
103                                 Json::Value* event) {
104   if (legacy_args.isMember(kLegacyEventBindIdKey)) {
105     (*event)["bind_id"] =
106         PrintUint64(legacy_args[kLegacyEventBindIdKey].asUInt64());
107   }
108 
109   if (legacy_args.isMember(kLegacyEventBindToEnclosingKey))
110     (*event)["bp"] = "e";
111 
112   if (legacy_args.isMember(kLegacyEventFlowDirectionKey)) {
113     const char* val = legacy_args[kLegacyEventFlowDirectionKey].asCString();
114     if (strcmp(val, kFlowDirectionValueIn) == 0) {
115       (*event)["flow_in"] = true;
116     } else if (strcmp(val, kFlowDirectionValueOut) == 0) {
117       (*event)["flow_out"] = true;
118     } else {
119       PERFETTO_DCHECK(strcmp(val, kFlowDirectionValueInout) == 0);
120       (*event)["flow_in"] = true;
121       (*event)["flow_out"] = true;
122     }
123   }
124 }
125 
126 class JsonExporter {
127  public:
128   JsonExporter(const TraceStorage* storage,
129                OutputWriter* output,
130                ArgumentFilterPredicate argument_filter,
131                MetadataFilterPredicate metadata_filter,
132                LabelFilterPredicate label_filter)
133       : storage_(storage),
134         args_builder_(storage_),
135         writer_(output, argument_filter, metadata_filter, label_filter) {}
136 
137   util::Status Export() {
138     util::Status status = MapUniquePidsAndTids();
139     if (!status.ok())
140       return status;
141 
142     status = ExportThreadNames();
143     if (!status.ok())
144       return status;
145 
146     status = ExportProcessNames();
147     if (!status.ok())
148       return status;
149 
150     status = ExportSlices();
151     if (!status.ok())
152       return status;
153 
154     status = ExportRawEvents();
155     if (!status.ok())
156       return status;
157 
158     status = ExportCpuProfileSamples();
159     if (!status.ok())
160       return status;
161 
162     status = ExportMetadata();
163     if (!status.ok())
164       return status;
165 
166     status = ExportStats();
167     if (!status.ok())
168       return status;
169 
170     return util::OkStatus();
171   }
172 
173  private:
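  // Streams the Chrome JSON trace format to |output_|: a "traceEvents" array
  // followed by optional "systemTraceEvents" and "metadata" sections. Buffers
  // async events so they can be emitted in a stable order, and applies the
  // argument, metadata and label filters.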
174   class TraceFormatWriter {
175    public:
176     TraceFormatWriter(OutputWriter* output,
177                       ArgumentFilterPredicate argument_filter,
178                       MetadataFilterPredicate metadata_filter,
179                       LabelFilterPredicate label_filter)
180         : output_(output),
181           argument_filter_(argument_filter),
182           metadata_filter_(metadata_filter),
183           label_filter_(label_filter),
184           first_event_(true) {
185       WriteHeader();
186     }
187 
188     ~TraceFormatWriter() { WriteFooter(); }
189 
190     void WriteCommonEvent(const Json::Value& event) {
191       if (label_filter_ && !label_filter_("traceEvents"))
192         return;
193 
194       DoWriteEvent(event);
195     }
196 
197     void AddAsyncBeginEvent(const Json::Value& event) {
198       if (label_filter_ && !label_filter_("traceEvents"))
199         return;
200 
201       async_begin_events_.push_back(event);
202     }
203 
204     void AddAsyncInstantEvent(const Json::Value& event) {
205       if (label_filter_ && !label_filter_("traceEvents"))
206         return;
207 
208       async_instant_events_.push_back(event);
209     }
210 
211     void AddAsyncEndEvent(const Json::Value& event) {
212       if (label_filter_ && !label_filter_("traceEvents"))
213         return;
214 
215       async_end_events_.push_back(event);
216     }
217 
218     void SortAndEmitAsyncEvents() {
219       // Catapult doesn't handle out-of-order begin/end events well, especially
220       // when their timestamps are the same, but their order is incorrect. Since
221       // we process events sorted by begin timestamp, |async_begin_events_| and
222       // |async_instant_events_| are already sorted. We now only have to sort
223       // |async_end_events_| and merge-sort all events into a single sequence.
224 
225       // Sort |async_end_events_|. Note that we should order by ascending
226       // timestamp, but in reverse-stable order. This way, a child slice's end
227       // is emitted before its parent's end event, even if both end events have
228       // the same timestamp. To accomplish this, we perform a stable sort in
229       // descending order and later iterate via reverse iterators.
230       struct {
231         bool operator()(const Json::Value& a, const Json::Value& b) const {
232           return a["ts"].asInt64() > b["ts"].asInt64();
233         }
234       } CompareEvents;
235       std::stable_sort(async_end_events_.begin(), async_end_events_.end(),
236                        CompareEvents);
237 
238       // Merge sort by timestamp. If events share the same timestamp, prefer
239       // instant events, then end events, so that old slices close before new
240       // ones are opened, but instant events remain in their deepest nesting
241       // level.
242       auto instant_event_it = async_instant_events_.begin();
243       auto end_event_it = async_end_events_.rbegin();
244       auto begin_event_it = async_begin_events_.begin();
245 
246       auto has_instant_event = instant_event_it != async_instant_events_.end();
247       auto has_end_event = end_event_it != async_end_events_.rend();
248       auto has_begin_event = begin_event_it != async_begin_events_.end();
249 
250       auto emit_next_instant = [&instant_event_it, &has_instant_event, this]() {
251         DoWriteEvent(*instant_event_it);
252         instant_event_it++;
253         has_instant_event = instant_event_it != async_instant_events_.end();
254       };
255       auto emit_next_end = [&end_event_it, &has_end_event, this]() {
256         DoWriteEvent(*end_event_it);
257         end_event_it++;
258         has_end_event = end_event_it != async_end_events_.rend();
259       };
260       auto emit_next_begin = [&begin_event_it, &has_begin_event, this]() {
261         DoWriteEvent(*begin_event_it);
262         begin_event_it++;
263         has_begin_event = begin_event_it != async_begin_events_.end();
264       };
265 
266       auto emit_next_instant_or_end = [&instant_event_it, &end_event_it,
267                                        &emit_next_instant, &emit_next_end]() {
268         if ((*instant_event_it)["ts"].asInt64() <=
269             (*end_event_it)["ts"].asInt64()) {
270           emit_next_instant();
271         } else {
272           emit_next_end();
273         }
274       };
275       auto emit_next_instant_or_begin = [&instant_event_it, &begin_event_it,
276                                          &emit_next_instant,
277                                          &emit_next_begin]() {
278         if ((*instant_event_it)["ts"].asInt64() <=
279             (*begin_event_it)["ts"].asInt64()) {
280           emit_next_instant();
281         } else {
282           emit_next_begin();
283         }
284       };
285       auto emit_next_end_or_begin = [&end_event_it, &begin_event_it,
286                                      &emit_next_end, &emit_next_begin]() {
287         if ((*end_event_it)["ts"].asInt64() <=
288             (*begin_event_it)["ts"].asInt64()) {
289           emit_next_end();
290         } else {
291           emit_next_begin();
292         }
293       };
294 
295       // While we still have events in all iterators, consider each.
296       while (has_instant_event && has_end_event && has_begin_event) {
297         if ((*instant_event_it)["ts"].asInt64() <=
298             (*end_event_it)["ts"].asInt64()) {
299           emit_next_instant_or_begin();
300         } else {
301           emit_next_end_or_begin();
302         }
303       }
304 
305       // Only instant and end events left.
306       while (has_instant_event && has_end_event) {
307         emit_next_instant_or_end();
308       }
309 
310       // Only instant and begin events left.
311       while (has_instant_event && has_begin_event) {
312         emit_next_instant_or_begin();
313       }
314 
315       // Only end and begin events left.
316       while (has_end_event && has_begin_event) {
317         emit_next_end_or_begin();
318       }
319 
320       // Remaining instant events.
321       while (has_instant_event) {
322         emit_next_instant();
323       }
324 
325       // Remaining end events.
326       while (has_end_event) {
327         emit_next_end();
328       }
329 
330       // Remaining begin events.
331       while (has_begin_event) {
332         emit_next_begin();
333       }
334     }
335 
336     void WriteMetadataEvent(const char* metadata_type,
337                             const char* metadata_value,
338                             uint32_t pid,
339                             uint32_t tid) {
340       if (label_filter_ && !label_filter_("traceEvents"))
341         return;
342 
343       if (!first_event_)
344         output_->AppendString(",\n");
345 
346       Json::FastWriter writer;
347       writer.omitEndingLineFeed();
348       Json::Value value;
349       value["ph"] = "M";
350       value["cat"] = "__metadata";
351       value["ts"] = 0;
352       value["name"] = metadata_type;
353       value["pid"] = Json::Int(pid);
354       value["tid"] = Json::Int(tid);
355 
356       Json::Value args;
357       args["name"] = metadata_value;
358       value["args"] = args;
359 
360       output_->AppendString(writer.write(value));
361       first_event_ = false;
362     }
363 
364     void MergeMetadata(const Json::Value& value) {
365       for (const auto& member : value.getMemberNames()) {
366         metadata_[member] = value[member];
367       }
368     }
369 
370     void AppendTelemetryMetadataString(const char* key, const char* value) {
371       metadata_["telemetry"][key].append(value);
372     }
373 
374     void AppendTelemetryMetadataInt(const char* key, int64_t value) {
375       metadata_["telemetry"][key].append(Json::Int64(value));
376     }
377 
378     void AppendTelemetryMetadataBool(const char* key, bool value) {
379       metadata_["telemetry"][key].append(value);
380     }
381 
382     void SetTelemetryMetadataTimestamp(const char* key, int64_t value) {
383       metadata_["telemetry"][key] = value / 1000.0;
384     }
385 
386     void SetStats(const char* key, int64_t value) {
387       metadata_["trace_processor_stats"][key] = Json::Int64(value);
388     }
389 
390     void SetStats(const char* key, const IndexMap& indexed_values) {
391       constexpr const char* kBufferStatsPrefix = "traced_buf_";
392 
393       // Stats for the same buffer should be grouped together in the JSON.
394       if (strncmp(kBufferStatsPrefix, key, strlen(kBufferStatsPrefix)) == 0) {
395         for (const auto& value : indexed_values) {
396           metadata_["trace_processor_stats"]["traced_buf"][value.first]
397                    [key + strlen(kBufferStatsPrefix)] =
398                        Json::Int64(value.second);
399         }
400         return;
401       }
402 
403       // Other indexed value stats are exported as array under their key.
404       for (const auto& value : indexed_values) {
405         metadata_["trace_processor_stats"][key][value.first] =
406             Json::Int64(value.second);
407       }
408     }
409 
410     void AddSystemTraceData(const std::string& data) {
411       system_trace_data_ += data;
412     }
413 
414     void AddUserTraceData(const std::string& data) {
415       if (user_trace_data_.empty())
416         user_trace_data_ = "[";
417       user_trace_data_ += data;
418     }
419 
420    private:
421     void WriteHeader() {
422       if (!label_filter_)
423         output_->AppendString("{\"traceEvents\":[\n");
424     }
425 
426     void WriteFooter() {
427       SortAndEmitAsyncEvents();
428 
429       // Filter metadata entries.
430       if (metadata_filter_) {
431         for (const auto& member : metadata_.getMemberNames()) {
432           if (!metadata_filter_(member.c_str()))
433             metadata_[member] = kStrippedArgument;
434         }
435       }
436 
437       Json::FastWriter writer;
438       writer.omitEndingLineFeed();
439       if ((!label_filter_ || label_filter_("traceEvents")) &&
440           !user_trace_data_.empty()) {
441         user_trace_data_ += "]";
442         Json::Reader reader;
443         Json::Value result;
444         if (reader.parse(user_trace_data_, result)) {
445           for (const auto& event : result) {
446             WriteCommonEvent(event);
447           }
448         } else {
449           PERFETTO_DLOG(
450               "can't parse legacy user json trace export, skipping. data: %s",
451               user_trace_data_.c_str());
452         }
453       }
454       if (!label_filter_)
455         output_->AppendString("]");
456       if ((!label_filter_ || label_filter_("systemTraceEvents")) &&
457           !system_trace_data_.empty()) {
458         output_->AppendString(",\"systemTraceEvents\":\n");
459         output_->AppendString(writer.write(Json::Value(system_trace_data_)));
460       }
461       if ((!label_filter_ || label_filter_("metadata")) && !metadata_.empty()) {
462         output_->AppendString(",\"metadata\":\n");
463         output_->AppendString(writer.write(metadata_));
464       }
465       if (!label_filter_)
466         output_->AppendString("}");
467     }
468 
469     void DoWriteEvent(const Json::Value& event) {
470       if (!first_event_)
471         output_->AppendString(",\n");
472 
473       Json::FastWriter writer;
474       writer.omitEndingLineFeed();
475 
476       ArgumentNameFilterPredicate argument_name_filter;
477       bool strip_args =
478           argument_filter_ &&
479           !argument_filter_(event["cat"].asCString(), event["name"].asCString(),
480                             &argument_name_filter);
481       if ((strip_args || argument_name_filter) && event.isMember("args")) {
482         Json::Value event_copy = event;
483         if (strip_args) {
484           event_copy["args"] = kStrippedArgument;
485         } else {
486           auto& args = event_copy["args"];
487           for (const auto& member : event["args"].getMemberNames()) {
488             if (!argument_name_filter(member.c_str()))
489               args[member] = kStrippedArgument;
490           }
491         }
492         output_->AppendString(writer.write(event_copy));
493       } else {
494         output_->AppendString(writer.write(event));
495       }
496       first_event_ = false;
497     }
498 
499     OutputWriter* output_;
500     ArgumentFilterPredicate argument_filter_;
501     MetadataFilterPredicate metadata_filter_;
502     LabelFilterPredicate label_filter_;
503 
504     bool first_event_;
505     Json::Value metadata_;
506     std::string system_trace_data_;
507     std::string user_trace_data_;
508     std::vector<Json::Value> async_begin_events_;
509     std::vector<Json::Value> async_instant_events_;
510     std::vector<Json::Value> async_end_events_;
511   };
512 
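  // Pre-builds a JSON value for every arg set in |storage| by walking the arg
  // table once. Dotted keys and "[index]" suffixes expand into nested objects
  // and arrays; PostprocessArgs() then hoists "debug.*" fields and rewrites
  // task.posted_from source locations.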
513   class ArgsBuilder {
514    public:
515     explicit ArgsBuilder(const TraceStorage* storage)
516         : storage_(storage),
517           empty_value_(Json::objectValue),
518           nan_value_(Json::StaticString("NaN")),
519           inf_value_(Json::StaticString("Infinity")),
520           neg_inf_value_(Json::StaticString("-Infinity")) {
521       const auto& arg_table = storage_->arg_table();
522       uint32_t count = arg_table.row_count();
523       if (count == 0) {
524         args_sets_.resize(1, empty_value_);
525         return;
526       }
527       args_sets_.resize(arg_table.arg_set_id()[count - 1] + 1, empty_value_);
528 
529       for (uint32_t i = 0; i < count; ++i) {
530         ArgSetId set_id = arg_table.arg_set_id()[i];
531         const char* key = arg_table.key().GetString(i).c_str();
532         Variadic value = storage_->GetArgValue(i);
533         AppendArg(set_id, key, VariadicToJson(value));
534       }
535       PostprocessArgs();
536     }
537 
538     const Json::Value& GetArgs(ArgSetId set_id) const {
539       // If |set_id| was empty and added to the storage last, it may not be in
540       // args_sets_.
541       if (set_id > args_sets_.size())
542         return empty_value_;
543       return args_sets_[set_id];
544     }
545 
546    private:
547     Json::Value VariadicToJson(Variadic variadic) {
548       switch (variadic.type) {
549         case Variadic::kInt:
550           return Json::Int64(variadic.int_value);
551         case Variadic::kUint:
552           return Json::UInt64(variadic.uint_value);
553         case Variadic::kString:
554           return GetNonNullString(storage_, variadic.string_value);
555         case Variadic::kReal:
556           if (std::isnan(variadic.real_value)) {
557             return nan_value_;
558           } else if (std::isinf(variadic.real_value) &&
559                      variadic.real_value > 0) {
560             return inf_value_;
561           } else if (std::isinf(variadic.real_value) &&
562                      variadic.real_value < 0) {
563             return neg_inf_value_;
564           } else {
565             return variadic.real_value;
566           }
567         case Variadic::kPointer:
568           return PrintUint64(variadic.pointer_value);
569         case Variadic::kBool:
570           return variadic.bool_value;
571         case Variadic::kJson:
572           Json::Reader reader;
573           Json::Value result;
574           reader.parse(GetNonNullString(storage_, variadic.json_value), result);
575           return result;
576       }
577       PERFETTO_FATAL("Not reached");  // For gcc.
578     }
579 
580     void AppendArg(ArgSetId set_id,
581                    const std::string& key,
582                    const Json::Value& value) {
583       Json::Value* target = &args_sets_[set_id];
584       for (base::StringSplitter parts(key, '.'); parts.Next();) {
585         if (PERFETTO_UNLIKELY(!target->isNull() && !target->isObject())) {
586           PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
587                         key.c_str(),
588                         args_sets_[set_id].toStyledString().c_str());
589           return;
590         }
591         std::string key_part = parts.cur_token();
592         size_t bracketpos = key_part.find('[');
593         if (bracketpos == key_part.npos) {  // A single item
594           target = &(*target)[key_part];
595         } else {  // A list item
596           target = &(*target)[key_part.substr(0, bracketpos)];
597           while (bracketpos != key_part.npos) {
598             // We constructed this string from an int earlier in trace_processor
599             // so it shouldn't be possible for this (or the StringToUInt32
600             // below) to fail.
601             std::string s =
602                 key_part.substr(bracketpos + 1, key_part.find(']', bracketpos) -
603                                                     bracketpos - 1);
604             if (PERFETTO_UNLIKELY(!target->isNull() && !target->isArray())) {
605               PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
606                             key.c_str(),
607                             args_sets_[set_id].toStyledString().c_str());
608               return;
609             }
610             base::Optional<uint32_t> index = base::StringToUInt32(s);
611             if (PERFETTO_UNLIKELY(!index)) {
612               PERFETTO_ELOG("Expected to be able to extract index from %s",
613                             key_part.c_str());
614               return;
615             }
616             target = &(*target)[index.value()];
617             bracketpos = key_part.find('[', bracketpos + 1);
618           }
619         }
620       }
621       *target = value;
622     }
623 
624     void PostprocessArgs() {
625       for (Json::Value& args : args_sets_) {
626         // Move all fields from "debug" key to upper level.
627         if (args.isMember("debug")) {
628           Json::Value debug = args["debug"];
629           args.removeMember("debug");
630           for (const auto& member : debug.getMemberNames()) {
631             args[member] = debug[member];
632           }
633         }
634 
635         // Rename source fields.
636         if (args.isMember("task")) {
637           if (args["task"].isMember("posted_from")) {
638             Json::Value posted_from = args["task"]["posted_from"];
639             args["task"].removeMember("posted_from");
640             if (posted_from.isMember("function_name")) {
641               args["src_func"] = posted_from["function_name"];
642               args["src_file"] = posted_from["file_name"];
643             } else if (posted_from.isMember("file_name")) {
644               args["src"] = posted_from["file_name"];
645             }
646           }
647           if (args["task"].empty())
648             args.removeMember("task");
649         }
650       }
651     }
652 
653     const TraceStorage* storage_;
654     std::vector<Json::Value> args_sets_;
655     const Json::Value empty_value_;
656     const Json::Value nan_value_;
657     const Json::Value inf_value_;
658     const Json::Value neg_inf_value_;
659   };
660 
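  // Builds the mappings between trace processor upids/utids and the exported
  // pids/tids, assigning substitute values (counting down from uint32_t max)
  // whenever a pid or tid is reused.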
661   util::Status MapUniquePidsAndTids() {
662     const auto& process_table = storage_->process_table();
663     for (UniquePid upid = 0; upid < process_table.row_count(); upid++) {
664       uint32_t exported_pid = process_table.pid()[upid];
665       auto it_and_inserted =
666           exported_pids_to_upids_.emplace(exported_pid, upid);
667       if (!it_and_inserted.second) {
668         exported_pid = NextExportedPidOrTidForDuplicates();
669         it_and_inserted = exported_pids_to_upids_.emplace(exported_pid, upid);
670       }
671       upids_to_exported_pids_.emplace(upid, exported_pid);
672     }
673 
674     const auto& thread_table = storage_->thread_table();
675     for (UniqueTid utid = 0; utid < thread_table.row_count(); utid++) {
676       uint32_t exported_pid = 0;
677       base::Optional<UniquePid> upid = thread_table.upid()[utid];
678       if (upid) {
679         auto exported_pid_it = upids_to_exported_pids_.find(*upid);
680         PERFETTO_DCHECK(exported_pid_it != upids_to_exported_pids_.end());
681         exported_pid = exported_pid_it->second;
682       }
683 
684       uint32_t exported_tid = thread_table.tid()[utid];
685       auto it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
686           std::make_pair(exported_pid, exported_tid), utid);
687       if (!it_and_inserted.second) {
688         exported_tid = NextExportedPidOrTidForDuplicates();
689         it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
690             std::make_pair(exported_pid, exported_tid), utid);
691       }
692       utids_to_exported_pids_and_tids_.emplace(
693           utid, std::make_pair(exported_pid, exported_tid));
694     }
695 
696     return util::OkStatus();
697   }
698 
699   util::Status ExportThreadNames() {
700     const auto& thread_table = storage_->thread_table();
701     for (UniqueTid utid = 0; utid < thread_table.row_count(); ++utid) {
702       auto opt_name = thread_table.name()[utid];
703       if (!opt_name.is_null()) {
704         const char* thread_name = GetNonNullString(storage_, opt_name);
705         auto pid_and_tid = UtidToPidAndTid(utid);
706         writer_.WriteMetadataEvent("thread_name", thread_name,
707                                    pid_and_tid.first, pid_and_tid.second);
708       }
709     }
710     return util::OkStatus();
711   }
712 
713   util::Status ExportProcessNames() {
714     const auto& process_table = storage_->process_table();
715     for (UniquePid upid = 0; upid < process_table.row_count(); ++upid) {
716       auto opt_name = process_table.name()[upid];
717       if (!opt_name.is_null()) {
718         const char* process_name = GetNonNullString(storage_, opt_name);
719         writer_.WriteMetadataEvent("process_name", process_name,
720                                    UpidToPid(upid), /*tid=*/0);
721       }
722     }
723     return util::OkStatus();
724   }
725 
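  // Exports slices from the slice table: slices on thread tracks become
  // "X"/"B"/"I" events, slices on child or legacy chrome async tracks become
  // nestable async "b"/"n"/"e" events, and zero-duration slices on global or
  // process tracks become instant events.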
726   util::Status ExportSlices() {
727     const auto& slices = storage_->slice_table();
728     for (uint32_t i = 0; i < slices.row_count(); ++i) {
729       // Skip slices with empty category - these are ftrace/system slices that
730       // were also imported into the raw table and will be exported from there
731       // by trace_to_text.
732       // TODO(b/153609716): Add a src column or do_not_export flag instead.
733       auto cat = slices.category().GetString(i);
734       if (cat.c_str() == nullptr || cat == "binder")
735         continue;
736 
737       Json::Value event;
738       event["ts"] = Json::Int64(slices.ts()[i] / 1000);
739       event["cat"] = GetNonNullString(storage_, slices.category()[i]);
740       event["name"] = GetNonNullString(storage_, slices.name()[i]);
741       event["pid"] = 0;
742       event["tid"] = 0;
743 
744       base::Optional<UniqueTid> legacy_utid;
745 
746       event["args"] =
747           args_builder_.GetArgs(slices.arg_set_id()[i]);  // Makes a copy.
748       if (event["args"].isMember(kLegacyEventArgsKey)) {
749         ConvertLegacyFlowEventArgs(event["args"][kLegacyEventArgsKey], &event);
750 
751         if (event["args"][kLegacyEventArgsKey].isMember(
752                 kLegacyEventPassthroughUtidKey)) {
753           legacy_utid =
754               event["args"][kLegacyEventArgsKey][kLegacyEventPassthroughUtidKey]
755                   .asUInt();
756         }
757 
758         event["args"].removeMember(kLegacyEventArgsKey);
759       }
760 
761       // To prevent duplicate export of slices, only export slices on descriptor
762       // or chrome tracks (i.e. TrackEvent slices). Slices on other tracks may
763       // also be present as raw events and handled by trace_to_text. Only add
764       // more track types here if they are not already covered by trace_to_text.
765       TrackId track_id = slices.track_id()[i];
766 
767       const auto& track_table = storage_->track_table();
768 
769       uint32_t track_row = *track_table.id().IndexOf(track_id);
770       auto track_args_id = track_table.source_arg_set_id()[track_row];
771       const Json::Value* track_args = nullptr;
772       bool legacy_chrome_track = false;
773       bool is_child_track = false;
774       if (track_args_id) {
775         track_args = &args_builder_.GetArgs(*track_args_id);
776         legacy_chrome_track = (*track_args)["source"].asString() == "chrome";
777         is_child_track = track_args->isMember("parent_track_id");
778       }
779 
780       const auto& thread_track = storage_->thread_track_table();
781       const auto& process_track = storage_->process_track_table();
782       const auto& thread_slices = storage_->thread_slices();
783       const auto& virtual_track_slices = storage_->virtual_track_slices();
784 
785       int64_t duration_ns = slices.dur()[i];
786       int64_t thread_ts_ns = 0;
787       int64_t thread_duration_ns = 0;
788       int64_t thread_instruction_count = 0;
789       int64_t thread_instruction_delta = 0;
790 
791       base::Optional<uint32_t> thread_slice_row =
792           thread_slices.FindRowForSliceId(i);
793       if (thread_slice_row) {
794         thread_ts_ns = thread_slices.thread_timestamp_ns()[*thread_slice_row];
795         thread_duration_ns =
796             thread_slices.thread_duration_ns()[*thread_slice_row];
797         thread_instruction_count =
798             thread_slices.thread_instruction_counts()[*thread_slice_row];
799         thread_instruction_delta =
800             thread_slices.thread_instruction_deltas()[*thread_slice_row];
801       } else {
802         base::Optional<uint32_t> vtrack_slice_row =
803             virtual_track_slices.FindRowForSliceId(i);
804         if (vtrack_slice_row) {
805           thread_ts_ns =
806               virtual_track_slices.thread_timestamp_ns()[*vtrack_slice_row];
807           thread_duration_ns =
808               virtual_track_slices.thread_duration_ns()[*vtrack_slice_row];
809           thread_instruction_count =
810               virtual_track_slices
811                   .thread_instruction_counts()[*vtrack_slice_row];
812           thread_instruction_delta =
813               virtual_track_slices
814                   .thread_instruction_deltas()[*vtrack_slice_row];
815         }
816       }
817 
818       auto opt_thread_track_row = thread_track.id().IndexOf(TrackId{track_id});
819 
820       if (opt_thread_track_row && !is_child_track) {
821         // Synchronous (thread) slice or instant event.
822         UniqueTid utid = thread_track.utid()[*opt_thread_track_row];
823         auto pid_and_tid = UtidToPidAndTid(utid);
824         event["pid"] = Json::Int(pid_and_tid.first);
825         event["tid"] = Json::Int(pid_and_tid.second);
826 
827         if (duration_ns == 0) {
828           // Use "I" instead of "i" phase for backwards-compat with old
829           // consumers.
830           event["ph"] = "I";
831           if (thread_ts_ns > 0) {
832             event["tts"] = Json::Int64(thread_ts_ns / 1000);
833           }
834           if (thread_instruction_count > 0) {
835             event["ticount"] = Json::Int64(thread_instruction_count);
836           }
837           event["s"] = "t";
838         } else {
839           if (duration_ns > 0) {
840             event["ph"] = "X";
841             event["dur"] = Json::Int64(duration_ns / 1000);
842           } else {
843             // If the slice didn't finish, the duration may be negative. Only
844             // write a begin event without end event in this case.
845             event["ph"] = "B";
846           }
847           if (thread_ts_ns > 0) {
848             event["tts"] = Json::Int64(thread_ts_ns / 1000);
849             // Only write thread duration for completed events.
850             if (duration_ns > 0)
851               event["tdur"] = Json::Int64(thread_duration_ns / 1000);
852           }
853           if (thread_instruction_count > 0) {
854             event["ticount"] = Json::Int64(thread_instruction_count);
855             // Only write thread instruction delta for completed events.
856             if (duration_ns > 0)
857               event["tidelta"] = Json::Int64(thread_instruction_delta);
858           }
859         }
860         writer_.WriteCommonEvent(event);
861       } else if (is_child_track ||
862                  (legacy_chrome_track && track_args->isMember("source_id"))) {
863         // Async event slice.
864         auto opt_process_row = process_track.id().IndexOf(TrackId{track_id});
865         if (legacy_chrome_track) {
866           // Legacy async tracks are always process-associated and have args.
867           PERFETTO_DCHECK(opt_process_row);
868           PERFETTO_DCHECK(track_args);
869           uint32_t upid = process_track.upid()[*opt_process_row];
870           uint32_t exported_pid = UpidToPid(upid);
871           event["pid"] = Json::Int(exported_pid);
872           event["tid"] =
873               Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
874                                     : exported_pid);
875 
876           // Preserve original event IDs for legacy tracks. This is so that e.g.
877           // memory dump IDs show up correctly in the JSON trace.
878           PERFETTO_DCHECK(track_args->isMember("source_id"));
879           PERFETTO_DCHECK(track_args->isMember("source_id_is_process_scoped"));
880           PERFETTO_DCHECK(track_args->isMember("source_scope"));
881           uint64_t source_id =
882               static_cast<uint64_t>((*track_args)["source_id"].asInt64());
883           std::string source_scope = (*track_args)["source_scope"].asString();
884           if (!source_scope.empty())
885             event["scope"] = source_scope;
886           bool source_id_is_process_scoped =
887               (*track_args)["source_id_is_process_scoped"].asBool();
888           if (source_id_is_process_scoped) {
889             event["id2"]["local"] = PrintUint64(source_id);
890           } else {
891             // Some legacy importers don't understand "id2" fields, so we use
892             // the "usually" global "id" field instead. This works as long as
893             // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
894             // "LOCAL_ID_PHASES" in catapult.
895             event["id"] = PrintUint64(source_id);
896           }
897         } else {
898           if (opt_thread_track_row) {
899             UniqueTid utid = thread_track.utid()[*opt_thread_track_row];
900             auto pid_and_tid = UtidToPidAndTid(utid);
901             event["pid"] = Json::Int(pid_and_tid.first);
902             event["tid"] = Json::Int(pid_and_tid.second);
903             event["id2"]["local"] = PrintUint64(track_id.value);
904           } else if (opt_process_row) {
905             uint32_t upid = process_track.upid()[*opt_process_row];
906             uint32_t exported_pid = UpidToPid(upid);
907             event["pid"] = Json::Int(exported_pid);
908             event["tid"] =
909                 Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
910                                       : exported_pid);
911             event["id2"]["local"] = PrintUint64(track_id.value);
912           } else {
913             if (legacy_utid) {
914               auto pid_and_tid = UtidToPidAndTid(*legacy_utid);
915               event["pid"] = Json::Int(pid_and_tid.first);
916               event["tid"] = Json::Int(pid_and_tid.second);
917             }
918 
919             // Some legacy importers don't understand "id2" fields, so we use
920             // the "usually" global "id" field instead. This works as long as
921             // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
922             // "LOCAL_ID_PHASES" in catapult.
923             event["id"] = PrintUint64(track_id.value);
924           }
925         }
926 
927         if (thread_ts_ns > 0) {
928           event["tts"] = Json::Int64(thread_ts_ns / 1000);
929           event["use_async_tts"] = Json::Int(1);
930         }
931         if (thread_instruction_count > 0) {
932           event["ticount"] = Json::Int64(thread_instruction_count);
933           event["use_async_tts"] = Json::Int(1);
934         }
935 
936         if (duration_ns == 0) {  // Instant async event.
937           event["ph"] = "n";
938           writer_.AddAsyncInstantEvent(event);
939         } else {  // Async start and end.
940           event["ph"] = "b";
941           writer_.AddAsyncBeginEvent(event);
942           // If the slice didn't finish, the duration may be negative. Don't
943           // write the end event in this case.
944           if (duration_ns > 0) {
945             event["ph"] = "e";
946             event["ts"] = Json::Int64((slices.ts()[i] + duration_ns) / 1000);
947             if (thread_ts_ns > 0) {
948               event["tts"] =
949                   Json::Int64((thread_ts_ns + thread_duration_ns) / 1000);
950             }
951             if (thread_instruction_count > 0) {
952               event["ticount"] = Json::Int64(
953                   (thread_instruction_count + thread_instruction_delta));
954             }
955             event["args"].clear();
956             writer_.AddAsyncEndEvent(event);
957           }
958         }
959       } else {
960         // Global or process-scoped instant event.
961         PERFETTO_DCHECK(legacy_chrome_track || !is_child_track);
962         if (duration_ns != 0) {
963           // We don't support exporting slices on the default global or process
964           // track to JSON (JSON only supports instant events on these tracks).
965           PERFETTO_DLOG(
966               "skipping non-instant slice on global or process track");
967         } else {
968           // Use "I" instead of "i" phase for backwards-compat with old
969           // consumers.
970           event["ph"] = "I";
971 
972           auto opt_process_row = process_track.id().IndexOf(TrackId{track_id});
973           if (opt_process_row.has_value()) {
974             uint32_t upid = process_track.upid()[*opt_process_row];
975             uint32_t exported_pid = UpidToPid(upid);
976             event["pid"] = Json::Int(exported_pid);
977             event["tid"] =
978                 Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
979                                       : exported_pid);
980             event["s"] = "p";
981           } else {
982             event["s"] = "g";
983           }
984           writer_.WriteCommonEvent(event);
985         }
986       }
987     }
988     return util::OkStatus();
989   }
990 
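  // Rebuilds a legacy JSON event from a row of the raw table: phase, category,
  // name, ids and timings are pulled from the "legacy_event" args, which are
  // then removed from the exported args.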
991   Json::Value ConvertLegacyRawEventToJson(uint32_t index) {
992     const auto& events = storage_->raw_table();
993 
994     Json::Value event;
995     event["ts"] = Json::Int64(events.ts()[index] / 1000);
996 
997     UniqueTid utid = static_cast<UniqueTid>(events.utid()[index]);
998     auto pid_and_tid = UtidToPidAndTid(utid);
999     event["pid"] = Json::Int(pid_and_tid.first);
1000     event["tid"] = Json::Int(pid_and_tid.second);
1001 
1002     // Raw legacy events store all other params in the arg set. Make a copy of
1003     // the converted args here, parse, and then remove the legacy params.
1004     event["args"] = args_builder_.GetArgs(events.arg_set_id()[index]);
1005     const Json::Value& legacy_args = event["args"][kLegacyEventArgsKey];
1006 
1007     PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventCategoryKey));
1008     event["cat"] = legacy_args[kLegacyEventCategoryKey];
1009 
1010     PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventNameKey));
1011     event["name"] = legacy_args[kLegacyEventNameKey];
1012 
1013     PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventPhaseKey));
1014     event["ph"] = legacy_args[kLegacyEventPhaseKey];
1015 
1016     // Object snapshot events are supposed to have a mandatory "snapshot" arg,
1017     // which may be removed in trace processor if it is empty.
1018     if (legacy_args[kLegacyEventPhaseKey] == "O" &&
1019         !event["args"].isMember("snapshot")) {
1020       event["args"]["snapshot"] = Json::Value(Json::objectValue);
1021     }
1022 
1023     if (legacy_args.isMember(kLegacyEventDurationNsKey))
1024       event["dur"] = legacy_args[kLegacyEventDurationNsKey].asInt64() / 1000;
1025 
1026     if (legacy_args.isMember(kLegacyEventThreadTimestampNsKey)) {
1027       event["tts"] =
1028           legacy_args[kLegacyEventThreadTimestampNsKey].asInt64() / 1000;
1029     }
1030 
1031     if (legacy_args.isMember(kLegacyEventThreadDurationNsKey)) {
1032       event["tdur"] =
1033           legacy_args[kLegacyEventThreadDurationNsKey].asInt64() / 1000;
1034     }
1035 
1036     if (legacy_args.isMember(kLegacyEventThreadInstructionCountKey))
1037       event["ticount"] = legacy_args[kLegacyEventThreadInstructionCountKey];
1038 
1039     if (legacy_args.isMember(kLegacyEventThreadInstructionDeltaKey))
1040       event["tidelta"] = legacy_args[kLegacyEventThreadInstructionDeltaKey];
1041 
1042     if (legacy_args.isMember(kLegacyEventUseAsyncTtsKey))
1043       event["use_async_tts"] = legacy_args[kLegacyEventUseAsyncTtsKey];
1044 
1045     if (legacy_args.isMember(kLegacyEventUnscopedIdKey)) {
1046       event["id"] =
1047           PrintUint64(legacy_args[kLegacyEventUnscopedIdKey].asUInt64());
1048     }
1049 
1050     if (legacy_args.isMember(kLegacyEventGlobalIdKey)) {
1051       event["id2"]["global"] =
1052           PrintUint64(legacy_args[kLegacyEventGlobalIdKey].asUInt64());
1053     }
1054 
1055     if (legacy_args.isMember(kLegacyEventLocalIdKey)) {
1056       event["id2"]["local"] =
1057           PrintUint64(legacy_args[kLegacyEventLocalIdKey].asUInt64());
1058     }
1059 
1060     if (legacy_args.isMember(kLegacyEventIdScopeKey))
1061       event["scope"] = legacy_args[kLegacyEventIdScopeKey];
1062 
1063     ConvertLegacyFlowEventArgs(legacy_args, &event);
1064 
1065     event["args"].removeMember(kLegacyEventArgsKey);
1066 
1067     return event;
1068   }
1069 
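  // Exports events stored in the raw table: legacy track events are converted
  // back to JSON, while legacy system/user trace data and chrome metadata are
  // forwarded to the writer.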
1070   util::Status ExportRawEvents() {
1071     base::Optional<StringId> raw_legacy_event_key_id =
1072         storage_->string_pool().GetId("track_event.legacy_event");
1073     base::Optional<StringId> raw_legacy_system_trace_event_id =
1074         storage_->string_pool().GetId("chrome_event.legacy_system_trace");
1075     base::Optional<StringId> raw_legacy_user_trace_event_id =
1076         storage_->string_pool().GetId("chrome_event.legacy_user_trace");
1077     base::Optional<StringId> raw_chrome_metadata_event_id =
1078         storage_->string_pool().GetId("chrome_event.metadata");
1079 
1080     const auto& events = storage_->raw_table();
1081     for (uint32_t i = 0; i < events.row_count(); ++i) {
1082       if (raw_legacy_event_key_id &&
1083           events.name()[i] == *raw_legacy_event_key_id) {
1084         Json::Value event = ConvertLegacyRawEventToJson(i);
1085         writer_.WriteCommonEvent(event);
1086       } else if (raw_legacy_system_trace_event_id &&
1087                  events.name()[i] == *raw_legacy_system_trace_event_id) {
1088         Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
1089         PERFETTO_DCHECK(args.isMember("data"));
1090         writer_.AddSystemTraceData(args["data"].asString());
1091       } else if (raw_legacy_user_trace_event_id &&
1092                  events.name()[i] == *raw_legacy_user_trace_event_id) {
1093         Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
1094         PERFETTO_DCHECK(args.isMember("data"));
1095         writer_.AddUserTraceData(args["data"].asString());
1096       } else if (raw_chrome_metadata_event_id &&
1097                  events.name()[i] == *raw_chrome_metadata_event_id) {
1098         Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
1099         writer_.MergeMetadata(args);
1100       }
1101     }
1102     return util::OkStatus();
1103   }
1104 
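  // Exports CPU profiler samples as "n"-phase instant events whose args carry
  // the symbolized callstack.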
1105   util::Status ExportCpuProfileSamples() {
1106     const tables::CpuProfileStackSampleTable& samples =
1107         storage_->cpu_profile_stack_sample_table();
1108     for (uint32_t i = 0; i < samples.row_count(); ++i) {
1109       Json::Value event;
1110       event["ts"] = Json::Int64(samples.ts()[i] / 1000);
1111 
1112       UniqueTid utid = static_cast<UniqueTid>(samples.utid()[i]);
1113       auto pid_and_tid = UtidToPidAndTid(utid);
1114       event["pid"] = Json::Int(pid_and_tid.first);
1115       event["tid"] = Json::Int(pid_and_tid.second);
1116 
1117       event["ph"] = "n";
1118       event["cat"] = "disabled-by-default-cpu_profiler";
1119       event["name"] = "StackCpuSampling";
1120       event["s"] = "t";
1121 
1122       // Add a dummy thread timestamp to this event to match the format of
1123       // instant events. Useful in the UI to view args of a selected group of
1124       // samples.
1125       event["tts"] = Json::Int64(1);
1126 
1127       // "n"-phase events are nestable async events which get tied together with
1128       // their id, so we need to give each one a unique ID as we only
1129       // want the samples to show up on their own track in the trace-viewer but
1130       // not nested together.
1131       static size_t g_id_counter = 0;
1132       event["id"] = PrintUint64(++g_id_counter);
1133 
1134       const auto& callsites = storage_->stack_profile_callsite_table();
1135       const auto& frames = storage_->stack_profile_frame_table();
1136       const auto& mappings = storage_->stack_profile_mapping_table();
1137 
1138       std::vector<std::string> callstack;
1139       base::Optional<CallsiteId> opt_callsite_id = samples.callsite_id()[i];
1140 
1141       while (opt_callsite_id) {
1142         CallsiteId callsite_id = *opt_callsite_id;
1143         uint32_t callsite_row = *callsites.id().IndexOf(callsite_id);
1144 
1145         FrameId frame_id = callsites.frame_id()[callsite_row];
1146         uint32_t frame_row = *frames.id().IndexOf(frame_id);
1147 
1148         MappingId mapping_id = frames.mapping()[frame_row];
1149         uint32_t mapping_row = *mappings.id().IndexOf(mapping_id);
1150 
1151         NullTermStringView symbol_name;
1152         auto opt_symbol_set_id = frames.symbol_set_id()[frame_row];
1153         if (opt_symbol_set_id) {
1154           symbol_name = storage_->GetString(
1155               storage_->symbol_table().name()[*opt_symbol_set_id]);
1156         }
1157 
1158         char frame_entry[1024];
1159         snprintf(frame_entry, sizeof(frame_entry), "%s - %s [%s]\n",
1160                  (symbol_name.empty()
1161                       ? PrintUint64(
1162                             static_cast<uint64_t>(frames.rel_pc()[frame_row]))
1163                             .c_str()
1164                       : symbol_name.c_str()),
1165                  GetNonNullString(storage_, mappings.name()[mapping_row]),
1166                  GetNonNullString(storage_, mappings.build_id()[mapping_row]));
1167 
1168         callstack.emplace_back(frame_entry);
1169 
1170         opt_callsite_id = callsites.parent_id()[callsite_row];
1171       }
1172 
1173       std::string merged_callstack;
1174       for (auto entry = callstack.rbegin(); entry != callstack.rend();
1175            ++entry) {
1176         merged_callstack += *entry;
1177       }
1178 
1179       event["args"]["frames"] = merged_callstack;
1180       event["args"]["process_priority"] = samples.process_priority()[i];
1181 
1182       // TODO(oysteine): Used for backwards compatibility with the memlog
1183       // pipeline, should remove once we've switched to looking directly at the
1184       // tid.
1185       event["args"]["thread_id"] = Json::Int(pid_and_tid.second);
1186 
1187       writer_.WriteCommonEvent(event);
1188     }
1189 
1190     return util::OkStatus();
1191   }
1192 
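  // Maps benchmark-related trace metadata (names, labels, stories, timestamps)
  // onto the "telemetry" metadata section.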
1193   util::Status ExportMetadata() {
1194     const auto& trace_metadata = storage_->metadata_table();
1195     const auto& keys = trace_metadata.name();
1196     const auto& int_values = trace_metadata.int_value();
1197     const auto& str_values = trace_metadata.str_value();
1198 
1199     // Create a mapping from key string ids to keys.
1200     std::unordered_map<StringId, metadata::KeyIDs> key_map;
1201     for (uint32_t i = 0; i < metadata::kNumKeys; ++i) {
1202       auto id = *storage_->string_pool().GetId(metadata::kNames[i]);
1203       key_map[id] = static_cast<metadata::KeyIDs>(i);
1204     }
1205 
1206     for (uint32_t pos = 0; pos < trace_metadata.row_count(); pos++) {
1207       // Cast away from enum type, as otherwise -Wswitch-enum will demand an
1208       // exhaustive list of cases, even if there's a default case.
1209       metadata::KeyIDs key = key_map[keys[pos]];
1210       switch (static_cast<size_t>(key)) {
1211         case metadata::benchmark_description:
1212           writer_.AppendTelemetryMetadataString(
1213               "benchmarkDescriptions", str_values.GetString(pos).c_str());
1214           break;
1215 
1216         case metadata::benchmark_name:
1217           writer_.AppendTelemetryMetadataString(
1218               "benchmarks", str_values.GetString(pos).c_str());
1219           break;
1220 
1221         case metadata::benchmark_start_time_us:
1222           writer_.SetTelemetryMetadataTimestamp("benchmarkStart",
1223                                                 *int_values[pos]);
1224           break;
1225 
1226         case metadata::benchmark_had_failures:
1227           writer_.AppendTelemetryMetadataBool("hadFailures", *int_values[pos]);
1228           break;
1229 
1230         case metadata::benchmark_label:
1231           writer_.AppendTelemetryMetadataString(
1232               "labels", str_values.GetString(pos).c_str());
1233           break;
1234 
1235         case metadata::benchmark_story_name:
1236           writer_.AppendTelemetryMetadataString(
1237               "stories", str_values.GetString(pos).c_str());
1238           break;
1239 
1240         case metadata::benchmark_story_run_index:
1241           writer_.AppendTelemetryMetadataInt("storysetRepeats",
1242                                              *int_values[pos]);
1243           break;
1244 
1245         case metadata::benchmark_story_run_time_us:
1246           writer_.SetTelemetryMetadataTimestamp("traceStart", *int_values[pos]);
1247           break;
1248 
1249         case metadata::benchmark_story_tags:  // repeated
1250           writer_.AppendTelemetryMetadataString(
1251               "storyTags", str_values.GetString(pos).c_str());
1252           break;
1253 
1254         default:
1255           PERFETTO_DLOG("Ignoring metadata key %zu", static_cast<size_t>(key));
1256           break;
1257       }
1258     }
1259     return util::OkStatus();
1260   }
1261 
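  // Exports trace processor stats under the "trace_processor_stats" metadata
  // key, grouping per-buffer stats by buffer index.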
1262   util::Status ExportStats() {
1263     const auto& stats = storage_->stats();
1264 
1265     for (size_t idx = 0; idx < stats::kNumKeys; idx++) {
1266       if (stats::kTypes[idx] == stats::kSingle) {
1267         writer_.SetStats(stats::kNames[idx], stats[idx].value);
1268       } else {
1269         PERFETTO_DCHECK(stats::kTypes[idx] == stats::kIndexed);
1270         writer_.SetStats(stats::kNames[idx], stats[idx].indexed_values);
1271       }
1272     }
1273 
1274     return util::OkStatus();
1275   }
1276 
1277   uint32_t UpidToPid(UniquePid upid) {
1278     auto pid_it = upids_to_exported_pids_.find(upid);
1279     PERFETTO_DCHECK(pid_it != upids_to_exported_pids_.end());
1280     return pid_it->second;
1281   }
1282 
1283   std::pair<uint32_t, uint32_t> UtidToPidAndTid(UniqueTid utid) {
1284     auto pid_and_tid_it = utids_to_exported_pids_and_tids_.find(utid);
1285     PERFETTO_DCHECK(pid_and_tid_it != utids_to_exported_pids_and_tids_.end());
1286     return pid_and_tid_it->second;
1287   }
1288 
1289   uint32_t NextExportedPidOrTidForDuplicates() {
1290     // Ensure that the exported substitute value does not represent a valid
1291     // pid/tid. This would be very unlikely in practice.
1292     while (IsValidPidOrTid(next_exported_pid_or_tid_for_duplicates_))
1293       next_exported_pid_or_tid_for_duplicates_--;
1294     return next_exported_pid_or_tid_for_duplicates_--;
1295   }
1296 
1297   bool IsValidPidOrTid(uint32_t pid_or_tid) {
1298     const auto& process_table = storage_->process_table();
1299     for (UniquePid upid = 0; upid < process_table.row_count(); upid++) {
1300       if (process_table.pid()[upid] == pid_or_tid)
1301         return true;
1302     }
1303 
1304     const auto& thread_table = storage_->thread_table();
1305     for (UniqueTid utid = 0; utid < thread_table.row_count(); utid++) {
1306       if (thread_table.tid()[utid] == pid_or_tid)
1307         return true;
1308     }
1309 
1310     return false;
1311   }
1312 
1313   const TraceStorage* storage_;
1314   ArgsBuilder args_builder_;
1315   TraceFormatWriter writer_;
1316 
1317   // If a pid/tid is duplicated between two or more different processes/threads
1318   // (pid/tid reuse), we export the subsequent occurrences with substitute
1319   // pids/tids that are visibly different from regular pids/tids, counting down
1320   // from uint32_t max.
1321   uint32_t next_exported_pid_or_tid_for_duplicates_ =
1322       std::numeric_limits<uint32_t>::max();
1323 
1324   std::map<UniquePid, uint32_t> upids_to_exported_pids_;
1325   std::map<uint32_t, UniquePid> exported_pids_to_upids_;
1326   std::map<UniqueTid, std::pair<uint32_t, uint32_t>>
1327       utids_to_exported_pids_and_tids_;
1328   std::map<std::pair<uint32_t, uint32_t>, UniqueTid>
1329       exported_pids_and_tids_to_utids_;
1330 };
1331 
1332 #endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1333 
1334 }  // namespace
1335 
1336 OutputWriter::OutputWriter() = default;
1337 OutputWriter::~OutputWriter() = default;
1338 
1339 util::Status ExportJson(const TraceStorage* storage,
1340                         OutputWriter* output,
1341                         ArgumentFilterPredicate argument_filter,
1342                         MetadataFilterPredicate metadata_filter,
1343                         LabelFilterPredicate label_filter) {
1344 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1345   JsonExporter exporter(storage, output, std::move(argument_filter),
1346                         std::move(metadata_filter), std::move(label_filter));
1347   return exporter.Export();
1348 #else
1349   perfetto::base::ignore_result(storage);
1350   perfetto::base::ignore_result(output);
1351   perfetto::base::ignore_result(argument_filter);
1352   perfetto::base::ignore_result(metadata_filter);
1353   perfetto::base::ignore_result(label_filter);
1354   return util::ErrStatus("JSON support is not compiled in this build");
1355 #endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1356 }
1357 
1358 util::Status ExportJson(TraceProcessorStorage* tp,
1359                         OutputWriter* output,
1360                         ArgumentFilterPredicate argument_filter,
1361                         MetadataFilterPredicate metadata_filter,
1362                         LabelFilterPredicate label_filter) {
1363   const TraceStorage* storage = reinterpret_cast<TraceProcessorStorageImpl*>(tp)
1364                                     ->context()
1365                                     ->storage.get();
1366   return ExportJson(storage, output, argument_filter, metadata_filter,
1367                     label_filter);
1368 }
1369 
1370 util::Status ExportJson(const TraceStorage* storage, FILE* output) {
1371   FileWriter writer(output);
1372   return ExportJson(storage, &writer, nullptr, nullptr, nullptr);
1373 }
1374 
1375 }  // namespace json
1376 }  // namespace trace_processor
1377 }  // namespace perfetto
1378 
1379