/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/trace_processor/export_json.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <deque>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <sstream>
#include <string>
#include <string_view>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "perfetto/base/build_config.h"
#include "perfetto/base/logging.h"
#include "perfetto/base/status.h"
#include "perfetto/ext/base/flat_hash_map.h"
#include "perfetto/ext/base/string_splitter.h"
#include "perfetto/ext/base/string_utils.h"
#include "perfetto/ext/base/string_view.h"
#include "perfetto/public/compiler.h"
#include "perfetto/trace_processor/basic_types.h"
#include "src/trace_processor/containers/null_term_string_view.h"
#include "src/trace_processor/export_json.h"
#include "src/trace_processor/importers/common/tracks_common.h"
#include "src/trace_processor/storage/metadata.h"
#include "src/trace_processor/storage/stats.h"
#include "src/trace_processor/storage/trace_storage.h"
#include "src/trace_processor/tables/metadata_tables_py.h"
#include "src/trace_processor/tables/profiler_tables_py.h"
#include "src/trace_processor/trace_processor_storage_impl.h"
#include "src/trace_processor/types/trace_processor_context.h"
#include "src/trace_processor/types/variadic.h"
#include "src/trace_processor/util/status_macros.h"

#if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
#include <json/config.h>
#include <json/reader.h>
#include <json/value.h>
#include <json/writer.h>
#endif

namespace perfetto::trace_processor::json {

namespace {

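// Appends exported JSON to a caller-provided FILE*, flushing it on
// destruction.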
class FileWriter : public OutputWriter {
 public:
  explicit FileWriter(FILE* file) : file_(file) {}
  ~FileWriter() override { fflush(file_); }

  base::Status AppendString(const std::string& s) override {
    size_t written =
        fwrite(s.data(), sizeof(std::string::value_type), s.size(), file_);
    if (written != s.size())
      return base::ErrStatus("Error writing to file: %d", ferror(file_));
    return base::OkStatus();
  }

 private:
  FILE* file_;
};

#if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
using IndexMap = perfetto::trace_processor::TraceStorage::Stats::IndexMap;

const char kLegacyEventArgsKey[] = "legacy_event";
const char kLegacyEventPassthroughUtidKey[] = "passthrough_utid";
const char kLegacyEventCategoryKey[] = "category";
const char kLegacyEventNameKey[] = "name";
const char kLegacyEventPhaseKey[] = "phase";
const char kLegacyEventDurationNsKey[] = "duration_ns";
const char kLegacyEventThreadTimestampNsKey[] = "thread_timestamp_ns";
const char kLegacyEventThreadDurationNsKey[] = "thread_duration_ns";
const char kLegacyEventThreadInstructionCountKey[] = "thread_instruction_count";
const char kLegacyEventThreadInstructionDeltaKey[] = "thread_instruction_delta";
const char kLegacyEventUseAsyncTtsKey[] = "use_async_tts";
const char kLegacyEventUnscopedIdKey[] = "unscoped_id";
const char kLegacyEventGlobalIdKey[] = "global_id";
const char kLegacyEventLocalIdKey[] = "local_id";
const char kLegacyEventIdScopeKey[] = "id_scope";
const char kStrippedArgument[] = "__stripped__";

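// Returns the string for |id|, or an empty string if |id| is null or missing.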
const char* GetNonNullString(const TraceStorage* storage,
                             std::optional<StringId> id) {
  return id == std::nullopt || *id == kNullStringId
             ? ""
             : storage->GetString(*id).c_str();
}

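// Drives the export: maps pids/tids to unique exported ids, then writes
// thread/process metadata, slices, flows, raw events, CPU profile samples,
// metadata, stats and memory snapshots through a TraceFormatWriter.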
class JsonExporter {
 public:
  JsonExporter(const TraceStorage* storage,
               OutputWriter* output,
               ArgumentFilterPredicate argument_filter,
               MetadataFilterPredicate metadata_filter,
               LabelFilterPredicate label_filter)
      : storage_(storage),
        args_builder_(storage_),
        writer_(output,
                std::move(argument_filter),
                std::move(metadata_filter),
                std::move(label_filter)) {}

  base::Status Export() {
    RETURN_IF_ERROR(MapUniquePidsAndTids());
    RETURN_IF_ERROR(ExportThreadNames());
    RETURN_IF_ERROR(ExportProcessNames());
    RETURN_IF_ERROR(ExportProcessUptimes());
    RETURN_IF_ERROR(ExportSlices());
    RETURN_IF_ERROR(ExportFlows());
    RETURN_IF_ERROR(ExportRawEvents());
    RETURN_IF_ERROR(ExportCpuProfileSamples());
    RETURN_IF_ERROR(ExportMetadata());
    RETURN_IF_ERROR(ExportStats());
    RETURN_IF_ERROR(ExportMemorySnapshots());
    return base::OkStatus();
  }

 private:
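  // Emits the Chrome JSON trace format: a "traceEvents" array followed by
  // optional "systemTraceEvents" and "metadata" sections, applying the
  // argument, metadata and label filters along the way.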
  class TraceFormatWriter {
   public:
    TraceFormatWriter(OutputWriter* output,
                      ArgumentFilterPredicate argument_filter,
                      MetadataFilterPredicate metadata_filter,
                      LabelFilterPredicate label_filter)
        : output_(output),
          argument_filter_(std::move(argument_filter)),
          metadata_filter_(std::move(metadata_filter)),
          label_filter_(std::move(label_filter)),
          first_event_(true) {
      Json::StreamWriterBuilder b;
      b.settings_["indentation"] = "";
      writer_.reset(b.newStreamWriter());
      WriteHeader();
    }

    ~TraceFormatWriter() { WriteFooter(); }

    void WriteCommonEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      DoWriteEvent(event);
    }

    void AddAsyncBeginEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      async_begin_events_.push_back(event);
    }

    void AddAsyncInstantEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      async_instant_events_.push_back(event);
    }

    void AddAsyncEndEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      async_end_events_.push_back(event);
    }

    void SortAndEmitAsyncEvents() {
      // Catapult doesn't handle out-of-order begin/end events well, especially
      // when their timestamps are the same, but their order is incorrect. Since
      // we process events sorted by begin timestamp, |async_begin_events_| and
      // |async_instant_events_| are already sorted. We now only have to sort
      // |async_end_events_| and merge-sort all events into a single sequence.

      // Sort |async_end_events_|. Note that we should order by ascending
      // timestamp, but in reverse-stable order. This way, a child slice's end
      // is emitted before its parent's end event, even if both end events have
      // the same timestamp. To accomplish this, we perform a stable sort in
      // descending order and later iterate via reverse iterators.
      struct {
        bool operator()(const Json::Value& a, const Json::Value& b) const {
          return a["ts"].asInt64() > b["ts"].asInt64();
        }
      } CompareEvents;
      std::stable_sort(async_end_events_.begin(), async_end_events_.end(),
                       CompareEvents);

      // Merge sort by timestamp. If events share the same timestamp, prefer
      // instant events, then end events, so that old slices close before new
      // ones are opened, but instant events remain in their deepest nesting
      // level.
      auto instant_event_it = async_instant_events_.begin();
      auto end_event_it = async_end_events_.rbegin();
      auto begin_event_it = async_begin_events_.begin();

      auto has_instant_event = instant_event_it != async_instant_events_.end();
      auto has_end_event = end_event_it != async_end_events_.rend();
      auto has_begin_event = begin_event_it != async_begin_events_.end();

      auto emit_next_instant = [&instant_event_it, &has_instant_event, this]() {
        DoWriteEvent(*instant_event_it);
        instant_event_it++;
        has_instant_event = instant_event_it != async_instant_events_.end();
      };
      auto emit_next_end = [&end_event_it, &has_end_event, this]() {
        DoWriteEvent(*end_event_it);
        end_event_it++;
        has_end_event = end_event_it != async_end_events_.rend();
      };
      auto emit_next_begin = [&begin_event_it, &has_begin_event, this]() {
        DoWriteEvent(*begin_event_it);
        begin_event_it++;
        has_begin_event = begin_event_it != async_begin_events_.end();
      };

      auto emit_next_instant_or_end = [&instant_event_it, &end_event_it,
                                       &emit_next_instant, &emit_next_end]() {
        if ((*instant_event_it)["ts"].asInt64() <=
            (*end_event_it)["ts"].asInt64()) {
          emit_next_instant();
        } else {
          emit_next_end();
        }
      };
      auto emit_next_instant_or_begin = [&instant_event_it, &begin_event_it,
                                         &emit_next_instant,
                                         &emit_next_begin]() {
        if ((*instant_event_it)["ts"].asInt64() <=
            (*begin_event_it)["ts"].asInt64()) {
          emit_next_instant();
        } else {
          emit_next_begin();
        }
      };
      auto emit_next_end_or_begin = [&end_event_it, &begin_event_it,
                                     &emit_next_end, &emit_next_begin]() {
        if ((*end_event_it)["ts"].asInt64() <=
            (*begin_event_it)["ts"].asInt64()) {
          emit_next_end();
        } else {
          emit_next_begin();
        }
      };

      // While we still have events in all iterators, consider each.
      while (has_instant_event && has_end_event && has_begin_event) {
        if ((*instant_event_it)["ts"].asInt64() <=
            (*end_event_it)["ts"].asInt64()) {
          emit_next_instant_or_begin();
        } else {
          emit_next_end_or_begin();
        }
      }

      // Only instant and end events left.
      while (has_instant_event && has_end_event) {
        emit_next_instant_or_end();
      }

      // Only instant and begin events left.
      while (has_instant_event && has_begin_event) {
        emit_next_instant_or_begin();
      }

      // Only end and begin events left.
      while (has_end_event && has_begin_event) {
        emit_next_end_or_begin();
      }

      // Remaining instant events.
      while (has_instant_event) {
        emit_next_instant();
      }

      // Remaining end events.
      while (has_end_event) {
        emit_next_end();
      }

      // Remaining begin events.
      while (has_begin_event) {
        emit_next_begin();
      }
    }

    void WriteMetadataEvent(const char* metadata_type,
                            const char* metadata_arg_name,
                            const char* metadata_arg_value,
                            uint32_t pid,
                            uint32_t tid) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      std::ostringstream ss;
      if (!first_event_)
        ss << ",\n";

      Json::Value value;
      value["ph"] = "M";
      value["cat"] = "__metadata";
      value["ts"] = 0;
      value["name"] = metadata_type;
      value["pid"] = Json::Int(pid);
      value["tid"] = Json::Int(tid);

      Json::Value args;
      args[metadata_arg_name] = metadata_arg_value;
      value["args"] = args;

      writer_->write(value, &ss);
      output_->AppendString(ss.str());
      first_event_ = false;
    }

    void MergeMetadata(const Json::Value& value) {
      for (const auto& member : value.getMemberNames()) {
        metadata_[member] = value[member];
      }
    }

    void AppendTelemetryMetadataString(const char* key, const char* value) {
      metadata_["telemetry"][key].append(value);
    }

    void AppendTelemetryMetadataInt(const char* key, int64_t value) {
      metadata_["telemetry"][key].append(Json::Int64(value));
    }

    void AppendTelemetryMetadataBool(const char* key, bool value) {
      metadata_["telemetry"][key].append(value);
    }

    void SetTelemetryMetadataTimestamp(const char* key, int64_t value) {
      metadata_["telemetry"][key] = static_cast<double>(value) / 1000.0;
    }

    void SetStats(const char* key, int64_t value) {
      metadata_["trace_processor_stats"][key] = Json::Int64(value);
    }

    void SetStats(const char* key, const IndexMap& indexed_values) {
      constexpr const char* kBufferStatsPrefix = "traced_buf_";

      // Stats for the same buffer should be grouped together in the JSON.
      if (strncmp(kBufferStatsPrefix, key, strlen(kBufferStatsPrefix)) == 0) {
        for (const auto& value : indexed_values) {
          metadata_["trace_processor_stats"]["traced_buf"][value.first]
                   [key + strlen(kBufferStatsPrefix)] =
                       Json::Int64(value.second);
        }
        return;
      }

      // Other indexed value stats are exported as array under their key.
      for (const auto& value : indexed_values) {
        metadata_["trace_processor_stats"][key][value.first] =
            Json::Int64(value.second);
      }
    }

    void AddSystemTraceData(const std::string& data) {
      system_trace_data_ += data;
    }

    void AddUserTraceData(const std::string& data) {
      if (user_trace_data_.empty())
        user_trace_data_ = "[";
      user_trace_data_ += data;
    }

   private:
    void WriteHeader() {
      if (!label_filter_)
        output_->AppendString("{\"traceEvents\":[\n");
    }

    void WriteFooter() {
      SortAndEmitAsyncEvents();

      // Filter metadata entries.
      if (metadata_filter_) {
        for (const auto& member : metadata_.getMemberNames()) {
          if (!metadata_filter_(member.c_str()))
            metadata_[member] = kStrippedArgument;
        }
      }

      if ((!label_filter_ || label_filter_("traceEvents")) &&
          !user_trace_data_.empty()) {
        user_trace_data_ += "]";

        Json::CharReaderBuilder builder;
        auto reader =
            std::unique_ptr<Json::CharReader>(builder.newCharReader());
        Json::Value result;
        if (reader->parse(user_trace_data_.data(),
                          user_trace_data_.data() + user_trace_data_.length(),
                          &result, nullptr)) {
          for (const auto& event : result) {
            WriteCommonEvent(event);
          }
        } else {
          PERFETTO_DLOG(
              "can't parse legacy user json trace export, skipping. data: %s",
              user_trace_data_.c_str());
        }
      }

      std::ostringstream ss;
      if (!label_filter_)
        ss << "]";

      if ((!label_filter_ || label_filter_("systemTraceEvents")) &&
          !system_trace_data_.empty()) {
        ss << ",\"systemTraceEvents\":\n";
        writer_->write(Json::Value(system_trace_data_), &ss);
      }

      if ((!label_filter_ || label_filter_("metadata")) && !metadata_.empty()) {
        ss << ",\"metadata\":\n";
        writer_->write(metadata_, &ss);
      }

      if (!label_filter_)
        ss << "}";

      output_->AppendString(ss.str());
    }

    void DoWriteEvent(const Json::Value& event) {
      std::ostringstream ss;
      if (!first_event_)
        ss << ",\n";

      ArgumentNameFilterPredicate argument_name_filter;
      bool strip_args =
          argument_filter_ &&
          !argument_filter_(event["cat"].asCString(), event["name"].asCString(),
                            &argument_name_filter);
      if ((strip_args || argument_name_filter) && event.isMember("args")) {
        Json::Value event_copy = event;
        if (strip_args) {
          event_copy["args"] = kStrippedArgument;
        } else {
          auto& args = event_copy["args"];
          for (const auto& member : event["args"].getMemberNames()) {
            if (!argument_name_filter(member.c_str()))
              args[member] = kStrippedArgument;
          }
        }
        writer_->write(event_copy, &ss);
      } else {
        writer_->write(event, &ss);
      }
      first_event_ = false;

      output_->AppendString(ss.str());
    }

    OutputWriter* output_;
    ArgumentFilterPredicate argument_filter_;
    MetadataFilterPredicate metadata_filter_;
    LabelFilterPredicate label_filter_;

    std::unique_ptr<Json::StreamWriter> writer_;
    bool first_event_;
    Json::Value metadata_;
    std::string system_trace_data_;
    std::string user_trace_data_;
    std::vector<Json::Value> async_begin_events_;
    std::vector<Json::Value> async_instant_events_;
    std::vector<Json::Value> async_end_events_;
  };

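  // Converts every arg set in |storage| into a Json::Value up front, keyed by
  // arg set id, so slices and raw events can look their args up cheaply
  // during export.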
  class ArgsBuilder {
   public:
    explicit ArgsBuilder(const TraceStorage* storage)
        : storage_(storage),
          empty_value_(Json::objectValue),
          nan_value_(Json::StaticString("NaN")),
          inf_value_(Json::StaticString("Infinity")),
          neg_inf_value_(Json::StaticString("-Infinity")) {
      const auto& arg_table = storage_->arg_table();
      Json::Value* cur_args_ptr = nullptr;
      uint32_t cur_args_set_id = std::numeric_limits<uint32_t>::max();
      for (auto it = arg_table.IterateRows(); it; ++it) {
        ArgSetId set_id = it.arg_set_id();
        if (set_id != cur_args_set_id) {
          cur_args_ptr =
              args_sets_.Insert(set_id, Json::Value(Json::objectValue)).first;
          cur_args_set_id = set_id;
        }
        const char* key = storage->GetString(it.key()).c_str();
        Variadic value = storage_->GetArgValue(it.row_number().row_number());
        AppendArg(cur_args_ptr, key, VariadicToJson(value));
      }
      PostprocessArgs();
    }

    const Json::Value& GetArgs(std::optional<ArgSetId> set_id) const {
      return set_id ? *args_sets_.Find(*set_id) : empty_value_;
    }

   private:
    Json::Value VariadicToJson(Variadic variadic) {
      switch (variadic.type) {
        case Variadic::kInt:
          return Json::Int64(variadic.int_value);
        case Variadic::kUint:
          return Json::UInt64(variadic.uint_value);
        case Variadic::kString:
          return GetNonNullString(storage_, variadic.string_value);
        case Variadic::kReal:
          if (std::isnan(variadic.real_value)) {
            return nan_value_;
          } else if (std::isinf(variadic.real_value) &&
                     variadic.real_value > 0) {
            return inf_value_;
          } else if (std::isinf(variadic.real_value) &&
                     variadic.real_value < 0) {
            return neg_inf_value_;
          } else {
            return variadic.real_value;
          }
        case Variadic::kPointer:
          return base::Uint64ToHexString(variadic.pointer_value);
        case Variadic::kBool:
          return variadic.bool_value;
        case Variadic::kNull:
          return base::Uint64ToHexString(0);
        case Variadic::kJson:
          Json::CharReaderBuilder b;
          auto reader = std::unique_ptr<Json::CharReader>(b.newCharReader());

          Json::Value result;
          std::string v = GetNonNullString(storage_, variadic.json_value);
          reader->parse(v.data(), v.data() + v.length(), &result, nullptr);
          return result;
      }
      PERFETTO_FATAL("Not reached");  // For gcc.
    }

    static void AppendArg(Json::Value* target,
                          const std::string& key,
                          const Json::Value& value) {
      for (base::StringSplitter parts(key, '.'); parts.Next();) {
        if (PERFETTO_UNLIKELY(!target->isNull() && !target->isObject())) {
          PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
                        key.c_str(), target->toStyledString().c_str());
          return;
        }
        std::string key_part = parts.cur_token();
        size_t bracketpos = key_part.find('[');
        if (bracketpos == std::string::npos) {  // A single item
          target = &(*target)[key_part];
        } else {  // A list item
          target = &(*target)[key_part.substr(0, bracketpos)];
          while (bracketpos != std::string::npos) {
            // We constructed this string from an int earlier in trace_processor
            // so it shouldn't be possible for this (or the StringToUInt32
            // below) to fail.
            std::string s =
                key_part.substr(bracketpos + 1, key_part.find(']', bracketpos) -
                                                    bracketpos - 1);
            if (PERFETTO_UNLIKELY(!target->isNull() && !target->isArray())) {
              PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
                            key.c_str(), target->toStyledString().c_str());
              return;
            }
            std::optional<uint32_t> index = base::StringToUInt32(s);
            if (PERFETTO_UNLIKELY(!index)) {
              PERFETTO_ELOG("Expected to be able to extract index from %s",
                            key_part.c_str());
              return;
            }
            target = &(*target)[index.value()];
            bracketpos = key_part.find('[', bracketpos + 1);
          }
        }
      }
      *target = value;
    }

    void PostprocessArgs() {
      for (auto it = args_sets_.GetIterator(); it; ++it) {
        auto& args = it.value();
        // Move all fields from "debug" key to upper level.
        if (args.isMember("debug")) {
          Json::Value debug = args["debug"];
          args.removeMember("debug");
          for (const auto& member : debug.getMemberNames()) {
            args[member] = debug[member];
          }
        }

        // Rename source fields.
        if (args.isMember("task")) {
          if (args["task"].isMember("posted_from")) {
            Json::Value posted_from = args["task"]["posted_from"];
            args["task"].removeMember("posted_from");
            if (posted_from.isMember("function_name")) {
              args["src_func"] = posted_from["function_name"];
              args["src_file"] = posted_from["file_name"];
            } else if (posted_from.isMember("file_name")) {
              args["src"] = posted_from["file_name"];
            }
          }
          if (args["task"].empty())
            args.removeMember("task");
        }
        if (args.isMember("source")) {
          Json::Value source = args["source"];
          if (source.isObject() && source.isMember("function_name")) {
            args["function_name"] = source["function_name"];
            args["file_name"] = source["file_name"];
            args.removeMember("source");
          }
        }
      }
    }

    const TraceStorage* storage_;
    base::FlatHashMap<ArgSetId, Json::Value> args_sets_;
    const Json::Value empty_value_;
    const Json::Value nan_value_;
    const Json::Value inf_value_;
    const Json::Value neg_inf_value_;
  };

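  // Assigns an exported pid to every process and an exported (pid, tid) pair
  // to every thread; duplicates in the trace are remapped to fresh synthetic
  // ids so the JSON output stays unambiguous.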
  base::Status MapUniquePidsAndTids() {
    const auto& process_table = storage_->process_table();
    for (auto it = process_table.IterateRows(); it; ++it) {
      UniquePid upid = it.id().value;
      uint32_t exported_pid = it.pid();
      auto it_and_inserted =
          exported_pids_to_upids_.emplace(exported_pid, upid);
      if (!it_and_inserted.second) {
        exported_pid = NextExportedPidOrTidForDuplicates();
        it_and_inserted = exported_pids_to_upids_.emplace(exported_pid, upid);
      }
      upids_to_exported_pids_.emplace(upid, exported_pid);
    }

    const auto& thread_table = storage_->thread_table();
    for (auto it = thread_table.IterateRows(); it; ++it) {
      UniqueTid utid = it.id().value;

      uint32_t exported_pid = 0;
      std::optional<UniquePid> upid = it.upid();
      if (upid) {
        auto exported_pid_it = upids_to_exported_pids_.find(*upid);
        PERFETTO_DCHECK(exported_pid_it != upids_to_exported_pids_.end());
        exported_pid = exported_pid_it->second;
      }

      uint32_t exported_tid = it.tid();
      auto it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
          std::make_pair(exported_pid, exported_tid), utid);
      if (!it_and_inserted.second) {
        exported_tid = NextExportedPidOrTidForDuplicates();
        it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
            std::make_pair(exported_pid, exported_tid), utid);
      }
      utids_to_exported_pids_and_tids_.emplace(
          utid, std::make_pair(exported_pid, exported_tid));
    }
    return base::OkStatus();
  }

  base::Status ExportThreadNames() {
    const auto& thread_table = storage_->thread_table();
    for (auto it = thread_table.IterateRows(); it; ++it) {
      auto opt_name = it.name();
      if (opt_name.has_value()) {
        UniqueTid utid = it.id().value;
        const char* thread_name = GetNonNullString(storage_, opt_name);
        auto pid_and_tid = UtidToPidAndTid(utid);
        writer_.WriteMetadataEvent("thread_name", "name", thread_name,
                                   pid_and_tid.first, pid_and_tid.second);
      }
    }
    return base::OkStatus();
  }

  base::Status ExportProcessNames() {
    const auto& process_table = storage_->process_table();
    for (auto it = process_table.IterateRows(); it; ++it) {
      auto opt_name = it.name();
      if (opt_name.has_value()) {
        UniquePid upid = it.id().value;
        const char* process_name = GetNonNullString(storage_, opt_name);
        writer_.WriteMetadataEvent("process_name", "name", process_name,
                                   UpidToPid(upid), /*tid=*/0);
      }
    }
    return base::OkStatus();
  }

  // For each process it writes an approximate uptime, based on the process'
  // start time and the last slice in the entire trace. This same last slice is
  // used with all processes, so the process could have ended earlier.
  base::Status ExportProcessUptimes() {
    int64_t last_timestamp_ns = FindLastSliceTimestamp();
    if (last_timestamp_ns <= 0)
      return base::OkStatus();

    const auto& process_table = storage_->process_table();
    for (auto it = process_table.IterateRows(); it; ++it) {
      std::optional<int64_t> start_timestamp_ns = it.start_ts();
      if (!start_timestamp_ns.has_value()) {
        continue;
      }

      UniquePid upid = it.id().value;
      int64_t process_uptime_seconds =
          (last_timestamp_ns - start_timestamp_ns.value()) /
          (1000l * 1000 * 1000);
      writer_.WriteMetadataEvent("process_uptime_seconds", "uptime",
                                 std::to_string(process_uptime_seconds).c_str(),
                                 UpidToPid(upid), /*tid=*/0);
    }

    return base::OkStatus();
  }

  // Returns the last slice's end timestamp for the entire trace. If no slices
  // are found 0 is returned.
  int64_t FindLastSliceTimestamp() {
    int64_t last_ts = 0;
    for (auto it = storage_->slice_table().IterateRows(); it; ++it) {
      last_ts = std::max(last_ts, it.ts() + it.dur());
    }
    return last_ts;
  }

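  // Writes one JSON trace event per slice, distinguishing synchronous thread
  // slices, asynchronous track slices and global/process-scoped instant
  // events.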
  base::Status ExportSlices() {
    const auto& slices = storage_->slice_table();
    for (auto it = slices.IterateRows(); it; ++it) {
      // Skip slices with empty category - these are ftrace/system slices that
      // were also imported into the raw table and will be exported from there
      // by trace_to_text.
      // TODO(b/153609716): Add a src column or do_not_export flag instead.
      if (!it.category())
        continue;
      auto cat = storage_->GetString(*it.category());
      if (cat.c_str() == nullptr || cat == "binder")
        continue;

      Json::Value event;
      event["ts"] = Json::Int64(it.ts() / 1000);
      event["cat"] = GetNonNullString(storage_, it.category());
      event["name"] = GetNonNullString(storage_, it.name());
      event["pid"] = 0;
      event["tid"] = 0;

      std::optional<UniqueTid> legacy_utid;
      std::string legacy_phase;

      event["args"] = args_builder_.GetArgs(it.arg_set_id());  // Makes a copy.
      if (event["args"].isMember(kLegacyEventArgsKey)) {
        const auto& legacy_args = event["args"][kLegacyEventArgsKey];

        if (legacy_args.isMember(kLegacyEventPassthroughUtidKey)) {
          legacy_utid = legacy_args[kLegacyEventPassthroughUtidKey].asUInt();
        }
        if (legacy_args.isMember(kLegacyEventPhaseKey)) {
          legacy_phase = legacy_args[kLegacyEventPhaseKey].asString();
        }

        event["args"].removeMember(kLegacyEventArgsKey);
      }

      // To prevent duplicate export of slices, only export slices on descriptor
      // or chrome tracks (i.e. TrackEvent slices). Slices on other tracks may
      // also be present as raw events and handled by trace_to_text. Only add
      // more track types here if they are not already covered by trace_to_text.
      TrackId track_id = it.track_id();

      const auto& track_table = storage_->track_table();

      auto track_row_ref = *track_table.FindById(track_id);
      auto track_args_id = track_row_ref.source_arg_set_id();
      const Json::Value* track_args = nullptr;
      bool legacy_chrome_track = false;
      bool is_child_track = false;
      if (track_args_id) {
        track_args = &args_builder_.GetArgs(*track_args_id);
        legacy_chrome_track = (*track_args)["source"].asString() == "chrome";
        is_child_track = track_args->isMember("is_root_in_scope") &&
                         !(*track_args)["is_root_in_scope"].asBool();
      }

      const auto& virtual_track_slices = storage_->virtual_track_slices();

      int64_t duration_ns = it.dur();
      std::optional<int64_t> thread_ts_ns;
      std::optional<int64_t> thread_duration_ns;
      std::optional<int64_t> thread_instruction_count;
      std::optional<int64_t> thread_instruction_delta;

      if (it.thread_dur()) {
        thread_ts_ns = it.thread_ts();
        thread_duration_ns = it.thread_dur();
        thread_instruction_count = it.thread_instruction_count();
        thread_instruction_delta = it.thread_instruction_delta();
      } else {
        SliceId id = it.id();
        std::optional<uint32_t> vtrack_slice_row =
            virtual_track_slices.FindRowForSliceId(id);
        if (vtrack_slice_row) {
          thread_ts_ns =
              virtual_track_slices.thread_timestamp_ns()[*vtrack_slice_row];
          thread_duration_ns =
              virtual_track_slices.thread_duration_ns()[*vtrack_slice_row];
          thread_instruction_count =
              virtual_track_slices
                  .thread_instruction_counts()[*vtrack_slice_row];
          thread_instruction_delta =
              virtual_track_slices
                  .thread_instruction_deltas()[*vtrack_slice_row];
        }
      }

      if (track_row_ref.utid() && !is_child_track) {
        // Synchronous (thread) slice or instant event.
        auto pid_and_tid = UtidToPidAndTid(*track_row_ref.utid());
        event["pid"] = Json::Int(pid_and_tid.first);
        event["tid"] = Json::Int(pid_and_tid.second);

        if (duration_ns == 0) {
          if (legacy_phase.empty()) {
            // Use "I" instead of "i" phase for backwards-compat with old
            // consumers.
            event["ph"] = "I";
          } else {
            event["ph"] = legacy_phase;
          }
          if (thread_ts_ns && thread_ts_ns > 0) {
            event["tts"] = Json::Int64(*thread_ts_ns / 1000);
          }
          if (thread_instruction_count && *thread_instruction_count > 0) {
            event["ticount"] = Json::Int64(*thread_instruction_count);
          }
          event["s"] = "t";
        } else {
          if (duration_ns > 0) {
            event["ph"] = "X";
            event["dur"] = Json::Int64(duration_ns / 1000);
          } else {
            // If the slice didn't finish, the duration may be negative. Only
            // write a begin event without end event in this case.
            event["ph"] = "B";
          }
          if (thread_ts_ns && *thread_ts_ns > 0) {
            event["tts"] = Json::Int64(*thread_ts_ns / 1000);
            // Only write thread duration for completed events.
            if (duration_ns > 0 && thread_duration_ns)
              event["tdur"] = Json::Int64(*thread_duration_ns / 1000);
          }
          if (thread_instruction_count && *thread_instruction_count > 0) {
            event["ticount"] = Json::Int64(*thread_instruction_count);
            // Only write thread instruction delta for completed events.
            if (duration_ns > 0 && thread_instruction_delta)
              event["tidelta"] = Json::Int64(*thread_instruction_delta);
          }
        }
        writer_.WriteCommonEvent(event);
      } else if (is_child_track ||
                 (legacy_chrome_track && track_args->isMember("trace_id"))) {
        // Async event slice.
        if (legacy_chrome_track) {
          // Legacy async tracks are always process-associated and have args.
          PERFETTO_DCHECK(track_args);
          PERFETTO_DCHECK(track_args->isMember("upid"));
          uint32_t exported_pid = UpidToPid((*track_args)["upid"].asUInt());
          event["pid"] = Json::Int(exported_pid);
          event["tid"] =
              Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
                                    : exported_pid);

          // Preserve original event IDs for legacy tracks. This is so that e.g.
          // memory dump IDs show up correctly in the JSON trace.
          PERFETTO_DCHECK(track_args->isMember("trace_id"));
          PERFETTO_DCHECK(track_args->isMember("trace_id_is_process_scoped"));
          PERFETTO_DCHECK(track_args->isMember("source_scope"));
          auto trace_id =
              static_cast<uint64_t>((*track_args)["trace_id"].asInt64());
          std::string source_scope = (*track_args)["source_scope"].asString();
          if (!source_scope.empty())
            event["scope"] = source_scope;
          bool trace_id_is_process_scoped =
              (*track_args)["trace_id_is_process_scoped"].asBool();
          if (trace_id_is_process_scoped) {
            event["id2"]["local"] = base::Uint64ToHexString(trace_id);
          } else {
            // Some legacy importers don't understand "id2" fields, so we use
            // the "usually" global "id" field instead. This works as long as
            // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
            // "LOCAL_ID_PHASES" in catapult.
            event["id"] = base::Uint64ToHexString(trace_id);
          }
        } else {
          if (track_row_ref.utid()) {
            auto pid_and_tid = UtidToPidAndTid(*track_row_ref.utid());
            event["pid"] = Json::Int(pid_and_tid.first);
            event["tid"] = Json::Int(pid_and_tid.second);
            event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
          } else if (track_row_ref.upid()) {
            uint32_t exported_pid = UpidToPid(*track_row_ref.upid());
            event["pid"] = Json::Int(exported_pid);
            event["tid"] =
                Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
                                      : exported_pid);
            event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
          } else {
            if (legacy_utid) {
              auto pid_and_tid = UtidToPidAndTid(*legacy_utid);
              event["pid"] = Json::Int(pid_and_tid.first);
              event["tid"] = Json::Int(pid_and_tid.second);
            }

            // Some legacy importers don't understand "id2" fields, so we use
            // the "usually" global "id" field instead. This works as long as
            // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
            // "LOCAL_ID_PHASES" in catapult.
            event["id"] = base::Uint64ToHexString(track_id.value);
          }
        }

        if (thread_ts_ns && *thread_ts_ns > 0) {
          event["tts"] = Json::Int64(*thread_ts_ns / 1000);
          event["use_async_tts"] = Json::Int(1);
        }
        if (thread_instruction_count && *thread_instruction_count > 0) {
          event["ticount"] = Json::Int64(*thread_instruction_count);
          event["use_async_tts"] = Json::Int(1);
        }

        if (duration_ns == 0) {
          if (legacy_phase.empty()) {
            // Instant async event.
            event["ph"] = "n";
            writer_.AddAsyncInstantEvent(event);
          } else {
            // Async step events.
            event["ph"] = legacy_phase;
            writer_.AddAsyncBeginEvent(event);
          }
        } else {  // Async start and end.
          event["ph"] = legacy_phase.empty() ? "b" : legacy_phase;
          writer_.AddAsyncBeginEvent(event);
          // If the slice didn't finish, the duration may be negative. Don't
          // write the end event in this case.
          if (duration_ns > 0) {
            event["ph"] = legacy_phase.empty() ? "e" : "F";
            event["ts"] = Json::Int64((it.ts() + duration_ns) / 1000);
            if (thread_ts_ns && thread_duration_ns && *thread_ts_ns > 0) {
              event["tts"] =
                  Json::Int64((*thread_ts_ns + *thread_duration_ns) / 1000);
            }
            if (thread_instruction_count && thread_instruction_delta &&
                *thread_instruction_count > 0) {
              event["ticount"] = Json::Int64(
                  (*thread_instruction_count + *thread_instruction_delta));
            }
            event["args"].clear();
            writer_.AddAsyncEndEvent(event);
          }
        }
      } else {
        // Global or process-scoped instant event.
        PERFETTO_DCHECK(legacy_chrome_track || !is_child_track);
        if (duration_ns != 0) {
          // We don't support exporting slices on the default global or process
          // track to JSON (JSON only supports instant events on these tracks).
          PERFETTO_DLOG(
              "skipping non-instant slice on global or process track");
        } else {
          if (legacy_phase.empty()) {
            // Use "I" instead of "i" phase for backwards-compat with old
            // consumers.
            event["ph"] = "I";
          } else {
            event["ph"] = legacy_phase;
          }

          if (track_row_ref.upid()) {
            uint32_t exported_pid = UpidToPid(*track_row_ref.upid());
            event["pid"] = Json::Int(exported_pid);
            event["tid"] =
                Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
                                      : exported_pid);
            event["s"] = "p";
          } else {
            event["s"] = "g";
          }
          writer_.WriteCommonEvent(event);
        }
      }
    }
    return base::OkStatus();
  }

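  // Builds a legacy flow event ("s" for flow begin, "f" for flow end)
  // anchored to |slice_id|, or std::nullopt if the slice cannot carry a flow.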
  std::optional<Json::Value> CreateFlowEventV1(uint32_t flow_id,
                                               SliceId slice_id,
                                               const std::string& name,
                                               const std::string& cat,
                                               Json::Value args,
                                               bool flow_begin) {
    const auto& slices = storage_->slice_table();

    auto opt_slice_rr = slices.FindById(slice_id);
    if (!opt_slice_rr)
      return std::nullopt;
    auto slice_rr = opt_slice_rr.value();

    TrackId track_id = slice_rr.track_id();
    auto rr = storage_->track_table().FindById(track_id);

    // catapult only supports flow events attached to thread-track slices
    if (!rr || !rr->utid()) {
      return std::nullopt;
    }

    UniqueTid utid = *rr->utid();
    auto pid_and_tid = UtidToPidAndTid(utid);
    Json::Value event;
    event["id"] = flow_id;
    event["pid"] = Json::Int(pid_and_tid.first);
    event["tid"] = Json::Int(pid_and_tid.second);
    event["cat"] = cat;
    event["name"] = name;
    event["ph"] = (flow_begin ? "s" : "f");
    event["ts"] = Json::Int64(slice_rr.ts() / 1000);
    if (!flow_begin) {
      event["bp"] = "e";
    }
    event["args"] = std::move(args);
    return std::move(event);
  }

  base::Status ExportFlows() {
    const auto& flow_table = storage_->flow_table();
    const auto& slice_table = storage_->slice_table();
    for (auto it = flow_table.IterateRows(); it; ++it) {
      SliceId slice_out = it.slice_out();
      SliceId slice_in = it.slice_in();
      std::optional<uint32_t> arg_set_id = it.arg_set_id();

      std::string cat;
      std::string name;
      auto args = args_builder_.GetArgs(arg_set_id);
      if (arg_set_id != std::nullopt) {
        cat = args["cat"].asString();
        name = args["name"].asString();
        // Don't export these args since they are only used for this export and
        // weren't part of the original event.
        args.removeMember("name");
        args.removeMember("cat");
      } else {
        auto rr = slice_table.FindById(slice_out);
        PERFETTO_DCHECK(rr.has_value());
        cat = GetNonNullString(storage_, rr->category());
        name = GetNonNullString(storage_, rr->name());
      }

      uint32_t i = it.row_number().row_number();
      auto out_event = CreateFlowEventV1(i, slice_out, name, cat, args,
                                         /* flow_begin = */ true);
      auto in_event = CreateFlowEventV1(i, slice_in, name, cat, std::move(args),
                                        /* flow_begin = */ false);

      if (out_event && in_event) {
        writer_.WriteCommonEvent(out_event.value());
        writer_.WriteCommonEvent(in_event.value());
      }
    }
    return base::OkStatus();
  }

  Json::Value ConvertLegacyRawEventToJson(
      const tables::ChromeRawTable::ConstIterator& it) {
    Json::Value event;
    event["ts"] = Json::Int64(it.ts() / 1000);

    UniqueTid utid = static_cast<UniqueTid>(it.utid());
    auto pid_and_tid = UtidToPidAndTid(utid);
    event["pid"] = Json::Int(pid_and_tid.first);
    event["tid"] = Json::Int(pid_and_tid.second);

    // Raw legacy events store all other params in the arg set. Make a copy of
    // the converted args here, parse, and then remove the legacy params.
    event["args"] = args_builder_.GetArgs(it.arg_set_id());
    const Json::Value& legacy_args = event["args"][kLegacyEventArgsKey];

    PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventCategoryKey));
    event["cat"] = legacy_args[kLegacyEventCategoryKey];

    PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventNameKey));
    event["name"] = legacy_args[kLegacyEventNameKey];

    PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventPhaseKey));
    event["ph"] = legacy_args[kLegacyEventPhaseKey];

    // Object snapshot events are supposed to have a mandatory "snapshot" arg,
    // which may be removed in trace processor if it is empty.
    if (legacy_args[kLegacyEventPhaseKey] == "O" &&
        !event["args"].isMember("snapshot")) {
      event["args"]["snapshot"] = Json::Value(Json::objectValue);
    }

    if (legacy_args.isMember(kLegacyEventDurationNsKey))
      event["dur"] = legacy_args[kLegacyEventDurationNsKey].asInt64() / 1000;

    if (legacy_args.isMember(kLegacyEventThreadTimestampNsKey)) {
      event["tts"] =
          legacy_args[kLegacyEventThreadTimestampNsKey].asInt64() / 1000;
    }

    if (legacy_args.isMember(kLegacyEventThreadDurationNsKey)) {
      event["tdur"] =
          legacy_args[kLegacyEventThreadDurationNsKey].asInt64() / 1000;
    }

    if (legacy_args.isMember(kLegacyEventThreadInstructionCountKey))
      event["ticount"] = legacy_args[kLegacyEventThreadInstructionCountKey];

    if (legacy_args.isMember(kLegacyEventThreadInstructionDeltaKey))
      event["tidelta"] = legacy_args[kLegacyEventThreadInstructionDeltaKey];

    if (legacy_args.isMember(kLegacyEventUseAsyncTtsKey))
      event["use_async_tts"] = legacy_args[kLegacyEventUseAsyncTtsKey];

    if (legacy_args.isMember(kLegacyEventUnscopedIdKey)) {
      event["id"] = base::Uint64ToHexString(
          legacy_args[kLegacyEventUnscopedIdKey].asUInt64());
    }

    if (legacy_args.isMember(kLegacyEventGlobalIdKey)) {
      event["id2"]["global"] = base::Uint64ToHexString(
          legacy_args[kLegacyEventGlobalIdKey].asUInt64());
    }

    if (legacy_args.isMember(kLegacyEventLocalIdKey)) {
      event["id2"]["local"] = base::Uint64ToHexString(
          legacy_args[kLegacyEventLocalIdKey].asUInt64());
    }

    if (legacy_args.isMember(kLegacyEventIdScopeKey))
      event["scope"] = legacy_args[kLegacyEventIdScopeKey];

    event["args"].removeMember(kLegacyEventArgsKey);

    return event;
  }

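  // Exports events from the chrome_raw table: legacy TrackEvents become
  // regular trace events, while legacy system/user trace data and Chrome
  // metadata are forwarded to the writer's dedicated sections.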
  base::Status ExportRawEvents() {
    std::optional<StringId> raw_legacy_event_key_id =
        storage_->string_pool().GetId("track_event.legacy_event");
    std::optional<StringId> raw_legacy_system_trace_event_id =
        storage_->string_pool().GetId("chrome_event.legacy_system_trace");
    std::optional<StringId> raw_legacy_user_trace_event_id =
        storage_->string_pool().GetId("chrome_event.legacy_user_trace");
    std::optional<StringId> raw_chrome_metadata_event_id =
        storage_->string_pool().GetId("chrome_event.metadata");

    const auto& events = storage_->chrome_raw_table();
    for (auto it = events.IterateRows(); it; ++it) {
      if (raw_legacy_event_key_id && it.name() == *raw_legacy_event_key_id) {
        Json::Value event = ConvertLegacyRawEventToJson(it);
        writer_.WriteCommonEvent(event);
      } else if (raw_legacy_system_trace_event_id &&
                 it.name() == *raw_legacy_system_trace_event_id) {
        Json::Value args = args_builder_.GetArgs(it.arg_set_id());
        PERFETTO_DCHECK(args.isMember("data"));
        writer_.AddSystemTraceData(args["data"].asString());
      } else if (raw_legacy_user_trace_event_id &&
                 it.name() == *raw_legacy_user_trace_event_id) {
        Json::Value args = args_builder_.GetArgs(it.arg_set_id());
        PERFETTO_DCHECK(args.isMember("data"));
        writer_.AddUserTraceData(args["data"].asString());
      } else if (raw_chrome_metadata_event_id &&
                 it.name() == *raw_chrome_metadata_event_id) {
        Json::Value args = args_builder_.GetArgs(it.arg_set_id());
        writer_.MergeMetadata(args);
      }
    }
    return base::OkStatus();
  }

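  // Merges consecutive CPU profile samples that share a callsite on the same
  // thread into a single nestable async ("b"/"e") event pair, so repeated
  // samples of one stack show up as a single slice in the trace viewer.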
1214   class MergedProfileSamplesEmitter {
1215    public:
1216     // The TraceFormatWriter must outlive this instance.
MergedProfileSamplesEmitter(TraceFormatWriter & writer)1217     explicit MergedProfileSamplesEmitter(TraceFormatWriter& writer)
1218         : writer_(writer) {}
1219 
1220     MergedProfileSamplesEmitter(const MergedProfileSamplesEmitter&) = delete;
1221     MergedProfileSamplesEmitter& operator=(const MergedProfileSamplesEmitter&) =
1222         delete;
1223     MergedProfileSamplesEmitter& operator=(
1224         MergedProfileSamplesEmitter&& value) = delete;
1225 
AddEventForUtid(UniqueTid utid,int64_t ts,CallsiteId callsite_id,const Json::Value & event)1226     uint64_t AddEventForUtid(UniqueTid utid,
1227                              int64_t ts,
1228                              CallsiteId callsite_id,
1229                              const Json::Value& event) {
1230       auto current_sample = current_events_.find(utid);
1231 
1232       // If there's a current entry for our thread and it matches the callsite
1233       // of the new sample, update the entry with the new timestamp. Otherwise
1234       // create a new entry.
1235       if (current_sample != current_events_.end() &&
1236           current_sample->second.callsite_id() == callsite_id) {
1237         current_sample->second.UpdateWithNewSample(ts);
1238         return current_sample->second.event_id();
1239       }
1240 
1241       if (current_sample != current_events_.end()) {
1242         current_events_.erase(current_sample);
1243       }
1244 
1245       auto new_entry = current_events_.emplace(
1246           std::piecewise_construct, std::forward_as_tuple(utid),
1247           std::forward_as_tuple(writer_, callsite_id, ts, event));
1248       return new_entry.first->second.event_id();
1249     }
1250 
GenerateNewEventId()1251     static uint64_t GenerateNewEventId() {
1252       // "n"-phase events are nestable async events which get tied together
1253       // with their id, so we need to give each one a unique ID as we only
1254       // want the samples to show up on their own track in the trace-viewer
1255       // but not nested together (unless they're nested under a merged event).
1256       static size_t g_id_counter = 0;
1257       return ++g_id_counter;
1258     }
1259 
1260    private:
1261     class Sample {
1262      public:
Sample(TraceFormatWriter & writer,CallsiteId callsite_id,int64_t ts,Json::Value event)1263       Sample(TraceFormatWriter& writer,
1264              CallsiteId callsite_id,
1265              int64_t ts,
1266              Json::Value event)
1267           : writer_(writer),
1268             callsite_id_(callsite_id),
1269             begin_ts_(ts),
1270             end_ts_(ts),
1271             event_(std::move(event)),
1272             event_id_(MergedProfileSamplesEmitter::GenerateNewEventId()),
1273             sample_count_(1) {}
1274 
1275       Sample(const Sample&) = delete;
1276       Sample& operator=(const Sample&) = delete;
1277 
1278       Sample(Sample&&) = delete;
1279       Sample& operator=(Sample&& value) = delete;
1280 
~Sample()1281       ~Sample() {
1282         // No point writing a merged event if we only got a single sample
1283         // as ExportCpuProfileSamples will already be writing the instant event.
1284         if (sample_count_ == 1)
1285           return;
1286 
1287         event_["id"] = base::Uint64ToHexString(event_id_);
1288 
1289         // Write the BEGIN event.
1290         event_["ph"] = "b";
1291         // We subtract 1us as a workaround for the first async event not
1292         // nesting underneath the parent event if the timestamp is identical.
1293         int64_t begin_in_us_ = begin_ts_ / 1000;
1294         event_["ts"] = Json::Int64(std::min(begin_in_us_ - 1, begin_in_us_));
1295         writer_.WriteCommonEvent(event_);
1296 
1297         // Write the END event.
1298         event_["ph"] = "e";
1299         event_["ts"] = Json::Int64(end_ts_ / 1000);
1300         // No need for args for the end event; remove them to save some space.
1301         event_["args"].clear();
1302         writer_.WriteCommonEvent(event_);
1303       }
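      // Illustrative output for a merged entry (values are made up): with
      // begin_ts_ = 100,000ns and end_ts_ = 250,000ns, the destructor writes
      //   {"ph":"b", "id":"0x2", "ts":99, ...}   (100us minus the 1us tweak)
      //   {"ph":"e", "id":"0x2", "ts":250, "args":{}, ...}
      // Nothing is written when only a single sample was recorded.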
1304 
1305       void UpdateWithNewSample(int64_t ts) {
1306         // We assume samples for a given thread will appear in timestamp
1307         // order; if this assumption stops holding true, we'll have to sort the
1308         // samples first.
1309         if (ts < end_ts_ || begin_ts_ > ts) {
1310           PERFETTO_ELOG(
1311               "Got a timestamp out of sequence while merging stack samples "
1312               "during JSON export!\n");
1313           PERFETTO_DCHECK(false);
1314         }
1315 
1316         end_ts_ = ts;
1317         sample_count_++;
1318       }
1319 
1320       uint64_t event_id() const { return event_id_; }
1321       CallsiteId callsite_id() const { return callsite_id_; }
1322 
1323       TraceFormatWriter& writer_;
1324       CallsiteId callsite_id_;
1325       int64_t begin_ts_;
1326       int64_t end_ts_;
1327       Json::Value event_;
1328       uint64_t event_id_;
1329       size_t sample_count_;
1330     };
1331 
1332     std::unordered_map<UniqueTid, Sample> current_events_;
1333     TraceFormatWriter& writer_;
1334   };
1335 
1336   base::Status ExportCpuProfileSamples() {
1337     MergedProfileSamplesEmitter merged_sample_emitter(writer_);
1338 
1339     const tables::CpuProfileStackSampleTable& samples =
1340         storage_->cpu_profile_stack_sample_table();
1341     for (auto it = samples.IterateRows(); it; ++it) {
1342       Json::Value event;
1343       event["ts"] = Json::Int64(it.ts() / 1000);
1344 
1345       UniqueTid utid = static_cast<UniqueTid>(it.utid());
1346       auto pid_and_tid = UtidToPidAndTid(utid);
1347       event["pid"] = Json::Int(pid_and_tid.first);
1348       event["tid"] = Json::Int(pid_and_tid.second);
1349 
1350       event["ph"] = "n";
1351       event["cat"] = "disabled-by-default-cpu_profiler";
1352       event["name"] = "StackCpuSampling";
1353       event["s"] = "t";
1354 
1355       // Add a dummy thread timestamp to this event to match the format of
1356       // instant events. Useful in the UI to view args of a selected group of
1357       // samples.
1358       event["tts"] = Json::Int64(1);
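      // At this point the event is roughly the following (args and id are
      // filled in below):
      //   {"ph":"n", "cat":"disabled-by-default-cpu_profiler",
      //    "name":"StackCpuSampling", "s":"t", "tts":1,
      //    "ts":..., "pid":..., "tid":...}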
1359 
1360       const auto& callsites = storage_->stack_profile_callsite_table();
1361       const auto& frames = storage_->stack_profile_frame_table();
1362       const auto& mappings = storage_->stack_profile_mapping_table();
1363 
1364       std::vector<std::string> callstack;
1365       std::optional<CallsiteId> opt_callsite_id = it.callsite_id();
1366 
1367       while (opt_callsite_id) {
1368         CallsiteId callsite_id = *opt_callsite_id;
1369         auto callsite_row = *callsites.FindById(callsite_id);
1370 
1371         FrameId frame_id = callsite_row.frame_id();
1372         auto frame_row = *frames.FindById(frame_id);
1373 
1374         MappingId mapping_id = frame_row.mapping();
1375         auto mapping_row = *mappings.FindById(mapping_id);
1376 
1377         NullTermStringView symbol_name;
1378         auto opt_symbol_set_id = frame_row.symbol_set_id();
1379         if (opt_symbol_set_id) {
1380           symbol_name = storage_->GetString(
1381               storage_->symbol_table()[*opt_symbol_set_id].name());
1382         }
1383 
1384         base::StackString<1024> frame_entry(
1385             "%s - %s [%s]\n",
1386             (symbol_name.empty()
1387                  ? base::Uint64ToHexString(
1388                        static_cast<uint64_t>(frame_row.rel_pc()))
1389                        .c_str()
1390                  : symbol_name.c_str()),
1391             GetNonNullString(storage_, mapping_row.name()),
1392             GetNonNullString(storage_, mapping_row.build_id()));
1393 
1394         callstack.emplace_back(frame_entry.ToStdString());
1395 
1396         opt_callsite_id = callsite_row.parent_id();
1397       }
1398 
1399       std::string merged_callstack;
1400       for (auto entry = callstack.rbegin(); entry != callstack.rend();
1401            ++entry) {
1402         merged_callstack += *entry;
1403       }
1404 
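      // The resulting "frames" string lists the stack root-to-leaf, one line
      // per frame in the "symbol - mapping [build_id]" form built above, e.g.
      // (names are hypothetical):
      //   main - /system/bin/app [f00d]
      //   Foo() - /system/bin/app [f00d]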
1405       event["args"]["frames"] = merged_callstack;
1406       event["args"]["process_priority"] = it.process_priority();
1407 
1408       // TODO(oysteine): Used for backwards compatibility with the memlog
1409       // pipeline, should remove once we've switched to looking directly at the
1410       // tid.
1411       event["args"]["thread_id"] = Json::Int(pid_and_tid.second);
1412 
1413       // Emit duration events for adjacent samples with the same callsite.
1414       // For now, only do this when the trace has already been symbolized,
1415       // i.e. the samples were not directly output by Chrome, to avoid
1416       // interfering with other processing pipelines.
1417       std::optional<CallsiteId> opt_current_callsite_id = it.callsite_id();
1418 
1419       if (opt_current_callsite_id && storage_->symbol_table().row_count() > 0) {
1420         uint64_t parent_event_id = merged_sample_emitter.AddEventForUtid(
1421             utid, it.ts(), *opt_current_callsite_id, event);
1422         event["id"] = base::Uint64ToHexString(parent_event_id);
1423       } else {
1424         event["id"] = base::Uint64ToHexString(
1425             MergedProfileSamplesEmitter::GenerateNewEventId());
1426       }
1427 
1428       writer_.WriteCommonEvent(event);
1429     }
1430 
1431     return base::OkStatus();
1432   }
1433 
1434   base::Status ExportMetadata() {
1435     const auto& trace_metadata = storage_->metadata_table();
1436 
1437     // Create a mapping from key string ids to keys.
1438     std::unordered_map<StringId, metadata::KeyId> key_map;
1439     for (uint32_t i = 0; i < metadata::kNumKeys; ++i) {
1440       auto id = *storage_->string_pool().GetId(metadata::kNames[i]);
1441       key_map[id] = static_cast<metadata::KeyId>(i);
1442     }
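    // key_map now maps each interned metadata key name back to its KeyId, so
    // that, for example, the string id of "benchmark_name" resolves to
    // metadata::benchmark_name, which the switch below exports under the
    // "benchmarks" telemetry key.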
1443 
1444     for (auto it = trace_metadata.IterateRows(); it; ++it) {
1445       auto key_it = key_map.find(it.name());
1446       // Skip exporting dynamic entries; the cr-xxx entries that come from
1447       // the ChromeMetadata proto message are already exported from the raw
1448       // table.
1449       if (key_it == key_map.end())
1450         continue;
1451 
1452       // Cast away from enum type, as otherwise -Wswitch-enum will demand an
1453       // exhaustive list of cases, even if there's a default case.
1454       metadata::KeyId key = key_it->second;
1455       switch (static_cast<size_t>(key)) {
1456         case metadata::benchmark_description:
1457           writer_.AppendTelemetryMetadataString(
1458               "benchmarkDescriptions",
1459               storage_->string_pool().Get(*it.str_value()).c_str());
1460           break;
1461 
1462         case metadata::benchmark_name:
1463           writer_.AppendTelemetryMetadataString(
1464               "benchmarks",
1465               storage_->string_pool().Get(*it.str_value()).c_str());
1466           break;
1467 
1468         case metadata::benchmark_start_time_us:
1469           writer_.SetTelemetryMetadataTimestamp("benchmarkStart",
1470                                                 *it.int_value());
1471           break;
1472 
1473         case metadata::benchmark_had_failures:
1474           writer_.AppendTelemetryMetadataBool("hadFailures", *it.int_value());
1475           break;
1476 
1477         case metadata::benchmark_label:
1478           writer_.AppendTelemetryMetadataString(
1479               "labels", storage_->string_pool().Get(*it.str_value()).c_str());
1480           break;
1481 
1482         case metadata::benchmark_story_name:
1483           writer_.AppendTelemetryMetadataString(
1484               "stories", storage_->string_pool().Get(*it.str_value()).c_str());
1485           break;
1486 
1487         case metadata::benchmark_story_run_index:
1488           writer_.AppendTelemetryMetadataInt("storysetRepeats",
1489                                              *it.int_value());
1490           break;
1491 
1492         case metadata::benchmark_story_run_time_us:
1493           writer_.SetTelemetryMetadataTimestamp("traceStart", *it.int_value());
1494           break;
1495 
1496         case metadata::benchmark_story_tags:  // repeated
1497           writer_.AppendTelemetryMetadataString(
1498               "storyTags",
1499               storage_->string_pool().Get(*it.str_value()).c_str());
1500           break;
1501 
1502         default:
1503           PERFETTO_DLOG("Ignoring metadata key %zu", static_cast<size_t>(key));
1504           break;
1505       }
1506     }
1507     return base::OkStatus();
1508   }
1509 
1510   base::Status ExportStats() {
1511     const auto& stats = storage_->stats();
1512 
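    // Single-valued stats export one number per key; indexed stats export the
    // whole index->value map (e.g. stats that are tracked per trace buffer).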
1513     for (size_t idx = 0; idx < stats::kNumKeys; idx++) {
1514       if (stats::kTypes[idx] == stats::kSingle) {
1515         writer_.SetStats(stats::kNames[idx], stats[idx].value);
1516       } else {
1517         PERFETTO_DCHECK(stats::kTypes[idx] == stats::kIndexed);
1518         writer_.SetStats(stats::kNames[idx], stats[idx].indexed_values);
1519       }
1520     }
1521 
1522     return base::OkStatus();
1523   }
1524 
1525   base::Status ExportMemorySnapshots() {
1526     const auto& memory_snapshots = storage_->memory_snapshot_table();
1527     std::optional<StringId> private_footprint_id =
1528         storage_->string_pool().GetId("chrome.private_footprint_kb");
1529     std::optional<StringId> peak_resident_set_id =
1530         storage_->string_pool().GetId("chrome.peak_resident_set_kb");
1531 
1532     std::string_view chrome_process_stats =
1533         tracks::kChromeProcessStatsBlueprint.type;
1534     std::optional<StringId> process_stats = storage_->string_pool().GetId(
1535         {chrome_process_stats.data(), chrome_process_stats.size()});
1536 
1537     for (auto sit = memory_snapshots.IterateRows(); sit; ++sit) {
1538       Json::Value event_base;
1539 
1540       event_base["ph"] = "v";
1541       event_base["cat"] = "disabled-by-default-memory-infra";
1542       auto snapshot_id = sit.id();
1543       event_base["id"] = base::Uint64ToHexString(snapshot_id.value);
1544       int64_t snapshot_ts = sit.timestamp();
1545       event_base["ts"] = Json::Int64(snapshot_ts / 1000);
1546       // TODO(crbug:1116359): Add dump type to the snapshot proto
1547       // to properly fill event_base["name"]
1548       event_base["name"] = "periodic_interval";
1549       event_base["args"]["dumps"]["level_of_detail"] =
1550           GetNonNullString(storage_, sit.detail_level());
1551 
1552       // Export OS dump events for processes with relevant data.
1553       const auto& process_table = storage_->process_table();
1554       const auto& track_table = storage_->track_table();
1555       for (auto pit = process_table.IterateRows(); pit; ++pit) {
1556         Json::Value event = FillInProcessEventDetails(event_base, pit.pid());
1557         Json::Value& totals = event["args"]["dumps"]["process_totals"];
1558 
1559         for (auto it = track_table.IterateRows(); it; ++it) {
1560           if (it.type() != process_stats) {
1561             continue;
1562           }
1563           if (it.upid() != pit.id().value) {
1564             continue;
1565           }
1566           TrackId track_id = it.id();
1567           if (private_footprint_id && (it.name() == private_footprint_id)) {
1568             totals["private_footprint_bytes"] = base::Uint64ToHexStringNoPrefix(
1569                 GetCounterValue(track_id, snapshot_ts));
1570           } else if (peak_resident_set_id &&
1571                      (it.name() == peak_resident_set_id)) {
1572             totals["peak_resident_set_size"] = base::Uint64ToHexStringNoPrefix(
1573                 GetCounterValue(track_id, snapshot_ts));
1574           }
1575         }
1576 
1577         auto process_args_id = pit.arg_set_id();
1578         if (process_args_id) {
1579           const Json::Value* process_args =
1580               &args_builder_.GetArgs(process_args_id);
1581           if (process_args->isMember("is_peak_rss_resettable")) {
1582             totals["is_peak_rss_resettable"] =
1583                 (*process_args)["is_peak_rss_resettable"];
1584           }
1585         }
1586 
1587         const auto& smaps_table = storage_->profiler_smaps_table();
1588         // Do not create a vm_regions node when there are no memory maps,
1589         // since catapult expects it to contain rows.
1590         Json::Value* smaps =
1591             smaps_table.row_count() > 0
1592                 ? &event["args"]["dumps"]["process_mmaps"]["vm_regions"]
1593                 : nullptr;
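        // Each region below is emitted with catapult's abbreviated keys, taken
        // from the smaps row: "mf" file name, "pf" protection flags, "sa"
        // start address, "sz" size in bytes, and "bs" byte stats (private
        // clean/dirty, proportional, shared clean/dirty, swap).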
1594         for (auto it = smaps_table.IterateRows(); it; ++it) {
1595           if (it.upid() != pit.id().value)
1596             continue;
1597           if (it.ts() != snapshot_ts)
1598             continue;
1599           Json::Value region;
1600           region["mf"] = GetNonNullString(storage_, it.file_name());
1601           region["pf"] = Json::Int64(it.protection_flags());
1602           region["sa"] = base::Uint64ToHexStringNoPrefix(
1603               static_cast<uint64_t>(it.start_address()));
1604           region["sz"] = base::Uint64ToHexStringNoPrefix(
1605               static_cast<uint64_t>(it.size_kb()) * 1024);
1606           region["ts"] = Json::Int64(it.module_timestamp());
1607           region["id"] = GetNonNullString(storage_, it.module_debugid());
1608           region["df"] = GetNonNullString(storage_, it.module_debug_path());
1609           region["bs"]["pc"] = base::Uint64ToHexStringNoPrefix(
1610               static_cast<uint64_t>(it.private_clean_resident_kb()) * 1024);
1611           region["bs"]["pd"] = base::Uint64ToHexStringNoPrefix(
1612               static_cast<uint64_t>(it.private_dirty_kb()) * 1024);
1613           region["bs"]["pss"] = base::Uint64ToHexStringNoPrefix(
1614               static_cast<uint64_t>(it.proportional_resident_kb()) * 1024);
1615           region["bs"]["sc"] = base::Uint64ToHexStringNoPrefix(
1616               static_cast<uint64_t>(it.shared_clean_resident_kb()) * 1024);
1617           region["bs"]["sd"] = base::Uint64ToHexStringNoPrefix(
1618               static_cast<uint64_t>(it.shared_dirty_resident_kb()) * 1024);
1619           region["bs"]["sw"] = base::Uint64ToHexStringNoPrefix(
1620               static_cast<uint64_t>(it.swap_kb()) * 1024);
1621           smaps->append(region);
1622         }
1623 
1624         if (!totals.empty() || (smaps && !smaps->empty()))
1625           writer_.WriteCommonEvent(event);
1626       }
1627 
1628       // Export chrome dump events for process snapshots in the current
1629       // memory snapshot.
1630       const auto& process_snapshots = storage_->process_memory_snapshot_table();
1631 
1632       for (auto psit = process_snapshots.IterateRows(); psit; ++psit) {
1633         if (psit.snapshot_id() != snapshot_id)
1634           continue;
1635 
1636         auto process_snapshot_id = psit.id();
1637         uint32_t pid = UpidToPid(psit.upid());
1638 
1639         // Shared memory nodes are imported into a fake process with pid 0.
1640         // Catapult expects them to be associated with one of the real processes
1641         // of the snapshot, so we choose the first one we can find and replace
1642         // the pid.
1643         if (pid == 0) {
1644           for (auto iit = process_snapshots.IterateRows(); iit; ++iit) {
1645             if (iit.snapshot_id() != snapshot_id)
1646               continue;
1647             uint32_t new_pid = UpidToPid(iit.upid());
1648             if (new_pid != 0) {
1649               pid = new_pid;
1650               break;
1651             }
1652           }
1653         }
1654 
1655         Json::Value event = FillInProcessEventDetails(event_base, pid);
1656 
1657         const auto& sn = storage_->memory_snapshot_node_table();
1658 
1659         for (auto it = sn.IterateRows(); it; ++it) {
1660           if (it.process_snapshot_id() != process_snapshot_id) {
1661             continue;
1662           }
1663           const char* path = GetNonNullString(storage_, it.path());
1664           event["args"]["dumps"]["allocators"][path]["guid"] =
1665               base::Uint64ToHexStringNoPrefix(
1666                   static_cast<uint64_t>(it.id().value));
1667           if (it.size()) {
1668             AddAttributeToMemoryNode(&event, path, "size", it.size(), "bytes");
1669           }
1670           if (it.effective_size()) {
1671             AddAttributeToMemoryNode(&event, path, "effective_size",
1672                                      it.effective_size(), "bytes");
1673           }
1674 
1675           auto node_args_id = it.arg_set_id();
1676           if (!node_args_id)
1677             continue;
1678           const Json::Value* node_args =
1679               &args_builder_.GetArgs(node_args_id.value());
1680           for (const auto& arg_name : node_args->getMemberNames()) {
1681             const Json::Value& arg_value = (*node_args)[arg_name]["value"];
1682             if (arg_value.empty())
1683               continue;
1684             if (arg_value.isString()) {
1685               AddAttributeToMemoryNode(&event, path, arg_name,
1686                                        arg_value.asString());
1687             } else if (arg_value.isInt64()) {
1688               Json::Value unit = (*node_args)[arg_name]["unit"];
1689               if (unit.empty())
1690                 unit = "unknown";
1691               AddAttributeToMemoryNode(&event, path, arg_name,
1692                                        arg_value.asInt64(), unit.asString());
1693             }
1694           }
1695         }
1696 
1697         const auto& snapshot_edges = storage_->memory_snapshot_edge_table();
1698         for (auto it = snapshot_edges.IterateRows(); it; ++it) {
1699           SnapshotNodeId source_node_id = it.source_node_id();
1700           auto source_node_rr = *sn.FindById(source_node_id);
1701 
1702           if (source_node_rr.process_snapshot_id() != process_snapshot_id) {
1703             continue;
1704           }
1705           Json::Value edge;
1706           edge["source"] =
1707               base::Uint64ToHexStringNoPrefix(it.source_node_id().value);
1708           edge["target"] =
1709               base::Uint64ToHexStringNoPrefix(it.target_node_id().value);
1710           edge["importance"] = Json::Int(it.importance());
1711           edge["type"] = "ownership";
1712           event["args"]["dumps"]["allocators_graph"].append(edge);
1713         }
1714         writer_.WriteCommonEvent(event);
1715       }
1716     }
1717     return base::OkStatus();
1718   }
1719 
1720   uint32_t UpidToPid(UniquePid upid) {
1721     auto pid_it = upids_to_exported_pids_.find(upid);
1722     PERFETTO_DCHECK(pid_it != upids_to_exported_pids_.end());
1723     return pid_it->second;
1724   }
1725 
1726   std::pair<uint32_t, uint32_t> UtidToPidAndTid(UniqueTid utid) {
1727     auto pid_and_tid_it = utids_to_exported_pids_and_tids_.find(utid);
1728     PERFETTO_DCHECK(pid_and_tid_it != utids_to_exported_pids_and_tids_.end());
1729     return pid_and_tid_it->second;
1730   }
1731 
1732   uint32_t NextExportedPidOrTidForDuplicates() {
1733     // Ensure that the exported substitute value does not represent a valid
1734     // pid/tid. This would be very unlikely in practice.
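    // For example, the first substitute is normally 4294967295 (uint32_t max)
    // and each further duplicate counts down from there, skipping any value
    // that collides with a real pid/tid seen in the trace.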
1735     while (IsValidPidOrTid(next_exported_pid_or_tid_for_duplicates_))
1736       next_exported_pid_or_tid_for_duplicates_--;
1737     return next_exported_pid_or_tid_for_duplicates_--;
1738   }
1739 
1740   bool IsValidPidOrTid(uint32_t pid_or_tid) {
1741     const auto& process_table = storage_->process_table();
1742     for (auto it = process_table.IterateRows(); it; ++it) {
1743       if (it.pid() == pid_or_tid)
1744         return true;
1745     }
1746 
1747     const auto& thread_table = storage_->thread_table();
1748     for (auto it = thread_table.IterateRows(); it; ++it) {
1749       if (it.tid() == pid_or_tid)
1750         return true;
1751     }
1752     return false;
1753   }
1754 
1755   static Json::Value FillInProcessEventDetails(const Json::Value& event,
1756                                                uint32_t pid) {
1757     Json::Value output = event;
1758     output["pid"] = Json::Int(pid);
1759     output["tid"] = Json::Int(-1);
1760     return output;
1761   }
1762 
1763   static void AddAttributeToMemoryNode(Json::Value* event,
1764                                        const std::string& path,
1765                                        const std::string& key,
1766                                        int64_t value,
1767                                        const std::string& units) {
1768     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
1769         base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(value));
1770     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
1771         "scalar";
1772     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
1773         units;
1774   }
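  // Illustrative result for the scalar overload (the "malloc"/"size" names
  // are made up; the value is hex with no 0x prefix, so 4096 becomes "1000"):
  //   "allocators": {"malloc": {"attrs": {"size":
  //     {"value": "1000", "type": "scalar", "units": "bytes"}}}}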
1775 
1776   static void AddAttributeToMemoryNode(Json::Value* event,
1777                                        const std::string& path,
1778                                        const std::string& key,
1779                                        const std::string& value,
1780                                        const std::string& units = "") {
1781     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
1782         value;
1783     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
1784         "string";
1785     (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
1786         units;
1787   }
1788 
1789   uint64_t GetCounterValue(TrackId track_id, int64_t ts) {
1790     const auto& counter_table = storage_->counter_table();
1791     auto begin = counter_table.ts().begin();
1792     auto end = counter_table.ts().end();
1793     PERFETTO_DCHECK(counter_table.ts().IsSorted() &&
1794                     counter_table.ts().IsColumnType<int64_t>());
1795     // The timestamp column is sorted, so we can binary search for a matching
1796     // timestamp. Note that we don't use RowMap operations like FilterInto()
1797     // here because they bloat trace processor's binary size in Chrome too much.
1798     auto it = std::lower_bound(begin, end, ts,
1799                                [](const SqlValue& value, int64_t expected_ts) {
1800                                  return value.AsLong() < expected_ts;
1801                                });
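    // For example, with a sorted ts column [90, 100, 100, 110] and ts == 100,
    // |it| points at the first 100; the loop below scans only the rows with
    // that exact timestamp until it finds the requested track.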
1802     for (; it < end; ++it) {
1803       if ((*it).AsLong() != ts)
1804         break;
1805       if (auto rr = counter_table[it.row()]; rr.track_id() == track_id) {
1806         return static_cast<uint64_t>(rr.value());
1807       }
1808     }
1809     return 0;
1810   }
1811 
1812   const TraceStorage* storage_;
1813   ArgsBuilder args_builder_;
1814   TraceFormatWriter writer_;
1815 
1816   // If a pid/tid is duplicated between two or more different processes/threads
1817   // (pid/tid reuse), we export the subsequent occurrences with substitute
1818   // pids/tids that are visibly different from regular pids/tids, counting down
1819   // from the uint32_t max value.
1820   uint32_t next_exported_pid_or_tid_for_duplicates_ =
1821       std::numeric_limits<uint32_t>::max();
1822 
1823   std::map<UniquePid, uint32_t> upids_to_exported_pids_;
1824   std::map<uint32_t, UniquePid> exported_pids_to_upids_;
1825   std::map<UniqueTid, std::pair<uint32_t, uint32_t>>
1826       utids_to_exported_pids_and_tids_;
1827   std::map<std::pair<uint32_t, uint32_t>, UniqueTid>
1828       exported_pids_and_tids_to_utids_;
1829 };
1830 
1831 #endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1832 
1833 }  // namespace
1834 
1835 OutputWriter::OutputWriter() = default;
1836 OutputWriter::~OutputWriter() = default;
1837 
1838 base::Status ExportJson(const TraceStorage* storage,
1839                         OutputWriter* output,
1840                         ArgumentFilterPredicate argument_filter,
1841                         MetadataFilterPredicate metadata_filter,
1842                         LabelFilterPredicate label_filter) {
1843 #if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1844   JsonExporter exporter(storage, output, std::move(argument_filter),
1845                         std::move(metadata_filter), std::move(label_filter));
1846   return exporter.Export();
1847 #else
1848   perfetto::base::ignore_result(storage);
1849   perfetto::base::ignore_result(output);
1850   perfetto::base::ignore_result(argument_filter);
1851   perfetto::base::ignore_result(metadata_filter);
1852   perfetto::base::ignore_result(label_filter);
1853   return base::ErrStatus("JSON support is not compiled in this build");
1854 #endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
1855 }
1856 
1857 base::Status ExportJson(TraceProcessorStorage* tp,
1858                         OutputWriter* output,
1859                         ArgumentFilterPredicate argument_filter,
1860                         MetadataFilterPredicate metadata_filter,
1861                         LabelFilterPredicate label_filter) {
1862   const TraceStorage* storage = reinterpret_cast<TraceProcessorStorageImpl*>(tp)
1863                                     ->context()
1864                                     ->storage.get();
1865   return ExportJson(storage, output, std::move(argument_filter),
1866                     std::move(metadata_filter), std::move(label_filter));
1867 }
1868 
1869 base::Status ExportJson(const TraceStorage* storage, FILE* output) {
1870   FileWriter writer(output);
1871   return ExportJson(storage, &writer, nullptr, nullptr, nullptr);
1872 }
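// A minimal usage sketch for the FILE* overload (the path and error handling
// here are illustrative, not part of this API):
//   FILE* f = fopen("/tmp/trace.json", "w");
//   if (f) {
//     base::Status status = ExportJson(storage, f);
//     fclose(f);
//   }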
1873 
1874 }  // namespace perfetto::trace_processor::json
1875