/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/trace_processor/export_json.h"
#include "src/trace_processor/export_json.h"

#include <stdio.h>

#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstring>
#include <deque>
#include <limits>
#include <memory>
#include <sstream>
#include <unordered_map>
#include <vector>

#include "perfetto/base/build_config.h"
#include "perfetto/ext/base/string_splitter.h"
#include "perfetto/ext/base/string_utils.h"
#include "src/trace_processor/importers/json/json_utils.h"
#include "src/trace_processor/storage/metadata.h"
#include "src/trace_processor/storage/trace_storage.h"
#include "src/trace_processor/trace_processor_storage_impl.h"
#include "src/trace_processor/types/trace_processor_context.h"

#if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
#include <json/reader.h>
#include <json/writer.h>
#endif
namespace perfetto {
namespace trace_processor {
namespace json {

namespace {

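// Writes the JSON output directly to a FILE*, flushing it on destruction.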
class FileWriter : public OutputWriter {
 public:
  explicit FileWriter(FILE* file) : file_(file) {}
  ~FileWriter() override { fflush(file_); }

  util::Status AppendString(const std::string& s) override {
    size_t written =
        fwrite(s.data(), sizeof(std::string::value_type), s.size(), file_);
    if (written != s.size())
      return util::ErrStatus("Error writing to file: %d", ferror(file_));
    return util::OkStatus();
  }

 private:
  FILE* file_;
};

#if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
using IndexMap = perfetto::trace_processor::TraceStorage::Stats::IndexMap;

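// Keys of the "legacy_event" argument dictionary through which legacy
// TraceEvent fields are passed through trace processor.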
const char kLegacyEventArgsKey[] = "legacy_event";
const char kLegacyEventPassthroughUtidKey[] = "passthrough_utid";
const char kLegacyEventCategoryKey[] = "category";
const char kLegacyEventNameKey[] = "name";
const char kLegacyEventPhaseKey[] = "phase";
const char kLegacyEventDurationNsKey[] = "duration_ns";
const char kLegacyEventThreadTimestampNsKey[] = "thread_timestamp_ns";
const char kLegacyEventThreadDurationNsKey[] = "thread_duration_ns";
const char kLegacyEventThreadInstructionCountKey[] = "thread_instruction_count";
const char kLegacyEventThreadInstructionDeltaKey[] = "thread_instruction_delta";
const char kLegacyEventUseAsyncTtsKey[] = "use_async_tts";
const char kLegacyEventUnscopedIdKey[] = "unscoped_id";
const char kLegacyEventGlobalIdKey[] = "global_id";
const char kLegacyEventLocalIdKey[] = "local_id";
const char kLegacyEventIdScopeKey[] = "id_scope";
const char kStrippedArgument[] = "__stripped__";

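// Returns the interned string for |id|, or an empty string if |id| is absent
// or refers to the null string.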
const char* GetNonNullString(const TraceStorage* storage,
                             base::Optional<StringId> id) {
  return id == base::nullopt || *id == kNullStringId
             ? ""
             : storage->GetString(*id).c_str();
}

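// Drives the export: builds JSON arg sets once up front, then walks each
// relevant storage table and emits Chrome JSON trace events through
// TraceFormatWriter.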
class JsonExporter {
 public:
  JsonExporter(const TraceStorage* storage,
               OutputWriter* output,
               ArgumentFilterPredicate argument_filter,
               MetadataFilterPredicate metadata_filter,
               LabelFilterPredicate label_filter)
      : storage_(storage),
        args_builder_(storage_),
        writer_(output, argument_filter, metadata_filter, label_filter) {}

  util::Status Export() {
    util::Status status = MapUniquePidsAndTids();
    if (!status.ok())
      return status;

    status = ExportThreadNames();
    if (!status.ok())
      return status;

    status = ExportProcessNames();
    if (!status.ok())
      return status;

    status = ExportProcessUptimes();
    if (!status.ok())
      return status;

    status = ExportSlices();
    if (!status.ok())
      return status;

    status = ExportFlows();
    if (!status.ok())
      return status;

    status = ExportRawEvents();
    if (!status.ok())
      return status;

    status = ExportCpuProfileSamples();
    if (!status.ok())
      return status;

    status = ExportMetadata();
    if (!status.ok())
      return status;

    status = ExportStats();
    if (!status.ok())
      return status;

    status = ExportMemorySnapshots();
    if (!status.ok())
      return status;

    return util::OkStatus();
  }

 private:
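  // Accumulates events and metadata and writes them out in the Chrome JSON
  // trace format, applying the argument/metadata/label filters. Async events
  // are buffered and emitted in sorted order from the footer.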
  class TraceFormatWriter {
   public:
    TraceFormatWriter(OutputWriter* output,
                      ArgumentFilterPredicate argument_filter,
                      MetadataFilterPredicate metadata_filter,
                      LabelFilterPredicate label_filter)
        : output_(output),
          argument_filter_(argument_filter),
          metadata_filter_(metadata_filter),
          label_filter_(label_filter),
          first_event_(true) {
      Json::StreamWriterBuilder b;
      b.settings_["indentation"] = "";
      writer_.reset(b.newStreamWriter());
      WriteHeader();
    }

    ~TraceFormatWriter() { WriteFooter(); }

    void WriteCommonEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      DoWriteEvent(event);
    }

    void AddAsyncBeginEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      async_begin_events_.push_back(event);
    }

    void AddAsyncInstantEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      async_instant_events_.push_back(event);
    }

    void AddAsyncEndEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      async_end_events_.push_back(event);
    }

    void SortAndEmitAsyncEvents() {
      // Catapult doesn't handle out-of-order begin/end events well, especially
      // when their timestamps are the same, but their order is incorrect. Since
      // we process events sorted by begin timestamp, |async_begin_events_| and
      // |async_instant_events_| are already sorted. We now only have to sort
      // |async_end_events_| and merge-sort all events into a single sequence.

      // Sort |async_end_events_|. Note that we should order by ascending
      // timestamp, but in reverse-stable order. This way, a child slice's end
      // is emitted before its parent's end event, even if both end events have
      // the same timestamp. To accomplish this, we perform a stable sort in
      // descending order and later iterate via reverse iterators.
      struct {
        bool operator()(const Json::Value& a, const Json::Value& b) const {
          return a["ts"].asInt64() > b["ts"].asInt64();
        }
      } CompareEvents;
      std::stable_sort(async_end_events_.begin(), async_end_events_.end(),
                       CompareEvents);

      // Merge sort by timestamp. If events share the same timestamp, prefer
      // instant events, then end events, so that old slices close before new
      // ones are opened, but instant events remain in their deepest nesting
      // level.
      auto instant_event_it = async_instant_events_.begin();
      auto end_event_it = async_end_events_.rbegin();
      auto begin_event_it = async_begin_events_.begin();

      auto has_instant_event = instant_event_it != async_instant_events_.end();
      auto has_end_event = end_event_it != async_end_events_.rend();
      auto has_begin_event = begin_event_it != async_begin_events_.end();

      auto emit_next_instant = [&instant_event_it, &has_instant_event, this]() {
        DoWriteEvent(*instant_event_it);
        instant_event_it++;
        has_instant_event = instant_event_it != async_instant_events_.end();
      };
      auto emit_next_end = [&end_event_it, &has_end_event, this]() {
        DoWriteEvent(*end_event_it);
        end_event_it++;
        has_end_event = end_event_it != async_end_events_.rend();
      };
      auto emit_next_begin = [&begin_event_it, &has_begin_event, this]() {
        DoWriteEvent(*begin_event_it);
        begin_event_it++;
        has_begin_event = begin_event_it != async_begin_events_.end();
      };

      auto emit_next_instant_or_end = [&instant_event_it, &end_event_it,
                                       &emit_next_instant, &emit_next_end]() {
        if ((*instant_event_it)["ts"].asInt64() <=
            (*end_event_it)["ts"].asInt64()) {
          emit_next_instant();
        } else {
          emit_next_end();
        }
      };
      auto emit_next_instant_or_begin = [&instant_event_it, &begin_event_it,
                                         &emit_next_instant,
                                         &emit_next_begin]() {
        if ((*instant_event_it)["ts"].asInt64() <=
            (*begin_event_it)["ts"].asInt64()) {
          emit_next_instant();
        } else {
          emit_next_begin();
        }
      };
      auto emit_next_end_or_begin = [&end_event_it, &begin_event_it,
                                     &emit_next_end, &emit_next_begin]() {
        if ((*end_event_it)["ts"].asInt64() <=
            (*begin_event_it)["ts"].asInt64()) {
          emit_next_end();
        } else {
          emit_next_begin();
        }
      };

      // While we still have events in all iterators, consider each.
      while (has_instant_event && has_end_event && has_begin_event) {
        if ((*instant_event_it)["ts"].asInt64() <=
            (*end_event_it)["ts"].asInt64()) {
          emit_next_instant_or_begin();
        } else {
          emit_next_end_or_begin();
        }
      }

      // Only instant and end events left.
      while (has_instant_event && has_end_event) {
        emit_next_instant_or_end();
      }

      // Only instant and begin events left.
      while (has_instant_event && has_begin_event) {
        emit_next_instant_or_begin();
      }

      // Only end and begin events left.
      while (has_end_event && has_begin_event) {
        emit_next_end_or_begin();
      }

      // Remaining instant events.
      while (has_instant_event) {
        emit_next_instant();
      }

      // Remaining end events.
      while (has_end_event) {
        emit_next_end();
      }

      // Remaining begin events.
      while (has_begin_event) {
        emit_next_begin();
      }
    }

    void WriteMetadataEvent(const char* metadata_type,
                            const char* metadata_arg_name,
                            const char* metadata_arg_value,
                            uint32_t pid,
                            uint32_t tid) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      std::ostringstream ss;
      if (!first_event_)
        ss << ",\n";

      Json::Value value;
      value["ph"] = "M";
      value["cat"] = "__metadata";
      value["ts"] = 0;
      value["name"] = metadata_type;
      value["pid"] = Json::Int(pid);
      value["tid"] = Json::Int(tid);

      Json::Value args;
      args[metadata_arg_name] = metadata_arg_value;
      value["args"] = args;

      writer_->write(value, &ss);
      output_->AppendString(ss.str());
      first_event_ = false;
    }

    void MergeMetadata(const Json::Value& value) {
      for (const auto& member : value.getMemberNames()) {
        metadata_[member] = value[member];
      }
    }

    void AppendTelemetryMetadataString(const char* key, const char* value) {
      metadata_["telemetry"][key].append(value);
    }

    void AppendTelemetryMetadataInt(const char* key, int64_t value) {
      metadata_["telemetry"][key].append(Json::Int64(value));
    }

    void AppendTelemetryMetadataBool(const char* key, bool value) {
      metadata_["telemetry"][key].append(value);
    }

    void SetTelemetryMetadataTimestamp(const char* key, int64_t value) {
      metadata_["telemetry"][key] = static_cast<double>(value) / 1000.0;
    }

    void SetStats(const char* key, int64_t value) {
      metadata_["trace_processor_stats"][key] = Json::Int64(value);
    }

    void SetStats(const char* key, const IndexMap& indexed_values) {
      constexpr const char* kBufferStatsPrefix = "traced_buf_";

      // Stats for the same buffer should be grouped together in the JSON.
      if (strncmp(kBufferStatsPrefix, key, strlen(kBufferStatsPrefix)) == 0) {
        for (const auto& value : indexed_values) {
          metadata_["trace_processor_stats"]["traced_buf"][value.first]
                   [key + strlen(kBufferStatsPrefix)] =
                       Json::Int64(value.second);
        }
        return;
      }

      // Other indexed value stats are exported as array under their key.
      for (const auto& value : indexed_values) {
        metadata_["trace_processor_stats"][key][value.first] =
            Json::Int64(value.second);
      }
    }

    void AddSystemTraceData(const std::string& data) {
      system_trace_data_ += data;
    }

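    // Buffers legacy user trace JSON chunks; the buffer is opened as a JSON
    // array here, closed and parsed in WriteFooter().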
    void AddUserTraceData(const std::string& data) {
      if (user_trace_data_.empty())
        user_trace_data_ = "[";
      user_trace_data_ += data;
    }

   private:
    void WriteHeader() {
      if (!label_filter_)
        output_->AppendString("{\"traceEvents\":[\n");
    }

    void WriteFooter() {
      SortAndEmitAsyncEvents();

      // Filter metadata entries.
      if (metadata_filter_) {
        for (const auto& member : metadata_.getMemberNames()) {
          if (!metadata_filter_(member.c_str()))
            metadata_[member] = kStrippedArgument;
        }
      }

      if ((!label_filter_ || label_filter_("traceEvents")) &&
          !user_trace_data_.empty()) {
        user_trace_data_ += "]";

        Json::CharReaderBuilder builder;
        auto reader =
            std::unique_ptr<Json::CharReader>(builder.newCharReader());
        Json::Value result;
        if (reader->parse(user_trace_data_.data(),
                          user_trace_data_.data() + user_trace_data_.length(),
                          &result, nullptr)) {
          for (const auto& event : result) {
            WriteCommonEvent(event);
          }
        } else {
          PERFETTO_DLOG(
              "can't parse legacy user json trace export, skipping. data: %s",
              user_trace_data_.c_str());
        }
      }

      std::ostringstream ss;
      if (!label_filter_)
        ss << "]";

      if ((!label_filter_ || label_filter_("systemTraceEvents")) &&
          !system_trace_data_.empty()) {
        ss << ",\"systemTraceEvents\":\n";
        writer_->write(Json::Value(system_trace_data_), &ss);
      }

      if ((!label_filter_ || label_filter_("metadata")) && !metadata_.empty()) {
        ss << ",\"metadata\":\n";
        writer_->write(metadata_, &ss);
      }

      if (!label_filter_)
        ss << "}";

      output_->AppendString(ss.str());
    }

    void DoWriteEvent(const Json::Value& event) {
      std::ostringstream ss;
      if (!first_event_)
        ss << ",\n";

      ArgumentNameFilterPredicate argument_name_filter;
      bool strip_args =
          argument_filter_ &&
          !argument_filter_(event["cat"].asCString(), event["name"].asCString(),
                            &argument_name_filter);
      if ((strip_args || argument_name_filter) && event.isMember("args")) {
        Json::Value event_copy = event;
        if (strip_args) {
          event_copy["args"] = kStrippedArgument;
        } else {
          auto& args = event_copy["args"];
          for (const auto& member : event["args"].getMemberNames()) {
            if (!argument_name_filter(member.c_str()))
              args[member] = kStrippedArgument;
          }
        }
        writer_->write(event_copy, &ss);
      } else {
        writer_->write(event, &ss);
      }
      first_event_ = false;

      output_->AppendString(ss.str());
    }

    OutputWriter* output_;
    ArgumentFilterPredicate argument_filter_;
    MetadataFilterPredicate metadata_filter_;
    LabelFilterPredicate label_filter_;

    std::unique_ptr<Json::StreamWriter> writer_;
    bool first_event_;
    Json::Value metadata_;
    std::string system_trace_data_;
    std::string user_trace_data_;
    std::vector<Json::Value> async_begin_events_;
    std::vector<Json::Value> async_instant_events_;
    std::vector<Json::Value> async_end_events_;
  };

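  // Pre-builds a JSON object for every arg set in storage so that exporting a
  // row only requires an index lookup. Dotted keys ("a.b") become nested
  // dictionaries and "key[N]"-style keys become arrays.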
  class ArgsBuilder {
   public:
    explicit ArgsBuilder(const TraceStorage* storage)
        : storage_(storage),
          empty_value_(Json::objectValue),
          nan_value_(Json::StaticString("NaN")),
          inf_value_(Json::StaticString("Infinity")),
          neg_inf_value_(Json::StaticString("-Infinity")) {
      const auto& arg_table = storage_->arg_table();
      uint32_t count = arg_table.row_count();
      if (count == 0) {
        args_sets_.resize(1, empty_value_);
        return;
      }
      args_sets_.resize(arg_table.arg_set_id()[count - 1] + 1, empty_value_);

      for (uint32_t i = 0; i < count; ++i) {
        ArgSetId set_id = arg_table.arg_set_id()[i];
        const char* key = arg_table.key().GetString(i).c_str();
        Variadic value = storage_->GetArgValue(i);
        AppendArg(set_id, key, VariadicToJson(value));
      }
      PostprocessArgs();
    }

    const Json::Value& GetArgs(ArgSetId set_id) const {
      // If |set_id| was empty and added to the storage last, it may not be in
      // args_sets_.
      if (set_id >= args_sets_.size())
        return empty_value_;
      return args_sets_[set_id];
    }

   private:
    Json::Value VariadicToJson(Variadic variadic) {
      switch (variadic.type) {
        case Variadic::kInt:
          return Json::Int64(variadic.int_value);
        case Variadic::kUint:
          return Json::UInt64(variadic.uint_value);
        case Variadic::kString:
          return GetNonNullString(storage_, variadic.string_value);
        case Variadic::kReal:
          if (std::isnan(variadic.real_value)) {
            return nan_value_;
          } else if (std::isinf(variadic.real_value) &&
                     variadic.real_value > 0) {
            return inf_value_;
          } else if (std::isinf(variadic.real_value) &&
                     variadic.real_value < 0) {
            return neg_inf_value_;
          } else {
            return variadic.real_value;
          }
        case Variadic::kPointer:
          return base::Uint64ToHexString(variadic.pointer_value);
        case Variadic::kBool:
          return variadic.bool_value;
        case Variadic::kNull:
          return base::Uint64ToHexString(0);
        case Variadic::kJson: {
          Json::CharReaderBuilder b;
          auto reader = std::unique_ptr<Json::CharReader>(b.newCharReader());

          Json::Value result;
          std::string v = GetNonNullString(storage_, variadic.json_value);
          reader->parse(v.data(), v.data() + v.length(), &result, nullptr);
          return result;
        }
      }
      PERFETTO_FATAL("Not reached");  // For gcc.
    }

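    // Inserts |value| into arg set |set_id| at the position encoded by |key|:
    // "." separates nested dictionary keys and "[N]" suffixes index into
    // arrays (e.g. "a.b" or "a[0].b").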
    void AppendArg(ArgSetId set_id,
                   const std::string& key,
                   const Json::Value& value) {
      Json::Value* target = &args_sets_[set_id];
      for (base::StringSplitter parts(key, '.'); parts.Next();) {
        if (PERFETTO_UNLIKELY(!target->isNull() && !target->isObject())) {
          PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
                        key.c_str(),
                        args_sets_[set_id].toStyledString().c_str());
          return;
        }
        std::string key_part = parts.cur_token();
        size_t bracketpos = key_part.find('[');
        if (bracketpos == key_part.npos) {  // A single item.
          target = &(*target)[key_part];
        } else {  // A list item.
          target = &(*target)[key_part.substr(0, bracketpos)];
          while (bracketpos != key_part.npos) {
            // We constructed this string from an int earlier in trace_processor
            // so it shouldn't be possible for this (or the StringToUInt32
            // below) to fail.
            std::string s =
                key_part.substr(bracketpos + 1, key_part.find(']', bracketpos) -
                                                    bracketpos - 1);
            if (PERFETTO_UNLIKELY(!target->isNull() && !target->isArray())) {
              PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
                            key.c_str(),
                            args_sets_[set_id].toStyledString().c_str());
              return;
            }
            base::Optional<uint32_t> index = base::StringToUInt32(s);
            if (PERFETTO_UNLIKELY(!index)) {
              PERFETTO_ELOG("Expected to be able to extract index from %s",
                            key_part.c_str());
              return;
            }
            target = &(*target)[index.value()];
            bracketpos = key_part.find('[', bracketpos + 1);
          }
        }
      }
      *target = value;
    }

    void PostprocessArgs() {
      for (Json::Value& args : args_sets_) {
        // Move all fields from the "debug" key to the top level.
        if (args.isMember("debug")) {
          Json::Value debug = args["debug"];
          args.removeMember("debug");
          for (const auto& member : debug.getMemberNames()) {
            args[member] = debug[member];
          }
        }

        // Rename source fields.
        if (args.isMember("task")) {
          if (args["task"].isMember("posted_from")) {
            Json::Value posted_from = args["task"]["posted_from"];
            args["task"].removeMember("posted_from");
            if (posted_from.isMember("function_name")) {
              args["src_func"] = posted_from["function_name"];
              args["src_file"] = posted_from["file_name"];
            } else if (posted_from.isMember("file_name")) {
              args["src"] = posted_from["file_name"];
            }
          }
          if (args["task"].empty())
            args.removeMember("task");
        }
        if (args.isMember("source")) {
          Json::Value source = args["source"];
          if (source.isObject() && source.isMember("function_name")) {
            args["function_name"] = source["function_name"];
            args["file_name"] = source["file_name"];
            args.removeMember("source");
          }
        }
      }
    }

    const TraceStorage* storage_;
    std::vector<Json::Value> args_sets_;
    const Json::Value empty_value_;
    const Json::Value nan_value_;
    const Json::Value inf_value_;
    const Json::Value neg_inf_value_;
  };

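  // Builds bidirectional mappings between trace-processor's unique pids/tids
  // and the pids/tids used in the exported JSON. Duplicate pids/tids (e.g.
  // after pid reuse) are remapped to synthetic ids so that every process and
  // thread stays distinct in the output.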
  util::Status MapUniquePidsAndTids() {
    const auto& process_table = storage_->process_table();
    for (UniquePid upid = 0; upid < process_table.row_count(); upid++) {
      uint32_t exported_pid = process_table.pid()[upid];
      auto it_and_inserted =
          exported_pids_to_upids_.emplace(exported_pid, upid);
      if (!it_and_inserted.second) {
        exported_pid = NextExportedPidOrTidForDuplicates();
        it_and_inserted = exported_pids_to_upids_.emplace(exported_pid, upid);
      }
      upids_to_exported_pids_.emplace(upid, exported_pid);
    }

    const auto& thread_table = storage_->thread_table();
    for (UniqueTid utid = 0; utid < thread_table.row_count(); utid++) {
      uint32_t exported_pid = 0;
      base::Optional<UniquePid> upid = thread_table.upid()[utid];
      if (upid) {
        auto exported_pid_it = upids_to_exported_pids_.find(*upid);
        PERFETTO_DCHECK(exported_pid_it != upids_to_exported_pids_.end());
        exported_pid = exported_pid_it->second;
      }

      uint32_t exported_tid = thread_table.tid()[utid];
      auto it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
          std::make_pair(exported_pid, exported_tid), utid);
      if (!it_and_inserted.second) {
        exported_tid = NextExportedPidOrTidForDuplicates();
        it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
            std::make_pair(exported_pid, exported_tid), utid);
      }
      utids_to_exported_pids_and_tids_.emplace(
          utid, std::make_pair(exported_pid, exported_tid));
    }

    return util::OkStatus();
  }

  util::Status ExportThreadNames() {
    const auto& thread_table = storage_->thread_table();
    for (UniqueTid utid = 0; utid < thread_table.row_count(); ++utid) {
      auto opt_name = thread_table.name()[utid];
      if (opt_name.has_value()) {
        const char* thread_name = GetNonNullString(storage_, opt_name);
        auto pid_and_tid = UtidToPidAndTid(utid);
        writer_.WriteMetadataEvent("thread_name", "name", thread_name,
                                   pid_and_tid.first, pid_and_tid.second);
      }
    }
    return util::OkStatus();
  }

  util::Status ExportProcessNames() {
    const auto& process_table = storage_->process_table();
    for (UniquePid upid = 0; upid < process_table.row_count(); ++upid) {
      auto opt_name = process_table.name()[upid];
      if (opt_name.has_value()) {
        const char* process_name = GetNonNullString(storage_, opt_name);
        writer_.WriteMetadataEvent("process_name", "name", process_name,
                                   UpidToPid(upid), /*tid=*/0);
      }
    }
    return util::OkStatus();
  }

  // Writes an approximate uptime for each process, based on the process's
  // start time and the end of the last slice in the entire trace. The same
  // last-slice timestamp is used for all processes, so a process may in fact
  // have ended earlier.
  util::Status ExportProcessUptimes() {
    int64_t last_timestamp_ns = FindLastSliceTimestamp();
    if (last_timestamp_ns <= 0)
      return util::OkStatus();

    const auto& process_table = storage_->process_table();
    for (UniquePid upid = 0; upid < process_table.row_count(); ++upid) {
      base::Optional<int64_t> start_timestamp_ns =
          process_table.start_ts()[upid];
      if (!start_timestamp_ns.has_value())
        continue;

      int64_t process_uptime_seconds =
          (last_timestamp_ns - start_timestamp_ns.value()) /
          (1000 * 1000 * 1000);

      writer_.WriteMetadataEvent("process_uptime_seconds", "uptime",
                                 std::to_string(process_uptime_seconds).c_str(),
                                 UpidToPid(upid), /*tid=*/0);
    }

    return util::OkStatus();
  }

  // Returns the end timestamp of the last slice in the entire trace. If no
  // slices are found, returns 0.
  int64_t FindLastSliceTimestamp() {
    int64_t last_ts = 0;
    const auto& slices = storage_->slice_table();
    for (uint32_t i = 0; i < slices.row_count(); ++i) {
      int64_t duration_ns = slices.dur()[i];
      int64_t timestamp_ns = slices.ts()[i];

      if (duration_ns + timestamp_ns > last_ts) {
        last_ts = duration_ns + timestamp_ns;
      }
    }
    return last_ts;
  }

  util::Status ExportSlices() {
    const auto& slices = storage_->slice_table();
    for (uint32_t i = 0; i < slices.row_count(); ++i) {
      // Skip slices with an empty category - these are ftrace/system slices
      // that were also imported into the raw table and will be exported from
      // there by trace_to_text.
      // TODO(b/153609716): Add a src column or do_not_export flag instead.
      auto cat = slices.category().GetString(i);
      if (cat.c_str() == nullptr || cat == "binder")
        continue;

      Json::Value event;
      event["ts"] = Json::Int64(slices.ts()[i] / 1000);
      event["cat"] = GetNonNullString(storage_, slices.category()[i]);
      event["name"] = GetNonNullString(storage_, slices.name()[i]);
      event["pid"] = 0;
      event["tid"] = 0;

      base::Optional<UniqueTid> legacy_utid;
      std::string legacy_phase;

      event["args"] =
          args_builder_.GetArgs(slices.arg_set_id()[i]);  // Makes a copy.
      if (event["args"].isMember(kLegacyEventArgsKey)) {
        const auto& legacy_args = event["args"][kLegacyEventArgsKey];

        if (legacy_args.isMember(kLegacyEventPassthroughUtidKey)) {
          legacy_utid = legacy_args[kLegacyEventPassthroughUtidKey].asUInt();
        }
        if (legacy_args.isMember(kLegacyEventPhaseKey)) {
          legacy_phase = legacy_args[kLegacyEventPhaseKey].asString();
        }

        event["args"].removeMember(kLegacyEventArgsKey);
      }

      // To prevent duplicate export of slices, only export slices on descriptor
      // or chrome tracks (i.e. TrackEvent slices). Slices on other tracks may
      // also be present as raw events and handled by trace_to_text. Only add
      // more track types here if they are not already covered by trace_to_text.
      TrackId track_id = slices.track_id()[i];

      const auto& track_table = storage_->track_table();

      uint32_t track_row = *track_table.id().IndexOf(track_id);
      auto track_args_id = track_table.source_arg_set_id()[track_row];
      const Json::Value* track_args = nullptr;
      bool legacy_chrome_track = false;
      bool is_child_track = false;
      if (track_args_id) {
        track_args = &args_builder_.GetArgs(*track_args_id);
        legacy_chrome_track = (*track_args)["source"].asString() == "chrome";
        is_child_track = track_args->isMember("is_root_in_scope") &&
                         !(*track_args)["is_root_in_scope"].asBool();
      }

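      // Thread-timestamp and instruction-count data for the slice may live in
      // either the thread_slice table or the virtual track slices; check both.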
      const auto& thread_track = storage_->thread_track_table();
      const auto& process_track = storage_->process_track_table();
      const auto& thread_slices = storage_->thread_slice_table();
      const auto& virtual_track_slices = storage_->virtual_track_slices();

      int64_t duration_ns = slices.dur()[i];
      base::Optional<int64_t> thread_ts_ns;
      base::Optional<int64_t> thread_duration_ns;
      base::Optional<int64_t> thread_instruction_count;
      base::Optional<int64_t> thread_instruction_delta;

      SliceId id = slices.id()[i];
      base::Optional<uint32_t> thread_slice_row =
          thread_slices.id().IndexOf(id);
      if (thread_slice_row) {
        thread_ts_ns = thread_slices.thread_ts()[*thread_slice_row];
        thread_duration_ns = thread_slices.thread_dur()[*thread_slice_row];
        thread_instruction_count =
            thread_slices.thread_instruction_count()[*thread_slice_row];
        thread_instruction_delta =
            thread_slices.thread_instruction_delta()[*thread_slice_row];
      } else {
        base::Optional<uint32_t> vtrack_slice_row =
            virtual_track_slices.FindRowForSliceId(id);
        if (vtrack_slice_row) {
          thread_ts_ns =
              virtual_track_slices.thread_timestamp_ns()[*vtrack_slice_row];
          thread_duration_ns =
              virtual_track_slices.thread_duration_ns()[*vtrack_slice_row];
          thread_instruction_count =
              virtual_track_slices
                  .thread_instruction_counts()[*vtrack_slice_row];
          thread_instruction_delta =
              virtual_track_slices
                  .thread_instruction_deltas()[*vtrack_slice_row];
        }
      }

      auto opt_thread_track_row = thread_track.id().IndexOf(TrackId{track_id});

      if (opt_thread_track_row && !is_child_track) {
        // Synchronous (thread) slice or instant event.
        UniqueTid utid = thread_track.utid()[*opt_thread_track_row];
        auto pid_and_tid = UtidToPidAndTid(utid);
        event["pid"] = Json::Int(pid_and_tid.first);
        event["tid"] = Json::Int(pid_and_tid.second);

        if (duration_ns == 0) {
          if (legacy_phase.empty()) {
            // Use "I" instead of "i" phase for backwards-compat with old
            // consumers.
            event["ph"] = "I";
          } else {
            event["ph"] = legacy_phase;
          }
          if (thread_ts_ns && *thread_ts_ns > 0) {
            event["tts"] = Json::Int64(*thread_ts_ns / 1000);
          }
          if (thread_instruction_count && *thread_instruction_count > 0) {
            event["ticount"] = Json::Int64(*thread_instruction_count);
          }
          event["s"] = "t";
        } else {
          if (duration_ns > 0) {
            event["ph"] = "X";
            event["dur"] = Json::Int64(duration_ns / 1000);
          } else {
            // If the slice didn't finish, the duration may be negative. Only
            // write a begin event without an end event in this case.
            event["ph"] = "B";
          }
          if (thread_ts_ns && *thread_ts_ns > 0) {
            event["tts"] = Json::Int64(*thread_ts_ns / 1000);
            // Only write thread duration for completed events.
            if (duration_ns > 0 && thread_duration_ns)
              event["tdur"] = Json::Int64(*thread_duration_ns / 1000);
          }
          if (thread_instruction_count && *thread_instruction_count > 0) {
            event["ticount"] = Json::Int64(*thread_instruction_count);
            // Only write thread instruction delta for completed events.
            if (duration_ns > 0 && thread_instruction_delta)
              event["tidelta"] = Json::Int64(*thread_instruction_delta);
          }
        }
        writer_.WriteCommonEvent(event);
      } else if (is_child_track ||
                 (legacy_chrome_track && track_args->isMember("source_id"))) {
        // Async event slice.
        auto opt_process_row = process_track.id().IndexOf(TrackId{track_id});
        if (legacy_chrome_track) {
          // Legacy async tracks are always process-associated and have args.
          PERFETTO_DCHECK(opt_process_row);
          PERFETTO_DCHECK(track_args);
          uint32_t upid = process_track.upid()[*opt_process_row];
          uint32_t exported_pid = UpidToPid(upid);
          event["pid"] = Json::Int(exported_pid);
          event["tid"] =
              Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
                                    : exported_pid);

          // Preserve original event IDs for legacy tracks. This is so that
          // e.g. memory dump IDs show up correctly in the JSON trace.
          PERFETTO_DCHECK(track_args->isMember("source_id"));
          PERFETTO_DCHECK(track_args->isMember("source_id_is_process_scoped"));
          PERFETTO_DCHECK(track_args->isMember("source_scope"));
          uint64_t source_id =
              static_cast<uint64_t>((*track_args)["source_id"].asInt64());
          std::string source_scope = (*track_args)["source_scope"].asString();
          if (!source_scope.empty())
            event["scope"] = source_scope;
          bool source_id_is_process_scoped =
              (*track_args)["source_id_is_process_scoped"].asBool();
          if (source_id_is_process_scoped) {
            event["id2"]["local"] = base::Uint64ToHexString(source_id);
          } else {
            // Some legacy importers don't understand "id2" fields, so we use
            // the "usually" global "id" field instead. This works as long as
            // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
            // "LOCAL_ID_PHASES" in catapult.
            event["id"] = base::Uint64ToHexString(source_id);
          }
        } else {
          if (opt_thread_track_row) {
            UniqueTid utid = thread_track.utid()[*opt_thread_track_row];
            auto pid_and_tid = UtidToPidAndTid(utid);
            event["pid"] = Json::Int(pid_and_tid.first);
            event["tid"] = Json::Int(pid_and_tid.second);
            event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
          } else if (opt_process_row) {
            uint32_t upid = process_track.upid()[*opt_process_row];
            uint32_t exported_pid = UpidToPid(upid);
            event["pid"] = Json::Int(exported_pid);
            event["tid"] =
                Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
                                      : exported_pid);
            event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
          } else {
            if (legacy_utid) {
              auto pid_and_tid = UtidToPidAndTid(*legacy_utid);
              event["pid"] = Json::Int(pid_and_tid.first);
              event["tid"] = Json::Int(pid_and_tid.second);
            }

            // Some legacy importers don't understand "id2" fields, so we use
            // the "usually" global "id" field instead. This works as long as
            // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
            // "LOCAL_ID_PHASES" in catapult.
            event["id"] = base::Uint64ToHexString(track_id.value);
          }
        }

        if (thread_ts_ns && *thread_ts_ns > 0) {
          event["tts"] = Json::Int64(*thread_ts_ns / 1000);
          event["use_async_tts"] = Json::Int(1);
        }
        if (thread_instruction_count && *thread_instruction_count > 0) {
          event["ticount"] = Json::Int64(*thread_instruction_count);
          event["use_async_tts"] = Json::Int(1);
        }

        if (duration_ns == 0) {
          if (legacy_phase.empty()) {
            // Instant async event.
            event["ph"] = "n";
            writer_.AddAsyncInstantEvent(event);
          } else {
            // Async step events.
            event["ph"] = legacy_phase;
            writer_.AddAsyncBeginEvent(event);
          }
        } else {  // Async start and end.
          event["ph"] = legacy_phase.empty() ? "b" : legacy_phase;
          writer_.AddAsyncBeginEvent(event);
          // If the slice didn't finish, the duration may be negative. Don't
          // write the end event in this case.
          if (duration_ns > 0) {
            event["ph"] = legacy_phase.empty() ? "e" : "F";
            event["ts"] = Json::Int64((slices.ts()[i] + duration_ns) / 1000);
            if (thread_ts_ns && thread_duration_ns && *thread_ts_ns > 0) {
              event["tts"] =
                  Json::Int64((*thread_ts_ns + *thread_duration_ns) / 1000);
            }
            if (thread_instruction_count && thread_instruction_delta &&
                *thread_instruction_count > 0) {
              event["ticount"] = Json::Int64(
                  (*thread_instruction_count + *thread_instruction_delta));
            }
            event["args"].clear();
            writer_.AddAsyncEndEvent(event);
          }
        }
      } else {
        // Global or process-scoped instant event.
        PERFETTO_DCHECK(legacy_chrome_track || !is_child_track);
        if (duration_ns != 0) {
          // We don't support exporting slices on the default global or process
          // track to JSON (JSON only supports instant events on these tracks).
          PERFETTO_DLOG(
              "skipping non-instant slice on global or process track");
        } else {
          if (legacy_phase.empty()) {
            // Use "I" instead of "i" phase for backwards-compat with old
            // consumers.
            event["ph"] = "I";
          } else {
            event["ph"] = legacy_phase;
          }

          auto opt_process_row = process_track.id().IndexOf(TrackId{track_id});
          if (opt_process_row.has_value()) {
            uint32_t upid = process_track.upid()[*opt_process_row];
            uint32_t exported_pid = UpidToPid(upid);
            event["pid"] = Json::Int(exported_pid);
            event["tid"] =
                Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
                                      : exported_pid);
            event["s"] = "p";
          } else {
            event["s"] = "g";
          }
          writer_.WriteCommonEvent(event);
        }
      }
    }
    return util::OkStatus();
  }

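  // Builds a v1 flow event ("s"/"f" phase) bound to the slice |slice_id|, or
  // base::nullopt if the slice is missing or not on a thread track.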
  base::Optional<Json::Value> CreateFlowEventV1(uint32_t flow_id,
                                                SliceId slice_id,
                                                std::string name,
                                                std::string cat,
                                                Json::Value args,
                                                bool flow_begin) {
    const auto& slices = storage_->slice_table();
    const auto& thread_tracks = storage_->thread_track_table();

    auto opt_slice_idx = slices.id().IndexOf(slice_id);
    if (!opt_slice_idx)
      return base::nullopt;
    uint32_t slice_idx = opt_slice_idx.value();

    TrackId track_id = storage_->slice_table().track_id()[slice_idx];
    auto opt_thread_track_idx = thread_tracks.id().IndexOf(track_id);
    // Catapult only supports flow events attached to thread-track slices.
    if (!opt_thread_track_idx)
      return base::nullopt;

    UniqueTid utid = thread_tracks.utid()[opt_thread_track_idx.value()];
    auto pid_and_tid = UtidToPidAndTid(utid);
    Json::Value event;
    event["id"] = flow_id;
    event["pid"] = Json::Int(pid_and_tid.first);
    event["tid"] = Json::Int(pid_and_tid.second);
    event["cat"] = cat;
    event["name"] = name;
    event["ph"] = (flow_begin ? "s" : "f");
    event["ts"] = Json::Int64(slices.ts()[slice_idx] / 1000);
    if (!flow_begin) {
      event["bp"] = "e";
    }
    event["args"] = std::move(args);
    return std::move(event);
  }

  util::Status ExportFlows() {
    const auto& flow_table = storage_->flow_table();
    const auto& slice_table = storage_->slice_table();
    for (uint32_t i = 0; i < flow_table.row_count(); i++) {
      SliceId slice_out = flow_table.slice_out()[i];
      SliceId slice_in = flow_table.slice_in()[i];
      uint32_t arg_set_id = flow_table.arg_set_id()[i];

      std::string cat;
      std::string name;
      auto args = args_builder_.GetArgs(arg_set_id);
      if (arg_set_id != kInvalidArgSetId) {
        cat = args["cat"].asString();
        name = args["name"].asString();
        // Don't export these args since they are only used for this export and
        // weren't part of the original event.
        args.removeMember("name");
        args.removeMember("cat");
      } else {
        auto opt_slice_out_idx = slice_table.id().IndexOf(slice_out);
        PERFETTO_DCHECK(opt_slice_out_idx.has_value());
        base::Optional<StringId> cat_id =
            slice_table.category()[opt_slice_out_idx.value()];
        base::Optional<StringId> name_id =
            slice_table.name()[opt_slice_out_idx.value()];
        cat = GetNonNullString(storage_, cat_id);
        name = GetNonNullString(storage_, name_id);
      }

      auto out_event = CreateFlowEventV1(i, slice_out, name, cat, args,
                                         /* flow_begin = */ true);
      auto in_event = CreateFlowEventV1(i, slice_in, name, cat, std::move(args),
                                        /* flow_begin = */ false);

      if (out_event && in_event) {
        writer_.WriteCommonEvent(out_event.value());
        writer_.WriteCommonEvent(in_event.value());
      }
    }
    return util::OkStatus();
  }

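  // Converts a row of the raw table that holds a passed-through legacy
  // TraceEvent back into its JSON form, pulling the phase, ids and timing
  // fields out of the "legacy_event" arg dictionary.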
  Json::Value ConvertLegacyRawEventToJson(uint32_t index) {
    const auto& events = storage_->raw_table();

    Json::Value event;
    event["ts"] = Json::Int64(events.ts()[index] / 1000);

    UniqueTid utid = static_cast<UniqueTid>(events.utid()[index]);
    auto pid_and_tid = UtidToPidAndTid(utid);
    event["pid"] = Json::Int(pid_and_tid.first);
    event["tid"] = Json::Int(pid_and_tid.second);

    // Raw legacy events store all other params in the arg set. Make a copy of
    // the converted args here, parse, and then remove the legacy params.
    event["args"] = args_builder_.GetArgs(events.arg_set_id()[index]);
    const Json::Value& legacy_args = event["args"][kLegacyEventArgsKey];

    PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventCategoryKey));
    event["cat"] = legacy_args[kLegacyEventCategoryKey];

    PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventNameKey));
    event["name"] = legacy_args[kLegacyEventNameKey];

    PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventPhaseKey));
    event["ph"] = legacy_args[kLegacyEventPhaseKey];

    // Object snapshot events are supposed to have a mandatory "snapshot" arg,
    // which may be removed in trace processor if it is empty.
    if (legacy_args[kLegacyEventPhaseKey] == "O" &&
        !event["args"].isMember("snapshot")) {
      event["args"]["snapshot"] = Json::Value(Json::objectValue);
    }

    if (legacy_args.isMember(kLegacyEventDurationNsKey))
      event["dur"] = legacy_args[kLegacyEventDurationNsKey].asInt64() / 1000;

    if (legacy_args.isMember(kLegacyEventThreadTimestampNsKey)) {
      event["tts"] =
          legacy_args[kLegacyEventThreadTimestampNsKey].asInt64() / 1000;
    }

    if (legacy_args.isMember(kLegacyEventThreadDurationNsKey)) {
      event["tdur"] =
          legacy_args[kLegacyEventThreadDurationNsKey].asInt64() / 1000;
    }

    if (legacy_args.isMember(kLegacyEventThreadInstructionCountKey))
      event["ticount"] = legacy_args[kLegacyEventThreadInstructionCountKey];

    if (legacy_args.isMember(kLegacyEventThreadInstructionDeltaKey))
      event["tidelta"] = legacy_args[kLegacyEventThreadInstructionDeltaKey];

    if (legacy_args.isMember(kLegacyEventUseAsyncTtsKey))
      event["use_async_tts"] = legacy_args[kLegacyEventUseAsyncTtsKey];

    if (legacy_args.isMember(kLegacyEventUnscopedIdKey)) {
      event["id"] = base::Uint64ToHexString(
          legacy_args[kLegacyEventUnscopedIdKey].asUInt64());
    }

    if (legacy_args.isMember(kLegacyEventGlobalIdKey)) {
      event["id2"]["global"] = base::Uint64ToHexString(
          legacy_args[kLegacyEventGlobalIdKey].asUInt64());
    }

    if (legacy_args.isMember(kLegacyEventLocalIdKey)) {
      event["id2"]["local"] = base::Uint64ToHexString(
          legacy_args[kLegacyEventLocalIdKey].asUInt64());
    }

    if (legacy_args.isMember(kLegacyEventIdScopeKey))
      event["scope"] = legacy_args[kLegacyEventIdScopeKey];

    event["args"].removeMember(kLegacyEventArgsKey);

    return event;
  }

  util::Status ExportRawEvents() {
    base::Optional<StringId> raw_legacy_event_key_id =
        storage_->string_pool().GetId("track_event.legacy_event");
    base::Optional<StringId> raw_legacy_system_trace_event_id =
        storage_->string_pool().GetId("chrome_event.legacy_system_trace");
    base::Optional<StringId> raw_legacy_user_trace_event_id =
        storage_->string_pool().GetId("chrome_event.legacy_user_trace");
    base::Optional<StringId> raw_chrome_metadata_event_id =
        storage_->string_pool().GetId("chrome_event.metadata");

    const auto& events = storage_->raw_table();
    for (uint32_t i = 0; i < events.row_count(); ++i) {
      if (raw_legacy_event_key_id &&
          events.name()[i] == *raw_legacy_event_key_id) {
        Json::Value event = ConvertLegacyRawEventToJson(i);
        writer_.WriteCommonEvent(event);
      } else if (raw_legacy_system_trace_event_id &&
                 events.name()[i] == *raw_legacy_system_trace_event_id) {
        Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
        PERFETTO_DCHECK(args.isMember("data"));
        writer_.AddSystemTraceData(args["data"].asString());
      } else if (raw_legacy_user_trace_event_id &&
                 events.name()[i] == *raw_legacy_user_trace_event_id) {
        Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
        PERFETTO_DCHECK(args.isMember("data"));
        writer_.AddUserTraceData(args["data"].asString());
      } else if (raw_chrome_metadata_event_id &&
                 events.name()[i] == *raw_chrome_metadata_event_id) {
        Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
        writer_.MergeMetadata(args);
      }
    }
    return util::OkStatus();
  }

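  // Merges runs of adjacent CPU profile samples that share a callsite into a
  // single nestable async ("b"/"e") event per thread, so repeated identical
  // stacks render as one slice instead of a burst of instant events.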
  class MergedProfileSamplesEmitter {
   public:
    // The TraceFormatWriter must outlive this instance.
    explicit MergedProfileSamplesEmitter(TraceFormatWriter& writer)
        : writer_(writer) {}

    uint64_t AddEventForUtid(UniqueTid utid,
                             int64_t ts,
                             CallsiteId callsite_id,
                             const Json::Value& event) {
      auto current_sample = current_events_.find(utid);

      // If there's a current entry for our thread and it matches the callsite
      // of the new sample, update the entry with the new timestamp. Otherwise
      // create a new entry.
      if (current_sample != current_events_.end() &&
          current_sample->second.callsite_id() == callsite_id) {
        current_sample->second.UpdateWithNewSample(ts);
        return current_sample->second.event_id();
      } else {
        if (current_sample != current_events_.end())
          current_events_.erase(current_sample);

        auto new_entry = current_events_.emplace(
            std::piecewise_construct, std::forward_as_tuple(utid),
            std::forward_as_tuple(writer_, callsite_id, ts, event));
        return new_entry.first->second.event_id();
      }
    }

    static uint64_t GenerateNewEventId() {
      // "n"-phase events are nestable async events which get tied together
      // with their id, so we need to give each one a unique ID as we only
      // want the samples to show up on their own track in the trace-viewer
      // but not nested together (unless they're nested under a merged event).
      static size_t g_id_counter = 0;
      return ++g_id_counter;
    }

   private:
    class Sample {
     public:
      Sample(TraceFormatWriter& writer,
             CallsiteId callsite_id,
             int64_t ts,
             const Json::Value& event)
          : writer_(writer),
            callsite_id_(callsite_id),
            begin_ts_(ts),
            end_ts_(ts),
            event_(event),
            event_id_(MergedProfileSamplesEmitter::GenerateNewEventId()),
            sample_count_(1) {}

      ~Sample() {
        // No point writing a merged event if we only got a single sample,
        // as ExportCpuProfileSamples will already be writing the instant
        // event.
        if (sample_count_ == 1)
          return;

        event_["id"] = base::Uint64ToHexString(event_id_);

        // Write the BEGIN event.
        event_["ph"] = "b";
        // We subtract 1us as a workaround for the first async event not
        // nesting underneath the parent event if the timestamp is identical.
        int64_t begin_in_us_ = begin_ts_ / 1000;
        event_["ts"] = Json::Int64(std::min(begin_in_us_ - 1, begin_in_us_));
        writer_.WriteCommonEvent(event_);

        // Write the END event.
        event_["ph"] = "e";
        event_["ts"] = Json::Int64(end_ts_ / 1000);
        // No need for args for the end event; remove them to save some space.
        event_["args"].clear();
        writer_.WriteCommonEvent(event_);
      }

      void UpdateWithNewSample(int64_t ts) {
        // We assume samples for a given thread will appear in timestamp
        // order; if this assumption stops holding true, we'll have to sort
        // the samples first.
        if (ts < end_ts_ || begin_ts_ > ts) {
          PERFETTO_ELOG(
              "Got a timestamp out of sequence while merging stack samples "
              "during JSON export!\n");
          PERFETTO_DCHECK(false);
        }

        end_ts_ = ts;
        sample_count_++;
      }

      uint64_t event_id() const { return event_id_; }
      CallsiteId callsite_id() const { return callsite_id_; }

     public:
      Sample(const Sample&) = delete;
      Sample& operator=(const Sample&) = delete;
      Sample& operator=(Sample&& value) = delete;

      TraceFormatWriter& writer_;
      CallsiteId callsite_id_;
      int64_t begin_ts_;
      int64_t end_ts_;
      Json::Value event_;
      uint64_t event_id_;
      size_t sample_count_;
    };

    MergedProfileSamplesEmitter(const MergedProfileSamplesEmitter&) = delete;
    MergedProfileSamplesEmitter& operator=(const MergedProfileSamplesEmitter&) =
        delete;
    MergedProfileSamplesEmitter& operator=(
        MergedProfileSamplesEmitter&& value) = delete;

    std::unordered_map<UniqueTid, Sample> current_events_;
    TraceFormatWriter& writer_;
  };

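  // Emits one instant event per CPU profile sample with the symbolized
  // callstack in its args; adjacent samples with identical callsites are
  // additionally merged into begin/end pairs via MergedProfileSamplesEmitter.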
  util::Status ExportCpuProfileSamples() {
    MergedProfileSamplesEmitter merged_sample_emitter(writer_);

    const tables::CpuProfileStackSampleTable& samples =
        storage_->cpu_profile_stack_sample_table();
    for (uint32_t i = 0; i < samples.row_count(); ++i) {
      Json::Value event;
      event["ts"] = Json::Int64(samples.ts()[i] / 1000);

      UniqueTid utid = static_cast<UniqueTid>(samples.utid()[i]);
      auto pid_and_tid = UtidToPidAndTid(utid);
      event["pid"] = Json::Int(pid_and_tid.first);
      event["tid"] = Json::Int(pid_and_tid.second);

      event["ph"] = "n";
      event["cat"] = "disabled-by-default-cpu_profiler";
      event["name"] = "StackCpuSampling";
      event["s"] = "t";

      // Add a dummy thread timestamp to this event to match the format of
      // instant events. Useful in the UI to view args of a selected group of
      // samples.
      event["tts"] = Json::Int64(1);

      const auto& callsites = storage_->stack_profile_callsite_table();
      const auto& frames = storage_->stack_profile_frame_table();
      const auto& mappings = storage_->stack_profile_mapping_table();

      std::vector<std::string> callstack;
      base::Optional<CallsiteId> opt_callsite_id = samples.callsite_id()[i];

      while (opt_callsite_id) {
        CallsiteId callsite_id = *opt_callsite_id;
        uint32_t callsite_row = *callsites.id().IndexOf(callsite_id);

        FrameId frame_id = callsites.frame_id()[callsite_row];
        uint32_t frame_row = *frames.id().IndexOf(frame_id);

        MappingId mapping_id = frames.mapping()[frame_row];
        uint32_t mapping_row = *mappings.id().IndexOf(mapping_id);

        NullTermStringView symbol_name;
        auto opt_symbol_set_id = frames.symbol_set_id()[frame_row];
        if (opt_symbol_set_id) {
          symbol_name = storage_->GetString(
              storage_->symbol_table().name()[*opt_symbol_set_id]);
        }

        base::StackString<1024> frame_entry(
            "%s - %s [%s]\n",
            (symbol_name.empty()
                 ? base::Uint64ToHexString(
                       static_cast<uint64_t>(frames.rel_pc()[frame_row]))
                       .c_str()
                 : symbol_name.c_str()),
            GetNonNullString(storage_, mappings.name()[mapping_row]),
            GetNonNullString(storage_, mappings.build_id()[mapping_row]));

        callstack.emplace_back(frame_entry.ToStdString());

        opt_callsite_id = callsites.parent_id()[callsite_row];
      }

      std::string merged_callstack;
      for (auto entry = callstack.rbegin(); entry != callstack.rend();
           ++entry) {
        merged_callstack += *entry;
      }

      event["args"]["frames"] = merged_callstack;
      event["args"]["process_priority"] = samples.process_priority()[i];

      // TODO(oysteine): Used for backwards compatibility with the memlog
      // pipeline; should be removed once we've switched to looking directly
      // at the tid.
      event["args"]["thread_id"] = Json::Int(pid_and_tid.second);

      // Emit duration events for adjacent samples with the same callsite.
      // For now, only do this when the trace has already been symbolized,
      // i.e. the samples were not directly output by Chrome, to avoid
      // interfering with other processing pipelines.
      base::Optional<CallsiteId> opt_current_callsite_id =
          samples.callsite_id()[i];

      if (opt_current_callsite_id && storage_->symbol_table().row_count() > 0) {
        uint64_t parent_event_id = merged_sample_emitter.AddEventForUtid(
            utid, samples.ts()[i], *opt_current_callsite_id, event);
        event["id"] = base::Uint64ToHexString(parent_event_id);
      } else {
        event["id"] = base::Uint64ToHexString(
            MergedProfileSamplesEmitter::GenerateNewEventId());
      }

      writer_.WriteCommonEvent(event);
    }

    return util::OkStatus();
  }

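  // Exports known metadata table entries as Telemetry metadata fields; the
  // dynamic cr-* entries from ChromeMetadata are exported from the raw table
  // instead.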
  util::Status ExportMetadata() {
    const auto& trace_metadata = storage_->metadata_table();
    const auto& keys = trace_metadata.name();
    const auto& int_values = trace_metadata.int_value();
    const auto& str_values = trace_metadata.str_value();

    // Create a mapping from key string ids to keys.
    std::unordered_map<StringId, metadata::KeyId> key_map;
    for (uint32_t i = 0; i < metadata::kNumKeys; ++i) {
      auto id = *storage_->string_pool().GetId(metadata::kNames[i]);
      key_map[id] = static_cast<metadata::KeyId>(i);
    }

    for (uint32_t pos = 0; pos < trace_metadata.row_count(); pos++) {
      auto key_it = key_map.find(keys[pos]);
      // Skip exporting dynamic entries; the cr-xxx entries that come from
      // the ChromeMetadata proto message are already exported from the raw
      // table.
      if (key_it == key_map.end())
        continue;

      // Cast away from enum type, as otherwise -Wswitch-enum will demand an
      // exhaustive list of cases, even if there's a default case.
      metadata::KeyId key = key_it->second;
      switch (static_cast<size_t>(key)) {
        case metadata::benchmark_description:
          writer_.AppendTelemetryMetadataString(
              "benchmarkDescriptions", str_values.GetString(pos).c_str());
          break;

        case metadata::benchmark_name:
          writer_.AppendTelemetryMetadataString(
              "benchmarks", str_values.GetString(pos).c_str());
          break;

        case metadata::benchmark_start_time_us:
          writer_.SetTelemetryMetadataTimestamp("benchmarkStart",
                                                *int_values[pos]);
          break;

        case metadata::benchmark_had_failures:
          writer_.AppendTelemetryMetadataBool("hadFailures", *int_values[pos]);
          break;

        case metadata::benchmark_label:
          writer_.AppendTelemetryMetadataString(
              "labels", str_values.GetString(pos).c_str());
          break;

        case metadata::benchmark_story_name:
          writer_.AppendTelemetryMetadataString(
              "stories", str_values.GetString(pos).c_str());
          break;

        case metadata::benchmark_story_run_index:
          writer_.AppendTelemetryMetadataInt("storysetRepeats",
                                             *int_values[pos]);
          break;

        case metadata::benchmark_story_run_time_us:
          writer_.SetTelemetryMetadataTimestamp("traceStart", *int_values[pos]);
          break;

        case metadata::benchmark_story_tags:  // repeated
          writer_.AppendTelemetryMetadataString(
              "storyTags", str_values.GetString(pos).c_str());
          break;

        default:
          PERFETTO_DLOG("Ignoring metadata key %zu", static_cast<size_t>(key));
          break;
      }
    }
    return util::OkStatus();
  }
1545
  util::Status ExportStats() {
    const auto& stats = storage_->stats();

    for (size_t idx = 0; idx < stats::kNumKeys; idx++) {
      if (stats::kTypes[idx] == stats::kSingle) {
        writer_.SetStats(stats::kNames[idx], stats[idx].value);
      } else {
        PERFETTO_DCHECK(stats::kTypes[idx] == stats::kIndexed);
        writer_.SetStats(stats::kNames[idx], stats[idx].indexed_values);
      }
    }

    return util::OkStatus();
  }

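  // Exports memory snapshots as memory-infra dump events ("ph": "v"): for
  // each snapshot, OS dump events carrying process totals and smaps-derived
  // vm_regions, followed by chrome dump events carrying allocator nodes and
  // the ownership edges between them.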
  util::Status ExportMemorySnapshots() {
    const auto& memory_snapshots = storage_->memory_snapshot_table();
    base::Optional<StringId> private_footprint_id =
        storage_->string_pool().GetId("chrome.private_footprint_kb");
    base::Optional<StringId> peak_resident_set_id =
        storage_->string_pool().GetId("chrome.peak_resident_set_kb");

    for (uint32_t memory_index = 0; memory_index < memory_snapshots.row_count();
         ++memory_index) {
      Json::Value event_base;

      event_base["ph"] = "v";
      event_base["cat"] = "disabled-by-default-memory-infra";
      auto snapshot_id = memory_snapshots.id()[memory_index].value;
      event_base["id"] = base::Uint64ToHexString(snapshot_id);
      int64_t snapshot_ts = memory_snapshots.timestamp()[memory_index];
      event_base["ts"] = Json::Int64(snapshot_ts / 1000);
      // TODO(crbug:1116359): Add dump type to the snapshot proto
      // to properly fill event_base["name"]
      event_base["name"] = "periodic_interval";
      event_base["args"]["dumps"]["level_of_detail"] = GetNonNullString(
          storage_, memory_snapshots.detail_level()[memory_index]);

      // Export OS dump events for processes with relevant data.
      const auto& process_table = storage_->process_table();
      for (UniquePid upid = 0; upid < process_table.row_count(); ++upid) {
        Json::Value event =
            FillInProcessEventDetails(event_base, process_table.pid()[upid]);
        Json::Value& totals = event["args"]["dumps"]["process_totals"];

        const auto& process_counters = storage_->process_counter_track_table();

        for (uint32_t counter_index = 0;
             counter_index < process_counters.row_count(); ++counter_index) {
          if (process_counters.upid()[counter_index] != upid)
            continue;
          TrackId track_id = process_counters.id()[counter_index];
          if (private_footprint_id && (process_counters.name()[counter_index] ==
                                       private_footprint_id)) {
            totals["private_footprint_bytes"] = base::Uint64ToHexStringNoPrefix(
                GetCounterValue(track_id, snapshot_ts));
          } else if (peak_resident_set_id &&
                     (process_counters.name()[counter_index] ==
                      peak_resident_set_id)) {
            totals["peak_resident_set_size"] = base::Uint64ToHexStringNoPrefix(
                GetCounterValue(track_id, snapshot_ts));
          }
        }

        auto process_args_id = process_table.arg_set_id()[upid];
        if (process_args_id) {
          const Json::Value* process_args =
              &args_builder_.GetArgs(process_args_id);
          if (process_args->isMember("is_peak_rss_resettable")) {
            totals["is_peak_rss_resettable"] =
                (*process_args)["is_peak_rss_resettable"];
          }
        }

        const auto& smaps_table = storage_->profiler_smaps_table();
        // Only create the vm_regions node if the trace has smaps rows, since
        // catapult expects the node, when present, to be non-empty.
        Json::Value* smaps =
            smaps_table.row_count() > 0
                ? &event["args"]["dumps"]["process_mmaps"]["vm_regions"]
                : nullptr;
        for (uint32_t smaps_index = 0; smaps_index < smaps_table.row_count();
             ++smaps_index) {
          if (smaps_table.upid()[smaps_index] != upid)
            continue;
          if (smaps_table.ts()[smaps_index] != snapshot_ts)
            continue;
          Json::Value region;
          region["mf"] =
              GetNonNullString(storage_, smaps_table.file_name()[smaps_index]);
          region["pf"] =
              Json::Int64(smaps_table.protection_flags()[smaps_index]);
          region["sa"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(smaps_table.start_address()[smaps_index]));
          region["sz"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(smaps_table.size_kb()[smaps_index]) * 1024);
          region["ts"] =
              Json::Int64(smaps_table.module_timestamp()[smaps_index]);
          region["id"] = GetNonNullString(
              storage_, smaps_table.module_debugid()[smaps_index]);
          region["df"] = GetNonNullString(
              storage_, smaps_table.module_debug_path()[smaps_index]);
          region["bs"]["pc"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.private_clean_resident_kb()[smaps_index]) *
              1024);
          region["bs"]["pd"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.private_dirty_kb()[smaps_index]) *
              1024);
          region["bs"]["pss"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.proportional_resident_kb()[smaps_index]) *
              1024);
          region["bs"]["sc"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.shared_clean_resident_kb()[smaps_index]) *
              1024);
          region["bs"]["sd"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.shared_dirty_resident_kb()[smaps_index]) *
              1024);
          region["bs"]["sw"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(smaps_table.swap_kb()[smaps_index]) * 1024);
          smaps->append(region);
        }

        if (!totals.empty() || (smaps && !smaps->empty()))
          writer_.WriteCommonEvent(event);
      }

      // Export chrome dump events for process snapshots in the current memory
      // snapshot.
      const auto& process_snapshots = storage_->process_memory_snapshot_table();

      for (uint32_t process_index = 0;
           process_index < process_snapshots.row_count(); ++process_index) {
        if (process_snapshots.snapshot_id()[process_index].value != snapshot_id)
          continue;

        auto process_snapshot_id = process_snapshots.id()[process_index].value;
        uint32_t pid = UpidToPid(process_snapshots.upid()[process_index]);

        // Shared memory nodes are imported into a fake process with pid 0.
        // Catapult expects them to be associated with one of the real
        // processes of the snapshot, so we choose the first one we can find
        // and replace the pid.
        if (pid == 0) {
          for (uint32_t i = 0; i < process_snapshots.row_count(); ++i) {
            if (process_snapshots.snapshot_id()[i].value != snapshot_id)
              continue;
            uint32_t new_pid = UpidToPid(process_snapshots.upid()[i]);
            if (new_pid != 0) {
              pid = new_pid;
              break;
            }
          }
        }

        Json::Value event = FillInProcessEventDetails(event_base, pid);

        const auto& snapshot_nodes = storage_->memory_snapshot_node_table();

        for (uint32_t node_index = 0; node_index < snapshot_nodes.row_count();
             ++node_index) {
          if (snapshot_nodes.process_snapshot_id()[node_index].value !=
              process_snapshot_id) {
            continue;
          }
          const char* path =
              GetNonNullString(storage_, snapshot_nodes.path()[node_index]);
          event["args"]["dumps"]["allocators"][path]["guid"] =
              base::Uint64ToHexStringNoPrefix(
                  static_cast<uint64_t>(snapshot_nodes.id()[node_index].value));
          if (snapshot_nodes.size()[node_index]) {
            AddAttributeToMemoryNode(&event, path, "size",
                                     snapshot_nodes.size()[node_index],
                                     "bytes");
          }
          if (snapshot_nodes.effective_size()[node_index]) {
            AddAttributeToMemoryNode(
                &event, path, "effective_size",
                snapshot_nodes.effective_size()[node_index], "bytes");
          }

          auto node_args_id = snapshot_nodes.arg_set_id()[node_index];
          if (!node_args_id)
            continue;
          const Json::Value* node_args =
              &args_builder_.GetArgs(node_args_id.value());
          for (const auto& arg_name : node_args->getMemberNames()) {
            const Json::Value& arg_value = (*node_args)[arg_name]["value"];
            if (arg_value.empty())
              continue;
            if (arg_value.isString()) {
              AddAttributeToMemoryNode(&event, path, arg_name,
                                       arg_value.asString());
            } else if (arg_value.isInt64()) {
              Json::Value unit = (*node_args)[arg_name]["unit"];
              if (unit.empty())
                unit = "unknown";
              AddAttributeToMemoryNode(&event, path, arg_name,
                                       arg_value.asInt64(), unit.asString());
            }
          }
        }

        const auto& snapshot_edges = storage_->memory_snapshot_edge_table();

        for (uint32_t edge_index = 0; edge_index < snapshot_edges.row_count();
             ++edge_index) {
          SnapshotNodeId source_node_id =
              snapshot_edges.source_node_id()[edge_index];
          uint32_t source_node_row =
              *snapshot_nodes.id().IndexOf(source_node_id);

          if (snapshot_nodes.process_snapshot_id()[source_node_row].value !=
              process_snapshot_id) {
            continue;
          }
          Json::Value edge;
          edge["source"] = base::Uint64ToHexStringNoPrefix(
              snapshot_edges.source_node_id()[edge_index].value);
          edge["target"] = base::Uint64ToHexStringNoPrefix(
              snapshot_edges.target_node_id()[edge_index].value);
          edge["importance"] =
              Json::Int(snapshot_edges.importance()[edge_index]);
          edge["type"] = "ownership";
          event["args"]["dumps"]["allocators_graph"].append(edge);
        }
        writer_.WriteCommonEvent(event);
      }
    }
    return util::OkStatus();
  }

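  // Returns the pid under which this upid was exported. The upid must
  // already have been assigned an exported pid.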
  uint32_t UpidToPid(UniquePid upid) {
    auto pid_it = upids_to_exported_pids_.find(upid);
    PERFETTO_DCHECK(pid_it != upids_to_exported_pids_.end());
    return pid_it->second;
  }

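  // Returns the (pid, tid) pair under which this utid was exported. The utid
  // must already have been assigned an exported pid/tid.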
  std::pair<uint32_t, uint32_t> UtidToPidAndTid(UniqueTid utid) {
    auto pid_and_tid_it = utids_to_exported_pids_and_tids_.find(utid);
    PERFETTO_DCHECK(pid_and_tid_it != utids_to_exported_pids_and_tids_.end());
    return pid_and_tid_it->second;
  }

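  // Returns a substitute value to export in place of a duplicated (reused)
  // pid/tid, counting down from uint32_t max and skipping values that match
  // a real pid/tid in the trace.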
  uint32_t NextExportedPidOrTidForDuplicates() {
    // Ensure that the exported substitute value does not collide with a real
    // pid/tid; such a collision would be very unlikely in practice.
    while (IsValidPidOrTid(next_exported_pid_or_tid_for_duplicates_))
      next_exported_pid_or_tid_for_duplicates_--;
    return next_exported_pid_or_tid_for_duplicates_--;
  }

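  // Returns true if the value matches any pid in the process table or any
  // tid in the thread table. Note: this is a linear scan over both tables.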
  bool IsValidPidOrTid(uint32_t pid_or_tid) {
    const auto& process_table = storage_->process_table();
    for (UniquePid upid = 0; upid < process_table.row_count(); upid++) {
      if (process_table.pid()[upid] == pid_or_tid)
        return true;
    }

    const auto& thread_table = storage_->thread_table();
    for (UniqueTid utid = 0; utid < thread_table.row_count(); utid++) {
      if (thread_table.tid()[utid] == pid_or_tid)
        return true;
    }

    return false;
  }

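  // Returns a copy of the event template with the given pid filled in and a
  // placeholder tid of -1, for events scoped to a process rather than a
  // thread.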
  Json::Value FillInProcessEventDetails(const Json::Value& event,
                                        uint32_t pid) {
    Json::Value output = event;
    output["pid"] = Json::Int(pid);
    output["tid"] = Json::Int(-1);
    return output;
  }

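  // Attaches a scalar attribute to the memory dump node at |path|. The value
  // is encoded as a hex string without a "0x" prefix.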
  void AddAttributeToMemoryNode(Json::Value* event,
                                const std::string& path,
                                const std::string& key,
                                int64_t value,
                                const std::string& units) {
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
        base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(value));
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
        "scalar";
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
        units;
  }

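  // String-typed overload: attaches a string attribute (with optional units)
  // to the memory dump node at |path|.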
  void AddAttributeToMemoryNode(Json::Value* event,
                                const std::string& path,
                                const std::string& key,
                                const std::string& value,
                                const std::string& units = "") {
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
        value;
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
        "string";
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
        units;
  }

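  // Returns the value of the counter on |track_id| whose sample timestamp is
  // exactly |ts|, or 0 if no such sample exists.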
  uint64_t GetCounterValue(TrackId track_id, int64_t ts) {
    const auto& counter_table = storage_->counter_table();
    auto begin = counter_table.ts().begin();
    auto end = counter_table.ts().end();
    PERFETTO_DCHECK(counter_table.ts().IsSorted() &&
                    counter_table.ts().IsColumnType<int64_t>());
    // The timestamp column is sorted, so we can binary search for a matching
    // timestamp. Note that we don't use RowMap operations like FilterInto()
    // here because they bloat trace processor's binary size in Chrome too
    // much.
    auto it = std::lower_bound(begin, end, ts,
                               [](const SqlValue& value, int64_t expected_ts) {
                                 return value.AsLong() < expected_ts;
                               });
    for (; it < end; ++it) {
      if ((*it).AsLong() != ts)
        break;
      if (counter_table.track_id()[it.row()].value == track_id.value)
        return static_cast<uint64_t>(counter_table.value()[it.row()]);
    }
    return 0;
  }

  const TraceStorage* storage_;
  ArgsBuilder args_builder_;
  TraceFormatWriter writer_;

  // If a pid/tid is duplicated between two or more different
  // processes/threads (pid/tid reuse), we export the subsequent occurrences
  // with substitute pids/tids that are visibly different from regular
  // pids/tids, counting down from uint32_t max.
  uint32_t next_exported_pid_or_tid_for_duplicates_ =
      std::numeric_limits<uint32_t>::max();

  std::map<UniquePid, uint32_t> upids_to_exported_pids_;
  std::map<uint32_t, UniquePid> exported_pids_to_upids_;
  std::map<UniqueTid, std::pair<uint32_t, uint32_t>>
      utids_to_exported_pids_and_tids_;
  std::map<std::pair<uint32_t, uint32_t>, UniqueTid>
      exported_pids_and_tids_to_utids_;
};

#endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)

}  // namespace

OutputWriter::OutputWriter() = default;
OutputWriter::~OutputWriter() = default;

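// A minimal usage sketch (for illustration only; not part of this file):
// exporting a populated TraceStorage to a JSON file on disk via the FILE*
// convenience overload defined below. Error handling is elided.
//
//   FILE* output = fopen("trace.json", "w");
//   util::Status status =
//       perfetto::trace_processor::json::ExportJson(storage, output);
//   if (!status.ok())
//     PERFETTO_ELOG("Export failed: %s", status.c_message());
//   fclose(output);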
util::Status ExportJson(const TraceStorage* storage,
                        OutputWriter* output,
                        ArgumentFilterPredicate argument_filter,
                        MetadataFilterPredicate metadata_filter,
                        LabelFilterPredicate label_filter) {
#if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
  JsonExporter exporter(storage, output, std::move(argument_filter),
                        std::move(metadata_filter), std::move(label_filter));
  return exporter.Export();
#else
  perfetto::base::ignore_result(storage);
  perfetto::base::ignore_result(output);
  perfetto::base::ignore_result(argument_filter);
  perfetto::base::ignore_result(metadata_filter);
  perfetto::base::ignore_result(label_filter);
  return util::ErrStatus("JSON support is not compiled in this build");
#endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
}

util::Status ExportJson(TraceProcessorStorage* tp,
                        OutputWriter* output,
                        ArgumentFilterPredicate argument_filter,
                        MetadataFilterPredicate metadata_filter,
                        LabelFilterPredicate label_filter) {
  const TraceStorage* storage = reinterpret_cast<TraceProcessorStorageImpl*>(tp)
                                    ->context()
                                    ->storage.get();
  return ExportJson(storage, output, argument_filter, metadata_filter,
                    label_filter);
}

util::Status ExportJson(const TraceStorage* storage, FILE* output) {
  FileWriter writer(output);
  return ExportJson(storage, &writer, nullptr, nullptr, nullptr);
}

}  // namespace json
}  // namespace trace_processor
}  // namespace perfetto
