/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/trace_processor/export_json.h"
#include "src/trace_processor/export_json.h"

#include <inttypes.h>
#include <stdio.h>

#include <algorithm>
#include <cmath>
#include <cstring>
#include <deque>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>

#include "perfetto/base/build_config.h"
#include "perfetto/ext/base/string_splitter.h"
#include "perfetto/ext/base/string_utils.h"
#include "src/trace_processor/importers/json/json_utils.h"
#include "src/trace_processor/storage/metadata.h"
#include "src/trace_processor/storage/trace_storage.h"
#include "src/trace_processor/trace_processor_storage_impl.h"
#include "src/trace_processor/types/trace_processor_context.h"

#if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
#include <json/reader.h>
#include <json/writer.h>
#endif

namespace perfetto {
namespace trace_processor {
namespace json {

namespace {

class FileWriter : public OutputWriter {
 public:
  explicit FileWriter(FILE* file) : file_(file) {}
  ~FileWriter() override { fflush(file_); }

  util::Status AppendString(const std::string& s) override {
    size_t written =
        fwrite(s.data(), sizeof(std::string::value_type), s.size(), file_);
    if (written != s.size())
      return util::ErrStatus("Error writing to file: %d", ferror(file_));
    return util::OkStatus();
  }

 private:
  FILE* file_;
};
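
// A minimal usage sketch for this writer (assuming the ExportJson(storage,
// FILE*) entry point declared in the export_json headers included above):
//   FILE* f = fopen("trace.json", "w");
//   util::Status status = ExportJson(storage, f);
//   fclose(f);
// FileWriter flushes in its destructor, so the JSON is complete once the
// exporter and writer go out of scope.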

#if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
using IndexMap = perfetto::trace_processor::TraceStorage::Stats::IndexMap;

const char kLegacyEventArgsKey[] = "legacy_event";
const char kLegacyEventPassthroughUtidKey[] = "passthrough_utid";
const char kLegacyEventCategoryKey[] = "category";
const char kLegacyEventNameKey[] = "name";
const char kLegacyEventPhaseKey[] = "phase";
const char kLegacyEventDurationNsKey[] = "duration_ns";
const char kLegacyEventThreadTimestampNsKey[] = "thread_timestamp_ns";
const char kLegacyEventThreadDurationNsKey[] = "thread_duration_ns";
const char kLegacyEventThreadInstructionCountKey[] = "thread_instruction_count";
const char kLegacyEventThreadInstructionDeltaKey[] = "thread_instruction_delta";
const char kLegacyEventUseAsyncTtsKey[] = "use_async_tts";
const char kLegacyEventUnscopedIdKey[] = "unscoped_id";
const char kLegacyEventGlobalIdKey[] = "global_id";
const char kLegacyEventLocalIdKey[] = "local_id";
const char kLegacyEventIdScopeKey[] = "id_scope";
const char kStrippedArgument[] = "__stripped__";

const char* GetNonNullString(const TraceStorage* storage, StringId id) {
  return id == kNullStringId ? "" : storage->GetString(id).c_str();
}

class JsonExporter {
 public:
  JsonExporter(const TraceStorage* storage,
               OutputWriter* output,
               ArgumentFilterPredicate argument_filter,
               MetadataFilterPredicate metadata_filter,
               LabelFilterPredicate label_filter)
      : storage_(storage),
        args_builder_(storage_),
        writer_(output, argument_filter, metadata_filter, label_filter) {}

  util::Status Export() {
    util::Status status = MapUniquePidsAndTids();
    if (!status.ok())
      return status;

    status = ExportThreadNames();
    if (!status.ok())
      return status;

    status = ExportProcessNames();
    if (!status.ok())
      return status;

    status = ExportProcessUptimes();
    if (!status.ok())
      return status;

    status = ExportSlices();
    if (!status.ok())
      return status;

    status = ExportFlows();
    if (!status.ok())
      return status;

    status = ExportRawEvents();
    if (!status.ok())
      return status;

    status = ExportCpuProfileSamples();
    if (!status.ok())
      return status;

    status = ExportMetadata();
    if (!status.ok())
      return status;

    status = ExportStats();
    if (!status.ok())
      return status;

    status = ExportMemorySnapshots();
    if (!status.ok())
      return status;

    return util::OkStatus();
  }

 private:
  class TraceFormatWriter {
   public:
    TraceFormatWriter(OutputWriter* output,
                      ArgumentFilterPredicate argument_filter,
                      MetadataFilterPredicate metadata_filter,
                      LabelFilterPredicate label_filter)
        : output_(output),
          argument_filter_(argument_filter),
          metadata_filter_(metadata_filter),
          label_filter_(label_filter),
          first_event_(true) {
      Json::StreamWriterBuilder b;
      b.settings_["indentation"] = "";
      writer_.reset(b.newStreamWriter());
      WriteHeader();
    }

    ~TraceFormatWriter() { WriteFooter(); }

    void WriteCommonEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      DoWriteEvent(event);
    }

    void AddAsyncBeginEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      async_begin_events_.push_back(event);
    }

    void AddAsyncInstantEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      async_instant_events_.push_back(event);
    }

    void AddAsyncEndEvent(const Json::Value& event) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      async_end_events_.push_back(event);
    }

    void SortAndEmitAsyncEvents() {
      // Catapult doesn't handle out-of-order begin/end events well, especially
      // when their timestamps are identical but their order is wrong. Since
      // we process events sorted by begin timestamp, |async_begin_events_| and
      // |async_instant_events_| are already sorted. We now only have to sort
      // |async_end_events_| and merge-sort all events into a single sequence.

      // Sort |async_end_events_|. Note that we should order by ascending
      // timestamp, but in reverse-stable order. This way, a child slice's end
      // is emitted before its parent's end event, even if both end events have
      // the same timestamp. To accomplish this, we perform a stable sort in
      // descending order and later iterate via reverse iterators.
      struct {
        bool operator()(const Json::Value& a, const Json::Value& b) const {
          return a["ts"].asInt64() > b["ts"].asInt64();
        }
      } CompareEvents;
      std::stable_sort(async_end_events_.begin(), async_end_events_.end(),
                       CompareEvents);

      // Merge sort by timestamp. If events share the same timestamp, prefer
      // instant events, then end events, so that old slices close before new
      // ones are opened, but instant events remain in their deepest nesting
      // level.
      auto instant_event_it = async_instant_events_.begin();
      auto end_event_it = async_end_events_.rbegin();
      auto begin_event_it = async_begin_events_.begin();

      auto has_instant_event = instant_event_it != async_instant_events_.end();
      auto has_end_event = end_event_it != async_end_events_.rend();
      auto has_begin_event = begin_event_it != async_begin_events_.end();

      auto emit_next_instant = [&instant_event_it, &has_instant_event, this]() {
        DoWriteEvent(*instant_event_it);
        instant_event_it++;
        has_instant_event = instant_event_it != async_instant_events_.end();
      };
      auto emit_next_end = [&end_event_it, &has_end_event, this]() {
        DoWriteEvent(*end_event_it);
        end_event_it++;
        has_end_event = end_event_it != async_end_events_.rend();
      };
      auto emit_next_begin = [&begin_event_it, &has_begin_event, this]() {
        DoWriteEvent(*begin_event_it);
        begin_event_it++;
        has_begin_event = begin_event_it != async_begin_events_.end();
      };

      auto emit_next_instant_or_end = [&instant_event_it, &end_event_it,
                                       &emit_next_instant, &emit_next_end]() {
        if ((*instant_event_it)["ts"].asInt64() <=
            (*end_event_it)["ts"].asInt64()) {
          emit_next_instant();
        } else {
          emit_next_end();
        }
      };
      auto emit_next_instant_or_begin = [&instant_event_it, &begin_event_it,
                                         &emit_next_instant,
                                         &emit_next_begin]() {
        if ((*instant_event_it)["ts"].asInt64() <=
            (*begin_event_it)["ts"].asInt64()) {
          emit_next_instant();
        } else {
          emit_next_begin();
        }
      };
      auto emit_next_end_or_begin = [&end_event_it, &begin_event_it,
                                     &emit_next_end, &emit_next_begin]() {
        if ((*end_event_it)["ts"].asInt64() <=
            (*begin_event_it)["ts"].asInt64()) {
          emit_next_end();
        } else {
          emit_next_begin();
        }
      };

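      // For example, with one event pending in each queue, all at ts == 10,
      // the emission order is instant ("n"), then end, then begin, so the
      // currently open slice closes before a new one opens.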
      // While we still have events in all iterators, consider each.
      while (has_instant_event && has_end_event && has_begin_event) {
        if ((*instant_event_it)["ts"].asInt64() <=
            (*end_event_it)["ts"].asInt64()) {
          emit_next_instant_or_begin();
        } else {
          emit_next_end_or_begin();
        }
      }

      // Only instant and end events left.
      while (has_instant_event && has_end_event) {
        emit_next_instant_or_end();
      }

      // Only instant and begin events left.
      while (has_instant_event && has_begin_event) {
        emit_next_instant_or_begin();
      }

      // Only end and begin events left.
      while (has_end_event && has_begin_event) {
        emit_next_end_or_begin();
      }

      // Remaining instant events.
      while (has_instant_event) {
        emit_next_instant();
      }

      // Remaining end events.
      while (has_end_event) {
        emit_next_end();
      }

      // Remaining begin events.
      while (has_begin_event) {
        emit_next_begin();
      }
    }

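    // Writes one "M" (metadata) event. For a thread name this produces, e.g.:
    //   {"ph":"M","cat":"__metadata","ts":0,"name":"thread_name",
    //    "pid":1234,"tid":5678,"args":{"name":"CrBrowserMain"}}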
    void WriteMetadataEvent(const char* metadata_type,
                            const char* metadata_arg_name,
                            const char* metadata_arg_value,
                            uint32_t pid,
                            uint32_t tid) {
      if (label_filter_ && !label_filter_("traceEvents"))
        return;

      std::ostringstream ss;
      if (!first_event_)
        ss << ",\n";

      Json::Value value;
      value["ph"] = "M";
      value["cat"] = "__metadata";
      value["ts"] = 0;
      value["name"] = metadata_type;
      value["pid"] = Json::Int(pid);
      value["tid"] = Json::Int(tid);

      Json::Value args;
      args[metadata_arg_name] = metadata_arg_value;
      value["args"] = args;

      writer_->write(value, &ss);
      output_->AppendString(ss.str());
      first_event_ = false;
    }

    void MergeMetadata(const Json::Value& value) {
      for (const auto& member : value.getMemberNames()) {
        metadata_[member] = value[member];
      }
    }

    void AppendTelemetryMetadataString(const char* key, const char* value) {
      metadata_["telemetry"][key].append(value);
    }

    void AppendTelemetryMetadataInt(const char* key, int64_t value) {
      metadata_["telemetry"][key].append(Json::Int64(value));
    }

    void AppendTelemetryMetadataBool(const char* key, bool value) {
      metadata_["telemetry"][key].append(value);
    }

    void SetTelemetryMetadataTimestamp(const char* key, int64_t value) {
      metadata_["telemetry"][key] = static_cast<double>(value) / 1000.0;
    }

    void SetStats(const char* key, int64_t value) {
      metadata_["trace_processor_stats"][key] = Json::Int64(value);
    }

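    // Stats with per-index values nest under "trace_processor_stats". A
    // buffer stat such as "traced_buf_bytes_written" with index 0 and value
    // 1024 ends up as:
    //   {"trace_processor_stats":{"traced_buf":[{"bytes_written":1024}]}}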
    void SetStats(const char* key, const IndexMap& indexed_values) {
      constexpr const char* kBufferStatsPrefix = "traced_buf_";

      // Stats for the same buffer should be grouped together in the JSON.
      if (strncmp(kBufferStatsPrefix, key, strlen(kBufferStatsPrefix)) == 0) {
        for (const auto& value : indexed_values) {
          metadata_["trace_processor_stats"]["traced_buf"][value.first]
                   [key + strlen(kBufferStatsPrefix)] =
                       Json::Int64(value.second);
        }
        return;
      }

      // Other indexed value stats are exported as an array under their key.
      for (const auto& value : indexed_values) {
        metadata_["trace_processor_stats"][key][value.first] =
            Json::Int64(value.second);
      }
    }

    void AddSystemTraceData(const std::string& data) {
      system_trace_data_ += data;
    }

    void AddUserTraceData(const std::string& data) {
      if (user_trace_data_.empty())
        user_trace_data_ = "[";
      user_trace_data_ += data;
    }

   private:
    void WriteHeader() {
      if (!label_filter_)
        output_->AppendString("{\"traceEvents\":[\n");
    }

    void WriteFooter() {
      SortAndEmitAsyncEvents();

      // Filter metadata entries.
      if (metadata_filter_) {
        for (const auto& member : metadata_.getMemberNames()) {
          if (!metadata_filter_(member.c_str()))
            metadata_[member] = kStrippedArgument;
        }
      }

      if ((!label_filter_ || label_filter_("traceEvents")) &&
          !user_trace_data_.empty()) {
        user_trace_data_ += "]";

        Json::CharReaderBuilder builder;
        auto reader =
            std::unique_ptr<Json::CharReader>(builder.newCharReader());
        Json::Value result;
        if (reader->parse(user_trace_data_.data(),
                          user_trace_data_.data() + user_trace_data_.length(),
                          &result, nullptr)) {
          for (const auto& event : result) {
            WriteCommonEvent(event);
          }
        } else {
          PERFETTO_DLOG(
              "can't parse legacy user json trace export, skipping. data: %s",
              user_trace_data_.c_str());
        }
      }

      std::ostringstream ss;
      if (!label_filter_)
        ss << "]";

      if ((!label_filter_ || label_filter_("systemTraceEvents")) &&
          !system_trace_data_.empty()) {
        ss << ",\"systemTraceEvents\":\n";
        writer_->write(Json::Value(system_trace_data_), &ss);
      }

      if ((!label_filter_ || label_filter_("metadata")) && !metadata_.empty()) {
        ss << ",\"metadata\":\n";
        writer_->write(metadata_, &ss);
      }

      if (!label_filter_)
        ss << "}";

      output_->AppendString(ss.str());
    }

    void DoWriteEvent(const Json::Value& event) {
      std::ostringstream ss;
      if (!first_event_)
        ss << ",\n";

      ArgumentNameFilterPredicate argument_name_filter;
      bool strip_args =
          argument_filter_ &&
          !argument_filter_(event["cat"].asCString(), event["name"].asCString(),
                            &argument_name_filter);
      if ((strip_args || argument_name_filter) && event.isMember("args")) {
        Json::Value event_copy = event;
        if (strip_args) {
          event_copy["args"] = kStrippedArgument;
        } else {
          auto& args = event_copy["args"];
          for (const auto& member : event["args"].getMemberNames()) {
            if (!argument_name_filter(member.c_str()))
              args[member] = kStrippedArgument;
          }
        }
        writer_->write(event_copy, &ss);
      } else {
        writer_->write(event, &ss);
      }
      first_event_ = false;

      output_->AppendString(ss.str());
    }

    OutputWriter* output_;
    ArgumentFilterPredicate argument_filter_;
    MetadataFilterPredicate metadata_filter_;
    LabelFilterPredicate label_filter_;

    std::unique_ptr<Json::StreamWriter> writer_;
    bool first_event_;
    Json::Value metadata_;
    std::string system_trace_data_;
    std::string user_trace_data_;
    std::vector<Json::Value> async_begin_events_;
    std::vector<Json::Value> async_instant_events_;
    std::vector<Json::Value> async_end_events_;
  };

  class ArgsBuilder {
   public:
    explicit ArgsBuilder(const TraceStorage* storage)
        : storage_(storage),
          empty_value_(Json::objectValue),
          nan_value_(Json::StaticString("NaN")),
          inf_value_(Json::StaticString("Infinity")),
          neg_inf_value_(Json::StaticString("-Infinity")) {
      const auto& arg_table = storage_->arg_table();
      uint32_t count = arg_table.row_count();
      if (count == 0) {
        args_sets_.resize(1, empty_value_);
        return;
      }
      args_sets_.resize(arg_table.arg_set_id()[count - 1] + 1, empty_value_);

      for (uint32_t i = 0; i < count; ++i) {
        ArgSetId set_id = arg_table.arg_set_id()[i];
        const char* key = arg_table.key().GetString(i).c_str();
        Variadic value = storage_->GetArgValue(i);
        AppendArg(set_id, key, VariadicToJson(value));
      }
      PostprocessArgs();
    }

    const Json::Value& GetArgs(ArgSetId set_id) const {
      // If |set_id| was empty and added to the storage last, it may not be in
      // |args_sets_|.
      if (set_id >= args_sets_.size())
        return empty_value_;
      return args_sets_[set_id];
    }

   private:
    Json::Value VariadicToJson(Variadic variadic) {
      switch (variadic.type) {
        case Variadic::kInt:
          return Json::Int64(variadic.int_value);
        case Variadic::kUint:
          return Json::UInt64(variadic.uint_value);
        case Variadic::kString:
          return GetNonNullString(storage_, variadic.string_value);
        case Variadic::kReal:
          if (std::isnan(variadic.real_value)) {
            return nan_value_;
          } else if (std::isinf(variadic.real_value) &&
                     variadic.real_value > 0) {
            return inf_value_;
          } else if (std::isinf(variadic.real_value) &&
                     variadic.real_value < 0) {
            return neg_inf_value_;
          } else {
            return variadic.real_value;
          }
        case Variadic::kPointer:
          return base::Uint64ToHexString(variadic.pointer_value);
        case Variadic::kBool:
          return variadic.bool_value;
        case Variadic::kJson:
          Json::CharReaderBuilder b;
          auto reader = std::unique_ptr<Json::CharReader>(b.newCharReader());

          Json::Value result;
          std::string v = GetNonNullString(storage_, variadic.json_value);
          reader->parse(v.data(), v.data() + v.length(), &result, nullptr);
          return result;
      }
      PERFETTO_FATAL("Not reached");  // For gcc.
    }

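    // Arg keys are dotted paths with optional [n] indices: appending a value
    // for the key "a.b[0].c" below builds the JSON shape
    //   {"a":{"b":[{"c":<value>}]}}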
    void AppendArg(ArgSetId set_id,
                   const std::string& key,
                   const Json::Value& value) {
      Json::Value* target = &args_sets_[set_id];
      for (base::StringSplitter parts(key, '.'); parts.Next();) {
        if (PERFETTO_UNLIKELY(!target->isNull() && !target->isObject())) {
          PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
                        key.c_str(),
                        args_sets_[set_id].toStyledString().c_str());
          return;
        }
        std::string key_part = parts.cur_token();
        size_t bracketpos = key_part.find('[');
        if (bracketpos == key_part.npos) {  // A single item.
          target = &(*target)[key_part];
        } else {  // A list item.
          target = &(*target)[key_part.substr(0, bracketpos)];
          while (bracketpos != key_part.npos) {
            // We constructed this string from an int earlier in
            // trace_processor so it shouldn't be possible for this (or the
            // StringToUInt32 below) to fail.
            std::string s =
                key_part.substr(bracketpos + 1, key_part.find(']', bracketpos) -
                                                    bracketpos - 1);
            if (PERFETTO_UNLIKELY(!target->isNull() && !target->isArray())) {
              PERFETTO_DLOG("Malformed arguments. Can't append %s to %s.",
                            key.c_str(),
                            args_sets_[set_id].toStyledString().c_str());
              return;
            }
            base::Optional<uint32_t> index = base::StringToUInt32(s);
            if (PERFETTO_UNLIKELY(!index)) {
              PERFETTO_ELOG("Expected to be able to extract index from %s",
                            key_part.c_str());
              return;
            }
            target = &(*target)[index.value()];
            bracketpos = key_part.find('[', bracketpos + 1);
          }
        }
      }
      *target = value;
    }

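    // Flattens a few well-known arg shapes for catapult: {"debug":{"x":1}}
    // becomes {"x":1}, and {"task":{"posted_from":{"function_name":"F",
    // "file_name":"f.cc"}}} becomes {"src_func":"F","src_file":"f.cc"}.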
    void PostprocessArgs() {
      for (Json::Value& args : args_sets_) {
        // Move all fields from the "debug" key to the upper level.
        if (args.isMember("debug")) {
          Json::Value debug = args["debug"];
          args.removeMember("debug");
          for (const auto& member : debug.getMemberNames()) {
            args[member] = debug[member];
          }
        }

        // Rename source fields.
        if (args.isMember("task")) {
          if (args["task"].isMember("posted_from")) {
            Json::Value posted_from = args["task"]["posted_from"];
            args["task"].removeMember("posted_from");
            if (posted_from.isMember("function_name")) {
              args["src_func"] = posted_from["function_name"];
              args["src_file"] = posted_from["file_name"];
            } else if (posted_from.isMember("file_name")) {
              args["src"] = posted_from["file_name"];
            }
          }
          if (args["task"].empty())
            args.removeMember("task");
        }
        if (args.isMember("source")) {
          Json::Value source = args["source"];
          if (source.isObject() && source.isMember("function_name")) {
            args["function_name"] = source["function_name"];
            args["file_name"] = source["file_name"];
            args.removeMember("source");
          }
        }
      }
    }

    const TraceStorage* storage_;
    std::vector<Json::Value> args_sets_;
    const Json::Value empty_value_;
    const Json::Value nan_value_;
    const Json::Value inf_value_;
    const Json::Value neg_inf_value_;
  };

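  // JSON traces key events on pid/tid, but trace processor may have seen
  // several processes or threads with the same OS id (e.g. after pid reuse).
  // The first upid/utid keeps its real pid/tid; duplicates are remapped to
  // fresh synthetic ids via NextExportedPidOrTidForDuplicates().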
  util::Status MapUniquePidsAndTids() {
    const auto& process_table = storage_->process_table();
    for (UniquePid upid = 0; upid < process_table.row_count(); upid++) {
      uint32_t exported_pid = process_table.pid()[upid];
      auto it_and_inserted =
          exported_pids_to_upids_.emplace(exported_pid, upid);
      if (!it_and_inserted.second) {
        exported_pid = NextExportedPidOrTidForDuplicates();
        it_and_inserted = exported_pids_to_upids_.emplace(exported_pid, upid);
      }
      upids_to_exported_pids_.emplace(upid, exported_pid);
    }

    const auto& thread_table = storage_->thread_table();
    for (UniqueTid utid = 0; utid < thread_table.row_count(); utid++) {
      uint32_t exported_pid = 0;
      base::Optional<UniquePid> upid = thread_table.upid()[utid];
      if (upid) {
        auto exported_pid_it = upids_to_exported_pids_.find(*upid);
        PERFETTO_DCHECK(exported_pid_it != upids_to_exported_pids_.end());
        exported_pid = exported_pid_it->second;
      }

      uint32_t exported_tid = thread_table.tid()[utid];
      auto it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
          std::make_pair(exported_pid, exported_tid), utid);
      if (!it_and_inserted.second) {
        exported_tid = NextExportedPidOrTidForDuplicates();
        it_and_inserted = exported_pids_and_tids_to_utids_.emplace(
            std::make_pair(exported_pid, exported_tid), utid);
      }
      utids_to_exported_pids_and_tids_.emplace(
          utid, std::make_pair(exported_pid, exported_tid));
    }

    return util::OkStatus();
  }

  util::Status ExportThreadNames() {
    const auto& thread_table = storage_->thread_table();
    for (UniqueTid utid = 0; utid < thread_table.row_count(); ++utid) {
      auto opt_name = thread_table.name()[utid];
      if (!opt_name.is_null()) {
        const char* thread_name = GetNonNullString(storage_, opt_name);
        auto pid_and_tid = UtidToPidAndTid(utid);
        writer_.WriteMetadataEvent("thread_name", "name", thread_name,
                                   pid_and_tid.first, pid_and_tid.second);
      }
    }
    return util::OkStatus();
  }

  util::Status ExportProcessNames() {
    const auto& process_table = storage_->process_table();
    for (UniquePid upid = 0; upid < process_table.row_count(); ++upid) {
      auto opt_name = process_table.name()[upid];
      if (!opt_name.is_null()) {
        const char* process_name = GetNonNullString(storage_, opt_name);
        writer_.WriteMetadataEvent("process_name", "name", process_name,
                                   UpidToPid(upid), /*tid=*/0);
      }
    }
    return util::OkStatus();
  }

  // Writes an approximate uptime for each process, based on the process's
  // start time and the end of the last slice anywhere in the trace. The same
  // end timestamp is used for every process, so a process that exited before
  // the end of the trace gets an overestimate.
  util::Status ExportProcessUptimes() {
    int64_t last_timestamp_ns = FindLastSliceTimestamp();
    if (last_timestamp_ns <= 0)
      return util::OkStatus();

    const auto& process_table = storage_->process_table();
    for (UniquePid upid = 0; upid < process_table.row_count(); ++upid) {
      base::Optional<int64_t> start_timestamp_ns =
          process_table.start_ts()[upid];
      if (!start_timestamp_ns.has_value())
        continue;

      int64_t process_uptime_seconds =
          (last_timestamp_ns - start_timestamp_ns.value()) /
          (1000 * 1000 * 1000);

      writer_.WriteMetadataEvent("process_uptime_seconds", "uptime",
                                 std::to_string(process_uptime_seconds).c_str(),
                                 UpidToPid(upid), /*tid=*/0);
    }

    return util::OkStatus();
  }

  // Returns the last slice's end timestamp for the entire trace. If no slices
  // are found, 0 is returned.
  int64_t FindLastSliceTimestamp() {
    int64_t last_ts = 0;
    const auto& slices = storage_->slice_table();
    for (uint32_t i = 0; i < slices.row_count(); ++i) {
      int64_t duration_ns = slices.dur()[i];
      int64_t timestamp_ns = slices.ts()[i];

      if (duration_ns + timestamp_ns > last_ts) {
        last_ts = duration_ns + timestamp_ns;
      }
    }
    return last_ts;
  }

  util::Status ExportSlices() {
    const auto& slices = storage_->slice_table();
    for (uint32_t i = 0; i < slices.row_count(); ++i) {
      // Skip slices with empty category - these are ftrace/system slices that
      // were also imported into the raw table and will be exported from there
      // by trace_to_text.
      // TODO(b/153609716): Add a src column or do_not_export flag instead.
      auto cat = slices.category().GetString(i);
      if (cat.c_str() == nullptr || cat == "binder")
        continue;

      Json::Value event;
      event["ts"] = Json::Int64(slices.ts()[i] / 1000);
      event["cat"] = GetNonNullString(storage_, slices.category()[i]);
      event["name"] = GetNonNullString(storage_, slices.name()[i]);
      event["pid"] = 0;
      event["tid"] = 0;

      base::Optional<UniqueTid> legacy_utid;
      std::string legacy_phase;

      event["args"] =
          args_builder_.GetArgs(slices.arg_set_id()[i]);  // Makes a copy.
      if (event["args"].isMember(kLegacyEventArgsKey)) {
        const auto& legacy_args = event["args"][kLegacyEventArgsKey];

        if (legacy_args.isMember(kLegacyEventPassthroughUtidKey)) {
          legacy_utid = legacy_args[kLegacyEventPassthroughUtidKey].asUInt();
        }
        if (legacy_args.isMember(kLegacyEventPhaseKey)) {
          legacy_phase = legacy_args[kLegacyEventPhaseKey].asString();
        }

        event["args"].removeMember(kLegacyEventArgsKey);
      }

      // To prevent duplicate export of slices, only export slices on
      // descriptor or chrome tracks (i.e. TrackEvent slices). Slices on other
      // tracks may also be present as raw events and handled by
      // trace_to_text. Only add more track types here if they are not already
      // covered by trace_to_text.
      TrackId track_id = slices.track_id()[i];

      const auto& track_table = storage_->track_table();

      uint32_t track_row = *track_table.id().IndexOf(track_id);
      auto track_args_id = track_table.source_arg_set_id()[track_row];
      const Json::Value* track_args = nullptr;
      bool legacy_chrome_track = false;
      bool is_child_track = false;
      if (track_args_id) {
        track_args = &args_builder_.GetArgs(*track_args_id);
        legacy_chrome_track = (*track_args)["source"].asString() == "chrome";
        is_child_track = track_args->isMember("is_root_in_scope") &&
                         !(*track_args)["is_root_in_scope"].asBool();
      }

      const auto& thread_track = storage_->thread_track_table();
      const auto& process_track = storage_->process_track_table();
      const auto& thread_slices = storage_->thread_slice_table();
      const auto& virtual_track_slices = storage_->virtual_track_slices();

      int64_t duration_ns = slices.dur()[i];
      base::Optional<int64_t> thread_ts_ns;
      base::Optional<int64_t> thread_duration_ns;
      base::Optional<int64_t> thread_instruction_count;
      base::Optional<int64_t> thread_instruction_delta;

      SliceId id = slices.id()[i];
      base::Optional<uint32_t> thread_slice_row =
          thread_slices.id().IndexOf(id);
      if (thread_slice_row) {
        thread_ts_ns = thread_slices.thread_ts()[*thread_slice_row];
        thread_duration_ns = thread_slices.thread_dur()[*thread_slice_row];
        thread_instruction_count =
            thread_slices.thread_instruction_count()[*thread_slice_row];
        thread_instruction_delta =
            thread_slices.thread_instruction_delta()[*thread_slice_row];
      } else {
        base::Optional<uint32_t> vtrack_slice_row =
            virtual_track_slices.FindRowForSliceId(id);
        if (vtrack_slice_row) {
          thread_ts_ns =
              virtual_track_slices.thread_timestamp_ns()[*vtrack_slice_row];
          thread_duration_ns =
              virtual_track_slices.thread_duration_ns()[*vtrack_slice_row];
          thread_instruction_count =
              virtual_track_slices
                  .thread_instruction_counts()[*vtrack_slice_row];
          thread_instruction_delta =
              virtual_track_slices
                  .thread_instruction_deltas()[*vtrack_slice_row];
        }
      }

      auto opt_thread_track_row = thread_track.id().IndexOf(TrackId{track_id});

      if (opt_thread_track_row && !is_child_track) {
        // Synchronous (thread) slice or instant event.
        UniqueTid utid = thread_track.utid()[*opt_thread_track_row];
        auto pid_and_tid = UtidToPidAndTid(utid);
        event["pid"] = Json::Int(pid_and_tid.first);
        event["tid"] = Json::Int(pid_and_tid.second);

        if (duration_ns == 0) {
          if (legacy_phase.empty()) {
            // Use "I" instead of "i" phase for backwards-compat with old
            // consumers.
            event["ph"] = "I";
          } else {
            event["ph"] = legacy_phase;
          }
          if (thread_ts_ns && *thread_ts_ns > 0) {
            event["tts"] = Json::Int64(*thread_ts_ns / 1000);
          }
          if (thread_instruction_count && *thread_instruction_count > 0) {
            event["ticount"] = Json::Int64(*thread_instruction_count);
          }
          event["s"] = "t";
        } else {
          if (duration_ns > 0) {
            event["ph"] = "X";
            event["dur"] = Json::Int64(duration_ns / 1000);
          } else {
            // If the slice didn't finish, the duration may be negative. Only
            // write a begin event without end event in this case.
            event["ph"] = "B";
          }
          if (thread_ts_ns && *thread_ts_ns > 0) {
            event["tts"] = Json::Int64(*thread_ts_ns / 1000);
            // Only write thread duration for completed events.
            if (duration_ns > 0 && thread_duration_ns)
              event["tdur"] = Json::Int64(*thread_duration_ns / 1000);
          }
          if (thread_instruction_count && *thread_instruction_count > 0) {
            event["ticount"] = Json::Int64(*thread_instruction_count);
            // Only write thread instruction delta for completed events.
            if (duration_ns > 0 && thread_instruction_delta)
              event["tidelta"] = Json::Int64(*thread_instruction_delta);
          }
        }
        writer_.WriteCommonEvent(event);
      } else if (is_child_track ||
                 (legacy_chrome_track && track_args->isMember("source_id"))) {
        // Async event slice.
        auto opt_process_row = process_track.id().IndexOf(TrackId{track_id});
        if (legacy_chrome_track) {
          // Legacy async tracks are always process-associated and have args.
          PERFETTO_DCHECK(opt_process_row);
          PERFETTO_DCHECK(track_args);
          uint32_t upid = process_track.upid()[*opt_process_row];
          uint32_t exported_pid = UpidToPid(upid);
          event["pid"] = Json::Int(exported_pid);
          event["tid"] =
              Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
                                    : exported_pid);

          // Preserve original event IDs for legacy tracks. This is so that
          // e.g. memory dump IDs show up correctly in the JSON trace.
          PERFETTO_DCHECK(track_args->isMember("source_id"));
          PERFETTO_DCHECK(track_args->isMember("source_id_is_process_scoped"));
          PERFETTO_DCHECK(track_args->isMember("source_scope"));
          uint64_t source_id =
              static_cast<uint64_t>((*track_args)["source_id"].asInt64());
          std::string source_scope = (*track_args)["source_scope"].asString();
          if (!source_scope.empty())
            event["scope"] = source_scope;
          bool source_id_is_process_scoped =
              (*track_args)["source_id_is_process_scoped"].asBool();
          if (source_id_is_process_scoped) {
            event["id2"]["local"] = base::Uint64ToHexString(source_id);
          } else {
            // Some legacy importers don't understand "id2" fields, so we use
            // the "usually" global "id" field instead. This works as long as
            // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
            // "LOCAL_ID_PHASES" in catapult.
            event["id"] = base::Uint64ToHexString(source_id);
          }
        } else {
          if (opt_thread_track_row) {
            UniqueTid utid = thread_track.utid()[*opt_thread_track_row];
            auto pid_and_tid = UtidToPidAndTid(utid);
            event["pid"] = Json::Int(pid_and_tid.first);
            event["tid"] = Json::Int(pid_and_tid.second);
            event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
          } else if (opt_process_row) {
            uint32_t upid = process_track.upid()[*opt_process_row];
            uint32_t exported_pid = UpidToPid(upid);
            event["pid"] = Json::Int(exported_pid);
            event["tid"] =
                Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
                                      : exported_pid);
            event["id2"]["local"] = base::Uint64ToHexString(track_id.value);
          } else {
            if (legacy_utid) {
              auto pid_and_tid = UtidToPidAndTid(*legacy_utid);
              event["pid"] = Json::Int(pid_and_tid.first);
              event["tid"] = Json::Int(pid_and_tid.second);
            }

            // Some legacy importers don't understand "id2" fields, so we use
            // the "usually" global "id" field instead. This works as long as
            // the event phase is not in {'N', 'D', 'O', '(', ')'}, see
            // "LOCAL_ID_PHASES" in catapult.
            event["id"] = base::Uint64ToHexString(track_id.value);
          }
        }

        if (thread_ts_ns && *thread_ts_ns > 0) {
          event["tts"] = Json::Int64(*thread_ts_ns / 1000);
          event["use_async_tts"] = Json::Int(1);
        }
        if (thread_instruction_count && *thread_instruction_count > 0) {
          event["ticount"] = Json::Int64(*thread_instruction_count);
          event["use_async_tts"] = Json::Int(1);
        }

        if (duration_ns == 0) {
          if (legacy_phase.empty()) {
            // Instant async event.
            event["ph"] = "n";
            writer_.AddAsyncInstantEvent(event);
          } else {
            // Async step events.
            event["ph"] = legacy_phase;
            writer_.AddAsyncBeginEvent(event);
          }
        } else {  // Async start and end.
          event["ph"] = legacy_phase.empty() ? "b" : legacy_phase;
          writer_.AddAsyncBeginEvent(event);
          // If the slice didn't finish, the duration may be negative. Don't
          // write the end event in this case.
          if (duration_ns > 0) {
            event["ph"] = legacy_phase.empty() ? "e" : "F";
            event["ts"] = Json::Int64((slices.ts()[i] + duration_ns) / 1000);
            if (thread_ts_ns && thread_duration_ns && *thread_ts_ns > 0) {
              event["tts"] =
                  Json::Int64((*thread_ts_ns + *thread_duration_ns) / 1000);
            }
            if (thread_instruction_count && thread_instruction_delta &&
                *thread_instruction_count > 0) {
              event["ticount"] = Json::Int64(
                  (*thread_instruction_count + *thread_instruction_delta));
            }
            event["args"].clear();
            writer_.AddAsyncEndEvent(event);
          }
        }
      } else {
        // Global or process-scoped instant event.
        PERFETTO_DCHECK(legacy_chrome_track || !is_child_track);
        if (duration_ns != 0) {
          // We don't support exporting slices on the default global or
          // process track to JSON (JSON only supports instant events on
          // these tracks).
          PERFETTO_DLOG(
              "skipping non-instant slice on global or process track");
        } else {
          if (legacy_phase.empty()) {
            // Use "I" instead of "i" phase for backwards-compat with old
            // consumers.
            event["ph"] = "I";
          } else {
            event["ph"] = legacy_phase;
          }

          auto opt_process_row = process_track.id().IndexOf(TrackId{track_id});
          if (opt_process_row.has_value()) {
            uint32_t upid = process_track.upid()[*opt_process_row];
            uint32_t exported_pid = UpidToPid(upid);
            event["pid"] = Json::Int(exported_pid);
            event["tid"] =
                Json::Int(legacy_utid ? UtidToPidAndTid(*legacy_utid).second
                                      : exported_pid);
            event["s"] = "p";
          } else {
            event["s"] = "g";
          }
          writer_.WriteCommonEvent(event);
        }
      }
    }
    return util::OkStatus();
  }

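  // Builds one half of a legacy flow event pair: phase "s" (start) for the
  // outgoing slice, or phase "f" (finish, with "bp":"e") for the incoming
  // one. A resulting event looks roughly like:
  //   {"ph":"s","id":7,"pid":1,"tid":2,"cat":"c","name":"n","ts":123,...}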
  base::Optional<Json::Value> CreateFlowEventV1(uint32_t flow_id,
                                                SliceId slice_id,
                                                std::string name,
                                                std::string cat,
                                                Json::Value args,
                                                bool flow_begin) {
    const auto& slices = storage_->slice_table();
    const auto& thread_tracks = storage_->thread_track_table();

    auto opt_slice_idx = slices.id().IndexOf(slice_id);
    if (!opt_slice_idx)
      return base::nullopt;
    uint32_t slice_idx = opt_slice_idx.value();

    TrackId track_id = storage_->slice_table().track_id()[slice_idx];
    auto opt_thread_track_idx = thread_tracks.id().IndexOf(track_id);
    // Catapult only supports flow events attached to thread-track slices.
    if (!opt_thread_track_idx)
      return base::nullopt;

    UniqueTid utid = thread_tracks.utid()[opt_thread_track_idx.value()];
    auto pid_and_tid = UtidToPidAndTid(utid);
    Json::Value event;
    event["id"] = flow_id;
    event["pid"] = Json::Int(pid_and_tid.first);
    event["tid"] = Json::Int(pid_and_tid.second);
    event["cat"] = cat;
    event["name"] = name;
    event["ph"] = (flow_begin ? "s" : "f");
    event["ts"] = Json::Int64(slices.ts()[slice_idx] / 1000);
    if (!flow_begin) {
      event["bp"] = "e";
    }
    event["args"] = std::move(args);
    return std::move(event);
  }

  util::Status ExportFlows() {
    const auto& flow_table = storage_->flow_table();
    const auto& slice_table = storage_->slice_table();
    for (uint32_t i = 0; i < flow_table.row_count(); i++) {
      SliceId slice_out = flow_table.slice_out()[i];
      SliceId slice_in = flow_table.slice_in()[i];
      uint32_t arg_set_id = flow_table.arg_set_id()[i];

      std::string cat;
      std::string name;
      auto args = args_builder_.GetArgs(arg_set_id);
      if (arg_set_id != kInvalidArgSetId) {
        cat = args["cat"].asString();
        name = args["name"].asString();
        // Don't export these args since they are only used for this export
        // and weren't part of the original event.
        args.removeMember("name");
        args.removeMember("cat");
      } else {
        auto opt_slice_out_idx = slice_table.id().IndexOf(slice_out);
        PERFETTO_DCHECK(opt_slice_out_idx.has_value());
        StringId cat_id = slice_table.category()[opt_slice_out_idx.value()];
        StringId name_id = slice_table.name()[opt_slice_out_idx.value()];
        cat = GetNonNullString(storage_, cat_id);
        name = GetNonNullString(storage_, name_id);
      }

      auto out_event = CreateFlowEventV1(i, slice_out, name, cat, args,
                                         /* flow_begin = */ true);
      auto in_event = CreateFlowEventV1(i, slice_in, name, cat, std::move(args),
                                        /* flow_begin = */ false);

      if (out_event && in_event) {
        writer_.WriteCommonEvent(out_event.value());
        writer_.WriteCommonEvent(in_event.value());
      }
    }
    return util::OkStatus();
  }

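  // Rebuilds a Trace Event Format event from a raw-table row whose params
  // were stashed under "legacy_event.*" in the arg set by the importers,
  // e.g. "legacy_event.phase" becomes the event's "ph" field.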
  Json::Value ConvertLegacyRawEventToJson(uint32_t index) {
    const auto& events = storage_->raw_table();

    Json::Value event;
    event["ts"] = Json::Int64(events.ts()[index] / 1000);

    UniqueTid utid = static_cast<UniqueTid>(events.utid()[index]);
    auto pid_and_tid = UtidToPidAndTid(utid);
    event["pid"] = Json::Int(pid_and_tid.first);
    event["tid"] = Json::Int(pid_and_tid.second);

    // Raw legacy events store all other params in the arg set. Make a copy of
    // the converted args here, parse, and then remove the legacy params.
    event["args"] = args_builder_.GetArgs(events.arg_set_id()[index]);
    const Json::Value& legacy_args = event["args"][kLegacyEventArgsKey];

    PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventCategoryKey));
    event["cat"] = legacy_args[kLegacyEventCategoryKey];

    PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventNameKey));
    event["name"] = legacy_args[kLegacyEventNameKey];

    PERFETTO_DCHECK(legacy_args.isMember(kLegacyEventPhaseKey));
    event["ph"] = legacy_args[kLegacyEventPhaseKey];

    // Object snapshot events are supposed to have a mandatory "snapshot" arg,
    // which may be removed in trace processor if it is empty.
    if (legacy_args[kLegacyEventPhaseKey] == "O" &&
        !event["args"].isMember("snapshot")) {
      event["args"]["snapshot"] = Json::Value(Json::objectValue);
    }

    if (legacy_args.isMember(kLegacyEventDurationNsKey))
      event["dur"] = legacy_args[kLegacyEventDurationNsKey].asInt64() / 1000;

    if (legacy_args.isMember(kLegacyEventThreadTimestampNsKey)) {
      event["tts"] =
          legacy_args[kLegacyEventThreadTimestampNsKey].asInt64() / 1000;
    }

    if (legacy_args.isMember(kLegacyEventThreadDurationNsKey)) {
      event["tdur"] =
          legacy_args[kLegacyEventThreadDurationNsKey].asInt64() / 1000;
    }

    if (legacy_args.isMember(kLegacyEventThreadInstructionCountKey))
      event["ticount"] = legacy_args[kLegacyEventThreadInstructionCountKey];

    if (legacy_args.isMember(kLegacyEventThreadInstructionDeltaKey))
      event["tidelta"] = legacy_args[kLegacyEventThreadInstructionDeltaKey];

    if (legacy_args.isMember(kLegacyEventUseAsyncTtsKey))
      event["use_async_tts"] = legacy_args[kLegacyEventUseAsyncTtsKey];

    if (legacy_args.isMember(kLegacyEventUnscopedIdKey)) {
      event["id"] = base::Uint64ToHexString(
          legacy_args[kLegacyEventUnscopedIdKey].asUInt64());
    }

    if (legacy_args.isMember(kLegacyEventGlobalIdKey)) {
      event["id2"]["global"] = base::Uint64ToHexString(
          legacy_args[kLegacyEventGlobalIdKey].asUInt64());
    }

    if (legacy_args.isMember(kLegacyEventLocalIdKey)) {
      event["id2"]["local"] = base::Uint64ToHexString(
          legacy_args[kLegacyEventLocalIdKey].asUInt64());
    }

    if (legacy_args.isMember(kLegacyEventIdScopeKey))
      event["scope"] = legacy_args[kLegacyEventIdScopeKey];

    event["args"].removeMember(kLegacyEventArgsKey);

    return event;
  }

  util::Status ExportRawEvents() {
    base::Optional<StringId> raw_legacy_event_key_id =
        storage_->string_pool().GetId("track_event.legacy_event");
    base::Optional<StringId> raw_legacy_system_trace_event_id =
        storage_->string_pool().GetId("chrome_event.legacy_system_trace");
    base::Optional<StringId> raw_legacy_user_trace_event_id =
        storage_->string_pool().GetId("chrome_event.legacy_user_trace");
    base::Optional<StringId> raw_chrome_metadata_event_id =
        storage_->string_pool().GetId("chrome_event.metadata");

    const auto& events = storage_->raw_table();
    for (uint32_t i = 0; i < events.row_count(); ++i) {
      if (raw_legacy_event_key_id &&
          events.name()[i] == *raw_legacy_event_key_id) {
        Json::Value event = ConvertLegacyRawEventToJson(i);
        writer_.WriteCommonEvent(event);
      } else if (raw_legacy_system_trace_event_id &&
                 events.name()[i] == *raw_legacy_system_trace_event_id) {
        Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
        PERFETTO_DCHECK(args.isMember("data"));
        writer_.AddSystemTraceData(args["data"].asString());
      } else if (raw_legacy_user_trace_event_id &&
                 events.name()[i] == *raw_legacy_user_trace_event_id) {
        Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
        PERFETTO_DCHECK(args.isMember("data"));
        writer_.AddUserTraceData(args["data"].asString());
      } else if (raw_chrome_metadata_event_id &&
                 events.name()[i] == *raw_chrome_metadata_event_id) {
        Json::Value args = args_builder_.GetArgs(events.arg_set_id()[i]);
        writer_.MergeMetadata(args);
      }
    }
    return util::OkStatus();
  }

  class MergedProfileSamplesEmitter {
   public:
    // The TraceFormatWriter must outlive this instance.
    explicit MergedProfileSamplesEmitter(TraceFormatWriter& writer)
        : writer_(writer) {}

    uint64_t AddEventForUtid(UniqueTid utid,
                             int64_t ts,
                             CallsiteId callsite_id,
                             const Json::Value& event) {
      auto current_sample = current_events_.find(utid);

      // If there's a current entry for our thread and it matches the callsite
      // of the new sample, update the entry with the new timestamp. Otherwise
      // create a new entry.
      if (current_sample != current_events_.end() &&
          current_sample->second.callsite_id() == callsite_id) {
        current_sample->second.UpdateWithNewSample(ts);
        return current_sample->second.event_id();
      } else {
        if (current_sample != current_events_.end())
          current_events_.erase(current_sample);

        auto new_entry = current_events_.emplace(
            std::piecewise_construct, std::forward_as_tuple(utid),
            std::forward_as_tuple(writer_, callsite_id, ts, event));
        return new_entry.first->second.event_id();
      }
    }

    static uint64_t GenerateNewEventId() {
      // "n"-phase events are nestable async events which get tied together
      // with their id, so we need to give each one a unique ID as we only
      // want the samples to show up on their own track in the trace-viewer
      // but not nested together (unless they're nested under a merged event).
      static size_t g_id_counter = 0;
      return ++g_id_counter;
    }

   private:
    class Sample {
     public:
      Sample(TraceFormatWriter& writer,
             CallsiteId callsite_id,
             int64_t ts,
             const Json::Value& event)
          : writer_(writer),
            callsite_id_(callsite_id),
            begin_ts_(ts),
            end_ts_(ts),
            event_(event),
            event_id_(MergedProfileSamplesEmitter::GenerateNewEventId()),
            sample_count_(1) {}

      ~Sample() {
        // No point writing a merged event if we only got a single sample,
        // as ExportCpuProfileSamples will already be writing the instant
        // event.
        if (sample_count_ == 1)
          return;

        event_["id"] = base::Uint64ToHexString(event_id_);

        // Write the BEGIN event. We subtract 1us as a workaround for the
        // first async event not nesting underneath the parent event if the
        // timestamp is identical.
        event_["ph"] = "b";
        int64_t begin_in_us = begin_ts_ / 1000;
        event_["ts"] = Json::Int64(begin_in_us - 1);
        writer_.WriteCommonEvent(event_);

        // Write the END event.
        event_["ph"] = "e";
        event_["ts"] = Json::Int64(end_ts_ / 1000);
        // No need for args on the end event; remove them to save some space.
        event_["args"].clear();
        writer_.WriteCommonEvent(event_);
      }

      void UpdateWithNewSample(int64_t ts) {
        // We assume samples for a given thread will appear in timestamp
        // order; if this assumption stops holding true, we'll have to sort
        // the samples first.
        if (ts < end_ts_ || begin_ts_ > ts) {
          PERFETTO_ELOG(
              "Got a timestamp out of sequence while merging stack samples "
              "during JSON export!\n");
          PERFETTO_DCHECK(false);
        }

        end_ts_ = ts;
        sample_count_++;
      }

      uint64_t event_id() const { return event_id_; }
      CallsiteId callsite_id() const { return callsite_id_; }

     private:
      Sample(const Sample&) = delete;
      Sample& operator=(const Sample&) = delete;
      Sample& operator=(Sample&& value) = delete;

      TraceFormatWriter& writer_;
      CallsiteId callsite_id_;
      int64_t begin_ts_;
      int64_t end_ts_;
      Json::Value event_;
      uint64_t event_id_;
      size_t sample_count_;
    };

    MergedProfileSamplesEmitter(const MergedProfileSamplesEmitter&) = delete;
    MergedProfileSamplesEmitter& operator=(const MergedProfileSamplesEmitter&) =
        delete;
    MergedProfileSamplesEmitter& operator=(MergedProfileSamplesEmitter&&
                                               value) = delete;

    std::unordered_map<UniqueTid, Sample> current_events_;
    TraceFormatWriter& writer_;
  };

  util::Status ExportCpuProfileSamples() {
    MergedProfileSamplesEmitter merged_sample_emitter(writer_);

    const tables::CpuProfileStackSampleTable& samples =
        storage_->cpu_profile_stack_sample_table();
    for (uint32_t i = 0; i < samples.row_count(); ++i) {
      Json::Value event;
      event["ts"] = Json::Int64(samples.ts()[i] / 1000);

      UniqueTid utid = static_cast<UniqueTid>(samples.utid()[i]);
      auto pid_and_tid = UtidToPidAndTid(utid);
      event["pid"] = Json::Int(pid_and_tid.first);
      event["tid"] = Json::Int(pid_and_tid.second);

      event["ph"] = "n";
      event["cat"] = "disabled-by-default-cpu_profiler";
      event["name"] = "StackCpuSampling";
      event["s"] = "t";

      // Add a dummy thread timestamp to this event to match the format of
      // instant events. Useful in the UI to view args of a selected group of
      // samples.
      event["tts"] = Json::Int64(1);

      const auto& callsites = storage_->stack_profile_callsite_table();
      const auto& frames = storage_->stack_profile_frame_table();
      const auto& mappings = storage_->stack_profile_mapping_table();

      std::vector<std::string> callstack;
      base::Optional<CallsiteId> opt_callsite_id = samples.callsite_id()[i];

      while (opt_callsite_id) {
        CallsiteId callsite_id = *opt_callsite_id;
        uint32_t callsite_row = *callsites.id().IndexOf(callsite_id);

        FrameId frame_id = callsites.frame_id()[callsite_row];
        uint32_t frame_row = *frames.id().IndexOf(frame_id);

        MappingId mapping_id = frames.mapping()[frame_row];
        uint32_t mapping_row = *mappings.id().IndexOf(mapping_id);

        NullTermStringView symbol_name;
        auto opt_symbol_set_id = frames.symbol_set_id()[frame_row];
        if (opt_symbol_set_id) {
          symbol_name = storage_->GetString(
              storage_->symbol_table().name()[*opt_symbol_set_id]);
        }

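        // Render this frame as "symbol - mapping [build_id]\n", falling back
        // to the frame's hex rel_pc when it has no symbol, e.g.:
        //   main - /system/bin/app [a1b2c3]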
        char frame_entry[1024];
        snprintf(frame_entry, sizeof(frame_entry), "%s - %s [%s]\n",
                 (symbol_name.empty()
                      ? base::Uint64ToHexString(
                            static_cast<uint64_t>(frames.rel_pc()[frame_row]))
                            .c_str()
                      : symbol_name.c_str()),
                 GetNonNullString(storage_, mappings.name()[mapping_row]),
                 GetNonNullString(storage_, mappings.build_id()[mapping_row]));

        callstack.emplace_back(frame_entry);

        opt_callsite_id = callsites.parent_id()[callsite_row];
      }

      std::string merged_callstack;
      for (auto entry = callstack.rbegin(); entry != callstack.rend();
           ++entry) {
        merged_callstack += *entry;
      }

      event["args"]["frames"] = merged_callstack;
      event["args"]["process_priority"] = samples.process_priority()[i];

      // TODO(oysteine): Used for backwards compatibility with the memlog
      // pipeline; should be removed once we've switched to looking directly
      // at the tid.
      event["args"]["thread_id"] = Json::Int(pid_and_tid.second);

      // Emit duration events for adjacent samples with the same callsite.
      // For now, only do this when the trace has already been symbolized,
      // i.e. for samples that are not directly output by Chrome, to avoid
      // interfering with other processing pipelines.
      base::Optional<CallsiteId> opt_current_callsite_id =
          samples.callsite_id()[i];

      if (opt_current_callsite_id && storage_->symbol_table().row_count() > 0) {
        uint64_t parent_event_id = merged_sample_emitter.AddEventForUtid(
            utid, samples.ts()[i], *opt_current_callsite_id, event);
        event["id"] = base::Uint64ToHexString(parent_event_id);
      } else {
        event["id"] = base::Uint64ToHexString(
            MergedProfileSamplesEmitter::GenerateNewEventId());
      }

      writer_.WriteCommonEvent(event);
    }

    return util::OkStatus();
  }

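  // Maps trace_processor metadata rows onto the telemetry metadata block.
  // For example, a benchmark_name row with value "my_benchmark" yields:
  //   {"telemetry":{"benchmarks":["my_benchmark"]}}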
ExportMetadata()1463 util::Status ExportMetadata() {
1464 const auto& trace_metadata = storage_->metadata_table();
1465 const auto& keys = trace_metadata.name();
1466 const auto& int_values = trace_metadata.int_value();
1467 const auto& str_values = trace_metadata.str_value();
1468
1469 // Create a mapping from key string ids to keys.
1470 std::unordered_map<StringId, metadata::KeyId> key_map;
1471 for (uint32_t i = 0; i < metadata::kNumKeys; ++i) {
1472 auto id = *storage_->string_pool().GetId(metadata::kNames[i]);
1473 key_map[id] = static_cast<metadata::KeyId>(i);
1474 }
1475
1476 for (uint32_t pos = 0; pos < trace_metadata.row_count(); pos++) {
1477 auto key_it = key_map.find(keys[pos]);
1478 // Skip exporting dynamic entries; the cr-xxx entries that come from
1479 // the ChromeMetadata proto message are already exported from the raw
1480 // table.
1481 if (key_it == key_map.end())
1482 continue;
1483
1484 // Cast away from enum type, as otherwise -Wswitch-enum will demand an
1485 // exhaustive list of cases, even if there's a default case.
1486 metadata::KeyId key = key_it->second;
1487 switch (static_cast<size_t>(key)) {
1488 case metadata::benchmark_description:
1489 writer_.AppendTelemetryMetadataString(
1490 "benchmarkDescriptions", str_values.GetString(pos).c_str());
1491 break;
1492
1493 case metadata::benchmark_name:
1494 writer_.AppendTelemetryMetadataString(
1495 "benchmarks", str_values.GetString(pos).c_str());
1496 break;
1497
1498 case metadata::benchmark_start_time_us:
1499 writer_.SetTelemetryMetadataTimestamp("benchmarkStart",
1500 *int_values[pos]);
1501 break;
1502
1503 case metadata::benchmark_had_failures:
1504 writer_.AppendTelemetryMetadataBool("hadFailures", *int_values[pos]);
1505 break;
1506
1507 case metadata::benchmark_label:
1508 writer_.AppendTelemetryMetadataString(
1509 "labels", str_values.GetString(pos).c_str());
1510 break;
1511
1512 case metadata::benchmark_story_name:
1513 writer_.AppendTelemetryMetadataString(
1514 "stories", str_values.GetString(pos).c_str());
1515 break;
1516
1517 case metadata::benchmark_story_run_index:
1518 writer_.AppendTelemetryMetadataInt("storysetRepeats",
1519 *int_values[pos]);
1520 break;
1521
1522 case metadata::benchmark_story_run_time_us:
1523 writer_.SetTelemetryMetadataTimestamp("traceStart", *int_values[pos]);
1524 break;
1525
1526 case metadata::benchmark_story_tags: // repeated
1527 writer_.AppendTelemetryMetadataString(
1528 "storyTags", str_values.GetString(pos).c_str());
1529 break;
1530
1531 default:
1532 PERFETTO_DLOG("Ignoring metadata key %zu", static_cast<size_t>(key));
1533 break;
1534 }
1535 }
1536 return util::OkStatus();
1537 }
1538
  util::Status ExportStats() {
    const auto& stats = storage_->stats();

    for (size_t idx = 0; idx < stats::kNumKeys; idx++) {
      if (stats::kTypes[idx] == stats::kSingle) {
        writer_.SetStats(stats::kNames[idx], stats[idx].value);
      } else {
        PERFETTO_DCHECK(stats::kTypes[idx] == stats::kIndexed);
        writer_.SetStats(stats::kNames[idx], stats[idx].indexed_values);
      }
    }

    return util::OkStatus();
  }

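  // Exports each memory snapshot as memory-infra dump events ("ph": "v"):
  // OS dump events for processes with relevant counters or smaps rows, and
  // chrome dump events (allocator nodes plus ownership edges) for each
  // process snapshot.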
  util::Status ExportMemorySnapshots() {
    const auto& memory_snapshots = storage_->memory_snapshot_table();
    base::Optional<StringId> private_footprint_id =
        storage_->string_pool().GetId("chrome.private_footprint_kb");
    base::Optional<StringId> peak_resident_set_id =
        storage_->string_pool().GetId("chrome.peak_resident_set_kb");

    for (uint32_t memory_index = 0;
         memory_index < memory_snapshots.row_count(); ++memory_index) {
      Json::Value event_base;

      event_base["ph"] = "v";
      event_base["cat"] = "disabled-by-default-memory-infra";
      auto snapshot_id = memory_snapshots.id()[memory_index].value;
      event_base["id"] = base::Uint64ToHexString(snapshot_id);
      int64_t snapshot_ts = memory_snapshots.timestamp()[memory_index];
      event_base["ts"] = Json::Int64(snapshot_ts / 1000);
      // TODO(crbug:1116359): Add dump type to the snapshot proto
      // to properly fill event_base["name"]
      event_base["name"] = "periodic_interval";
      event_base["args"]["dumps"]["level_of_detail"] = GetNonNullString(
          storage_, memory_snapshots.detail_level()[memory_index]);

      // Export OS dump events for processes with relevant data.
      const auto& process_table = storage_->process_table();
      for (UniquePid upid = 0; upid < process_table.row_count(); ++upid) {
        Json::Value event =
            FillInProcessEventDetails(event_base, process_table.pid()[upid]);
        Json::Value& totals = event["args"]["dumps"]["process_totals"];

        const auto& process_counters = storage_->process_counter_track_table();

        for (uint32_t counter_index = 0;
             counter_index < process_counters.row_count(); ++counter_index) {
          if (process_counters.upid()[counter_index] != upid)
            continue;
          TrackId track_id = process_counters.id()[counter_index];
          if (private_footprint_id && (process_counters.name()[counter_index] ==
                                       private_footprint_id)) {
            totals["private_footprint_bytes"] = base::Uint64ToHexStringNoPrefix(
                GetCounterValue(track_id, snapshot_ts));
          } else if (peak_resident_set_id &&
                     (process_counters.name()[counter_index] ==
                      peak_resident_set_id)) {
            totals["peak_resident_set_size"] = base::Uint64ToHexStringNoPrefix(
                GetCounterValue(track_id, snapshot_ts));
          }
        }

        auto process_args_id = process_table.arg_set_id()[upid];
        if (process_args_id) {
          const Json::Value* process_args =
              &args_builder_.GetArgs(process_args_id);
          if (process_args->isMember("is_peak_rss_resettable")) {
            totals["is_peak_rss_resettable"] =
                (*process_args)["is_peak_rss_resettable"];
          }
        }
        const auto& smaps_table = storage_->profiler_smaps_table();
        // Do not create an empty vm_regions array when there are no memory
        // maps: catapult expects vm_regions, when present, to contain rows.
        Json::Value* smaps =
            smaps_table.row_count() > 0
                ? &event["args"]["dumps"]["process_mmaps"]["vm_regions"]
                : nullptr;
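        // The keys below follow catapult's compact vm_region schema, as can
        // be read off the assignments: "mf" mapped file, "pf" protection
        // flags, "sa" start address, "sz" size in bytes, "ts" module
        // timestamp, "id" module debugid, "df" module debug path, and "bs"
        // (byte stats) with "pc"/"pd" private clean/dirty, "pss"
        // proportional, "sc"/"sd" shared clean/dirty and "sw" swap.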
        for (uint32_t smaps_index = 0; smaps_index < smaps_table.row_count();
             ++smaps_index) {
          if (smaps_table.upid()[smaps_index] != upid)
            continue;
          if (smaps_table.ts()[smaps_index] != snapshot_ts)
            continue;
          Json::Value region;
          region["mf"] =
              GetNonNullString(storage_, smaps_table.file_name()[smaps_index]);
          region["pf"] =
              Json::Int64(smaps_table.protection_flags()[smaps_index]);
          region["sa"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(smaps_table.start_address()[smaps_index]));
          region["sz"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(smaps_table.size_kb()[smaps_index]) * 1024);
          region["ts"] =
              Json::Int64(smaps_table.module_timestamp()[smaps_index]);
          region["id"] = GetNonNullString(
              storage_, smaps_table.module_debugid()[smaps_index]);
          region["df"] = GetNonNullString(
              storage_, smaps_table.module_debug_path()[smaps_index]);
          region["bs"]["pc"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.private_clean_resident_kb()[smaps_index]) *
              1024);
          region["bs"]["pd"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.private_dirty_kb()[smaps_index]) *
              1024);
          region["bs"]["pss"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.proportional_resident_kb()[smaps_index]) *
              1024);
          region["bs"]["sc"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.shared_clean_resident_kb()[smaps_index]) *
              1024);
          region["bs"]["sd"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(
                  smaps_table.shared_dirty_resident_kb()[smaps_index]) *
              1024);
          region["bs"]["sw"] = base::Uint64ToHexStringNoPrefix(
              static_cast<uint64_t>(smaps_table.swap_kb()[smaps_index]) * 1024);
          smaps->append(region);
        }

        if (!totals.empty() || (smaps && !smaps->empty()))
          writer_.WriteCommonEvent(event);
      }

      // Export chrome dump events for process snapshots in the current
      // memory snapshot.
      const auto& process_snapshots = storage_->process_memory_snapshot_table();

      for (uint32_t process_index = 0;
           process_index < process_snapshots.row_count(); ++process_index) {
        if (process_snapshots.snapshot_id()[process_index].value != snapshot_id)
          continue;

        auto process_snapshot_id = process_snapshots.id()[process_index].value;
        uint32_t pid = UpidToPid(process_snapshots.upid()[process_index]);

        // Shared memory nodes are imported into a fake process with pid 0.
        // Catapult expects them to be associated with one of the real
        // processes of the snapshot, so we choose the first one we can find
        // and replace the pid.
        if (pid == 0) {
          for (uint32_t i = 0; i < process_snapshots.row_count(); ++i) {
            if (process_snapshots.snapshot_id()[i].value != snapshot_id)
              continue;
            uint32_t new_pid = UpidToPid(process_snapshots.upid()[i]);
            if (new_pid != 0) {
              pid = new_pid;
              break;
            }
          }
        }

        Json::Value event = FillInProcessEventDetails(event_base, pid);

        const auto& snapshot_nodes = storage_->memory_snapshot_node_table();

        for (uint32_t node_index = 0; node_index < snapshot_nodes.row_count();
             ++node_index) {
          if (snapshot_nodes.process_snapshot_id()[node_index].value !=
              process_snapshot_id) {
            continue;
          }
          const char* path =
              GetNonNullString(storage_, snapshot_nodes.path()[node_index]);
          event["args"]["dumps"]["allocators"][path]["guid"] =
              base::Uint64ToHexStringNoPrefix(
                  static_cast<uint64_t>(snapshot_nodes.id()[node_index].value));
          if (snapshot_nodes.size()[node_index]) {
            AddAttributeToMemoryNode(&event, path, "size",
                                     snapshot_nodes.size()[node_index],
                                     "bytes");
          }
          if (snapshot_nodes.effective_size()[node_index]) {
            AddAttributeToMemoryNode(
                &event, path, "effective_size",
                snapshot_nodes.effective_size()[node_index], "bytes");
          }

          auto node_args_id = snapshot_nodes.arg_set_id()[node_index];
          if (!node_args_id)
            continue;
          const Json::Value* node_args =
              &args_builder_.GetArgs(node_args_id.value());
          for (const auto& arg_name : node_args->getMemberNames()) {
            const Json::Value& arg_value = (*node_args)[arg_name]["value"];
            if (arg_value.empty())
              continue;
            if (arg_value.isString()) {
              AddAttributeToMemoryNode(&event, path, arg_name,
                                       arg_value.asString());
            } else if (arg_value.isInt64()) {
              Json::Value unit = (*node_args)[arg_name]["unit"];
              if (unit.empty())
                unit = "unknown";
              AddAttributeToMemoryNode(&event, path, arg_name,
                                       arg_value.asInt64(), unit.asString());
            }
          }
        }

        const auto& snapshot_edges = storage_->memory_snapshot_edge_table();

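        // Each ownership edge is exported by the process snapshot that owns
        // its source node; edges whose source node belongs to another
        // process snapshot are skipped here and emitted with that process
        // instead.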
        for (uint32_t edge_index = 0; edge_index < snapshot_edges.row_count();
             ++edge_index) {
          SnapshotNodeId source_node_id =
              snapshot_edges.source_node_id()[edge_index];
          uint32_t source_node_row =
              *snapshot_nodes.id().IndexOf(source_node_id);

          if (snapshot_nodes.process_snapshot_id()[source_node_row].value !=
              process_snapshot_id) {
            continue;
          }
          Json::Value edge;
          edge["source"] = base::Uint64ToHexStringNoPrefix(
              snapshot_edges.source_node_id()[edge_index].value);
          edge["target"] = base::Uint64ToHexStringNoPrefix(
              snapshot_edges.target_node_id()[edge_index].value);
          edge["importance"] =
              Json::Int(snapshot_edges.importance()[edge_index]);
          edge["type"] = "ownership";
          event["args"]["dumps"]["allocators_graph"].append(edge);
        }
        writer_.WriteCommonEvent(event);
      }
    }
    return util::OkStatus();
  }

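  // Returns the pid that was exported for |upid|, taking pid-reuse
  // substitution into account.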
  uint32_t UpidToPid(UniquePid upid) {
    auto pid_it = upids_to_exported_pids_.find(upid);
    PERFETTO_DCHECK(pid_it != upids_to_exported_pids_.end());
    return pid_it->second;
  }

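  // Returns the (pid, tid) pair that was exported for |utid|, taking tid-reuse
  // substitution into account.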
  std::pair<uint32_t, uint32_t> UtidToPidAndTid(UniqueTid utid) {
    auto pid_and_tid_it = utids_to_exported_pids_and_tids_.find(utid);
    PERFETTO_DCHECK(pid_and_tid_it != utids_to_exported_pids_and_tids_.end());
    return pid_and_tid_it->second;
  }

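  // Returns the next substitute pid/tid for a duplicate, counting down from
  // uint32_t max.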
  uint32_t NextExportedPidOrTidForDuplicates() {
    // Ensure that the exported substitute value does not collide with a
    // valid pid/tid in the trace; such collisions would be very unlikely in
    // practice.
    while (IsValidPidOrTid(next_exported_pid_or_tid_for_duplicates_))
      next_exported_pid_or_tid_for_duplicates_--;
    return next_exported_pid_or_tid_for_duplicates_--;
  }

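  // Returns true if |pid_or_tid| appears as a pid or tid anywhere in the
  // trace.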
  bool IsValidPidOrTid(uint32_t pid_or_tid) {
    const auto& process_table = storage_->process_table();
    for (UniquePid upid = 0; upid < process_table.row_count(); upid++) {
      if (process_table.pid()[upid] == pid_or_tid)
        return true;
    }

    const auto& thread_table = storage_->thread_table();
    for (UniqueTid utid = 0; utid < thread_table.row_count(); utid++) {
      if (thread_table.tid()[utid] == pid_or_tid)
        return true;
    }

    return false;
  }

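  // Returns a copy of |event| with "pid" filled in and "tid" set to -1 (no
  // specific thread).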
  Json::Value FillInProcessEventDetails(const Json::Value& event,
                                        uint32_t pid) {
    Json::Value output = event;
    output["pid"] = Json::Int(pid);
    output["tid"] = Json::Int(-1);
    return output;
  }

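  // Writes a scalar attribute (hex-encoded value, type and units) onto the
  // allocator node at |path|.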
  void AddAttributeToMemoryNode(Json::Value* event,
                                const std::string& path,
                                const std::string& key,
                                int64_t value,
                                const std::string& units) {
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
        base::Uint64ToHexStringNoPrefix(static_cast<uint64_t>(value));
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
        "scalar";
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
        units;
  }

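  // String-valued overload of the above; units default to empty.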
  void AddAttributeToMemoryNode(Json::Value* event,
                                const std::string& path,
                                const std::string& key,
                                const std::string& value,
                                const std::string& units = "") {
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["value"] =
        value;
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["type"] =
        "string";
    (*event)["args"]["dumps"]["allocators"][path]["attrs"][key]["units"] =
        units;
  }

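  // Returns the value of the counter on |track_id| at exactly timestamp |ts|,
  // or 0 if there is no sample at that timestamp.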
  uint64_t GetCounterValue(TrackId track_id, int64_t ts) {
    const auto& counter_table = storage_->counter_table();
    auto begin = counter_table.ts().begin();
    auto end = counter_table.ts().end();
    PERFETTO_DCHECK(counter_table.ts().IsSorted() &&
                    counter_table.ts().IsColumnType<int64_t>());
    // The timestamp column is sorted, so we can binary search for a matching
    // timestamp. Note that we don't use RowMap operations like FilterInto()
    // here because they bloat trace processor's binary size in Chrome too
    // much.
    auto it = std::lower_bound(begin, end, ts,
                               [](const SqlValue& value, int64_t expected_ts) {
                                 return value.AsLong() < expected_ts;
                               });
    for (; it < end; ++it) {
      if ((*it).AsLong() != ts)
        break;
      if (counter_table.track_id()[it.row()].value == track_id.value)
        return static_cast<uint64_t>(counter_table.value()[it.row()]);
    }
    return 0;
  }

  const TraceStorage* storage_;
  ArgsBuilder args_builder_;
  TraceFormatWriter writer_;

  // If a pid/tid is duplicated between two or more different
  // processes/threads (pid/tid reuse), we export the subsequent occurrences
  // with substitute pids/tids that are visibly different from regular
  // pids/tids, counting down from uint32_t max.
  uint32_t next_exported_pid_or_tid_for_duplicates_ =
      std::numeric_limits<uint32_t>::max();

  std::map<UniquePid, uint32_t> upids_to_exported_pids_;
  std::map<uint32_t, UniquePid> exported_pids_to_upids_;
  std::map<UniqueTid, std::pair<uint32_t, uint32_t>>
      utids_to_exported_pids_and_tids_;
  std::map<std::pair<uint32_t, uint32_t>, UniqueTid>
      exported_pids_and_tids_to_utids_;
};

#endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)

}  // namespace

OutputWriter::OutputWriter() = default;
OutputWriter::~OutputWriter() = default;

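// Exports the contents of |storage| as Chrome JSON to |output|, applying the
// given argument, metadata and label filters. Fails if JSON support is not
// compiled into this build.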
util::Status ExportJson(const TraceStorage* storage,
                        OutputWriter* output,
                        ArgumentFilterPredicate argument_filter,
                        MetadataFilterPredicate metadata_filter,
                        LabelFilterPredicate label_filter) {
#if PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
  JsonExporter exporter(storage, output, std::move(argument_filter),
                        std::move(metadata_filter), std::move(label_filter));
  return exporter.Export();
#else
  perfetto::base::ignore_result(storage);
  perfetto::base::ignore_result(output);
  perfetto::base::ignore_result(argument_filter);
  perfetto::base::ignore_result(metadata_filter);
  perfetto::base::ignore_result(label_filter);
  return util::ErrStatus("JSON support is not compiled in this build");
#endif  // PERFETTO_BUILDFLAG(PERFETTO_TP_JSON)
}

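// Overload taking a TraceProcessorStorage*; unwraps the underlying
// TraceStorage and forwards to the overload above.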
util::Status ExportJson(TraceProcessorStorage* tp,
                        OutputWriter* output,
                        ArgumentFilterPredicate argument_filter,
                        MetadataFilterPredicate metadata_filter,
                        LabelFilterPredicate label_filter) {
  const TraceStorage* storage = reinterpret_cast<TraceProcessorStorageImpl*>(tp)
                                    ->context()
                                    ->storage.get();
  return ExportJson(storage, output, argument_filter, metadata_filter,
                    label_filter);
}

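// Convenience overload writing to a FILE* with no filtering. A minimal usage
// sketch (the caller is assumed to have populated |storage| and owns the
// FILE*):
//
//   FILE* f = fopen("trace.json", "w");
//   if (f) {
//     util::Status status = ExportJson(storage, f);
//     fclose(f);
//   }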
util::Status ExportJson(const TraceStorage* storage, FILE* output) {
  FileWriter writer(output);
  return ExportJson(storage, &writer, nullptr, nullptr, nullptr);
}

}  // namespace json
}  // namespace trace_processor
}  // namespace perfetto