1 // Copyright 2015 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/trace_event/trace_log.h"
6
7 #include <cmath>
8 #include <limits>
9 #include <memory>
10 #include <unordered_set>
11 #include <utility>
12
13 #include "base/auto_reset.h"
14 #include "base/base_switches.h"
15 #include "base/command_line.h"
16 #include "base/containers/contains.h"
17 #include "base/debug/leak_annotations.h"
18 #include "base/format_macros.h"
19 #include "base/functional/bind.h"
20 #include "base/location.h"
21 #include "base/logging.h"
22 #include "base/memory/ptr_util.h"
23 #include "base/memory/raw_ptr.h"
24 #include "base/memory/raw_ptr_exclusion.h"
25 #include "base/memory/ref_counted_memory.h"
26 #include "base/no_destructor.h"
27 #include "base/notreached.h"
28 #include "base/process/process.h"
29 #include "base/process/process_metrics.h"
30 #include "base/ranges/algorithm.h"
31 #include "base/strings/string_number_conversions.h"
32 #include "base/strings/string_piece.h"
33 #include "base/strings/string_split.h"
34 #include "base/strings/string_tokenizer.h"
35 #include "base/strings/stringprintf.h"
36 #include "base/system/sys_info.h"
37 #include "base/task/current_thread.h"
38 #include "base/task/sequenced_task_runner.h"
39 #include "base/task/single_thread_task_runner.h"
40 #include "base/task/thread_pool.h"
41 #include "base/threading/platform_thread.h"
42 #include "base/threading/thread_id_name_manager.h"
43 #include "base/time/time.h"
44 #include "base/trace_event/heap_profiler.h"
45 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
46 #include "base/trace_event/memory_dump_manager.h"
47 #include "base/trace_event/memory_dump_provider.h"
48 #include "base/trace_event/process_memory_dump.h"
49 #include "base/trace_event/trace_buffer.h"
50 #include "base/trace_event/trace_event.h"
51 #include "build/build_config.h"
52
53 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
54 #include "base/numerics/safe_conversions.h"
55 #include "base/run_loop.h"
56 #include "base/task/thread_pool/thread_pool_instance.h"
57 #include "base/tracing/perfetto_platform.h"
58 #include "third_party/perfetto/include/perfetto/ext/trace_processor/export_json.h" // nogncheck
59 #include "third_party/perfetto/include/perfetto/trace_processor/trace_processor_storage.h" // nogncheck
60 #include "third_party/perfetto/include/perfetto/tracing/console_interceptor.h"
61 #include "third_party/perfetto/protos/perfetto/config/chrome/chrome_config.gen.h" // nogncheck
62 #include "third_party/perfetto/protos/perfetto/config/interceptor_config.gen.h" // nogncheck
63 #include "third_party/perfetto/protos/perfetto/trace/track_event/process_descriptor.gen.h" // nogncheck
64 #include "third_party/perfetto/protos/perfetto/trace/track_event/thread_descriptor.gen.h" // nogncheck
65 #endif
66
67 #if BUILDFLAG(IS_WIN)
68 #include "base/trace_event/trace_event_etw_export_win.h"
69 #endif
70
71 #if BUILDFLAG(IS_ANDROID)
72 #include "base/debug/elf_reader.h"
73
74 // The linker assigns the virtual address of the start of current library to
75 // this symbol.
76 extern char __executable_start;
77 #endif
78
79 namespace base {
80 namespace trace_event {
81
82 namespace {
83
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;

// Chunk budget for the "big" vector buffer (~512MB worth of events).
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many big buffer chunks");
// Chunk budget for the regular vector buffer.
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many vector buffer chunks");
// The ring buffer keeps a quarter of the regular vector buffer's capacity.
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;

// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const size_t kTraceEventBufferSizeInBytes = 100 * 1024;

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// Whether TraceLog itself (rather than an embedder) initialized Perfetto.
bool g_perfetto_initialized_by_tracelog = false;
#else
// How long a flush waits on individual threads before giving up on them.
constexpr TimeDelta kThreadFlushTimeout = Seconds(3);
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

// Points at the live singleton so ResetForTesting() can destroy and
// re-create it in place.
TraceLog* g_trace_log_for_testing = nullptr;

// Per-thread staging buffer for trace events; null until the thread opts in
// via InitializeThreadLocalEventBufferIfSupported().
ABSL_CONST_INIT thread_local TraceLog::ThreadLocalEventBuffer*
    thread_local_event_buffer = nullptr;
// True when this thread's message loop may block, in which case events are
// written to the main buffer directly instead of a thread-local buffer.
ABSL_CONST_INIT thread_local bool thread_blocks_message_loop = false;
// Re-entrancy flag for trace-event emission on this thread (consumers of the
// flag are outside this chunk).
ABSL_CONST_INIT thread_local bool thread_is_in_trace_event = false;
116
ThreadNow()117 ThreadTicks ThreadNow() {
118 return ThreadTicks::IsSupported()
119 ? base::subtle::ThreadTicksNowIgnoringOverride()
120 : ThreadTicks();
121 }
122
123 template <typename T>
InitializeMetadataEvent(TraceEvent * trace_event,PlatformThreadId thread_id,const char * metadata_name,const char * arg_name,const T & value)124 void InitializeMetadataEvent(TraceEvent* trace_event,
125 PlatformThreadId thread_id,
126 const char* metadata_name,
127 const char* arg_name,
128 const T& value) {
129 if (!trace_event)
130 return;
131
132 TraceArguments args(arg_name, value);
133 base::TimeTicks now = TRACE_TIME_TICKS_NOW();
134 ThreadTicks thread_now;
135 trace_event->Reset(
136 thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
137 TraceLog::GetInstance()->GetCategoryGroupEnabled("__metadata"),
138 metadata_name,
139 trace_event_internal::kGlobalScope, // scope
140 trace_event_internal::kNoId, // id
141 trace_event_internal::kNoId, // bind_id
142 &args, TRACE_EVENT_FLAG_NONE);
143 }
144
145 // Use this function instead of TraceEventHandle constructor to keep the
146 // overhead of ScopedTracer (trace_event.h) constructor minimum.
MakeHandle(uint32_t chunk_seq,size_t chunk_index,size_t event_index,TraceEventHandle * handle)147 void MakeHandle(uint32_t chunk_seq,
148 size_t chunk_index,
149 size_t event_index,
150 TraceEventHandle* handle) {
151 DCHECK(chunk_seq);
152 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
153 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
154 DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
155 handle->chunk_seq = chunk_seq;
156 handle->chunk_index = static_cast<uint16_t>(chunk_index);
157 handle->event_index = static_cast<uint16_t>(event_index);
158 }
159
160 // The fallback arguments filtering function will filter away every argument.
DefaultIsTraceEventArgsAllowlisted(const char * category_group_name,const char * event_name,base::trace_event::ArgumentNameFilterPredicate * arg_name_filter)161 bool DefaultIsTraceEventArgsAllowlisted(
162 const char* category_group_name,
163 const char* event_name,
164 base::trace_event::ArgumentNameFilterPredicate* arg_name_filter) {
165 return false;
166 }
167
168 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
169 class PerfettoProtoAppender
170 : public base::trace_event::ConvertableToTraceFormat::ProtoAppender {
171 public:
PerfettoProtoAppender(perfetto::protos::pbzero::DebugAnnotation * proto)172 explicit PerfettoProtoAppender(
173 perfetto::protos::pbzero::DebugAnnotation* proto)
174 : annotation_proto_(proto) {}
175 ~PerfettoProtoAppender() override = default;
176
177 // ProtoAppender implementation
AddBuffer(uint8_t * begin,uint8_t * end)178 void AddBuffer(uint8_t* begin, uint8_t* end) override {
179 ranges_.emplace_back();
180 ranges_.back().begin = begin;
181 ranges_.back().end = end;
182 }
183
Finalize(uint32_t field_id)184 size_t Finalize(uint32_t field_id) override {
185 return annotation_proto_->AppendScatteredBytes(field_id, ranges_.data(),
186 ranges_.size());
187 }
188
189 private:
190 std::vector<protozero::ContiguousMemoryRange> ranges_;
191 raw_ptr<perfetto::protos::pbzero::DebugAnnotation> annotation_proto_;
192 };
193
AddConvertableToTraceFormat(base::trace_event::ConvertableToTraceFormat * value,perfetto::protos::pbzero::DebugAnnotation * annotation)194 void AddConvertableToTraceFormat(
195 base::trace_event::ConvertableToTraceFormat* value,
196 perfetto::protos::pbzero::DebugAnnotation* annotation) {
197 PerfettoProtoAppender proto_appender(annotation);
198 if (value->AppendToProto(&proto_appender)) {
199 return;
200 }
201
202 std::string json;
203 value->AppendAsTraceFormat(&json);
204 annotation->set_legacy_json_value(json.c_str());
205 }
206
// Copies every argument of |trace_event| into |track_event| as a
// DebugAnnotation, dispatching on the legacy TRACE_VALUE_TYPE_* tag.
void WriteDebugAnnotations(base::trace_event::TraceEvent* trace_event,
                           perfetto::protos::pbzero::TrackEvent* track_event) {
  // Argument slots are packed; the first slot without a name ends the list.
  for (size_t i = 0; i < trace_event->arg_size() && trace_event->arg_name(i);
       ++i) {
    auto type = trace_event->arg_type(i);
    auto* annotation = track_event->add_debug_annotations();

    annotation->set_name(trace_event->arg_name(i));

    if (type == TRACE_VALUE_TYPE_CONVERTABLE) {
      // Convertable values serialize themselves (proto, or JSON fallback).
      AddConvertableToTraceFormat(trace_event->arg_convertible_value(i),
                                  annotation);
      continue;
    }

    auto& value = trace_event->arg_value(i);
    switch (type) {
      case TRACE_VALUE_TYPE_BOOL:
        annotation->set_bool_value(value.as_bool);
        break;
      case TRACE_VALUE_TYPE_UINT:
        annotation->set_uint_value(value.as_uint);
        break;
      case TRACE_VALUE_TYPE_INT:
        annotation->set_int_value(value.as_int);
        break;
      case TRACE_VALUE_TYPE_DOUBLE:
        annotation->set_double_value(value.as_double);
        break;
      case TRACE_VALUE_TYPE_POINTER:
        // Round-trip through uintptr_t so the conversion is well-defined on
        // platforms where pointers are narrower than 64 bits.
        annotation->set_pointer_value(static_cast<uint64_t>(
            reinterpret_cast<uintptr_t>(value.as_pointer)));
        break;
      case TRACE_VALUE_TYPE_STRING:
      case TRACE_VALUE_TYPE_COPY_STRING:
        annotation->set_string_value(value.as_string ? value.as_string
                                                     : "NULL");
        break;
      case TRACE_VALUE_TYPE_PROTO: {
        auto data = value.as_proto->SerializeAsArray();
        annotation->AppendRawProtoBytes(data.data(), data.size());
      } break;

      default:
        NOTREACHED() << "Don't know how to serialize this value";
        break;
    }
  }
}
256
// Routes a legacy TRACE_EVENT_API_ADD_TRACE_EVENT call into the Perfetto
// track-event machinery: picks the destination track from the event's
// scope/thread and attaches debug annotations plus any legacy id via a
// LegacyEvent message. Installed via SetAddTraceEventOverrides().
void OnAddLegacyTraceEvent(TraceEvent* trace_event,
                           bool thread_will_flush,
                           base::trace_event::TraceEventHandle* handle) {
  perfetto::DynamicCategory category(
      TraceLog::GetInstance()->GetCategoryGroupName(
          trace_event->category_group_enabled()));
  // Deferred argument writer, invoked by the emit macros below while the
  // track-event proto is open.
  auto write_args = [trace_event](perfetto::EventContext ctx) {
    WriteDebugAnnotations(trace_event, ctx.event());
    uint32_t id_flags = trace_event->flags() & (TRACE_EVENT_FLAG_HAS_ID |
                                                TRACE_EVENT_FLAG_HAS_LOCAL_ID |
                                                TRACE_EVENT_FLAG_HAS_GLOBAL_ID);
    // Events without a legacy id whose phase maps to a first-class
    // track-event type need no LegacyEvent message at all.
    if (!id_flags &&
        perfetto::internal::TrackEventLegacy::PhaseToType(
            trace_event->phase()) !=
            perfetto::protos::pbzero::TrackEvent::TYPE_UNSPECIFIED) {
      return;
    }
    auto* legacy_event = ctx.event()->set_legacy_event();
    legacy_event->set_phase(trace_event->phase());
    switch (id_flags) {
      case TRACE_EVENT_FLAG_HAS_ID:
        legacy_event->set_unscoped_id(trace_event->id());
        break;
      case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
        legacy_event->set_local_id(trace_event->id());
        break;
      case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
        legacy_event->set_global_id(trace_event->id());
        break;
      default:
        break;
    }
  };

  auto phase = trace_event->phase();
  auto flags = trace_event->flags();
  // Honor an explicit timestamp when the event carries one.
  base::TimeTicks timestamp = trace_event->timestamp().is_null()
                                  ? TRACE_TIME_TICKS_NOW()
                                  : trace_event->timestamp();
  if (phase == TRACE_EVENT_PHASE_COMPLETE) {
    // Perfetto has no COMPLETE phase: emit BEGIN now; the matching END is
    // emitted later by OnUpdateLegacyTraceEventDuration().
    phase = TRACE_EVENT_PHASE_BEGIN;
  } else if (phase == TRACE_EVENT_PHASE_INSTANT) {
    // Global- and process-scoped instants get dedicated tracks; thread scope
    // falls through to the per-thread handling below.
    auto scope = flags & TRACE_EVENT_FLAG_SCOPE_MASK;
    switch (scope) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
            phase, category, trace_event->name(), ::perfetto::Track::Global(0),
            timestamp, write_args);
        return;
      case TRACE_EVENT_SCOPE_PROCESS:
        PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
            phase, category, trace_event->name(),
            ::perfetto::ProcessTrack::Current(), timestamp, write_args);
        return;
      default:
      case TRACE_EVENT_SCOPE_THREAD: /* Fallthrough. */
        break;
    }
  }
  // Events explicitly attributed to another thread go on that thread's track.
  if (trace_event->thread_id() &&
      trace_event->thread_id() != base::PlatformThread::CurrentId()) {
    PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
        phase, category, trace_event->name(),
        perfetto::ThreadTrack::ForThread(trace_event->thread_id()), timestamp,
        write_args);
    return;
  }
  PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
      phase, category, trace_event->name(),
      perfetto::internal::TrackEventInternal::kDefaultTrack, timestamp,
      write_args);
}
329
// Emits the END half of a legacy COMPLETE event through Perfetto, mirroring
// the BEGIN emitted by OnAddLegacyTraceEvent(). Installed via
// SetAddTraceEventOverrides().
void OnUpdateLegacyTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle,
    PlatformThreadId thread_id,
    bool explicit_timestamps,
    const TimeTicks& now,
    const ThreadTicks& thread_now) {
  perfetto::DynamicCategory category(
      TraceLog::GetInstance()->GetCategoryGroupName(category_group_enabled));
  auto phase = TRACE_EVENT_PHASE_END;
  // Only trust |now| when the caller supplied an explicit timestamp.
  base::TimeTicks timestamp =
      explicit_timestamps ? now : TRACE_TIME_TICKS_NOW();
  // Match the track selection done when the BEGIN was emitted: another
  // thread's track when explicitly attributed, otherwise the default track.
  if (thread_id && thread_id != base::PlatformThread::CurrentId()) {
    PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
        phase, category, name, perfetto::ThreadTrack::ForThread(thread_id),
        timestamp);
    return;
  }
  PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
      phase, category, name,
      perfetto::internal::TrackEventInternal::kDefaultTrack, timestamp);
}
353 #endif // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
354
355 } // namespace
356
357 #if BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
namespace {
// Perfetto provides us with a fully formed JSON trace file, while
// TraceResultBuffer wants individual JSON fragments without a containing
// object. We therefore need to strip away the outer object, including the
// metadata fields, from the JSON stream. These markers delimit the parts
// JsonStringOutputWriter removes.
static constexpr char kJsonPrefix[] = "{\"traceEvents\":[\n";
static constexpr char kJsonJoiner[] = ",\n";
static constexpr char kJsonSuffix[] = "],\"metadata\":";
}  // namespace
367
// Adapts perfetto's JSON export stream to TraceLog's chunked OutputCallback:
// strips the enclosing object (kJsonPrefix/kJsonSuffix) from the stream and
// forwards ~100KB fragments to |flush_callback_|, optionally posting through
// |flush_task_runner_|.
class JsonStringOutputWriter
    : public perfetto::trace_processor::json::OutputWriter {
 public:
  JsonStringOutputWriter(scoped_refptr<SequencedTaskRunner> flush_task_runner,
                         TraceLog::OutputCallback flush_callback)
      : flush_task_runner_(flush_task_runner),
        flush_callback_(std::move(flush_callback)) {
    buffer_->data().reserve(kBufferReserveCapacity);
  }

  // Delivers whatever remains buffered; has_more=false marks the final chunk.
  ~JsonStringOutputWriter() override { Flush(/*has_more=*/false); }

  perfetto::trace_processor::util::Status AppendString(
      const std::string& string) override {
    if (!did_strip_prefix_) {
      // The first fragment is always the outer object opener; drop it.
      DCHECK_EQ(string, kJsonPrefix);
      did_strip_prefix_ = true;
      return perfetto::trace_processor::util::OkStatus();
    } else if (buffer_->data().empty() &&
               !strncmp(string.c_str(), kJsonJoiner, strlen(kJsonJoiner))) {
      // We only remove the leading joiner comma for the first chunk in a buffer
      // since the consumer is expected to insert commas between the buffers we
      // provide.
      buffer_->data() += string.substr(strlen(kJsonJoiner));
    } else if (!strncmp(string.c_str(), kJsonSuffix, strlen(kJsonSuffix))) {
      // Reached the closing "],\"metadata\":" tail; drop it (and implicitly
      // everything the stream appends after it).
      return perfetto::trace_processor::util::OkStatus();
    } else {
      buffer_->data() += string;
    }
    if (buffer_->data().size() > kBufferLimitInBytes) {
      Flush(/*has_more=*/true);
      // Reset the buffer_ after moving it above.
      buffer_ = new RefCountedString();
      buffer_->data().reserve(kBufferReserveCapacity);
    }
    return perfetto::trace_processor::util::OkStatus();
  }

 private:
  // Hands |buffer_| to the callback, bouncing through the task runner when
  // one was supplied. |buffer_| is moved from and must be re-created by the
  // caller if it will be used again.
  void Flush(bool has_more) {
    if (flush_task_runner_) {
      flush_task_runner_->PostTask(
          FROM_HERE,
          base::BindOnce(flush_callback_, std::move(buffer_), has_more));
    } else {
      flush_callback_.Run(std::move(buffer_), has_more);
    }
  }

  static constexpr size_t kBufferLimitInBytes = 100 * 1024;
  // Since we write each string before checking the limit, we'll always go
  // slightly over and hence we reserve some extra space to avoid most
  // reallocs.
  static constexpr size_t kBufferReserveCapacity = kBufferLimitInBytes * 5 / 4;

  scoped_refptr<SequencedTaskRunner> flush_task_runner_;
  TraceLog::OutputCallback flush_callback_;
  scoped_refptr<RefCountedString> buffer_ = new RefCountedString();
  bool did_strip_prefix_ = false;
};
428 #endif // BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
429
430 // A helper class that allows the lock to be acquired in the middle of the scope
431 // and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  // Does not acquire |lock|; call EnsureAcquired() when the lock is needed.
  explicit OptionalAutoLock(Lock* lock) : lock_(lock) {}

  OptionalAutoLock(const OptionalAutoLock&) = delete;
  OptionalAutoLock& operator=(const OptionalAutoLock&) = delete;

  // Releases the lock only if EnsureAcquired() was called in this scope.
  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  // Acquires |lock_| on the first call; later calls only assert that it is
  // already held by this scope.
  void EnsureAcquired() EXCLUSIVE_LOCK_FUNCTION(lock_) {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    } else {
      lock_->AssertAcquired();
    }
  }

 private:
  // This field is not a raw_ptr<> because it is needed for lock annotations.
  RAW_PTR_EXCLUSION Lock* lock_;
  bool locked_ = false;
};
458
// Per-thread staging area for trace events. A thread with a message loop
// checks chunks out of TraceLog's shared buffer and fills them locally,
// taking TraceLog's lock only when exchanging chunks rather than per event.
// The buffer deletes itself when its thread's message loop is destroyed.
class TraceLog::ThreadLocalEventBuffer
    : public CurrentThread::DestructionObserver,
      public MemoryDumpProvider {
 public:
  explicit ThreadLocalEventBuffer(TraceLog* trace_log);
  ThreadLocalEventBuffer(const ThreadLocalEventBuffer&) = delete;
  ThreadLocalEventBuffer& operator=(const ThreadLocalEventBuffer&) = delete;
  ~ThreadLocalEventBuffer() override;

  // Returns a slot for a new event (fetching a fresh chunk when needed), or
  // null when no chunk is available. On success fills |handle| (if non-null)
  // so the event can be located later.
  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  // Resolves |handle| back to its event, or null when the handle refers to a
  // chunk other than the one currently checked out.
  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_) {
      return nullptr;
    }

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // CurrentThread::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(const MemoryDumpArgs& args,
                    ProcessMemoryDump* pmd) override;

  // Returns the current chunk to TraceLog's buffer; trace_log_->lock_ must
  // be held.
  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK_EQ(thread_local_event_buffer, this);
  }

  const AutoReset<ThreadLocalEventBuffer*> resetter_{&thread_local_event_buffer,
                                                     this, nullptr};
  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  raw_ptr<TraceLog> trace_log_;
  std::unique_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_ = 0;
  // TraceLog generation at construction time; on mismatch the chunk is not
  // returned to the (new) buffer — see FlushWhileLocked().
  int generation_;
};
504
TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop, so
  // the following message_loop won't be NULL.
  CurrentThread::Get()->AddDestructionObserver(this);

  // This is to report the local memory usage when memory-infra is enabled.
  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "ThreadLocalEventBuffer",
      SingleThreadTaskRunner::GetCurrentDefault());

  auto thread_id = PlatformThread::CurrentId();

  // Record this thread's task runner so TraceLog can post work back to it
  // later (e.g. during flushes).
  AutoLock lock(trace_log->lock_);
  trace_log->thread_task_runners_[thread_id] =
      SingleThreadTaskRunner::GetCurrentDefault();
}
523
TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  CurrentThread::Get()->RemoveDestructionObserver(this);
  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

  // Return the outstanding chunk and drop this thread's task-runner entry.
  AutoLock lock(trace_log_->lock_);
  FlushWhileLocked();
  trace_log_->thread_task_runners_.erase(PlatformThread::CurrentId());
}
533
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  // Return a full chunk before requesting a new one. TraceLog's lock is
  // taken only while exchanging chunks, never per event.
  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  // GetChunk() can fail (e.g. when the buffer is full).
  if (!chunk_)
    return nullptr;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  // Only fill |handle| for a successfully allocated event slot.
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}
558
void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  // Ties the buffer's lifetime to its thread's message loop; the destructor
  // flushes the chunk and unregisters all observers.
  delete this;
}
562
OnMemoryDump(const MemoryDumpArgs & args,ProcessMemoryDump * pmd)563 bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
564 ProcessMemoryDump* pmd) {
565 if (!chunk_)
566 return true;
567 std::string dump_base_name =
568 "tracing/thread_" + NumberToString(PlatformThread::CurrentId());
569 TraceEventMemoryOverhead overhead;
570 chunk_->EstimateTraceMemoryOverhead(&overhead);
571 overhead.DumpInto(dump_base_name.c_str(), pmd);
572 return true;
573 }
574
// Hands the current chunk back to TraceLog's buffer. Caller must hold
// trace_log_->lock_.
void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}
587
// Installs atomic hooks that intercept legacy trace-event additions, flushes,
// and duration updates. Used (see the constructor) to forward legacy events
// to Perfetto when the client library is enabled; null callbacks clear a hook.
void TraceLog::SetAddTraceEventOverrides(
    const AddTraceEventOverrideFunction& add_event_override,
    const OnFlushFunction& on_flush_override,
    const UpdateDurationFunction& update_duration_override) {
  add_trace_event_override_.store(add_event_override);
  on_flush_override_.store(on_flush_override);
  update_duration_override_.store(update_duration_override);
}
596
// Bookkeeping for an AsyncEnabledStateObserver: pairs the (weak) observer
// with the task runner of the sequence it registered on, presumably so
// notifications can be posted back to that sequence.
struct TraceLog::RegisteredAsyncObserver {
  explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
      : observer(observer),
        task_runner(SequencedTaskRunner::GetCurrentDefault()) {}
  ~RegisteredAsyncObserver() = default;

  WeakPtr<AsyncEnabledStateObserver> observer;
  scoped_refptr<SequencedTaskRunner> task_runner;
};
606
// Zero-initialize the capacity/count pair reported by GetStatus().
TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}

TraceLogStatus::~TraceLogStatus() = default;
610
// static
TraceLog* TraceLog::GetInstance() {
  // Leaky singleton at generation 0. ResetForTesting() re-creates it in
  // place with placement new rather than through this NoDestructor.
  static base::NoDestructor<TraceLog> instance(0);
  return instance.get();
}
616
// static
void TraceLog::ResetForTesting() {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  // With Perfetto the singleton itself is kept; only observer state is
  // cleared and Perfetto is (re)initialized.
  auto* self = GetInstance();
  AutoLock lock(self->observers_lock_);
  self->enabled_state_observers_.clear();
  self->owned_enabled_state_observer_copy_.clear();
  self->async_observers_.clear();
  self->InitializePerfettoIfNeeded();
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  if (!g_trace_log_for_testing)
    return;
  {
    AutoLock lock(g_trace_log_for_testing->lock_);
    CategoryRegistry::ResetForTesting();
  }
  // Don't reset the generation value back to 0. TraceLog is normally
  // supposed to be a singleton and the value of generation is never
  // supposed to decrease.
  const int generation = g_trace_log_for_testing->generation() + 1;
  // Destroy and rebuild the singleton in its existing storage so pointers
  // held by callers stay valid.
  g_trace_log_for_testing->~TraceLog();
  new (g_trace_log_for_testing) TraceLog(generation);
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
641
TraceLog::TraceLog(int generation)
    : process_sort_index_(0),
      process_id_hash_(0),
      process_id_(base::kNullProcessId),
      trace_options_(kInternalRecordUntilFull),
      trace_config_(TraceConfig()),
      thread_shared_chunk_index_(0),
      generation_(generation),
      use_worker_thread_(false) {
  CategoryRegistry::Initialize();

#if BUILDFLAG(IS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(GetCurrentProcId());
#endif

  logged_events_.reset(CreateTraceBuffer());

  // Report this log's own memory usage when memory-infra is enabled.
  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
                                                         nullptr);
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  TrackEvent::AddSessionObserver(this);
  // When using the Perfetto client library, TRACE_EVENT macros will bypass
  // TraceLog entirely. However, trace event embedders which haven't been ported
  // to Perfetto yet will still be using TRACE_EVENT_API_ADD_TRACE_EVENT, so we
  // need to route these events to Perfetto using an override here. This
  // override is also used to capture internal metadata events.
  SetAddTraceEventOverrides(&OnAddLegacyTraceEvent, nullptr,
                            &OnUpdateLegacyTraceEventDuration);
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  g_trace_log_for_testing = this;
}
675
// Only runs when ResetForTesting() destroys the singleton in place; the
// production instance is intentionally leaked (base::NoDestructor).
TraceLog::~TraceLog() {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  TrackEvent::RemoveSessionObserver(this);
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
681
void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
  // A ThreadLocalEventBuffer needs the message loop with a task runner
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop or if the message loop may be blocked,
  // the trace events will be added into the main buffer directly.
  if (thread_blocks_message_loop || !CurrentThread::IsSet() ||
      !SingleThreadTaskRunner::HasCurrentDefault()) {
    return;
  }
  HEAP_PROFILER_SCOPED_IGNORE;
  // A stale buffer (from before the last reset) must be replaced; deleting it
  // also clears the thread_local pointer via its AutoReset member.
  if (thread_local_event_buffer &&
      !CheckGeneration(thread_local_event_buffer->generation())) {
    delete thread_local_event_buffer;
  }
  if (!thread_local_event_buffer) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
  }
}
701
// Reports the trace log's own memory footprint (the event buffer plus queued
// metadata events) to memory-infra.
bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
                            ProcessMemoryDump* pmd) {
  // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
  // (crbug.com/499731).
  TraceEventMemoryOverhead overhead;
  overhead.Add(TraceEventMemoryOverhead::kOther, sizeof(*this));
  {
    // Hold the lock only while walking the shared structures.
    AutoLock lock(lock_);
    if (logged_events_)
      logged_events_->EstimateTraceMemoryOverhead(&overhead);

    for (auto& metadata_event : metadata_events_)
      metadata_event->EstimateTraceMemoryOverhead(&overhead);
  }
  overhead.AddSelf();
  overhead.DumpInto("tracing/main_trace_log", pmd);
  return true;
}
720
// Returns the stable pointer to |category_group|'s enabled-state byte,
// creating the category on first use. The pointer is cached by the
// TRACE_EVENT macros, so it must stay valid for the process lifetime.
const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  return TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group);
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    // TraceLog is gone; hand back a permanently-disabled sentinel state.
    DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
    return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
  }
  TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
  if (!category) {
    // Slow path: in the case of a new category we have to repeat the check
    // holding the lock, as multiple threads might have reached this point
    // at the same time.
    auto category_initializer = [](TraceCategory* category) {
      TraceLog::GetInstance()->UpdateCategoryState(category);
    };
    AutoLock lock(tracelog->lock_);
    CategoryRegistry::GetOrCreateCategoryLocked(
        category_group, category_initializer, &category);
  }
  DCHECK(category->state_ptr());
  return category->state_ptr();
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
747
// Maps an enabled-state pointer (as returned by GetCategoryGroupEnabled())
// back to its category group's name.
const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  return TRACE_EVENT_API_GET_CATEGORY_GROUP_NAME(category_group_enabled);
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
      ->name();
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
757
// Recomputes |category|'s state byte from the current trace config (and ETW
// export state on Windows). Caller must hold lock_.
void TraceLog::UpdateCategoryState(TraceCategory* category) {
  lock_.AssertAcquired();
  DCHECK(category->is_valid());
  unsigned char state_flags = 0;
  if (enabled_ && trace_config_.IsCategoryGroupEnabled(category->name())) {
    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
  }

  // TODO(primiano): this is a temporary workaround for catapult:#2341,
  // to guarantee that metadata events are always added even if the category
  // filter is "-*". See crbug.com/618054 for more details and long-term fix.
  if (enabled_ && category == CategoryRegistry::kCategoryMetadata) {
    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
  }

#if BUILDFLAG(IS_WIN)
  if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
          category->name())) {
    state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
  }
#endif

  category->set_state(state_flags);
}
782
UpdateCategoryRegistry()783 void TraceLog::UpdateCategoryRegistry() {
784 lock_.AssertAcquired();
785 for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
786 UpdateCategoryState(&category);
787 }
788 }
789
SetEnabled(const TraceConfig & trace_config,uint8_t modes_to_enable)790 void TraceLog::SetEnabled(const TraceConfig& trace_config,
791 uint8_t modes_to_enable) {
792 // FILTERING_MODE is no longer supported.
793 DCHECK(modes_to_enable == RECORDING_MODE);
794 DCHECK(trace_config.process_filter_config().IsEnabled(process_id_));
795
796 AutoLock lock(lock_);
797
798 // Perfetto only supports basic wildcard filtering, so check that we're not
799 // trying to use more complex filters.
800 for (const auto& excluded :
801 trace_config.category_filter().excluded_categories()) {
802 DCHECK(excluded.find("?") == std::string::npos);
803 DCHECK(excluded.find("*") == std::string::npos ||
804 excluded.find("*") == excluded.size() - 1);
805 }
806 for (const auto& included :
807 trace_config.category_filter().included_categories()) {
808 DCHECK(included.find("?") == std::string::npos);
809 DCHECK(included.find("*") == std::string::npos ||
810 included.find("*") == included.size() - 1);
811 }
812 for (const auto& disabled :
813 trace_config.category_filter().disabled_categories()) {
814 DCHECK(disabled.find("?") == std::string::npos);
815 DCHECK(disabled.find("*") == std::string::npos ||
816 disabled.find("*") == disabled.size() - 1);
817 }
818
819 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
820 DCHECK(!trace_config.IsArgumentFilterEnabled());
821
822 // TODO(khokhlov): Avoid duplication between this code and
823 // services/tracing/public/cpp/perfetto/perfetto_config.cc.
824 perfetto::TraceConfig perfetto_config;
825 size_t size_limit = trace_config.GetTraceBufferSizeInKb();
826 if (size_limit == 0)
827 size_limit = 200 * 1024;
828 auto* buffer_config = perfetto_config.add_buffers();
829 buffer_config->set_size_kb(checked_cast<uint32_t>(size_limit));
830 switch (trace_config.GetTraceRecordMode()) {
831 case base::trace_event::RECORD_UNTIL_FULL:
832 case base::trace_event::RECORD_AS_MUCH_AS_POSSIBLE:
833 buffer_config->set_fill_policy(
834 perfetto::TraceConfig::BufferConfig::DISCARD);
835 break;
836 case base::trace_event::RECORD_CONTINUOUSLY:
837 buffer_config->set_fill_policy(
838 perfetto::TraceConfig::BufferConfig::RING_BUFFER);
839 break;
840 case base::trace_event::ECHO_TO_CONSOLE:
841 // Handled below.
842 break;
843 }
844
845 // Add the track event data source.
846 // TODO(skyostil): Configure kTraceClockId as the primary trace clock.
847 auto* data_source = perfetto_config.add_data_sources();
848 auto* source_config = data_source->mutable_config();
849 source_config->set_name("track_event");
850 source_config->set_target_buffer(0);
851 auto* source_chrome_config = source_config->mutable_chrome_config();
852 source_chrome_config->set_trace_config(trace_config.ToString());
853 source_chrome_config->set_convert_to_legacy_json(true);
854
855 if (trace_config.GetTraceRecordMode() == base::trace_event::ECHO_TO_CONSOLE) {
856 perfetto::ConsoleInterceptor::Register();
857 source_config->mutable_interceptor_config()->set_name("console");
858 }
859
860 source_config->set_track_event_config_raw(
861 trace_config.ToPerfettoTrackEventConfigRaw(
862 /*privacy_filtering_enabled = */ false));
863
864 if (trace_config.IsCategoryGroupEnabled("disabled-by-default-memory-infra")) {
865 data_source = perfetto_config.add_data_sources();
866 source_config = data_source->mutable_config();
867 source_config->set_name("org.chromium.memory_instrumentation");
868 source_config->set_target_buffer(0);
869 source_chrome_config = source_config->mutable_chrome_config();
870 source_chrome_config->set_trace_config(trace_config.ToString());
871 source_chrome_config->set_convert_to_legacy_json(true);
872 }
873
874 // Clear incremental state every 0.5 seconds, so that we lose at most the
875 // first 0.5 seconds of the trace (if we wrap around Perfetto's central
876 // buffer).
877 // This value strikes balance between minimizing interned data overhead, and
878 // reducing the risk of data loss in ring buffer mode.
879 perfetto_config.mutable_incremental_state_config()->set_clear_period_ms(500);
880
881 SetEnabledImpl(trace_config, perfetto_config);
882 #else // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
883 // Can't enable tracing when Flush() is in progress.
884 DCHECK(!flush_task_runner_);
885
886 InternalTraceOptions new_options =
887 GetInternalOptionsFromTraceConfig(trace_config);
888
889 InternalTraceOptions old_options = trace_options();
890
891 if (dispatching_to_observers_) {
892 // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
893 DLOG(ERROR)
894 << "Cannot manipulate TraceLog::Enabled state from an observer.";
895 return;
896 }
897
898 // Update trace config for recording.
899 const bool already_recording = enabled_;
900 if (already_recording) {
901 trace_config_.Merge(trace_config);
902 } else {
903 trace_config_ = trace_config;
904 }
905
906 enabled_ = true;
907 UpdateCategoryRegistry();
908
909 // Do not notify observers or create trace buffer if only enabled for
910 // filtering or if recording was already enabled.
911 if (already_recording)
912 return;
913
914 // Discard events if new trace options are different. Reducing trace buffer
915 // size is not supported while already recording, so only replace trace
916 // buffer if we were not already recording.
917 if (new_options != old_options ||
918 (trace_config_.GetTraceBufferSizeInEvents() && !already_recording)) {
919 trace_options_.store(new_options, std::memory_order_relaxed);
920 UseNextTraceBuffer();
921 }
922
923 num_traces_recorded_++;
924
925 UpdateCategoryRegistry();
926
927 dispatching_to_observers_ = true;
928 {
929 // Notify observers outside of the thread events lock, so they can trigger
930 // trace events.
931 AutoUnlock unlock(lock_);
932 AutoLock lock2(observers_lock_);
933 for (EnabledStateObserver* observer : enabled_state_observers_)
934 observer->OnTraceLogEnabled();
935 for (const auto& it : async_observers_) {
936 it.second.task_runner->PostTask(
937 FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
938 it.second.observer));
939 }
940 }
941 dispatching_to_observers_ = false;
942 #endif // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
943 }
944
945 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
GetCurrentTrackEventDataSourceConfig() const946 perfetto::DataSourceConfig TraceLog::GetCurrentTrackEventDataSourceConfig()
947 const {
948 AutoLock lock(track_event_lock_);
949 if (track_event_sessions_.empty()) {
950 return perfetto::DataSourceConfig();
951 }
952 return track_event_sessions_[0].config;
953 }
954
InitializePerfettoIfNeeded()955 void TraceLog::InitializePerfettoIfNeeded() {
956 // When we're using the Perfetto client library, only tests should be
957 // recording traces directly through TraceLog. Production code should instead
958 // use perfetto::Tracing::NewTrace(). Let's make sure the tracing service
959 // didn't already initialize Perfetto in this process, because it's not safe
960 // to consume trace data from arbitrary processes through TraceLog as the JSON
961 // conversion here isn't sandboxed like with the real tracing service.
962 //
963 // Note that initializing Perfetto here requires the thread pool to be ready.
964 CHECK(!perfetto::Tracing::IsInitialized() ||
965 g_perfetto_initialized_by_tracelog)
966 << "Don't use TraceLog for recording traces from non-test code. Use "
967 "perfetto::Tracing::NewTrace() instead.";
968
969 if (perfetto::Tracing::IsInitialized())
970 return;
971 g_perfetto_initialized_by_tracelog = true;
972 perfetto::TracingInitArgs init_args;
973 init_args.backends = perfetto::BackendType::kInProcessBackend;
974 init_args.shmem_batch_commits_duration_ms = 1000;
975 init_args.shmem_size_hint_kb = 4 * 1024;
976 init_args.shmem_direct_patching_enabled = true;
977 init_args.disallow_merging_with_system_tracks = true;
978 perfetto::Tracing::Initialize(init_args);
979 TrackEvent::Register();
980 }
981
// Returns true iff InitializePerfettoIfNeeded() was the one that initialized
// Perfetto in this process (as opposed to e.g. the tracing service).
bool TraceLog::IsPerfettoInitializedByTraceLog() const {
  return g_perfetto_initialized_by_tracelog;
}
985
// Enables tracing with an explicit, caller-built perfetto::TraceConfig.
// Takes |lock_| and delegates to SetEnabledImpl().
void TraceLog::SetEnabled(const TraceConfig& trace_config,
                          const perfetto::TraceConfig& perfetto_config) {
  AutoLock lock(lock_);
  SetEnabledImpl(trace_config, perfetto_config);
}
991
// Shared implementation for both SetEnabled() overloads: records the configs,
// creates a new Perfetto tracing session, and starts it. Caller must hold
// |lock_|; the lock is released before the blocking Start so the session
// startup cannot deadlock against other TraceLog calls.
void TraceLog::SetEnabledImpl(const TraceConfig& trace_config,
                              const perfetto::TraceConfig& perfetto_config) {
  DCHECK(!TrackEvent::IsEnabled());
  lock_.AssertAcquired();
  InitializePerfettoIfNeeded();
  trace_config_ = trace_config;
  perfetto_config_ = perfetto_config;
  tracing_session_ = perfetto::Tracing::NewTrace();

  // Drop |lock_| for the remainder of the function (reacquired on return).
  AutoUnlock unlock(lock_);
  tracing_session_->Setup(perfetto_config);
  tracing_session_->StartBlocking();
}
1005 #endif // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
1006
// Installs the predicate used to decide which event arguments survive
// argument filtering. Must be non-null; replaces any previous predicate.
void TraceLog::SetArgumentFilterPredicate(
    const ArgumentFilterPredicate& argument_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!argument_filter_predicate.is_null());
  // Replace the existing argument filter.
  argument_filter_predicate_ = argument_filter_predicate;
}
1014
// Returns a copy of the current argument-filter predicate (may be null if
// none was ever set).
ArgumentFilterPredicate TraceLog::GetArgumentFilterPredicate() const {
  AutoLock lock(lock_);
  return argument_filter_predicate_;
}
1019
// Installs the predicate used to decide which metadata entries are recorded.
// Must be non-null; replaces any previous predicate.
void TraceLog::SetMetadataFilterPredicate(
    const MetadataFilterPredicate& metadata_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!metadata_filter_predicate.is_null());
  // Replace the existing metadata filter.
  metadata_filter_predicate_ = metadata_filter_predicate;
}
1027
// Returns a copy of the current metadata-filter predicate (may be null if
// none was ever set).
MetadataFilterPredicate TraceLog::GetMetadataFilterPredicate() const {
  AutoLock lock(lock_);
  return metadata_filter_predicate_;
}
1032
// Toggles whether the host app's package name is recorded with traces.
// NOTE(review): written without holding |lock_| — presumably only called
// during single-threaded startup; confirm with callers.
void TraceLog::SetRecordHostAppPackageName(bool record_host_app_package_name) {
  record_host_app_package_name_ = record_host_app_package_name;
}
1036
// Returns whether the host app's package name should be recorded in traces.
bool TraceLog::ShouldRecordHostAppPackageName() const {
  return record_host_app_package_name_;
}
1040
GetInternalOptionsFromTraceConfig(const TraceConfig & config)1041 TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
1042 const TraceConfig& config) {
1043 InternalTraceOptions ret = config.IsArgumentFilterEnabled()
1044 ? kInternalEnableArgumentFilter
1045 : kInternalNone;
1046 switch (config.GetTraceRecordMode()) {
1047 case RECORD_UNTIL_FULL:
1048 return ret | kInternalRecordUntilFull;
1049 case RECORD_CONTINUOUSLY:
1050 return ret | kInternalRecordContinuously;
1051 case ECHO_TO_CONSOLE:
1052 return ret | kInternalEchoToConsole;
1053 case RECORD_AS_MUCH_AS_POSSIBLE:
1054 return ret | kInternalRecordAsMuchAsPossible;
1055 }
1056 NOTREACHED();
1057 return kInternalNone;
1058 }
1059
// Returns the currently active trace config. With the Perfetto client
// library, it is reconstructed from the active track-event data source's
// chrome_config (locking happens inside the helper); otherwise the stored
// |trace_config_| is returned under |lock_|.
TraceConfig TraceLog::GetCurrentTraceConfig() const {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  const auto chrome_config =
      GetCurrentTrackEventDataSourceConfig().chrome_config();
  return TraceConfig(chrome_config.trace_config());
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  AutoLock lock(lock_);
  return trace_config_;
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
1070
// Disables recording. Convenience overload that locks and forwards to
// SetDisabledWhileLocked().
void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked(RECORDING_MODE);
}
1075
// Disables the given modes (only RECORDING_MODE is supported downstream).
void TraceLog::SetDisabled(uint8_t modes_to_disable) {
  AutoLock lock(lock_);
  SetDisabledWhileLocked(modes_to_disable);
}
1080
// Stops recording. Caller must hold |lock_|. With the Perfetto client
// library, this flushes metadata/track events and stops the tracing session;
// otherwise it clears legacy TraceLog state and notifies observers.
void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
  DCHECK(modes_to_disable == RECORDING_MODE);
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  // Nothing to stop if no session was ever started.
  if (!tracing_session_)
    return;

  AddMetadataEventsWhileLocked();
  // Remove metadata events so they will not get added to a subsequent trace.
  metadata_events_.clear();

  TrackEvent::Flush();
  // If the current thread has an active task runner, allow nested tasks to run
  // while stopping the session. This is needed by some tests, e.g., to allow
  // data sources to properly flush themselves.
  if (SingleThreadTaskRunner::HasCurrentDefault()) {
    RunLoop stop_loop(RunLoop::Type::kNestableTasksAllowed);
    auto quit_closure = stop_loop.QuitClosure();
    tracing_session_->SetOnStopCallback(
        [&quit_closure] { quit_closure.Run(); });
    tracing_session_->Stop();
    // Release |lock_| while spinning the nested loop so stop callbacks and
    // flushing data sources can call back into TraceLog.
    AutoUnlock unlock(lock_);
    stop_loop.Run();
  } else {
    tracing_session_->StopBlocking();
  }
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

  // Re-entrant disable from an observer callback is not supported.
  if (dispatching_to_observers_) {
    // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  enabled_ = false;
  trace_config_.Clear();
  UpdateCategoryRegistry();

  AddMetadataEventsWhileLocked();

  // Remove metadata events so they will not get added to a subsequent trace.
  metadata_events_.clear();

  dispatching_to_observers_ = true;
  {
    // Release trace events lock, so observers can trigger trace events.
    AutoUnlock unlock(lock_);
    AutoLock lock2(observers_lock_);
    for (auto* it : enabled_state_observers_)
      it->OnTraceLogDisabled();
    for (const auto& it : async_observers_) {
      it.second.task_runner->PostTask(
          FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
                              it.second.observer));
    }
  }
  dispatching_to_observers_ = false;
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}
1140
GetNumTracesRecorded()1141 int TraceLog::GetNumTracesRecorded() {
1142 AutoLock lock(lock_);
1143 return enabled_ ? num_traces_recorded_ : -1;
1144 }
1145
// Registers a synchronous enabled-state observer. The caller retains
// ownership of |listener| and must remove it before destroying it.
void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(observers_lock_);
  enabled_state_observers_.push_back(listener);
}
1150
// Unregisters a previously added enabled-state observer (erase-remove idiom;
// removes every occurrence of |listener|).
void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(observers_lock_);
  enabled_state_observers_.erase(
      ranges::remove(enabled_state_observers_, listener),
      enabled_state_observers_.end());
}
1157
// Registers an enabled-state observer whose ownership transfers to TraceLog;
// the raw pointer goes into the normal observer list while the unique_ptr is
// stashed to keep the object alive.
void TraceLog::AddOwnedEnabledStateObserver(
    std::unique_ptr<EnabledStateObserver> listener) {
  AutoLock lock(observers_lock_);
  enabled_state_observers_.push_back(listener.get());
  owned_enabled_state_observer_copy_.push_back(std::move(listener));
}
1164
// Returns true if |listener| is currently registered as a synchronous
// enabled-state observer.
bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  AutoLock lock(observers_lock_);
  return Contains(enabled_state_observers_, listener);
}
1169
// Registers an observer that is notified asynchronously (via PostTask on the
// registering sequence). Keyed by raw pointer; the WeakPtr guards against
// the observer being destroyed before the notification runs.
void TraceLog::AddAsyncEnabledStateObserver(
    WeakPtr<AsyncEnabledStateObserver> listener) {
  AutoLock lock(observers_lock_);
  async_observers_.emplace(listener.get(), RegisteredAsyncObserver(listener));
}
1175
// Unregisters an asynchronous enabled-state observer.
void TraceLog::RemoveAsyncEnabledStateObserver(
    AsyncEnabledStateObserver* listener) {
  AutoLock lock(observers_lock_);
  async_observers_.erase(listener);
}
1181
// Returns true if |listener| is currently registered as an asynchronous
// enabled-state observer.
bool TraceLog::HasAsyncEnabledStateObserver(
    AsyncEnabledStateObserver* listener) const {
  AutoLock lock(observers_lock_);
  return Contains(async_observers_, listener);
}
1187
// Registers an observer notified when Perfetto's incremental state is
// cleared. Caller retains ownership of |listener|.
void TraceLog::AddIncrementalStateObserver(IncrementalStateObserver* listener) {
  AutoLock lock(observers_lock_);
  incremental_state_observers_.push_back(listener);
}
1192
// Unregisters a previously added incremental-state observer (erase-remove
// idiom; removes every occurrence of |listener|).
void TraceLog::RemoveIncrementalStateObserver(
    IncrementalStateObserver* listener) {
  AutoLock lock(observers_lock_);
  incremental_state_observers_.erase(
      ranges::remove(incremental_state_observers_, listener),
      incremental_state_observers_.end());
}
1200
// Notifies all registered incremental-state observers, synchronously, under
// |observers_lock_|.
void TraceLog::OnIncrementalStateCleared() {
  AutoLock lock(observers_lock_);
  for (IncrementalStateObserver* observer : incremental_state_observers_)
    observer->OnIncrementalStateCleared();
}
1206
GetStatus() const1207 TraceLogStatus TraceLog::GetStatus() const {
1208 AutoLock lock(lock_);
1209 TraceLogStatus result;
1210 result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
1211 result.event_count = static_cast<uint32_t>(logged_events_->Size());
1212 return result;
1213 }
1214
1215 #if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// Returns true when the legacy trace buffer cannot accept more events.
bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}
1220 #endif // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
1221
// Allocates a TraceEvent slot from the shared chunk used by threads without
// their own thread-local buffer. Caller must hold |lock_|. Returns nullptr
// when no chunk can be obtained (buffer exhausted). If |handle| is non-null,
// it is filled in so the event can be located/updated later.
TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle,
    bool check_buffer_is_full) {
  // Retire a full shared chunk back to the main buffer before replacing it.
  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                std::move(thread_shared_chunk_));
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ =
        logged_events_->GetChunk(&thread_shared_chunk_index_);
    // GetChunk may have drained the buffer; optionally disable tracing now.
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return nullptr;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}
1247
// If the trace buffer has filled up, records when that first happened and
// stops recording. Caller must hold |lock_|.
void TraceLog::CheckIfBufferIsFullWhileLocked() {
  if (logged_events_->IsFull()) {
    // Only remember the first time the limit was hit.
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked(RECORDING_MODE);
  }
}
1256
// Flush() works as the following:
// 1. Flush() is called in thread A whose task runner is saved in
//    flush_task_runner_;
// 2. If thread_message_loops_ is not empty, thread A posts task to each message
//    loop to flush the thread local buffers; otherwise finish the flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main buffer;
//    - The message loop will be removed from thread_message_loops_;
//    If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush via
//    OnFlushTimeout().
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  FlushInternal(cb, use_worker_thread, false);
}
1271
// Stops tracing and discards all buffered events; |cb| is still invoked (with
// an empty result) so callers observe completion.
void TraceLog::CancelTracing(const OutputCallback& cb) {
  SetDisabled();
  FlushInternal(cb, false, true);
}
1276
// Drains buffered trace data and delivers it to |cb| (possibly in several
// chunks). Behavior depends on the build configuration:
// - USE_PERFETTO_TRACE_PROCESSOR: read the Perfetto session and either
//   convert it to legacy JSON via TraceProcessor or hand back raw protos.
// - USE_PERFETTO_CLIENT_LIBRARY (without trace processor): unsupported.
// - Legacy: collect per-thread buffers as documented above Flush().
// |discard_events| drops all data and reports an empty result.
void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
                             bool use_worker_thread,
                             bool discard_events) {
  use_worker_thread_ = use_worker_thread;

#if BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
  TrackEvent::Flush();

  // No session to read from (or caller wants the data thrown away): report
  // an empty (but non-null) result exactly once.
  if (!tracing_session_ || discard_events) {
    tracing_session_.reset();
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    cb.Run(empty_result, /*has_more_events=*/false);
    return;
  }

  // JSON conversion defaults to on; the first data source carrying an
  // explicit convert_to_legacy_json setting wins.
  bool convert_to_json = true;
  for (const auto& data_source : perfetto_config_.data_sources()) {
    if (data_source.config().has_chrome_config() &&
        data_source.config().chrome_config().has_convert_to_legacy_json()) {
      convert_to_json =
          data_source.config().chrome_config().convert_to_legacy_json();
      break;
    }
  }

  if (convert_to_json) {
    perfetto::trace_processor::Config processor_config;
    trace_processor_ =
        perfetto::trace_processor::TraceProcessorStorage::CreateInstance(
            processor_config);
    json_output_writer_.reset(new JsonStringOutputWriter(
        use_worker_thread ? SingleThreadTaskRunner::GetCurrentDefault()
                          : nullptr,
        cb));
  } else {
    // NOTE(review): |cb| is a const ref, so this std::move degrades to a
    // copy; harmless but misleading.
    proto_output_callback_ = std::move(cb);
  }

  // Trace data is consumed in OnTraceData(), either asynchronously or by
  // reading the whole trace here.
  if (use_worker_thread) {
    tracing_session_->ReadTrace(
        [this](perfetto::TracingSession::ReadTraceCallbackArgs args) {
          OnTraceData(args.data, args.size, args.has_more);
        });
  } else {
    auto data = tracing_session_->ReadTraceBlocking();
    OnTraceData(data.data(), data.size(), /*has_more=*/false);
  }
#elif BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  // Trace processor isn't enabled so we can't convert the resulting trace into
  // JSON.
  CHECK(false) << "JSON tracing isn't supported";
#else
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int gen = generation();
  // Copy of thread_task_runners_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner>> task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_task_runner_);
    flush_task_runner_ = SequencedTaskRunner::HasCurrentDefault()
                             ? SequencedTaskRunner::GetCurrentDefault()
                             : nullptr;
    DCHECK(thread_task_runners_.empty() || flush_task_runner_);
    flush_output_callback_ = cb;

    // Retire the shared chunk so its events are included in this flush.
    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  std::move(thread_shared_chunk_));
    }

    for (const auto& it : thread_task_runners_)
      task_runners.push_back(it.second);
  }

  // Ask every thread with a local buffer to flush; arm a timeout in case one
  // of them never responds. FinishFlush() runs when the last one completes.
  if (!task_runners.empty()) {
    for (auto& task_runner : task_runners) {
      task_runner->PostTask(
          FROM_HERE, BindOnce(&TraceLog::FlushCurrentThread, Unretained(this),
                              gen, discard_events));
    }
    flush_task_runner_->PostDelayedTask(
        FROM_HERE,
        BindOnce(&TraceLog::OnFlushTimeout, Unretained(this), gen,
                 discard_events),
        kThreadFlushTimeout);
    return;
  }

  FinishFlush(gen, discard_events);
#endif  // BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
}
1379
1380 #if BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
// Receives a chunk of raw Perfetto trace data. In proto mode the bytes are
// forwarded to |proto_output_callback_| as-is; in JSON mode they are fed to
// the trace processor, and once the final chunk arrives the accumulated
// trace is exported as JSON and all per-flush state is torn down.
void TraceLog::OnTraceData(const char* data, size_t size, bool has_more) {
  if (proto_output_callback_) {
    scoped_refptr<RefCountedString> chunk = new RefCountedString();
    if (size)
      chunk->data().assign(data, size);
    proto_output_callback_.Run(std::move(chunk), has_more);
    if (!has_more) {
      proto_output_callback_.Reset();
      tracing_session_.reset();
    }
    return;
  }
  if (size) {
    // TraceProcessor takes ownership of the buffer, so copy out of the
    // caller-owned |data|.
    std::unique_ptr<uint8_t[]> data_copy(new uint8_t[size]);
    memcpy(&data_copy[0], data, size);
    auto status = trace_processor_->Parse(std::move(data_copy), size);
    DCHECK(status.ok()) << status.message();
  }
  if (has_more)
    return;
  trace_processor_->NotifyEndOfFile();

  // Final chunk received: convert everything to JSON and release resources.
  auto status = perfetto::trace_processor::json::ExportJson(
      trace_processor_.get(), json_output_writer_.get());
  DCHECK(status.ok()) << status.message();
  trace_processor_.reset();
  tracing_session_.reset();
  json_output_writer_.reset();
}
1410 #endif // BUILDFLAG(USE_PERFETTO_TRACE_PROCESSOR)
1411
// Serializes the flushed events to JSON and streams them to
// |flush_output_callback| in chunks of roughly kTraceEventBufferSizeInBytes.
// Usually it runs on a different thread (a worker, see FinishFlush). The
// callback's second argument is true while more chunks follow and false on
// the final chunk.
void TraceLog::ConvertTraceEventsToTraceFormat(
    std::unique_ptr<TraceBuffer> logged_events,
    const OutputCallback& flush_output_callback,
    const ArgumentFilterPredicate& argument_filter_predicate) {
  if (flush_output_callback.is_null())
    return;

  HEAP_PROFILER_SCOPED_IGNORE;
  // The callback need to be called at least once even if there is no events
  // to let the caller know the completion of flush.
  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
  // Reserve 25% headroom over the chunk threshold to avoid reallocations.
  const size_t kReserveCapacity = kTraceEventBufferSizeInBytes * 5 / 4;
  json_events_str_ptr->data().reserve(kReserveCapacity);
  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
    for (size_t j = 0; j < chunk->size(); ++j) {
      size_t size = json_events_str_ptr->size();
      // Emit the current chunk once it exceeds the threshold; otherwise
      // join consecutive events with ",\n".
      if (size > kTraceEventBufferSizeInBytes) {
        flush_output_callback.Run(json_events_str_ptr, true);
        json_events_str_ptr = new RefCountedString();
        json_events_str_ptr->data().reserve(kReserveCapacity);
      } else if (size) {
        json_events_str_ptr->data().append(",\n");
      }
      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
                                         argument_filter_predicate);
    }
  }
  flush_output_callback.Run(json_events_str_ptr, false);
}
1442
// Completes a flush: swaps in a fresh trace buffer, clears per-flush state,
// and converts the captured events to JSON (on a worker thread if requested).
// No-op if |generation| no longer matches (a newer flush superseded this one).
void TraceLog::FinishFlush(int generation, bool discard_events) {
  std::unique_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;
  ArgumentFilterPredicate argument_filter_predicate;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    // Detach the filled buffer and start a new one for subsequent tracing.
    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_task_runners_.clear();

    flush_task_runner_ = nullptr;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

    if (trace_options() & kInternalEnableArgumentFilter) {
      // If argument filtering is activated and there is no filtering predicate,
      // use the safe default filtering predicate.
      if (argument_filter_predicate_.is_null()) {
        argument_filter_predicate =
            base::BindRepeating(&DefaultIsTraceEventArgsAllowlisted);
      } else {
        argument_filter_predicate = argument_filter_predicate_;
      }
    }
  }

  // Discarding still invokes the callback once so the caller sees completion.
  if (discard_events) {
    if (!flush_output_callback.is_null()) {
      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      flush_output_callback.Run(empty_result, false);
    }
    return;
  }

  // JSON conversion can be slow; optionally offload it to the thread pool.
  if (use_worker_thread_) {
    base::ThreadPool::PostTask(
        FROM_HERE,
        {MayBlock(), TaskPriority::BEST_EFFORT,
         TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
        BindOnce(&TraceLog::ConvertTraceEventsToTraceFormat,
                 std::move(previous_logged_events), flush_output_callback,
                 argument_filter_predicate));
    return;
  }

  ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
                                  flush_output_callback,
                                  argument_filter_predicate);
}
1497
// Run in each thread holding a local event buffer. Flushes that thread's
// buffer into the main one, invokes any registered flush override, and — if
// this was the last pending thread — schedules FinishFlush() on the flushing
// thread's task runner.
void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer;

  auto on_flush_override = on_flush_override_.load(std::memory_order_relaxed);
  if (on_flush_override) {
    on_flush_override();
  }

  // Scheduler uses TRACE_EVENT macros when posting a task, which can lead
  // to acquiring a tracing lock. Given that posting a task requires grabbing
  // a scheduler lock, we need to post this task outside tracing lock to avoid
  // deadlocks.
  scoped_refptr<SequencedTaskRunner> cached_flush_task_runner;
  {
    AutoLock lock(lock_);
    cached_flush_task_runner = flush_task_runner_;
    // Only the thread that empties thread_task_runners_ posts FinishFlush.
    if (!CheckGeneration(generation) || !flush_task_runner_ ||
        !thread_task_runners_.empty())
      return;
  }
  cached_flush_task_runner->PostTask(
      FROM_HERE, BindOnce(&TraceLog::FinishFlush, Unretained(this), generation,
                          discard_events));
}
1532
// Fires after kThreadFlushTimeout if some thread never completed its flush:
// logs which threads are still pending, then finishes the flush without them.
void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING)
        << "The following threads haven't finished flush in time. "
           "If this happens stably for some thread, please call "
           "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
           "the thread to avoid its trace events from being lost.";
    // Entries still in thread_task_runners_ are the threads that never ran
    // FlushCurrentThread().
    for (const auto& it : thread_task_runners_) {
      LOG(WARNING) << "Thread: "
                   << ThreadIdNameManager::GetInstance()->GetName(it.first);
    }
  }
  FinishFlush(generation, discard_events);
}
1553
// Replaces the trace buffer with a fresh one and bumps the generation so
// in-flight flush tasks for the old buffer become no-ops.
void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  generation_.fetch_add(1, std::memory_order_relaxed);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
1560
// Decides whether an event should actually be recorded, performing the side
// effects that must happen regardless: tracking thread-name changes and (on
// Windows) forwarding the event to ETW. Returns false if the category is
// disabled or we are re-entering trace-event code on this thread.
bool TraceLog::ShouldAddAfterUpdatingState(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    uint64_t id,
    PlatformThreadId thread_id,
    const TimeTicks timestamp,
    TraceArguments* args) {
  if (!*category_group_enabled)
    return false;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event) {
    return false;
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == PlatformThread::CurrentId()) {
    const char* new_name =
        ThreadIdNameManager::GetInstance()->GetNameForCurrentThread();
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    // |current_thread_name| is a per-thread cache that persists across calls.
    thread_local const char* current_thread_name = nullptr;
    if (new_name != current_thread_name && new_name && *new_name) {
      current_thread_name = new_name;

      AutoLock thread_info_lock(thread_info_lock_);

      auto existing_name = thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name. Names are accumulated as a comma-separated list, so only
        // append |new_name| if it isn't already in the list.
        std::vector<StringPiece> existing_names = base::SplitStringPiece(
            existing_name->second, ",", base::KEEP_WHITESPACE,
            base::SPLIT_WANT_NONEMPTY);
        if (!Contains(existing_names, new_name)) {
          if (!existing_names.empty()) {
            existing_name->second.push_back(',');
          }
          existing_name->second.append(new_name);
        }
      }
    }
  }

#if BUILDFLAG(IS_WIN)
  // This is done sooner rather than later, to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already threadsafe.
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT) {
    // ETW export expects non-null event names.
    name = name ? name : "";
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  timestamp, args);
  }
#endif  // BUILDFLAG(IS_WIN)
  return true;
}
1626
AddTraceEvent(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,TraceArguments * args,unsigned int flags)1627 TraceEventHandle TraceLog::AddTraceEvent(
1628 char phase,
1629 const unsigned char* category_group_enabled,
1630 const char* name,
1631 const char* scope,
1632 uint64_t id,
1633 TraceArguments* args,
1634 unsigned int flags) {
1635 auto thread_id = base::PlatformThread::CurrentId();
1636 base::TimeTicks now = TRACE_TIME_TICKS_NOW();
1637 return AddTraceEventWithThreadIdAndTimestamp(
1638 phase, category_group_enabled, name, scope, id,
1639 trace_event_internal::kNoId, // bind_id
1640 thread_id, now, args, flags);
1641 }
1642
AddTraceEventWithBindId(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,uint64_t bind_id,TraceArguments * args,unsigned int flags)1643 TraceEventHandle TraceLog::AddTraceEventWithBindId(
1644 char phase,
1645 const unsigned char* category_group_enabled,
1646 const char* name,
1647 const char* scope,
1648 uint64_t id,
1649 uint64_t bind_id,
1650 TraceArguments* args,
1651 unsigned int flags) {
1652 auto thread_id = base::PlatformThread::CurrentId();
1653 base::TimeTicks now = TRACE_TIME_TICKS_NOW();
1654 return AddTraceEventWithThreadIdAndTimestamp(
1655 phase, category_group_enabled, name, scope, id, bind_id, thread_id, now,
1656 args, flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
1657 }
1658
AddTraceEventWithProcessId(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,ProcessId process_id,TraceArguments * args,unsigned int flags)1659 TraceEventHandle TraceLog::AddTraceEventWithProcessId(
1660 char phase,
1661 const unsigned char* category_group_enabled,
1662 const char* name,
1663 const char* scope,
1664 uint64_t id,
1665 ProcessId process_id,
1666 TraceArguments* args,
1667 unsigned int flags) {
1668 base::TimeTicks now = TRACE_TIME_TICKS_NOW();
1669 return AddTraceEventWithThreadIdAndTimestamp(
1670 phase, category_group_enabled, name, scope, id,
1671 trace_event_internal::kNoId, // bind_id
1672 static_cast<PlatformThreadId>(process_id), now, args,
1673 flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
1674 }
1675
1676 // Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp
1677 // with kNoId as bind_id
AddTraceEventWithThreadIdAndTimestamp(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,PlatformThreadId thread_id,const TimeTicks & timestamp,TraceArguments * args,unsigned int flags)1678 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1679 char phase,
1680 const unsigned char* category_group_enabled,
1681 const char* name,
1682 const char* scope,
1683 uint64_t id,
1684 PlatformThreadId thread_id,
1685 const TimeTicks& timestamp,
1686 TraceArguments* args,
1687 unsigned int flags) {
1688 return AddTraceEventWithThreadIdAndTimestamp(
1689 phase, category_group_enabled, name, scope, id,
1690 trace_event_internal::kNoId, // bind_id
1691 thread_id, timestamp, args, flags);
1692 }
1693
AddTraceEventWithThreadIdAndTimestamp(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,uint64_t bind_id,PlatformThreadId thread_id,const TimeTicks & timestamp,TraceArguments * args,unsigned int flags)1694 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1695 char phase,
1696 const unsigned char* category_group_enabled,
1697 const char* name,
1698 const char* scope,
1699 uint64_t id,
1700 uint64_t bind_id,
1701 PlatformThreadId thread_id,
1702 const TimeTicks& timestamp,
1703 TraceArguments* args,
1704 unsigned int flags) {
1705 ThreadTicks thread_now;
1706 // If timestamp is provided explicitly, don't record thread time as it would
1707 // be for the wrong timestamp. Similarly, if we record an event for another
1708 // process or thread, we shouldn't report the current thread's thread time.
1709 if (!(flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ||
1710 flags & TRACE_EVENT_FLAG_HAS_PROCESS_ID ||
1711 thread_id != PlatformThread::CurrentId())) {
1712 thread_now = ThreadNow();
1713 }
1714 return AddTraceEventWithThreadIdAndTimestamps(
1715 phase, category_group_enabled, name, scope, id, bind_id, thread_id,
1716 timestamp, thread_now, args, flags);
1717 }
1718
// Central implementation behind all AddTraceEvent* entry points: records one
// event with fully explicit wall-clock and thread-clock timestamps.
// Returns a zero-initialized handle when the event is not recorded (e.g.
// re-entrant call or category filtered out).
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamps(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    uint64_t bind_id,
    PlatformThreadId thread_id,
    const TimeTicks& timestamp,
    const ThreadTicks& thread_timestamp,
    TraceArguments* args,
    unsigned int flags) NO_THREAD_SAFETY_ANALYSIS {
  TraceEventHandle handle = {0, 0, 0};
  // Updates thread-name bookkeeping and does ETW export; returns false when
  // this call must not record anything (e.g. re-entrant invocation).
  if (!ShouldAddAfterUpdatingState(phase, category_group_enabled, name, id,
                                   thread_id, timestamp, args)) {
    return handle;
  }
  DCHECK(!timestamp.is_null());

  // Guard against re-entrancy: the ECHO_TO_CONSOLE LOG(ERROR) below can
  // itself trigger trace events.
  const AutoReset<bool> resetter(&thread_is_in_trace_event, true, false);

  // Flow bind_ids don't have scopes, so we need to mangle in-process ones to
  // avoid collisions.
  bool has_flow =
      flags & (TRACE_EVENT_FLAG_FLOW_OUT | TRACE_EVENT_FLAG_FLOW_IN);
  if (has_flow && (flags & TRACE_EVENT_FLAG_HAS_LOCAL_ID))
    bind_id = MangleEventId(bind_id);

  // Apply the configured timestamp offset (see SetTimeOffset).
  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);

  ThreadLocalEventBuffer* event_buffer = nullptr;
  if (*category_group_enabled & RECORDING_MODE) {
    // |thread_local_event_buffer| can be null if the current thread doesn't
    // have a message loop or the message loop is blocked.
    InitializeThreadLocalEventBufferIfSupported();
    event_buffer = thread_local_event_buffer;
  }

  if (*category_group_enabled & RECORDING_MODE) {
    // An installed override takes full ownership of the event; nothing is
    // written to the legacy buffers below.
    auto trace_event_override =
        add_trace_event_override_.load(std::memory_order_relaxed);
    if (trace_event_override) {
      TraceEvent new_trace_event(
          thread_id, offset_event_timestamp, thread_timestamp, phase,
          category_group_enabled, name, scope, id, bind_id, args, flags);

      trace_event_override(&new_trace_event,
                           /*thread_will_flush=*/event_buffer != nullptr,
                           &handle);
      return handle;
    }
  }

  std::string console_message;

  // If enabled for recording, the event should be added only if one of the
  // filters indicates or category is not enabled for filtering.
  if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING)) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = nullptr;
    if (event_buffer) {
      // Fast path: the thread-local buffer needs no lock.
      trace_event = event_buffer->AddTraceEvent(&handle);
    } else {
      // Slow path: shared chunk under |lock_|.
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    // NO_THREAD_SAFETY_ANALYSIS: Conditional locking above.
    if (trace_event) {
      trace_event->Reset(thread_id, offset_event_timestamp, thread_timestamp,
                         phase, category_group_enabled, name, scope, id,
                         bind_id, args, flags);
    }

    if (trace_options() & kInternalEchoToConsole) {
      // A COMPLETE event is echoed as its BEGIN half; the END half is echoed
      // from UpdateTraceEventDurationExplicit.
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  // Log outside the lock scope to keep the critical section small.
  if (!console_message.empty())
    LOG(ERROR) << console_message;

  return handle;
}
1806
AddMetadataEvent(const unsigned char * category_group_enabled,const char * name,TraceArguments * args,unsigned int flags)1807 void TraceLog::AddMetadataEvent(const unsigned char* category_group_enabled,
1808 const char* name,
1809 TraceArguments* args,
1810 unsigned int flags) {
1811 HEAP_PROFILER_SCOPED_IGNORE;
1812 auto thread_id = base::PlatformThread::CurrentId();
1813 ThreadTicks thread_now = ThreadNow();
1814 TimeTicks now = OffsetNow();
1815 AutoLock lock(lock_);
1816 auto trace_event = std::make_unique<TraceEvent>(
1817 thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
1818 category_group_enabled, name,
1819 trace_event_internal::kGlobalScope, // scope
1820 trace_event_internal::kNoId, // id
1821 trace_event_internal::kNoId, // bind_id
1822 args, flags);
1823 metadata_events_.push_back(std::move(trace_event));
1824 }
1825
// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
//
// Formats one event as an ANSI-colored console line for ECHO_TO_CONSOLE mode,
// indented by the nesting depth of currently-open events on the event's
// thread. Side effect: maintains the per-thread stack of begin timestamps
// used to compute END durations.
std::string TraceLog::EventToConsoleMessage(char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  HEAP_PROFILER_SCOPED_IGNORE;
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  // If the event was recycled (null), attribute it to the calling thread.
  auto thread_id =
      trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    // Pop the matching BEGIN timestamp to compute the duration. Assumes a
    // BEGIN was previously pushed for this thread.
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  // Lazily assign each thread name one of six ANSI colors (codes 31-36).
  if (thread_colors_.find(thread_name) == thread_colors_.end()) {
    size_t next_color = (thread_colors_.size() % 6) + 1;
    thread_colors_[thread_name] = next_color;
  }

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%" PRIuS "m", thread_name.c_str(),
                            thread_colors_[thread_name]);

  // Indent by the number of events currently open on this thread.
  size_t depth = 0;
  auto it = thread_event_start_times_.find(thread_id);
  if (it != thread_event_start_times_.end())
    depth = it->second.size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  // Reset terminal color attributes.
  log << "\x1b[0;m";

  // Record this BEGIN so the matching END can compute a duration.
  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
1876
UpdateTraceEventDuration(const unsigned char * category_group_enabled,const char * name,TraceEventHandle handle)1877 void TraceLog::UpdateTraceEventDuration(
1878 const unsigned char* category_group_enabled,
1879 const char* name,
1880 TraceEventHandle handle) {
1881 if (!*category_group_enabled)
1882 return;
1883
1884 UpdateTraceEventDurationExplicit(
1885 category_group_enabled, name, handle, base::PlatformThread::CurrentId(),
1886 /*explicit_timestamps=*/false, OffsetNow(), ThreadNow());
1887 }
1888
// Fills in the end timestamps (and thus the duration) of a previously added
// TRACE_EVENT_PHASE_COMPLETE event identified by |handle|.
// |explicit_timestamps| means |now|/|thread_now| were supplied by the caller
// rather than sampled at the call site.
void TraceLog::UpdateTraceEventDurationExplicit(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle,
    PlatformThreadId thread_id,
    bool explicit_timestamps,
    const TimeTicks& now,
    const ThreadTicks& thread_now) {
  // Fast-out when the category has been disabled since the event began.
  if (!*category_group_enabled)
    return;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event) {
    return;
  }
  const AutoReset<bool> resetter(&thread_is_in_trace_event, true);

#if BUILDFLAG(IS_WIN)
  // Generate an ETW event that marks the end of a complete event.
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddCompleteEndEvent(category_group_enabled, name);
#endif  // BUILDFLAG(IS_WIN)

  if (*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) {
    // An installed override handles the update entirely; skip the legacy
    // buffer path below.
    auto update_duration_override =
        update_duration_override_.load(std::memory_order_relaxed);
    if (update_duration_override) {
      update_duration_override(category_group_enabled, name, handle, thread_id,
                               explicit_timestamps, now, thread_now);
      return;
    }
  }

  std::string console_message;
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(&lock_);

    // |trace_event| may be null if the event was already flushed/recycled;
    // EventToConsoleMessage tolerates a null event.
    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);

      trace_event->UpdateDuration(now, thread_now);
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message =
          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
    }
  }

  // Log outside the lock scope to keep the critical section small.
  if (!console_message.empty())
    LOG(ERROR) << console_message;
}
1944
MangleEventId(uint64_t id)1945 uint64_t TraceLog::MangleEventId(uint64_t id) {
1946 return id ^ process_id_hash_;
1947 }
1948
1949 template <typename T>
AddMetadataEventWhileLocked(PlatformThreadId thread_id,const char * metadata_name,const char * arg_name,const T & value)1950 void TraceLog::AddMetadataEventWhileLocked(PlatformThreadId thread_id,
1951 const char* metadata_name,
1952 const char* arg_name,
1953 const T& value) {
1954 auto trace_event_override =
1955 add_trace_event_override_.load(std::memory_order_relaxed);
1956 if (trace_event_override) {
1957 TraceEvent trace_event;
1958 InitializeMetadataEvent(&trace_event, thread_id, metadata_name, arg_name,
1959 value);
1960 trace_event_override(&trace_event, /*thread_will_flush=*/true, nullptr);
1961 } else {
1962 InitializeMetadataEvent(
1963 AddEventToThreadSharedChunkWhileLocked(nullptr, false), thread_id,
1964 metadata_name, arg_name, value);
1965 }
1966 }
1967
// Writes all pending and synthesized metadata events (queued metadata, CPU
// count, sort indices, process labels, buffer-overflow marker, Android
// library info) into the trace. Caller must hold |lock_|.
void TraceLog::AddMetadataEventsWhileLocked() {
  auto trace_event_override =
      add_trace_event_override_.load(std::memory_order_relaxed);

  // Move metadata added by |AddMetadataEvent| into the trace log.
  if (trace_event_override) {
    while (!metadata_events_.empty()) {
      trace_event_override(metadata_events_.back().get(),
                           /*thread_will_flush=*/true, nullptr);
      metadata_events_.pop_back();
    }
  } else {
    while (!metadata_events_.empty()) {
      TraceEvent* event =
          AddEventToThreadSharedChunkWhileLocked(nullptr, false);
      *event = std::move(*metadata_events_.back());
      metadata_events_.pop_back();
    }
  }

#if !BUILDFLAG(IS_NACL)  // NaCl shouldn't expose the process id.
  AddMetadataEventWhileLocked(0, "num_cpus", "number",
                              base::SysInfo::NumberOfProcessors());
#endif

  auto current_thread_id = base::PlatformThread::CurrentId();
  // Only emit the sort index when it differs from the default (0).
  if (process_sort_index_ != 0) {
    AddMetadataEventWhileLocked(current_thread_id, "process_sort_index",
                                "sort_index", process_sort_index_);
  }

#if BUILDFLAG(IS_ANDROID)
  // Record the main library's load address and ELF build id so traces can be
  // symbolized offline.
  AddMetadataEventWhileLocked(current_thread_id, "chrome_library_address",
                              "start_address",
                              base::StringPrintf("%p", &__executable_start));
  base::debug::ElfBuildIdBuffer build_id;
  size_t build_id_length =
      base::debug::ReadElfBuildId(&__executable_start, true, build_id);
  if (build_id_length > 0) {
    AddMetadataEventWhileLocked(current_thread_id, "chrome_library_module",
                                "id", std::string(build_id));
  }
#endif

  // Process labels are emitted as one comma-joined string.
  if (!process_labels_.empty()) {
    std::vector<base::StringPiece> labels;
    for (const auto& it : process_labels_)
      labels.push_back(it.second);
    AddMetadataEventWhileLocked(current_thread_id, "process_labels", "labels",
                                base::JoinString(labels, ","));
  }

  // Thread sort indices.
  for (const auto& it : thread_sort_indices_) {
    if (it.second == 0)
      continue;
    AddMetadataEventWhileLocked(it.first, "thread_sort_index", "sort_index",
                                it.second);
  }

  // If buffer is full, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    AddMetadataEventWhileLocked(current_thread_id, "trace_buffer_overflowed",
                                "overflowed_at_ts",
                                buffer_limit_reached_timestamp_);
  }
}
2035
// Public lookup of a TraceEvent by handle. Passing a null lock means the
// internal helper performs no lock acquisition (see GetEventByHandleInternal).
TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, nullptr);
}
2039
// Resolves a TraceEventHandle back to its TraceEvent: checks the calling
// thread's local buffer first, then (acquiring |lock| if one was provided)
// the shared chunk and finally the main buffer. Returns nullptr for an unset
// handle or when the event is no longer reachable.
TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock)
    NO_THREAD_SAFETY_ANALYSIS {
  // A zero chunk_seq marks an invalid/unset handle.
  if (!handle.chunk_seq)
    return nullptr;

  DCHECK(handle.chunk_seq);
  DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
  DCHECK(handle.event_index <= TraceBufferChunk::kTraceBufferChunkSize - 1);

  if (thread_local_event_buffer) {
    TraceEvent* trace_event =
        thread_local_event_buffer->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event has been out-of-control of the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  // NO_THREAD_SAFETY_ANALYSIS: runtime-dependent locking here.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    // The sequence number must also match, otherwise the chunk has been
    // recycled since the handle was created.
    return handle.chunk_seq == thread_shared_chunk_->seq()
               ? thread_shared_chunk_->GetEventAt(handle.event_index)
               : nullptr;
  }

  return logged_events_->GetEventByHandle(handle);
}
2072
SetProcessID(ProcessId process_id)2073 void TraceLog::SetProcessID(ProcessId process_id) {
2074 process_id_ = process_id;
2075 // Create a FNV hash from the process ID for XORing.
2076 // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
2077 const uint64_t kOffsetBasis = 14695981039346656037ull;
2078 const uint64_t kFnvPrime = 1099511628211ull;
2079 const uint64_t pid = static_cast<uint64_t>(process_id_);
2080 process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
2081 }
2082
// Sets this process's sort index. Takes |lock_| because the value is read
// during metadata emission (AddMetadataEventsWhileLocked).
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}
2087
// Propagates a new process name (and pid) to the perfetto process track
// descriptor, when the perfetto client library is in use and initialized.
// No-op otherwise.
void TraceLog::OnSetProcessName(const std::string& process_name) {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  if (perfetto::Tracing::IsInitialized()) {
    // Read-modify-write the serialized track descriptor.
    auto track = perfetto::ProcessTrack::Current();
    auto desc = track.Serialize();
    desc.mutable_process()->set_process_name(process_name);
    desc.mutable_process()->set_pid(static_cast<int>(process_id_));
    TrackEvent::SetTrackDescriptor(track, std::move(desc));
  }
#endif
}
2099
UpdateProcessLabel(int label_id,const std::string & current_label)2100 void TraceLog::UpdateProcessLabel(int label_id,
2101 const std::string& current_label) {
2102 if (!current_label.length())
2103 return RemoveProcessLabel(label_id);
2104
2105 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
2106 if (perfetto::Tracing::IsInitialized()) {
2107 auto track = perfetto::ProcessTrack::Current();
2108 auto desc = track.Serialize();
2109 desc.mutable_process()->add_process_labels(current_label);
2110 TrackEvent::SetTrackDescriptor(track, std::move(desc));
2111 }
2112 #endif
2113
2114 AutoLock lock(lock_);
2115 process_labels_[label_id] = current_label;
2116 }
2117
// Removes the process label registered under |label_id|, if any.
void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  process_labels_.erase(label_id);
}
2122
// Sets the sort index for |thread_id|; read during metadata emission
// (AddMetadataEventsWhileLocked), hence the |lock_| acquisition.
void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[thread_id] = sort_index;
}
2127
#if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// Sets the timestamp offset applied to recorded events — presumably consumed
// by OffsetTimestamp()/OffsetNow() (confirm in their definitions), e.g. to
// align clocks across processes.
void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
2133
// Test-only: returns the number of registered synchronous enabled-state
// observers, under |observers_lock_|.
size_t TraceLog::GetObserverCountForTest() const {
  AutoLock lock(observers_lock_);
  return enabled_state_observers_.size();
}
2138
// Marks the calling thread as one whose message loop may block, and flushes
// (by destroying) its thread-local event buffer so buffered events aren't
// stranded.
void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop = true;
  // This will flush the thread local buffer.
  delete thread_local_event_buffer;
}
2144
CreateTraceBuffer()2145 TraceBuffer* TraceLog::CreateTraceBuffer() {
2146 HEAP_PROFILER_SCOPED_IGNORE;
2147 InternalTraceOptions options = trace_options();
2148 const size_t config_buffer_chunks =
2149 trace_config_.GetTraceBufferSizeInEvents() / kTraceBufferChunkSize;
2150 if (options & kInternalRecordContinuously) {
2151 return TraceBuffer::CreateTraceBufferRingBuffer(
2152 config_buffer_chunks > 0 ? config_buffer_chunks
2153 : kTraceEventRingBufferChunks);
2154 }
2155 if (options & kInternalEchoToConsole) {
2156 return TraceBuffer::CreateTraceBufferRingBuffer(
2157 config_buffer_chunks > 0 ? config_buffer_chunks
2158 : kEchoToConsoleTraceEventBufferChunks);
2159 }
2160 if (options & kInternalRecordAsMuchAsPossible) {
2161 return TraceBuffer::CreateTraceBufferVectorOfSize(
2162 config_buffer_chunks > 0 ? config_buffer_chunks
2163 : kTraceEventVectorBigBufferChunks);
2164 }
2165 return TraceBuffer::CreateTraceBufferVectorOfSize(
2166 config_buffer_chunks > 0 ? config_buffer_chunks
2167 : kTraceEventVectorBufferChunks);
2168 }
2169
2170 #if BUILDFLAG(IS_WIN)
UpdateETWCategoryGroupEnabledFlags()2171 void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
2172 // Go through each category and set/clear the ETW bit depending on whether the
2173 // category is enabled.
2174 for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
2175 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
2176 category.name())) {
2177 category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
2178 } else {
2179 category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
2180 }
2181 }
2182 }
2183 #endif // BUILDFLAG(IS_WIN)
2184
// Test-only: replaces (and destroys) the current trace buffer with a
// caller-provided one, under |lock_|.
void TraceLog::SetTraceBufferForTesting(
    std::unique_ptr<TraceBuffer> trace_buffer) {
  AutoLock lock(lock_);
  logged_events_ = std::move(trace_buffer);
}
2190
2191 #if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// Perfetto data-source callback: registers the new track-event session under
// |track_event_lock_| (the list is also read by code such as IsEnabled();
// see the locking note in OnStop).
void TraceLog::OnSetup(const perfetto::DataSourceBase::SetupArgs& args) {
  AutoLock lock(track_event_lock_);
  track_event_sessions_.emplace_back(args.internal_instance_index, *args.config,
                                     args.backend_type);
}
2197
// Perfetto data-source callback: counts the session start and notifies legacy
// observers (synchronously or via their task runners) when this is the first
// active session.
void TraceLog::OnStart(const perfetto::DataSourceBase::StartArgs&) {
  ++active_track_event_sessions_;
  // Legacy observers don't support multiple tracing sessions. So we only
  // notify them about the first one.
  if (active_track_event_sessions_ > 1) {
    return;
  }

  AutoLock lock(observers_lock_);
  for (EnabledStateObserver* observer : enabled_state_observers_)
    observer->OnTraceLogEnabled();
  // Async observers are notified on their own task runners.
  for (const auto& it : async_observers_) {
    it.second.task_runner->PostTask(
        FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
                            it.second.observer));
  }
}
2215
// Perfetto data-source callback: unregisters the stopping session and, once
// the last active session ends, notifies legacy observers of the disable.
void TraceLog::OnStop(const perfetto::DataSourceBase::StopArgs& args) {
  {
    // We can't use |lock_| because OnStop() can be called from within
    // SetDisabled(). We also can't use |observers_lock_|, because observers
    // below can call into IsEnabled(), which needs to access
    // |track_event_sessions_|. So we use a separate lock.
    AutoLock track_event_lock(track_event_lock_);
    std::erase_if(track_event_sessions_, [&args](
                                             const TrackEventSession& session) {
      return session.internal_instance_index == args.internal_instance_index;
    });
  }

  --active_track_event_sessions_;
  // Legacy observers don't support multiple tracing sessions. So we only
  // notify them when the last one stopped.
  if (active_track_event_sessions_ > 0) {
    return;
  }

  AutoLock lock(observers_lock_);
  for (auto* it : enabled_state_observers_)
    it->OnTraceLogDisabled();
  // Async observers are notified on their own task runners.
  for (const auto& it : async_observers_) {
    it.second.task_runner->PostTask(
        FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
                            it.second.observer));
  }
}
2245 #endif // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
2246
// Default memory-overhead estimate for a convertable argument: accounts only
// for the object's own static size (no owned heap storage is counted here).
void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add(TraceEventMemoryOverhead::kConvertableToTraceFormat,
                sizeof(*this));
}
2252
2253 } // namespace trace_event
2254 } // namespace base
2255
2256 namespace trace_event_internal {
2257
AddTraceEvent(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,base::trace_event::TraceArguments * args,unsigned int flags)2258 base::trace_event::TraceEventHandle AddTraceEvent(
2259 char phase,
2260 const unsigned char* category_group_enabled,
2261 const char* name,
2262 const char* scope,
2263 uint64_t id,
2264 base::trace_event::TraceArguments* args,
2265 unsigned int flags) {
2266 return base::trace_event::TraceLog::GetInstance()->AddTraceEvent(
2267 phase, category_group_enabled, name, scope, id, args, flags);
2268 }
2269
AddTraceEventWithBindId(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,uint64_t bind_id,base::trace_event::TraceArguments * args,unsigned int flags)2270 base::trace_event::TraceEventHandle AddTraceEventWithBindId(
2271 char phase,
2272 const unsigned char* category_group_enabled,
2273 const char* name,
2274 const char* scope,
2275 uint64_t id,
2276 uint64_t bind_id,
2277 base::trace_event::TraceArguments* args,
2278 unsigned int flags) {
2279 return base::trace_event::TraceLog::GetInstance()->AddTraceEventWithBindId(
2280 phase, category_group_enabled, name, scope, id, bind_id, args, flags);
2281 }
2282
AddTraceEventWithProcessId(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,base::ProcessId process_id,base::trace_event::TraceArguments * args,unsigned int flags)2283 base::trace_event::TraceEventHandle AddTraceEventWithProcessId(
2284 char phase,
2285 const unsigned char* category_group_enabled,
2286 const char* name,
2287 const char* scope,
2288 uint64_t id,
2289 base::ProcessId process_id,
2290 base::trace_event::TraceArguments* args,
2291 unsigned int flags) {
2292 return base::trace_event::TraceLog::GetInstance()->AddTraceEventWithProcessId(
2293 phase, category_group_enabled, name, scope, id, process_id, args, flags);
2294 }
2295
AddTraceEventWithThreadIdAndTimestamp(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,base::PlatformThreadId thread_id,const base::TimeTicks & timestamp,base::trace_event::TraceArguments * args,unsigned int flags)2296 base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
2297 char phase,
2298 const unsigned char* category_group_enabled,
2299 const char* name,
2300 const char* scope,
2301 uint64_t id,
2302 base::PlatformThreadId thread_id,
2303 const base::TimeTicks& timestamp,
2304 base::trace_event::TraceArguments* args,
2305 unsigned int flags) {
2306 return base::trace_event::TraceLog::GetInstance()
2307 ->AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
2308 name, scope, id, thread_id,
2309 timestamp, args, flags);
2310 }
2311
AddTraceEventWithThreadIdAndTimestamp(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,uint64_t bind_id,base::PlatformThreadId thread_id,const base::TimeTicks & timestamp,base::trace_event::TraceArguments * args,unsigned int flags)2312 base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
2313 char phase,
2314 const unsigned char* category_group_enabled,
2315 const char* name,
2316 const char* scope,
2317 uint64_t id,
2318 uint64_t bind_id,
2319 base::PlatformThreadId thread_id,
2320 const base::TimeTicks& timestamp,
2321 base::trace_event::TraceArguments* args,
2322 unsigned int flags) {
2323 return base::trace_event::TraceLog::GetInstance()
2324 ->AddTraceEventWithThreadIdAndTimestamp(
2325 phase, category_group_enabled, name, scope, id, bind_id, thread_id,
2326 timestamp, args, flags);
2327 }
2328
AddTraceEventWithThreadIdAndTimestamps(char phase,const unsigned char * category_group_enabled,const char * name,const char * scope,uint64_t id,base::PlatformThreadId thread_id,const base::TimeTicks & timestamp,const base::ThreadTicks & thread_timestamp,unsigned int flags)2329 base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamps(
2330 char phase,
2331 const unsigned char* category_group_enabled,
2332 const char* name,
2333 const char* scope,
2334 uint64_t id,
2335 base::PlatformThreadId thread_id,
2336 const base::TimeTicks& timestamp,
2337 const base::ThreadTicks& thread_timestamp,
2338 unsigned int flags) {
2339 return base::trace_event::TraceLog::GetInstance()
2340 ->AddTraceEventWithThreadIdAndTimestamps(
2341 phase, category_group_enabled, name, scope, id,
2342 /*bind_id=*/trace_event_internal::kNoId, thread_id, timestamp,
2343 thread_timestamp, nullptr, flags);
2344 }
2345
AddMetadataEvent(const unsigned char * category_group_enabled,const char * name,base::trace_event::TraceArguments * args,unsigned int flags)2346 void AddMetadataEvent(const unsigned char* category_group_enabled,
2347 const char* name,
2348 base::trace_event::TraceArguments* args,
2349 unsigned int flags) {
2350 return base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
2351 category_group_enabled, name, args, flags);
2352 }
2353
// Free-function shim: returns TraceLog's count of recorded traces.
int GetNumTracesRecorded() {
  return base::trace_event::TraceLog::GetInstance()->GetNumTracesRecorded();
}
2357
UpdateTraceEventDuration(const unsigned char * category_group_enabled,const char * name,base::trace_event::TraceEventHandle handle)2358 void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
2359 const char* name,
2360 base::trace_event::TraceEventHandle handle) {
2361 return base::trace_event::TraceLog::GetInstance()->UpdateTraceEventDuration(
2362 category_group_enabled, name, handle);
2363 }
2364
UpdateTraceEventDurationExplicit(const unsigned char * category_group_enabled,const char * name,base::trace_event::TraceEventHandle handle,base::PlatformThreadId thread_id,bool explicit_timestamps,const base::TimeTicks & now,const base::ThreadTicks & thread_now)2365 void UpdateTraceEventDurationExplicit(
2366 const unsigned char* category_group_enabled,
2367 const char* name,
2368 base::trace_event::TraceEventHandle handle,
2369 base::PlatformThreadId thread_id,
2370 bool explicit_timestamps,
2371 const base::TimeTicks& now,
2372 const base::ThreadTicks& thread_now) {
2373 return base::trace_event::TraceLog::GetInstance()
2374 ->UpdateTraceEventDurationExplicit(category_group_enabled, name, handle,
2375 thread_id, explicit_timestamps, now,
2376 thread_now);
2377 }
2378
2379 #if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
// RAII scope: opens a TRACE_EVENT_PHASE_COMPLETE event on construction when
// the category is enabled; the matching end is emitted by the destructor.
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group,
    const char* name) {
  // The single atom works because for now the category_group can only be "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  // Record only when the category is currently enabled.
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kGlobalScope,  // scope
            trace_event_internal::kNoId,  // id
            base::PlatformThread::CurrentId(),  // thread_id
            TRACE_TIME_TICKS_NOW(), nullptr, TRACE_EVENT_FLAG_NONE);
  }
}
2399
// Closes the COMPLETE event opened in the constructor. Note the enabled flag
// is re-checked here, so an event begun while tracing was enabled may skip
// its duration update if the category was disabled in between.
ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                event_handle_);
  }
}
2406 #endif // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
2407
2408 } // namespace trace_event_internal
2409