// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_
#define BASE_DEBUG_TRACE_EVENT_IMPL_H_

#include <stack>
#include <string>
#include <vector>

#include "base/atomicops.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/scoped_vector.h"
#include "base/observer_list.h"
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/threading/thread_local.h"
#include "base/timer/timer.h"

// Older style trace macros with explicit id and extra data
// Only these macros result in publishing data to ETW as currently implemented.
#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_BEGIN, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_END_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_END, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_INSTANT, \
        name, reinterpret_cast<const void*>(id), extra)

template <typename Type>
struct DefaultSingletonTraits;

#if defined(COMPILER_GCC)
namespace BASE_HASH_NAMESPACE {
template <>
struct hash<base::MessageLoop*> {
  std::size_t operator()(base::MessageLoop* value) const {
    return reinterpret_cast<std::size_t>(value);
  }
};
}  // BASE_HASH_NAMESPACE
#endif

namespace base {

class WaitableEvent;
class MessageLoop;

namespace debug {

// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
// class must implement this interface.
class ConvertableToTraceFormat : public RefCounted<ConvertableToTraceFormat> {
 public:
  // Append the class info to the provided |out| string. The appended
  // data must be a valid JSON object. Strings must be properly quoted, and
  // escaped. There is no processing applied to the content after it is
  // appended.
  virtual void AppendAsTraceFormat(std::string* out) const = 0;

 protected:
  virtual ~ConvertableToTraceFormat() {}

 private:
  friend class RefCounted<ConvertableToTraceFormat>;
};
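// A minimal sketch (illustrative only, not part of this header) of a
// client-defined argument type satisfying the interface above; the class
// name and the JSON field are hypothetical:
//
//   class HypotheticalFrameData
//       : public base::debug::ConvertableToTraceFormat {
//    public:
//     virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE {
//       // Must append a valid, fully quoted/escaped JSON object.
//       out->append("{\"frame_count\":3}");
//     }
//
//    private:
//     virtual ~HypotheticalFrameData() {}
//   };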
struct TraceEventHandle {
  uint32 chunk_seq;
  uint16 chunk_index;
  uint16 event_index;
};

const int kTraceMaxNumArgs = 2;

class BASE_EXPORT TraceEvent {
 public:
  union TraceValue {
    bool as_bool;
    unsigned long long as_uint;
    long long as_int;
    double as_double;
    const void* as_pointer;
    const char* as_string;
  };

  TraceEvent();
  ~TraceEvent();

  // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
  // Use the explicit copy method to avoid accidental misuse of copying.
  void CopyFrom(const TraceEvent& other);

  void Initialize(
      int thread_id,
      TimeTicks timestamp,
      TimeTicks thread_timestamp,
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);

  void Reset();

  void UpdateDuration(const TimeTicks& now, const TimeTicks& thread_now);

  // Serialize event data to JSON
  void AppendAsJSON(std::string* out) const;
  void AppendPrettyPrinted(std::ostringstream* out) const;

  static void AppendValueAsJSON(unsigned char type,
                                TraceValue value,
                                std::string* out);

  TimeTicks timestamp() const { return timestamp_; }
  TimeTicks thread_timestamp() const { return thread_timestamp_; }
  char phase() const { return phase_; }
  int thread_id() const { return thread_id_; }
  TimeDelta duration() const { return duration_; }
  TimeDelta thread_duration() const { return thread_duration_; }
  unsigned long long id() const { return id_; }
  unsigned char flags() const { return flags_; }

  // Exposed for unittesting:

  const base::RefCountedString* parameter_copy_storage() const {
    return parameter_copy_storage_.get();
  }

  const unsigned char* category_group_enabled() const {
    return category_group_enabled_;
  }

  const char* name() const { return name_; }

#if defined(OS_ANDROID)
  void SendToATrace();
#endif

 private:
  // Note: these are ordered by size (largest first) for optimal packing.
  TimeTicks timestamp_;
  TimeTicks thread_timestamp_;
  TimeDelta duration_;
  TimeDelta thread_duration_;
  // id_ can be used to store phase-specific data.
  unsigned long long id_;
  TraceValue arg_values_[kTraceMaxNumArgs];
  const char* arg_names_[kTraceMaxNumArgs];
  scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
  const unsigned char* category_group_enabled_;
  const char* name_;
  scoped_refptr<base::RefCountedString> parameter_copy_storage_;
  int thread_id_;
  char phase_;
  unsigned char flags_;
  unsigned char arg_types_[kTraceMaxNumArgs];

  DISALLOW_COPY_AND_ASSIGN(TraceEvent);
};

// TraceBufferChunk is the basic unit of TraceBuffer.
class BASE_EXPORT TraceBufferChunk {
 public:
  TraceBufferChunk(uint32 seq)
      : next_free_(0),
        seq_(seq) {
  }

  void Reset(uint32 new_seq);
  TraceEvent* AddTraceEvent(size_t* event_index);
  bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }

  uint32 seq() const { return seq_; }
  size_t capacity() const { return kTraceBufferChunkSize; }
  size_t size() const { return next_free_; }

  TraceEvent* GetEventAt(size_t index) {
    DCHECK(index < size());
    return &chunk_[index];
  }
  const TraceEvent* GetEventAt(size_t index) const {
    DCHECK(index < size());
    return &chunk_[index];
  }

  scoped_ptr<TraceBufferChunk> Clone() const;

  static const size_t kTraceBufferChunkSize = 64;

 private:
  size_t next_free_;
  TraceEvent chunk_[kTraceBufferChunkSize];
  uint32 seq_;
};

// TraceBuffer holds the events as they are collected.
class BASE_EXPORT TraceBuffer {
 public:
  virtual ~TraceBuffer() {}

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) = 0;

  virtual bool IsFull() const = 0;
  virtual size_t Size() const = 0;
  virtual size_t Capacity() const = 0;
  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;

  // For iteration. Each TraceBuffer can only be iterated once.
  virtual const TraceBufferChunk* NextChunk() = 0;

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
};
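// A minimal sketch (illustrative, not part of this header) of the chunk
// handshake the two classes above imply: a writer takes ownership of a chunk
// from the buffer, fills it, and hands it back. |buffer| is assumed to point
// at some concrete TraceBuffer implementation.
//
//   size_t chunk_index;
//   scoped_ptr<TraceBufferChunk> chunk = buffer->GetChunk(&chunk_index);
//   if (chunk) {
//     size_t event_index;
//     TraceEvent* event = chunk->AddTraceEvent(&event_index);
//     // ... fill in |event| via TraceEvent::Initialize() ...
//     if (chunk->IsFull())
//       buffer->ReturnChunk(chunk_index, chunk.Pass());
//   }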
// TraceResultBuffer collects and converts trace fragments returned by TraceLog
// to JSON output.
class BASE_EXPORT TraceResultBuffer {
 public:
  typedef base::Callback<void(const std::string&)> OutputCallback;

  // If you don't need to stream JSON chunks out efficiently, and just want to
  // get a complete JSON string after calling Finish, use this struct to
  // collect JSON trace output.
  struct BASE_EXPORT SimpleOutput {
    OutputCallback GetCallback();
    void Append(const std::string& json_string);

    // Do what you want with the json_output string after calling
    // TraceResultBuffer::Finish.
    std::string json_output;
  };

  TraceResultBuffer();
  ~TraceResultBuffer();

  // Set callback. The callback will be called during Start with the initial
  // JSON output and during AddFragment and Finish with following JSON output
  // chunks. The callback target must live past the last calls to
  // TraceResultBuffer::Start/AddFragment/Finish.
  void SetOutputCallback(const OutputCallback& json_chunk_callback);

  // Start JSON output. This resets all internal state, so you can reuse
  // the TraceResultBuffer by calling Start.
  void Start();

  // Call AddFragment 0 or more times to add trace fragments from TraceLog.
  void AddFragment(const std::string& trace_fragment);

  // When all fragments have been added, call Finish to complete the JSON
  // formatted output.
  void Finish();

 private:
  OutputCallback output_callback_;
  bool append_comma_;
};
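// A minimal usage sketch (illustrative, not part of this header): collect
// trace fragments into a single JSON string via SimpleOutput. The |fragment|
// values would typically come from TraceLog::Flush.
//
//   base::debug::TraceResultBuffer result_buffer;
//   base::debug::TraceResultBuffer::SimpleOutput simple_output;
//   result_buffer.SetOutputCallback(simple_output.GetCallback());
//   result_buffer.Start();
//   result_buffer.AddFragment(fragment);  // 0 or more times.
//   result_buffer.Finish();
//   // simple_output.json_output now holds the complete JSON document.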
class BASE_EXPORT CategoryFilter {
 public:
  typedef std::vector<std::string> StringList;

  // The default category filter, used when none is provided.
  // Allows all categories through, except if they end in the suffix 'Debug' or
  // 'Test'.
  static const char* kDefaultCategoryFilterString;

  // |filter_string| is a comma-delimited list of category wildcards.
  // A category can have an optional '-' prefix to make it an excluded
  // category. All the same rules apply above, so for example, having both
  // included and excluded categories in the same list would not be supported.
  //
  // Example: CategoryFilter("test_MyTest*");
  // Example: CategoryFilter("test_MyTest*,test_OtherStuff");
  // Example: CategoryFilter("-excluded_category1,-excluded_category2");
  // Example: CategoryFilter("-*,webkit"); would disable everything but webkit.
  // Example: CategoryFilter("-webkit"); would enable everything but webkit.
  //
  // Category filters can also be used to configure synthetic delays.
  //
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16)"); would make swap
  //          buffers always take at least 16 ms.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;oneshot)"); would
  //          make swap buffers take at least 16 ms the first time it is
  //          called.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;alternating)");
  //          would make swap buffers take at least 16 ms every other time it
  //          is called.
  explicit CategoryFilter(const std::string& filter_string);

  CategoryFilter(const CategoryFilter& cf);

  ~CategoryFilter();

  CategoryFilter& operator=(const CategoryFilter& rhs);

  // Writes the string representation of the CategoryFilter. This is a comma
  // separated string, similar in nature to the one used to determine
  // enabled/disabled category patterns, except here there is an arbitrary
  // order, included categories go first, then excluded categories. Excluded
  // categories are distinguished from included categories by the prefix '-'.
  std::string ToString() const;

  // Determines whether the category group would be enabled or
  // disabled by this category filter.
  bool IsCategoryGroupEnabled(const char* category_group) const;

  // Return a list of the synthetic delays specified in this category filter.
  const StringList& GetSyntheticDelayValues() const;

  // Merges nested_filter with the current CategoryFilter.
  void Merge(const CategoryFilter& nested_filter);

  // Clears both included/excluded pattern lists. This would be equivalent to
  // creating a CategoryFilter with an empty string, through the constructor.
  // i.e: CategoryFilter("").
  //
  // When using an empty filter, all categories are considered included as we
  // are not excluding anything.
  void Clear();

 private:
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, CategoryFilter);

  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
      const std::string& str);

  void Initialize(const std::string& filter_string);
  void WriteString(const StringList& values,
                   std::string* out,
                   bool included) const;
  void WriteString(const StringList& delays, std::string* out) const;
  bool HasIncludedPatterns() const;

  bool DoesCategoryGroupContainCategory(const char* category_group,
                                        const char* category) const;

  StringList included_;
  StringList disabled_;
  StringList excluded_;
  StringList delays_;
};
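// A minimal sketch (illustrative, not part of this header) of constructing
// and querying a filter; the category names are hypothetical:
//
//   base::debug::CategoryFilter filter("webkit,-webkit_debug");
//   bool enabled = filter.IsCategoryGroupEnabled("webkit");
//   // |enabled| is expected to be true given the documented semantics.
//   filter.Merge(base::debug::CategoryFilter("gpu"));
//   std::string serialized = filter.ToString();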
class TraceSamplingThread;

class BASE_EXPORT TraceLog {
 public:
  enum Mode {
    DISABLED = 0,
    RECORDING_MODE,
    MONITORING_MODE,
  };

  // Options determines how the trace buffer stores data.
  enum Options {
    // Record until the trace buffer is full.
    RECORD_UNTIL_FULL = 1 << 0,

    // Record until the user ends the trace. The trace buffer is a fixed size
    // and we use it as a ring buffer during recording.
    RECORD_CONTINUOUSLY = 1 << 1,

    // Enable the sampling profiler in the recording mode.
    ENABLE_SAMPLING = 1 << 2,

    // Echo to console. Events are discarded.
    ECHO_TO_CONSOLE = 1 << 3,
  };

  // The pointer returned from GetCategoryGroupEnabledInternal() points to a
  // value with zero or more of the following bits. Used in this class only.
  // The TRACE_EVENT macros should only use the value as a bool.
  // These values must be in sync with macro values in TraceEvent.h in Blink.
  enum CategoryGroupEnabledFlags {
    // Category group enabled for the recording mode.
    ENABLED_FOR_RECORDING = 1 << 0,
    // Category group enabled for the monitoring mode.
    ENABLED_FOR_MONITORING = 1 << 1,
    // Category group enabled by SetEventCallbackEnabled().
    ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
  };

  static TraceLog* GetInstance();

  // Get set of known category groups. This can change as new code paths are
  // reached. The known category groups are inserted into |category_groups|.
  void GetKnownCategoryGroups(std::vector<std::string>* category_groups);

  // Retrieves a copy (for thread-safety) of the current CategoryFilter.
  CategoryFilter GetCurrentCategoryFilter();

  Options trace_options() const {
    return static_cast<Options>(subtle::NoBarrier_Load(&trace_options_));
  }

  // Enables normal tracing (recording trace events in the trace buffer).
  // See CategoryFilter comments for details on how to control what categories
  // will be traced. If tracing has already been enabled, |category_filter|
  // will be merged into the current category filter.
  void SetEnabled(const CategoryFilter& category_filter,
                  Mode mode, Options options);

  // Disables normal tracing for all categories.
  void SetDisabled();

  bool IsEnabled() { return mode_ != DISABLED; }

  // The number of times we have begun recording traces. If tracing is off,
  // returns -1. If tracing is on, then it returns the number of times we have
  // recorded a trace. By watching for this number to increment, you can
  // passively discover when a new trace has begun. This is then used to
  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
  int GetNumTracesRecorded();

#if defined(OS_ANDROID)
  void StartATrace();
  void StopATrace();
  void AddClockSyncMetadataEvent();
#endif

  // Enabled state listeners give a callback when tracing is enabled or
  // disabled. This can be used to tie into other libraries' tracing systems
  // on-demand.
  class EnabledStateObserver {
   public:
    // Called just after the tracing system becomes enabled, outside of the
    // |lock_|. TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Called just after the tracing system disables, outside of the |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };
  void AddEnabledStateObserver(EnabledStateObserver* listener);
  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
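  // A minimal sketch (illustrative, not part of this header) of an observer
  // that mirrors tracing state into some hypothetical external library:
  //
  //   class HypotheticalTracingBridge
  //       : public base::debug::TraceLog::EnabledStateObserver {
  //    public:
  //     virtual void OnTraceLogEnabled() OVERRIDE {
  //       // Start tracing in the external library.
  //     }
  //     virtual void OnTraceLogDisabled() OVERRIDE {
  //       // Stop tracing in the external library.
  //     }
  //   };
  //
  //   // Registered with:
  //   //   TraceLog::GetInstance()->AddEnabledStateObserver(&bridge);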
  float GetBufferPercentFull() const;
  bool BufferIsFull() const;

  // Not using base::Callback because it is limited to 7 parameters.
  // Also, using primitive type allows directly passing callback from WebCore.
  // WARNING: It is possible for the previously set callback to be called
  // after a call to SetEventCallbackEnabled() that replaces it, or after a
  // call to SetEventCallbackDisabled() that disables it.
  // This callback may be invoked on any thread.
  // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
  // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
  // interface simple.
  typedef void (*EventCallback)(TimeTicks timestamp,
                                char phase,
                                const unsigned char* category_group_enabled,
                                const char* name,
                                unsigned long long id,
                                int num_args,
                                const char* const arg_names[],
                                const unsigned char arg_types[],
                                const unsigned long long arg_values[],
                                unsigned char flags);

  // Enable tracing for EventCallback.
  void SetEventCallbackEnabled(const CategoryFilter& category_filter,
                               EventCallback cb);
  void SetEventCallbackDisabled();

  // Flush all collected events to the given output callback. The callback will
  // be called one or more times either synchronously or asynchronously from
  // the current thread with IPC-bite-size chunks. The string format is
  // undefined. Use TraceResultBuffer to convert one or more trace strings to
  // JSON. The callback can be null if the caller doesn't want any data.
  // Due to the implementation of thread-local buffers, flush can't be
  // done when tracing is enabled. If called when tracing is enabled, the
  // callback will be called directly with (empty_string, false) to indicate
  // the end of this unsuccessful flush.
  typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
                              bool has_more_events)> OutputCallback;
  void Flush(const OutputCallback& cb);
  void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
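  // A minimal sketch (illustrative, not part of this header) of draining the
  // log into a TraceResultBuffer after disabling tracing; the free function
  // and the binding are hypothetical:
  //
  //   void OnTraceDataCollected(
  //       base::debug::TraceResultBuffer* result_buffer,
  //       const scoped_refptr<base::RefCountedString>& events_str,
  //       bool has_more_events) {
  //     result_buffer->AddFragment(events_str->data());
  //     if (!has_more_events)
  //       result_buffer->Finish();
  //   }
  //
  //   // After calling result_buffer.Start():
  //   TraceLog::GetInstance()->SetDisabled();
  //   TraceLog::GetInstance()->Flush(
  //       base::Bind(&OnTraceDataCollected,
  //                  base::Unretained(&result_buffer)));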
  // Called by TRACE_EVENT* macros, don't call this directly.
  // The name parameter is a category group, for example:
  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
  static const unsigned char* GetCategoryGroupEnabled(const char* name);
  static const char* GetCategoryGroupName(
      const unsigned char* category_group_enabled);

  // Called by TRACE_EVENT* macros, don't call this directly.
  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
  TraceEventHandle AddTraceEvent(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int thread_id,
      const TimeTicks& timestamp,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const char* extra);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const std::string& extra);

  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceEventHandle handle);

  // For every matching event, the callback will be called.
  typedef base::Callback<void()> WatchEventCallback;
  void SetWatchEvent(const std::string& category_name,
                     const std::string& event_name,
                     const WatchEventCallback& callback);
  // Cancel the watch event. If tracing is enabled, this may race with the
  // watch event notification firing.
  void CancelWatchEvent();

  int process_id() const { return process_id_; }

  // Exposed for unittesting:

  void WaitSamplingEventForTesting();

  // Allows deleting our singleton instance.
  static void DeleteForTesting();

  // Allow tests to inspect TraceEvents.
  size_t GetEventsSize() const { return logged_events_->Size(); }
  TraceEvent* GetEventByHandle(TraceEventHandle handle);

  void SetProcessID(int process_id);

  // Process sort indices, if set, override the order in which a process will
  // appear relative to other processes in the trace viewer. Processes are
  // sorted first on their sort index, ascending, then by their name, and then
  // tid.
  void SetProcessSortIndex(int sort_index);

  // Sets the name of the process.
  void SetProcessName(const std::string& process_name);

  // Processes can have labels in addition to their names. Use labels, for
  // instance, to list out the web page titles that a process is handling.
  void UpdateProcessLabel(int label_id, const std::string& current_label);
  void RemoveProcessLabel(int label_id);

  // Thread sort indices, if set, override the order in which a thread will
  // appear within its process in the trace viewer. Threads are sorted first
  // on their sort index, ascending, then by their name, and then tid.
  void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);

  // Allow setting an offset between the current TimeTicks time and the time
  // that should be reported.
  void SetTimeOffset(TimeDelta offset);

  size_t GetObserverCountForTest() const;

  // Call this method if the current thread may block the message loop to
  // prevent the thread from using the thread-local buffer because the thread
  // may not handle the flush request in time, causing loss of unflushed
  // events.
  void SetCurrentThreadBlocksMessageLoop();

 private:
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferGetReturnChunk);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferHalfIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferFullIteration);

  // This allows constructor and destructor to be private and usable only
  // by the Singleton class.
  friend struct DefaultSingletonTraits<TraceLog>;

  // Enable/disable each category group based on the current mode_,
  // category_filter_, event_callback_ and event_callback_category_filter_.
  // Enable the category group in the enabled mode if category_filter_ matches
  // the category group, or event_callback_ is not null and
  // event_callback_category_filter_ matches the category group.
  void UpdateCategoryGroupEnabledFlags();
  void UpdateCategoryGroupEnabledFlag(int category_index);

  // Configure synthetic delays based on the values set in the current
  // category filter.
  void UpdateSyntheticDelaysFromCategoryFilter();

  class ThreadLocalEventBuffer;
  class OptionalAutoLock;

  TraceLog();
  ~TraceLog();
  const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
  void AddMetadataEventsWhileLocked();

  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
  TraceBuffer* CreateTraceBuffer();

  std::string EventToConsoleMessage(unsigned char phase,
                                    const TimeTicks& timestamp,
                                    TraceEvent* trace_event);

  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
                                                     bool check_buffer_is_full);
  void CheckIfBufferIsFullWhileLocked();
  void SetDisabledWhileLocked();

  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
                                       OptionalAutoLock* lock);

  // |generation| is used in the following callbacks to check if the callback
  // is called for the flush of the current |logged_events_|.
  void FlushCurrentThread(int generation);
  void ConvertTraceEventsToTraceFormat(
      scoped_ptr<TraceBuffer> logged_events,
      const TraceLog::OutputCallback& flush_output_callback);
  void FinishFlush(int generation);
  void OnFlushTimeout(int generation);

  int generation() const {
    return static_cast<int>(subtle::NoBarrier_Load(&generation_));
  }
  bool CheckGeneration(int generation) const {
    return generation == this->generation();
  }
  void UseNextTraceBuffer();

  TimeTicks OffsetNow() const {
    return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime());
  }
  TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
    return timestamp - time_offset_;
  }

  // This lock protects TraceLog member accesses (except for members protected
  // by thread_info_lock_) from arbitrary threads.
  mutable Lock lock_;
  // This lock protects accesses to thread_names_, thread_event_start_times_
  // and thread_colors_.
  Lock thread_info_lock_;
  int locked_line_;
  Mode mode_;
  int num_traces_recorded_;
  scoped_ptr<TraceBuffer> logged_events_;
  subtle::AtomicWord /* EventCallback */ event_callback_;
  bool dispatching_to_observer_list_;
  std::vector<EnabledStateObserver*> enabled_state_observer_list_;

  std::string process_name_;
  base::hash_map<int, std::string> process_labels_;
  int process_sort_index_;
  base::hash_map<int, int> thread_sort_indices_;
  base::hash_map<int, std::string> thread_names_;

  // The following two maps are used only when ECHO_TO_CONSOLE.
  base::hash_map<int, std::stack<TimeTicks> > thread_event_start_times_;
  base::hash_map<std::string, int> thread_colors_;

  // XORed with TraceID to make it unlikely to collide with other processes.
  unsigned long long process_id_hash_;

  int process_id_;

  TimeDelta time_offset_;

  // Allow tests to wake up when certain events occur.
  WatchEventCallback watch_event_callback_;
  subtle::AtomicWord /* const unsigned char* */ watch_category_;
  std::string watch_event_name_;

  subtle::AtomicWord /* Options */ trace_options_;

  // Sampling thread handles.
  scoped_ptr<TraceSamplingThread> sampling_thread_;
  PlatformThreadHandle sampling_thread_handle_;

  CategoryFilter category_filter_;
  CategoryFilter event_callback_category_filter_;

  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
  ThreadLocalBoolean thread_blocks_message_loop_;
  ThreadLocalBoolean thread_is_in_trace_event_;

  // Contains the message loops of threads that have had at least one event
  // added into the local event buffer. Not using MessageLoopProxy because we
  // need to know the lifetime of the message loops.
  hash_set<MessageLoop*> thread_message_loops_;

  // For events which can't be added into the thread local buffer, e.g. events
  // from threads without a message loop.
  scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
  size_t thread_shared_chunk_index_;

  // Set when asynchronous Flush is in progress.
  OutputCallback flush_output_callback_;
  scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_;
  subtle::AtomicWord generation_;

  DISALLOW_COPY_AND_ASSIGN(TraceLog);
};

}  // namespace debug
}  // namespace base

#endif  // BASE_DEBUG_TRACE_EVENT_IMPL_H_