// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


#ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_
#define BASE_DEBUG_TRACE_EVENT_IMPL_H_

#include <stack>
#include <string>
#include <vector>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/scoped_vector.h"
#include "base/observer_list.h"
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/threading/thread_local.h"
#include "base/timer/timer.h"

// Older style trace macros with explicit id and extra data.
// Only these macros result in publishing data to ETW as currently implemented.
#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_BEGIN, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_END_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_END, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_INSTANT, \
        name, reinterpret_cast<const void*>(id), extra)

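// A minimal usage sketch (hypothetical call site, not part of this header):
// the macros take a human-readable name, a pointer-sized id used to match the
// begin/end pair, and an extra string payload.
//
//   void Document::Load() {
//     TRACE_EVENT_BEGIN_ETW("Document::Load", this, url_.spec().c_str());
//     // ... do the work ...
//     TRACE_EVENT_END_ETW("Document::Load", this, "done");
//   }
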
template <typename Type>
struct DefaultSingletonTraits;

#if defined(COMPILER_GCC)
namespace BASE_HASH_NAMESPACE {
template <>
struct hash<base::MessageLoop*> {
  std::size_t operator()(base::MessageLoop* value) const {
    return reinterpret_cast<std::size_t>(value);
  }
};
}  // BASE_HASH_NAMESPACE
#endif

namespace base {

class WaitableEvent;
class MessageLoop;

namespace debug {

// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
// class must implement this interface.
class BASE_EXPORT ConvertableToTraceFormat
    : public RefCounted<ConvertableToTraceFormat> {
 public:
  // Append the class info to the provided |out| string. The appended
  // data must be a valid JSON object. Strings must be properly quoted and
  // escaped. There is no processing applied to the content after it is
  // appended.
  virtual void AppendAsTraceFormat(std::string* out) const = 0;

  std::string ToString() const {
    std::string result;
    AppendAsTraceFormat(&result);
    return result;
  }

 protected:
  virtual ~ConvertableToTraceFormat() {}

 private:
  friend class RefCounted<ConvertableToTraceFormat>;
};

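// A sketch of a ConvertableToTraceFormat subclass (hypothetical class and
// member names). The only requirement is that AppendAsTraceFormat() appends a
// valid, self-contained JSON value to |out|.
//
//   class TileCoverageData : public base::debug::ConvertableToTraceFormat {
//    public:
//     virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE {
//       base::StringAppendF(out, "{\"tiles\":%d,\"covered\":%.2f}",
//                           tile_count_, covered_fraction_);
//     }
//
//    private:
//     virtual ~TileCoverageData() {}
//     int tile_count_;
//     double covered_fraction_;
//   };
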
struct TraceEventHandle {
  uint32 chunk_seq;
  uint16 chunk_index;
  uint16 event_index;
};

const int kTraceMaxNumArgs = 2;

class BASE_EXPORT TraceEvent {
 public:
  union TraceValue {
    bool as_bool;
    unsigned long long as_uint;
    long long as_int;
    double as_double;
    const void* as_pointer;
    const char* as_string;
  };

  TraceEvent();
  ~TraceEvent();

  // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
  // Use an explicit copy method to avoid accidental misuse of copying.
  void CopyFrom(const TraceEvent& other);

  void Initialize(
      int thread_id,
      TimeTicks timestamp,
      TimeTicks thread_timestamp,
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);

  void Reset();

  void UpdateDuration(const TimeTicks& now, const TimeTicks& thread_now);

  // Serialize event data to JSON
  void AppendAsJSON(std::string* out) const;
  void AppendPrettyPrinted(std::ostringstream* out) const;

  static void AppendValueAsJSON(unsigned char type,
                                TraceValue value,
                                std::string* out);

  TimeTicks timestamp() const { return timestamp_; }
  TimeTicks thread_timestamp() const { return thread_timestamp_; }
  char phase() const { return phase_; }
  int thread_id() const { return thread_id_; }
  TimeDelta duration() const { return duration_; }
  TimeDelta thread_duration() const { return thread_duration_; }
  unsigned long long id() const { return id_; }
  unsigned char flags() const { return flags_; }

  // Exposed for unittesting:

  const base::RefCountedString* parameter_copy_storage() const {
    return parameter_copy_storage_.get();
  }

  const unsigned char* category_group_enabled() const {
    return category_group_enabled_;
  }

  const char* name() const { return name_; }

#if defined(OS_ANDROID)
  void SendToATrace();
#endif

 private:
  // Note: these are ordered by size (largest first) for optimal packing.
  TimeTicks timestamp_;
  TimeTicks thread_timestamp_;
  TimeDelta duration_;
  TimeDelta thread_duration_;
  // id_ can be used to store phase-specific data.
  unsigned long long id_;
  TraceValue arg_values_[kTraceMaxNumArgs];
  const char* arg_names_[kTraceMaxNumArgs];
  scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
  const unsigned char* category_group_enabled_;
  const char* name_;
  scoped_refptr<base::RefCountedString> parameter_copy_storage_;
  int thread_id_;
  char phase_;
  unsigned char flags_;
  unsigned char arg_types_[kTraceMaxNumArgs];

  DISALLOW_COPY_AND_ASSIGN(TraceEvent);
};

// TraceBufferChunk is the basic unit of TraceBuffer.
class BASE_EXPORT TraceBufferChunk {
 public:
  TraceBufferChunk(uint32 seq)
      : next_free_(0),
        seq_(seq) {
  }

  void Reset(uint32 new_seq);
  TraceEvent* AddTraceEvent(size_t* event_index);
  bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }

  uint32 seq() const { return seq_; }
  size_t capacity() const { return kTraceBufferChunkSize; }
  size_t size() const { return next_free_; }

  TraceEvent* GetEventAt(size_t index) {
    DCHECK(index < size());
    return &chunk_[index];
  }
  const TraceEvent* GetEventAt(size_t index) const {
    DCHECK(index < size());
    return &chunk_[index];
  }

  scoped_ptr<TraceBufferChunk> Clone() const;

  static const size_t kTraceBufferChunkSize = 64;

 private:
  size_t next_free_;
  TraceEvent chunk_[kTraceBufferChunkSize];
  uint32 seq_;
};

// TraceBuffer holds the events as they are collected.
class BASE_EXPORT TraceBuffer {
 public:
  virtual ~TraceBuffer() {}

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) = 0;

  virtual bool IsFull() const = 0;
  virtual size_t Size() const = 0;
  virtual size_t Capacity() const = 0;
  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;

  // For iteration. Each TraceBuffer can only be iterated once.
  virtual const TraceBufferChunk* NextChunk() = 0;

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
};

// TraceResultBuffer collects and converts trace fragments returned by TraceLog
// to JSON output.
class BASE_EXPORT TraceResultBuffer {
 public:
  typedef base::Callback<void(const std::string&)> OutputCallback;

  // If you don't need to stream JSON chunks out efficiently and just want to
  // get a complete JSON string after calling Finish, use this struct to
  // collect JSON trace output.
  struct BASE_EXPORT SimpleOutput {
    OutputCallback GetCallback();
    void Append(const std::string& json_string);

    // Do what you want with the json_output string after calling
    // TraceResultBuffer::Finish.
    std::string json_output;
  };

  TraceResultBuffer();
  ~TraceResultBuffer();

  // Set the callback. The callback will be called during Start with the
  // initial JSON output and during AddFragment and Finish with subsequent JSON
  // output chunks. The callback target must outlive the last calls to
  // TraceResultBuffer::Start/AddFragment/Finish.
  void SetOutputCallback(const OutputCallback& json_chunk_callback);

  // Start JSON output. This resets all internal state, so you can reuse
  // the TraceResultBuffer by calling Start.
  void Start();

  // Call AddFragment 0 or more times to add trace fragments from TraceLog.
  void AddFragment(const std::string& trace_fragment);

  // When all fragments have been added, call Finish to complete the JSON
  // formatted output.
  void Finish();

 private:
  OutputCallback output_callback_;
  bool append_comma_;
};

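// A usage sketch (the |fragments| variable is hypothetical): collect trace
// fragments into a single JSON string via SimpleOutput.
//
//   base::debug::TraceResultBuffer buffer;
//   base::debug::TraceResultBuffer::SimpleOutput output;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();
//   for (size_t i = 0; i < fragments.size(); ++i)
//     buffer.AddFragment(fragments[i]);
//   buffer.Finish();
//   // output.json_output now holds the complete JSON trace.
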
class BASE_EXPORT CategoryFilter {
 public:
  typedef std::vector<std::string> StringList;

  // The default category filter, used when none is provided.
  // Allows all categories through, except if they end in the suffix 'Debug' or
  // 'Test'.
  static const char* kDefaultCategoryFilterString;

  // |filter_string| is a comma-delimited list of category wildcards.
  // A category can have an optional '-' prefix to make it an excluded
  // category. All the same rules apply, so, for example, having both included
  // and excluded categories in the same list is not supported.
  //
  // Example: CategoryFilter("test_MyTest*");
  // Example: CategoryFilter("test_MyTest*,test_OtherStuff");
  // Example: CategoryFilter("-excluded_category1,-excluded_category2");
  // Example: CategoryFilter("-*,webkit"); would disable everything but webkit.
  // Example: CategoryFilter("-webkit"); would enable everything but webkit.
  //
  // Category filters can also be used to configure synthetic delays.
  //
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16)"); would make swap
  //          buffers always take at least 16 ms.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;oneshot)"); would
  //          make swap buffers take at least 16 ms the first time it is
  //          called.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;alternating)");
  //          would make swap buffers take at least 16 ms every other time it
  //          is called.
  explicit CategoryFilter(const std::string& filter_string);

  CategoryFilter();

  CategoryFilter(const CategoryFilter& cf);

  ~CategoryFilter();

  CategoryFilter& operator=(const CategoryFilter& rhs);

  // Writes the string representation of the CategoryFilter. This is a comma
  // separated string, similar in nature to the one used to determine
  // enabled/disabled category patterns, except that the order is arbitrary:
  // included categories go first, then excluded categories. Excluded
  // categories are distinguished from included categories by the prefix '-'.
  std::string ToString() const;

  // Determines whether a category group would be enabled or
  // disabled by this category filter.
  bool IsCategoryGroupEnabled(const char* category_group) const;

  // Returns a list of the synthetic delays specified in this category filter.
  const StringList& GetSyntheticDelayValues() const;

  // Merges |nested_filter| with the current CategoryFilter.
  void Merge(const CategoryFilter& nested_filter);

  // Clears both included/excluded pattern lists. This would be equivalent to
  // creating a CategoryFilter with an empty string, through the constructor,
  // i.e.: CategoryFilter().
  //
  // When using an empty filter, all categories are considered included as we
  // are not excluding anything.
  void Clear();

 private:
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, CategoryFilter);

  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
      const std::string& str);

  void Initialize(const std::string& filter_string);
  void WriteString(const StringList& values,
                   std::string* out,
                   bool included) const;
  void WriteString(const StringList& delays, std::string* out) const;
  bool HasIncludedPatterns() const;

  bool DoesCategoryGroupContainCategory(const char* category_group,
                                        const char* category) const;

  StringList included_;
  StringList disabled_;
  StringList excluded_;
  StringList delays_;
};

class TraceSamplingThread;

// TraceRecordMode determines how the trace buffer stores data.
enum TraceRecordMode {
  // Record until the trace buffer is full.
  RECORD_UNTIL_FULL,

  // Record until the user ends the trace. The trace buffer is a fixed size
  // and we use it as a ring buffer during recording.
  RECORD_CONTINUOUSLY,

  // Echo to console. Events are discarded.
  ECHO_TO_CONSOLE,

  // Record until the trace buffer is full, but with a huge buffer size.
  RECORD_AS_MUCH_AS_POSSIBLE
};

struct BASE_EXPORT TraceOptions {
  TraceOptions()
      : record_mode(RECORD_UNTIL_FULL),
        enable_sampling(false),
        enable_systrace(false) {}

  TraceOptions(TraceRecordMode record_mode)
      : record_mode(record_mode),
        enable_sampling(false),
        enable_systrace(false) {}

  // |options_string| is a comma-delimited list of trace options.
  // Possible options are: "record-until-full", "record-continuously",
  // "trace-to-console", "enable-sampling" and "enable-systrace".
  // The first 3 options are trace recording modes and hence
  // mutually exclusive. If more than one trace recording mode appears in
  // |options_string|, the last one takes precedence. If no trace recording
  // mode is specified, the recording mode is RECORD_UNTIL_FULL.
  //
  // The trace options will first be reset to the defaults (record_mode set to
  // RECORD_UNTIL_FULL, enable_sampling and enable_systrace set to false)
  // before options parsed from |options_string| are applied to them.
  // If |options_string| is invalid, the final state of trace_options is
  // undefined.
  //
  // Example: trace_options.SetFromString("record-until-full")
  // Example: trace_options.SetFromString(
  //              "record-continuously, enable-sampling")
  // Example: trace_options.SetFromString("record-until-full, trace-to-console")
  // will set ECHO_TO_CONSOLE as the recording mode.
  //
  // Returns true on success.
  bool SetFromString(const std::string& options_string);

  std::string ToString() const;

  TraceRecordMode record_mode;
  bool enable_sampling;
  bool enable_systrace;
};

class BASE_EXPORT TraceLog {
 public:
  enum Mode {
    DISABLED = 0,
    RECORDING_MODE,
    MONITORING_MODE,
  };

  // The pointer returned from GetCategoryGroupEnabledInternal() points to a
  // value with zero or more of the following bits. Used in this class only.
  // The TRACE_EVENT macros should only use the value as a bool.
  // These values must be in sync with macro values in TraceEvent.h in Blink.
  enum CategoryGroupEnabledFlags {
    // Category group enabled for the recording mode.
    ENABLED_FOR_RECORDING = 1 << 0,
    // Category group enabled for the monitoring mode.
    ENABLED_FOR_MONITORING = 1 << 1,
    // Category group enabled by SetEventCallbackEnabled().
    ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
  };

  static TraceLog* GetInstance();

  // Get set of known category groups. This can change as new code paths are
  // reached. The known category groups are inserted into |category_groups|.
  void GetKnownCategoryGroups(std::vector<std::string>* category_groups);

  // Retrieves a copy (for thread-safety) of the current CategoryFilter.
  CategoryFilter GetCurrentCategoryFilter();

  // Retrieves a copy (for thread-safety) of the current TraceOptions.
  TraceOptions GetCurrentTraceOptions() const;

  // Enables normal tracing (recording trace events in the trace buffer).
  // See CategoryFilter comments for details on how to control what categories
  // will be traced. If tracing has already been enabled, |category_filter| will
  // be merged into the current category filter.
  void SetEnabled(const CategoryFilter& category_filter,
                  Mode mode, const TraceOptions& options);
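
  // A usage sketch for SetEnabled (the category names here are illustrative):
  //
  //   base::debug::TraceOptions options;
  //   options.SetFromString("record-continuously, enable-sampling");
  //   base::debug::TraceLog::GetInstance()->SetEnabled(
  //       base::debug::CategoryFilter("webkit,cc"),
  //       base::debug::TraceLog::RECORDING_MODE,
  //       options);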

  // Disables normal tracing for all categories.
  void SetDisabled();

  bool IsEnabled() { return mode_ != DISABLED; }

  // The number of times we have begun recording traces. If tracing is off,
  // returns -1. If tracing is on, then it returns the number of times we have
  // recorded a trace. By watching for this number to increment, you can
  // passively discover when a new trace has begun. This is then used to
  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
  int GetNumTracesRecorded();

#if defined(OS_ANDROID)
  void StartATrace();
  void StopATrace();
  void AddClockSyncMetadataEvent();
#endif

  // Enabled state listeners give a callback when tracing is enabled or
  // disabled. This can be used to tie into other libraries' tracing systems
  // on demand.
  class BASE_EXPORT EnabledStateObserver {
   public:
    // Called just after the tracing system becomes enabled, outside of the
    // |lock_|. TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Called just after the tracing system disables, outside of the |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };
  void AddEnabledStateObserver(EnabledStateObserver* listener);
  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;

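  // A sketch of an observer (hypothetical class and method names) that reacts
  // to tracing being turned on or off:
  //
  //   class MyTracingAgent
  //       : public base::debug::TraceLog::EnabledStateObserver {
  //    public:
  //     virtual void OnTraceLogEnabled() OVERRIDE { StartMyOwnTracing(); }
  //     virtual void OnTraceLogDisabled() OVERRIDE { StopMyOwnTracing(); }
  //   };
  //
  //   // Registration; the observer must outlive its registration:
  //   base::debug::TraceLog::GetInstance()->AddEnabledStateObserver(observer);
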
  float GetBufferPercentFull() const;
  bool BufferIsFull() const;

  // Not using base::Callback because it is limited to 7 parameters.
  // Also, using a primitive type allows the callback to be passed directly
  // from WebCore.
  // WARNING: It is possible for the previously set callback to be called
  // after a call to SetEventCallbackEnabled() that replaces it, or a call to
  // SetEventCallbackDisabled() that disables the callback.
  // This callback may be invoked on any thread.
  // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
  // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
  // interface simple.
  typedef void (*EventCallback)(TimeTicks timestamp,
                                char phase,
                                const unsigned char* category_group_enabled,
                                const char* name,
                                unsigned long long id,
                                int num_args,
                                const char* const arg_names[],
                                const unsigned char arg_types[],
                                const unsigned long long arg_values[],
                                unsigned char flags);

  // Enable tracing for EventCallback.
  void SetEventCallbackEnabled(const CategoryFilter& category_filter,
                               EventCallback cb);
  void SetEventCallbackDisabled();

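  // A sketch of an EventCallback (free function with a hypothetical name).
  // Note that it may be invoked on any thread:
  //
  //   void OnTraceEvent(base::TimeTicks timestamp,
  //                     char phase,
  //                     const unsigned char* category_group_enabled,
  //                     const char* name,
  //                     unsigned long long id,
  //                     int num_args,
  //                     const char* const arg_names[],
  //                     const unsigned char arg_types[],
  //                     const unsigned long long arg_values[],
  //                     unsigned char flags) {
  //     // Forward the event to another tracing system, counters, etc.
  //   }
  //
  //   base::debug::TraceLog::GetInstance()->SetEventCallbackEnabled(
  //       base::debug::CategoryFilter("webkit"), &OnTraceEvent);
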
  // Flush all collected events to the given output callback. The callback will
  // be called one or more times either synchronously or asynchronously from
  // the current thread with IPC-bite-size chunks. The string format is
  // undefined. Use TraceResultBuffer to convert one or more trace strings to
  // JSON. The callback can be null if the caller doesn't want any data.
  // Due to the implementation of thread-local buffers, flush can't be
  // done when tracing is enabled. If called when tracing is enabled, the
  // callback will be called directly with (empty_string, false) to indicate
  // the end of this unsuccessful flush.
  typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
                              bool has_more_events)> OutputCallback;
  void Flush(const OutputCallback& cb);
  void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);

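  // A sketch of collecting flushed data into JSON (the helper function is
  // hypothetical; it uses TraceResultBuffer declared above). Flush() may
  // invoke the callback more than once; |has_more_events| is false on the
  // final chunk:
  //
  //   void OnTraceDataCollected(
  //       base::debug::TraceResultBuffer* buffer,
  //       const scoped_refptr<base::RefCountedString>& events_str,
  //       bool has_more_events) {
  //     buffer->AddFragment(events_str->data());
  //     if (!has_more_events)
  //       buffer->Finish();
  //   }
  //
  //   // Somewhere after tracing has been disabled:
  //   TraceLog::GetInstance()->Flush(
  //       base::Bind(&OnTraceDataCollected, base::Unretained(&buffer)));
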
  // Called by TRACE_EVENT* macros, don't call this directly.
  // The name parameter is a category group, for example:
  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
  static const unsigned char* GetCategoryGroupEnabled(const char* name);
  static const char* GetCategoryGroupName(
      const unsigned char* category_group_enabled);

  // Called by TRACE_EVENT* macros, don't call this directly.
  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
  TraceEventHandle AddTraceEvent(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int thread_id,
      const TimeTicks& timestamp,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const char* extra);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const std::string& extra);

  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceEventHandle handle);

  // For every matching event, the callback will be called.
  typedef base::Callback<void()> WatchEventCallback;
  void SetWatchEvent(const std::string& category_name,
                     const std::string& event_name,
                     const WatchEventCallback& callback);
  // Cancel the watch event. If tracing is enabled, this may race with the
  // watch event notification firing.
  void CancelWatchEvent();

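  // A usage sketch (hypothetical category, event, and callback names): get
  // notified each time a specific event is recorded.
  //
  //   TraceLog::GetInstance()->SetWatchEvent(
  //       "renderer", "WebViewImpl::HandleInputEvent",
  //       base::Bind(&OnInputEventTraced));
  //   // ...
  //   TraceLog::GetInstance()->CancelWatchEvent();
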
  int process_id() const { return process_id_; }

  // Exposed for unittesting:

  void WaitSamplingEventForTesting();

  // Allows deleting our singleton instance.
  static void DeleteForTesting();

  // Allow tests to inspect TraceEvents.
  size_t GetEventsSize() const { return logged_events_->Size(); }
  TraceEvent* GetEventByHandle(TraceEventHandle handle);

  void SetProcessID(int process_id);

  // Process sort indices, if set, override the order in which a process will
  // appear relative to other processes in the trace viewer. Processes are
  // sorted first on their sort index, ascending, then by their name, and then
  // by tid.
  void SetProcessSortIndex(int sort_index);

  // Sets the name of the process.
  void SetProcessName(const std::string& process_name);

  // Processes can have labels in addition to their names. Use labels, for
  // instance, to list out the web page titles that a process is handling.
  void UpdateProcessLabel(int label_id, const std::string& current_label);
  void RemoveProcessLabel(int label_id);

  // Thread sort indices, if set, override the order in which a thread will
  // appear within its process in the trace viewer. Threads are sorted first on
  // their sort index, ascending, then by their name, and then by tid.
  void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);

  // Allow setting an offset between the current TimeTicks time and the time
  // that should be reported.
  void SetTimeOffset(TimeDelta offset);

  size_t GetObserverCountForTest() const;

  // Call this method if the current thread may block the message loop to
  // prevent the thread from using the thread-local buffer because the thread
  // may not handle the flush request in time, causing loss of unflushed
  // events.
  void SetCurrentThreadBlocksMessageLoop();

 private:
  typedef unsigned int InternalTraceOptions;

  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferGetReturnChunk);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferHalfIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferFullIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferVectorReportFull);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           ConvertTraceOptionsToInternalOptions);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceRecordAsMuchAsPossibleMode);

  // This allows constructor and destructor to be private and usable only
  // by the Singleton class.
  friend struct DefaultSingletonTraits<TraceLog>;

  // Enable/disable each category group based on the current mode_,
  // category_filter_, event_callback_ and event_callback_category_filter_.
  // Enable the category group in the enabled mode if category_filter_ matches
  // the category group, or event_callback_ is not null and
  // event_callback_category_filter_ matches the category group.
  void UpdateCategoryGroupEnabledFlags();
  void UpdateCategoryGroupEnabledFlag(size_t category_index);

  // Configure synthetic delays based on the values set in the current
  // category filter.
  void UpdateSyntheticDelaysFromCategoryFilter();

  InternalTraceOptions GetInternalOptionsFromTraceOptions(
      const TraceOptions& options);

  class ThreadLocalEventBuffer;
  class OptionalAutoLock;

  TraceLog();
  ~TraceLog();
  const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
  void AddMetadataEventsWhileLocked();

  InternalTraceOptions trace_options() const {
    return static_cast<InternalTraceOptions>(
        subtle::NoBarrier_Load(&trace_options_));
  }

  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
  TraceBuffer* CreateTraceBuffer();
  TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);

  std::string EventToConsoleMessage(unsigned char phase,
                                    const TimeTicks& timestamp,
                                    TraceEvent* trace_event);

  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
                                                     bool check_buffer_is_full);
  void CheckIfBufferIsFullWhileLocked();
  void SetDisabledWhileLocked();

  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
                                       OptionalAutoLock* lock);

  // |generation| is used in the following callbacks to check if the callback
  // is called for the flush of the current |logged_events_|.
  void FlushCurrentThread(int generation);
  void ConvertTraceEventsToTraceFormat(scoped_ptr<TraceBuffer> logged_events,
      const TraceLog::OutputCallback& flush_output_callback);
  void FinishFlush(int generation);
  void OnFlushTimeout(int generation);

  int generation() const {
    return static_cast<int>(subtle::NoBarrier_Load(&generation_));
  }
  bool CheckGeneration(int generation) const {
    return generation == this->generation();
  }
  void UseNextTraceBuffer();

  TimeTicks OffsetNow() const {
    return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime());
  }
  TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
    return timestamp - time_offset_;
  }

  // Internal representation of trace options since we store the currently used
  // trace option as an AtomicWord.
  static const InternalTraceOptions kInternalNone;
  static const InternalTraceOptions kInternalRecordUntilFull;
  static const InternalTraceOptions kInternalRecordContinuously;
  static const InternalTraceOptions kInternalEchoToConsole;
  static const InternalTraceOptions kInternalEnableSampling;
  static const InternalTraceOptions kInternalRecordAsMuchAsPossible;

  // This lock protects TraceLog member accesses (except for members protected
  // by thread_info_lock_) from arbitrary threads.
  mutable Lock lock_;
  // This lock protects accesses to thread_names_, thread_event_start_times_
  // and thread_colors_.
  Lock thread_info_lock_;
  int locked_line_;
  Mode mode_;
  int num_traces_recorded_;
  scoped_ptr<TraceBuffer> logged_events_;
  subtle::AtomicWord /* EventCallback */ event_callback_;
  bool dispatching_to_observer_list_;
  std::vector<EnabledStateObserver*> enabled_state_observer_list_;

  std::string process_name_;
  base::hash_map<int, std::string> process_labels_;
  int process_sort_index_;
  base::hash_map<int, int> thread_sort_indices_;
  base::hash_map<int, std::string> thread_names_;

  // The following two maps are used only in ECHO_TO_CONSOLE mode.
  base::hash_map<int, std::stack<TimeTicks> > thread_event_start_times_;
  base::hash_map<std::string, int> thread_colors_;

  TimeTicks buffer_limit_reached_timestamp_;

  // XORed with TraceID to make it unlikely to collide with other processes.
  unsigned long long process_id_hash_;

  int process_id_;

  TimeDelta time_offset_;

  // Allow tests to wake up when certain events occur.
  WatchEventCallback watch_event_callback_;
  subtle::AtomicWord /* const unsigned char* */ watch_category_;
  std::string watch_event_name_;

  subtle::AtomicWord /* Options */ trace_options_;

  // Sampling thread handles.
  scoped_ptr<TraceSamplingThread> sampling_thread_;
  PlatformThreadHandle sampling_thread_handle_;

  CategoryFilter category_filter_;
  CategoryFilter event_callback_category_filter_;

  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
  ThreadLocalBoolean thread_blocks_message_loop_;
  ThreadLocalBoolean thread_is_in_trace_event_;

  // Contains the message loops of threads that have had at least one event
  // added into the local event buffer. Not using MessageLoopProxy because we
  // need to know the lifetime of the message loops.
  hash_set<MessageLoop*> thread_message_loops_;

  // For events which can't be added into the thread local buffer, e.g. events
  // from threads without a message loop.
  scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
  size_t thread_shared_chunk_index_;

  // Set when an asynchronous Flush is in progress.
  OutputCallback flush_output_callback_;
  scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_;
  subtle::AtomicWord generation_;

  DISALLOW_COPY_AND_ASSIGN(TraceLog);
};

}  // namespace debug
}  // namespace base

#endif  // BASE_DEBUG_TRACE_EVENT_IMPL_H_