// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TRACKED_OBJECTS_H_
#define BASE_TRACKED_OBJECTS_H_

#include <stdint.h>

#include <map>
#include <set>
#include <stack>
#include <string>
#include <utility>
#include <vector>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/process/process_handle.h"
#include "base/profiler/tracked_time.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_local_storage.h"

namespace base {
struct TrackingInfo;
}

// TrackedObjects provides a database of stats about objects (generally Tasks)
// that are tracked.  Tracking means their birth, death, duration, birth thread,
// death thread, and birth place are recorded.  This data is carefully spread
// across a series of objects so that the counts and times can be rapidly
// updated without (usually) having to lock the data, and hence there is usually
// very little contention caused by the tracking.  The data can be viewed via
// the about:profiler URL, with a variety of sorting and filtering choices.
//
// These classes serve as the basis of a profiler of sorts for the Tasks system.
// As a result, design decisions were made to maximize speed, by minimizing
// recurring allocation/deallocation, lock contention and data copying.  In the
// "stable" state, which is reached relatively quickly, there is no separate
// marginal allocation cost associated with construction or destruction of
// tracked objects, no locks are generally employed, and probably the largest
// computational cost is associated with obtaining start and stop times for
// instances as they are created and destroyed.
//
// The following describes the life cycle of tracking an instance.
//
// First off, when the instance is created, the FROM_HERE macro is expanded
// to specify the birth place (file, line, function) where the instance was
// created.  That data is used to create a transient Location instance
// encapsulating the above triple of information.  The strings (like __FILE__)
// are passed around by reference, with the assumption that they are static, and
// will never go away.  This ensures that the strings can be dealt with as atoms
// with great efficiency (i.e., copying of strings is never needed, and
// comparisons for equality can be based on pointer comparisons).
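//
// As an illustrative sketch (not a prescription), a birth site typically just
// passes FROM_HERE, which base/location.h expands into a Location for the call
// site's function, file, and line:
//
//   tracked_objects::ThreadData::TallyABirthIfActive(FROM_HERE);
//
// Because those strings are compiler-provided literals with static storage,
// comparing two Locations reduces to comparing a few pointers.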
//
// Next, a Births instance is created for use ONLY on the thread where this
// instance was created.  That Births instance records (in a base class
// BirthOnThread) references to the static data provided in a Location instance,
// as well as a pointer specifying the thread on which the birth takes place.
// Hence there is at most one Births instance for each Location on each thread.
// The derived Births class contains slots for recording statistics about all
// instances born at the same location.  Statistics currently include only the
// count of instances constructed.
//
// Since the base class BirthOnThread contains only constant data, it can be
// freely accessed by any thread at any time (i.e., only the statistic needs to
// be handled carefully, and stats are updated exclusively on the birth thread).
//
// For Tasks, having now either constructed or found the Births instance
// described above, a pointer to the Births instance is then recorded into the
// PendingTask structure in MessageLoop.  This fact alone is very useful in
// debugging, when there is a question of where an instance came from.  In
// addition, the birth time is also recorded and used to later evaluate the
// lifetime duration of the whole Task.  As a result of the above embedding, we
// can find out a Task's location of birth, and thread of birth, without using
// any locks, as all that data is constant across the life of the process.
//
// The above work *could* also be done for any other object as well by calling
// TallyABirthIfActive() and TallyRunOnNamedThreadIfTracking() as appropriate.
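//
// A hedged sketch of tallying such a birth manually, using only declarations
// from this header (the posting time is captured alongside for later use):
//
//   const tracked_objects::Births* births =
//       tracked_objects::ThreadData::TallyABirthIfActive(FROM_HERE);
//   tracked_objects::TrackedTime time_posted =
//       tracked_objects::ThreadData::Now();
//
// The matching completion ("death") is reported once the work finishes; see
// the sketch next to TallyRunOnWorkerThreadIfTracking() below.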
//
// The amount of memory used in the above data structures depends on how many
// threads there are, and how many Locations of construction there are.
// Fortunately, we don't use memory that is the product of those two counts, but
// rather we only need one Births instance for each thread that constructs an
// instance at a Location.  In many cases, instances are only created on one
// thread, so the memory utilization is actually fairly restrained.
//
// Lastly, when an instance is deleted, the final tallies of statistics are
// carefully accumulated.  That tallying writes into slots (members) in a
// collection of DeathData instances.  For each birth place Location that is
// destroyed on a thread, there is a DeathData instance to record the additional
// death count, as well as accumulate the run-time and queue-time durations for
// the instance as it is destroyed (dies).  By maintaining a single place to
// aggregate this running sum *only* for the given thread, we avoid the need to
// lock such DeathData instances. (i.e., these accumulated stats in a DeathData
// instance are exclusively updated by the singular owning thread).
//
// With the above life cycle description complete, the major remaining detail
// is explaining how each thread maintains a list of DeathData instances, and
// of Births instances, and is able to avoid additional (redundant/unnecessary)
// allocations.
//
// Each thread maintains a list of data items specific to that thread in a
// ThreadData instance (for that specific thread only).  The two critical items
// are lists of DeathData and Births instances.  These lists are maintained in
// STL maps, which are indexed by Location.  As noted earlier, we can compare
// locations very efficiently as we consider the underlying data (file,
// function, line) to be atoms, and hence pointer comparison is used rather than
// (slow) string comparisons.
//
// To provide a mechanism for iterating over all "known threads," which means
// threads that have recorded a birth or a death, we create a singly linked list
// of ThreadData instances.  Each such instance maintains a pointer to the next
// one.  A static member of ThreadData provides a pointer to the first item on
// this global list, and access via that all_thread_data_list_head_ item
// requires the use of the list_lock_.
// When a new ThreadData instance is added to the global list, it is pre-pended,
// which ensures that any prior acquisition of the list is valid (i.e., the
// holder can iterate over it without fear of it changing, or the necessity of
// using an additional lock).  Iterations are actually pretty rare (used
// primarily for cleanup, or snapshotting data for display), so this lock has
// very little global performance impact.
//
// The above description tries to define the high performance (run time)
// portions of these classes.  After gathering statistics, calls instigated
// by visiting about:profiler will assemble and aggregate data for display.  The
// following data structures are used for producing such displays.  They are
// not performance critical, and their only major constraint is that they should
// be able to run concurrently with ongoing augmentation of the birth and death
// data.
//
// This header also exports a collection of classes that provide "snapshotted"
// representations of the core tracked_objects:: classes.  These snapshotted
// representations are designed for safe transmission of the tracked_objects::
// data across process boundaries.  Each consists of:
// (1) a default constructor, to support the IPC serialization macros,
// (2) a constructor that extracts data from the type being snapshotted, and
// (3) the snapshotted data.
//
// For a given birth location, information about births is spread across data
// structures that are asynchronously changing on various threads.  For
// serialization and display purposes, we need to construct TaskSnapshot
// instances for each combination of birth thread, death thread, and location,
// along with the count of such lifetimes.  We gather such data into
// TaskSnapshot instances, so that such instances can be sorted and
// aggregated (and remain frozen during our processing).
//
// Profiling consists of phases.  Each phase in the sequence of phases is
// identified by its 0-based index.
//
// The ProcessDataPhaseSnapshot struct is a serialized representation of the
// list of ThreadData objects for a process for a concrete profiling phase.  It
// holds a set of TaskSnapshots.  The statistics in a snapshot are gathered
// asynchronously relative to their ongoing updates.
// It is possible, though highly unlikely, that stats could be incorrectly
// recorded by this process (all data is held in 32 bit ints, but we are not
// atomically collecting all data, so we could have a count that does not, for
// example, match with the number of durations we accumulated).  The advantage
// to having fast (non-atomic) updates of the data outweighs the minimal risk of
// a singular corrupt statistic snapshot (only the snapshot could be corrupt,
// not the underlying and ongoing statistic).  In contrast, pointer data that
// is accessed during snapshotting is completely invariant, and hence is
// perfectly acquired (i.e., no potential corruption, and no risk of a bad
// memory reference).
//
// TODO(jar): We can implement a Snapshot system that *tries* to grab the
// snapshots on the source threads *when* they have MessageLoops available
// (worker threads don't have message loops generally, and hence gathering from
// them will continue to be asynchronous).  We had an implementation of this in
// the past, but the difficulty is dealing with message loops being terminated.
// We can *try* to spam the available threads via some task runner to
// achieve this feat, and it *might* be valuable when we are collecting data
// for upload via UMA (where correctness of data may be more significant than
// for a single screen of about:profiler).
//
// TODO(jar): We need to store DataCollections, and provide facilities for
// taking the difference between two gathered DataCollections.  For now, we're
// just adding a hack that Reset()s to zero all counts and stats.  This is also
// done in a slightly thread-unsafe fashion, as the resetting is done
// asynchronously relative to ongoing updates (but all data is 32 bit in size).
// For basic profiling, this will work "most of the time," and should be
// sufficient... but storing away DataCollections is the "right way" to do this.
// We'll accomplish this via JavaScript storage of snapshots, and then we'll
// remove the Reset() methods.  We may also need a short-term-max value in
// DeathData that is reset (as synchronously as possible) during each snapshot.
// This will facilitate displaying a max value for each snapshot period.

namespace tracked_objects {

//------------------------------------------------------------------------------
// For a specific thread, and a specific birth place, the collection of all
// death info (with tallies for each death thread, to prevent access conflicts).
class ThreadData;
class BASE_EXPORT BirthOnThread {
 public:
  BirthOnThread(const Location& location, const ThreadData& current);

  const Location& location() const { return location_; }
  const ThreadData* birth_thread() const { return birth_thread_; }

 private:
  // File/lineno of birth.  This defines the essence of the task, as the context
  // of the birth (construction) often tells what the item is for.  This field
  // is const, and hence safe to access from any thread.
  const Location location_;

  // The thread that records births into this object.  Only this thread is
  // allowed to update birth_count_ (which changes over time).
  const ThreadData* const birth_thread_;

  DISALLOW_COPY_AND_ASSIGN(BirthOnThread);
};

//------------------------------------------------------------------------------
// A "snapshotted" representation of the BirthOnThread class.

struct BASE_EXPORT BirthOnThreadSnapshot {
  BirthOnThreadSnapshot();
  explicit BirthOnThreadSnapshot(const BirthOnThread& birth);
  ~BirthOnThreadSnapshot();

  LocationSnapshot location;
  std::string thread_name;
};

//------------------------------------------------------------------------------
// A class for accumulating counts of births (without bothering with a map<>).

class BASE_EXPORT Births: public BirthOnThread {
 public:
  Births(const Location& location, const ThreadData& current);

  int birth_count() const;

  // When we have a birth we update the count for this birthplace.
  void RecordBirth();

 private:
  // The number of births on this thread for our location_.
  int birth_count_;

  DISALLOW_COPY_AND_ASSIGN(Births);
};

//------------------------------------------------------------------------------
// A "snapshotted" representation of the DeathData class.

struct BASE_EXPORT DeathDataSnapshot {
  DeathDataSnapshot();

  // Constructs the snapshot from individual values.
  // The alternative would be taking a DeathData parameter, but this would
  // create a loop since DeathData indirectly refers to DeathDataSnapshot.
  // Passing a wrapper structure as a param or using an empty constructor for
  // snapshotting DeathData would be less efficient.
  DeathDataSnapshot(int count,
                    int32_t run_duration_sum,
                    int32_t run_duration_max,
                    int32_t run_duration_sample,
                    int32_t queue_duration_sum,
                    int32_t queue_duration_max,
                    int32_t queue_duration_sample);
  ~DeathDataSnapshot();

  // Calculates and returns the delta between this snapshot and an earlier
  // snapshot of the same task |older|.
  DeathDataSnapshot Delta(const DeathDataSnapshot& older) const;

  int count;
  int32_t run_duration_sum;
  int32_t run_duration_max;
  int32_t run_duration_sample;
  int32_t queue_duration_sum;
  int32_t queue_duration_max;
  int32_t queue_duration_sample;
};
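
// For illustration (a hedged sketch): given two snapshots of the same task
// taken at consecutive phase boundaries, the contribution of just the most
// recent phase can be computed as
//
//   DeathDataSnapshot latest_only = newer.Delta(older);
//
// where |newer| and |older| are hypothetical names for the two snapshots.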

//------------------------------------------------------------------------------
// A "snapshotted" representation of the DeathData for a particular profiling
// phase.  Used as an element of the list of phase snapshots owned by DeathData.

struct DeathDataPhaseSnapshot {
  DeathDataPhaseSnapshot(int profiling_phase,
                         int count,
                         int32_t run_duration_sum,
                         int32_t run_duration_max,
                         int32_t run_duration_sample,
                         int32_t queue_duration_sum,
                         int32_t queue_duration_max,
                         int32_t queue_duration_sample,
                         const DeathDataPhaseSnapshot* prev);

  // The profiling phase at whose completion this snapshot was taken.
  int profiling_phase;

  // Death data snapshot.
  DeathDataSnapshot death_data;

  // Pointer to a snapshot from the previous phase.
  const DeathDataPhaseSnapshot* prev;
};

//------------------------------------------------------------------------------
// Information about deaths of a task on a given thread, called "death thread".
// Access to members of this class is never protected by a lock.  The fields
// are accessed in such a way that corruptions resulting from race conditions
// are not significant, and don't accumulate as a result of multiple accesses.
// All invocations of DeathData::OnProfilingPhaseCompleted and
// ThreadData::SnapshotMaps (which takes a DeathData snapshot) in a given
// process must be made from the same thread.  It doesn't matter which thread it
// is, but it's important that the same thread is used as the snapshot thread
// during the whole process lifetime.  All fields except
// sample_probability_count_ can be snapshotted.

class BASE_EXPORT DeathData {
 public:
  DeathData();
  DeathData(const DeathData& other);
  ~DeathData();

  // Update stats for a task destruction (death) that had a Run() time of
  // |duration|, and has had a queueing delay of |queue_duration|.
  void RecordDeath(const int32_t queue_duration,
                   const int32_t run_duration,
                   const uint32_t random_number);

  // Metrics and past snapshots accessors, used only for serialization and in
  // tests.
  int count() const { return base::subtle::NoBarrier_Load(&count_); }
  int32_t run_duration_sum() const {
    return base::subtle::NoBarrier_Load(&run_duration_sum_);
  }
  int32_t run_duration_max() const {
    return base::subtle::NoBarrier_Load(&run_duration_max_);
  }
  int32_t run_duration_sample() const {
    return base::subtle::NoBarrier_Load(&run_duration_sample_);
  }
  int32_t queue_duration_sum() const {
    return base::subtle::NoBarrier_Load(&queue_duration_sum_);
  }
  int32_t queue_duration_max() const {
    return base::subtle::NoBarrier_Load(&queue_duration_max_);
  }
  int32_t queue_duration_sample() const {
    return base::subtle::NoBarrier_Load(&queue_duration_sample_);
  }
  const DeathDataPhaseSnapshot* last_phase_snapshot() const {
    return last_phase_snapshot_;
  }

  // Called when the current profiling phase, identified by |profiling_phase|,
  // ends.
  // Must be called only on the snapshot thread.
  void OnProfilingPhaseCompleted(int profiling_phase);

 private:
  // Members are ordered from most regularly read and updated, to least
  // frequently used.  This might help a bit with cache lines.
  // Number of runs seen (divisor for calculating averages).
  // Can be incremented only on the death thread.
  base::subtle::Atomic32 count_;

  // Count used in determining probability of selecting exec/queue times from a
  // recorded death as samples.
  // Gets incremented only on the death thread, but can be set to 0 by
  // OnProfilingPhaseCompleted() on the snapshot thread.
  base::subtle::Atomic32 sample_probability_count_;

  // Basic tallies, used to compute averages.  Can be incremented only on the
  // death thread.
  base::subtle::Atomic32 run_duration_sum_;
  base::subtle::Atomic32 queue_duration_sum_;
  // Max values, used by local visualization routines.  These are often read,
  // but rarely updated.  The max values get assigned only on the death thread,
  // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the
  // snapshot thread.
  base::subtle::Atomic32 run_duration_max_;
  base::subtle::Atomic32 queue_duration_max_;
  // Samples, used by crowd sourcing gatherers.  These are almost never read,
  // and rarely updated.  They can be modified only on the death thread.
  base::subtle::Atomic32 run_duration_sample_;
  base::subtle::Atomic32 queue_duration_sample_;

  // Snapshot of this death data made at the last profiling phase completion, if
  // any.  DeathData owns the whole list starting with this pointer.
  // Can be accessed only on the snapshot thread.
  const DeathDataPhaseSnapshot* last_phase_snapshot_;

  DISALLOW_ASSIGN(DeathData);
};

//------------------------------------------------------------------------------
// A temporary collection of data that can be sorted and summarized.  It is
// gathered (carefully) from many threads.  Instances are held in arrays and
// processed, filtered, and rendered.
// The source of this data was collected on many threads, and is asynchronously
// changing.  The data in this instance is not asynchronously changing.

struct BASE_EXPORT TaskSnapshot {
  TaskSnapshot();
  TaskSnapshot(const BirthOnThreadSnapshot& birth,
               const DeathDataSnapshot& death_data,
               const std::string& death_thread_name);
  ~TaskSnapshot();

  BirthOnThreadSnapshot birth;
  // Delta between death data for a thread for a certain profiling phase and the
  // snapshot for the previous phase, if any.  Otherwise, just a snapshot.
  DeathDataSnapshot death_data;
  std::string death_thread_name;
};

//------------------------------------------------------------------------------
// For each thread, we have a ThreadData that stores all tracking info generated
// on this thread.  This prevents the need for locking as data accumulates.
// We use ThreadLocalStorage to quickly identify the current ThreadData context.
// We also have a linked list of ThreadData instances, and that list is used to
// harvest data from all existing instances.

struct ProcessDataPhaseSnapshot;
struct ProcessDataSnapshot;
class BASE_EXPORT TaskStopwatch;

// Map from profiling phase number to the process-wide snapshotted
// representation of the list of ThreadData objects that died during the given
// phase.
typedef std::map<int, ProcessDataPhaseSnapshot> PhasedProcessDataSnapshotMap;

class BASE_EXPORT ThreadData {
 public:
  // Current allowable states of the tracking system.  The states can vary
  // between ACTIVE and DEACTIVATED, but can never go back to UNINITIALIZED.
  enum Status {
    UNINITIALIZED,         // Pristine, link-time state before running.
    DORMANT_DURING_TESTS,  // Only used during testing.
    DEACTIVATED,           // No longer recording profiling.
    PROFILING_ACTIVE,      // Recording profiles.
    STATUS_LAST = PROFILING_ACTIVE
  };

  typedef base::hash_map<Location, Births*, Location::Hash> BirthMap;
  typedef std::map<const Births*, DeathData> DeathMap;

  // Initialize the current thread context with a new instance of ThreadData.
  // This is used by all threads that have names, and should be explicitly
  // set *before* any births on the threads have taken place.  It is generally
  // only used by the message loop, which has a well defined thread name.
  static void InitializeThreadContext(const std::string& suggested_name);

  // Using Thread Local Store, find the current instance for collecting data.
  // If an instance does not exist, construct one (and remember it for use on
  // this thread).
  // This may return NULL if the system is disabled for any reason.
  static ThreadData* Get();

  // Fills |process_data_snapshot| with phased snapshots of all profiling
  // phases, including the current one, identified by |current_profiling_phase|.
  // |current_profiling_phase| is necessary because a child process can start
  // after several phase-changing events, so it needs to receive the current
  // phase number from the browser process to fill the correct entry for the
  // current phase in the |process_data_snapshot| map.
  static void Snapshot(int current_profiling_phase,
                       ProcessDataSnapshot* process_data_snapshot);
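
  // As an illustrative (hedged) sketch, a browser-driven collection might do:
  //
  //   tracked_objects::ProcessDataSnapshot snapshot;
  //   tracked_objects::ThreadData::Snapshot(current_profiling_phase, &snapshot);
  //   // snapshot.phased_snapshots now maps each completed phase (and the
  //   // current one) to its ProcessDataPhaseSnapshot of TaskSnapshots.
  //
  // where |current_profiling_phase| is the phase number received from the
  // browser process.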

  // Called when the current profiling phase, identified by |profiling_phase|,
  // ends.
  // |profiling_phase| is necessary because a child process can start after
  // several phase-changing events, so it needs to receive the phase number from
  // the browser process to fill the correct entry in the
  // completed_phases_snapshots_ map.
  static void OnProfilingPhaseCompleted(int profiling_phase);

  // Finds (or creates) a place to count births from the given location in this
  // thread, and increments that tally.
  // TallyABirthIfActive will return NULL if the birth cannot be tallied.
  static Births* TallyABirthIfActive(const Location& location);

  // Records the end of a timed run of an object.  The |completed_task| contains
  // a pointer to a Births, the time_posted, and a delayed_start_time if any.
  // The |start_of_run| indicates when we started to perform the run of the
  // task.  The delayed_start_time is non-null for tasks that were posted as
  // delayed tasks, and it indicates when the task should have run (i.e., when
  // it should have posted out of the timer queue, and into the work queue).
  // The |end_of_run| was just obtained by a call to Now() (just after the task
  // finished).  It is provided as an argument to help with testing.
  static void TallyRunOnNamedThreadIfTracking(
      const base::TrackingInfo& completed_task,
      const TaskStopwatch& stopwatch);

  // Record the end of a timed run of an object.  The |birth| is the record for
  // the instance, the |time_posted| records that instant, which is presumed to
  // be when the task was posted into a queue to run on a worker thread.
  // The |start_of_run| is when the worker thread started to perform the run of
  // the task.
  // The |end_of_run| was just obtained by a call to Now() (just after the task
  // finished).
  static void TallyRunOnWorkerThreadIfTracking(const Births* births,
                                               const TrackedTime& time_posted,
                                               const TaskStopwatch& stopwatch);
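
  // A hedged usage sketch (mirroring what worker-thread task runners do):
  //
  //   tracked_objects::TaskStopwatch stopwatch;
  //   stopwatch.Start();
  //   RunTheTask();  // Hypothetical placeholder for the actual work.
  //   stopwatch.Stop();
  //   tracked_objects::ThreadData::TallyRunOnWorkerThreadIfTracking(
  //       births, time_posted, stopwatch);
  //
  // where |births| came from TallyABirthIfActive() at post time and
  // |time_posted| was captured with ThreadData::Now() at that same moment.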

  // Record the end of execution in a region, generally corresponding to a scope
  // being exited.
  static void TallyRunInAScopedRegionIfTracking(const Births* births,
                                                const TaskStopwatch& stopwatch);

  const std::string& thread_name() const { return thread_name_; }

  // Initializes all statics if needed (this initialization call should be made
  // while we are single threaded).
  static void EnsureTlsInitialization();

  // Sets internal status_.
  // If |status| is DEACTIVATED, then status_ is set to DEACTIVATED.
  // If |status| is an active state, then status_ is set to PROFILING_ACTIVE.
  static void InitializeAndSetTrackingStatus(Status status);
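
  // Illustrative sketch (not prescriptive): a process that wants to record
  // profiles typically calls, early during start-up,
  //
  //   tracked_objects::ThreadData::InitializeAndSetTrackingStatus(
  //       tracked_objects::ThreadData::PROFILING_ACTIVE);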

  static Status status();

  // Indicate if any sort of profiling is being done (i.e., we are more than
  // DEACTIVATED).
  static bool TrackingStatus();

  // Enables profiler timing.
  static void EnableProfilerTiming();

  // Provide a time function that does nothing (runs fast) when we don't have
  // the profiler enabled.  It will generally be optimized away when it is
  // ifdef'ed to be small enough (allowing the profiler to be "compiled out" of
  // the code).
  static TrackedTime Now();

  // This function can be called at process termination to validate that thread
  // cleanup routines have been called for at least some number of named
  // threads.
  static void EnsureCleanupWasCalled(int major_threads_shutdown_count);

 private:
  friend class TaskStopwatch;
  // Allow only tests to call ShutdownSingleThreadedCleanup.  We NEVER call it
  // in production code.
  // TODO(jar): Make this a friend in DEBUG only, so that the optimizer has a
  // better chance of optimizing (inlining? etc.) private methods (knowing that
  // there will be no need for an external entry point).
  friend class TrackedObjectsTest;
  FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown);
  FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown);

  // Type for an alternate timer function (testing only).
  typedef unsigned int NowFunction();

  typedef std::map<const BirthOnThread*, int> BirthCountMap;
  typedef std::vector<std::pair<const Births*, DeathDataPhaseSnapshot>>
      DeathsSnapshot;

  // Worker thread construction creates a name since there is none.
  explicit ThreadData(int thread_number);

  // Message loop based construction should provide a name.
  explicit ThreadData(const std::string& suggested_name);

  ~ThreadData();

  // Push this instance to the head of all_thread_data_list_head_, linking it to
  // the previous head.  This is performed after each construction, and leaves
  // the instance permanently on that list.
  void PushToHeadOfList();

  // (Thread safe) Get start of list of all ThreadData instances using the lock.
  static ThreadData* first();

  // Iterate through the null terminated list of ThreadData instances.
  ThreadData* next() const;

  // In this thread's data, record a new birth.
  Births* TallyABirth(const Location& location);

  // Find a place to record a death on this thread.
  void TallyADeath(const Births& births,
                   int32_t queue_duration,
                   const TaskStopwatch& stopwatch);

  // Snapshots (under a lock) the profiled data for the tasks for this thread
  // and writes all of the executed tasks' data -- i.e. the data for all
  // profiling phases (including the current one: |current_profiling_phase|) for
  // the tasks with entries in the death_map_ -- into |phased_snapshots|.
  // Also updates the |birth_counts| tally for each task to keep track of the
  // number of living instances of the task -- that is, each task maps to the
  // number of births for the task that have not yet been balanced by a death.
  void SnapshotExecutedTasks(int current_profiling_phase,
                             PhasedProcessDataSnapshotMap* phased_snapshots,
                             BirthCountMap* birth_counts);

  // Using our lock, make a copy of the specified maps.  This call may be made
  // on non-local threads, which necessitates the use of the lock to prevent
  // the map(s) from being reallocated while they are copied.
  void SnapshotMaps(int profiling_phase,
                    BirthMap* birth_map,
                    DeathsSnapshot* deaths);

  // Called for this thread when the current profiling phase, identified by
  // |profiling_phase|, ends.
  void OnProfilingPhaseCompletedOnThread(int profiling_phase);

  // This method is called by the TLS system when a thread terminates.
  // The argument may be NULL if this thread has never tracked a birth or death.
  static void OnThreadTermination(void* thread_data);

  // This method should be called when a worker thread terminates, so that we
  // can save all the thread data into a cache of reusable ThreadData instances.
  void OnThreadTerminationCleanup();

  // Cleans up data structures, and returns statics to near pristine (mostly
  // uninitialized) state.  If there is any chance that other threads are still
  // using the data structures, then the |leak| argument should be passed in as
  // true, and the data structures (birth maps, death maps, ThreadData
  // instances, etc.) will be leaked and not deleted.  If you have joined all
  // threads since the time that InitializeAndSetTrackingStatus() was called,
  // then you can pass in a |leak| value of false, and this function will
  // delete recursively all data structures, starting with the list of
  // ThreadData instances.
  static void ShutdownSingleThreadedCleanup(bool leak);

  // When non-null, this specifies an external function that supplies a
  // monotonically increasing time function.
  static NowFunction* now_function_for_testing_;

  // We use thread local store to identify which ThreadData to interact with.
  static base::ThreadLocalStorage::StaticSlot tls_index_;

  // List of ThreadData instances for use with worker threads.  When a worker
  // thread is done (terminated), we push it onto this list.  When a new worker
  // thread is created, we first try to re-use a ThreadData instance from the
  // list, and if none are available, construct a new one.
  // This is only accessed while list_lock_ is held.
  static ThreadData* first_retired_worker_;

  // Link to the most recently created instance (starts a null terminated list).
  // The list is traversed by about:profiler when it needs to snapshot data.
  // This is only accessed while list_lock_ is held.
  static ThreadData* all_thread_data_list_head_;

  // The next available worker thread number.  This should only be accessed when
  // the list_lock_ is held.
  static int worker_thread_data_creation_count_;

  // The number of times TLS has called us back to cleanup a ThreadData
  // instance.  This is only accessed while list_lock_ is held.
  static int cleanup_count_;

  // Incarnation sequence number, indicating how many times (during unittests)
  // we've either transitioned out of UNINITIALIZED, or into that state.  This
  // value is only accessed while the list_lock_ is held.
  static int incarnation_counter_;

  // Protection for access to all_thread_data_list_head_, and to
  // unregistered_thread_data_pool_.  This lock is leaked at shutdown.
  // The lock is very infrequently used, so we can afford to just make a lazy
  // instance and be safe.
  static base::LazyInstance<base::Lock>::Leaky list_lock_;

  // We set status_ to DEACTIVATED when we shut down the tracking service.
  static base::subtle::Atomic32 status_;

  // Link to next instance (null terminated list).  Used to globally track all
  // registered instances (corresponds to all registered threads where we keep
  // data).
  ThreadData* next_;

  // Pointer to another ThreadData instance for a Worker-Thread that has been
  // retired (its thread was terminated).  This value is non-NULL only for a
  // retired ThreadData associated with a Worker-Thread.
  ThreadData* next_retired_worker_;

  // The name of the thread that is being recorded.  If this thread has no
  // message_loop, then this is a worker thread, with a sequence number postfix.
  std::string thread_name_;

  // Indicate if this is a worker thread, and the ThreadData contexts should be
  // stored in the unregistered_thread_data_pool_ when not in use.
  // Value is zero when it is not a worker thread.  Value is a positive integer
  // corresponding to the created thread name if it is a worker thread.
  int worker_thread_number_;

  // A map used on each thread to keep track of Births on this thread.
  // This map should only be accessed on the thread it was constructed on.
  // When a snapshot is needed, this structure can be locked in place for the
  // duration of the snapshotting activity.
  BirthMap birth_map_;

  // Similar to birth_map_, this records information about the death of tracked
  // instances (i.e., when a tracked instance was destroyed on this thread).
  // It is locked before changing, and hence other threads may access it by
  // locking before reading it.
  DeathMap death_map_;

  // Lock to protect *some* access to BirthMap and DeathMap.  The maps are
  // regularly read and written on this thread, but may only be read from other
  // threads.  To support this, we acquire this lock if we are writing from this
  // thread, or reading from another thread.  For reading from this thread we
  // don't need a lock, as there is no potential for a conflict since the
  // writing is only done from this thread.
  mutable base::Lock map_lock_;

  // A random number that we use to decide which sample to keep as a
  // representative sample in each DeathData instance.  We can't start off with
  // much randomness (because we can't call RandInt() on all our threads), so
  // we stir in more and more as we go.
  uint32_t random_number_;

  // Record of what the incarnation_counter_ was when this instance was created.
  // If the incarnation_counter_ has changed, then we avoid pushing into the
  // pool (this is only critical in tests which go through multiple
  // incarnations).
  int incarnation_count_for_pool_;

  // Most recently started (i.e. most nested) stopwatch on the current thread,
  // if it exists; NULL otherwise.
  TaskStopwatch* current_stopwatch_;

  DISALLOW_COPY_AND_ASSIGN(ThreadData);
};

//------------------------------------------------------------------------------
// Stopwatch to measure task run time or simply create a time interval that will
// be subtracted from the current most nested task's run time.  Stopwatches
// coordinate with the stopwatches in which they are nested to avoid
// double-counting nested tasks' run times.

class BASE_EXPORT TaskStopwatch {
 public:
  // Starts the stopwatch.
  TaskStopwatch();
  ~TaskStopwatch();

  // Starts stopwatch.
  void Start();

  // Stops stopwatch.
  void Stop();

  // Returns the start time.
  TrackedTime StartTime() const;

  // Task's duration is calculated as the wallclock duration between starting
  // and stopping this stopwatch, minus the wallclock durations of any other
  // instances that are immediately nested in this one, started and stopped on
  // this thread during that period.
  int32_t RunDurationMs() const;

  // Returns tracking info for the current thread.
  ThreadData* GetThreadData() const;

 private:
  // Time when the stopwatch was started.
  TrackedTime start_time_;

  // Wallclock duration of the task.
  int32_t wallclock_duration_ms_;

  // Tracking info for the current thread.
  ThreadData* current_thread_data_;

  // Sum of wallclock durations of all stopwatches that were directly nested in
  // this one.
  int32_t excluded_duration_ms_;

  // Stopwatch which was running on our thread when this stopwatch was started.
  // That preexisting stopwatch must be adjusted to exclude the wallclock
  // duration of this stopwatch.
  TaskStopwatch* parent_;

#if DCHECK_IS_ON()
  // State of the stopwatch.  The stopwatch is first constructed in a created
  // state, then is optionally started/stopped, then destructed.
  enum { CREATED, RUNNING, STOPPED } state_;

  // Currently running stopwatch that is directly nested in this one, if such
  // stopwatch exists.  NULL otherwise.
  TaskStopwatch* child_;
#endif
};
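
// An illustrative (hedged) sketch of the nesting behavior described above,
// with hypothetical DoOuterWork()/DoNestedWork() placeholders:
//
//   tracked_objects::TaskStopwatch outer;
//   outer.Start();
//   DoOuterWork();
//   {
//     tracked_objects::TaskStopwatch inner;
//     inner.Start();
//     DoNestedWork();
//     inner.Stop();
//   }
//   outer.Stop();
//
// Here outer.RunDurationMs() excludes the wallclock time spent while |inner|
// was running, so the nested interval is not double-counted.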

//------------------------------------------------------------------------------
// A snapshotted representation of the list of ThreadData objects for a process,
// for a single profiling phase.

struct BASE_EXPORT ProcessDataPhaseSnapshot {
 public:
  ProcessDataPhaseSnapshot();
  ProcessDataPhaseSnapshot(const ProcessDataPhaseSnapshot& other);
  ~ProcessDataPhaseSnapshot();

  std::vector<TaskSnapshot> tasks;
};

//------------------------------------------------------------------------------
// A snapshotted representation of the list of ThreadData objects for a process,
// for all profiling phases, including the current one.

struct BASE_EXPORT ProcessDataSnapshot {
 public:
  ProcessDataSnapshot();
  ProcessDataSnapshot(const ProcessDataSnapshot& other);
  ~ProcessDataSnapshot();

  PhasedProcessDataSnapshotMap phased_snapshots;
  base::ProcessId process_id;
};
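
// An illustrative (hedged) sketch of consuming a ProcessDataSnapshot, e.g.
// when serializing it for about:profiler or UMA:
//
//   void DumpSnapshot(const tracked_objects::ProcessDataSnapshot& snapshot) {
//     for (const auto& phase : snapshot.phased_snapshots) {
//       int phase_number = phase.first;
//       const tracked_objects::ProcessDataPhaseSnapshot& phase_data =
//           phase.second;
//       for (const tracked_objects::TaskSnapshot& task : phase_data.tasks) {
//         // task.birth, task.death_data and task.death_thread_name describe
//         // one (birth thread, death thread, location) combination.
//       }
//     }
//   }
//
// DumpSnapshot() is a hypothetical helper, not part of this header.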

}  // namespace tracked_objects

#endif  // BASE_TRACKED_OBJECTS_H_