1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #ifndef BASE_TRACKED_OBJECTS_H_ 6 #define BASE_TRACKED_OBJECTS_H_ 7 8 #include <map> 9 #include <set> 10 #include <stack> 11 #include <string> 12 #include <utility> 13 #include <vector> 14 15 #include "base/base_export.h" 16 #include "base/basictypes.h" 17 #include "base/gtest_prod_util.h" 18 #include "base/lazy_instance.h" 19 #include "base/location.h" 20 #include "base/profiler/alternate_timer.h" 21 #include "base/profiler/tracked_time.h" 22 #include "base/synchronization/lock.h" 23 #include "base/threading/thread_local_storage.h" 24 25 namespace base { 26 struct TrackingInfo; 27 } 28 29 // TrackedObjects provides a database of stats about objects (generally Tasks) 30 // that are tracked. Tracking means their birth, death, duration, birth thread, 31 // death thread, and birth place are recorded. This data is carefully spread 32 // across a series of objects so that the counts and times can be rapidly 33 // updated without (usually) having to lock the data, and hence there is usually 34 // very little contention caused by the tracking. The data can be viewed via 35 // the about:profiler URL, with a variety of sorting and filtering choices. 36 // 37 // These classes serve as the basis of a profiler of sorts for the Tasks system. 38 // As a result, design decisions were made to maximize speed, by minimizing 39 // recurring allocation/deallocation, lock contention and data copying. In the 40 // "stable" state, which is reached relatively quickly, there is no separate 41 // marginal allocation cost associated with construction or destruction of 42 // tracked objects, no locks are generally employed, and probably the largest 43 // computational cost is associated with obtaining start and stop times for 44 // instances as they are created and destroyed. 
45 // 46 // The following describes the lifecycle of tracking an instance. 47 // 48 // First off, when the instance is created, the FROM_HERE macro is expanded 49 // to specify the birth place (file, line, function) where the instance was 50 // created. That data is used to create a transient Location instance 51 // encapsulating the above triple of information. The strings (like __FILE__) 52 // are passed around by reference, with the assumption that they are static, and 53 // will never go away. This ensures that the strings can be dealt with as atoms 54 // with great efficiency (i.e., copying of strings is never needed, and 55 // comparisons for equality can be based on pointer comparisons). 56 // 57 // Next, a Births instance is created for use ONLY on the thread where this 58 // instance was created. That Births instance records (in a base class 59 // BirthOnThread) references to the static data provided in a Location instance, 60 // as well as a pointer specifying the thread on which the birth takes place. 61 // Hence there is at most one Births instance for each Location on each thread. 62 // The derived Births class contains slots for recording statistics about all 63 // instances born at the same location. Statistics currently include only the 64 // count of instances constructed. 65 // 66 // Since the base class BirthOnThread contains only constant data, it can be 67 // freely accessed by any thread at any time (i.e., only the statistic needs to 68 // be handled carefully, and stats are updated exclusively on the birth thread). 69 // 70 // For Tasks, having now either constructed or found the Births instance 71 // described above, a pointer to the Births instance is then recorded into the 72 // PendingTask structure in MessageLoop. This fact alone is very useful in 73 // debugging, when there is a question of where an instance came from. 
In 74 // addition, the birth time is also recorded and used to later evaluate the 75 // lifetime duration of the whole Task. As a result of the above embedding, we 76 // can find out a Task's location of birth, and thread of birth, without using 77 // any locks, as all that data is constant across the life of the process. 78 // 79 // The above work *could* also be done for any other object as well by calling 80 // TallyABirthIfActive() and TallyRunOnNamedThreadIfTracking() as appropriate. 81 // 82 // The amount of memory used in the above data structures depends on how many 83 // threads there are, and how many Locations of construction there are. 84 // Fortunately, we don't use memory that is the product of those two counts, but 85 // rather we only need one Births instance for each thread that constructs an 86 // instance at a Location. In many cases, instances are only created on one 87 // thread, so the memory utilization is actually fairly restrained. 88 // 89 // Lastly, when an instance is deleted, the final tallies of statistics are 90 // carefully accumulated. That tallying writes into slots (members) in a 91 // collection of DeathData instances. For each birth place Location that is 92 // destroyed on a thread, there is a DeathData instance to record the additional 93 // death count, as well as accumulate the run-time and queue-time durations for 94 // the instance as it is destroyed (dies). By maintaining a single place to 95 // aggregate this running sum *only* for the given thread, we avoid the need to 96 // lock such DeathData instances. (i.e., these accumulated stats in a DeathData 97 // instance are exclusively updated by the singular owning thread). 98 // 99 // With the above lifecycle description complete, the major remaining detail is 100 // explaining how each thread maintains a list of DeathData instances, and of 101 // Births instances, and is able to avoid additional (redundant/unnecessary) 102 // allocations. 
//
// Each thread maintains a list of data items specific to that thread in a
// ThreadData instance (for that specific thread only).  The two critical items
// are lists of DeathData and Births instances.  These lists are maintained in
// STL maps, which are indexed by Location.  As noted earlier, we can compare
// locations very efficiently as we consider the underlying data (file,
// function, line) to be atoms, and hence pointer comparison is used rather than
// (slow) string comparisons.
//
// To provide a mechanism for iterating over all "known threads," which means
// threads that have recorded a birth or a death, we create a singly linked list
// of ThreadData instances.  Each such instance maintains a pointer to the next
// one.  A static member of ThreadData provides a pointer to the first item on
// this global list, and access via that all_thread_data_list_head_ item
// requires the use of the list_lock_.
// When a new ThreadData instance is added to the global list, it is pre-pended,
// which ensures that any prior acquisition of the list is valid (i.e., the
// holder can iterate over it without fear of it changing, or the necessity of
// using an additional lock).  Iterations are actually pretty rare (used
// primarily for cleanup, or snapshotting data for display), so this lock has
// very little global performance impact.
//
// The above description tries to define the high performance (run time)
// portions of these classes.  After gathering statistics, calls instigated
// by visiting about:profiler will assemble and aggregate data for display.  The
// following data structures are used for producing such displays.  They are
// not performance critical, and their only major constraint is that they should
// be able to run concurrently with ongoing augmentation of the birth and death
// data.
//
// This header also exports a collection of classes that provide "snapshotted"
// representations of the core tracked_objects:: classes.  These snapshotted
// representations are designed for safe transmission of the tracked_objects::
// data across process boundaries.  Each consists of:
// (1) a default constructor, to support the IPC serialization macros,
// (2) a constructor that extracts data from the type being snapshotted, and
// (3) the snapshotted data.
//
// For a given birth location, information about births is spread across data
// structures that are asynchronously changing on various threads.  For
// serialization and display purposes, we need to construct TaskSnapshot
// instances for each combination of birth thread, death thread, and location,
// along with the count of such lifetimes.  We gather such data into
// TaskSnapshot instances, so that such instances can be sorted and
// aggregated (and remain frozen during our processing).
//
// The ProcessDataSnapshot struct is a serialized representation of the list
// of ThreadData objects for a process.  It holds a set of TaskSnapshots
// and tracks parent/child relationships for the executed tasks.  The statistics
// in a snapshot are gathered asynchronously relative to their ongoing updates.
// It is possible, though highly unlikely, that stats could be incorrectly
// recorded by this process (all data is held in 32 bit ints, but we are not
// atomically collecting all data, so we could have a count that does not, for
// example, match with the number of durations we accumulated).  The advantage
// to having fast (non-atomic) updates of the data outweighs the minimal risk of
// a singular corrupt statistic snapshot (only the snapshot could be corrupt,
// not the underlying and ongoing statistic).  In contrast, pointer data that
// is accessed during snapshotting is completely invariant, and hence is
// perfectly acquired (i.e., no potential corruption, and no risk of a bad
// memory reference).
//
// TODO(jar): We can implement a Snapshot system that *tries* to grab the
// snapshots on the source threads *when* they have MessageLoops available
// (worker threads don't have message loops generally, and hence gathering from
// them will continue to be asynchronous).  We had an implementation of this in
// the past, but the difficulty is dealing with message loops being terminated.
// We can *try* to spam the available threads via some message loop proxy to
// achieve this feat, and it *might* be valuable when we are collecting data for
// upload via UMA (where correctness of data may be more significant than for a
// single screen of about:profiler).
//
// TODO(jar): We should support (optionally) the recording of parent-child
// relationships for tasks.  This should be done by detecting what tasks are
// Born during the running of a parent task.  The resulting data can be used by
// a smarter profiler to aggregate the cost of a series of child tasks into
// the ancestor task.  It can also be used to illuminate what child or parent is
// related to each task.
//
// TODO(jar): We need to store DataCollections, and provide facilities for
// taking the difference between two gathered DataCollections.  For now, we're
// just adding a hack that Reset()s to zero all counts and stats.  This is also
// done in a slightly thread-unsafe fashion, as the resetting is done
// asynchronously relative to ongoing updates (but all data is 32 bit in size).
// For basic profiling, this will work "most of the time," and should be
// sufficient... but storing away DataCollections is the "right way" to do this.
// We'll accomplish this via JavaScript storage of snapshots, and then we'll
// remove the Reset() methods.  We may also need a short-term-max value in
// DeathData that is reset (as synchronously as possible) during each snapshot.
// This will facilitate displaying a max value for each snapshot period.

namespace tracked_objects {

//------------------------------------------------------------------------------
// For a specific thread, and a specific birth place, the collection of all
// death info (with tallies for each death thread, to prevent access conflicts).
class ThreadData;
class BASE_EXPORT BirthOnThread {
 public:
  BirthOnThread(const Location& location, const ThreadData& current);

  // Both accessors expose const data that never changes after construction,
  // and hence are safe to call from any thread.
  const Location location() const { return location_; }
  const ThreadData* birth_thread() const { return birth_thread_; }

 private:
  // File/lineno of birth.  This defines the essence of the task, as the context
  // of the birth (construction) often tells what the item is for.  This field
  // is const, and hence safe to access from any thread.
  const Location location_;

  // The thread that records births into this object.  Only this thread is
  // allowed to update birth_count_ (which changes over time).
  const ThreadData* const birth_thread_;

  DISALLOW_COPY_AND_ASSIGN(BirthOnThread);
};

//------------------------------------------------------------------------------
// A "snapshotted" representation of the BirthOnThread class.

struct BASE_EXPORT BirthOnThreadSnapshot {
  BirthOnThreadSnapshot();
  explicit BirthOnThreadSnapshot(const BirthOnThread& birth);
  ~BirthOnThreadSnapshot();

  LocationSnapshot location;
  std::string thread_name;
};

//------------------------------------------------------------------------------
// A class for accumulating counts of births (without bothering with a map<>).

class BASE_EXPORT Births: public BirthOnThread {
 public:
  Births(const Location& location, const ThreadData& current);

  int birth_count() const;

  // When we have a birth we update the count for this birthplace.
  void RecordBirth();

  // When a birthplace is changed (updated), we need to decrement the counter
  // for the old instance.
  void ForgetBirth();

  // Hack to quickly reset all counts to zero.
  void Clear();

 private:
  // The number of births on this thread for our location_.
  int birth_count_;

  DISALLOW_COPY_AND_ASSIGN(Births);
};

//------------------------------------------------------------------------------
// Basic info summarizing multiple destructions of a tracked object with a
// single birthplace (fixed Location).  Used both on specific threads, and also
// in snapshots when integrating assembled data.

class BASE_EXPORT DeathData {
 public:
  // Default initializer.
  DeathData();

  // When deaths have not yet taken place, and we gather data from all the
  // threads, we create DeathData stats that tally the number of births without
  // a corresponding death.
  explicit DeathData(int count);

  // Update stats for a task destruction (death) that had a Run() time of
  // |run_duration|, and has had a queueing delay of |queue_duration|.
  void RecordDeath(const int32 queue_duration,
                   const int32 run_duration,
                   int random_number);

  // Metrics accessors, used only for serialization and in tests.
  int count() const;
  int32 run_duration_sum() const;
  int32 run_duration_max() const;
  int32 run_duration_sample() const;
  int32 queue_duration_sum() const;
  int32 queue_duration_max() const;
  int32 queue_duration_sample() const;

  // Reset the max values to zero.
  void ResetMax();

  // Reset all tallies to zero.  This is used as a hack on realtime data.
  void Clear();

 private:
  // Members are ordered from most regularly read and updated, to least
  // frequently used.  This might help a bit with cache lines.
  // Number of runs seen (divisor for calculating averages).
  int count_;
  // Basic tallies, used to compute averages.
  int32 run_duration_sum_;
  int32 queue_duration_sum_;
  // Max values, used by local visualization routines.  These are often read,
  // but rarely updated.
  int32 run_duration_max_;
  int32 queue_duration_max_;
  // Samples, used by crowd sourcing gatherers.  These are almost never read,
  // and rarely updated.
  int32 run_duration_sample_;
  int32 queue_duration_sample_;
};

//------------------------------------------------------------------------------
// A "snapshotted" representation of the DeathData class.

struct BASE_EXPORT DeathDataSnapshot {
  DeathDataSnapshot();
  explicit DeathDataSnapshot(const DeathData& death_data);
  ~DeathDataSnapshot();

  int count;
  int32 run_duration_sum;
  int32 run_duration_max;
  int32 run_duration_sample;
  int32 queue_duration_sum;
  int32 queue_duration_max;
  int32 queue_duration_sample;
};

//------------------------------------------------------------------------------
// A temporary collection of data that can be sorted and summarized.  It is
// gathered (carefully) from many threads.  Instances are held in arrays and
// processed, filtered, and rendered.
// The source of this data was collected on many threads, and is asynchronously
// changing.  The data in this instance is not asynchronously changing.
334 335 struct BASE_EXPORT TaskSnapshot { 336 TaskSnapshot(); 337 TaskSnapshot(const BirthOnThread& birth, 338 const DeathData& death_data, 339 const std::string& death_thread_name); 340 ~TaskSnapshot(); 341 342 BirthOnThreadSnapshot birth; 343 DeathDataSnapshot death_data; 344 std::string death_thread_name; 345 }; 346 347 //------------------------------------------------------------------------------ 348 // For each thread, we have a ThreadData that stores all tracking info generated 349 // on this thread. This prevents the need for locking as data accumulates. 350 // We use ThreadLocalStorage to quickly identfy the current ThreadData context. 351 // We also have a linked list of ThreadData instances, and that list is used to 352 // harvest data from all existing instances. 353 354 struct ProcessDataSnapshot; 355 class BASE_EXPORT TaskStopwatch; 356 357 class BASE_EXPORT ThreadData { 358 public: 359 // Current allowable states of the tracking system. The states can vary 360 // between ACTIVE and DEACTIVATED, but can never go back to UNINITIALIZED. 361 enum Status { 362 UNINITIALIZED, // PRistine, link-time state before running. 363 DORMANT_DURING_TESTS, // Only used during testing. 364 DEACTIVATED, // No longer recording profling. 365 PROFILING_ACTIVE, // Recording profiles (no parent-child links). 366 PROFILING_CHILDREN_ACTIVE, // Fully active, recording parent-child links. 367 STATUS_LAST = PROFILING_CHILDREN_ACTIVE 368 }; 369 370 typedef std::map<Location, Births*> BirthMap; 371 typedef std::map<const Births*, DeathData> DeathMap; 372 typedef std::pair<const Births*, const Births*> ParentChildPair; 373 typedef std::set<ParentChildPair> ParentChildSet; 374 typedef std::stack<const Births*> ParentStack; 375 376 // Initialize the current thread context with a new instance of ThreadData. 377 // This is used by all threads that have names, and should be explicitly 378 // set *before* any births on the threads have taken place. 
It is generally 379 // only used by the message loop, which has a well defined thread name. 380 static void InitializeThreadContext(const std::string& suggested_name); 381 382 // Using Thread Local Store, find the current instance for collecting data. 383 // If an instance does not exist, construct one (and remember it for use on 384 // this thread. 385 // This may return NULL if the system is disabled for any reason. 386 static ThreadData* Get(); 387 388 // Fills |process_data| with all the recursive results in our process. 389 // During the scavenging, if |reset_max| is true, then the DeathData instances 390 // max-values are reset to zero during this scan. 391 static void Snapshot(bool reset_max, ProcessDataSnapshot* process_data); 392 393 // Finds (or creates) a place to count births from the given location in this 394 // thread, and increment that tally. 395 // TallyABirthIfActive will returns NULL if the birth cannot be tallied. 396 static Births* TallyABirthIfActive(const Location& location); 397 398 // Records the end of a timed run of an object. The |completed_task| contains 399 // a pointer to a Births, the time_posted, and a delayed_start_time if any. 400 // The |start_of_run| indicates when we started to perform the run of the 401 // task. The delayed_start_time is non-null for tasks that were posted as 402 // delayed tasks, and it indicates when the task should have run (i.e., when 403 // it should have posted out of the timer queue, and into the work queue. 404 // The |end_of_run| was just obtained by a call to Now() (just after the task 405 // finished). It is provided as an argument to help with testing. 406 static void TallyRunOnNamedThreadIfTracking( 407 const base::TrackingInfo& completed_task, 408 const TaskStopwatch& stopwatch); 409 410 // Record the end of a timed run of an object. 
The |birth| is the record for 411 // the instance, the |time_posted| records that instant, which is presumed to 412 // be when the task was posted into a queue to run on a worker thread. 413 // The |start_of_run| is when the worker thread started to perform the run of 414 // the task. 415 // The |end_of_run| was just obtained by a call to Now() (just after the task 416 // finished). 417 static void TallyRunOnWorkerThreadIfTracking( 418 const Births* birth, 419 const TrackedTime& time_posted, 420 const TaskStopwatch& stopwatch); 421 422 // Record the end of execution in region, generally corresponding to a scope 423 // being exited. 424 static void TallyRunInAScopedRegionIfTracking( 425 const Births* birth, 426 const TaskStopwatch& stopwatch); 427 thread_name()428 const std::string& thread_name() const { return thread_name_; } 429 430 // Hack: asynchronously clear all birth counts and death tallies data values 431 // in all ThreadData instances. The numerical (zeroing) part is done without 432 // use of a locks or atomics exchanges, and may (for int64 values) produce 433 // bogus counts VERY rarely. 434 static void ResetAllThreadData(); 435 436 // Initializes all statics if needed (this initialization call should be made 437 // while we are single threaded). Returns false if unable to initialize. 438 static bool Initialize(); 439 440 // Sets internal status_. 441 // If |status| is false, then status_ is set to DEACTIVATED. 442 // If |status| is true, then status_ is set to, PROFILING_ACTIVE, or 443 // PROFILING_CHILDREN_ACTIVE. 444 // If tracking is not compiled in, this function will return false. 445 // If parent-child tracking is not compiled in, then an attempt to set the 446 // status to PROFILING_CHILDREN_ACTIVE will only result in a status of 447 // PROFILING_ACTIVE (i.e., it can't be set to a higher level than what is 448 // compiled into the binary, and parent-child tracking at the 449 // PROFILING_CHILDREN_ACTIVE level might not be compiled in). 
450 static bool InitializeAndSetTrackingStatus(Status status); 451 452 static Status status(); 453 454 // Indicate if any sort of profiling is being done (i.e., we are more than 455 // DEACTIVATED). 456 static bool TrackingStatus(); 457 458 // For testing only, indicate if the status of parent-child tracking is turned 459 // on. This is currently a compiled option, atop TrackingStatus(). 460 static bool TrackingParentChildStatus(); 461 462 // Marks a start of a tracked run. It's super fast when tracking is disabled, 463 // and has some internal side effects when we are tracking, so that we can 464 // deduce the amount of time accumulated outside of execution of tracked runs. 465 // The task that will be tracked is passed in as |parent| so that parent-child 466 // relationships can be (optionally) calculated. 467 static void PrepareForStartOfRun(const Births* parent); 468 469 // Provide a time function that does nothing (runs fast) when we don't have 470 // the profiler enabled. It will generally be optimized away when it is 471 // ifdef'ed to be small enough (allowing the profiler to be "compiled out" of 472 // the code). 473 static TrackedTime Now(); 474 475 // Use the function |now| to provide current times, instead of calling the 476 // TrackedTime::Now() function. Since this alternate function is being used, 477 // the other time arguments (used for calculating queueing delay) will be 478 // ignored. 479 static void SetAlternateTimeSource(NowFunction* now); 480 481 // This function can be called at process termination to validate that thread 482 // cleanup routines have been called for at least some number of named 483 // threads. 484 static void EnsureCleanupWasCalled(int major_threads_shutdown_count); 485 486 private: 487 friend class TaskStopwatch; 488 // Allow only tests to call ShutdownSingleThreadedCleanup. We NEVER call it 489 // in production code. 
490 // TODO(jar): Make this a friend in DEBUG only, so that the optimizer has a 491 // better change of optimizing (inlining? etc.) private methods (knowing that 492 // there will be no need for an external entry point). 493 friend class TrackedObjectsTest; 494 FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown); 495 FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown); 496 FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, ParentChildTest); 497 498 typedef std::map<const BirthOnThread*, int> BirthCountMap; 499 500 // Worker thread construction creates a name since there is none. 501 explicit ThreadData(int thread_number); 502 503 // Message loop based construction should provide a name. 504 explicit ThreadData(const std::string& suggested_name); 505 506 ~ThreadData(); 507 508 // Push this instance to the head of all_thread_data_list_head_, linking it to 509 // the previous head. This is performed after each construction, and leaves 510 // the instance permanently on that list. 511 void PushToHeadOfList(); 512 513 // (Thread safe) Get start of list of all ThreadData instances using the lock. 514 static ThreadData* first(); 515 516 // Iterate through the null terminated list of ThreadData instances. 517 ThreadData* next() const; 518 519 520 // In this thread's data, record a new birth. 521 Births* TallyABirth(const Location& location); 522 523 // Find a place to record a death on this thread. 524 void TallyADeath(const Births& birth, 525 int32 queue_duration, 526 const TaskStopwatch& stopwatch); 527 528 // Snapshot (under a lock) the profiled data for the tasks in each ThreadData 529 // instance. Also updates the |birth_counts| tally for each task to keep 530 // track of the number of living instances of the task. If |reset_max| is 531 // true, then the max values in each DeathData instance are reset during the 532 // scan. 
533 static void SnapshotAllExecutedTasks(bool reset_max, 534 ProcessDataSnapshot* process_data, 535 BirthCountMap* birth_counts); 536 537 // Snapshots (under a lock) the profiled data for the tasks for this thread 538 // and writes all of the executed tasks' data -- i.e. the data for the tasks 539 // with with entries in the death_map_ -- into |process_data|. Also updates 540 // the |birth_counts| tally for each task to keep track of the number of 541 // living instances of the task -- that is, each task maps to the number of 542 // births for the task that have not yet been balanced by a death. If 543 // |reset_max| is true, then the max values in each DeathData instance are 544 // reset during the scan. 545 void SnapshotExecutedTasks(bool reset_max, 546 ProcessDataSnapshot* process_data, 547 BirthCountMap* birth_counts); 548 549 // Using our lock, make a copy of the specified maps. This call may be made 550 // on non-local threads, which necessitate the use of the lock to prevent 551 // the map(s) from being reallocaed while they are copied. If |reset_max| is 552 // true, then, just after we copy the DeathMap, we will set the max values to 553 // zero in the active DeathMap (not the snapshot). 554 void SnapshotMaps(bool reset_max, 555 BirthMap* birth_map, 556 DeathMap* death_map, 557 ParentChildSet* parent_child_set); 558 559 // Using our lock to protect the iteration, Clear all birth and death data. 560 void Reset(); 561 562 // This method is called by the TLS system when a thread terminates. 563 // The argument may be NULL if this thread has never tracked a birth or death. 564 static void OnThreadTermination(void* thread_data); 565 566 // This method should be called when a worker thread terminates, so that we 567 // can save all the thread data into a cache of reusable ThreadData instances. 568 void OnThreadTerminationCleanup(); 569 570 // Cleans up data structures, and returns statics to near pristine (mostly 571 // uninitialized) state. 
  // If there is any chance that other threads are still
  // using the data structures, then the |leak| argument should be passed in as
  // true, and the data structures (birth maps, death maps, ThreadData
  // instances, etc.) will be leaked and not deleted.  If you have joined all
  // threads since the time that InitializeAndSetTrackingStatus() was called,
  // then you can pass in a |leak| value of false, and this function will
  // delete recursively all data structures, starting with the list of
  // ThreadData instances.
  static void ShutdownSingleThreadedCleanup(bool leak);

  // When non-null, this specifies an external function that supplies a
  // monotonically increasing time function.
  static NowFunction* now_function_;

  // If true, now_function_ returns values that can be used to calculate queue
  // time.
  static bool now_function_is_time_;

  // We use thread local storage (TLS) to identify which ThreadData to
  // interact with.
  static base::ThreadLocalStorage::StaticSlot tls_index_;

  // List of ThreadData instances for use with worker threads.  When a worker
  // thread is done (terminated), we push it onto this list.  When a new worker
  // thread is created, we first try to re-use a ThreadData instance from the
  // list, and if none are available, construct a new one.
  // This is only accessed while list_lock_ is held.
  static ThreadData* first_retired_worker_;

  // Link to the most recently created instance (starts a null terminated
  // list).  The list is traversed by about:profiler when it needs to snapshot
  // data.  This is only accessed while list_lock_ is held.
  static ThreadData* all_thread_data_list_head_;

  // The next available worker thread number.  This should only be accessed
  // while the list_lock_ is held.
  static int worker_thread_data_creation_count_;

  // The number of times TLS has called us back to clean up a ThreadData
  // instance.  This is only accessed while list_lock_ is held.
  static int cleanup_count_;

  // Incarnation sequence number, indicating how many times (during unittests)
  // we've either transitioned out of UNINITIALIZED, or into that state.  This
  // value is only accessed while the list_lock_ is held.
  static int incarnation_counter_;

  // Protection for access to all_thread_data_list_head_, and to
  // unregistered_thread_data_pool_.  This lock is leaked at shutdown.
  // The lock is very infrequently used, so we can afford to just make a lazy
  // instance and be safe.
  static base::LazyInstance<base::Lock>::Leaky list_lock_;

  // We set status_ to SHUTDOWN when we shut down the tracking service.
  static Status status_;

  // Link to next instance (null terminated list).  Used to globally track all
  // registered instances (corresponds to all registered threads where we keep
  // data).
  ThreadData* next_;

  // Pointer to another ThreadData instance for a Worker-Thread that has been
  // retired (its thread was terminated).  This value is non-NULL only for a
  // retired ThreadData associated with a Worker-Thread.
  ThreadData* next_retired_worker_;

  // The name of the thread that is being recorded.  If this thread has no
  // message_loop, then this is a worker thread, with a sequence number postfix.
  std::string thread_name_;

  // Indicates if this is a worker thread, and the ThreadData contexts should
  // be stored in the unregistered_thread_data_pool_ when not in use.
  // Value is zero when it is not a worker thread.  Value is a positive integer
  // corresponding to the created thread name if it is a worker thread.
  int worker_thread_number_;

  // A map used on each thread to keep track of Births on this thread.
  // This map should only be accessed on the thread it was constructed on.
  // When a snapshot is needed, this structure can be locked in place for the
  // duration of the snapshotting activity.
  BirthMap birth_map_;

  // Similar to birth_map_, this records information about the death of tracked
  // instances (i.e., when a tracked instance was destroyed on this thread).
  // It is locked before changing, and hence other threads may access it by
  // locking before reading it.
  DeathMap death_map_;

  // A set of parents that created children tasks on this thread.  Each pair
  // corresponds to potentially non-local Births (location and thread), and a
  // local Births (that took place on this thread).
  ParentChildSet parent_child_set_;

  // Lock to protect *some* access to BirthMap and DeathMap.  The maps are
  // regularly read and written on this thread, but may only be read from other
  // threads.  To support this, we acquire this lock if we are writing from
  // this thread, or reading from another thread.  For reading from this
  // thread we don't need a lock, as there is no potential for a conflict
  // since the writing is only done from this thread.
  mutable base::Lock map_lock_;

  // The stack of parents that are currently being profiled.  This includes
  // only tasks that have started a timer recently via PrepareForStartOfRun(),
  // but not yet concluded with a NowForEndOfRun().  Usually this stack is one
  // deep, but if a scoped region is profiled, or <sigh> a task runs a
  // nested-message loop, then the stack can grow larger.  Note that we don't
  // try to deduct time in nested profiles, as our current timer is based on
  // wall-clock time, and not CPU time (and we're hopeful that nested timing
  // won't be a significant additional cost).
  ParentStack parent_stack_;

  // A random number that we use to decide which sample to keep as a
  // representative sample in each DeathData instance.
  // We can't start off with much randomness (because we can't call RandInt()
  // on all our threads), so we stir in more and more as we go.
  int32 random_number_;

  // Record of what the incarnation_counter_ was when this instance was
  // created.  If the incarnation_counter_ has changed, then we avoid pushing
  // into the pool (this is only critical in tests which go through multiple
  // incarnations).
  int incarnation_count_for_pool_;

  // Most recently started (i.e. most nested) stopwatch on the current thread,
  // if it exists; NULL otherwise.
  TaskStopwatch* current_stopwatch_;

  DISALLOW_COPY_AND_ASSIGN(ThreadData);
};

//------------------------------------------------------------------------------
// Stopwatch to measure task run time or simply create a time interval that
// will be subtracted from the current most nested task's run time.
// Stopwatches coordinate with the stopwatches in which they are nested to
// avoid double-counting nested tasks' run times.

class BASE_EXPORT TaskStopwatch {
 public:
  // Starts the stopwatch.
  TaskStopwatch();
  ~TaskStopwatch();

  // Stops the stopwatch.
  void Stop();

  // Returns the start time.
  TrackedTime StartTime() const;

  // Task's duration is calculated as the wallclock duration between starting
  // and stopping this stopwatch, minus the wallclock durations of any other
  // instances that are immediately nested in this one, started and stopped on
  // this thread during that period.
  int32 RunDurationMs() const;

  // Returns tracking info for the current thread.
  ThreadData* GetThreadData() const;

 private:
  // Time when the stopwatch was started.
  TrackedTime start_time_;

  // Wallclock duration of the task.
  int32 wallclock_duration_ms_;

  // Tracking info for the current thread.
  ThreadData* current_thread_data_;

  // Sum of wallclock durations of all stopwatches that were directly nested
  // in this one.
  int32 excluded_duration_ms_;

  // Stopwatch which was running on our thread when this stopwatch was
  // started.  That preexisting stopwatch must be adjusted to exclude the
  // wallclock duration of this stopwatch.
  TaskStopwatch* parent_;

#if DCHECK_IS_ON
  // State of the stopwatch.  Stopwatch is first constructed in a running
  // state, then stopped, then destructed.
  enum {
    RUNNING,
    STOPPED
  } state_;

  // Currently running stopwatch that is directly nested in this one, if such
  // a stopwatch exists.  NULL otherwise.
  TaskStopwatch* child_;
#endif
};

//------------------------------------------------------------------------------
// A snapshotted representation of a (parent, child) task pair, for tracking
// hierarchical profiles.

struct BASE_EXPORT ParentChildPairSnapshot {
 public:
  ParentChildPairSnapshot();
  explicit ParentChildPairSnapshot(
      const ThreadData::ParentChildPair& parent_child);
  ~ParentChildPairSnapshot();

  BirthOnThreadSnapshot parent;
  BirthOnThreadSnapshot child;
};

//------------------------------------------------------------------------------
// A snapshotted representation of the list of ThreadData objects for a
// process.

struct BASE_EXPORT ProcessDataSnapshot {
 public:
  ProcessDataSnapshot();
  ~ProcessDataSnapshot();

  std::vector<TaskSnapshot> tasks;
  std::vector<ParentChildPairSnapshot> descendants;
  int process_id;
};

}  // namespace tracked_objects

#endif  // BASE_TRACKED_OBJECTS_H_