1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/debug/activity_tracker.h"
6 
7 #include <algorithm>
8 #include <limits>
9 #include <utility>
10 
11 #include "base/atomic_sequence_num.h"
12 #include "base/debug/stack_trace.h"
13 #include "base/files/file.h"
14 #include "base/files/file_path.h"
15 #include "base/files/memory_mapped_file.h"
16 #include "base/logging.h"
17 #include "base/memory/ptr_util.h"
18 #include "base/metrics/field_trial.h"
19 #include "base/metrics/histogram_macros.h"
20 #include "base/pending_task.h"
21 #include "base/pickle.h"
22 #include "base/process/process.h"
23 #include "base/process/process_handle.h"
24 #include "base/stl_util.h"
25 #include "base/strings/string_util.h"
26 #include "base/strings/utf_string_conversions.h"
27 #include "base/threading/platform_thread.h"
28 #include "build/build_config.h"
29 
30 namespace base {
31 namespace debug {
32 
33 namespace {
34 
35 // The minimum depth a stack should support.
36 const int kMinStackDepth = 2;
37 
38 // The amount of memory set aside for holding arbitrary user data (key/value
39 // pairs) globally or associated with ActivityData entries.
40 const size_t kUserDataSize = 1 << 10;     // 1 KiB
41 const size_t kProcessDataSize = 4 << 10;  // 4 KiB
42 const size_t kMaxUserDataNameLength =
43     static_cast<size_t>(std::numeric_limits<uint8_t>::max());
44 
45 // A constant used to indicate that module information is changing.
46 const uint32_t kModuleInformationChanging = 0x80000000;
47 
48 // The key used to record process information.
49 const char kProcessPhaseDataKey[] = "process-phase";
50 
51 // An atomically incrementing number, used to check for recreations of objects
52 // in the same memory space.
53 AtomicSequenceNumber g_next_id;
54 
55 // Gets the next non-zero identifier. It is only unique within a process.
56 uint32_t GetNextDataId() {
57   uint32_t id;
58   while ((id = g_next_id.GetNext()) == 0)
59     ;
60   return id;
61 }
62 
63 // Gets the current process-id, either from the GlobalActivityTracker if it
64 // exists (where the PID can be defined for testing) or from the system if
65 // there isn't such.
66 int64_t GetProcessId() {
67   GlobalActivityTracker* global = GlobalActivityTracker::Get();
68   if (global)
69     return global->process_id();
70   return GetCurrentProcId();
71 }
72 
73 // Finds and reuses a specific allocation or creates a new one.
74 PersistentMemoryAllocator::Reference AllocateFrom(
75     PersistentMemoryAllocator* allocator,
76     uint32_t from_type,
77     size_t size,
78     uint32_t to_type) {
79   PersistentMemoryAllocator::Iterator iter(allocator);
80   PersistentMemoryAllocator::Reference ref;
81   while ((ref = iter.GetNextOfType(from_type)) != 0) {
82     DCHECK_LE(size, allocator->GetAllocSize(ref));
83     // This can fail if another thread has just taken it. It is assumed that
84     // the memory is cleared during the "free" operation.
85     if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
86       return ref;
87   }
88 
89   return allocator->Allocate(size, to_type);
90 }
91 
92 // Determines the previous aligned index.
93 size_t RoundDownToAlignment(size_t index, size_t alignment) {
94   return index & (0 - alignment);
95 }
96 
97 // Determines the next aligned index.
98 size_t RoundUpToAlignment(size_t index, size_t alignment) {
99   return (index + (alignment - 1)) & (0 - alignment);
100 }
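
// Illustrative note (not part of the original source): these helpers rely on
// |alignment| being a power of two, so that (0 - alignment) is the mask that
// clears the low bits. For example, with alignment == 8:
//   RoundDownToAlignment(13, 8) == 13 & ~7        == 8
//   RoundUpToAlignment(13, 8)   == (13 + 7) & ~7  == 16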
101 
102 // Converts "tick" timing into wall time.
103 Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
104   return time_start + TimeDelta::FromInternalValue(ticks - ticks_start);
105 }
106 
107 }  // namespace
108 
109 union ThreadRef {
110   int64_t as_id;
111 #if defined(OS_WIN)
112   // On Windows, the handle itself is often a pseudo-handle with a common
113   // value meaning "this thread" and so the thread-id is used. The former
114   // can be converted to a thread-id with a system call.
115   PlatformThreadId as_tid;
116 #elif defined(OS_POSIX) || defined(OS_FUCHSIA)
117   // On Posix and Fuchsia, the handle is always a unique identifier so no
118   // conversion needs to be done. However, its value is officially opaque so
119   // there is no one correct way to convert it to a numerical identifier.
120   PlatformThreadHandle::Handle as_handle;
121 #endif
122 };
123 
124 OwningProcess::OwningProcess() = default;
125 OwningProcess::~OwningProcess() = default;
126 
127 void OwningProcess::Release_Initialize(int64_t pid) {
128   uint32_t old_id = data_id.load(std::memory_order_acquire);
129   DCHECK_EQ(0U, old_id);
130   process_id = pid != 0 ? pid : GetProcessId();
131   create_stamp = Time::Now().ToInternalValue();
132   data_id.store(GetNextDataId(), std::memory_order_release);
133 }
134 
135 void OwningProcess::SetOwningProcessIdForTesting(int64_t pid, int64_t stamp) {
136   DCHECK_NE(0U, data_id);
137   process_id = pid;
138   create_stamp = stamp;
139 }
140 
141 // static
142 bool OwningProcess::GetOwningProcessId(const void* memory,
143                                        int64_t* out_id,
144                                        int64_t* out_stamp) {
145   const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
146   uint32_t id = info->data_id.load(std::memory_order_acquire);
147   if (id == 0)
148     return false;
149 
150   *out_id = info->process_id;
151   *out_stamp = info->create_stamp;
152   return id == info->data_id.load(std::memory_order_seq_cst);
153 }
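
// Illustrative usage (not part of the original source): a reader validating a
// block of shared memory might do something like:
//   int64_t pid, stamp;
//   if (OwningProcess::GetOwningProcessId(memory, &pid, &stamp)) {
//     // |pid| and |stamp| identify the writer; the second |data_id| load
//     // above guards against the block being recycled mid-read.
//   }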
154 
155 // It doesn't matter what is contained in this (though it will be all zeros)
156 // as only the address of it is important.
157 const ActivityData kNullActivityData = {};
158 
159 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
160   ThreadRef thread_ref;
161   thread_ref.as_id = 0;  // Zero the union in case other is smaller.
162 #if defined(OS_WIN)
163   thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
164 #elif defined(OS_POSIX)
165   thread_ref.as_handle = handle.platform_handle();
166 #endif
167   return ForThread(thread_ref.as_id);
168 }
169 
170 ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
171     PersistentMemoryAllocator* allocator,
172     uint32_t object_type,
173     uint32_t object_free_type,
174     size_t object_size,
175     size_t cache_size,
176     bool make_iterable)
177     : allocator_(allocator),
178       object_type_(object_type),
179       object_free_type_(object_free_type),
180       object_size_(object_size),
181       cache_size_(cache_size),
182       make_iterable_(make_iterable),
183       iterator_(allocator),
184       cache_values_(new Reference[cache_size]),
185       cache_used_(0) {
186   DCHECK(allocator);
187 }
188 
189 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() = default;
190 
191 ActivityTrackerMemoryAllocator::Reference
192 ActivityTrackerMemoryAllocator::GetObjectReference() {
193   // First see if there is a cached value that can be returned. This is much
194   // faster than searching the memory system for free blocks.
195   while (cache_used_ > 0) {
196     Reference cached = cache_values_[--cache_used_];
197     // Change the type of the cached object to the proper type and return it.
198     // If the type-change fails that means another thread has taken this from
199     // under us (via the search below) so ignore it and keep trying. Don't
200     // clear the memory because that was done when the type was made "free".
201     if (allocator_->ChangeType(cached, object_type_, object_free_type_, false))
202       return cached;
203   }
204 
205   // Fetch the next "free" object from persistent memory. Rather than restart
206   // the iterator at the head each time and likely waste time going again
207   // through objects that aren't relevant, the iterator continues from where
208   // it last left off and is only reset when the end is reached. If the
209   // returned reference matches |last|, then it has wrapped without finding
210   // anything.
211   const Reference last = iterator_.GetLast();
212   while (true) {
213     uint32_t type;
214     Reference found = iterator_.GetNext(&type);
215     if (found && type == object_free_type_) {
216       // Found a free object. Change it to the proper type and return it. If
217       // the type-change fails that means another thread has taken this from
218       // under us so ignore it and keep trying.
219       if (allocator_->ChangeType(found, object_type_, object_free_type_, false))
220         return found;
221     }
222     if (found == last) {
223       // Wrapped. No desired object was found.
224       break;
225     }
226     if (!found) {
227       // Reached end; start over at the beginning.
228       iterator_.Reset();
229     }
230   }
231 
232   // No free block was found so instead allocate a new one.
233   Reference allocated = allocator_->Allocate(object_size_, object_type_);
234   if (allocated && make_iterable_)
235     allocator_->MakeIterable(allocated);
236   return allocated;
237 }
238 
239 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
240   // Mark object as free.
241   bool success = allocator_->ChangeType(ref, object_free_type_, object_type_,
242                                         /*clear=*/true);
243   DCHECK(success);
244 
245   // Add this reference to our "free" cache if there is space. If not, the type
246   // has still been changed to indicate that it is free so this (or another)
247   // thread can find it, albeit more slowly, using the iteration method above.
248   if (cache_used_ < cache_size_)
249     cache_values_[cache_used_++] = ref;
250 }
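
// Illustrative sketch (not part of the original source) of the intended
// pairing, assuming an ActivityTrackerMemoryAllocator named |object_allocator|:
//   Reference ref = object_allocator.GetObjectReference();
//   if (ref) {
//     // ... use the persistent object referenced by |ref| ...
//     object_allocator.ReleaseObjectReference(ref);  // Marks it "free" again.
//   }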
251 
252 // static
253 void Activity::FillFrom(Activity* activity,
254                         const void* program_counter,
255                         const void* origin,
256                         Type type,
257                         const ActivityData& data) {
258   activity->time_internal = base::TimeTicks::Now().ToInternalValue();
259   activity->calling_address = reinterpret_cast<uintptr_t>(program_counter);
260   activity->origin_address = reinterpret_cast<uintptr_t>(origin);
261   activity->activity_type = type;
262   activity->data = data;
263 
264 #if (!defined(OS_NACL) && DCHECK_IS_ON()) || defined(ADDRESS_SANITIZER)
265   // Create a stacktrace from the current location and get the addresses for
266   // improved debuggability.
267   StackTrace stack_trace;
268   size_t stack_depth;
269   const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
270   // Copy the stack addresses, ignoring the first one (here).
271   size_t i;
272   for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
273     activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
274   }
275   activity->call_stack[i - 1] = 0;
276 #else
277   activity->call_stack[0] = 0;
278 #endif
279 }
280 
281 ActivityUserData::TypedValue::TypedValue() = default;
282 ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default;
283 ActivityUserData::TypedValue::~TypedValue() = default;
284 
285 StringPiece ActivityUserData::TypedValue::Get() const {
286   DCHECK_EQ(RAW_VALUE, type_);
287   return long_value_;
288 }
289 
290 StringPiece ActivityUserData::TypedValue::GetString() const {
291   DCHECK_EQ(STRING_VALUE, type_);
292   return long_value_;
293 }
294 
295 bool ActivityUserData::TypedValue::GetBool() const {
296   DCHECK_EQ(BOOL_VALUE, type_);
297   return short_value_ != 0;
298 }
299 
300 char ActivityUserData::TypedValue::GetChar() const {
301   DCHECK_EQ(CHAR_VALUE, type_);
302   return static_cast<char>(short_value_);
303 }
304 
305 int64_t ActivityUserData::TypedValue::GetInt() const {
306   DCHECK_EQ(SIGNED_VALUE, type_);
307   return static_cast<int64_t>(short_value_);
308 }
309 
310 uint64_t ActivityUserData::TypedValue::GetUint() const {
311   DCHECK_EQ(UNSIGNED_VALUE, type_);
312   return static_cast<uint64_t>(short_value_);
313 }
314 
315 StringPiece ActivityUserData::TypedValue::GetReference() const {
316   DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
317   return ref_value_;
318 }
319 
320 StringPiece ActivityUserData::TypedValue::GetStringReference() const {
321   DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
322   return ref_value_;
323 }
324 
325 // These are required because std::atomic is (currently) not a POD type and
326 // thus clang requires explicit out-of-line constructors and destructors even
327 // when they do nothing.
328 ActivityUserData::ValueInfo::ValueInfo() = default;
329 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
330 ActivityUserData::ValueInfo::~ValueInfo() = default;
331 ActivityUserData::MemoryHeader::MemoryHeader() = default;
332 ActivityUserData::MemoryHeader::~MemoryHeader() = default;
333 ActivityUserData::FieldHeader::FieldHeader() = default;
334 ActivityUserData::FieldHeader::~FieldHeader() = default;
335 
336 ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0, -1) {}
337 
338 ActivityUserData::ActivityUserData(void* memory, size_t size, int64_t pid)
339     : memory_(reinterpret_cast<char*>(memory)),
340       available_(RoundDownToAlignment(size, kMemoryAlignment)),
341       header_(reinterpret_cast<MemoryHeader*>(memory)),
342       orig_data_id(0),
343       orig_process_id(0),
344       orig_create_stamp(0) {
345   // It's possible that no user data is being stored.
346   if (!memory_)
347     return;
348 
349   static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
350   DCHECK_LT(sizeof(MemoryHeader), available_);
351   if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
352     header_->owner.Release_Initialize(pid);
353   memory_ += sizeof(MemoryHeader);
354   available_ -= sizeof(MemoryHeader);
355 
356   // Make a copy of identifying information for later comparison.
357   *const_cast<uint32_t*>(&orig_data_id) =
358       header_->owner.data_id.load(std::memory_order_acquire);
359   *const_cast<int64_t*>(&orig_process_id) = header_->owner.process_id;
360   *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp;
361 
362   // If there is already data present, load that. This allows the same class
363   // to be used for analysis through snapshots.
364   ImportExistingData();
365 }
366 
367 ActivityUserData::~ActivityUserData() = default;
368 
369 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
370   DCHECK(output_snapshot);
371   DCHECK(output_snapshot->empty());
372 
373   // Find any new data that may have been added by an active instance of this
374   // class that is adding records.
375   ImportExistingData();
376 
377   // Add all the values to the snapshot.
378   for (const auto& entry : values_) {
379     TypedValue value;
380     const size_t size = entry.second.size_ptr->load(std::memory_order_acquire);
381     value.type_ = entry.second.type;
382     DCHECK_GE(entry.second.extent, size);
383 
384     switch (entry.second.type) {
385       case RAW_VALUE:
386       case STRING_VALUE:
387         value.long_value_ =
388             std::string(reinterpret_cast<char*>(entry.second.memory), size);
389         break;
390       case RAW_VALUE_REFERENCE:
391       case STRING_VALUE_REFERENCE: {
392         ReferenceRecord* ref =
393             reinterpret_cast<ReferenceRecord*>(entry.second.memory);
394         value.ref_value_ = StringPiece(
395             reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
396             static_cast<size_t>(ref->size));
397       } break;
398       case BOOL_VALUE:
399       case CHAR_VALUE:
400         value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
401         break;
402       case SIGNED_VALUE:
403       case UNSIGNED_VALUE:
404         value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
405         break;
406       case END_OF_VALUES:  // Included for completeness purposes.
407         NOTREACHED();
408     }
409     auto inserted = output_snapshot->insert(
410         std::make_pair(entry.second.name.as_string(), std::move(value)));
411     DCHECK(inserted.second);  // True if inserted, false if existed.
412   }
413 
414   // Another import attempt will validate that the underlying memory has not
415   // been reused for another purpose. Entries added since the first import
416   // will be ignored here but will be returned if another snapshot is created.
417   ImportExistingData();
418   if (!memory_) {
419     output_snapshot->clear();
420     return false;
421   }
422 
423   // Successful snapshot.
424   return true;
425 }
426 
427 const void* ActivityUserData::GetBaseAddress() const {
428   // The |memory_| pointer advances as elements are written but the |header_|
429   // value is always at the start of the block so just return that.
430   return header_;
431 }
432 
433 void ActivityUserData::SetOwningProcessIdForTesting(int64_t pid,
434                                                     int64_t stamp) {
435   if (!header_)
436     return;
437   header_->owner.SetOwningProcessIdForTesting(pid, stamp);
438 }
439 
440 // static
441 bool ActivityUserData::GetOwningProcessId(const void* memory,
442                                           int64_t* out_id,
443                                           int64_t* out_stamp) {
444   const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
445   return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
446 }
447 
448 void ActivityUserData::Set(StringPiece name,
449                            ValueType type,
450                            const void* memory,
451                            size_t size) {
452   DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
453   size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
454                   size);
455 
456   // It's possible that no user data is being stored.
457   if (!memory_)
458     return;
459 
460   // The storage of a name is limited so use that limit during lookup.
461   if (name.length() > kMaxUserDataNameLength)
462     name.set(name.data(), kMaxUserDataNameLength);
463 
464   ValueInfo* info;
465   auto existing = values_.find(name);
466   if (existing != values_.end()) {
467     info = &existing->second;
468   } else {
469     // The name size is limited to what can be held in a single byte but
469     // because there are no alignment constraints on strings, it's set tight
471     // against the header. Its extent (the reserved space, even if it's not
472     // all used) is calculated so that, when pressed against the header, the
473     // following field will be aligned properly.
474     size_t name_size = name.length();
475     size_t name_extent =
476         RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
477         sizeof(FieldHeader);
478     size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
479 
480     // The "base size" is the size of the header and (padded) string key. Stop
481     // now if there's not room enough for even this.
482     size_t base_size = sizeof(FieldHeader) + name_extent;
483     if (base_size > available_)
484       return;
485 
486     // The "full size" is the size for storing the entire value.
487     size_t full_size = std::min(base_size + value_extent, available_);
488 
489     // If the value is actually a single byte, see if it can be stuffed at the
490     // end of the name extent rather than wasting kMemoryAlignment bytes.
491     if (size == 1 && name_extent > name_size) {
492       full_size = base_size;
493       --name_extent;
494       --base_size;
495     }
496 
497     // Truncate the stored size to the amount of available memory. Stop now if
498     // there's not any room for even part of the value.
499     if (size != 0) {
500       size = std::min(full_size - base_size, size);
501       if (size == 0)
502         return;
503     }
504 
505     // Allocate a chunk of memory.
506     FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
507     memory_ += full_size;
508     available_ -= full_size;
509 
510     // Datafill the header and name records. Memory must be zeroed. The |type|
511     // is written last, atomically, to release all the other values.
512     DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
513     DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
514     header->name_size = static_cast<uint8_t>(name_size);
515     header->record_size = full_size;
516     char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
517     void* value_memory =
518         reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
519     memcpy(name_memory, name.data(), name_size);
520     header->type.store(type, std::memory_order_release);
521 
522     // Create an entry in |values_| so that this field can be found and changed
523     // later on without having to allocate new entries.
524     StringPiece persistent_name(name_memory, name_size);
525     auto inserted =
526         values_.insert(std::make_pair(persistent_name, ValueInfo()));
527     DCHECK(inserted.second);  // True if inserted, false if existed.
528     info = &inserted.first->second;
529     info->name = persistent_name;
530     info->memory = value_memory;
531     info->size_ptr = &header->value_size;
532     info->extent = full_size - sizeof(FieldHeader) - name_extent;
533     info->type = type;
534   }
535 
536   // Copy the value data to storage. The |size| is written last, atomically, to
537   // release the copied data. Until then, a parallel reader will just ignore
538   // records with a zero size.
539   DCHECK_EQ(type, info->type);
540   size = std::min(size, info->extent);
541   info->size_ptr->store(0, std::memory_order_seq_cst);
542   memcpy(info->memory, memory, size);
543   info->size_ptr->store(size, std::memory_order_release);
544 }
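
// Worked example (illustrative only, assuming for the sake of the numbers that
// sizeof(FieldHeader) == 8 and kMemoryAlignment == 8): storing the key "cpu"
// (3 bytes) with a 4-byte value gives
//   name_extent  = RoundUpToAlignment(8 + 3, 8) - 8 = 8
//   value_extent = RoundUpToAlignment(4, 8)         = 8
//   base_size    = 8 + 8                            = 16
//   full_size    = 16 + 8                           = 24
// so the record occupies 24 bytes and the next field starts properly aligned.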
545 
546 void ActivityUserData::SetReference(StringPiece name,
547                                     ValueType type,
548                                     const void* memory,
549                                     size_t size) {
550   ReferenceRecord rec;
551   rec.address = reinterpret_cast<uintptr_t>(memory);
552   rec.size = size;
553   Set(name, type, &rec, sizeof(rec));
554 }
555 
556 void ActivityUserData::ImportExistingData() const {
557   // It's possible that no user data is being stored.
558   if (!memory_)
559     return;
560 
561   while (available_ > sizeof(FieldHeader)) {
562     FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
563     ValueType type =
564         static_cast<ValueType>(header->type.load(std::memory_order_acquire));
565     if (type == END_OF_VALUES)
566       return;
567     if (header->record_size > available_)
568       return;
569 
570     size_t value_offset = RoundUpToAlignment(
571         sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
572     if (header->record_size == value_offset &&
573         header->value_size.load(std::memory_order_relaxed) == 1) {
574       value_offset -= 1;
575     }
576     if (value_offset + header->value_size > header->record_size)
577       return;
578 
579     ValueInfo info;
580     info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
581     info.type = type;
582     info.memory = memory_ + value_offset;
583     info.size_ptr = &header->value_size;
584     info.extent = header->record_size - value_offset;
585 
586     StringPiece key(info.name);
587     values_.insert(std::make_pair(key, std::move(info)));
588 
589     memory_ += header->record_size;
590     available_ -= header->record_size;
591   }
592 
593   // Check if memory has been completely reused.
594   if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id ||
595       header_->owner.process_id != orig_process_id ||
596       header_->owner.create_stamp != orig_create_stamp) {
597     memory_ = nullptr;
598     values_.clear();
599   }
600 }
601 
602 // This information is kept for every thread that is tracked. It is filled
603 // the very first time the thread is seen. All fields must be of exact sizes
604 // so there is no issue moving between 32 and 64-bit builds.
605 struct ThreadActivityTracker::Header {
606   // Defined in .h for analyzer access. Increment this if structure changes!
607   static constexpr uint32_t kPersistentTypeId =
608       GlobalActivityTracker::kTypeIdActivityTracker;
609 
610   // Expected size for 32/64-bit check.
611   static constexpr size_t kExpectedInstanceSize =
612       OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
613       72;
614 
615   // This information uniquely identifies a process.
616   OwningProcess owner;
617 
618   // The thread-id (thread_ref.as_id) to which this data belongs. This number
619   // is not guaranteed to mean anything but combined with the process-id from
620   // OwningProcess is unique among all active trackers.
621   ThreadRef thread_ref;
622 
623   // The start-time and start-ticks when the data was created. Each activity
624   // record has a |time_internal| value that can be converted to a "wall time"
625   // with these two values.
626   int64_t start_time;
627   int64_t start_ticks;
628 
629   // The number of Activity slots (spaces that can hold an Activity) that
630   // immediately follow this structure in memory.
631   uint32_t stack_slots;
632 
633   // Some padding to keep everything 64-bit aligned.
634   uint32_t padding;
635 
636   // The current depth of the stack. This may be greater than the number of
637   // slots. If the depth exceeds the number of slots, the newest entries
638   // won't be recorded.
639   std::atomic<uint32_t> current_depth;
640 
641   // A memory location used to indicate if changes have been made to the data
642   // that would invalidate an in-progress read of its contents. The active
643   // tracker will increment the value whenever something gets popped from the
644   // stack. A monitoring tracker can check the value before and after access
645   // to know, if it's still the same, that the contents didn't change while
646   // being copied.
647   std::atomic<uint32_t> data_version;
648 
649   // The last "exception" activity. This can't be stored on the stack because
650   // that could get popped as things unwind.
651   Activity last_exception;
652 
653   // The name of the thread (up to a maximum length). Dynamic-length names
654   // are not practical since the memory has to come from the same persistent
655   // allocator that holds this structure and to which this object has no
656   // reference.
657   char thread_name[32];
658 };
659 
660 ThreadActivityTracker::Snapshot::Snapshot() = default;
661 ThreadActivityTracker::Snapshot::~Snapshot() = default;
662 
663 ThreadActivityTracker::ScopedActivity::ScopedActivity(
664     ThreadActivityTracker* tracker,
665     const void* program_counter,
666     const void* origin,
667     Activity::Type type,
668     const ActivityData& data)
669     : tracker_(tracker) {
670   if (tracker_)
671     activity_id_ = tracker_->PushActivity(program_counter, origin, type, data);
672 }
673 
674 ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
675   if (tracker_)
676     tracker_->PopActivity(activity_id_);
677 }
678 
679 void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
680     Activity::Type type,
681     const ActivityData& data) {
682   if (tracker_)
683     tracker_->ChangeActivity(activity_id_, type, data);
684 }
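
// Illustrative usage (not part of the original source): a caller typically
// brackets a tracked region with the RAII helper, e.g.:
//   {
//     ThreadActivityTracker::ScopedActivity scope(
//         tracker, /*program_counter=*/nullptr, /*origin=*/nullptr,
//         Activity::ACT_GENERIC, kNullActivityData);
//     // ... work being tracked; the activity is popped on scope exit ...
//   }
// The null pointers and ACT_GENERIC are placeholders chosen for the example;
// real callers pass actual program-counter/origin addresses and a type that
// matches the activity being recorded.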
685 
686 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
687     : header_(static_cast<Header*>(base)),
688       stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
689                                          sizeof(Header))),
690 #if DCHECK_IS_ON()
691       thread_id_(PlatformThreadRef()),
692 #endif
693       stack_slots_(
694           static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
695 
696   // Verify the parameters but fail gracefully if they're not valid so that
697   // production code based on external inputs will not crash.  IsValid() will
698   // return false in this case.
699   if (!base ||
700       // Ensure there is enough space for the header and at least a few records.
701       size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
702       // Ensure that the |stack_slots_| calculation didn't overflow.
703       (size - sizeof(Header)) / sizeof(Activity) >
704           std::numeric_limits<uint32_t>::max()) {
705     NOTREACHED();
706     return;
707   }
708 
709   // Ensure that the thread reference doesn't exceed the size of the ID number.
710   // This won't compile at the global scope because Header is a private struct.
711   static_assert(
712       sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
713       "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
714 
715   // Ensure that Activity.data is aligned to a 64-bit boundary so there
716   // are no interoperability issues across CPU architectures (the layout
717   // must match between 32-bit and 64-bit builds).
718   static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
719                 "ActivityData.data is not 64-bit aligned");
720 
721   // Provided memory should either be completely initialized or all zeros.
722   if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
723     // This is a new file. Double-check other fields and then initialize.
724     DCHECK_EQ(0, header_->owner.process_id);
725     DCHECK_EQ(0, header_->owner.create_stamp);
726     DCHECK_EQ(0, header_->thread_ref.as_id);
727     DCHECK_EQ(0, header_->start_time);
728     DCHECK_EQ(0, header_->start_ticks);
729     DCHECK_EQ(0U, header_->stack_slots);
730     DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
731     DCHECK_EQ(0U, header_->data_version.load(std::memory_order_relaxed));
732     DCHECK_EQ(0, stack_[0].time_internal);
733     DCHECK_EQ(0U, stack_[0].origin_address);
734     DCHECK_EQ(0U, stack_[0].call_stack[0]);
735     DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
736 
737 #if defined(OS_WIN)
738     header_->thread_ref.as_tid = PlatformThread::CurrentId();
739 #elif defined(OS_POSIX) || defined(OS_FUCHSIA)
740     header_->thread_ref.as_handle =
741         PlatformThread::CurrentHandle().platform_handle();
742 #endif
743 
744     header_->start_time = base::Time::Now().ToInternalValue();
745     header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
746     header_->stack_slots = stack_slots_;
747     strlcpy(header_->thread_name, PlatformThread::GetName(),
748             sizeof(header_->thread_name));
749 
750     // This is done last so as to guarantee that everything above is "released"
751     // by the time this value gets written.
752     header_->owner.Release_Initialize();
753 
754     valid_ = true;
755     DCHECK(IsValid());
756   } else {
757     // This is a file with existing data. Perform basic consistency checks.
758     valid_ = true;
759     valid_ = IsValid();
760   }
761 }
762 
763 ThreadActivityTracker::~ThreadActivityTracker() = default;
764 
765 ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
766     const void* program_counter,
767     const void* origin,
768     Activity::Type type,
769     const ActivityData& data) {
770   // A thread-checker creates a lock to check the thread-id which means
771   // re-entry into this code if lock acquisitions are being tracked.
772   DCHECK(type == Activity::ACT_LOCK_ACQUIRE || CalledOnValidThread());
773 
774   // Get the current depth of the stack. No access to other memory guarded
775   // by this variable is done here so a "relaxed" load is acceptable.
776   uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
777 
778   // Handle the case where the stack depth has exceeded the storage capacity.
779   // Extra entries will be lost leaving only the base of the stack.
780   if (depth >= stack_slots_) {
781     // Since no other threads modify the data, no compare/exchange is needed.
782     // Since no other memory is being modified, a "relaxed" store is acceptable.
783     header_->current_depth.store(depth + 1, std::memory_order_relaxed);
784     return depth;
785   }
786 
787   // Get a pointer to the next activity and load it. No atomicity is required
788   // here because the memory is known only to this thread. It will be made
789   // known to other threads once the depth is incremented.
790   Activity::FillFrom(&stack_[depth], program_counter, origin, type, data);
791 
792   // Save the incremented depth. Because this guards |activity| memory filled
793   // above that may be read by another thread once the recorded depth changes,
794   // a "release" store is required.
795   header_->current_depth.store(depth + 1, std::memory_order_release);
796 
797   // The current depth is used as the activity ID because it simply identifies
798   // an entry. Once an entry is pop'd, it's okay to reuse the ID.
799   return depth;
800 }
801 
802 void ThreadActivityTracker::ChangeActivity(ActivityId id,
803                                            Activity::Type type,
804                                            const ActivityData& data) {
805   DCHECK(CalledOnValidThread());
806   DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
807   DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
808 
809   // Update the information if it is being recorded (i.e. within slot limit).
810   if (id < stack_slots_) {
811     Activity* activity = &stack_[id];
812 
813     if (type != Activity::ACT_NULL) {
814       DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
815                 type & Activity::ACT_CATEGORY_MASK);
816       activity->activity_type = type;
817     }
818 
819     if (&data != &kNullActivityData)
820       activity->data = data;
821   }
822 }
823 
824 void ThreadActivityTracker::PopActivity(ActivityId id) {
825   // Do an atomic decrement of the depth. No changes to stack entries guarded
826   // by this variable are done here so a "relaxed" operation is acceptable.
827   // |depth| will receive the value BEFORE it was modified which means the
828   // return value must also be decremented. The slot will be "free" after
829   // this call but since only a single thread can access this object, the
830   // data will remain valid until this method returns or calls outside.
831   uint32_t depth =
832       header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
833 
834   // Validate that everything is running correctly.
835   DCHECK_EQ(id, depth);
836 
837   // A thread-checker creates a lock to check the thread-id which means
838   // re-entry into this code if lock acquisitions are being tracked.
839   DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
840          CalledOnValidThread());
841 
842   // The stack has shrunk meaning that some other thread trying to copy the
843   // contents for reporting purposes could get bad data. Increment the data
844   // version so that it can tell that things have changed. This needs to
845   // happen after the atomic |depth| operation above so a "release" store
846   // is required.
847   header_->data_version.fetch_add(1, std::memory_order_release);
848 }
849 
850 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
851     ActivityId id,
852     ActivityTrackerMemoryAllocator* allocator) {
853   // Don't allow user data for lock acquisition as recursion may occur.
854   if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
855     NOTREACHED();
856     return std::make_unique<ActivityUserData>();
857   }
858 
859   // User-data is only stored for activities actually held in the stack.
860   if (id >= stack_slots_)
861     return std::make_unique<ActivityUserData>();
862 
863   // Create and return a real UserData object.
864   return CreateUserDataForActivity(&stack_[id], allocator);
865 }
866 
867 bool ThreadActivityTracker::HasUserData(ActivityId id) {
868   // User-data is only stored for activities actually held in the stack.
869   return (id < stack_slots_ && stack_[id].user_data_ref);
870 }
871 
872 void ThreadActivityTracker::ReleaseUserData(
873     ActivityId id,
874     ActivityTrackerMemoryAllocator* allocator) {
875   // User-data is only stored for activities actually held in the stack.
876   if (id < stack_slots_ && stack_[id].user_data_ref) {
877     allocator->ReleaseObjectReference(stack_[id].user_data_ref);
878     stack_[id].user_data_ref = 0;
879   }
880 }
881 
882 void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
883                                                     const void* origin,
884                                                     Activity::Type type,
885                                                     const ActivityData& data) {
886   // A thread-checker creates a lock to check the thread-id which means
887   // re-entry into this code if lock acquisitions are being tracked.
888   DCHECK(CalledOnValidThread());
889 
890   // Fill the reusable exception activity.
891   Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
892                      data);
893 
894   // The data has changed meaning that some other thread trying to copy the
895   // contents for reporting purposes could get bad data.
896   header_->data_version.fetch_add(1, std::memory_order_relaxed);
897 }
898 
899 bool ThreadActivityTracker::IsValid() const {
900   if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
901       header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
902       header_->start_time == 0 || header_->start_ticks == 0 ||
903       header_->stack_slots != stack_slots_ ||
904       header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
905     return false;
906   }
907 
908   return valid_;
909 }
910 
911 bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
912   DCHECK(output_snapshot);
913 
914   // There is no "called on valid thread" check for this method as it can be
915   // called from other threads or even other processes. It is also the reason
916   // why atomic operations must be used in certain places above.
917 
918   // It's possible for the data to change while reading it in such a way that it
919   // invalidates the read. Make several attempts but don't try forever.
920   const int kMaxAttempts = 10;
921   uint32_t depth;
922 
923   // Stop here if the data isn't valid.
924   if (!IsValid())
925     return false;
926 
927   // Allocate the maximum size for the stack so it doesn't have to be done
928   // during the time-sensitive snapshot operation. It is shrunk once the
929   // actual size is known.
930   output_snapshot->activity_stack.reserve(stack_slots_);
931 
932   for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
933     // Remember the data IDs to ensure nothing is replaced during the snapshot
934     // operation. Use "acquire" so that all the non-atomic fields of the
935     // structure are valid (at least at the current moment in time).
936     const uint32_t starting_id =
937         header_->owner.data_id.load(std::memory_order_acquire);
938     const int64_t starting_create_stamp = header_->owner.create_stamp;
939     const int64_t starting_process_id = header_->owner.process_id;
940     const int64_t starting_thread_id = header_->thread_ref.as_id;
941 
942     // Note the current |data_version| so it's possible to detect at the end
943     // that nothing has changed since copying the data began. A "cst" operation
944     // is required to ensure it occurs before everything else. Using "cst"
945     // memory ordering is relatively expensive but this is only done during
946     // analysis so doesn't directly affect the worker threads.
947     const uint32_t pre_version =
948         header_->data_version.load(std::memory_order_seq_cst);
949 
950     // Fetching the current depth also "acquires" the contents of the stack.
951     depth = header_->current_depth.load(std::memory_order_acquire);
952     uint32_t count = std::min(depth, stack_slots_);
953     output_snapshot->activity_stack.resize(count);
954     if (count > 0) {
955       // Copy the existing contents. Memcpy is used for speed.
956       memcpy(&output_snapshot->activity_stack[0], stack_,
957              count * sizeof(Activity));
958     }
959 
960     // Capture the last exception.
961     memcpy(&output_snapshot->last_exception, &header_->last_exception,
962            sizeof(Activity));
963 
964     // TODO(bcwhite): Snapshot other things here.
965 
966     // Retry if something changed during the copy. A "cst" operation ensures
967     // it must happen after all the above operations.
968     if (header_->data_version.load(std::memory_order_seq_cst) != pre_version)
969       continue;
970 
971     // Stack copied. Record its full depth.
972     output_snapshot->activity_stack_depth = depth;
973 
974     // Get the general thread information.
975     output_snapshot->thread_name =
976         std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
977     output_snapshot->create_stamp = header_->owner.create_stamp;
978     output_snapshot->thread_id = header_->thread_ref.as_id;
979     output_snapshot->process_id = header_->owner.process_id;
980 
981     // All characters of the thread-name buffer were copied so as to not break
982     // if the trailing NUL were missing. Now limit the length if the actual
983     // name is shorter.
984     output_snapshot->thread_name.resize(
985         strlen(output_snapshot->thread_name.c_str()));
986 
987     // If the data ID has changed then the tracker has exited and the memory
988     // reused by a new one. Try again.
989     if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
990         output_snapshot->create_stamp != starting_create_stamp ||
991         output_snapshot->process_id != starting_process_id ||
992         output_snapshot->thread_id != starting_thread_id) {
993       continue;
994     }
995 
996     // Only successful if the data is still valid once everything is done since
997     // it's possible for the thread to end somewhere in the middle and all its
998     // values become garbage.
999     if (!IsValid())
1000       return false;
1001 
1002     // Change all the timestamps in the activities from "ticks" to "wall" time.
1003     const Time start_time = Time::FromInternalValue(header_->start_time);
1004     const int64_t start_ticks = header_->start_ticks;
1005     for (Activity& activity : output_snapshot->activity_stack) {
1006       activity.time_internal =
1007           WallTimeFromTickTime(start_ticks, activity.time_internal, start_time)
1008               .ToInternalValue();
1009     }
1010     output_snapshot->last_exception.time_internal =
1011         WallTimeFromTickTime(start_ticks,
1012                              output_snapshot->last_exception.time_internal,
1013                              start_time)
1014             .ToInternalValue();
1015 
1016     // Success!
1017     return true;
1018   }
1019 
1020   // Too many attempts.
1021   return false;
1022 }
1023 
1024 const void* ThreadActivityTracker::GetBaseAddress() {
1025   return header_;
1026 }
1027 
1028 uint32_t ThreadActivityTracker::GetDataVersionForTesting() {
1029   return header_->data_version.load(std::memory_order_relaxed);
1030 }
1031 
1032 void ThreadActivityTracker::SetOwningProcessIdForTesting(int64_t pid,
1033                                                          int64_t stamp) {
1034   header_->owner.SetOwningProcessIdForTesting(pid, stamp);
1035 }
1036 
1037 // static
1038 bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
1039                                                int64_t* out_id,
1040                                                int64_t* out_stamp) {
1041   const Header* header = reinterpret_cast<const Header*>(memory);
1042   return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
1043 }
1044 
1045 // static
1046 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
1047   return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
1048 }
1049 
1050 bool ThreadActivityTracker::CalledOnValidThread() {
1051 #if DCHECK_IS_ON()
1052   return thread_id_ == PlatformThreadRef();
1053 #else
1054   return true;
1055 #endif
1056 }
1057 
1058 std::unique_ptr<ActivityUserData>
1059 ThreadActivityTracker::CreateUserDataForActivity(
1060     Activity* activity,
1061     ActivityTrackerMemoryAllocator* allocator) {
1062   DCHECK_EQ(0U, activity->user_data_ref);
1063 
1064   PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
1065   void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
1066   if (memory) {
1067     std::unique_ptr<ActivityUserData> user_data =
1068         std::make_unique<ActivityUserData>(memory, kUserDataSize);
1069     activity->user_data_ref = ref;
1070     activity->user_data_id = user_data->id();
1071     return user_data;
1072   }
1073 
1074   // Return a dummy object that will still accept (but ignore) Set() calls.
1075   return std::make_unique<ActivityUserData>();
1076 }
1077 
1078 // The instantiation of the GlobalActivityTracker object.
1079 // The object held here will obviously not be destructed at process exit
1080 // but that's best since PersistentMemoryAllocator objects (that underlie
1081 // GlobalActivityTracker objects) are explicitly forbidden from doing anything
1082 // essential at exit anyway due to the fact that they depend on data managed
1083 // elsewhere and which could be destructed first. An AtomicWord is used instead
1084 // of std::atomic because the latter can create global ctors and dtors.
1085 subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0;
1086 
1087 GlobalActivityTracker::ModuleInfo::ModuleInfo() = default;
1088 GlobalActivityTracker::ModuleInfo::ModuleInfo(ModuleInfo&& rhs) = default;
1089 GlobalActivityTracker::ModuleInfo::ModuleInfo(const ModuleInfo& rhs) = default;
1090 GlobalActivityTracker::ModuleInfo::~ModuleInfo() = default;
1091 
1092 GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
1093     ModuleInfo&& rhs) = default;
1094 GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
1095     const ModuleInfo& rhs) = default;
1096 
1097 GlobalActivityTracker::ModuleInfoRecord::ModuleInfoRecord() = default;
1098 GlobalActivityTracker::ModuleInfoRecord::~ModuleInfoRecord() = default;
1099 
1100 bool GlobalActivityTracker::ModuleInfoRecord::DecodeTo(
1101     GlobalActivityTracker::ModuleInfo* info,
1102     size_t record_size) const {
1103   // Get the current "changes" indicator, acquiring all the other values.
1104   uint32_t current_changes = changes.load(std::memory_order_acquire);
1105 
1106   // Copy out the dynamic information.
1107   info->is_loaded = loaded != 0;
1108   info->address = static_cast<uintptr_t>(address);
1109   info->load_time = load_time;
1110 
1111   // Check to make sure no information changed while being read. A "seq-cst"
1112   // operation is expensive but is only done during analysis and it's the only
1113   // way to ensure this occurs after all the accesses above. If changes did
1114   // occur then return a "not loaded" result so that |size| and |address|
1115   // aren't expected to be accurate.
1116   if ((current_changes & kModuleInformationChanging) != 0 ||
1117       changes.load(std::memory_order_seq_cst) != current_changes) {
1118     info->is_loaded = false;
1119   }
1120 
1121   // Copy out the static information. These never change so don't have to be
1122   // protected by the atomic |current_changes| operations.
1123   info->size = static_cast<size_t>(size);
1124   info->timestamp = timestamp;
1125   info->age = age;
1126   memcpy(info->identifier, identifier, sizeof(info->identifier));
1127 
1128   if (offsetof(ModuleInfoRecord, pickle) + pickle_size > record_size)
1129     return false;
1130   Pickle pickler(pickle, pickle_size);
1131   PickleIterator iter(pickler);
1132   return iter.ReadString(&info->file) && iter.ReadString(&info->debug_file);
1133 }
1134 
1135 GlobalActivityTracker::ModuleInfoRecord*
1136 GlobalActivityTracker::ModuleInfoRecord::CreateFrom(
1137     const GlobalActivityTracker::ModuleInfo& info,
1138     PersistentMemoryAllocator* allocator) {
1139   Pickle pickler;
1140   pickler.WriteString(info.file);
1141   pickler.WriteString(info.debug_file);
1142   size_t required_size = offsetof(ModuleInfoRecord, pickle) + pickler.size();
1143   ModuleInfoRecord* record = allocator->New<ModuleInfoRecord>(required_size);
1144   if (!record)
1145     return nullptr;
1146 
1147   // These fields never change and are set before the record is made
1148   // iterable, so no thread protection is necessary.
1149   record->size = info.size;
1150   record->timestamp = info.timestamp;
1151   record->age = info.age;
1152   memcpy(record->identifier, info.identifier, sizeof(identifier));
1153   memcpy(record->pickle, pickler.data(), pickler.size());
1154   record->pickle_size = pickler.size();
1155   record->changes.store(0, std::memory_order_relaxed);
1156 
1157   // Initialize the owner info.
1158   record->owner.Release_Initialize();
1159 
1160   // Now set those fields that can change.
1161   bool success = record->UpdateFrom(info);
1162   DCHECK(success);
1163   return record;
1164 }
1165 
1166 bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
1167     const GlobalActivityTracker::ModuleInfo& info) {
1168   // Updates can occur after the record is made visible so make changes atomic.
1169   // A "strong" exchange ensures no false failures.
1170   uint32_t old_changes = changes.load(std::memory_order_relaxed);
1171   uint32_t new_changes = old_changes | kModuleInformationChanging;
1172   if ((old_changes & kModuleInformationChanging) != 0 ||
1173       !changes.compare_exchange_strong(old_changes, new_changes,
1174                                        std::memory_order_acquire,
1175                                        std::memory_order_acquire)) {
1176     NOTREACHED() << "Multiple sources are updating module information.";
1177     return false;
1178   }
1179 
1180   loaded = info.is_loaded ? 1 : 0;
1181   address = info.address;
1182   load_time = Time::Now().ToInternalValue();
1183 
1184   bool success = changes.compare_exchange_strong(new_changes, old_changes + 1,
1185                                                  std::memory_order_release,
1186                                                  std::memory_order_relaxed);
1187   DCHECK(success);
1188   return true;
1189 }
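
// Illustrative note (not part of the original source): |changes| acts like a
// sequence lock. UpdateFrom() sets kModuleInformationChanging, rewrites the
// dynamic fields, then stores old_changes + 1 with the flag cleared. DecodeTo()
// reports |is_loaded| as false if it observes the flag set or sees the counter
// change across its reads, so readers never trust a half-written record.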
1190 
1191 GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity(
1192     const void* program_counter,
1193     const void* origin,
1194     Activity::Type type,
1195     const ActivityData& data,
1196     bool lock_allowed)
1197     : ThreadActivityTracker::ScopedActivity(GetOrCreateTracker(lock_allowed),
1198                                             program_counter,
1199                                             origin,
1200                                             type,
1201                                             data) {}
1202 
1203 GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() {
1204   if (tracker_ && tracker_->HasUserData(activity_id_)) {
1205     GlobalActivityTracker* global = GlobalActivityTracker::Get();
1206     AutoLock lock(global->user_data_allocator_lock_);
1207     tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_);
1208   }
1209 }
1210 
1211 ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
1212   if (!user_data_) {
1213     if (tracker_) {
1214       GlobalActivityTracker* global = GlobalActivityTracker::Get();
1215       AutoLock lock(global->user_data_allocator_lock_);
1216       user_data_ =
1217           tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
1218     } else {
1219       user_data_ = std::make_unique<ActivityUserData>();
1220     }
1221   }
1222   return *user_data_;
1223 }
1224 
1225 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
1226                                                               size_t size,
1227                                                               int64_t pid)
1228     : ActivityUserData(memory, size, pid) {}
1229 
1230 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() = default;
1231 
1232 void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
1233                                                     ValueType type,
1234                                                     const void* memory,
1235                                                     size_t size) {
1236   AutoLock lock(data_lock_);
1237   ActivityUserData::Set(name, type, memory, size);
1238 }
1239 
1240 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
1241     PersistentMemoryAllocator::Reference mem_reference,
1242     void* base,
1243     size_t size)
1244     : ThreadActivityTracker(base, size),
1245       mem_reference_(mem_reference),
1246       mem_base_(base) {}
1247 
1248 GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
1249   // The global |g_tracker_| must point to the owner of this class since all
1250   // objects of this type must be destructed before |g_tracker_| can be changed
1251   // (something that only occurs in tests).
1252   DCHECK(g_tracker_);
1253   GlobalActivityTracker::Get()->ReturnTrackerMemory(this);
1254 }
1255 
1256 void GlobalActivityTracker::CreateWithAllocator(
1257     std::unique_ptr<PersistentMemoryAllocator> allocator,
1258     int stack_depth,
1259     int64_t process_id) {
1260   // There's no need to do anything with the result. It is self-managing.
1261   GlobalActivityTracker* global_tracker =
1262       new GlobalActivityTracker(std::move(allocator), stack_depth, process_id);
1263   // Create a tracker for this thread since it is known.
1264   global_tracker->CreateTrackerForCurrentThread();
1265 }
1266 
1267 #if !defined(OS_NACL)
1268 // static
1269 bool GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
1270                                            size_t size,
1271                                            uint64_t id,
1272                                            StringPiece name,
1273                                            int stack_depth) {
1274   DCHECK(!file_path.empty());
1275   DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size);
1276 
1277   // Create and map the file into memory and make it globally available.
1278   std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
1279   bool success = mapped_file->Initialize(
1280       File(file_path, File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
1281                           File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
1282       {0, size}, MemoryMappedFile::READ_WRITE_EXTEND);
1283   if (!success)
1284     return false;
1285   if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mapped_file, false))
1286     return false;
1287   CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
1288                           std::move(mapped_file), size, id, name, false),
1289                       stack_depth, 0);
1290   return true;
1291 }
1292 #endif  // !defined(OS_NACL)
1293 
1294 // static
1295 bool GlobalActivityTracker::CreateWithLocalMemory(size_t size,
1296                                                   uint64_t id,
1297                                                   StringPiece name,
1298                                                   int stack_depth,
1299                                                   int64_t process_id) {
1300   CreateWithAllocator(
1301       std::make_unique<LocalPersistentMemoryAllocator>(size, id, name),
1302       stack_depth, process_id);
1303   return true;
1304 }
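
// Illustrative usage sketch (the size, id, name and activity values below are
// placeholders, not taken from the original source): a process could enable
// tracking with a local arena and then attribute work to activities through
// the scoped helpers defined later in this file, e.g.
//
//   base::debug::GlobalActivityTracker::CreateWithLocalMemory(
//       2 << 20 /* size */, 0 /* id */, "ActivityTracker" /* name */,
//       4 /* stack_depth */, 0 /* process_id: 0 means current process */);
//   {
//     base::debug::ScopedActivity activity(
//         nullptr /* program_counter */, 0 /* action */, 42 /* id */,
//         0 /* info */);
//     // Work done here is recorded against the activity.
//   }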
1305 
1306 // static
1307 bool GlobalActivityTracker::CreateWithSharedMemory(
1308     std::unique_ptr<SharedMemory> shm,
1309     uint64_t id,
1310     StringPiece name,
1311     int stack_depth) {
1312   if (shm->mapped_size() == 0 ||
1313       !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
1314     return false;
1315   }
1316   CreateWithAllocator(std::make_unique<SharedPersistentMemoryAllocator>(
1317                           std::move(shm), id, name, false),
1318                       stack_depth, 0);
1319   return true;
1320 }
1321 
1322 // static
1323 bool GlobalActivityTracker::CreateWithSharedMemoryHandle(
1324     const SharedMemoryHandle& handle,
1325     size_t size,
1326     uint64_t id,
1327     StringPiece name,
1328     int stack_depth) {
1329   std::unique_ptr<SharedMemory> shm(
1330       new SharedMemory(handle, /*readonly=*/false));
1331   if (!shm->Map(size))
1332     return false;
1333   return CreateWithSharedMemory(std::move(shm), id, name, stack_depth);
1334 }
1335 
1336 // static
1337 void GlobalActivityTracker::SetForTesting(
1338     std::unique_ptr<GlobalActivityTracker> tracker) {
1339   CHECK(!subtle::NoBarrier_Load(&g_tracker_));
1340   subtle::Release_Store(&g_tracker_,
1341                         reinterpret_cast<uintptr_t>(tracker.release()));
1342 }
1343 
1344 // static
1345 std::unique_ptr<GlobalActivityTracker>
1346 GlobalActivityTracker::ReleaseForTesting() {
1347   GlobalActivityTracker* tracker = Get();
1348   if (!tracker)
1349     return nullptr;
1350 
1351   // Thread trackers assume that the global tracker is present for some
1352   // operations, so ensure that none remain before the global one is cleared.
1353   tracker->ReleaseTrackerForCurrentThreadForTesting();
1354   DCHECK_EQ(0, tracker->thread_tracker_count_.load(std::memory_order_relaxed));
1355 
1356   subtle::Release_Store(&g_tracker_, 0);
1357   return WrapUnique(tracker);
1358 }
1359 
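// Builds the per-thread tracker: a block is taken from the cached allocator,
// validated as a ThreadActivityTracker::Header, wrapped in a
// ManagedActivityTracker and stored in thread-local storage; returns null if
// no memory is available.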
1360 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
1361   DCHECK(!this_thread_tracker_.Get());
1362 
1363   PersistentMemoryAllocator::Reference mem_reference;
1364 
1365   {
1366     base::AutoLock autolock(thread_tracker_allocator_lock_);
1367     mem_reference = thread_tracker_allocator_.GetObjectReference();
1368   }
1369 
1370   if (!mem_reference) {
1371     // Failure. This shouldn't happen, but be graceful if it does; the most
1372     // likely cause is that the underlying allocator wasn't given enough
1373     // memory to satisfy all possible requests.
1374     NOTREACHED();
1375     // Report the thread-count at which the allocator was full so that the
1376     // failure can be seen and the underlying memory resized appropriately.
1377     UMA_HISTOGRAM_COUNTS_1000(
1378         "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
1379         thread_tracker_count_.load(std::memory_order_relaxed));
1380     // Return null, just as if tracking wasn't enabled.
1381     return nullptr;
1382   }
1383 
1384   // Convert the memory block found above into an actual memory address.
1385   // Doing the conversion as a Header object enacts the 32/64-bit size
1386   // consistency checks that would not otherwise be done. Unfortunately,
1387   // some older compilers and MSVC don't have standard-conforming definitions
1388   // of std::atomic, which causes it not to be plain-old-data. Don't check on
1389   // those platforms, assuming that the checks on other platforms will be
1390   // sufficient.
1391   // TODO(bcwhite): Review this after major compiler releases.
1392   DCHECK(mem_reference);
1393   void* mem_base;
1394   mem_base =
1395       allocator_->GetAsObject<ThreadActivityTracker::Header>(mem_reference);
1396 
1397   DCHECK(mem_base);
1398   DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
1399 
1400   // Create a tracker with the acquired memory and set it as the tracker
1401   // for this particular thread in thread-local-storage.
1402   ManagedActivityTracker* tracker =
1403       new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
1404   DCHECK(tracker->IsValid());
1405   this_thread_tracker_.Set(tracker);
1406   int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
1407 
1408   UMA_HISTOGRAM_EXACT_LINEAR("ActivityTracker.ThreadTrackers.Count",
1409                              old_count + 1, static_cast<int>(kMaxThreadCount));
1410   return tracker;
1411 }
1412 
1413 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
1414   ThreadActivityTracker* tracker =
1415       reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
1416   if (tracker) {
1417     this_thread_tracker_.Set(nullptr);
1418     delete tracker;
1419   }
1420 }
1421 
1422 void GlobalActivityTracker::SetBackgroundTaskRunner(
1423     const scoped_refptr<TaskRunner>& runner) {
1424   AutoLock lock(global_tracker_lock_);
1425   background_task_runner_ = runner;
1426 }
1427 
1428 void GlobalActivityTracker::SetProcessExitCallback(
1429     ProcessExitCallback callback) {
1430   AutoLock lock(global_tracker_lock_);
1431   process_exit_callback_ = callback;
1432 }
1433 
1434 void GlobalActivityTracker::RecordProcessLaunch(
1435     ProcessId process_id,
1436     const FilePath::StringType& cmd) {
1437   const int64_t pid = process_id;
1438   DCHECK_NE(GetProcessId(), pid);
1439   DCHECK_NE(0, pid);
1440 
1441   base::AutoLock lock(global_tracker_lock_);
1442   if (base::ContainsKey(known_processes_, pid)) {
1443     // TODO(bcwhite): Measure this in UMA.
1444     NOTREACHED() << "Process #" << process_id
1445                  << " was previously recorded as \"launched\""
1446                  << " with no corresponding exit.\n"
1447                  << known_processes_[pid];
1448     known_processes_.erase(pid);
1449   }
1450 
1451 #if defined(OS_WIN)
1452   known_processes_.insert(std::make_pair(pid, UTF16ToUTF8(cmd)));
1453 #else
1454   known_processes_.insert(std::make_pair(pid, cmd));
1455 #endif
1456 }
1457 
1458 void GlobalActivityTracker::RecordProcessLaunch(
1459     ProcessId process_id,
1460     const FilePath::StringType& exe,
1461     const FilePath::StringType& args) {
1462   if (exe.find(FILE_PATH_LITERAL(" ")) != FilePath::StringType::npos) {
1463     RecordProcessLaunch(process_id,
1464                         FilePath::StringType(FILE_PATH_LITERAL("\"")) + exe +
1465                             FILE_PATH_LITERAL("\" ") + args);
1466   } else {
1467     RecordProcessLaunch(process_id, exe + FILE_PATH_LITERAL(' ') + args);
1468   }
1469 }
1470 
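// Marks a previously recorded launch as exited and triggers cleanup of the
// process's persistent allocations, on the background task runner when one
// has been set.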
1471 void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
1472                                               int exit_code) {
1473   const int64_t pid = process_id;
1474   DCHECK_NE(GetProcessId(), pid);
1475   DCHECK_NE(0, pid);
1476 
1477   scoped_refptr<TaskRunner> task_runner;
1478   std::string command_line;
1479   {
1480     base::AutoLock lock(global_tracker_lock_);
1481     task_runner = background_task_runner_;
1482     auto found = known_processes_.find(pid);
1483     if (found != known_processes_.end()) {
1484       command_line = std::move(found->second);
1485       known_processes_.erase(found);
1486     } else {
1487       DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
1488     }
1489   }
1490 
1491   // Use the current time to differentiate the process that just exited
1492   // from any that might be created in the future with the same ID.
1493   int64_t now_stamp = Time::Now().ToInternalValue();
1494 
1495   // The persistent allocator is thread-safe so run the iteration and
1496   // adjustments on a worker thread if one was provided.
1497   if (task_runner && !task_runner->RunsTasksInCurrentSequence()) {
1498     task_runner->PostTask(
1499         FROM_HERE,
1500         BindOnce(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this),
1501                  pid, now_stamp, exit_code, std::move(command_line)));
1502     return;
1503   }
1504 
1505   CleanupAfterProcess(pid, now_stamp, exit_code, std::move(command_line));
1506 }
1507 
1508 void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
1509   process_data().SetInt(kProcessPhaseDataKey, phase);
1510 }
1511 
1512 void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
1513                                                 int64_t exit_stamp,
1514                                                 int exit_code,
1515                                                 std::string&& command_line) {
1516   // The process may not have exited cleanly so it's necessary to go through
1517   // all the data structures it may have allocated in the persistent memory
1518   // segment and mark them as "released". This will allow them to be reused
1519   // later on.
1520 
1521   PersistentMemoryAllocator::Iterator iter(allocator_.get());
1522   PersistentMemoryAllocator::Reference ref;
1523 
1524   ProcessExitCallback process_exit_callback;
1525   {
1526     AutoLock lock(global_tracker_lock_);
1527     process_exit_callback = process_exit_callback_;
1528   }
1529   if (process_exit_callback) {
1530     // Find the process's user-data record so the process phase can be passed
1531     // to the callback.
1532     ActivityUserData::Snapshot process_data_snapshot;
1533     while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
1534       const void* memory = allocator_->GetAsArray<char>(
1535           ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
1536       if (!memory)
1537         continue;
1538       int64_t found_id;
1539       int64_t create_stamp;
1540       if (ActivityUserData::GetOwningProcessId(memory, &found_id,
1541                                                &create_stamp)) {
1542         if (found_id == process_id && create_stamp < exit_stamp) {
1543           const ActivityUserData process_data(const_cast<void*>(memory),
1544                                               allocator_->GetAllocSize(ref));
1545           process_data.CreateSnapshot(&process_data_snapshot);
1546           break;  // No need to look for any others.
1547         }
1548       }
1549     }
1550     iter.Reset();  // So it starts anew when used below.
1551 
1552     // Record the process's phase at exit so the callback doesn't need to go
1553     // searching based on a private key value.
1554     ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
1555     auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
1556     if (phase != process_data_snapshot.end())
1557       exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
1558 
1559     // Perform the callback.
1560     process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
1561                               std::move(command_line),
1562                               std::move(process_data_snapshot));
1563   }
1564 
1565   // Find all allocations associated with the exited process and free them.
1566   uint32_t type;
1567   while ((ref = iter.GetNext(&type)) != 0) {
1568     switch (type) {
1569       case kTypeIdActivityTracker:
1570       case kTypeIdUserDataRecord:
1571       case kTypeIdProcessDataRecord:
1572       case ModuleInfoRecord::kPersistentTypeId: {
1573         const void* memory = allocator_->GetAsArray<char>(
1574             ref, type, PersistentMemoryAllocator::kSizeAny);
1575         if (!memory)
1576           continue;
1577         int64_t found_id;
1578         int64_t create_stamp;
1579 
1580         // By convention, the OwningProcess structure is always the first
1581         // field of the structure so there's no need to handle all the
1582         // cases separately.
1583         if (OwningProcess::GetOwningProcessId(memory, &found_id,
1584                                               &create_stamp)) {
1585           // Only change the type to be "free" if the process ID matches and
1586           // the creation time is before the exit time (so PID re-use doesn't
1587           // cause the erasure of something that is in-use). Memory is cleared
1588           // here, rather than when it's needed, so as to limit the impact at
1589           // that critical time.
1590           if (found_id == process_id && create_stamp < exit_stamp)
1591             allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
1592         }
1593       } break;
1594     }
1595   }
1596 }
1597 
1598 void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
1599   // Allocate at least one extra byte so the string is NUL terminated. All
1600   // memory returned by the allocator is guaranteed to be zeroed.
1601   PersistentMemoryAllocator::Reference ref =
1602       allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
1603   char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
1604                                               message.size() + 1);
1605   if (memory) {
1606     memcpy(memory, message.data(), message.size());
1607     allocator_->MakeIterable(ref);
1608   }
1609 }
1610 
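// Records, or refreshes, the persistent record describing a loaded or
// unloaded module, keyed by the module's file name.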
1611 void GlobalActivityTracker::RecordModuleInfo(const ModuleInfo& info) {
1612   AutoLock lock(modules_lock_);
1613   auto found = modules_.find(info.file);
1614   if (found != modules_.end()) {
1615     ModuleInfoRecord* record = found->second;
1616     DCHECK(record);
1617 
1618     // Update the basic state of module information that has already been
1619     // recorded. It is assumed that the string information (identifier,
1620     // version, etc.) remains unchanged, which means that there's no need
1621     // to create a new record to accommodate a possibly longer length.
1622     record->UpdateFrom(info);
1623     return;
1624   }
1625 
1626   ModuleInfoRecord* record =
1627       ModuleInfoRecord::CreateFrom(info, allocator_.get());
1628   if (!record)
1629     return;
1630   allocator_->MakeIterable(record);
1631   modules_.emplace(info.file, record);
1632 }
1633 
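// Stores the active group for |trial_name| in the process-data record under
// the key "FieldTrial.<trial_name>" so an analyzer can recover the experiment
// state of the process.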
1634 void GlobalActivityTracker::RecordFieldTrial(const std::string& trial_name,
1635                                              StringPiece group_name) {
1636   const std::string key = std::string("FieldTrial.") + trial_name;
1637   process_data_.SetString(key, group_name);
1638 }
1639 
1640 void GlobalActivityTracker::RecordException(const void* pc,
1641                                             const void* origin,
1642                                             uint32_t code) {
1643   RecordExceptionImpl(pc, origin, code);
1644 }
1645 
1646 void GlobalActivityTracker::MarkDeleted() {
1647   allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
1648 }
1649 
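// The constructor layers the caching allocators for thread trackers and
// user-data records on top of the persistent arena, claims (or reuses) the
// process-data record, publishes |this| as the global tracker, and records
// the launch phase plus any already-active field trials.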
1650 GlobalActivityTracker::GlobalActivityTracker(
1651     std::unique_ptr<PersistentMemoryAllocator> allocator,
1652     int stack_depth,
1653     int64_t process_id)
1654     : allocator_(std::move(allocator)),
1655       stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
1656       process_id_(process_id == 0 ? GetCurrentProcId() : process_id),
1657       this_thread_tracker_(&OnTLSDestroy),
1658       thread_tracker_count_(0),
1659       thread_tracker_allocator_(allocator_.get(),
1660                                 kTypeIdActivityTracker,
1661                                 kTypeIdActivityTrackerFree,
1662                                 stack_memory_size_,
1663                                 kCachedThreadMemories,
1664                                 /*make_iterable=*/true),
1665       user_data_allocator_(allocator_.get(),
1666                            kTypeIdUserDataRecord,
1667                            kTypeIdUserDataRecordFree,
1668                            kUserDataSize,
1669                            kCachedUserDataMemories,
1670                            /*make_iterable=*/true),
1671       process_data_(allocator_->GetAsArray<char>(
1672                         AllocateFrom(allocator_.get(),
1673                                      kTypeIdProcessDataRecordFree,
1674                                      kProcessDataSize,
1675                                      kTypeIdProcessDataRecord),
1676                         kTypeIdProcessDataRecord,
1677                         kProcessDataSize),
1678                     kProcessDataSize,
1679                     process_id_) {
1680   DCHECK_NE(0, process_id_);
1681 
1682   // Ensure that there is no other global object and then make this one such.
1683   DCHECK(!g_tracker_);
1684   subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
1685 
1686   // The data records must be iterable in order to be found by an analyzer.
1687   allocator_->MakeIterable(allocator_->GetAsReference(
1688       process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
1689 
1690   // Note that this process has launched.
1691   SetProcessPhase(PROCESS_LAUNCHED);
1692 
1693   // Fetch and record all activated field trials.
1694   FieldTrial::ActiveGroups active_groups;
1695   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
1696   for (auto& group : active_groups)
1697     RecordFieldTrial(group.trial_name, group.group_name);
1698 }
1699 
1700 GlobalActivityTracker::~GlobalActivityTracker() {
1701   DCHECK(Get() == nullptr || Get() == this);
1702   DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
1703   subtle::Release_Store(&g_tracker_, 0);
1704 }
1705 
1706 void GlobalActivityTracker::ReturnTrackerMemory(
1707     ManagedActivityTracker* tracker) {
1708   PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
1709   void* mem_base = tracker->mem_base_;
1710   DCHECK(mem_reference);
1711   DCHECK(mem_base);
1712 
1713   // Remove the destructed tracker from the set of known ones.
1714   DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
1715   thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
1716 
1717   // Release this memory for re-use at a later time.
1718   base::AutoLock autolock(thread_tracker_allocator_lock_);
1719   thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
1720 }
1721 
1722 void GlobalActivityTracker::RecordExceptionImpl(const void* pc,
1723                                                 const void* origin,
1724                                                 uint32_t code) {
1725   // Get an existing tracker for this thread. It's not possible to create
1726   // one at this point because doing so would involve memory allocations
1727   // and other potentially complex operations that can cause failures if
1728   // done within an exception handler. In most cases, earlier operations
1729   // will already have created the tracker, so this shouldn't be a
1730   // problem.
1731   ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
1732   if (!tracker)
1733     return;
1734 
1735   tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
1736                                    ActivityData::ForException(code));
1737 }
1738 
1739 // static
1740 void GlobalActivityTracker::OnTLSDestroy(void* value) {
1741   delete reinterpret_cast<ManagedActivityTracker*>(value);
1742 }
1743 
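// The remaining Scoped* classes are thin wrappers over ScopedThreadActivity
// that tag a specific activity type (generic action, task run, lock acquire,
// event wait, thread join, process wait) with the matching ActivityData
// payload.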
1744 ScopedActivity::ScopedActivity(const void* program_counter,
1745                                uint8_t action,
1746                                uint32_t id,
1747                                int32_t info)
1748     : GlobalActivityTracker::ScopedThreadActivity(
1749           program_counter,
1750           nullptr,
1751           static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
1752           ActivityData::ForGeneric(id, info),
1753           /*lock_allowed=*/true),
1754       id_(id) {
1755   // The action must not affect the category bits of the activity type.
1756   DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
1757 }
1758 
1759 void ScopedActivity::ChangeAction(uint8_t action) {
1760   DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
1761   ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
1762                     kNullActivityData);
1763 }
1764 
1765 void ScopedActivity::ChangeInfo(int32_t info) {
1766   ChangeTypeAndData(Activity::ACT_NULL, ActivityData::ForGeneric(id_, info));
1767 }
1768 
1769 void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
1770   DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
1771   ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
1772                     ActivityData::ForGeneric(id_, info));
1773 }
1774 
1775 ScopedTaskRunActivity::ScopedTaskRunActivity(
1776     const void* program_counter,
1777     const base::PendingTask& task)
1778     : GlobalActivityTracker::ScopedThreadActivity(
1779           program_counter,
1780           task.posted_from.program_counter(),
1781           Activity::ACT_TASK_RUN,
1782           ActivityData::ForTask(task.sequence_num),
1783           /*lock_allowed=*/true) {}
1784 
1785 ScopedLockAcquireActivity::ScopedLockAcquireActivity(
1786     const void* program_counter,
1787     const base::internal::LockImpl* lock)
1788     : GlobalActivityTracker::ScopedThreadActivity(
1789           program_counter,
1790           nullptr,
1791           Activity::ACT_LOCK_ACQUIRE,
1792           ActivityData::ForLock(lock),
1793           /*lock_allowed=*/false) {}
1794 
1795 ScopedEventWaitActivity::ScopedEventWaitActivity(
1796     const void* program_counter,
1797     const base::WaitableEvent* event)
1798     : GlobalActivityTracker::ScopedThreadActivity(
1799           program_counter,
1800           nullptr,
1801           Activity::ACT_EVENT_WAIT,
1802           ActivityData::ForEvent(event),
1803           /*lock_allowed=*/true) {}
1804 
1805 ScopedThreadJoinActivity::ScopedThreadJoinActivity(
1806     const void* program_counter,
1807     const base::PlatformThreadHandle* thread)
1808     : GlobalActivityTracker::ScopedThreadActivity(
1809           program_counter,
1810           nullptr,
1811           Activity::ACT_THREAD_JOIN,
1812           ActivityData::ForThread(*thread),
1813           /*lock_allowed=*/true) {}
1814 
1815 #if !defined(OS_NACL) && !defined(OS_IOS)
1816 ScopedProcessWaitActivity::ScopedProcessWaitActivity(
1817     const void* program_counter,
1818     const base::Process* process)
1819     : GlobalActivityTracker::ScopedThreadActivity(
1820           program_counter,
1821           nullptr,
1822           Activity::ACT_PROCESS_WAIT,
1823           ActivityData::ForProcess(process->Pid()),
1824           /*lock_allowed=*/true) {}
1825 #endif
1826 
1827 }  // namespace debug
1828 }  // namespace base
1829