// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_H
#define BASE_TRACE_EVENT_HEAP_PROFILER_H

// Replace with stub implementation.
#if 1
#define TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
  trace_event_internal::HeapProfilerScopedTaskExecutionTracker

namespace trace_event_internal {

// No-op stand-in for the real tracker defined in the disabled branch
// below: it accepts the task-context string and does nothing.
// NOTE(review): the stub branch defines only the task-execution macro;
// TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER and
// HEAP_PROFILER_SCOPED_IGNORE are unavailable while the stub is active.
class HeapProfilerScopedTaskExecutionTracker {
 public:
  explicit HeapProfilerScopedTaskExecutionTracker(const char*) {}
};

}  // namespace trace_event_internal

#else

#include "base/compiler_specific.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"

// This header file defines the set of macros that are used to track memory
// usage in the heap profiler. This is in addition to the macros defined in
// trace_event.h and are specific to heap profiler. This file also defines
// implementation details of these macros.

// Implementation detail: heap profiler macros create temporary variables to
// keep instrumentation overhead low. These macros give each temporary variable
// a unique name based on the line number to prevent name collisions.
#define INTERNAL_HEAP_PROFILER_UID3(a, b) heap_profiler_unique_##a##b
#define INTERNAL_HEAP_PROFILER_UID2(a, b) INTERNAL_HEAP_PROFILER_UID3(a, b)
#define INTERNAL_HEAP_PROFILER_UID(name_prefix) \
  INTERNAL_HEAP_PROFILER_UID2(name_prefix, __LINE__)

// Scoped tracker for task execution context in the heap profiler.
#define TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
  trace_event_internal::HeapProfilerScopedTaskExecutionTracker

// Scoped tracker that tracks the given program counter as a native stack frame
// in the heap profiler.
#define TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER \
  trace_event_internal::HeapProfilerScopedStackFrame

// A scoped ignore event used to tell heap profiler to ignore all the
// allocations in the scope. It is useful to exclude allocations made for
// tracing from the heap profiler dumps.
#define HEAP_PROFILER_SCOPED_IGNORE                                          \
  trace_event_internal::HeapProfilerScopedIgnore INTERNAL_HEAP_PROFILER_UID( \
      scoped_ignore)

namespace trace_event_internal {

// HeapProfilerScopedTaskExecutionTracker records the current task's context in
// the heap profiler. The context is pushed on construction and popped on
// destruction; both operations are skipped entirely when capture is disabled.
class HeapProfilerScopedTaskExecutionTracker {
 public:
  inline explicit HeapProfilerScopedTaskExecutionTracker(
      const char* task_context)
      : context_(task_context) {
    using base::trace_event::AllocationContextTracker;
    if (UNLIKELY(AllocationContextTracker::capture_mode() !=
                 AllocationContextTracker::CaptureMode::DISABLED)) {
      AllocationContextTracker::GetInstanceForCurrentThread()
          ->PushCurrentTaskContext(context_);
    }
  }

  inline ~HeapProfilerScopedTaskExecutionTracker() {
    using base::trace_event::AllocationContextTracker;
    if (UNLIKELY(AllocationContextTracker::capture_mode() !=
                 AllocationContextTracker::CaptureMode::DISABLED)) {
      AllocationContextTracker::GetInstanceForCurrentThread()
          ->PopCurrentTaskContext(context_);
    }
  }

 private:
  // Not owned; expected to outlive this scope (typically a string literal).
  const char* context_;
};

// HeapProfilerScopedStackFrame pushes the given program counter as a native
// stack frame for the duration of the scope. Only active in MIXED_STACK
// capture mode.
class HeapProfilerScopedStackFrame {
 public:
  inline explicit HeapProfilerScopedStackFrame(const void* program_counter)
      : program_counter_(program_counter) {
    using base::trace_event::AllocationContextTracker;
    if (UNLIKELY(AllocationContextTracker::capture_mode() ==
                 AllocationContextTracker::CaptureMode::MIXED_STACK)) {
      AllocationContextTracker::GetInstanceForCurrentThread()
          ->PushNativeStackFrame(program_counter_);
    }
  }

  inline ~HeapProfilerScopedStackFrame() {
    using base::trace_event::AllocationContextTracker;
    if (UNLIKELY(AllocationContextTracker::capture_mode() ==
                 AllocationContextTracker::CaptureMode::MIXED_STACK)) {
      AllocationContextTracker::GetInstanceForCurrentThread()
          ->PopNativeStackFrame(program_counter_);
    }
  }

 private:
  const void* const program_counter_;
};

// HeapProfilerScopedIgnore suppresses heap-profiler recording of all
// allocations made while it is in scope (see HEAP_PROFILER_SCOPED_IGNORE).
class BASE_EXPORT HeapProfilerScopedIgnore {
 public:
  inline HeapProfilerScopedIgnore() {
    using base::trace_event::AllocationContextTracker;
    if (UNLIKELY(
            AllocationContextTracker::capture_mode() !=
            AllocationContextTracker::CaptureMode::DISABLED)) {
      AllocationContextTracker::GetInstanceForCurrentThread()
          ->begin_ignore_scope();
    }
  }
  inline ~HeapProfilerScopedIgnore() {
    using base::trace_event::AllocationContextTracker;
    if (UNLIKELY(
            AllocationContextTracker::capture_mode() !=
            AllocationContextTracker::CaptureMode::DISABLED)) {
      AllocationContextTracker::GetInstanceForCurrentThread()
          ->end_ignore_scope();
    }
  }
};

}  // namespace trace_event_internal

#endif
#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_H