1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 //
15 // -----------------------------------------------------------------------------
16 // File: sample_recorder.h
17 // -----------------------------------------------------------------------------
18 //
19 // This header file defines a lock-free linked list for recording samples
20 // collected from a random/stochastic process.
21 //
22 // This utility is internal-only. Use at your own risk.
23
24 #ifndef ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
25 #define ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
26
27 #include <atomic>
28 #include <cstddef>
29 #include <functional>
30
31 #include "absl/base/config.h"
32 #include "absl/base/thread_annotations.h"
33 #include "absl/synchronization/mutex.h"
34 #include "absl/time/time.h"
35
36 namespace absl {
37 ABSL_NAMESPACE_BEGIN
38 namespace profiling_internal {
39
// Sample<T> that has members required for linking samples in the linked list of
// samples maintained by the SampleRecorder.  Type T defines the sampled data.
template <typename T>
struct Sample {
  // Guards the ability to restore the sample to a pristine state. This
  // prevents races with sampling and resurrecting an object.
  absl::Mutex init_mu;
  // Intrusive link for the `all_` list.  Written once by PushNew() before the
  // sample is published and never modified afterwards.
  T* next = nullptr;
  // Intrusive link for the graveyard (dead) list.  nullptr while the sample is
  // live; non-null once the sample has been unregistered (the graveyard list
  // is circular, so even the last dead sample has a non-null `dead`).
  T* dead ABSL_GUARDED_BY(init_mu) = nullptr;
};
50
// Holds samples and their associated stack traces with a soft limit of
// `SetMaxSamples()`.
//
// Thread safe.
template <typename T>
class SampleRecorder {
 public:
  SampleRecorder();
  ~SampleRecorder();

  // Registers for sampling. Returns an opaque registration info.  Returns
  // nullptr (and records a dropped sample) once the soft sample limit has
  // been exceeded.
  T* Register();

  // Unregisters the sample.
  void Unregister(T* sample);

  // The dispose callback will be called on all samples the moment they are
  // being unregistered. Only affects samples that are unregistered after the
  // callback has been set.
  // Returns the previous callback.
  using DisposeCallback = void (*)(const T&);
  DisposeCallback SetDisposeCallback(DisposeCallback f);

  // Iterates over all the registered samples, invoking `f` on each one that
  // is currently live.  Returns the number of samples that have been dropped.
  int64_t Iterate(const std::function<void(const T& stack)>& f);

  // Sets the soft limit on the number of concurrently registered samples.
  void SetMaxSamples(int32_t max);

 private:
  void PushNew(T* sample);   // Lock-free prepend of `sample` onto `all_`.
  void PushDead(T* sample);  // Moves `sample` onto the graveyard list.
  T* PopDead();              // Resurrects a dead sample; nullptr if none.

  // Count of samples refused by Register() due to the soft limit.
  std::atomic<size_t> dropped_samples_;
  // Approximate number of currently registered samples.
  std::atomic<size_t> size_estimate_;
  // Soft limit on concurrently registered samples; see SetMaxSamples().
  std::atomic<int32_t> max_samples_{1 << 20};

  // Intrusive lock free linked lists for tracking samples.
  //
  // `all_` records all samples (they are never removed from this list) and is
  // terminated with a `nullptr`.
  //
  // `graveyard_.dead` is a circular linked list. When it is empty,
  // `graveyard_.dead == &graveyard`. The list is circular so that
  // every item on it (even the last) has a non-null dead pointer. This allows
  // `Iterate` to determine if a given sample is live or dead using only
  // information on the sample itself.
  //
  // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
  // looks like this (G is the Graveyard):
  //
  //           +---+    +---+    +---+    +---+    +---+
  //    all -->| A |--->| B |--->| C |--->| D |--->| E |
  //           |   |    |   |    |   |    |   |    |   |
  //   +---+   |   | +->|   |-+  |   | +->|   |-+  |   |
  //   | G |   +---+ |  +---+ |  +---+ |  +---+ |  +---+
  //   |   |         |        |        |        |
  //   |   | --------+        +--------+        |
  //   +---+                                    |
  //     ^                                      |
  //     +--------------------------------------+
  //
  std::atomic<T*> all_;
  T graveyard_;

  std::atomic<DisposeCallback> dispose_;
};
119
120 template <typename T>
121 typename SampleRecorder<T>::DisposeCallback
SetDisposeCallback(DisposeCallback f)122 SampleRecorder<T>::SetDisposeCallback(DisposeCallback f) {
123 return dispose_.exchange(f, std::memory_order_relaxed);
124 }
125
template <typename T>
SampleRecorder<T>::SampleRecorder()
    : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
  // Start the graveyard as an empty circular list: the sentinel points at
  // itself.  PopDead() relies on this self-link to detect emptiness.
  absl::MutexLock l(&graveyard_.init_mu);
  graveyard_.dead = &graveyard_;
}
132
133 template <typename T>
~SampleRecorder()134 SampleRecorder<T>::~SampleRecorder() {
135 T* s = all_.load(std::memory_order_acquire);
136 while (s != nullptr) {
137 T* next = s->next;
138 delete s;
139 s = next;
140 }
141 }
142
143 template <typename T>
PushNew(T * sample)144 void SampleRecorder<T>::PushNew(T* sample) {
145 sample->next = all_.load(std::memory_order_relaxed);
146 while (!all_.compare_exchange_weak(sample->next, sample,
147 std::memory_order_release,
148 std::memory_order_relaxed)) {
149 }
150 }
151
152 template <typename T>
PushDead(T * sample)153 void SampleRecorder<T>::PushDead(T* sample) {
154 if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
155 dispose(*sample);
156 }
157
158 absl::MutexLock graveyard_lock(&graveyard_.init_mu);
159 absl::MutexLock sample_lock(&sample->init_mu);
160 sample->dead = graveyard_.dead;
161 graveyard_.dead = sample;
162 }
163
164 template <typename T>
PopDead()165 T* SampleRecorder<T>::PopDead() {
166 absl::MutexLock graveyard_lock(&graveyard_.init_mu);
167
168 // The list is circular, so eventually it collapses down to
169 // graveyard_.dead == &graveyard_
170 // when it is empty.
171 T* sample = graveyard_.dead;
172 if (sample == &graveyard_) return nullptr;
173
174 absl::MutexLock sample_lock(&sample->init_mu);
175 graveyard_.dead = sample->dead;
176 sample->dead = nullptr;
177 sample->PrepareForSampling();
178 return sample;
179 }
180
181 template <typename T>
Register()182 T* SampleRecorder<T>::Register() {
183 int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
184 if (size > max_samples_.load(std::memory_order_relaxed)) {
185 size_estimate_.fetch_sub(1, std::memory_order_relaxed);
186 dropped_samples_.fetch_add(1, std::memory_order_relaxed);
187 return nullptr;
188 }
189
190 T* sample = PopDead();
191 if (sample == nullptr) {
192 // Resurrection failed. Hire a new warlock.
193 sample = new T();
194 PushNew(sample);
195 }
196
197 return sample;
198 }
199
template <typename T>
void SampleRecorder<T>::Unregister(T* sample) {
  // Move the sample to the graveyard (running the dispose callback, if set)
  // before shrinking the size estimate, so the estimate never undercounts a
  // still-live sample.
  PushDead(sample);
  size_estimate_.fetch_sub(1, std::memory_order_relaxed);
}
205
206 template <typename T>
Iterate(const std::function<void (const T & stack)> & f)207 int64_t SampleRecorder<T>::Iterate(
208 const std::function<void(const T& stack)>& f) {
209 T* s = all_.load(std::memory_order_acquire);
210 while (s != nullptr) {
211 absl::MutexLock l(&s->init_mu);
212 if (s->dead == nullptr) {
213 f(*s);
214 }
215 s = s->next;
216 }
217
218 return dropped_samples_.load(std::memory_order_relaxed);
219 }
220
template <typename T>
void SampleRecorder<T>::SetMaxSamples(int32_t max) {
  // Soft limit only: Register() reads this with a relaxed load, so the new
  // value takes effect on a best-effort basis.
  max_samples_.store(max, std::memory_order_release);
}
225
226 } // namespace profiling_internal
227 ABSL_NAMESPACE_END
228 } // namespace absl
229
230 #endif // ABSL_PROFILING_INTERNAL_SAMPLE_RECORDER_H_
231