1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 //
15 // -----------------------------------------------------------------------------
16 // File: hashtablez_sampler.h
17 // -----------------------------------------------------------------------------
18 //
19 // This header file defines the API for a low level library to sample hashtables
20 // and collect runtime statistics about them.
21 //
22 // `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which
23 // store information about a single sample.
24 //
25 // `Record*` methods store information into samples.
26 // `Sample()` and `Unsample()` make use of a single global sampler with
27 // properties controlled by the flags hashtablez_enabled,
28 // hashtablez_sample_rate, and hashtablez_max_samples.
29 //
30 // WARNING
31 //
32 // Using this sampling API may cause sampled Swiss tables to use the global
33 // allocator (operator `new`) in addition to any custom allocator. If you
34 // are using a table in an unusual circumstance where allocation or calling a
35 // linux syscall is unacceptable, this could interfere.
36 //
37 // This utility is internal-only. Use at your own risk.
38
39 #ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
40 #define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
41
42 #include <atomic>
43 #include <functional>
44 #include <memory>
45 #include <vector>
46
47 #include "absl/base/internal/per_thread_tls.h"
48 #include "absl/base/optimization.h"
49 #include "absl/container/internal/have_sse.h"
50 #include "absl/profiling/internal/sample_recorder.h"
51 #include "absl/synchronization/mutex.h"
52 #include "absl/utility/utility.h"
53
54 namespace absl {
55 ABSL_NAMESPACE_BEGIN
56 namespace container_internal {
57
// Stores information about a sampled hashtable.  All mutations to this *must*
// be made through `Record*` functions below.  All reads from this *must* only
// occur in the callback to `HashtablezSampler::Iterate`.
struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
  // Constructs the object but does not fill in any fields.
  HashtablezInfo();
  ~HashtablezInfo();
  HashtablezInfo(const HashtablezInfo&) = delete;
  HashtablezInfo& operator=(const HashtablezInfo&) = delete;

  // Puts the object into a clean state, fills in the logically `const` members,
  // blocking for any readers that are currently sampling the object.
  void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);

  // These fields are mutated by the various Record* APIs and need to be
  // thread-safe.  There is a single logical writer per table, so the Record*
  // helpers use relaxed loads/stores rather than read-modify-write ops.
  std::atomic<size_t> capacity;            // current slot capacity
  std::atomic<size_t> size;                // current number of elements
  std::atomic<size_t> num_erases;          // erases since last clear/rehash
  std::atomic<size_t> num_rehashes;        // lifetime rehash count
  // NOTE(review): the probe-length and hash-bit aggregates below are
  // presumably maintained by the out-of-line RecordInsertSlow — confirm
  // against hashtablez_sampler.cc.
  std::atomic<size_t> max_probe_length;
  std::atomic<size_t> total_probe_length;  // scaled in RecordRehashSlow
  std::atomic<size_t> hashes_bitwise_or;
  std::atomic<size_t> hashes_bitwise_and;
  std::atomic<size_t> hashes_bitwise_xor;
  std::atomic<size_t> max_reserve;         // largest reservation recorded

  // All of the fields below are set by `PrepareForSampling`, they must not be
  // mutated in `Record*` functions.  They are logically `const` in that sense.
  // These are guarded by init_mu, but that is not externalized to clients, who
  // can only read them during `HashtablezSampler::Iterate` which will hold the
  // lock.
  static constexpr int kMaxStackDepth = 64;
  absl::Time create_time;       // when this sample was (re)initialized
  int32_t depth;                // number of valid frames in `stack`
  void* stack[kMaxStackDepth];  // presumably the creation stack trace — filled
                                // by PrepareForSampling (TODO confirm)
  size_t inline_element_size;   // size of the sampled table's slot type
};
96
RecordRehashSlow(HashtablezInfo * info,size_t total_probe_length)97 inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
98 #if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
99 total_probe_length /= 16;
100 #else
101 total_probe_length /= 8;
102 #endif
103 info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
104 info->num_erases.store(0, std::memory_order_relaxed);
105 // There is only one concurrent writer, so `load` then `store` is sufficient
106 // instead of using `fetch_add`.
107 info->num_rehashes.store(
108 1 + info->num_rehashes.load(std::memory_order_relaxed),
109 std::memory_order_relaxed);
110 }
111
RecordReservationSlow(HashtablezInfo * info,size_t target_capacity)112 inline void RecordReservationSlow(HashtablezInfo* info,
113 size_t target_capacity) {
114 info->max_reserve.store(
115 (std::max)(info->max_reserve.load(std::memory_order_relaxed),
116 target_capacity),
117 std::memory_order_relaxed);
118 }
119
RecordClearedReservationSlow(HashtablezInfo * info)120 inline void RecordClearedReservationSlow(HashtablezInfo* info) {
121 info->max_reserve.store(0, std::memory_order_relaxed);
122 }
123
RecordStorageChangedSlow(HashtablezInfo * info,size_t size,size_t capacity)124 inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
125 size_t capacity) {
126 info->size.store(size, std::memory_order_relaxed);
127 info->capacity.store(capacity, std::memory_order_relaxed);
128 if (size == 0) {
129 // This is a clear, reset the total/num_erases too.
130 info->total_probe_length.store(0, std::memory_order_relaxed);
131 info->num_erases.store(0, std::memory_order_relaxed);
132 }
133 }
134
// Records an insertion of an element with hash `hash` that landed
// `distance_from_desired` slots away from its preferred slot.  Implemented
// out of line; presumably maintains the probe-length and hash-bit aggregates
// in HashtablezInfo — confirm against the .cc file.
void RecordInsertSlow(HashtablezInfo* info, size_t hash,
                      size_t distance_from_desired);
137
RecordEraseSlow(HashtablezInfo * info)138 inline void RecordEraseSlow(HashtablezInfo* info) {
139 info->size.fetch_sub(1, std::memory_order_relaxed);
140 // There is only one concurrent writer, so `load` then `store` is sufficient
141 // instead of using `fetch_add`.
142 info->num_erases.store(
143 1 + info->num_erases.load(std::memory_order_relaxed),
144 std::memory_order_relaxed);
145 }
146
// Slow path of Sample(): given the exhausted per-thread countdown
// `next_sample`, decides whether to start sampling and, if so, returns a
// registered HashtablezInfo (nullptr otherwise).  Presumably also resets
// *next_sample — confirm against the .cc file.
HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
// Unregisters `info` from the global sampler (called from the handle's
// destructor / move-assignment).
void UnsampleSlow(HashtablezInfo* info);
149
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)

// Sampling requires a per-thread TLS counter (`global_next_sample`, below)
// and cannot work across a DLL boundary where that TLS variable cannot be
// exported.
// NOTE(review): the conditional #define below was missing, which left
// ABSL_INTERNAL_HASHTABLEZ_SAMPLE permanently undefined and compiled the real
// sampling handle out unconditionally.  Restored per upstream Abseil; confirm
// the condition matches the version of absl/base in use.
#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
    !defined(ABSL_CONSUME_DLL)
#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
#endif

#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Move-only RAII handle owning the registration of one sampled table.  A
// null handle (the common case) makes every Record* call a cheap no-op; a
// non-null handle forwards to the Record*Slow functions and unregisters the
// sample on destruction.
class HashtablezInfoHandle {
 public:
  explicit HashtablezInfoHandle() : info_(nullptr) {}
  explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
  ~HashtablezInfoHandle() {
    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
    UnsampleSlow(info_);
  }

  HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
  HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;

  HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
      : info_(absl::exchange(o.info_, nullptr)) {}
  HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
    // Release our current sample (if any) before taking over o's.
    if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
      UnsampleSlow(info_);
    }
    info_ = absl::exchange(o.info_, nullptr);
    return *this;
  }

  inline void RecordStorageChanged(size_t size, size_t capacity) {
    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
    RecordStorageChangedSlow(info_, size, capacity);
  }

  inline void RecordRehash(size_t total_probe_length) {
    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
    RecordRehashSlow(info_, total_probe_length);
  }

  inline void RecordReservation(size_t target_capacity) {
    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
    RecordReservationSlow(info_, target_capacity);
  }

  inline void RecordClearedReservation() {
    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
    RecordClearedReservationSlow(info_);
  }

  inline void RecordInsert(size_t hash, size_t distance_from_desired) {
    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
    RecordInsertSlow(info_, hash, distance_from_desired);
  }

  inline void RecordErase() {
    if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
    RecordEraseSlow(info_);
  }

  friend inline void swap(HashtablezInfoHandle& lhs,
                          HashtablezInfoHandle& rhs) {
    std::swap(lhs.info_, rhs.info_);
  }

 private:
  friend class HashtablezInfoHandlePeer;
  HashtablezInfo* info_;
};
#else
// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo
// can be removed by the linker, in order to reduce the binary size.  All
// members are empty no-ops with the same signatures as the real handle.
class HashtablezInfoHandle {
 public:
  explicit HashtablezInfoHandle() = default;
  explicit HashtablezInfoHandle(std::nullptr_t) {}

  inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
  inline void RecordRehash(size_t /*total_probe_length*/) {}
  inline void RecordReservation(size_t /*target_capacity*/) {}
  inline void RecordClearedReservation() {}
  inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
  inline void RecordErase() {}

  friend inline void swap(HashtablezInfoHandle& /*lhs*/,
                          HashtablezInfoHandle& /*rhs*/) {}
};
#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
235
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Per-thread countdown until the next table is sampled; decremented on every
// call to Sample() below and handed to SampleSlow() when it hits zero
// (which presumably resets it — confirm in the .cc file).
extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
239
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.  Most calls return a null (inert) handle; roughly
// one in hashtablez_sample_rate calls takes the slow path.
inline HashtablezInfoHandle Sample(
    size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
  // Fast path: just decrement the per-thread countdown.
  if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
    return HashtablezInfoHandle(nullptr);
  }
  // Countdown exhausted: maybe start sampling this table.
  return HashtablezInfoHandle(
      SampleSlow(&global_next_sample, inline_element_size));
#else
  // Sampling compiled out: always return an inert handle.
  return HashtablezInfoHandle(nullptr);
#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
}
254
// The global sampler is a SampleRecorder specialized on HashtablezInfo.
using HashtablezSampler =
    ::absl::profiling_internal::SampleRecorder<HashtablezInfo>;

// Returns a global Sampler.
HashtablezSampler& GlobalHashtablezSampler();

// Enables or disables sampling for Swiss tables.
void SetHashtablezEnabled(bool enabled);

// Sets the rate at which Swiss tables will be sampled (approximately one
// table per `rate` is sampled).
void SetHashtablezSampleParameter(int32_t rate);

// Sets a soft max for the number of samples that will be kept.
void SetHashtablezMaxSamples(int32_t max);

// Configuration override.
// This allows process-wide sampling without depending on order of
// initialization of static storage duration objects.
// The definition of this constant is weak, which allows us to inject a
// different value for it at link time.
extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
276
277 } // namespace container_internal
278 ABSL_NAMESPACE_END
279 } // namespace absl
280
281 #endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
282