1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 // A bunch of threads repeatedly hash an array of ints protected by a
16 // spinlock. If the spinlock is working properly, all elements of the
17 // array should be equal at the end of the test.
18
19 #include <cstdint>
20 #include <limits>
21 #include <random>
22 #include <thread> // NOLINT(build/c++11)
23 #include <type_traits>
24 #include <vector>
25
26 #include "gtest/gtest.h"
27 #include "absl/base/attributes.h"
28 #include "absl/base/config.h"
29 #include "absl/base/internal/low_level_scheduling.h"
30 #include "absl/base/internal/scheduling_mode.h"
31 #include "absl/base/internal/spinlock.h"
32 #include "absl/base/internal/sysinfo.h"
33 #include "absl/base/macros.h"
34 #include "absl/synchronization/blocking_counter.h"
35 #include "absl/synchronization/notification.h"
36
37 constexpr int32_t kNumThreads = 10;
38 constexpr int32_t kIters = 1000;
39
40 namespace absl {
41 ABSL_NAMESPACE_BEGIN
42 namespace base_internal {
43
44 // This is defined outside of anonymous namespace so that it can be
45 // a friend of SpinLock to access protected methods for testing.
struct SpinLockTest {
  // Forwards to the protected SpinLock::EncodeWaitCycles(), which packs
  // the elapsed wait time (end - start) into the spare bits of a lock word.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time) {
    return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
  }
  // Forwards to the protected SpinLock::DecodeWaitCycles(), recovering the
  // (truncated) wait-cycle count previously encoded into a lock word.
  static uint64_t DecodeWaitCycles(uint32_t lock_value) {
    return SpinLock::DecodeWaitCycles(lock_value);
  }
};
55
56 namespace {
57
static constexpr int kArrayLength = 10;
// Shared state mutated by all threads; every write is guarded by a spinlock.
static uint32_t values[kArrayLength];

// Constant-initialized file-scope locks, one per scheduling mode, used by the
// Static* tests below to verify that ABSL_CONST_INIT SpinLocks work.
ABSL_CONST_INIT static SpinLock static_cooperative_spinlock(
    absl::kConstInit, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
ABSL_CONST_INIT static SpinLock static_noncooperative_spinlock(
    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
65
66 // Simple integer hash function based on the public domain lookup2 hash.
67 // http://burtleburtle.net/bob/c/lookup2.c
// Simple integer hash mix based on the public domain lookup2 hash
// (http://burtleburtle.net/bob/c/lookup2.c): three rounds of
// subtract-and-xor mixing over (a, b, c), returning the final c.
static uint32_t Hash32(uint32_t a, uint32_t c) {
  uint32_t b = 0x9e3779b9UL;  // The golden ratio; an arbitrary value.
  a = (a - b - c) ^ (c >> 13);
  b = (b - c - a) ^ (a << 8);
  c = (c - a - b) ^ (b >> 13);
  a = (a - b - c) ^ (c >> 12);
  b = (b - c - a) ^ (a << 16);
  c = (c - a - b) ^ (b >> 5);
  a = (a - b - c) ^ (c >> 3);
  b = (b - c - a) ^ (a << 10);
  c = (c - a - b) ^ (b >> 15);
  return c;
}
81
TestFunction(int thread_salt,SpinLock * spinlock)82 static void TestFunction(int thread_salt, SpinLock* spinlock) {
83 for (int i = 0; i < kIters; i++) {
84 SpinLockHolder h(spinlock);
85 for (int j = 0; j < kArrayLength; j++) {
86 const int index = (j + thread_salt) % kArrayLength;
87 values[index] = Hash32(values[index], thread_salt);
88 std::this_thread::yield();
89 }
90 }
91 }
92
ThreadedTest(SpinLock * spinlock)93 static void ThreadedTest(SpinLock* spinlock) {
94 std::vector<std::thread> threads;
95 threads.reserve(kNumThreads);
96 for (int i = 0; i < kNumThreads; ++i) {
97 threads.push_back(std::thread(TestFunction, i, spinlock));
98 }
99 for (auto& thread : threads) {
100 thread.join();
101 }
102
103 SpinLockHolder h(spinlock);
104 for (int i = 1; i < kArrayLength; i++) {
105 EXPECT_EQ(values[0], values[i]);
106 }
107 }
108
#ifndef ABSL_HAVE_THREAD_SANITIZER
// SpinLock must remain trivially destructible (it is used with
// ABSL_CONST_INIT at file scope above). Skipped under TSan, where the
// type is presumably augmented with instrumentation state — TODO confirm.
static_assert(std::is_trivially_destructible<SpinLock>(), "");
#endif
112
// A stack-allocated SCHEDULE_KERNEL_ONLY lock must disable rescheduling
// for as long as it is held.
TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  spinlock.Unlock();
}
119
// Same check as above, but for the constant-initialized file-scope lock.
TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
  static_noncooperative_spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  static_noncooperative_spinlock.Unlock();
}
125
// Verifies the round trip of EncodeWaitCycles()/DecodeWaitCycles():
// reserved lock-word bits stay zero, low timestamp bits are dropped,
// a zero-length wait encodes as the sleeper marker, and long waits clamp.
TEST(SpinLock, WaitCyclesEncoding) {
  // These are implementation details not exported by SpinLock.
  const int kProfileTimestampShift = 7;
  const int kLockwordReservedShift = 3;
  const uint32_t kSpinLockSleeper = 8;

  // We should be able to encode up to (2^kMaxCyclesShift - 1) without
  // clamping, but the lower kProfileTimestampShift bits will be dropped.
  const int kMaxCyclesShift =
      32 - kLockwordReservedShift + kProfileTimestampShift;
  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;

  // These bits should be zero after encoding.
  const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;

  // These bits are dropped when wait cycles are encoded.
  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;

  // Test a bunch of random values
  std::default_random_engine generator;
  // Shift to avoid overflow below.
  std::uniform_int_distribution<uint64_t> time_distribution(
      0, std::numeric_limits<uint64_t>::max() >> 4);
  std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);

  for (int i = 0; i < 100; i++) {
    int64_t start_time = time_distribution(generator);
    int64_t cycles = cycle_distribution(generator);
    int64_t end_time = start_time + cycles;
    uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
    // Reserved bits must be clear, and the decode must equal the input
    // with only the low timestamp bits lost.
    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
    EXPECT_EQ(0, decoded & kProfileTimestampMask);
    EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
  }

  // Test corner cases
  int64_t start_time = time_distribution(generator);
  // A zero-length wait still encodes as kSpinLockSleeper (never zero).
  EXPECT_EQ(kSpinLockSleeper,
            SpinLockTest::EncodeWaitCycles(start_time, start_time));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
  EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
            SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));

  // Check that we cannot produce kSpinLockSleeper during encoding.
  int64_t sleeper_cycles =
      kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
  uint32_t sleeper_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
  EXPECT_NE(sleeper_value, kSpinLockSleeper);

  // Test clamping
  uint32_t max_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
  EXPECT_EQ(expected_max_value_decoded, max_value_decoded);

  // One representable step past the max must decode to the same clamped value.
  const int64_t step = (1 << kProfileTimestampShift);
  uint32_t after_max_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
  uint64_t after_max_value_decoded =
      SpinLockTest::DecodeWaitCycles(after_max_value);
  EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);

  // One representable step below the max must decode strictly smaller,
  // proving the clamp kicks in exactly at kMaxCycles.
  uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
      start_time, start_time + kMaxCycles - step);
  uint64_t before_max_value_decoded =
      SpinLockTest::DecodeWaitCycles(before_max_value);
  EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
}
198
// Default-constructed stack lock must provide mutual exclusion.
TEST(SpinLockWithThreads, StackSpinLock) {
  SpinLock lock;
  ThreadedTest(&lock);
}
203
// Stack lock in cooperative scheduling mode must provide mutual exclusion.
TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
  SpinLock lock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  ThreadedTest(&lock);
}
208
// Stack lock in kernel-only scheduling mode must provide mutual exclusion.
TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
  SpinLock lock(base_internal::SCHEDULE_KERNEL_ONLY);
  ThreadedTest(&lock);
}
213
// Same threaded check against the constant-initialized cooperative lock.
TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
  ThreadedTest(&static_cooperative_spinlock);
}
217
// Same threaded check against the constant-initialized kernel-only lock.
TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
  ThreadedTest(&static_noncooperative_spinlock);
}
221
// Many threads blocked on a held lock must all make progress once it is
// released — exercised for every scheduling mode and storage duration.
// The test passes iff every thread can be joined (i.e. no deadlock).
TEST(SpinLockWithThreads, DoesNotDeadlock) {
  struct Helper {
    // Spinner: waits until the lock is known to be held, checks in with
    // the counter, then blocks trying to acquire the lock.
    static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
                               BlockingCounter* b) {
      locked->WaitForNotification();  // Wait for LockThenWait() to hold "s".
      b->DecrementCount();
      SpinLockHolder l(spinlock);
    }

    // Holder: takes the lock first, then keeps it held until every
    // spinner has checked in, guaranteeing all spinners contend on a
    // genuinely held lock before it is released.
    static void LockThenWait(Notification* locked, SpinLock* spinlock,
                             BlockingCounter* b) {
      SpinLockHolder l(spinlock);
      locked->Notify();
      b->Wait();
    }

    // Spawns one holder plus num_spinners spinners and joins them all.
    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
      Notification locked;
      BlockingCounter counter(num_spinners);
      std::vector<std::thread> threads;

      threads.push_back(
          std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
      for (int i = 0; i < num_spinners; ++i) {
        threads.push_back(
            std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
      }

      for (auto& thread : threads) {
        thread.join();
      }
    }
  };

  SpinLock stack_cooperative_spinlock(
      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  // Use 2x the CPU count so spinners outnumber cores in every run.
  Helper::DeadlockTest(&stack_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&stack_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
}
268
269 } // namespace
270 } // namespace base_internal
271 ABSL_NAMESPACE_END
272 } // namespace absl
273