• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 // A bunch of threads repeatedly hash an array of ints protected by a
16 // spinlock.  If the spinlock is working properly, all elements of the
17 // array should be equal at the end of the test.
18 
19 #include <cstdint>
20 #include <limits>
21 #include <random>
22 #include <thread>  // NOLINT(build/c++11)
23 #include <vector>
24 
25 #include "gtest/gtest.h"
26 #include "absl/base/attributes.h"
27 #include "absl/base/internal/low_level_scheduling.h"
28 #include "absl/base/internal/scheduling_mode.h"
29 #include "absl/base/internal/spinlock.h"
30 #include "absl/base/internal/sysinfo.h"
31 #include "absl/base/macros.h"
32 #include "absl/synchronization/blocking_counter.h"
33 #include "absl/synchronization/notification.h"
34 
// Number of concurrent worker threads spawned by ThreadedTest().
constexpr int32_t kNumThreads = 10;
// Number of lock/hash passes each worker performs in TestFunction().
constexpr int32_t kIters = 1000;
37 
38 namespace absl {
39 ABSL_NAMESPACE_BEGIN
40 namespace base_internal {
41 
// This is defined outside of anonymous namespace so that it can be
// a friend of SpinLock to access protected methods for testing.
struct SpinLockTest {
  // Forwards to SpinLock::EncodeWaitCycles(): packs the wait interval
  // [wait_start_time, wait_end_time) into a 32-bit lock word.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time) {
    return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
  }
  // Forwards to SpinLock::DecodeWaitCycles(): recovers the (truncated)
  // wait-cycle count previously encoded into a lock word.
  static uint64_t DecodeWaitCycles(uint32_t lock_value) {
    return SpinLock::DecodeWaitCycles(lock_value);
  }
};
53 
54 namespace {
55 
// Shared array hashed under the lock by every worker; if mutual exclusion
// holds, all slots are equal when ThreadedTest() finishes.
static constexpr int kArrayLength = 10;
static uint32_t values[kArrayLength];

// Linker-initialized locks exercise the static-storage-duration
// construction path, one per scheduling mode.
static SpinLock static_spinlock(base_internal::kLinkerInitialized);
static SpinLock static_cooperative_spinlock(
    base_internal::kLinkerInitialized,
    base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
static SpinLock static_noncooperative_spinlock(
    base_internal::kLinkerInitialized, base_internal::SCHEDULE_KERNEL_ONLY);
65 
// Simple integer hash function based on the public domain lookup2 hash.
// http://burtleburtle.net/bob/c/lookup2.c
static uint32_t Hash32(uint32_t a, uint32_t c) {
  // The golden ratio; an arbitrary value used by lookup2 as the third
  // mixing word.
  uint32_t b = 0x9e3779b9UL;
  // lookup2's mix(): three rounds of subtract/xor/shift over (a, b, c).
  // The statement order is load-bearing; each line feeds the next.
  a -= b;
  a -= c;
  a ^= (c >> 13);
  b -= c;
  b -= a;
  b ^= (a << 8);
  c -= a;
  c -= b;
  c ^= (b >> 13);
  a -= b;
  a -= c;
  a ^= (c >> 12);
  b -= c;
  b -= a;
  b ^= (a << 16);
  c -= a;
  c -= b;
  c ^= (b >> 5);
  a -= b;
  a -= c;
  a ^= (c >> 3);
  b -= c;
  b -= a;
  b ^= (a << 10);
  c -= a;
  c -= b;
  c ^= (b >> 15);
  return c;
}
81 
TestFunction(int thread_salt,SpinLock * spinlock)82 static void TestFunction(int thread_salt, SpinLock* spinlock) {
83   for (int i = 0; i < kIters; i++) {
84     SpinLockHolder h(spinlock);
85     for (int j = 0; j < kArrayLength; j++) {
86       const int index = (j + thread_salt) % kArrayLength;
87       values[index] = Hash32(values[index], thread_salt);
88       std::this_thread::yield();
89     }
90   }
91 }
92 
ThreadedTest(SpinLock * spinlock)93 static void ThreadedTest(SpinLock* spinlock) {
94   std::vector<std::thread> threads;
95   for (int i = 0; i < kNumThreads; ++i) {
96     threads.push_back(std::thread(TestFunction, i, spinlock));
97   }
98   for (auto& thread : threads) {
99     thread.join();
100   }
101 
102   SpinLockHolder h(spinlock);
103   for (int i = 1; i < kArrayLength; i++) {
104     EXPECT_EQ(values[0], values[i]);
105   }
106 }
107 
TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  {
    // While a kernel-only lock is held, rescheduling must be disabled.
    SpinLockHolder held(&spinlock);
    EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  }
}
114 
TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
  // Same check as above, but through the linker-initialized static lock.
  SpinLockHolder held(&static_noncooperative_spinlock);
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
}
120 
TEST(SpinLock, WaitCyclesEncoding) {
  // These are implementation details not exported by SpinLock.
  const int kProfileTimestampShift = 7;
  const int kLockwordReservedShift = 3;
  const uint32_t kSpinLockSleeper = 8;

  // We should be able to encode up to (2^kMaxCyclesShift - 1) without
  // clamping, but the lower kProfileTimestampShift bits will be dropped.
  const int kMaxCyclesShift =
    32 - kLockwordReservedShift + kProfileTimestampShift;
  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;

  // These bits should be zero after encoding.
  const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;

  // These bits are dropped when wait cycles are encoded.
  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;

  // Test a bunch of random values: encoding must leave the reserved bits
  // clear, and decoding must round-trip everything above the dropped
  // timestamp bits.
  std::default_random_engine generator;
  // Shift to avoid overflow below.
  std::uniform_int_distribution<uint64_t> time_distribution(
      0, std::numeric_limits<uint64_t>::max() >> 4);
  std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);

  for (int i = 0; i < 100; i++) {
    int64_t start_time = time_distribution(generator);
    int64_t cycles = cycle_distribution(generator);
    int64_t end_time = start_time + cycles;
    uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
    EXPECT_EQ(0, decoded & kProfileTimestampMask);
    EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
  }

  // Test corner cases: a zero-length wait encodes as the sentinel
  // kSpinLockSleeper, small values decode to zero, and all usable bits
  // set decodes to the maximum representable value.
  int64_t start_time = time_distribution(generator);
  EXPECT_EQ(kSpinLockSleeper,
            SpinLockTest::EncodeWaitCycles(start_time, start_time));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
  EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
            SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));

  // Check that we cannot produce kSpinLockSleeper during encoding.
  int64_t sleeper_cycles =
      kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
  uint32_t sleeper_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
  EXPECT_NE(sleeper_value, kSpinLockSleeper);

  // Test clamping: a wait of exactly kMaxCycles decodes to the maximum
  // representable value...
  uint32_t max_value =
    SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
  EXPECT_EQ(expected_max_value_decoded, max_value_decoded);

  // ...a wait one encoding step beyond kMaxCycles clamps to that same
  // maximum...
  const int64_t step = (1 << kProfileTimestampShift);
  uint32_t after_max_value =
    SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
  uint64_t after_max_value_decoded =
      SpinLockTest::DecodeWaitCycles(after_max_value);
  EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);

  // ...and a wait one step below the maximum decodes strictly smaller.
  uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
      start_time, start_time + kMaxCycles - step);
  uint64_t before_max_value_decoded =
    SpinLockTest::DecodeWaitCycles(before_max_value);
  EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
}
193 
TEST(SpinLockWithThreads, StaticSpinLock) {
  // Default scheduling mode, static storage duration.
  ThreadedTest(&static_spinlock);
}
197 
TEST(SpinLockWithThreads, StackSpinLock) {
  // Default scheduling mode, constructed on the stack.
  SpinLock spinlock;
  ThreadedTest(&spinlock);
}
202 
TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
  // Cooperative scheduling mode, constructed on the stack.
  SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  ThreadedTest(&spinlock);
}
207 
TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
  // Kernel-only scheduling mode, constructed on the stack.
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  ThreadedTest(&spinlock);
}
212 
TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
  // Cooperative scheduling mode, static storage duration.
  ThreadedTest(&static_cooperative_spinlock);
}
216 
TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
  // Kernel-only scheduling mode, static storage duration.
  ThreadedTest(&static_noncooperative_spinlock);
}
220 
TEST(SpinLockWithThreads, DoesNotDeadlock) {
  struct Helper {
    // Waits until LockThenWait() owns *spinlock, signals readiness via
    // *b, then blocks trying to acquire the already-held lock.
    static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
                               BlockingCounter* b) {
      locked->WaitForNotification();  // Wait for LockThenWait() to hold "s".
      b->DecrementCount();
      SpinLockHolder l(spinlock);
    }

    // Acquires *spinlock first, then holds it until every spinner has
    // checked in on *b — guaranteeing all spinners are blocked on the
    // lock simultaneously before it is released.
    static void LockThenWait(Notification* locked, SpinLock* spinlock,
                             BlockingCounter* b) {
      SpinLockHolder l(spinlock);
      locked->Notify();
      b->Wait();
    }

    // Runs one holder thread plus `num_spinners` contending threads; the
    // test passes iff every thread eventually joins (i.e. no deadlock).
    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
      Notification locked;
      BlockingCounter counter(num_spinners);
      std::vector<std::thread> threads;

      threads.push_back(
          std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
      for (int i = 0; i < num_spinners; ++i) {
        threads.push_back(
            std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
      }

      for (auto& thread : threads) {
        thread.join();
      }
    }
  };

  // Oversubscribe the machine (2x CPUs) in both scheduling modes, for
  // both stack- and static-storage locks.
  SpinLock stack_cooperative_spinlock(
      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  Helper::DeadlockTest(&stack_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&stack_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
}
267 
268 }  // namespace
269 }  // namespace base_internal
270 ABSL_NAMESPACE_END
271 }  // namespace absl
272