// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/base/internal/spinlock.h"

#include <algorithm>
#include <atomic>
#include <limits>

#include "absl/base/attributes.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/spinlock_wait.h"
#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
#include "absl/base/call_once.h"

// Description of lock-word:
//  31..00: [............................3][2][1][0]
//
//     [0]: kSpinLockHeld
//     [1]: kSpinLockCooperative
//     [2]: kSpinLockDisabledScheduling
// [31..3]: ONLY kSpinLockSleeper OR
//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
//
// Detailed descriptions:
//
// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
//
// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
//          contended iff kSpinLockCooperative is set.
//
// Bit [2]: This bit is exclusive from bit [1]. It is used only by a
//          non-cooperative lock. When set, indicates that scheduling was
//          successfully disabled when the lock was acquired. May be unset,
//          even if non-cooperative, if a ThreadIdentity did not yet exist at
//          time of acquisition.
// Bit [3]: If this is the only upper bit ([31..3]) set, then the lock was
//          acquired without contention; however, at least one waiter exists.
//
// Otherwise, bits [31..3] represent the time spent by the current lock
// holder to acquire the lock. There may be outstanding waiter(s).
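//
// For illustration, a few example lock-word values implied by the layout
// above (bit [0] == kSpinLockHeld, bit [1] == kSpinLockCooperative,
// bit [3] == kSpinLockSleeper):
//
//   0x00000001: held; acquired without contention, no waiters.
//   0x00000009: held with kSpinLockSleeper set; acquired without contention,
//               but at least one waiter now exists.
//   0x00000083: held, cooperative; the holder's own wait is recorded as 0x80
//               in bits [31..3] (see EncodeWaitCycles()).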

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)(
    const void *lock, int64_t wait_cycles)>
    submit_profile_data;

void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
                                         int64_t wait_cycles)) {
  submit_profile_data.Store(fn);
}
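
// For illustration only: a process could install a contention hook at startup
// with any callback of the matching signature (the callback name here is
// hypothetical):
//
//   void MyContentionCallback(const void *lock, int64_t wait_cycles) {
//     // e.g. export wait_cycles to a monitoring system.
//   }
//   ...
//   absl::base_internal::RegisterSpinLockProfiler(&MyContentionCallback);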

// Static member variable definitions.
constexpr uint32_t SpinLock::kSpinLockHeld;
constexpr uint32_t SpinLock::kSpinLockCooperative;
constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
constexpr uint32_t SpinLock::kSpinLockSleeper;
constexpr uint32_t SpinLock::kWaitTimeMask;

// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
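
// For illustration only: a lock that cooperatively reschedules when contended
// (e.g. under a fiber scheduler) could be constructed as
//
//   absl::base_internal::SpinLock lock(
//       absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
//
// whereas SCHEDULE_KERNEL_ONLY yields a conventional non-cooperative lock.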

// Monitor the lock to see if its value changes within some time period
// (adaptive_spin_count loop iterations). The last value read from the lock
// is returned from the method.
uint32_t SpinLock::SpinLoop() {
  // We are already in the slow path of SpinLock; initialize
  // adaptive_spin_count here.
  ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
  ABSL_CONST_INIT static int adaptive_spin_count = 0;
  base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
    adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
  });

  int c = adaptive_spin_count;
  uint32_t lock_value;
  do {
    lock_value = lockword_.load(std::memory_order_relaxed);
  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
  return lock_value;
}

void SpinLock::SlowLock() {
  uint32_t lock_value = SpinLoop();
  lock_value = TryLockInternal(lock_value, 0);
  if ((lock_value & kSpinLockHeld) == 0) {
    return;
  }

  base_internal::SchedulingMode scheduling_mode;
  if ((lock_value & kSpinLockCooperative) != 0) {
    scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  } else {
    scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
  }

  // The lock was not obtained initially, so this thread needs to wait for
  // it. Record the current timestamp in the local variable wait_start_time
  // so the total wait time can be stored in the lockword once this thread
  // obtains the lock.
  int64_t wait_start_time = CycleClock::Now();
  uint32_t wait_cycles = 0;
  int lock_wait_call_count = 0;
  while ((lock_value & kSpinLockHeld) != 0) {
    // If the lock is currently held, but not marked as having a sleeper, mark
    // it as having a sleeper.
    if ((lock_value & kWaitTimeMask) == 0) {
      // Here, just "mark" that the thread is going to sleep. Don't store the
      // lock wait time in the lock -- the lock word stores the amount of time
      // that the current holder waited before acquiring the lock, not the wait
      // time of any thread currently waiting to acquire it.
      if (lockword_.compare_exchange_strong(
              lock_value, lock_value | kSpinLockSleeper,
              std::memory_order_relaxed, std::memory_order_relaxed)) {
        // Successfully transitioned to kSpinLockSleeper. Pass
        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
        // the last lock_value observed.
        lock_value |= kSpinLockSleeper;
      } else if ((lock_value & kSpinLockHeld) == 0) {
        // Lock is free again, so try and acquire it before sleeping. The
        // new lock state will be the number of cycles this thread waited if
        // this thread obtains the lock.
        lock_value = TryLockInternal(lock_value, wait_cycles);
        continue;  // Skip the delay at the end of the loop.
      } else if ((lock_value & kWaitTimeMask) == 0) {
        // The lock is still held, without a waiter being marked, but something
        // else about the lock word changed, causing our CAS to fail. For
        // example, a new lock holder may have acquired the lock with
        // kSpinLockDisabledScheduling set, whereas the previous holder had not
        // set that flag. In this case, attempt again to mark ourselves as a
        // waiter.
        continue;
      }
    }

    // SpinLockDelay() calls into the fiber scheduler; we need to see the
    // synchronization there to avoid false positives.
    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
    // Wait for an OS specific delay.
    base_internal::SpinLockDelay(&lockword_, lock_value,
                                 ++lock_wait_call_count, scheduling_mode);
    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
    // Spin again after returning from the wait routine to give this thread
    // some chance of obtaining the lock.
    lock_value = SpinLoop();
    wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
    lock_value = TryLockInternal(lock_value, wait_cycles);
  }
}

void SpinLock::SlowUnlock(uint32_t lock_value) {
  base_internal::SpinLockWake(&lockword_,
                              false);  // wake waiter if necessary

  // If our acquisition was contended, collect contentionz profile info. The
  // lone value kSpinLockSleeper is reserved to mean "a waiter exists but our
  // own acquisition was not contended", so that case is not reported.
  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
    const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
    submit_profile_data(this, wait_cycles);
    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
  }
}

// We use the upper 29 bits of the lock word to store the time spent waiting
// to acquire this lock. This is reported by contentionz profiling. Since the
// lower bits of the cycle counter wrap very quickly on high-frequency
// processors we divide to reduce the granularity to 2^kProfileTimestampShift
// sized units. On a 4 GHz machine this will lose track of wait times greater
// than (2^29/4 GHz)*128 =~ 17.2 seconds. Such waits should be extremely rare.
static constexpr int kProfileTimestampShift = 7;

// We currently reserve the lower 3 bits.
static constexpr int kLockwordReservedShift = 3;
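
// Worked out: with kLockwordReservedShift == 3, the wait-time field spans
// 29 bits and holds 2^29 units of 2^kProfileTimestampShift == 128 cycles
// each, i.e. 2^36 cycles in total; at 4 GHz, 2^36 / 4e9 =~ 17.2 seconds,
// matching the limit described above.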

uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
                                    int64_t wait_end_time) {
  static const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  int64_t scaled_wait_time =
      (wait_end_time - wait_start_time) >> kProfileTimestampShift;

  // Return a representation of the time spent waiting that can be stored in
  // the lock word's upper bits.
  uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);

  if (clamped == 0) {
    return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
  }
  // Bump up value if necessary to avoid returning kSpinLockSleeper.
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  if (clamped == kSpinLockSleeper) {
    return kMinWaitTime;
  }
  return clamped;
}
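
// For illustration, two edge cases of the encoding (bit [3], i.e.
// kSpinLockSleeper == 8):
//   - A wait under 128 cycles scales to 0, so kSpinLockSleeper is returned:
//     waiters still get woken, but no contention is recorded.
//   - A wait of 128..255 cycles scales to 1 and would encode as
//     1 << kLockwordReservedShift == kSpinLockSleeper, so it is bumped to
//     kMinWaitTime to stay distinguishable from the bare sleeper marker.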

uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
  // Cast to uint32_t first to ensure bits [63:32] are cleared.
  const uint64_t scaled_wait_time =
      static_cast<uint32_t>(lock_value & kWaitTimeMask);
  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}
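
// For illustration, a round trip through the encoder and decoder: a wait of
// 10000 cycles scales to 10000 >> 7 == 78 units, is stored as 78 << 3 == 624
// in the lock word, and decodes to 624 << 4 == 9984 cycles; the bottom
// kProfileTimestampShift bits of resolution are lost to the scaling.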

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl