//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

//  Most users requiring mutual exclusion should use Mutex.
//  SpinLock is provided for use in two situations:
//   - for use in code that Mutex itself depends on
//   - for async signal safety (see below)

// SpinLock is async signal safe.  If a spinlock is used within a signal
// handler, all code that acquires the lock must ensure that the signal cannot
// arrive while the lock is held.  Typically, this is done by blocking the
// signal.
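//
// For example, a caller can block the signal around the critical section.
// A minimal sketch (assuming POSIX signals; SIGPROF stands in for whichever
// signal's handler also acquires this lock):
//
//   sigset_t set, old;
//   sigemptyset(&set);
//   sigaddset(&set, SIGPROF);
//   pthread_sigmask(SIG_BLOCK, &set, &old);       // signal cannot arrive now
//   lock.Lock();
//   ...                                           // critical section
//   lock.Unlock();
//   pthread_sigmask(SIG_SETMASK, &old, nullptr);  // restore the old mask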

#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_H_

#include <stdint.h>
#include <sys/types.h>

#include <atomic>

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

class ABSL_LOCKABLE SpinLock {
 public:
  SpinLock() : lockword_(kSpinLockCooperative) {
    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  }

  // Constructors that allow non-cooperative spinlocks to be created for use
  // inside thread schedulers.  Normal clients should not use these.
  explicit SpinLock(base_internal::SchedulingMode mode);

  // Constructor for global SpinLock instances.  See absl/base/const_init.h.
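  //
  // A typical declaration sketch (the variable name is illustrative):
  //
  //   ABSL_CONST_INIT static absl::base_internal::SpinLock counters_lock(
  //       absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);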
  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}

  // For global SpinLock instances prefer trivial destructor when possible.
  // Default but non-trivial destructor in some build configurations causes an
  // extra static initializer.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
#else
  ~SpinLock() = default;
#endif

  // Acquire this SpinLock.
  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    if (!TryLockImpl()) {
      SlowLock();
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  // Try to acquire this SpinLock without blocking and return true if the
  // acquisition was successful.  If the lock was not acquired, false is
  // returned.  If this SpinLock is free at the time of the call, TryLock
  // will return true with high probability.
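  //
  // For example (a sketch):
  //
  //   if (lock.TryLock()) {
  //     ...              // critical section
  //     lock.Unlock();
  //   }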
  inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
    bool res = TryLockImpl();
    ABSL_TSAN_MUTEX_POST_LOCK(
        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
        0);
    return res;
  }

  // Release this SpinLock, which must be held by the calling thread.
  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      base_internal::SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // Collect contentionz profile info, and speed the wakeup of any waiter.
      // The wait_cycles value indicates how long this thread spent waiting
      // for the lock.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  // Determine if the lock is held.  When the lock is held by the invoking
  // thread, true will always be returned. Intended to be used as
  // CHECK(lock.IsHeld()).
  inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

 protected:
  // These should not be exported except for testing.

  // Store number of cycles between wait_start_time and wait_end_time in a
  // lock value.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Extract number of wait cycles in a lock value.
  static uint64_t DecodeWaitCycles(uint32_t lock_value);

  // Provide access to the protected methods above.  Use for testing only.
  friend struct SpinLockTest;

 private:
  // lockword_ is used to store the following:
  //
  // bit[0] encodes whether a lock is being held.
  // bit[1] encodes whether a lock uses cooperative scheduling.
  // bit[2] encodes whether the current lock holder disabled scheduling when
  //        acquiring the lock. Only set when kSpinLockHeld is also set.
  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
  //        This is set by the lock holder to indicate how long it waited on
  //        the lock before eventually acquiring it. The number of cycles is
  //        encoded as a 29-bit unsigned int, or in the case that the current
  //        holder did not wait but another waiter is queued, the LSB
  //        (kSpinLockSleeper) is set. The implementation does not explicitly
  //        track the number of queued waiters beyond this. It must always be
  //        assumed that waiters may exist if the current holder was required to
  //        queue.
  //
  // Invariant: if the lock is not held, the value is either 0 or
  // kSpinLockCooperative.
  static constexpr uint32_t kSpinLockHeld = 1;
  static constexpr uint32_t kSpinLockCooperative = 2;
  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
  static constexpr uint32_t kSpinLockSleeper = 8;
  // Includes kSpinLockSleeper.
  static constexpr uint32_t kWaitTimeMask =
      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
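  // Illustrative example derived from the layout above: a lockword_ value of
  // (kSpinLockHeld | kSpinLockCooperative | kSpinLockSleeper) is a held,
  // cooperative lock whose current holder did not wait to acquire it, but
  // behind which at least one waiter is queued.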

  // Returns true if the provided scheduling mode is cooperative.
  static constexpr bool IsCooperative(
      base_internal::SchedulingMode scheduling_mode) {
    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  uint32_t SpinLoop();

  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class ABSL_SCOPED_LOCKABLE SpinLockHolder {
 public:
  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
      : lock_(l) {
    l->Lock();
  }
  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }

  SpinLockHolder(const SpinLockHolder&) = delete;
  SpinLockHolder& operator=(const SpinLockHolder&) = delete;

 private:
  SpinLock* lock_;
};
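
// Example usage (a minimal sketch; the class and member names below are
// illustrative, not part of this header):
//
//   class Counter {
//    public:
//     void Increment() {
//       absl::base_internal::SpinLockHolder h(&lock_);  // acquires lock_
//       ++value_;
//     }  // lock_ is released when h goes out of scope
//
//    private:
//     absl::base_internal::SpinLock lock_;
//     int64_t value_ = 0;
//   };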

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended.  The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles.  This is thread-safe, but only a single
// profiler can be registered.  It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));
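
// A registration sketch (the callback name is hypothetical):
//
//   void RecordSpinLockContention(const void* lock, int64_t wait_cycles) {
//     ...  // e.g. feed (lock, wait_cycles) into a contention profile
//   }
//
//   // During initialization, at most once:
//   absl::base_internal::RegisterSpinLockProfiler(&RecordSpinLockContention);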

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
                                          uint32_t wait_cycles) {
  if ((lock_value & kSpinLockHeld) != 0) {
    return lock_value;
  }

  uint32_t sched_disabled_bit = 0;
  if ((lock_value & kSpinLockCooperative) == 0) {
    // For non-cooperative locks we must make sure we mark ourselves as
    // non-reschedulable before we attempt to CompareAndSwap.
    if (base_internal::SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
    }
  }

  if (!lockword_.compare_exchange_strong(
          lock_value,
          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  }

  return lock_value;
}

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_