//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

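// A test-and-set spin lock. It has no constructor, so an instance with
// static storage duration is zero-initialized to the unlocked state and
// usable before any constructors run; Init() matters only when the backing
// memory is not already zeroed.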
class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

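  // Out-of-line contention path: spin with a CPU-level pause (proc_yield)
  // for the first ten rounds, then fall back to yielding the thread to the
  // scheduler. The relaxed load retests the lock word before each exchange
  // to avoid hammering the cache line while the lock is held.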
  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};

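// A spin lock for dynamically constructed objects: the constructor performs
// Init(). Copying is forbidden by declaring the copy operations private and
// leaving them undefined (the pre-C++11 non-copyable idiom).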
class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};

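// A mutex that (as the name suggests) blocks in the OS under contention
// instead of spinning. Only declarations appear here; the implementation is
// platform-specific, and the storage is kept opaque so this header does not
// have to pull in OS headers.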
class BlockingMutex {
 public:
  explicit BlockingMutex(LinkerInitialized);
  BlockingMutex();
  void Lock();
  void Unlock();
  void CheckLocked();
 private:
  uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};

// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

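  // State encoding: bit 0 is the writer bit; the higher bits hold the
  // reader count in units of kReadLock (see the DCHECKs in ReadUnlock).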
  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

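  // ReadLock() has already added kReadLock to state_ before entering the
  // slow path, so this loop only needs to wait for the writer bit to clear;
  // the pending reader count keeps new writers from acquiring the lock in
  // the meantime.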
  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator=(const RWMutex&);
};

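// RAII wrappers: the constructor acquires the mutex and the destructor
// releases it, so a lock held by a local is released on every path out of
// the scope, including early returns.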
template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;

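// Illustrative sketch (not part of the original header): how the RAII
// typedefs above are typically used. ExampleRegistry, kMaxItems, items_,
// and size_ are hypothetical names introduced only for this example.
class ExampleRegistry {
 public:
  ExampleRegistry() : size_(0) {}

  // Writers take the mutex exclusively.
  void Add(uptr item) {
    RWMutexLock l(&mu_);  // Lock() in ctor, Unlock() in dtor.
    if (size_ < kMaxItems)
      items_[size_++] = item;
  }

  // Readers may run concurrently with each other, but not with a writer.
  uptr Size() {
    RWMutexReadLock l(&mu_);  // ReadLock() in ctor, ReadUnlock() in dtor.
    return size_;
  }

 private:
  static const uptr kMaxItems = 64;
  RWMutex mu_;
  uptr size_;
  uptr items_[kMaxItems];
};
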
}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H