// Copyright (C) 2014 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "aemu/base/Compiler.h"

#include "aemu/base/ThreadAnnotations.h"

#include <atomic>

#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN 1
#include <windows.h>
#else
#include <pthread.h>
#endif

#include <assert.h>

namespace android {
namespace base {

class AutoLock;
class AutoWriteLock;
class AutoReadLock;

39 // A wrapper class for mutexes only suitable for using in static context,
40 // where it's OK to leak the underlying system object. Use Lock for scoped or
41 // member locks.
42 class CAPABILITY("mutex") StaticLock {
43 public:
44     using AutoLock = android::base::AutoLock;
45 
46     constexpr StaticLock() = default;
47 
48     // Acquire the lock.
lock()49     void lock() ACQUIRE() {
50 #ifdef _WIN32
51         ::AcquireSRWLockExclusive(&mLock);
52 #else
53         ::pthread_mutex_lock(&mLock);
54 #endif
55     }
56 
tryLock()57     bool tryLock() TRY_ACQUIRE(true) {
58         bool ret = false;
59 #ifdef _WIN32
60         ret = ::TryAcquireSRWLockExclusive(&mLock);
61 #else
62         ret = ::pthread_mutex_trylock(&mLock) == 0;
63 #endif
64         return ret;
65     }
66 
67     // Release the lock.
unlock()68     void unlock() RELEASE() {
69 #ifdef _WIN32
70         ::ReleaseSRWLockExclusive(&mLock);
71 #else
72         ::pthread_mutex_unlock(&mLock);
73 #endif
74     }
75 
76 protected:
77     friend class ConditionVariable;
78 
79 #ifdef _WIN32
80     // Benchmarks show that on Windows SRWLOCK performs a little bit better than
81     // CRITICAL_SECTION for uncontended mode and much better in case of
82     // contention.
83     SRWLOCK mLock = SRWLOCK_INIT;
84 #else
85     pthread_mutex_t mLock = PTHREAD_MUTEX_INITIALIZER;
86 #endif
87     // Both POSIX threads and WinAPI don't allow move (undefined behavior).
88     DISALLOW_COPY_ASSIGN_AND_MOVE(StaticLock);
89 };
91 // Simple wrapper class for mutexes used in non-static context.
92 class Lock : public StaticLock {
93 public:
94     using StaticLock::AutoLock;
95 
96     constexpr Lock() = default;
97 #ifndef _WIN32
98     // The only difference is that POSIX requires a deallocation function call
99     // for its mutexes.
~Lock()100     ~Lock() { ::pthread_mutex_destroy(&mLock); }
101 #endif
102 };
104 class ReadWriteLock {
105 public:
106     using AutoWriteLock = android::base::AutoWriteLock;
107     using AutoReadLock = android::base::AutoReadLock;
108 
109 #ifdef _WIN32
110     constexpr ReadWriteLock() = default;
111     ~ReadWriteLock() = default;
lockRead()112     void lockRead() { ::AcquireSRWLockShared(&mLock); }
unlockRead()113     void unlockRead() { ::ReleaseSRWLockShared(&mLock); }
lockWrite()114     void lockWrite() { ::AcquireSRWLockExclusive(&mLock); }
unlockWrite()115     void unlockWrite() { ::ReleaseSRWLockExclusive(&mLock); }
116 
117 private:
118     SRWLOCK mLock = SRWLOCK_INIT;
119 #else   // !_WIN32
ReadWriteLock()120     ReadWriteLock() { ::pthread_rwlock_init(&mLock, NULL); }
~ReadWriteLock()121     ~ReadWriteLock() { ::pthread_rwlock_destroy(&mLock); }
lockRead()122     void lockRead() { ::pthread_rwlock_rdlock(&mLock); }
unlockRead()123     void unlockRead() { ::pthread_rwlock_unlock(&mLock); }
lockWrite()124     void lockWrite() { ::pthread_rwlock_wrlock(&mLock); }
unlockWrite()125     void unlockWrite() { ::pthread_rwlock_unlock(&mLock); }
126 
127 private:
128     pthread_rwlock_t mLock;
129 #endif  // !_WIN32
130 
131     friend class ConditionVariable;
132     DISALLOW_COPY_ASSIGN_AND_MOVE(ReadWriteLock);
133 };
135 // Helper class to lock / unlock a mutex automatically on scope
136 // entry and exit.
137 // NB: not thread-safe (as opposed to the Lock class)
138 class SCOPED_CAPABILITY AutoLock {
139 public:
AutoLock(StaticLock & lock)140     AutoLock(StaticLock& lock) ACQUIRE(mLock) : mLock(lock) { mLock.lock(); }
141 
AutoLock(AutoLock && other)142     AutoLock(AutoLock&& other) : mLock(other.mLock), mLocked(other.mLocked) {
143         other.mLocked = false;
144     }
145 
lock()146     void lock() ACQUIRE(mLock) {
147         assert(!mLocked);
148         mLock.lock();
149         mLocked = true;
150     }
151 
unlock()152     void unlock() RELEASE(mLock) {
153         assert(mLocked);
154         mLock.unlock();
155         mLocked = false;
156     }
157 
isLocked()158     bool isLocked() const { return mLocked; }
159 
RELEASE()160     ~AutoLock() RELEASE() {
161         if (mLocked) {
162             mLock.unlock();
163         }
164     }
165 
166 private:
167     StaticLock& mLock;
168     bool mLocked = true;
169 
170     friend class ConditionVariable;
171     // Don't allow move because this class has a non-movable object.
172     DISALLOW_COPY_AND_ASSIGN(AutoLock);
173 };
175 class AutoWriteLock {
176 public:
AutoWriteLock(ReadWriteLock & lock)177     AutoWriteLock(ReadWriteLock& lock) : mLock(lock) { mLock.lockWrite(); }
178 
lockWrite()179     void lockWrite() {
180         assert(!mWriteLocked);
181         mLock.lockWrite();
182         mWriteLocked = true;
183     }
184 
unlockWrite()185     void unlockWrite() {
186         assert(mWriteLocked);
187         mLock.unlockWrite();
188         mWriteLocked = false;
189     }
190 
~AutoWriteLock()191     ~AutoWriteLock() {
192         if (mWriteLocked) {
193             mLock.unlockWrite();
194         }
195     }
196 
197 private:
198     ReadWriteLock& mLock;
199     bool mWriteLocked = true;
200     // This class has a non-movable object.
201     DISALLOW_COPY_ASSIGN_AND_MOVE(AutoWriteLock);
202 };
204 class AutoReadLock {
205 public:
AutoReadLock(ReadWriteLock & lock)206     AutoReadLock(ReadWriteLock& lock) : mLock(lock) { mLock.lockRead(); }
207 
lockRead()208     void lockRead() {
209         assert(!mReadLocked);
210         mLock.lockRead();
211         mReadLocked = true;
212     }
213 
unlockRead()214     void unlockRead() {
215         assert(mReadLocked);
216         mLock.unlockRead();
217         mReadLocked = false;
218     }
219 
~AutoReadLock()220     ~AutoReadLock() {
221         if (mReadLocked) {
222             mLock.unlockRead();
223         }
224     }
225 
226 private:
227     ReadWriteLock& mLock;
228     bool mReadLocked = true;
229     // This class has a non-movable object.
230     DISALLOW_COPY_ASSIGN_AND_MOVE(AutoReadLock);
231 };

// Seqlock (cross platform)
// Based on:
// https://lwn.net/Articles/21812/
// https://github.com/rigtorp/Seqlock
//
// A seqlock is meant to address performance issues with using reader/writer
// locks to protect data structures where the time spent performing operations
// while the lock is held is very short or even comparable to the time spent
// locking/unlocking in the first place. This is very common in situations
// where we have some globally accessible array of objects and multiple threads
// performing short little read/write operations on them (i.e., pretty much
// anything that uses entity component system architecture that needs to be
// accessed by multiple threads).
//
// The basic idea of a seqlock is to store a sequence number (like a version
// number) that writers increment, but readers only read. When beginning write
// access, the sequence number is incremented, and after write access ends, the
// sequence number is incremented again. This way, when a reader is trying to
// read and it notices a change in the sequence number (or, as an optimization,
// that the number is odd (because writes should always end up incrementing the
// sequence number by 2 if they complete)), it can try again until there is no
// change.
//
// The problem, however, is that we need to be very careful about how we set
// and compare the sequence numbers, because compilers/hardware easily reorder
// instructions involving what seems to be just simple integer arithmetic.
// (see https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf) Atomic
// primitives need to be used for all accesses to the sequence number.
//
// In particular, the atomic updates to the sequence number and the actual
// non-atomic data accesses are allowed to be reordered by the compiler, which
// introduces problems when accessing the data (still allowing reads of an
// update in progress); we need smp_rmb.
// https://elixir.bootlin.com/linux/latest/source/tools/arch/arm64/include/asm/barrier.h#L25
//
// arm64: memory barrier instruction
// asm volatile("dmb ishld" ::: "memory")
// x86: compiler barrier
// std::atomic_signal_fence(std::memory_order_acq_rel);
//
// This smp_rmb needs to be added before and after the read operation.
//
// On the write side, we use
// arm64: memory barrier instruction
// asm volatile("dmb ishst" ::: "memory")
// x86: compiler barrier
// std::atomic_signal_fence(std::memory_order_acq_rel);
//
// https://github.com/rigtorp/Seqlock has a version that seems to address these issues, while
// https://elixir.bootlin.com/linux/latest/source/include/linux/seqlock.h shows how to implement in the kernel.
//
SmpWmb()284 static inline __attribute__((always_inline)) void SmpWmb() {
285 #if defined(__aarch64__)
286         asm volatile("dmb ishst" ::: "memory");
287 #elif defined(__x86_64__)
288         std::atomic_thread_fence(std::memory_order_release);
289 #else
290 #error "Unimplemented SmpWmb for current CPU architecture"
291 #endif
292 }
SmpRmb()294 static inline __attribute__((always_inline)) void SmpRmb() {
295 #if defined(__aarch64__)
296         asm volatile("dmb ishld" ::: "memory");
297 #elif defined(__x86_64__)
298         std::atomic_thread_fence(std::memory_order_acquire);
299 #else
300 #error "Unimplemented SmpRmb for current CPU architecture"
301 #endif
302 }
304 class SeqLock {
305 public:
beginWrite()306     void beginWrite() ACQUIRE(mWriteLock) {
307         mWriteLock.lock();
308         mSeq.fetch_add(1, std::memory_order_release);
309         SmpWmb();
310     }
311 
endWrite()312     void endWrite() RELEASE(mWriteLock) {
313         SmpWmb();
314         mSeq.fetch_add(1, std::memory_order_release);
315         mWriteLock.unlock();
316     }
317 
318 #ifdef __cplusplus
319 #   define SEQLOCK_LIKELY( exp )    (__builtin_expect( !!(exp), true ))
320 #   define SEQLOCK_UNLIKELY( exp )  (__builtin_expect( !!(exp), false ))
321 #else
322 #   define SEQLOCK_LIKELY( exp )    (__builtin_expect( !!(exp), 1 ))
323 #   define SEQLOCK_UNLIKELY( exp )  (__builtin_expect( !!(exp), 0 ))
324 #endif
325 
beginRead()326     uint32_t beginRead() {
327         uint32_t res;
328 
329         // see https://elixir.bootlin.com/linux/latest/source/include/linux/seqlock.h#L128; if odd we definitely know there's a write in progress, and shouldn't proceed any further.
330 repeat:
331         res = mSeq.load(std::memory_order_acquire);
332         if (SEQLOCK_UNLIKELY(res & 1)) {
333             goto repeat;
334         }
335 
336         SmpRmb();
337         return res;
338     }
339 
shouldRetryRead(uint32_t prevSeq)340     bool shouldRetryRead(uint32_t prevSeq) {
341         SmpRmb();
342         uint32_t res = mSeq.load(std::memory_order_acquire);
343         return (res != prevSeq);
344     }
345 
346     // Convenience class for write
347     class ScopedWrite {
348     public:
ScopedWrite(SeqLock * lock)349         ScopedWrite(SeqLock* lock) : mLock(lock) {
350             mLock->beginWrite();
351         }
~ScopedWrite()352         ~ScopedWrite() {
353             mLock->endWrite();
354         }
355     private:
356         SeqLock* mLock;
357     };
358 
359     // Convenience macro for read (no std::function due to its considerable overhead)
360 #define AEMU_SEQLOCK_READ_WITH_RETRY(lock, readStuff) { uint32_t aemu_seqlock_curr_seq; do { \
361     aemu_seqlock_curr_seq = (lock)->beginRead(); \
362     readStuff; \
363     } while ((lock)->shouldRetryRead(aemu_seqlock_curr_seq)); }
364 
365 private:
366     std::atomic<uint32_t> mSeq { 0 }; // The sequence number
367     Lock mWriteLock; // Just use a normal mutex to protect writes
368 };
369 
370 }  // namespace base
371 }  // namespace android
372