/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 
16 #ifndef PANDA_LIBPANDABASE_PBASE_OS_UNIX__FUTEX_MUTEX_H_
17 #define PANDA_LIBPANDABASE_PBASE_OS_UNIX__FUTEX_MUTEX_H_
18 
#include "clang.h"
#include "macros.h"
#include "os/thread.h"
#include "fmutex.h"

#include <array>
#include <atomic>
#include <cstdint>
#include <iostream>
#include <limits>

#include <unistd.h>
#include <linux/futex.h>
#include <sys/syscall.h>
31 
32 namespace panda::os::unix::memory::futex {
33 
// We need to update TLS current tid after fork
void PostFork();

class ConditionVariable;
38 
39 class CAPABILITY("mutex") Mutex {
40 public:
41     Mutex();
42 
43     ~Mutex();
44 
45     void Lock() ACQUIRE();
46 
47     bool TryLock() TRY_ACQUIRE(true);
48 
49     bool TryLockWithSpinning() TRY_ACQUIRE(true);
50 
51     void Unlock() RELEASE();
52 
53     // Should be used only in monitor. Intended to be used with just created mutexes which aren't in use yet
54     // Registers `thread` as mutex's owner and locks it
55     void LockForOther(thread::ThreadId thread);
56 
57     // Should be used only in monitor. Intended to be used with just created mutexes which aren't in use yet
58     // Unegisters `thread` as mutex's owner and unlocks it
59     void UnlockForOther(thread::ThreadId thread);
60 
DoNotCheckOnDeadlock()61     static bool DoNotCheckOnDeadlock()
62     {
63         return MutexDoNotCheckOnDeadlock();
64     }
65 
IgnoreChecksOnDeadlock()66     static void IgnoreChecksOnDeadlock()
67     {
68         MutexIgnoreChecksOnDeadlock();
69     }
70 
71 private:
72     struct fmutex mutex_;
73 
GetStateAddr()74     int *GetStateAddr()
75     {
76         return futex::GetStateAddr(&mutex_);
77     }
78 
IncrementWaiters()79     void IncrementWaiters()
80     {
81         futex::IncrementWaiters(&mutex_);
82     }
83 
DecrementWaiters()84     void DecrementWaiters()
85     {
86         futex::DecrementWaiters(&mutex_);
87     }
88 
GetWaiters()89     int32_t GetWaiters()
90     {
91         // Atomic with relaxed order reason: mutex synchronization
92         // NOLINTNEXTLINE(hicpp-signed-bitwise)
93         return futex::GetWaiters(&mutex_);
94     }
95 
IsHeld(thread::ThreadId thread)96     bool IsHeld(thread::ThreadId thread)
97     {
98         return futex::IsHeld(&mutex_, thread);
99     }
100 
GetRecursiveCount()101     int GetRecursiveCount()
102     {
103         return mutex_.recursiveCount;
104     }
105 
SetRecursiveCount(int count)106     void SetRecursiveCount(int count)
107     {
108         mutex_.recursiveCount = count;
109     }
110 
111     static_assert(std::atomic<thread::ThreadId>::is_always_lock_free);
112 
113     NO_COPY_SEMANTIC(Mutex);
114     NO_MOVE_SEMANTIC(Mutex);
115 
116     friend ConditionVariable;
117 
118 protected:
Mutex(bool recursive)119     explicit Mutex(bool recursive) : Mutex()
120     {
121         mutex_.recursive_mutex_ = recursive;
122     };
123 };
124 
125 class CAPABILITY("mutex") RecursiveMutex : public Mutex {
126 public:
RecursiveMutex()127     RecursiveMutex() : Mutex(true) {}
128 
129     ~RecursiveMutex() = default;
130 
131 private:
132     NO_COPY_SEMANTIC(RecursiveMutex);
133     NO_MOVE_SEMANTIC(RecursiveMutex);
134 };
135 
136 class SHARED_CAPABILITY("mutex") RWLock {
137 public:
138     RWLock() = default;
139 
140     ~RWLock();
141 
142     // ReadLock and ReadUnlock are used in mutator lock often, prefer inlining over call to libpandabase
ReadLock()143     ALWAYS_INLINE void ReadLock() ACQUIRE_SHARED()
144     {
145         bool done = false;
146         while (!done) {
147             // Atomic with relaxed order reason: mutex synchronization
148             auto cur_state = state_.load(std::memory_order_relaxed);
149             if (LIKELY(cur_state >= UNLOCKED)) {
150                 auto new_state = cur_state + READ_INCREMENT;
151                 done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire);
152             } else {
153                 HandleReadLockWait(cur_state);
154             }
155         }
156         ASSERT(!HasExclusiveHolder());
157     }
158 
Unlock()159     ALWAYS_INLINE void Unlock() RELEASE_GENERIC()
160     {
161         if (HasExclusiveHolder()) {
162             WriteUnlock();
163         } else {
164             ReadUnlock();
165         }
166     }
167 
168     void WriteLock() ACQUIRE();
169 
170     bool TryReadLock() TRY_ACQUIRE_SHARED(true);
171 
172     bool TryWriteLock() TRY_ACQUIRE(true);
173 
174 private:
ReadUnlock()175     ALWAYS_INLINE void ReadUnlock() RELEASE_SHARED()
176     {
177         ASSERT(!HasExclusiveHolder());
178         bool done = false;
179         // Atomic with relaxed order reason: mutex synchronization
180         auto cur_state = state_.load(std::memory_order_relaxed);
181         while (!done) {
182             if (LIKELY(cur_state > 0)) {
183                 // Reduce state by 1 and do release store.
184                 // waiters_ load should not be reordered before state_, so it's done with seq cst.
185                 auto new_state = cur_state - READ_INCREMENT;
186                 // cur_state should be updated with fetched value on fail
187                 // Atomic with seq_cst order reason: mutex synchronization
188                 done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_seq_cst);
189                 if (done && new_state == UNLOCKED) {
190                     // Atomic with seq_cst order reason: mutex synchronization
191                     if (waiters_.load(std::memory_order_seq_cst) > 0) {
192                         // Wake one exclusive waiter as there are now no readers.
193                         // NOLINTNEXTLINE(hicpp-signed-bitwise)
194                         futex(GetStateAddr(), FUTEX_WAKE_PRIVATE, WAKE_ALL, nullptr, nullptr, 0);
195                     }
196                 }
197             } else {
198                 // Cannot use logger in header
199                 std::cout << "RWLock ReadUnlock got unexpected state, RWLock is unlocked?" << std::endl;
200                 std::abort();
201             }
202         }
203     }
204 
205     void WriteUnlock() RELEASE();
206 
207     // Non-inline path for handling waiting.
208     void HandleReadLockWait(int32_t cur_state);
209 
210     static constexpr int32_t WRITE_LOCKED = -1;
211     static constexpr int32_t UNLOCKED = 0;
212     static constexpr int32_t READ_INCREMENT = 1;
213     // -1 - write locked; 0 - unlocked; > 0 - read locked by state_ owners.
214     std::atomic_int32_t state_ {0};
215 
GetStateAddr()216     int *GetStateAddr()
217     {
218         return reinterpret_cast<int *>(&state_);
219     }
220 
221     // Exclusive owner.
222     alignas(alignof(uint32_t)) std::atomic<thread::ThreadId> exclusive_owner_ {0};
223     static_assert(std::atomic<thread::ThreadId>::is_always_lock_free);
224 
HasExclusiveHolder()225     bool HasExclusiveHolder()
226     {
227         // Atomic with relaxed order reason: mutex synchronization
228         return exclusive_owner_.load(std::memory_order_relaxed) != 0;
229     }
IsExclusiveHeld(thread::ThreadId thread)230     bool IsExclusiveHeld(thread::ThreadId thread)
231     {
232         // Atomic with relaxed order reason: mutex synchronization
233         return exclusive_owner_.load(std::memory_order_relaxed) == thread;
234     }
235 
236     // Number of waiters both for read and write locks.
237     std::atomic_uint32_t waiters_ {0};
238 
IncrementWaiters()239     void IncrementWaiters()
240     {
241         // Atomic with relaxed order reason: mutex synchronization
242         waiters_.fetch_add(1, std::memory_order_relaxed);
243     }
DecrementWaiters()244     void DecrementWaiters()
245     {
246         // Atomic with relaxed order reason: mutex synchronization
247         waiters_.fetch_sub(1, std::memory_order_relaxed);
248     }
249 
250     // Extra padding to make RWLock 16 bytes long
251     static constexpr size_t PADDING_SIZE = 1;
252     std::array<uint32_t, PADDING_SIZE> padding_ = {0};
253     // [[maybe_unused]] causes issues, dummy accessor for `padding_` as workaround
dummy_access_padding()254     uint32_t dummy_access_padding()
255     {
256         return padding_[0];
257     }
258 
259     NO_COPY_SEMANTIC(RWLock);
260     NO_MOVE_SEMANTIC(RWLock);
261 };
262 
263 class ConditionVariable {
264 public:
265     ConditionVariable() = default;
266 
267     ~ConditionVariable();
268 
Signal()269     void Signal()
270     {
271         SignalCount(WAKE_ONE);
272     }
273 
SignalAll()274     void SignalAll()
275     {
276         SignalCount(WAKE_ALL);
277     }
278 
279     void Wait(Mutex *mutex) NO_THREAD_SAFETY_ANALYSIS;
280 
281     bool TimedWait(Mutex *mutex, uint64_t ms, uint64_t ns = 0, bool is_absolute = false) NO_THREAD_SAFETY_ANALYSIS;
282 
283 private:
284     alignas(alignof(uint64_t)) std::atomic<Mutex *> mutex_ptr_ {nullptr};
285     std::atomic_int32_t cond_ {0};
286     std::atomic_int32_t waiters_ {0};
287     static_assert(std::atomic<Mutex *>::is_always_lock_free);
288 
289     void SignalCount(int32_t to_wake);
290 
GetCondAddr()291     int *GetCondAddr()
292     {
293         return reinterpret_cast<int *>(&cond_);
294     }
295 
296     NO_COPY_SEMANTIC(ConditionVariable);
297     NO_MOVE_SEMANTIC(ConditionVariable);
298 };
299 
300 static constexpr size_t ALL_STRUCTURES_SIZE = 16U;
301 static_assert(sizeof(ConditionVariable) == ALL_STRUCTURES_SIZE);
302 static_assert(sizeof(RWLock) == ALL_STRUCTURES_SIZE);
303 
304 }  // namespace panda::os::unix::memory::futex
305 
306 #endif  // PANDA_LIBPANDABASE_PBASE_OS_UNIX__FUTEX_MUTEX_H_
307