• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef PLATFORMS_UNIX_LIBPANDABASE_FUTEX_MUTEX_H
17 #define PLATFORMS_UNIX_LIBPANDABASE_FUTEX_MUTEX_H
18 
19 #include "clang.h"
20 #include "macros.h"
21 #include "os/thread.h"
22 #include "fmutex.h"
23 
24 #include <array>
25 #include <atomic>
26 #include <iostream>
27 #include <limits>
28 
29 #include <unistd.h>
30 #include <linux/futex.h>
31 #include <sys/syscall.h>
32 
33 namespace panda::os::unix::memory::futex {
34 
35 // We need to update TLS current tid after fork
36 void PostFork();
37 
38 class ConditionVariable;
39 
// Futex-based mutex: a thin C++ wrapper over the C `struct fmutex` (fmutex.h).
// The CAPABILITY/ACQUIRE/RELEASE macros are clang thread-safety-analysis
// annotations, letting the compiler check lock discipline at call sites.
class CAPABILITY("mutex") Mutex {
public:
    Mutex();

    ~Mutex();

    // Blocks until the calling thread acquires the mutex.
    void Lock() ACQUIRE();

    // Attempts to acquire without blocking; returns true on success.
    bool TryLock() TRY_ACQUIRE(true);

    // Like TryLock(), but spins before giving up.
    // NOTE(review): the exact spin policy lives in fmutex.h — confirm there.
    bool TryLockWithSpinning() TRY_ACQUIRE(true);

    void Unlock() RELEASE();

    // Should be used only in monitor. Intended to be used with just created mutexes which aren't in use yet
    // Registers `thread` as mutex's owner and locks it
    void LockForOther(thread::ThreadId thread);

    // Should be used only in monitor. Intended to be used with just created mutexes which aren't in use yet
    // Unregisters `thread` as mutex's owner and unlocks it
    void UnlockForOther(thread::ThreadId thread);

    // Forwards the global fmutex deadlock-detection policy flag.
    static bool DoNotCheckOnDeadlock()
    {
        return MutexDoNotCheckOnDeadlock();
    }

    // Globally disables deadlock checks (delegates to fmutex).
    static void IgnoreChecksOnDeadlock()
    {
        MutexIgnoreChecksOnDeadlock();
    }

private:
    // Underlying C futex state, shared with the fmutex implementation.
    struct fmutex mutex_;

    // Address of the futex word, as required by the futex syscall.
    int *GetStateAddr()
    {
        return futex::GetStateAddr(&mutex_);
    }

    void IncrementWaiters()
    {
        futex::IncrementWaiters(&mutex_);
    }

    void DecrementWaiters()
    {
        futex::DecrementWaiters(&mutex_);
    }

    int32_t GetWaiters()
    {
        // Atomic with relaxed order reason: mutex synchronization
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        return futex::GetWaiters(&mutex_);
    }

    // True if `thread` currently owns this mutex (delegates to fmutex).
    bool IsHeld(thread::ThreadId thread)
    {
        return futex::IsHeld(&mutex_, thread);
    }

    // Recursion-depth bookkeeping used by recursive mutexes.
    int GetRecursiveCount()
    {
        return mutex_.recursiveCount;
    }

    void SetRecursiveCount(int count)
    {
        mutex_.recursiveCount = count;
    }

    // Owner-id accesses must be lock-free or the mutex would need a mutex.
    static_assert(std::atomic<thread::ThreadId>::is_always_lock_free);

    NO_COPY_SEMANTIC(Mutex);
    NO_MOVE_SEMANTIC(Mutex);

    // ConditionVariable needs direct access to the futex state for wait/wake.
    friend ConditionVariable;

protected:
    // Delegating ctor used by RecursiveMutex to mark the mutex recursive.
    explicit Mutex(bool recursive) : Mutex()
    {
        mutex_.recursive_mutex_ = recursive;
    };
};
125 
// Mutex variant that the owning thread may re-acquire; implemented by
// flipping the recursive flag via the protected Mutex(bool) constructor.
class CAPABILITY("mutex") RecursiveMutex : public Mutex {
public:
    RecursiveMutex() : Mutex(true) {}

    ~RecursiveMutex() = default;

private:
    NO_COPY_SEMANTIC(RecursiveMutex);
    NO_MOVE_SEMANTIC(RecursiveMutex);
};
136 
// Futex-based reader-writer lock. `state_` encodes the mode:
// -1 (WRITE_LOCKED) — exclusively held; 0 (UNLOCKED) — free;
// > 0 — number of active readers.
class SHARED_CAPABILITY("mutex") RWLock {
public:
    RWLock() = default;

    ~RWLock();

    // ReadLock and ReadUnlock are used in mutator lock often, prefer inlining over call to libpandabase
    ALWAYS_INLINE void ReadLock() ACQUIRE_SHARED()
    {
        bool done = false;
        while (!done) {
            // Atomic with relaxed order reason: mutex synchronization
            auto cur_state = state_.load(std::memory_order_relaxed);
            if (LIKELY(cur_state >= UNLOCKED)) {
                // Not write-locked: try to register one more reader via CAS.
                auto new_state = cur_state + READ_INCREMENT;
                done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire);
            } else {
                // Write-locked: take the non-inline futex wait path.
                HandleReadLockWait(cur_state);
            }
        }
        ASSERT(!HasExclusiveHolder());
    }

    // Releases the lock in whichever mode the caller holds it.
    ALWAYS_INLINE void Unlock() RELEASE_GENERIC()
    {
        if (HasExclusiveHolder()) {
            WriteUnlock();
        } else {
            ReadUnlock();
        }
    }

    void WriteLock() ACQUIRE();

    bool TryReadLock() TRY_ACQUIRE_SHARED(true);

    bool TryWriteLock() TRY_ACQUIRE(true);

private:
    ALWAYS_INLINE void ReadUnlock() RELEASE_SHARED()
    {
        ASSERT(!HasExclusiveHolder());
        bool done = false;
        // Atomic with relaxed order reason: mutex synchronization
        auto cur_state = state_.load(std::memory_order_relaxed);
        while (!done) {
            if (LIKELY(cur_state > 0)) {
                // Reduce state by 1 and do release store.
                // waiters_ load should not be reordered before state_, so it's done with seq cst.
                auto new_state = cur_state - READ_INCREMENT;
                // cur_state should be updated with fetched value on fail
                // Atomic with seq_cst order reason: mutex synchronization
                done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_seq_cst);
                if (done && new_state == UNLOCKED) {
                    // Atomic with seq_cst order reason: mutex synchronization
                    if (waiters_.load(std::memory_order_seq_cst) > 0) {
                        // Last reader left: wake blocked waiters.
                        // NOTE(review): WAKE_ALL is passed here although waking one
                        // exclusive waiter would suffice when only writers wait —
                        // confirm the intended wake count against fmutex.h.
                        // NOLINTNEXTLINE(hicpp-signed-bitwise)
                        futex(GetStateAddr(), FUTEX_WAKE_PRIVATE, WAKE_ALL, nullptr, nullptr, 0);
                    }
                }
            } else {
                // Cannot use logger in header
                std::cout << "RWLock ReadUnlock got unexpected state, RWLock is unlocked?" << std::endl;
                std::abort();
            }
        }
    }

    void WriteUnlock() RELEASE();

    // Non-inline path for handling waiting.
    void HandleReadLockWait(int32_t cur_state);

    static constexpr int32_t WRITE_LOCKED = -1;
    static constexpr int32_t UNLOCKED = 0;
    static constexpr int32_t READ_INCREMENT = 1;
    // -1 - write locked; 0 - unlocked; > 0 - read locked by state_ owners.
    std::atomic_int32_t state_ {0};

    // Address of state_, used as the futex word for the futex syscall.
    int *GetStateAddr()
    {
        return reinterpret_cast<int *>(&state_);
    }

    // Exclusive owner.
    alignas(alignof(uint32_t)) std::atomic<thread::ThreadId> exclusive_owner_ {0};
    static_assert(std::atomic<thread::ThreadId>::is_always_lock_free);

    // Owner id 0 means "no exclusive holder".
    bool HasExclusiveHolder()
    {
        // Atomic with relaxed order reason: mutex synchronization
        return exclusive_owner_.load(std::memory_order_relaxed) != 0;
    }
    bool IsExclusiveHeld(thread::ThreadId thread)
    {
        // Atomic with relaxed order reason: mutex synchronization
        return exclusive_owner_.load(std::memory_order_relaxed) == thread;
    }

    // Number of waiters both for read and write locks.
    std::atomic_uint32_t waiters_ {0};

    void IncrementWaiters()
    {
        // Atomic with relaxed order reason: mutex synchronization
        waiters_.fetch_add(1, std::memory_order_relaxed);
    }
    void DecrementWaiters()
    {
        // Atomic with relaxed order reason: mutex synchronization
        waiters_.fetch_sub(1, std::memory_order_relaxed);
    }

    // Extra padding to make RWLock 16 bytes long
    static constexpr size_t PADDING_SIZE = 1;
    std::array<uint32_t, PADDING_SIZE> padding_ = {0};
    // [[maybe_unused]] causes issues, dummy accessor for `padding_` as workaround
    uint32_t dummy_access_padding()
    {
        return padding_[0];
    }

    NO_COPY_SEMANTIC(RWLock);
    NO_MOVE_SEMANTIC(RWLock);
};
263 
// Futex-based condition variable, used together with the futex Mutex above
// (it is a friend of Mutex and may touch its futex state directly).
class ConditionVariable {
public:
    ConditionVariable() = default;

    ~ConditionVariable();

    // Wakes one waiter.
    void Signal()
    {
        SignalCount(WAKE_ONE);
    }

    // Wakes all waiters.
    void SignalAll()
    {
        SignalCount(WAKE_ALL);
    }

    // Blocks the caller on this condvar. NOTE(review): presumably releases
    // `mutex` while waiting and re-acquires it before returning (hence
    // NO_THREAD_SAFETY_ANALYSIS); implementation lives in the .cpp — confirm.
    void Wait(Mutex *mutex) NO_THREAD_SAFETY_ANALYSIS;

    // Waits with a timeout of `ms` milliseconds plus `ns` nanoseconds;
    // `is_absolute` presumably selects an absolute deadline — verify in the
    // .cpp. Returns bool (likely "timed out" flag — TODO confirm).
    bool TimedWait(Mutex *mutex, uint64_t ms, uint64_t ns = 0, bool is_absolute = false) NO_THREAD_SAFETY_ANALYSIS;

private:
    // Mutex the current waiters are associated with.
    alignas(alignof(uint64_t)) std::atomic<Mutex *> mutex_ptr_ {nullptr};
    // Futex word waiters sleep on.
    std::atomic_int32_t cond_ {0};
    std::atomic_int32_t waiters_ {0};
    static_assert(std::atomic<Mutex *>::is_always_lock_free);

    // Common wake path for Signal/SignalAll.
    void SignalCount(int32_t to_wake);

    // Address of cond_, used as the futex word for the futex syscall.
    int *GetCondAddr()
    {
        return reinterpret_cast<int *>(&cond_);
    }

    NO_COPY_SEMANTIC(ConditionVariable);
    NO_MOVE_SEMANTIC(ConditionVariable);
};
300 
// Keep the primitives compact: both structures must stay exactly 16 bytes
// (RWLock carries explicit padding to satisfy this — see PADDING_SIZE).
static constexpr size_t ALL_STRUCTURES_SIZE = 16U;
static_assert(sizeof(ConditionVariable) == ALL_STRUCTURES_SIZE);
static_assert(sizeof(RWLock) == ALL_STRUCTURES_SIZE);
304 
305 }  // namespace panda::os::unix::memory::futex
306 
307 #endif  // PLATFORMS_UNIX_LIBPANDABASE_FUTEX_MUTEX_H
308