/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"
#include "utils/logger.h"

namespace panda::os::unix::memory::futex {

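// This file contains the futex-based slow paths of Mutex, RWLock and ConditionVariable;
// the class definitions and the MutexInit/MutexLock/... helpers called below are
// presumably declared in mutex.h.
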
// Avoid repeatedly calling GetCurrentThreadId by storing tid locally
thread_local thread::ThreadId current_tid {0};

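// After fork() only the calling thread survives in the child, and it has a new thread id,
// so the cached value must be refreshed; the runtime is expected to call PostFork() in the child.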
void PostFork()
{
    current_tid = os::thread::GetCurrentThreadId();
}

// Spin for small arguments and yield for longer ones.
static void BackOff(uint32_t i)
{
    static constexpr uint32_t SPIN_MAX = 10;
    if (i <= SPIN_MAX) {
        volatile uint32_t x = 0;  // Volatile to make sure the loop is not optimized out.
        const uint32_t spin_count = 10 * i;
        for (uint32_t spin = 0; spin < spin_count; spin++) {
            ++x;
        }
    } else {
        thread::ThreadYield();
    }
}

// Wait until pred is true, or until the spin/yield budget is exhausted.
// Return true if the predicate test succeeded, false if we gave up.
template <typename Pred>
static inline bool WaitBrieflyFor(std::atomic_int *addr, Pred pred)
{
    // Spinning instead of calling futex avoids a syscall (and the context switch it may
    // cause) for locks that are released quickly.
    static constexpr uint32_t MAX_BACK_OFF = 10;
    static constexpr uint32_t MAX_ITER = 50;
    for (uint32_t i = 1; i <= MAX_ITER; i++) {
        BackOff(std::min(i, MAX_BACK_OFF));
        // Atomic with relaxed order reason: mutex synchronization
        if (pred(addr->load(std::memory_order_relaxed))) {
            return true;
        }
    }
    return false;
}

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
Mutex::Mutex()
{
    MutexInit(&mutex_);
}

Mutex::~Mutex()
{
    MutexDestroy(&mutex_);
}

void Mutex::Lock()
{
    MutexLock(&mutex_, false);
}

bool Mutex::TryLock()
{
    return MutexLock(&mutex_, true);
}

bool Mutex::TryLockWithSpinning()
{
    return MutexTryLockWithSpinning(&mutex_);
}

void Mutex::Unlock()
{
    MutexUnlock(&mutex_);
}

void Mutex::LockForOther(thread::ThreadId thread)
{
    MutexLockForOther(&mutex_, thread);
}

void Mutex::UnlockForOther(thread::ThreadId thread)
{
    MutexUnlockForOther(&mutex_, thread);
}

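// RWLock state encoding, judging by the constants used below: state_ == UNLOCKED (0) means free,
// state_ == WRITE_LOCKED (negative) means a writer holds the lock, and a positive state_ is the
// count of active readers (stepped by READ_INCREMENT). waiters_ counts threads sleeping on the
// futex, and exclusive_owner_ holds the writer's tid for assertions.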
RWLock::~RWLock()
{
#ifndef PANDA_TARGET_MOBILE
    if (!Mutex::DoNotCheckOnDeadlock()) {
#endif  // PANDA_TARGET_MOBILE
        // Atomic with relaxed order reason: mutex synchronization
        if (state_.load(std::memory_order_relaxed) != 0) {
            LOG(FATAL, COMMON) << "RWLock destruction failed; state_ is non-zero!";
            // Atomic with relaxed order reason: mutex synchronization
        } else if (exclusive_owner_.load(std::memory_order_relaxed) != 0) {
            LOG(FATAL, COMMON) << "RWLock destruction failed; RWLock has an owner!";
            // Atomic with relaxed order reason: mutex synchronization
        } else if (waiters_.load(std::memory_order_relaxed) != 0) {
            LOG(FATAL, COMMON) << "RWLock destruction failed; RWLock has waiters!";
        }
#ifndef PANDA_TARGET_MOBILE
    } else {
        LOG(WARNING, COMMON) << "Deadlock detected, ignoring RWLock";
    }
#endif  // PANDA_TARGET_MOBILE
}

void RWLock::WriteLock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    bool done = false;
    while (!done) {
        // Atomic with relaxed order reason: mutex synchronization
        auto cur_state = state_.load(std::memory_order_relaxed);
        if (LIKELY(cur_state == UNLOCKED)) {
            // Unlocked, so we can acquire the write lock.
            // Do CAS in case another thread beats us and acquires the read lock first.
            done = state_.compare_exchange_weak(cur_state, WRITE_LOCKED, std::memory_order_acquire);
        } else {
            // Wait until RWLock is unlocked
            if (!WaitBrieflyFor(&state_, [](int32_t state) { return state == UNLOCKED; })) {
                // WaitBrieflyFor failed, go to futex wait
                // Increment waiters count.
                IncrementWaiters();
                // Retry the wait until the lock is not held. With more than one reader, a failed
                // cur_state check does not mean the lock has become unlocked.
                while (cur_state != UNLOCKED) {
                    // NOLINTNEXTLINE(hicpp-signed-bitwise)
                    if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
                        if ((errno != EAGAIN) && (errno != EINTR)) {
                            LOG(FATAL, COMMON) << "Futex wait failed!";
                        }
                    }
                    // Atomic with relaxed order reason: mutex synchronization
                    cur_state = state_.load(std::memory_order_relaxed);
                }
                DecrementWaiters();
            }
        }
    }
    // RWLock is held now
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(state_.load(std::memory_order_relaxed) == WRITE_LOCKED);
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0);
    // Atomic with relaxed order reason: mutex synchronization
    exclusive_owner_.store(current_tid, std::memory_order_relaxed);
}

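// Slow path used by ReadLock() when the lock is write-held (the fast path presumably lives in
// mutex.h): spin briefly waiting for the writer to leave, then fall back to a futex wait.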
void RWLock::HandleReadLockWait(int32_t cur_state)
{
    // Wait until RWLock WriteLock is unlocked
    if (!WaitBrieflyFor(&state_, [](int32_t state) { return state >= UNLOCKED; })) {
        // WaitBrieflyFor failed, go to futex wait
        IncrementWaiters();
        // Retry wait until WriteLock not held.
        while (cur_state == WRITE_LOCKED) {
            // NOLINTNEXTLINE(hicpp-signed-bitwise)
            if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
                if ((errno != EAGAIN) && (errno != EINTR)) {
                    LOG(FATAL, COMMON) << "Futex wait failed!";
                }
            }
            // Atomic with relaxed order reason: mutex synchronization
            cur_state = state_.load(std::memory_order_relaxed);
        }
        DecrementWaiters();
    }
}

bool RWLock::TryReadLock()
{
    bool done = false;
    // Atomic with relaxed order reason: mutex synchronization
    auto cur_state = state_.load(std::memory_order_relaxed);
    while (!done) {
        if (cur_state >= UNLOCKED) {
            auto new_state = cur_state + READ_INCREMENT;
            // cur_state should be updated with the fetched value on failure
            done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire);
        } else {
            // RWLock is write-held, trylock failed.
            return false;
        }
    }
    ASSERT(!HasExclusiveHolder());
    return true;
}

bool RWLock::TryWriteLock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    bool done = false;
    // Atomic with relaxed order reason: mutex synchronization
    auto cur_state = state_.load(std::memory_order_relaxed);
    while (!done) {
        if (LIKELY(cur_state == UNLOCKED)) {
            // Unlocked, so we can acquire the write lock.
            // Do CAS in case another thread beats us and acquires the read lock first.
            // cur_state should be updated with the fetched value on failure
            done = state_.compare_exchange_weak(cur_state, WRITE_LOCKED, std::memory_order_acquire);
        } else {
            // RWLock is held, trylock failed.
            return false;
        }
    }
    // RWLock is held now
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(state_.load(std::memory_order_relaxed) == WRITE_LOCKED);
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0);
    // Atomic with relaxed order reason: mutex synchronization
    exclusive_owner_.store(current_tid, std::memory_order_relaxed);
    return true;
}

void RWLock::WriteUnlock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    ASSERT(IsExclusiveHeld(current_tid));

    bool done = false;
    // Atomic with relaxed order reason: mutex synchronization
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    // CAS is weak and might fail spuriously, so do it in a loop
    while (!done) {
        if (LIKELY(cur_state == WRITE_LOCKED)) {
            // Reset exclusive owner before changing state to avoid check failures if another thread sees UNLOCKED
            // Atomic with relaxed order reason: mutex synchronization
            exclusive_owner_.store(0, std::memory_order_relaxed);
            // Change the state to unlocked with a release store.
            // The waiters_ load must not be reordered before the state_ store, so both use seq_cst.
            // cur_state should be updated with the fetched value on failure
            done = state_.compare_exchange_weak(cur_state, UNLOCKED, std::memory_order_seq_cst);
            if (LIKELY(done)) {
                // This is a write unlock; all waiters could be readers, so we need to wake them all.
                // Atomic with seq_cst order reason: mutex synchronization
                if (waiters_.load(std::memory_order_seq_cst) > 0) {
                    // NOLINTNEXTLINE(hicpp-signed-bitwise)
                    futex(GetStateAddr(), FUTEX_WAKE_PRIVATE, WAKE_ALL, nullptr, nullptr, 0);
                }
            }
        } else {
            LOG(FATAL, COMMON) << "RWLock WriteUnlock got unexpected state, RWLock is not write-locked?";
        }
    }
}

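// Futex condition variable: cond_ is a generation counter that SignalCount() bumps; Wait() loads
// it, releases the mutex, and then sleeps on the futex only if cond_ is still unchanged, which is
// what prevents a lost wakeup between the unlock and the sleep. mutex_ptr_ pins the single mutex
// this condvar may be used with.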
ConditionVariable::~ConditionVariable()
{
#ifndef PANDA_TARGET_MOBILE
    if (!Mutex::DoNotCheckOnDeadlock()) {
#endif  // PANDA_TARGET_MOBILE
        // Atomic with relaxed order reason: mutex synchronization
        if (waiters_.load(std::memory_order_relaxed) != 0) {
            LOG(FATAL, COMMON) << "CondVar destruction failed; waiters_ is non-zero!";
        }
#ifndef PANDA_TARGET_MOBILE
    } else {
        LOG(WARNING, COMMON) << "Deadlock detected, ignoring CondVar";
    }
#endif  // PANDA_TARGET_MOBILE
}

void ConditionVariable::Wait(Mutex *mutex)
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    if (!mutex->IsHeld(current_tid)) {
        LOG(FATAL, COMMON) << "CondVar Wait failed; provided mutex is not held by current thread";
    }

    // It's undefined behavior to call Wait with different mutexes on the same condvar
    Mutex *old_mutex = nullptr;
    // Atomic with relaxed order reason: mutex synchronization
    while (!mutex_ptr_.compare_exchange_weak(old_mutex, mutex, std::memory_order_relaxed)) {
        // CAS failed: either it was a spurious failure (old value is still nullptr), or we must
        // make sure the stored mutex pointer equals the current one.
        if (old_mutex != mutex && old_mutex != nullptr) {
            LOG(FATAL, COMMON) << "CondVar Wait failed; mutex_ptr_ doesn't equal the provided mutex";
        }
    }

    // Atomic with relaxed order reason: mutex synchronization
    waiters_.fetch_add(1, std::memory_order_relaxed);
    mutex->IncrementWaiters();
    auto old_count = mutex->GetRecursiveCount();
    mutex->SetRecursiveCount(1);
    // Atomic with relaxed order reason: mutex synchronization
    auto cur_cond = cond_.load(std::memory_order_relaxed);
    mutex->Unlock();
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    if (futex(GetCondAddr(), FUTEX_WAIT_PRIVATE, cur_cond, nullptr, nullptr, 0) != 0) {
        if ((errno != EAGAIN) && (errno != EINTR)) {
            LOG(FATAL, COMMON) << "Futex wait failed!";
        }
    }
    mutex->Lock();
    mutex->SetRecursiveCount(old_count);
    mutex->DecrementWaiters();
    // Atomic with relaxed order reason: mutex synchronization
    waiters_.fetch_sub(1, std::memory_order_relaxed);
}

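// Convert a relative timeout given as milliseconds plus extra nanoseconds into a timespec,
// normalizing tv_nsec into the [0, 1e9) range.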
struct timespec ConvertTime(uint64_t ms, uint64_t ns)
{
    struct timespec time = {0, 0};
    const int64_t MILLISECONDS_PER_SEC = 1000;
    const int64_t NANOSECONDS_PER_MILLISEC = 1000000;
    const int64_t NANOSECONDS_PER_SEC = 1000000000;
    auto seconds = static_cast<time_t>(ms / MILLISECONDS_PER_SEC);
    auto nanoseconds = static_cast<time_t>((ms % MILLISECONDS_PER_SEC) * NANOSECONDS_PER_MILLISEC + ns);
    time.tv_sec += seconds;
    time.tv_nsec += nanoseconds;
    if (time.tv_nsec >= NANOSECONDS_PER_SEC) {
        time.tv_nsec -= NANOSECONDS_PER_SEC;
        time.tv_sec++;
    }
    return time;
}

bool ConditionVariable::TimedWait(Mutex *mutex, uint64_t ms, uint64_t ns, bool is_absolute)
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    if (!mutex->IsHeld(current_tid)) {
        LOG(FATAL, COMMON) << "CondVar Wait failed; provided mutex is not held by current thread";
    }

    // It's undefined behavior to call Wait with different mutexes on the same condvar
    Mutex *old_mutex = nullptr;
    // Atomic with relaxed order reason: mutex synchronization
    while (!mutex_ptr_.compare_exchange_weak(old_mutex, mutex, std::memory_order_relaxed)) {
        // CAS failed: either it was a spurious failure (old value is still nullptr), or we must
        // make sure the stored mutex pointer equals the current one.
        if (old_mutex != mutex && old_mutex != nullptr) {
            LOG(FATAL, COMMON) << "CondVar Wait failed; mutex_ptr_ doesn't equal the provided mutex";
        }
    }

    bool timeout = false;
    struct timespec time = ConvertTime(ms, ns);
    // Atomic with relaxed order reason: mutex synchronization
    waiters_.fetch_add(1, std::memory_order_relaxed);
    mutex->IncrementWaiters();
    auto old_count = mutex->GetRecursiveCount();
    mutex->SetRecursiveCount(1);
    // Atomic with relaxed order reason: mutex synchronization
    auto cur_cond = cond_.load(std::memory_order_relaxed);
    mutex->Unlock();
    int futex_call_res = 0;
    if (is_absolute) {
        // FUTEX_WAIT_BITSET uses absolute time
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        static constexpr int WAIT_BITSET = FUTEX_WAIT_BITSET_PRIVATE;
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        static constexpr int MATCH_ANY = FUTEX_BITSET_MATCH_ANY;
        futex_call_res = futex(GetCondAddr(), WAIT_BITSET, cur_cond, &time, nullptr, MATCH_ANY);
    } else {
        // FUTEX_WAIT uses relative time
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        futex_call_res = futex(GetCondAddr(), FUTEX_WAIT_PRIVATE, cur_cond, &time, nullptr, 0);
    }
    if (futex_call_res != 0) {
        if (errno == ETIMEDOUT) {
            timeout = true;
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
            LOG(FATAL, COMMON) << "Futex wait failed!";
        }
    }
    mutex->Lock();
    mutex->SetRecursiveCount(old_count);
    mutex->DecrementWaiters();
    // Atomic with relaxed order reason: mutex synchronization
    waiters_.fetch_sub(1, std::memory_order_relaxed);
    return timeout;
}

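// Wake up to to_wake waiters; to_wake is presumably WAKE_ONE for Signal() and WAKE_ALL for
// SignalAll() (see mutex.h). If the caller holds the associated mutex, waking the waiters now
// would only make them block on that mutex immediately, so they are requeued onto the mutex's
// futex instead (avoiding a thundering herd) and get woken as the mutex is unlocked.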
void ConditionVariable::SignalCount(int32_t to_wake)
{
    // Atomic with relaxed order reason: mutex synchronization
    if (waiters_.load(std::memory_order_relaxed) == 0) {
        // No waiters, do nothing
        return;
    }

    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    // Atomic with relaxed order reason: mutex synchronization
    auto mutex = mutex_ptr_.load(std::memory_order_relaxed);
    // If this condvar has waiters, mutex_ptr_ should be set
    ASSERT(mutex != nullptr);
    // Atomic with relaxed order reason: mutex synchronization
    cond_.fetch_add(1, std::memory_order_relaxed);
    if (mutex->IsHeld(current_tid)) {
        // This thread owns the mutex, so requeue waiters onto the mutex's wait queue.
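        // For FUTEX_REQUEUE, the fourth (timeout) argument is reinterpreted by the kernel as
        // val2, the maximum number of waiters to requeue; val is 0, so no one is woken here.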
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        bool success = futex(GetCondAddr(), FUTEX_REQUEUE_PRIVATE, 0, reinterpret_cast<const timespec *>(to_wake),
                             mutex->GetStateAddr(), 0) != -1;
        if (!success) {
            LOG(FATAL, COMMON) << "Futex requeue failed!";
        }
    } else {
        // Mutex is not held by this thread, do wake
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        futex(GetCondAddr(), FUTEX_WAKE_PRIVATE, to_wake, nullptr, nullptr, 0);
    }
}

}  // namespace panda::os::unix::memory::futex