/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"
#include "utils/logger.h"
#include "utils/type_helpers.h"

#include <cstring>
#include <cerrno>
#include <ctime>

#include <sched.h>

namespace panda::os::unix::memory::futex {

// Avoid repeatedly calling GetCurrentThreadId by caching the tid in a thread-local variable.
thread_local thread::ThreadId current_tid {0};

void PostFork()
{
    current_tid = os::thread::GetCurrentThreadId();
}
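
// The cached tid is inherited across fork() but is wrong in the child, so the runtime
// must call PostFork() in the child before using these primitives again. A minimal
// wiring sketch (assumption: the actual registration lives elsewhere in the runtime,
// not in this file):
//
//     #include <pthread.h>
//     static void RegisterForkHandler()
//     {
//         // The third argument runs in the child process after fork().
//         pthread_atfork(nullptr, nullptr, panda::os::unix::memory::futex::PostFork);
//     }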

// Spin for small arguments and yield for larger ones.
static void BackOff(uint32_t i)
{
    static constexpr uint32_t SPIN_MAX = 10;
    if (i <= SPIN_MAX) {
        volatile uint32_t x = 0;  // Volatile to make sure the loop is not optimized out.
        const uint32_t spin_count = 10 * i;
        for (uint32_t spin = 0; spin < spin_count; spin++) {
            ++x;
        }
    } else {
        thread::Yield();
    }
}
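
// Worked example of the back-off schedule: BackOff(1) busy-spins 10 iterations,
// BackOff(10) spins 100, and any argument above SPIN_MAX yields the CPU instead,
// so a caller passing an increasing i gets linearly growing spins that degrade
// into a sched_yield-style wait once the lock stays contended.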

// Wait until pred is true or the iteration limit is reached.
// Return true if the predicate test succeeded, false if we gave up.
template <typename Pred>
static inline bool WaitBrieflyFor(std::atomic_int *addr, Pred pred)
{
    // We don't want to make a syscall (and switch context) here, so the back-off argument
    // is capped at MAX_BACK_OFF, which keeps BackOff in its spinning branch.
    static constexpr uint32_t MAX_BACK_OFF = 10;
    static constexpr uint32_t MAX_ITER = 50;
    for (uint32_t i = 1; i <= MAX_ITER; i++) {
        BackOff(std::min(i, MAX_BACK_OFF));
        if (pred(addr->load(std::memory_order_relaxed))) {
            return true;
        }
    }
    return false;
}
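
// Worked budget: iterations 1..10 spin 10, 20, ..., 100 increments (550 total), and
// the remaining 40 iterations spin 100 each (4000 more), so a failed WaitBrieflyFor
// costs at most 4550 trivial loop iterations plus 50 relaxed loads before the caller
// falls back to a futex wait.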

Mutex::~Mutex()
{
    if (state_and_waiters_.load(std::memory_order_relaxed) != 0) {
        LOG(FATAL, COMMON) << "Mutex destruction failed; state_and_waiters_ is non zero!";
    } else if (exclusive_owner_.load(std::memory_order_relaxed) != 0) {
        LOG(FATAL, COMMON) << "Mutex destruction failed; mutex has an owner!";
    }
}

void Mutex::Lock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    if (recursive_mutex_) {
        if (IsHeld(current_tid)) {
            recursive_count_++;
            return;
        }
    }

    ASSERT(!IsHeld(current_tid));
    bool done = false;
    while (!done) {
        auto cur_state = state_and_waiters_.load(std::memory_order_relaxed);
        if (LIKELY((helpers::ToUnsigned(cur_state) & helpers::ToUnsigned(HELD_MASK)) == 0)) {
            // Lock not held, try acquiring it.
            auto new_state = static_cast<int32_t>(helpers::ToUnsigned(cur_state) | helpers::ToUnsigned(HELD_MASK));
            done = state_and_waiters_.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire);
        } else {
            // Failed to acquire; wait for unlock.
            auto res = WaitBrieflyFor(&state_and_waiters_, [](int32_t state) {
                return (helpers::ToUnsigned(state) & helpers::ToUnsigned(HELD_MASK)) == 0;
            });
            if (!res) {
                // WaitBrieflyFor failed, fall back to futex wait.
                // Increment the waiters count.
                IncrementWaiters();
                // Update cur_state to match the expected value of state_and_waiters_.
                cur_state += WAITER_INCREMENT;
                // Retry waiting until the lock is released. Under heavy contention the cur_state check can
                // fail immediately because other threads keep incrementing and decrementing the waiter count.
                // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                while ((helpers::ToUnsigned(cur_state) & helpers::ToUnsigned(HELD_MASK)) != 0) {
                    // NOLINTNEXTLINE(hicpp-signed-bitwise), CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                    if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
                        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                        if ((errno != EAGAIN) && (errno != EINTR)) {
                            LOG(FATAL, COMMON) << "Futex wait failed!";
                        }
                    }
                    cur_state = state_and_waiters_.load(std::memory_order_relaxed);
                }
                DecrementWaiters();
            }
        }
    }
    // Mutex is held now.
    ASSERT((helpers::ToUnsigned(state_and_waiters_.load(std::memory_order_relaxed)) & helpers::ToUnsigned(HELD_MASK)) !=
           0);
    ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0);
    exclusive_owner_.store(current_tid, std::memory_order_relaxed);
    recursive_count_++;
    ASSERT(recursive_count_ == 1);  // Should be 1 here; recursive acquisition takes the separate path above.
}
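
// A sketch of the state_and_waiters_ encoding this path relies on (HELD_MASK and
// WAITER_INCREMENT are defined in mutex.h; the exact bit layout is an assumption here):
// the HELD_MASK bit records whether the lock is taken, and the bits above it count
// futex waiters in WAITER_INCREMENT steps.
//
//     state_and_waiters_ == 0                                 // unlocked, no waiters
//     state_and_waiters_ == HELD_MASK                         // locked, no waiters
//     state_and_waiters_ == HELD_MASK + 2 * WAITER_INCREMENT  // locked, two threads in futex()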

bool Mutex::TryLock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    if (recursive_mutex_) {
        if (IsHeld(current_tid)) {
            recursive_count_++;
            return true;
        }
    }

    ASSERT(!IsHeld(current_tid));
    bool done = false;
    auto cur_state = state_and_waiters_.load(std::memory_order_relaxed);
    while (!done) {
        if (LIKELY((helpers::ToUnsigned(cur_state) & helpers::ToUnsigned(HELD_MASK)) == 0)) {
            // Lock not held, retry the CAS until it succeeds.
            auto new_state = static_cast<int32_t>(helpers::ToUnsigned(cur_state) | helpers::ToUnsigned(HELD_MASK));
            // cur_state is updated with the fetched value on failure.
            done = state_and_waiters_.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire);
        } else {
            // Lock is held by someone, exit.
            return false;
        }
    }
    // Mutex is held now.
    ASSERT((helpers::ToUnsigned(state_and_waiters_.load(std::memory_order_relaxed)) & helpers::ToUnsigned(HELD_MASK)) !=
           0);
    ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0);
    exclusive_owner_.store(current_tid, std::memory_order_relaxed);
    recursive_count_++;
    ASSERT(recursive_count_ == 1);  // Should be 1 here; recursive acquisition takes the separate path above.
    return true;
}

bool Mutex::TryLockWithSpinning()
{
    static constexpr int MAX_ITER = 10;
    for (int i = 0; i < MAX_ITER; i++) {
        if (TryLock()) {
            return true;
        }
        auto res = WaitBrieflyFor(&state_and_waiters_, [](int32_t state) {
            return (helpers::ToUnsigned(state) & helpers::ToUnsigned(HELD_MASK)) == 0;
        });
        if (!res) {
            // WaitBrieflyFor failed, which means the lock stayed held; give up.
            return false;
        }
    }
    return false;
}
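
// A minimal usage sketch (DoShortCriticalSection is a hypothetical caller-side
// function): TryLockWithSpinning suits paths that prefer a bounded wait over
// blocking in the kernel.
//
//     if (lock.TryLockWithSpinning()) {
//         DoShortCriticalSection();
//         lock.Unlock();
//     } else {
//         // Fall back: take the blocking path with lock.Lock(), or defer the work.
//     }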

void Mutex::Unlock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    if (!IsHeld(current_tid)) {
        LOG(FATAL, COMMON) << "Trying to unlock a mutex which is not held by the current thread";
    }
    recursive_count_--;
    if (recursive_mutex_) {
        if (recursive_count_ > 0) {
            return;
        }
    }

    ASSERT(recursive_count_ == 0);  // Should be 0 here; recursive release takes the separate path above.
    bool done = false;
    auto cur_state = state_and_waiters_.load(std::memory_order_relaxed);
    // Retry CAS until success.
    while (!done) {
        auto new_state = helpers::ToUnsigned(cur_state) & ~helpers::ToUnsigned(HELD_MASK);  // State without the held bit.
        if ((helpers::ToUnsigned(cur_state) & helpers::ToUnsigned(HELD_MASK)) == 0) {
            LOG(FATAL, COMMON) << "Mutex unlock got an unexpected state, maybe the mutex is already unlocked?";
        }
        // Reset the exclusive owner before changing the state to avoid check failures if another thread sees UNLOCKED.
        exclusive_owner_.store(0, std::memory_order_relaxed);
        // cur_state is updated with the fetched value on failure.
        done = state_and_waiters_.compare_exchange_weak(cur_state, new_state, std::memory_order_release);
        if (LIKELY(done)) {
            // If we had waiters, we need to make the futex call.
            if (UNLIKELY(new_state != 0)) {
                // NOLINTNEXTLINE(hicpp-signed-bitwise)
                futex(GetStateAddr(), FUTEX_WAKE_PRIVATE, WAKE_ONE, nullptr, nullptr, 0);
            }
        }
    }
}
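
// End-to-end pairing sketch (Worker and shared_counter are hypothetical names):
// Lock() publishes contention through the waiter bits of the futex word, and
// Unlock() wakes exactly one waiter when new_state still carries waiter bits.
//
//     Mutex g_lock;
//     int shared_counter = 0;
//     void Worker()
//     {
//         g_lock.Lock();
//         ++shared_counter;  // Protected by g_lock.
//         g_lock.Unlock();
//     }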

void Mutex::LockForOther(thread::ThreadId thread)
{
    ASSERT(state_and_waiters_.load() == 0);
    state_and_waiters_.store(HELD_MASK, std::memory_order_relaxed);
    recursive_count_ = 1;
    exclusive_owner_.store(thread, std::memory_order_relaxed);
}

void Mutex::UnlockForOther(thread::ThreadId thread)
{
    if (!IsHeld(thread)) {
        LOG(FATAL, COMMON) << "Unlocking for a thread which doesn't own this mutex";
    }
    ASSERT(state_and_waiters_.load() == HELD_MASK);
    state_and_waiters_.store(0, std::memory_order_relaxed);
    recursive_count_ = 0;
    exclusive_owner_.store(0, std::memory_order_relaxed);
}
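
// Note: LockForOther/UnlockForOther write the state with plain relaxed stores and
// never issue a futex wake, so (as the ASSERTs on a waiter-free state suggest) they
// are only safe when no other thread can be contending on this mutex concurrently.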

RWLock::~RWLock()
{
    if (state_.load(std::memory_order_relaxed) != 0) {
        LOG(FATAL, COMMON) << "RWLock destruction failed; state_ is non zero!";
    } else if (exclusive_owner_.load(std::memory_order_relaxed) != 0) {
        LOG(FATAL, COMMON) << "RWLock destruction failed; RWLock has an owner!";
    } else if (waiters_.load(std::memory_order_relaxed) != 0) {
        LOG(FATAL, COMMON) << "RWLock destruction failed; RWLock has waiters!";
    }
}

void RWLock::WriteLock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    bool done = false;
    while (!done) {
        auto cur_state = state_.load(std::memory_order_relaxed);
        if (LIKELY(cur_state == UNLOCKED)) {
            // Unlocked, we can acquire the write lock.
            // Do a CAS in case another thread beats us and acquires a read lock first.
            done = state_.compare_exchange_weak(cur_state, WRITE_LOCKED, std::memory_order_acquire);
        } else {
            // Wait until the RWLock is unlocked.
            if (!WaitBrieflyFor(&state_, [](int32_t state) { return state == UNLOCKED; })) {
                // WaitBrieflyFor failed, fall back to futex wait.
                // Increment the waiters count.
                IncrementWaiters();
                // Retry waiting until the lock is released. With more than one reader, a cur_state check
                // failure doesn't mean the lock became unlocked.
                // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                while (cur_state != UNLOCKED) {
                    // NOLINTNEXTLINE(hicpp-signed-bitwise), CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                    if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
                        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                        if ((errno != EAGAIN) && (errno != EINTR)) {
                            LOG(FATAL, COMMON) << "Futex wait failed!";
                        }
                    }
                    cur_state = state_.load(std::memory_order_relaxed);
                }
                DecrementWaiters();
            }
        }
    }
    // RWLock is held now.
    ASSERT(state_.load(std::memory_order_relaxed) == WRITE_LOCKED);
    ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0);
    exclusive_owner_.store(current_tid, std::memory_order_relaxed);
}
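
// A sketch of the state_ encoding these paths rely on (UNLOCKED, WRITE_LOCKED and
// READ_INCREMENT come from mutex.h; the concrete values are an assumption here):
//
//     state_ == UNLOCKED                   // no holders
//     state_ == WRITE_LOCKED               // one exclusive writer (below UNLOCKED)
//     state_ == N * READ_INCREMENT, N > 0  // N concurrent readers
//
// This is why the reader-side predicates below test `state >= UNLOCKED`, i.e. "no writer".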

void RWLock::HandleReadLockWait(int32_t cur_state)
{
    // Wait until the RWLock's write lock is released.
    if (!WaitBrieflyFor(&state_, [](int32_t state) { return state >= UNLOCKED; })) {
        // WaitBrieflyFor failed, fall back to futex wait.
        IncrementWaiters();
        // Retry waiting until the write lock is released.
        while (cur_state == WRITE_LOCKED) {
            // NOLINTNEXTLINE(hicpp-signed-bitwise), CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
            if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
                // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                if ((errno != EAGAIN) && (errno != EINTR)) {
                    LOG(FATAL, COMMON) << "Futex wait failed!";
                }
            }
            cur_state = state_.load(std::memory_order_relaxed);
        }
        DecrementWaiters();
    }
}

bool RWLock::TryReadLock()
{
    bool done = false;
    auto cur_state = state_.load(std::memory_order_relaxed);
    while (!done) {
        if (cur_state >= UNLOCKED) {
            auto new_state = cur_state + READ_INCREMENT;
            // cur_state is updated with the fetched value on failure.
            done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire);
        } else {
            // RWLock is write-held, trylock failed.
            return false;
        }
    }
    ASSERT(!HasExclusiveHolder());
    return true;
}
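
// For reference, a blocking read lock built from the pieces above would loop a CAS
// of `cur_state + READ_INCREMENT` and call HandleReadLockWait() whenever it observes
// a writer. This is a sketch of the presumed ReadLock in mutex.h, not a copy of it:
//
//     while (true) {
//         auto cur_state = state_.load(std::memory_order_relaxed);
//         if (cur_state >= UNLOCKED) {
//             if (state_.compare_exchange_weak(cur_state, cur_state + READ_INCREMENT,
//                                              std::memory_order_acquire)) {
//                 break;
//             }
//         } else {
//             HandleReadLockWait(cur_state);
//         }
//     }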

bool RWLock::TryWriteLock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    bool done = false;
    auto cur_state = state_.load(std::memory_order_relaxed);
    while (!done) {
        if (LIKELY(cur_state == UNLOCKED)) {
            // Unlocked, we can acquire the write lock.
            // Do a CAS in case another thread beats us and acquires a read lock first.
            // cur_state is updated with the fetched value on failure.
            done = state_.compare_exchange_weak(cur_state, WRITE_LOCKED, std::memory_order_acquire);
        } else {
            // RWLock is held, trylock failed.
            return false;
        }
    }
    // RWLock is held now.
    ASSERT(state_.load(std::memory_order_relaxed) == WRITE_LOCKED);
    ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0);
    exclusive_owner_.store(current_tid, std::memory_order_relaxed);
    return true;
}

void RWLock::WriteUnlock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    ASSERT(IsExclusiveHeld(current_tid));

    bool done = false;
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    // CAS is weak and might fail spuriously, so do it in a loop.
    while (!done) {
        if (LIKELY(cur_state == WRITE_LOCKED)) {
            // Reset the exclusive owner before changing the state to avoid check failures if another thread sees UNLOCKED.
            exclusive_owner_.store(0, std::memory_order_relaxed);
            // Change the state to unlocked with a release store.
            // The waiters_ load must not be reordered before the state_ store, so both are seq_cst.
            // cur_state is updated with the fetched value on failure.
            done = state_.compare_exchange_weak(cur_state, UNLOCKED, std::memory_order_seq_cst);
            if (LIKELY(done)) {
                // This is a write unlock; all waiters could be read locks, so we need to wake them all.
                // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                if (waiters_.load(std::memory_order_seq_cst) > 0) {
                    // NOLINTNEXTLINE(hicpp-signed-bitwise)
                    futex(GetStateAddr(), FUTEX_WAKE_PRIVATE, WAKE_ALL, nullptr, nullptr, 0);
                }
            }
        } else {
            LOG(FATAL, COMMON) << "RWLock WriteUnlock got an unexpected state, maybe the RWLock is not write-locked?";
        }
    }
}
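
// Why seq_cst on both sides: if the waiters_ load above were allowed to move before
// the state_ exchange (the classic store-load reordering), this thread could read
// waiters_ == 0 while a locker, still seeing WRITE_LOCKED, increments waiters_ and
// enters futex wait; the wake would then be skipped and that waiter could sleep
// forever. Seq_cst ordering of the exchange and the waiters_ load, together with
// the waiter-count update on the lock side, rules that interleaving out.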

ConditionVariable::~ConditionVariable()
{
    if (waiters_.load(std::memory_order_relaxed) != 0) {
        LOG(FATAL, COMMON) << "CondVar destruction failed; waiters_ is non zero!";
    }
}

void ConditionVariable::Wait(Mutex *mutex)
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    if (!mutex->IsHeld(current_tid)) {
        LOG(FATAL, COMMON) << "CondVar Wait failed; provided mutex is not held by current thread";
    }

    // It's undefined behavior to call Wait with different mutexes on the same condvar.
    Mutex *old_mutex = nullptr;
    while (!mutex_ptr_.compare_exchange_weak(old_mutex, mutex, std::memory_order_relaxed)) {
        // CAS failed: either it was a spurious failure and the old value is still nullptr,
        // or we must check that the stored mutex pointer equals the provided one.
        if (old_mutex != mutex && old_mutex != nullptr) {
            LOG(FATAL, COMMON) << "CondVar Wait failed; mutex_ptr_ doesn't equal the provided mutex";
        }
    }

    waiters_.fetch_add(1, std::memory_order_relaxed);
    mutex->IncrementWaiters();
    auto old_count = mutex->recursive_count_;
    mutex->recursive_count_ = 1;
    auto cur_cond = cond_.load(std::memory_order_relaxed);
    mutex->Unlock();
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    if (futex(GetCondAddr(), FUTEX_WAIT_PRIVATE, cur_cond, nullptr, nullptr, 0) != 0) {
        if ((errno != EAGAIN) && (errno != EINTR)) {
            LOG(FATAL, COMMON) << "Futex wait failed!";
        }
    }
    mutex->Lock();
    mutex->recursive_count_ = old_count;
    mutex->DecrementWaiters();
    waiters_.fetch_sub(1, std::memory_order_relaxed);
}
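
// Because a futex wait can return spuriously or on an unrelated wake, callers should
// re-check their predicate in a loop, as with any condition variable. A minimal
// sketch (queue_lock, queue and cv are hypothetical names):
//
//     queue_lock.Lock();
//     while (queue.empty()) {
//         cv.Wait(&queue_lock);  // Atomically releases and re-acquires queue_lock.
//     }
//     auto item = queue.front();
//     queue.pop();
//     queue_lock.Unlock();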

const int64_t MILLISECONDS_PER_SEC = 1000;
const int64_t NANOSECONDS_PER_MILLISEC = 1000000;
const int64_t NANOSECONDS_PER_SEC = 1000000000;

struct timespec ConvertTime(uint64_t ms, uint64_t ns)
{
    struct timespec time = {0, 0};
    auto seconds = static_cast<time_t>(ms / MILLISECONDS_PER_SEC);
    auto nanoseconds = static_cast<time_t>((ms % MILLISECONDS_PER_SEC) * NANOSECONDS_PER_MILLISEC + ns);
    time.tv_sec += seconds;
    time.tv_nsec += nanoseconds;
    if (time.tv_nsec >= NANOSECONDS_PER_SEC) {
        time.tv_nsec -= NANOSECONDS_PER_SEC;
        time.tv_sec++;
    }
    return time;
}
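
// Worked example: ConvertTime(1500, 600000000) splits 1500 ms into 1 s plus 500 ms
// (500000000 ns), adds the extra 600000000 ns to get 1100000000 ns, and normalizes
// the overflow into {tv_sec = 2, tv_nsec = 100000000}. Note the normalization handles
// at most one second of carry, which suffices when ns is a sub-second remainder.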

bool ConditionVariable::TimedWait(Mutex *mutex, uint64_t ms, uint64_t ns, bool is_absolute)
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    if (!mutex->IsHeld(current_tid)) {
        LOG(FATAL, COMMON) << "CondVar TimedWait failed; provided mutex is not held by current thread";
    }

    // It's undefined behavior to call Wait with different mutexes on the same condvar.
    Mutex *old_mutex = nullptr;
    while (!mutex_ptr_.compare_exchange_weak(old_mutex, mutex, std::memory_order_relaxed)) {
        // CAS failed: either it was a spurious failure and the old value is still nullptr,
        // or we must check that the stored mutex pointer equals the provided one.
        if (old_mutex != mutex && old_mutex != nullptr) {
            LOG(FATAL, COMMON) << "CondVar TimedWait failed; mutex_ptr_ doesn't equal the provided mutex";
        }
    }

    bool timeout = false;
    struct timespec time = ConvertTime(ms, ns);
    waiters_.fetch_add(1, std::memory_order_relaxed);
    mutex->IncrementWaiters();
    auto old_count = mutex->recursive_count_;
    mutex->recursive_count_ = 1;
    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_REDUNDANT_INIT)
    auto cur_cond = cond_.load(std::memory_order_relaxed);
    mutex->Unlock();

    int futex_call_res;
    if (is_absolute) {
        // FUTEX_WAIT_BITSET uses absolute time.
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        static constexpr int WAIT_BITSET = FUTEX_WAIT_BITSET_PRIVATE;
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        static constexpr int MATCH_ANY = FUTEX_BITSET_MATCH_ANY;
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        futex_call_res = futex(GetCondAddr(), WAIT_BITSET, cur_cond, &time, nullptr, MATCH_ANY);
    } else {
        // FUTEX_WAIT uses relative time.
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        futex_call_res = futex(GetCondAddr(), FUTEX_WAIT_PRIVATE, cur_cond, &time, nullptr, 0);
    }
    if (futex_call_res != 0) {
        if (errno == ETIMEDOUT) {
            timeout = true;
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
            LOG(FATAL, COMMON) << "Futex wait failed!";
        }
    }
    mutex->Lock();
    mutex->recursive_count_ = old_count;
    mutex->DecrementWaiters();
    waiters_.fetch_sub(1, std::memory_order_relaxed);
    return timeout;
}
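
// A minimal timed-wait sketch (lock, cv and ready are hypothetical names): a true
// return value means the wait timed out rather than being signalled, so the caller
// decides whether to retry or give up.
//
//     lock.Lock();
//     bool timed_out = false;
//     while (!ready && !timed_out) {
//         timed_out = cv.TimedWait(&lock, 100, 0, false);  // Wait up to 100 ms (relative).
//     }
//     lock.Unlock();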

void ConditionVariable::SignalCount(int32_t to_wake)
{
    if (waiters_.load(std::memory_order_relaxed) == 0) {
        // No waiters, do nothing.
        return;
    }

    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    auto mutex = mutex_ptr_.load(std::memory_order_relaxed);
    // If this condvar has waiters, mutex_ptr_ should be set.
    ASSERT(mutex != nullptr);
    cond_.fetch_add(1, std::memory_order_relaxed);
    if (mutex->IsHeld(current_tid)) {
        // This thread owns the mutex, so requeue waiters to the mutex's wait queue instead of
        // waking them only to have them immediately block on the mutex we still hold.
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        bool success = futex(GetCondAddr(), FUTEX_REQUEUE_PRIVATE, 0, reinterpret_cast<const timespec *>(to_wake),
                             mutex->GetStateAddr(), 0) != -1;
        if (!success) {
            LOG(FATAL, COMMON) << "Futex requeue failed!";
        }
    } else {
        // The mutex is not held by this thread, do a plain wake.
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        futex(GetCondAddr(), FUTEX_WAKE_PRIVATE, to_wake, nullptr, nullptr, 0);
    }
}
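
// For reference, the public signalling API presumably wraps this helper, e.g.
// Signal() as SignalCount(WAKE_ONE) and SignalAll() as SignalCount(WAKE_ALL);
// those wrappers would live in mutex.h, so this mapping is an assumption here.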

}  // namespace panda::os::unix::memory::futex