1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "absl/synchronization/mutex.h"
16
17 #ifdef _WIN32
18 #include <windows.h>
19 #endif
20
21 #include <algorithm>
22 #include <atomic>
23 #include <cstdlib>
24 #include <functional>
25 #include <memory>
26 #include <random>
27 #include <string>
28 #include <thread> // NOLINT(build/c++11)
29 #include <type_traits>
30 #include <vector>
31
32 #include "gtest/gtest.h"
33 #include "absl/base/attributes.h"
34 #include "absl/base/config.h"
35 #include "absl/base/internal/sysinfo.h"
36 #include "absl/log/check.h"
37 #include "absl/log/log.h"
38 #include "absl/memory/memory.h"
39 #include "absl/synchronization/internal/thread_pool.h"
40 #include "absl/time/clock.h"
41 #include "absl/time/time.h"
42
43 namespace {
44
45 // TODO(dmauro): Replace with a commandline flag.
46 static constexpr bool kExtendedTest = false;
47
48 std::unique_ptr<absl::synchronization_internal::ThreadPool> CreatePool(
49 int threads) {
50 return absl::make_unique<absl::synchronization_internal::ThreadPool>(threads);
51 }
52
53 std::unique_ptr<absl::synchronization_internal::ThreadPool>
54 CreateDefaultPool() {
55 return CreatePool(kExtendedTest ? 32 : 10);
56 }
57
58 // Hack to schedule a function to run on a thread pool thread after a
59 // duration has elapsed.
60 static void ScheduleAfter(absl::synchronization_internal::ThreadPool *tp,
61 absl::Duration after,
62 const std::function<void()> &func) {
63 tp->Schedule([func, after] {
64 absl::SleepFor(after);
65 func();
66 });
67 }
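//
// A minimal usage sketch (hypothetical values, assuming a pool created with
// CreateDefaultPool() above); the callback runs on a pool thread roughly
// `after` later, so callers must not rely on precise timing:
//
//   auto pool = CreateDefaultPool();
//   ScheduleAfter(pool.get(), absl::Milliseconds(50), [] { /* work */ });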
68
69 struct TestContext {
70 int iterations;
71 int threads;
72 int g0; // global 0
73 int g1; // global 1
74 absl::Mutex mu;
75 absl::CondVar cv;
76 };
77
78 // To test whether the invariant check call occurs
79 static std::atomic<bool> invariant_checked;
80
81 static bool GetInvariantChecked() {
82 return invariant_checked.load(std::memory_order_relaxed);
83 }
84
85 static void SetInvariantChecked(bool new_value) {
86 invariant_checked.store(new_value, std::memory_order_relaxed);
87 }
88
89 static void CheckSumG0G1(void *v) {
90 TestContext *cxt = static_cast<TestContext *>(v);
91 CHECK_EQ(cxt->g0, -cxt->g1) << "Error in CheckSumG0G1";
92 SetInvariantChecked(true);
93 }
94
95 static void TestMu(TestContext *cxt, int c) {
96 for (int i = 0; i != cxt->iterations; i++) {
97 absl::MutexLock l(&cxt->mu);
98 int a = cxt->g0 + 1;
99 cxt->g0 = a;
100 cxt->g1--;
101 }
102 }
103
104 static void TestTry(TestContext *cxt, int c) {
105 for (int i = 0; i != cxt->iterations; i++) {
106 do {
107 std::this_thread::yield();
108 } while (!cxt->mu.TryLock());
109 int a = cxt->g0 + 1;
110 cxt->g0 = a;
111 cxt->g1--;
112 cxt->mu.Unlock();
113 }
114 }
115
116 static void TestR20ms(TestContext *cxt, int c) {
117 for (int i = 0; i != cxt->iterations; i++) {
118 absl::ReaderMutexLock l(&cxt->mu);
119 absl::SleepFor(absl::Milliseconds(20));
120 cxt->mu.AssertReaderHeld();
121 }
122 }
123
124 static void TestRW(TestContext *cxt, int c) {
125 if ((c & 1) == 0) {
126 for (int i = 0; i != cxt->iterations; i++) {
127 absl::WriterMutexLock l(&cxt->mu);
128 cxt->g0++;
129 cxt->g1--;
130 cxt->mu.AssertHeld();
131 cxt->mu.AssertReaderHeld();
132 }
133 } else {
134 for (int i = 0; i != cxt->iterations; i++) {
135 absl::ReaderMutexLock l(&cxt->mu);
136 CHECK_EQ(cxt->g0, -cxt->g1) << "Error in TestRW";
137 cxt->mu.AssertReaderHeld();
138 }
139 }
140 }
141
142 struct MyContext {
143 int target;
144 TestContext *cxt;
145 bool MyTurn();
146 };
147
148 bool MyContext::MyTurn() {
149 TestContext *cxt = this->cxt;
150 return cxt->g0 == this->target || cxt->g0 == cxt->iterations;
151 }
152
153 static void TestAwait(TestContext *cxt, int c) {
154 MyContext mc;
155 mc.target = c;
156 mc.cxt = cxt;
157 absl::MutexLock l(&cxt->mu);
158 cxt->mu.AssertHeld();
159 while (cxt->g0 < cxt->iterations) {
160 cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn));
161 CHECK(mc.MyTurn()) << "Error in TestAwait";
162 cxt->mu.AssertHeld();
163 if (cxt->g0 < cxt->iterations) {
164 int a = cxt->g0 + 1;
165 cxt->g0 = a;
166 mc.target += cxt->threads;
167 }
168 }
169 }
170
171 static void TestSignalAll(TestContext *cxt, int c) {
172 int target = c;
173 absl::MutexLock l(&cxt->mu);
174 cxt->mu.AssertHeld();
175 while (cxt->g0 < cxt->iterations) {
176 while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
177 cxt->cv.Wait(&cxt->mu);
178 }
179 if (cxt->g0 < cxt->iterations) {
180 int a = cxt->g0 + 1;
181 cxt->g0 = a;
182 cxt->cv.SignalAll();
183 target += cxt->threads;
184 }
185 }
186 }
187
188 static void TestSignal(TestContext *cxt, int c) {
189 CHECK_EQ(cxt->threads, 2) << "TestSignal should use 2 threads";
190 int target = c;
191 absl::MutexLock l(&cxt->mu);
192 cxt->mu.AssertHeld();
193 while (cxt->g0 < cxt->iterations) {
194 while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
195 cxt->cv.Wait(&cxt->mu);
196 }
197 if (cxt->g0 < cxt->iterations) {
198 int a = cxt->g0 + 1;
199 cxt->g0 = a;
200 cxt->cv.Signal();
201 target += cxt->threads;
202 }
203 }
204 }
205
206 static void TestCVTimeout(TestContext *cxt, int c) {
207 int target = c;
208 absl::MutexLock l(&cxt->mu);
209 cxt->mu.AssertHeld();
210 while (cxt->g0 < cxt->iterations) {
211 while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
212 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
213 }
214 if (cxt->g0 < cxt->iterations) {
215 int a = cxt->g0 + 1;
216 cxt->g0 = a;
217 cxt->cv.SignalAll();
218 target += cxt->threads;
219 }
220 }
221 }
222
223 static bool G0GE2(TestContext *cxt) { return cxt->g0 >= 2; }
224
225 static void TestTime(TestContext *cxt, int c, bool use_cv) {
226 CHECK_EQ(cxt->iterations, 1) << "TestTime should only use 1 iteration";
227 CHECK_GT(cxt->threads, 2) << "TestTime should use more than 2 threads";
228 const bool kFalse = false;
229 absl::Condition false_cond(&kFalse);
230 absl::Condition g0ge2(G0GE2, cxt);
231 if (c == 0) {
232 absl::MutexLock l(&cxt->mu);
233
234 absl::Time start = absl::Now();
235 if (use_cv) {
236 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
237 } else {
238 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
239 << "TestTime failed";
240 }
241 absl::Duration elapsed = absl::Now() - start;
242 CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
243 << "TestTime failed";
244 CHECK_EQ(cxt->g0, 1) << "TestTime failed";
245
246 start = absl::Now();
247 if (use_cv) {
248 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
249 } else {
250 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
251 << "TestTime failed";
252 }
253 elapsed = absl::Now() - start;
254 CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
255 << "TestTime failed";
256 cxt->g0++;
257 if (use_cv) {
258 cxt->cv.Signal();
259 }
260
261 start = absl::Now();
262 if (use_cv) {
263 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(4));
264 } else {
265 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)))
266 << "TestTime failed";
267 }
268 elapsed = absl::Now() - start;
269 CHECK(absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0))
270 << "TestTime failed";
271 CHECK_GE(cxt->g0, 3) << "TestTime failed";
272
273 start = absl::Now();
274 if (use_cv) {
275 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
276 } else {
277 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
278 << "TestTime failed";
279 }
280 elapsed = absl::Now() - start;
281 CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
282 << "TestTime failed";
283 if (use_cv) {
284 cxt->cv.SignalAll();
285 }
286
287 start = absl::Now();
288 if (use_cv) {
289 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
290 } else {
291 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
292 << "TestTime failed";
293 }
294 elapsed = absl::Now() - start;
295 CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
296 << "TestTime failed";
297 CHECK_EQ(cxt->g0, cxt->threads) << "TestTime failed";
298
299 } else if (c == 1) {
300 absl::MutexLock l(&cxt->mu);
301 const absl::Time start = absl::Now();
302 if (use_cv) {
303 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500));
304 } else {
305 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)))
306 << "TestTime failed";
307 }
308 const absl::Duration elapsed = absl::Now() - start;
309 CHECK(absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9))
310 << "TestTime failed";
311 cxt->g0++;
312 } else if (c == 2) {
313 absl::MutexLock l(&cxt->mu);
314 if (use_cv) {
315 while (cxt->g0 < 2) {
316 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
317 }
318 } else {
319 CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)))
320 << "TestTime failed";
321 }
322 cxt->g0++;
323 } else {
324 absl::MutexLock l(&cxt->mu);
325 if (use_cv) {
326 while (cxt->g0 < 2) {
327 cxt->cv.Wait(&cxt->mu);
328 }
329 } else {
330 cxt->mu.Await(g0ge2);
331 }
332 cxt->g0++;
333 }
334 }
335
336 static void TestMuTime(TestContext *cxt, int c) { TestTime(cxt, c, false); }
337
338 static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); }
339
340 static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
341 const std::function<void(int)> &cb) {
342 mu->Lock();
343 int c = (*c0)++;
344 mu->Unlock();
345 cb(c);
346 absl::MutexLock l(mu);
347 (*c1)++;
348 cv->Signal();
349 }
350
351 // Code common to RunTest() and RunTestWithInvariantDebugging().
352 static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int),
353 int threads, int iterations, int operations) {
354 absl::Mutex mu2;
355 absl::CondVar cv2;
356 int c0 = 0;
357 int c1 = 0;
358 cxt->g0 = 0;
359 cxt->g1 = 0;
360 cxt->iterations = iterations;
361 cxt->threads = threads;
362 absl::synchronization_internal::ThreadPool tp(threads);
363 for (int i = 0; i != threads; i++) {
364 tp.Schedule(std::bind(
365 &EndTest, &c0, &c1, &mu2, &cv2,
366 std::function<void(int)>(std::bind(test, cxt, std::placeholders::_1))));
367 }
368 mu2.Lock();
369 while (c1 != threads) {
370 cv2.Wait(&mu2);
371 }
372 mu2.Unlock();
373 return cxt->g0;
374 }
375
376 // Basis for the parameterized tests configured below.
377 static int RunTest(void (*test)(TestContext *cxt, int), int threads,
378 int iterations, int operations) {
379 TestContext cxt;
380 return RunTestCommon(&cxt, test, threads, iterations, operations);
381 }
382
383 // Like RunTest(), but sets an invariant on the tested Mutex and
384 // verifies that the invariant check happened. The invariant function
385 // will be passed the TestContext* as its arg and must call
386 // SetInvariantChecked(true);
387 #if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
388 static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
389 int threads, int iterations,
390 int operations,
391 void (*invariant)(void *)) {
392 absl::EnableMutexInvariantDebugging(true);
393 SetInvariantChecked(false);
394 TestContext cxt;
395 cxt.mu.EnableInvariantDebugging(invariant, &cxt);
396 int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
397 CHECK(GetInvariantChecked()) << "Invariant not checked";
398 absl::EnableMutexInvariantDebugging(false); // Restore.
399 return ret;
400 }
401 #endif
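// As an illustrative (hypothetical) pairing, a caller could exercise TestMu
// under the CheckSumG0G1 invariant defined above, e.g.:
//
//   RunTestWithInvariantDebugging(&TestMu, /*threads=*/10, /*iterations=*/1000,
//                                 /*operations=*/10000, CheckSumG0G1);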
402
403 // --------------------------------------------------------
404 // Test for fix of bug in TryRemove()
405 struct TimeoutBugStruct {
406 absl::Mutex mu;
407 bool a;
408 int a_waiter_count;
409 };
410
411 static void WaitForA(TimeoutBugStruct *x) {
412 x->mu.LockWhen(absl::Condition(&x->a));
413 x->a_waiter_count--;
414 x->mu.Unlock();
415 }
416
417 static bool NoAWaiters(TimeoutBugStruct *x) { return x->a_waiter_count == 0; }
418
419 // Test that a CondVar.Wait(&mutex) can un-block a call to mutex.Await() in
420 // another thread.
421 TEST(Mutex, CondVarWaitSignalsAwait) {
422 // Use a struct so the lock annotations apply.
423 struct {
424 absl::Mutex barrier_mu;
425 bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
426
427 absl::Mutex release_mu;
428 bool release ABSL_GUARDED_BY(release_mu) = false;
429 absl::CondVar released_cv;
430 } state;
431
432 auto pool = CreateDefaultPool();
433
434 // Thread A. Sets barrier, waits for release using Mutex::Await, then
435 // signals released_cv.
436 pool->Schedule([&state] {
437 state.release_mu.Lock();
438
439 state.barrier_mu.Lock();
440 state.barrier = true;
441 state.barrier_mu.Unlock();
442
443 state.release_mu.Await(absl::Condition(&state.release));
444 state.released_cv.Signal();
445 state.release_mu.Unlock();
446 });
447
448 state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
449 state.barrier_mu.Unlock();
450 state.release_mu.Lock();
451 // Thread A is now blocked on release by way of Mutex::Await().
452
453 // Set release. Calling released_cv.Wait() should un-block thread A,
454 // which will signal released_cv. If not, the test will hang.
455 state.release = true;
456 state.released_cv.Wait(&state.release_mu);
457 state.release_mu.Unlock();
458 }
459
460 // Test that a CondVar.WaitWithTimeout(&mutex) can un-block a call to
461 // mutex.Await() in another thread.
462 TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) {
463 // Use a struct so the lock annotations apply.
464 struct {
465 absl::Mutex barrier_mu;
466 bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
467
468 absl::Mutex release_mu;
469 bool release ABSL_GUARDED_BY(release_mu) = false;
470 absl::CondVar released_cv;
471 } state;
472
473 auto pool = CreateDefaultPool();
474
475 // Thread A. Sets barrier, waits for release using Mutex::Await, then
476 // signals released_cv.
477 pool->Schedule([&state] {
478 state.release_mu.Lock();
479
480 state.barrier_mu.Lock();
481 state.barrier = true;
482 state.barrier_mu.Unlock();
483
484 state.release_mu.Await(absl::Condition(&state.release));
485 state.released_cv.Signal();
486 state.release_mu.Unlock();
487 });
488
489 state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
490 state.barrier_mu.Unlock();
491 state.release_mu.Lock();
492 // Thread A is now blocked on release by way of Mutex::Await().
493
494 // Set release. Calling released_cv.WaitWithTimeout() should un-block thread A,
495 // which will signal released_cv. If not, the test will hang.
496 state.release = true;
497 EXPECT_TRUE(
498 !state.released_cv.WaitWithTimeout(&state.release_mu, absl::Seconds(10)))
499 << "; Unrecoverable test failure: CondVar::WaitWithTimeout did not "
500 "unblock the absl::Mutex::Await call in another thread.";
501
502 state.release_mu.Unlock();
503 }
504
505 // Test for regression of a bug in loop of TryRemove()
506 TEST(Mutex, MutexTimeoutBug) {
507 auto tp = CreateDefaultPool();
508
509 TimeoutBugStruct x;
510 x.a = false;
511 x.a_waiter_count = 2;
512 tp->Schedule(std::bind(&WaitForA, &x));
513 tp->Schedule(std::bind(&WaitForA, &x));
514 absl::SleepFor(absl::Seconds(1)); // Allow first two threads to hang.
515 // The skip field of the second will point to the first because there are
516 // only two.
517
518 // Now cause a thread waiting on an always-false condition to time out.
519 // This would deadlock when the bug was present.
520 bool always_false = false;
521 x.mu.LockWhenWithTimeout(absl::Condition(&always_false),
522 absl::Milliseconds(500));
523
524 // If we get here, the bug is not present. Clean up the state.
525
526 x.a = true; // wake up the two waiters on A
527 x.mu.Await(absl::Condition(&NoAWaiters, &x)); // wait for them to exit
528 x.mu.Unlock();
529 }
530
531 struct CondVarWaitDeadlock : testing::TestWithParam<int> {
532 absl::Mutex mu;
533 absl::CondVar cv;
534 bool cond1 = false;
535 bool cond2 = false;
536 bool read_lock1;
537 bool read_lock2;
538 bool signal_unlocked;
539
540 CondVarWaitDeadlock() {
541 read_lock1 = GetParam() & (1 << 0);
542 read_lock2 = GetParam() & (1 << 1);
543 signal_unlocked = GetParam() & (1 << 2);
544 }
545
546 void Waiter1() {
547 if (read_lock1) {
548 mu.ReaderLock();
549 while (!cond1) {
550 cv.Wait(&mu);
551 }
552 mu.ReaderUnlock();
553 } else {
554 mu.Lock();
555 while (!cond1) {
556 cv.Wait(&mu);
557 }
558 mu.Unlock();
559 }
560 }
561
562 void Waiter2() {
563 if (read_lock2) {
564 mu.ReaderLockWhen(absl::Condition(&cond2));
565 mu.ReaderUnlock();
566 } else {
567 mu.LockWhen(absl::Condition(&cond2));
568 mu.Unlock();
569 }
570 }
571 };
572
573 // Test for a deadlock bug in Mutex::Fer().
574 // The sequence of events that lead to the deadlock is:
575 // 1. waiter1 blocks on cv in read mode (mu bits = 0).
576 // 2. waiter2 blocks on mu in either mode (mu bits = kMuWait).
577 // 3. main thread locks mu, sets cond1, unlocks mu (mu bits = kMuWait).
578 // 4. main thread signals on cv and this eventually calls Mutex::Fer().
579 // Currently Fer wakes waiter1 since mu bits = kMuWait (mutex is unlocked).
580 // Before the bug fix Fer neither woke waiter1 nor queued it on mutex,
581 // which resulted in deadlock.
582 TEST_P(CondVarWaitDeadlock, Test) {
583 auto waiter1 = CreatePool(1);
584 auto waiter2 = CreatePool(1);
585 waiter1->Schedule([this] { this->Waiter1(); });
586 waiter2->Schedule([this] { this->Waiter2(); });
587
588 // Wait while threads block (best-effort is fine).
589 absl::SleepFor(absl::Milliseconds(100));
590
591 // Wake condwaiter.
592 mu.Lock();
593 cond1 = true;
594 if (signal_unlocked) {
595 mu.Unlock();
596 cv.Signal();
597 } else {
598 cv.Signal();
599 mu.Unlock();
600 }
601 waiter1.reset(); // "join" waiter1
602
603 // Wake waiter.
604 mu.Lock();
605 cond2 = true;
606 mu.Unlock();
607 waiter2.reset(); // "join" waiter2
608 }
609
610 INSTANTIATE_TEST_SUITE_P(CondVarWaitDeadlockTest, CondVarWaitDeadlock,
611 ::testing::Range(0, 8),
612 ::testing::PrintToStringParamName());
613
614 // --------------------------------------------------------
615 // Test for fix of bug in DequeueAllWakeable()
616 // Bug was that if there was more than one waiting reader
617 // and all should be woken, the most recently blocked one
618 // would not be.
619
620 struct DequeueAllWakeableBugStruct {
621 absl::Mutex mu;
622 absl::Mutex mu2; // protects all fields below
623 int unfinished_count; // count of unfinished readers; under mu2
624 bool done1; // unfinished_count == 0; under mu2
625 int finished_count; // count of finished readers, under mu2
626 bool done2; // finished_count == 0; under mu2
627 };
628
629 // Test for regression of a bug in loop of DequeueAllWakeable()
630 static void AcquireAsReader(DequeueAllWakeableBugStruct *x) {
631 x->mu.ReaderLock();
632 x->mu2.Lock();
633 x->unfinished_count--;
634 x->done1 = (x->unfinished_count == 0);
635 x->mu2.Unlock();
636 // make sure that both readers acquired mu before we release it.
637 absl::SleepFor(absl::Seconds(2));
638 x->mu.ReaderUnlock();
639
640 x->mu2.Lock();
641 x->finished_count--;
642 x->done2 = (x->finished_count == 0);
643 x->mu2.Unlock();
644 }
645
646 // Test for regression of a bug in loop of DequeueAllWakeable()
647 TEST(Mutex, MutexReaderWakeupBug) {
648 auto tp = CreateDefaultPool();
649
650 DequeueAllWakeableBugStruct x;
651 x.unfinished_count = 2;
652 x.done1 = false;
653 x.finished_count = 2;
654 x.done2 = false;
655 x.mu.Lock(); // acquire mu exclusively
656 // queue two threads that will block on reader locks on x.mu
657 tp->Schedule(std::bind(&AcquireAsReader, &x));
658 tp->Schedule(std::bind(&AcquireAsReader, &x));
659 absl::SleepFor(absl::Seconds(1)); // give time for reader threads to block
660 x.mu.Unlock(); // wake them up
661
662 // both readers should finish promptly
663 EXPECT_TRUE(
664 x.mu2.LockWhenWithTimeout(absl::Condition(&x.done1), absl::Seconds(10)));
665 x.mu2.Unlock();
666
667 EXPECT_TRUE(
668 x.mu2.LockWhenWithTimeout(absl::Condition(&x.done2), absl::Seconds(10)));
669 x.mu2.Unlock();
670 }
671
672 struct LockWhenTestStruct {
673 absl::Mutex mu1;
674 bool cond = false;
675
676 absl::Mutex mu2;
677 bool waiting = false;
678 };
679
680 static bool LockWhenTestIsCond(LockWhenTestStruct *s) {
681 s->mu2.Lock();
682 s->waiting = true;
683 s->mu2.Unlock();
684 return s->cond;
685 }
686
687 static void LockWhenTestWaitForIsCond(LockWhenTestStruct *s) {
688 s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s));
689 s->mu1.Unlock();
690 }
691
692 TEST(Mutex, LockWhen) {
693 LockWhenTestStruct s;
694
695 std::thread t(LockWhenTestWaitForIsCond, &s);
696 s.mu2.LockWhen(absl::Condition(&s.waiting));
697 s.mu2.Unlock();
698
699 s.mu1.Lock();
700 s.cond = true;
701 s.mu1.Unlock();
702
703 t.join();
704 }
705
706 TEST(Mutex, LockWhenGuard) {
707 absl::Mutex mu;
708 int n = 30;
709 bool done = false;
710
711 // We don't inline the lambda because the conversion is ambiguous in MSVC.
712 bool (*cond_eq_10)(int *) = [](int *p) { return *p == 10; };
713 bool (*cond_lt_10)(int *) = [](int *p) { return *p < 10; };
714
715 std::thread t1([&mu, &n, &done, cond_eq_10]() {
716 absl::ReaderMutexLock lock(&mu, absl::Condition(cond_eq_10, &n));
717 done = true;
718 });
719
720 std::thread t2[10];
721 for (std::thread &t : t2) {
722 t = std::thread([&mu, &n, cond_lt_10]() {
723 absl::WriterMutexLock lock(&mu, absl::Condition(cond_lt_10, &n));
724 ++n;
725 });
726 }
727
728 {
729 absl::MutexLock lock(&mu);
730 n = 0;
731 }
732
733 for (std::thread &t : t2) t.join();
734 t1.join();
735
736 EXPECT_TRUE(done);
737 EXPECT_EQ(n, 10);
738 }
739
740 // --------------------------------------------------------
741 // The following test requires Mutex::ReaderLock to be a real shared
742 // lock, which is not the case in all builds.
743 #if !defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
744
745 // Test for fix of bug in UnlockSlow() that incorrectly decremented the reader
746 // count when putting a thread to sleep waiting for a false condition when the
747 // lock was not held.
748
749 // For this bug to strike, we make a thread wait on a free mutex with no
750 // waiters by causing its wakeup condition to be false. Then the
751 // next two acquirers must be readers. The bug causes the lock
752 // to be released when one reader unlocks, rather than when both have unlocked.
753
754 struct ReaderDecrementBugStruct {
755 bool cond; // to delay first thread (under mu)
756 int done; // reference count (under mu)
757 absl::Mutex mu;
758
759 bool waiting_on_cond; // under mu2
760 bool have_reader_lock; // under mu2
761 bool complete; // under mu2
762 absl::Mutex mu2; // > mu
763 };
764
765 // L >= mu, L < mu_waiting_on_cond
766 static bool IsCond(void *v) {
767 ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
768 x->mu2.Lock();
769 x->waiting_on_cond = true;
770 x->mu2.Unlock();
771 return x->cond;
772 }
773
774 // L >= mu
775 static bool AllDone(void *v) {
776 ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
777 return x->done == 0;
778 }
779
780 // L={}
781 static void WaitForCond(ReaderDecrementBugStruct *x) {
782 absl::Mutex dummy;
783 absl::MutexLock l(&dummy);
784 x->mu.LockWhen(absl::Condition(&IsCond, x));
785 x->done--;
786 x->mu.Unlock();
787 }
788
789 // L={}
790 static void GetReadLock(ReaderDecrementBugStruct *x) {
791 x->mu.ReaderLock();
792 x->mu2.Lock();
793 x->have_reader_lock = true;
794 x->mu2.Await(absl::Condition(&x->complete));
795 x->mu2.Unlock();
796 x->mu.ReaderUnlock();
797 x->mu.Lock();
798 x->done--;
799 x->mu.Unlock();
800 }
801
802 // Test for the reader count being decremented incorrectly by a waiter
803 // with a false condition.
804 TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
805 ReaderDecrementBugStruct x;
806 x.cond = false;
807 x.waiting_on_cond = false;
808 x.have_reader_lock = false;
809 x.complete = false;
810 x.done = 2; // initial ref count
811
812 // Run WaitForCond() and wait for it to sleep
813 std::thread thread1(WaitForCond, &x);
814 x.mu2.LockWhen(absl::Condition(&x.waiting_on_cond));
815 x.mu2.Unlock();
816
817 // Run GetReadLock(), and wait for it to get the read lock
818 std::thread thread2(GetReadLock, &x);
819 x.mu2.LockWhen(absl::Condition(&x.have_reader_lock));
820 x.mu2.Unlock();
821
822 // Get the reader lock ourselves, and release it.
823 x.mu.ReaderLock();
824 x.mu.ReaderUnlock();
825
826 // The lock should be held in read mode by GetReadLock().
827 // If we have the bug, the lock will be free.
828 x.mu.AssertReaderHeld();
829
830 // Wake up all the threads.
831 x.mu2.Lock();
832 x.complete = true;
833 x.mu2.Unlock();
834
835 // TODO(delesley): turn on analysis once lock upgrading is supported.
836 // (This call upgrades the lock from shared to exclusive.)
837 x.mu.Lock();
838 x.cond = true;
839 x.mu.Await(absl::Condition(&AllDone, &x));
840 x.mu.Unlock();
841
842 thread1.join();
843 thread2.join();
844 }
845 #endif // !ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
846
847 // Test that we correctly handle the situation when a lock is
848 // held and then destroyed (w/o unlocking).
849 #ifdef ABSL_HAVE_THREAD_SANITIZER
850 // TSAN reports errors when locked Mutexes are destroyed.
851 TEST(Mutex, DISABLED_LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
852 #else
853 TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
854 #endif
855 for (int i = 0; i != 10; i++) {
856 // Create, lock and destroy 10 locks.
857 const int kNumLocks = 10;
858 auto mu = absl::make_unique<absl::Mutex[]>(kNumLocks);
859 for (int j = 0; j != kNumLocks; j++) {
860 if ((j % 2) == 0) {
861 mu[j].WriterLock();
862 } else {
863 mu[j].ReaderLock();
864 }
865 }
866 }
867 }
868
869 // Some functions taking pointers to non-const.
870 bool Equals42(int *p) { return *p == 42; }
871 bool Equals43(int *p) { return *p == 43; }
872
873 // Some functions taking pointers to const.
874 bool ConstEquals42(const int *p) { return *p == 42; }
875 bool ConstEquals43(const int *p) { return *p == 43; }
876
877 // Some function templates taking pointers. Note it's possible for `T` to be
878 // deduced as non-const or const, which creates the potential for ambiguity,
879 // but which the implementation is careful to avoid.
880 template <typename T>
881 bool TemplateEquals42(T *p) {
882 return *p == 42;
883 }
884 template <typename T>
885 bool TemplateEquals43(T *p) {
886 return *p == 43;
887 }
888
889 TEST(Mutex, FunctionPointerCondition) {
890 // Some arguments.
891 int x = 42;
892 const int const_x = 42;
893
894 // Parameter non-const, argument non-const.
895 EXPECT_TRUE(absl::Condition(Equals42, &x).Eval());
896 EXPECT_FALSE(absl::Condition(Equals43, &x).Eval());
897
898 // Parameter const, argument non-const.
899 EXPECT_TRUE(absl::Condition(ConstEquals42, &x).Eval());
900 EXPECT_FALSE(absl::Condition(ConstEquals43, &x).Eval());
901
902 // Parameter const, argument const.
903 EXPECT_TRUE(absl::Condition(ConstEquals42, &const_x).Eval());
904 EXPECT_FALSE(absl::Condition(ConstEquals43, &const_x).Eval());
905
906 // Parameter type deduced, argument non-const.
907 EXPECT_TRUE(absl::Condition(TemplateEquals42, &x).Eval());
908 EXPECT_FALSE(absl::Condition(TemplateEquals43, &x).Eval());
909
910 // Parameter type deduced, argument const.
911 EXPECT_TRUE(absl::Condition(TemplateEquals42, &const_x).Eval());
912 EXPECT_FALSE(absl::Condition(TemplateEquals43, &const_x).Eval());
913
914 // Parameter non-const, argument const is not well-formed.
915 EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(Equals42),
916 decltype(&const_x)>::value));
917 // Validate use of is_constructible by contrasting to a well-formed case.
918 EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(ConstEquals42),
919 decltype(&const_x)>::value));
920 }
921
922 // Example base and derived class for use in predicates and test below. Not a
923 // particularly realistic example, but it suffices for testing purposes.
924 struct Base {
925 explicit Base(int v) : value(v) {}
926 int value;
927 };
928 struct Derived : Base {
929 explicit Derived(int v) : Base(v) {}
930 };
931
932 // Some functions taking pointer to non-const `Base`.
933 bool BaseEquals42(Base *p) { return p->value == 42; }
934 bool BaseEquals43(Base *p) { return p->value == 43; }
935
936 // Some functions taking pointer to const `Base`.
937 bool ConstBaseEquals42(const Base *p) { return p->value == 42; }
938 bool ConstBaseEquals43(const Base *p) { return p->value == 43; }
939
940 TEST(Mutex, FunctionPointerConditionWithDerivedToBaseConversion) {
941 // Some arguments.
942 Derived derived(42);
943 const Derived const_derived(42);
944
945 // Parameter non-const base, argument derived non-const.
946 EXPECT_TRUE(absl::Condition(BaseEquals42, &derived).Eval());
947 EXPECT_FALSE(absl::Condition(BaseEquals43, &derived).Eval());
948
949 // Parameter const base, argument derived non-const.
950 EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &derived).Eval());
951 EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &derived).Eval());
952
953 // Parameter const base, argument derived const.
954 EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
955 EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
956
957 // Parameter const base, argument derived const.
958 EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
959 EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
960
961 // Parameter derived, argument base is not well-formed.
962 bool (*derived_pred)(const Derived *) = [](const Derived *) { return true; };
963 EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
964 Base *>::value));
965 EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
966 const Base *>::value));
967 // Validate use of is_constructible by contrasting to well-formed cases.
968 EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
969 Derived *>::value));
970 EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
971 const Derived *>::value));
972 }
973
974 struct True {
975 template <class... Args>
976 bool operator()(Args...) const {
977 return true;
978 }
979 };
980
981 struct DerivedTrue : True {};
982
983 TEST(Mutex, FunctorCondition) {
984 { // Variadic
985 True f;
986 EXPECT_TRUE(absl::Condition(&f).Eval());
987 }
988
989 { // Inherited
990 DerivedTrue g;
991 EXPECT_TRUE(absl::Condition(&g).Eval());
992 }
993
994 { // lambda
995 int value = 3;
996 auto is_zero = [&value] { return value == 0; };
997 absl::Condition c(&is_zero);
998 EXPECT_FALSE(c.Eval());
999 value = 0;
1000 EXPECT_TRUE(c.Eval());
1001 }
1002
1003 { // bind
1004 int value = 0;
1005 auto is_positive = std::bind(std::less<int>(), 0, std::cref(value));
1006 absl::Condition c(&is_positive);
1007 EXPECT_FALSE(c.Eval());
1008 value = 1;
1009 EXPECT_TRUE(c.Eval());
1010 }
1011
1012 { // std::function
1013 int value = 3;
1014 std::function<bool()> is_zero = [&value] { return value == 0; };
1015 absl::Condition c(&is_zero);
1016 EXPECT_FALSE(c.Eval());
1017 value = 0;
1018 EXPECT_TRUE(c.Eval());
1019 }
1020 }
1021
1022 // --------------------------------------------------------
1023 // Test for bug with pattern of readers using a condvar. The bug was that if a
1024 // reader went to sleep on a condition variable while one or more other readers
1025 // held the lock, but there were no waiters, the reader count (held in the
1026 // mutex word) would be lost. (This is because Enqueue() had at one time
1027 // always placed the thread on the Mutex queue. Later (CL 4075610), to
1028 // tolerate re-entry into Mutex from a Condition predicate, Enqueue() was
1029 // changed so that it could also place a thread on a condition-variable. This
1030 // introduced the case where Enqueue() returned with an empty queue, and this
1031 // case was handled incorrectly in one place.)
1032
1033 static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv,
1034 int *running) {
1035 std::random_device dev;
1036 std::mt19937 gen(dev());
1037 std::uniform_int_distribution<int> random_millis(0, 15);
1038 mu->ReaderLock();
1039 while (*running == 3) {
1040 absl::SleepFor(absl::Milliseconds(random_millis(gen)));
1041 cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen)));
1042 }
1043 mu->ReaderUnlock();
1044 mu->Lock();
1045 (*running)--;
1046 mu->Unlock();
1047 }
1048
1049 static bool IntIsZero(int *x) { return *x == 0; }
1050
1051 // Test for a reader waiting on a condition variable when there are other
1052 // readers but no waiters.
1053 TEST(Mutex, TestReaderOnCondVar) {
1054 auto tp = CreateDefaultPool();
1055 absl::Mutex mu;
1056 absl::CondVar cv;
1057 int running = 3;
1058 tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
1059 tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
1060 absl::SleepFor(absl::Seconds(2));
1061 mu.Lock();
1062 running--;
1063 mu.Await(absl::Condition(&IntIsZero, &running));
1064 mu.Unlock();
1065 }
1066
1067 // --------------------------------------------------------
1068 struct AcquireFromConditionStruct {
1069 absl::Mutex mu0; // protects value, done
1070 int value; // times condition function is called; under mu0,
1071 bool done; // done with test? under mu0
1072 absl::Mutex mu1; // used to attempt to mess up state of mu0
1073 absl::CondVar cv; // so the condition function can be invoked from
1074 // CondVar::Wait().
1075 };
1076
1077 static bool ConditionWithAcquire(AcquireFromConditionStruct *x) {
1078 x->value++; // count times this function is called
1079
1080 if (x->value == 2 || x->value == 3) {
1081 // On the second and third invocation of this function, sleep for 100ms,
1082 // but with the side-effect of altering the state of a Mutex other than
1083 // the one for which this is a condition. The spec now explicitly allows
1084 // this side effect; previously it was illegal.
1085 bool always_false = false;
1086 x->mu1.LockWhenWithTimeout(absl::Condition(&always_false),
1087 absl::Milliseconds(100));
1088 x->mu1.Unlock();
1089 }
1090 CHECK_LT(x->value, 4) << "should not be invoked a fourth time";
1091
1092 // We arrange for the condition to return true on only the 2nd and 3rd calls.
1093 return x->value == 2 || x->value == 3;
1094 }
1095
1096 static void WaitForCond2(AcquireFromConditionStruct *x) {
1097 // wait for cond0 to become true
1098 x->mu0.LockWhen(absl::Condition(&ConditionWithAcquire, x));
1099 x->done = true;
1100 x->mu0.Unlock();
1101 }
1102
1103 // Test for Condition whose function acquires other Mutexes
1104 TEST(Mutex, AcquireFromCondition) {
1105 auto tp = CreateDefaultPool();
1106
1107 AcquireFromConditionStruct x;
1108 x.value = 0;
1109 x.done = false;
1110 tp->Schedule(
1111 std::bind(&WaitForCond2, &x)); // run WaitForCond2() in a thread T
1112 // T will hang because the first invocation of ConditionWithAcquire() will
1113 // return false.
1114 absl::SleepFor(absl::Milliseconds(500)); // allow T time to hang
1115
1116 x.mu0.Lock();
1117 x.cv.WaitWithTimeout(&x.mu0, absl::Milliseconds(500)); // wake T
1118 // T will be woken because the Wait() will call ConditionWithAcquire()
1119 // for the second time, and it will return true.
1120
1121 x.mu0.Unlock();
1122
1123 // T will then acquire the lock and recheck its own condition.
1124 // It will find the condition true, as this is the third invocation,
1125 // but the use of another Mutex by the calling function will
1126 // cause the old mutex implementation to think that the outer
1127 // LockWhen() has timed out because the inner LockWhenWithTimeout() did.
1128 // T will then check the condition a fourth time because it finds a
1129 // timeout occurred. This should not happen in the new
1130 // implementation that allows the Condition function to use Mutexes.
1131
1132 // It should also succeed, even though the Condition function
1133 // is being invoked from CondVar::Wait, and thus this thread
1134 // is conceptually waiting both on the condition variable, and on mu2.
1135
1136 x.mu0.LockWhen(absl::Condition(&x.done));
1137 x.mu0.Unlock();
1138 }
1139
1140 TEST(Mutex, DeadlockDetector) {
1141 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1142
1143 // check that we can call ForgetDeadlockInfo() on a lock with the lock held
1144 absl::Mutex m1;
1145 absl::Mutex m2;
1146 absl::Mutex m3;
1147 absl::Mutex m4;
1148
1149 m1.Lock(); // m1 gets ID1
1150 m2.Lock(); // m2 gets ID2
1151 m3.Lock(); // m3 gets ID3
1152 m3.Unlock();
1153 m2.Unlock();
1154 // m1 still held
1155 m1.ForgetDeadlockInfo(); // m1 loses ID
1156 m2.Lock(); // m2 gets ID2
1157 m3.Lock(); // m3 gets ID3
1158 m4.Lock(); // m4 gets ID4
1159 m3.Unlock();
1160 m2.Unlock();
1161 m4.Unlock();
1162 m1.Unlock();
1163 }
1164
1165 // Bazel has a test "warning" file that programs can write to if the
1166 // test should pass with a warning. This class disables the warning
1167 // file until it goes out of scope.
1168 class ScopedDisableBazelTestWarnings {
1169 public:
1170 ScopedDisableBazelTestWarnings() {
1171 #ifdef _WIN32
1172 char file[MAX_PATH];
1173 if (GetEnvironmentVariableA(kVarName, file, sizeof(file)) < sizeof(file)) {
1174 warnings_output_file_ = file;
1175 SetEnvironmentVariableA(kVarName, nullptr);
1176 }
1177 #else
1178 const char *file = getenv(kVarName);
1179 if (file != nullptr) {
1180 warnings_output_file_ = file;
1181 unsetenv(kVarName);
1182 }
1183 #endif
1184 }
1185
1186 ~ScopedDisableBazelTestWarnings() {
1187 if (!warnings_output_file_.empty()) {
1188 #ifdef _WIN32
1189 SetEnvironmentVariableA(kVarName, warnings_output_file_.c_str());
1190 #else
1191 setenv(kVarName, warnings_output_file_.c_str(), 0);
1192 #endif
1193 }
1194 }
1195
1196 private:
1197 static const char kVarName[];
1198 std::string warnings_output_file_;
1199 };
1200 const char ScopedDisableBazelTestWarnings::kVarName[] =
1201 "TEST_WARNINGS_OUTPUT_FILE";
1202
1203 #ifdef ABSL_HAVE_THREAD_SANITIZER
1204 // This test intentionally creates deadlocks to test the deadlock detector.
1205 TEST(Mutex, DISABLED_DeadlockDetectorBazelWarning) {
1206 #else
1207 TEST(Mutex, DeadlockDetectorBazelWarning) {
1208 #endif
1209 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
1210
1211 // Cause deadlock detection to detect something, if it's
1212 // compiled in and enabled. But turn off the bazel warning.
1213 ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
1214
1215 absl::Mutex mu0;
1216 absl::Mutex mu1;
1217 bool got_mu0 = mu0.TryLock();
1218 mu1.Lock(); // acquire mu1 while holding mu0
1219 if (got_mu0) {
1220 mu0.Unlock();
1221 }
1222 if (mu0.TryLock()) { // try lock shouldn't cause deadlock detector to fire
1223 mu0.Unlock();
1224 }
1225 mu0.Lock(); // acquire mu0 while holding mu1; should get one deadlock
1226 // report here
1227 mu0.Unlock();
1228 mu1.Unlock();
1229
1230 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1231 }
1232
1233 TEST(Mutex, DeadlockDetectorLongCycle) {
1234 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
1235
1236 // This test generates a warning if it passes, and crashes otherwise.
1237 // Cause bazel to ignore the warning.
1238 ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
1239
1240 // Check that we survive a deadlock with a lock cycle.
1241 std::vector<absl::Mutex> mutex(100);
1242 for (size_t i = 0; i != mutex.size(); i++) {
1243 mutex[i].Lock();
1244 mutex[(i + 1) % mutex.size()].Lock();
1245 mutex[i].Unlock();
1246 mutex[(i + 1) % mutex.size()].Unlock();
1247 }
1248
1249 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1250 }
1251
1252 // This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
1253 // annotation-based static thread-safety analysis is not currently
1254 // predicate-aware and cannot tell if the two for-loops that acquire and
1255 // release the locks have the same predicates.
1256 TEST(Mutex, DeadlockDetectorStressTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
1257 // Stress test: Here we create a large number of locks and use all of them.
1258 // If a deadlock detector keeps a full graph of lock acquisition order,
1259 // it will likely be too slow for this test to pass.
1260 const int n_locks = 1 << 17;
1261 auto array_of_locks = absl::make_unique<absl::Mutex[]>(n_locks);
1262 for (int i = 0; i < n_locks; i++) {
1263 int end = std::min(n_locks, i + 5);
1264 // acquire and then release locks i, i+1, ..., i+4
1265 for (int j = i; j < end; j++) {
1266 array_of_locks[j].Lock();
1267 }
1268 for (int j = i; j < end; j++) {
1269 array_of_locks[j].Unlock();
1270 }
1271 }
1272 }
1273
1274 #ifdef ABSL_HAVE_THREAD_SANITIZER
1275 // TSAN reports errors when locked Mutexes are destroyed.
1276 TEST(Mutex, DISABLED_DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
1277 #else
1278 TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
1279 #endif
1280 // Test a scenario where a cached deadlock graph node id in the
1281 // list of held locks is not invalidated when the corresponding
1282 // mutex is deleted.
1283 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1284 // Mutex that will be destroyed while being held
1285 absl::Mutex *a = new absl::Mutex;
1286 // Other mutexes needed by test
1287 absl::Mutex b, c;
1288
1289 // Hold mutex.
1290 a->Lock();
1291
1292 // Force deadlock id assignment by acquiring another lock.
1293 b.Lock();
1294 b.Unlock();
1295
1296 // Delete the mutex. The Mutex destructor tries to remove held locks,
1297 // but the attempt isn't foolproof. It can fail if:
1298 // (a) Deadlock detection is currently disabled.
1299 // (b) The destruction is from another thread.
1300 // We exploit (a) by temporarily disabling deadlock detection.
1301 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
1302 delete a;
1303 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1304
1305 // Now acquire another lock which will force a deadlock id assignment.
1306 // We should end up getting assigned the same deadlock id that was
1307 // freed up when "a" was deleted, which will cause a spurious deadlock
1308 // report if the held lock entry for "a" was not invalidated.
1309 c.Lock();
1310 c.Unlock();
1311 }
1312
1313 // --------------------------------------------------------
1314 // Test for timeouts/deadlines on condition waits that are specified using
1315 // absl::Duration and absl::Time. For each waiting function we test with
1316 // a timeout/deadline that has already expired/passed, one that is infinite
1317 // and so never expires/passes, and one that will expire/pass in the near
1318 // future.
1319
1320 static absl::Duration TimeoutTestAllowedSchedulingDelay() {
1321 // Note: we use a function here because Microsoft Visual Studio fails to
1322 // properly initialize constexpr static absl::Duration variables.
1323 return absl::Milliseconds(150);
1324 }
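//
// The pattern being worked around would look roughly like this (hypothetical
// sketch; per the note above, MSVC may not initialize it properly):
//
//   static constexpr absl::Duration kTimeoutTestAllowedSchedulingDelay =
//       absl::Milliseconds(150);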
1325
1326 // Returns true if `actual_delay` is close enough to `expected_delay` to pass
1327 // the timeouts/deadlines test. Otherwise, logs warnings and returns false.
1328 ABSL_MUST_USE_RESULT
1329 static bool DelayIsWithinBounds(absl::Duration expected_delay,
1330 absl::Duration actual_delay) {
1331 bool pass = true;
1332 // Do not allow the observed delay to be less than expected. This may occur
1333 // in practice due to clock skew or when the synchronization primitives use a
1334 // different clock than absl::Now(), but these cases should be handled by
1335 // the retry mechanism in each TimeoutTest.
1336 if (actual_delay < expected_delay) {
1337 LOG(WARNING) << "Actual delay " << actual_delay
1338 << " was too short, expected " << expected_delay
1339 << " (difference " << actual_delay - expected_delay << ")";
1340 pass = false;
1341 }
1342 // If the expected delay is <= zero then allow a small error tolerance, since
1343 // we do not expect context switches to occur during test execution.
1344 // Otherwise, thread scheduling delays may be substantial in rare cases, so
1345 // tolerate up to TimeoutTestAllowedSchedulingDelay() of error.
1346 absl::Duration tolerance = expected_delay <= absl::ZeroDuration()
1347 ? absl::Milliseconds(10)
1348 : TimeoutTestAllowedSchedulingDelay();
1349 if (actual_delay > expected_delay + tolerance) {
1350 LOG(WARNING) << "Actual delay " << actual_delay
1351 << " was too long, expected " << expected_delay
1352 << " (difference " << actual_delay - expected_delay << ")";
1353 pass = false;
1354 }
1355 return pass;
1356 }
1357
1358 // Parameters for TimeoutTest, below.
1359 struct TimeoutTestParam {
1360 // The file and line number (used for logging purposes only).
1361 const char *from_file;
1362 int from_line;
1363
1364 // Should the absolute deadline API based on absl::Time be tested? If false,
1365 // the relative deadline API based on absl::Duration is tested.
1366 bool use_absolute_deadline;
1367
1368 // The deadline/timeout used when calling the API being tested
1369 // (e.g. Mutex::LockWhenWithDeadline).
1370 absl::Duration wait_timeout;
1371
1372 // The delay before the condition will be set true by the test code. If zero
1373 // or negative, the condition is set true immediately (before calling the API
1374 // being tested). Otherwise, if infinite, the condition is never set true.
1375 // Otherwise a closure is scheduled for the future that sets the condition
1376 // true.
1377 absl::Duration satisfy_condition_delay;
1378
1379 // The expected result of the condition after the call to the API being
1380 // tested. Generally `true` means the condition was true when the API returns,
1381 // `false` indicates an expected timeout.
1382 bool expected_result;
1383
1384 // The expected delay before the API under test returns. This is inherently
1385 // flaky, so some slop is allowed (see `DelayIsWithinBounds` above), and the
1386 // test keeps trying indefinitely until this constraint passes.
1387 absl::Duration expected_delay;
1388 };
1389
1390 // Print a `TimeoutTestParam` to a debug log.
1391 std::ostream &operator<<(std::ostream &os, const TimeoutTestParam ¶m) {
1392 return os << "from: " << param.from_file << ":" << param.from_line
1393 << " use_absolute_deadline: "
1394 << (param.use_absolute_deadline ? "true" : "false")
1395 << " wait_timeout: " << param.wait_timeout
1396 << " satisfy_condition_delay: " << param.satisfy_condition_delay
1397 << " expected_result: "
1398 << (param.expected_result ? "true" : "false")
1399 << " expected_delay: " << param.expected_delay;
1400 }
1401
1402 // Like `thread::Executor::ScheduleAt` except:
1403 // a) Delays zero or negative are executed immediately in the current thread.
1404 // b) Infinite delays are never scheduled.
1405 // c) Calls this test's `ScheduleAfter` helper instead of using `pool` directly.
1406 static void RunAfterDelay(absl::Duration delay,
1407 absl::synchronization_internal::ThreadPool *pool,
1408 const std::function<void()> &callback) {
1409 if (delay <= absl::ZeroDuration()) {
1410 callback(); // immediate
1411 } else if (delay != absl::InfiniteDuration()) {
1412 ScheduleAfter(pool, delay, callback);
1413 }
1414 }
1415
1416 class TimeoutTest : public ::testing::Test,
1417 public ::testing::WithParamInterface<TimeoutTestParam> {};
1418
1419 std::vector<TimeoutTestParam> MakeTimeoutTestParamValues() {
1420 // The `finite` delay is a finite, relatively short, delay. We make it larger
1421 // than our allowed scheduling delay (slop factor) to avoid confusion when
1422 // diagnosing test failures. The other constants here have clear meanings.
1423 const absl::Duration finite = 3 * TimeoutTestAllowedSchedulingDelay();
1424 const absl::Duration never = absl::InfiniteDuration();
1425 const absl::Duration negative = -absl::InfiniteDuration();
1426 const absl::Duration immediate = absl::ZeroDuration();
1427
1428 // Every test case is run twice: once using the absolute deadline API and once
1429 // using the relative timeout API.
1430 std::vector<TimeoutTestParam> values;
1431 for (bool use_absolute_deadline : {false, true}) {
1432 // Tests with a negative timeout (deadline in the past), which should
1433 // immediately return current state of the condition.
1434
1435 // The condition is already true:
1436 values.push_back(TimeoutTestParam{
1437 __FILE__, __LINE__, use_absolute_deadline,
1438 negative, // wait_timeout
1439 immediate, // satisfy_condition_delay
1440 true, // expected_result
1441 immediate, // expected_delay
1442 });
1443
1444 // The condition becomes true, but the timeout has already expired:
1445 values.push_back(TimeoutTestParam{
1446 __FILE__, __LINE__, use_absolute_deadline,
1447 negative, // wait_timeout
1448 finite, // satisfy_condition_delay
1449 false, // expected_result
1450 immediate // expected_delay
1451 });
1452
1453 // The condition never becomes true:
1454 values.push_back(TimeoutTestParam{
1455 __FILE__, __LINE__, use_absolute_deadline,
1456 negative, // wait_timeout
1457 never, // satisfy_condition_delay
1458 false, // expected_result
1459 immediate // expected_delay
1460 });
1461
1462 // Tests with an infinite timeout (deadline in the infinite future), which
1463 // should only return when the condition becomes true.
1464
1465 // The condition is already true:
1466 values.push_back(TimeoutTestParam{
1467 __FILE__, __LINE__, use_absolute_deadline,
1468 never, // wait_timeout
1469 immediate, // satisfy_condition_delay
1470 true, // expected_result
1471 immediate // expected_delay
1472 });
1473
1474 // The condition becomes true before the (infinite) expiry:
1475 values.push_back(TimeoutTestParam{
1476 __FILE__, __LINE__, use_absolute_deadline,
1477 never, // wait_timeout
1478 finite, // satisfy_condition_delay
1479 true, // expected_result
1480 finite, // expected_delay
1481 });
1482
1483 // Tests with a (small) finite timeout (deadline soon), with the condition
1484 // becoming true both before and after its expiry.
1485
1486 // The condition is already true:
1487 values.push_back(TimeoutTestParam{
1488 __FILE__, __LINE__, use_absolute_deadline,
1489 never, // wait_timeout
1490 immediate, // satisfy_condition_delay
1491 true, // expected_result
1492 immediate // expected_delay
1493 });
1494
1495 // The condition becomes true before the expiry:
1496 values.push_back(TimeoutTestParam{
1497 __FILE__, __LINE__, use_absolute_deadline,
1498 finite * 2, // wait_timeout
1499 finite, // satisfy_condition_delay
1500 true, // expected_result
1501 finite // expected_delay
1502 });
1503
1504 // The condition becomes true, but the timeout has already expired:
1505 values.push_back(TimeoutTestParam{
1506 __FILE__, __LINE__, use_absolute_deadline,
1507 finite, // wait_timeout
1508 finite * 2, // satisfy_condition_delay
1509 false, // expected_result
1510 finite // expected_delay
1511 });
1512
1513 // The condition never becomes true:
1514 values.push_back(TimeoutTestParam{
1515 __FILE__, __LINE__, use_absolute_deadline,
1516 finite, // wait_timeout
1517 never, // satisfy_condition_delay
1518 false, // expected_result
1519 finite // expected_delay
1520 });
1521 }
1522 return values;
1523 }
1524
1525 // Instantiate `TimeoutTest` with `MakeTimeoutTestParamValues()`.
1526 INSTANTIATE_TEST_SUITE_P(All, TimeoutTest,
1527 testing::ValuesIn(MakeTimeoutTestParamValues()));
1528
1529 TEST_P(TimeoutTest, Await) {
1530 const TimeoutTestParam params = GetParam();
1531 LOG(INFO) << "Params: " << params;
1532
1533 // Because this test asserts bounds on scheduling delays it is flaky. To
1534 // compensate it loops forever until it passes. Failures express as test
1535 // timeouts, in which case the test log can be used to diagnose the issue.
1536 for (int attempt = 1;; ++attempt) {
1537 LOG(INFO) << "Attempt " << attempt;
1538
1539 absl::Mutex mu;
1540 bool value = false; // condition value (under mu)
1541
1542 std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
1543 CreateDefaultPool();
1544 RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
1545 absl::MutexLock l(&mu);
1546 value = true;
1547 });
1548
1549 absl::MutexLock lock(&mu);
1550 absl::Time start_time = absl::Now();
1551 absl::Condition cond(&value);
1552 bool result =
1553 params.use_absolute_deadline
1554 ? mu.AwaitWithDeadline(cond, start_time + params.wait_timeout)
1555 : mu.AwaitWithTimeout(cond, params.wait_timeout);
1556 if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
1557 EXPECT_EQ(params.expected_result, result);
1558 break;
1559 }
1560 }
1561 }
1562
1563 TEST_P(TimeoutTest, LockWhen) {
1564 const TimeoutTestParam params = GetParam();
1565 LOG(INFO) << "Params: " << params;
1566
1567 // Because this test asserts bounds on scheduling delays it is flaky. To
1568 // compensate it loops forever until it passes. Failures express as test
1569 // timeouts, in which case the test log can be used to diagnose the issue.
1570 for (int attempt = 1;; ++attempt) {
1571 LOG(INFO) << "Attempt " << attempt;
1572
1573 absl::Mutex mu;
1574 bool value = false; // condition value (under mu)
1575
1576 std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
1577 CreateDefaultPool();
1578 RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
1579 absl::MutexLock l(&mu);
1580 value = true;
1581 });
1582
1583 absl::Time start_time = absl::Now();
1584 absl::Condition cond(&value);
1585 bool result =
1586 params.use_absolute_deadline
1587 ? mu.LockWhenWithDeadline(cond, start_time + params.wait_timeout)
1588 : mu.LockWhenWithTimeout(cond, params.wait_timeout);
1589 mu.Unlock();
1590
1591 if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
1592 EXPECT_EQ(params.expected_result, result);
1593 break;
1594 }
1595 }
1596 }
1597
TEST_P(TimeoutTest, ReaderLockWhen) {
  const TimeoutTestParam params = GetParam();
  LOG(INFO) << "Params: " << params;

  // Because this test asserts bounds on scheduling delays, it is inherently
  // flaky.  To compensate, it loops forever until it passes.  Failures
  // manifest as test timeouts, in which case the test log can be used to
  // diagnose the issue.
  for (int attempt = 0;; ++attempt) {
    LOG(INFO) << "Attempt " << attempt;

    absl::Mutex mu;
    bool value = false;  // condition value (under mu)

    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
        CreateDefaultPool();
    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
      absl::MutexLock l(&mu);
      value = true;
    });

    absl::Time start_time = absl::Now();
    bool result =
        params.use_absolute_deadline
            ? mu.ReaderLockWhenWithDeadline(absl::Condition(&value),
                                            start_time + params.wait_timeout)
            : mu.ReaderLockWhenWithTimeout(absl::Condition(&value),
                                           params.wait_timeout);
    mu.ReaderUnlock();

    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
      EXPECT_EQ(params.expected_result, result);
      break;
    }
  }
}

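// Tests CondVar::WaitWithTimeout() and WaitWithDeadline() in the usual
// condition-variable loop, recomputing the relative timeout after each
// wakeup.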
TEST_P(TimeoutTest, Wait) {
  const TimeoutTestParam params = GetParam();
  LOG(INFO) << "Params: " << params;

  // Because this test asserts bounds on scheduling delays, it is inherently
  // flaky.  To compensate, it loops forever until it passes.  Failures
  // manifest as test timeouts, in which case the test log can be used to
  // diagnose the issue.
  for (int attempt = 0;; ++attempt) {
    LOG(INFO) << "Attempt " << attempt;

    absl::Mutex mu;
    bool value = false;  // condition value (under mu)
    absl::CondVar cv;    // signals a change of `value`

    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
        CreateDefaultPool();
    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
      absl::MutexLock l(&mu);
      value = true;
      cv.Signal();
    });

    absl::MutexLock lock(&mu);
    absl::Time start_time = absl::Now();
    absl::Duration timeout = params.wait_timeout;
    absl::Time deadline = start_time + timeout;
    while (!value) {
      if (params.use_absolute_deadline ? cv.WaitWithDeadline(&mu, deadline)
                                       : cv.WaitWithTimeout(&mu, timeout)) {
        break;  // deadline/timeout exceeded
      }
      timeout = deadline - absl::Now();  // recompute
    }
    bool result = value;  // note: `mu` is still held

    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
      EXPECT_EQ(params.expected_result, result);
      break;
    }
  }
}

TEST(Mutex, Logging) {
  // Allow user to look at logging output
  absl::Mutex logged_mutex;
  logged_mutex.EnableDebugLog("fido_mutex");
  absl::CondVar logged_cv;
  logged_cv.EnableDebugLog("rover_cv");
  logged_mutex.Lock();
  logged_cv.WaitWithTimeout(&logged_mutex, absl::Milliseconds(20));
  logged_mutex.Unlock();
  logged_mutex.ReaderLock();
  logged_mutex.ReaderUnlock();
  logged_mutex.Lock();
  logged_mutex.Unlock();
  logged_cv.Signal();
  logged_cv.SignalAll();
}

// --------------------------------------------------------

// Generate the vector of thread counts for tests parameterized on thread
// count.
static std::vector<int> AllThreadCountValues() {
  if (kExtendedTest) {
    return {2, 4, 8, 10, 16, 20, 24, 30, 32};
  }
  return {2, 4, 10};
}

// A test fixture parameterized by thread count.
class MutexVariableThreadCountTest : public ::testing::TestWithParam<int> {};

// Instantiate the above with AllThreadCountValues().
INSTANTIATE_TEST_SUITE_P(ThreadCounts, MutexVariableThreadCountTest,
                         ::testing::ValuesIn(AllThreadCountValues()),
                         ::testing::PrintToStringParamName());

// Reduces iterations by some factor for slow platforms
// (determined empirically).
static int ScaleIterations(int x) {
  // ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE is set in the implementation
  // of Mutex that uses either std::mutex or pthread_mutex_t.  Use
  // these as keys to determine the slow implementation.
#if defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
  return x / 10;
#else
  return x;
#endif
}

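// Exercises plain Lock()/Unlock() contention via TestMu across the
// parameterized thread count.  Where invariant debugging is available, a much
// shorter pass re-runs with the CheckSumG0G1 invariant installed.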
TEST_P(MutexVariableThreadCountTest, Mutex) {
  int threads = GetParam();
  int iterations = ScaleIterations(10000000) / threads;
  int operations = threads * iterations;
  EXPECT_EQ(RunTest(&TestMu, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
  iterations = std::min(iterations, 10);
  operations = threads * iterations;
  EXPECT_EQ(RunTestWithInvariantDebugging(&TestMu, threads, iterations,
                                          operations, CheckSumG0G1),
            operations);
#endif
}

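// Like the Mutex test above, but exercises TryLock() via TestTry.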
TEST_P(MutexVariableThreadCountTest, Try) {
  int threads = GetParam();
  int iterations = 1000000 / threads;
  int operations = iterations * threads;
  EXPECT_EQ(RunTest(&TestTry, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
  iterations = std::min(iterations, 10);
  operations = threads * iterations;
  EXPECT_EQ(RunTestWithInvariantDebugging(&TestTry, threads, iterations,
                                          operations, CheckSumG0G1),
            operations);
#endif
}

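// Runs TestR20ms (defined earlier in this file) across the parameterized
// thread count; the expected result is 0.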
TEST_P(MutexVariableThreadCountTest, R20ms) {
  int threads = GetParam();
  int iterations = 100;
  int operations = iterations * threads;
  EXPECT_EQ(RunTest(&TestR20ms, threads, iterations, operations), 0);
}

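// Exercises reader/writer locking via TestRW (defined earlier in this file);
// the expected count is operations / 2.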
TEST_P(MutexVariableThreadCountTest, RW) {
  int threads = GetParam();
  int iterations = ScaleIterations(20000000) / threads;
  int operations = iterations * threads;
  EXPECT_EQ(RunTest(&TestRW, threads, iterations, operations), operations / 2);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
  iterations = std::min(iterations, 10);
  operations = threads * iterations;
  EXPECT_EQ(RunTestWithInvariantDebugging(&TestRW, threads, iterations,
                                          operations, CheckSumG0G1),
            operations / 2);
#endif
}

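// Exercises Mutex::Await() via TestAwait (defined earlier in this file).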
TEST_P(MutexVariableThreadCountTest, Await) {
  int threads = GetParam();
  int iterations = ScaleIterations(500000);
  int operations = iterations;
  EXPECT_EQ(RunTest(&TestAwait, threads, iterations, operations), operations);
}

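// Exercises CondVar::SignalAll() via TestSignalAll (defined earlier in this
// file).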
TEST_P(MutexVariableThreadCountTest, SignalAll) {
  int threads = GetParam();
  int iterations = 200000 / threads;
  int operations = iterations;
  EXPECT_EQ(RunTest(&TestSignalAll, threads, iterations, operations),
            operations);
}

TEST(Mutex, Signal) {
  int threads = 2;  // TestSignal must use two threads
  int iterations = 200000;
  int operations = iterations;
  EXPECT_EQ(RunTest(&TestSignal, threads, iterations, operations), operations);
}

TEST(Mutex, Timed) {
  int threads = 10;  // Use a fixed thread count of 10
  int iterations = 1000;
  int operations = iterations;
  EXPECT_EQ(RunTest(&TestCVTimeout, threads, iterations, operations),
            operations);
}

1802
1803 TEST(Mutex, CVTime) {
1804 int threads = 10; // Use a fixed thread count of 10
1805 int iterations = 1;
1806 EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1), threads * iterations);
1807 }
1808
TEST(Mutex, MuTime) {
  int threads = 10;  // Use a fixed thread count of 10
  int iterations = 1;
  EXPECT_EQ(RunTest(&TestMuTime, threads, iterations, 1),
            threads * iterations);
}

TEST(Mutex, SignalExitedThread) {
  // The test may expose a race when Mutex::Unlock signals a thread
  // that has already exited.
#if defined(__wasm__) || defined(__asmjs__)
  constexpr int kThreads = 1;  // OOMs under WASM
#else
  constexpr int kThreads = 100;
#endif
  std::vector<std::thread> top;
  for (unsigned i = 0; i < 2 * std::thread::hardware_concurrency(); i++) {
    top.emplace_back([&]() {
      for (int i = 0; i < kThreads; i++) {
        absl::Mutex mu;
        std::thread t([&]() {
          mu.Lock();
          mu.Unlock();
        });
        mu.Lock();
        mu.Unlock();
        t.join();
      }
    });
  }
  for (auto &th : top) th.join();
}

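// Two staggered reader threads hold `mu` for one-second stretches so that a
// reader almost always holds the lock; a writer started afterwards should
// nevertheless be able to acquire it and set `wrote`, which a reader then
// observes.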
TEST(Mutex, WriterPriority) {
  absl::Mutex mu;
  bool wrote = false;
  std::atomic<bool> saw_wrote{false};
  auto readfunc = [&]() {
    for (size_t i = 0; i < 10; ++i) {
      absl::ReaderMutexLock lock(&mu);
      if (wrote) {
        saw_wrote = true;
        break;
      }
      absl::SleepFor(absl::Seconds(1));
    }
  };
  std::thread t1(readfunc);
  absl::SleepFor(absl::Milliseconds(500));
  std::thread t2(readfunc);
  // Note: this test guards against a bug that was related to an uninitialized
  // PerThreadSynch::priority, so the writer intentionally runs on a new
  // thread.
  std::thread t3([&]() {
    // The writer should be able to squeeze between the two alternating
    // readers.
    absl::MutexLock lock(&mu);
    wrote = true;
  });
  t1.join();
  t2.join();
  t3.join();
  EXPECT_TRUE(saw_wrote.load());
}

TEST(Mutex, LockWhenWithTimeoutResult) {
  // Check various corner cases for Await/LockWhen return value
  // with always true/always false conditions.
  absl::Mutex mu;
  const bool kAlwaysTrue = true, kAlwaysFalse = false;
  const absl::Condition kTrueCond(&kAlwaysTrue), kFalseCond(&kAlwaysFalse);
  EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
  mu.Unlock();
  EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
  EXPECT_TRUE(mu.AwaitWithTimeout(kTrueCond, absl::Milliseconds(1)));
  EXPECT_FALSE(mu.AwaitWithTimeout(kFalseCond, absl::Milliseconds(1)));
  std::thread th1([&]() {
    EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
    mu.Unlock();
  });
  std::thread th2([&]() {
    EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
    mu.Unlock();
  });
  absl::SleepFor(absl::Milliseconds(100));
  mu.Unlock();
  th1.join();
  th2.join();
}

}  // namespace