1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "absl/synchronization/mutex.h"
16
17 #ifdef _WIN32
18 #include <windows.h>
19 #endif
20
21 #include <algorithm>
22 #include <atomic>
23 #include <cstdlib>
24 #include <functional>
25 #include <memory>
26 #include <random>
27 #include <string>
28 #include <thread> // NOLINT(build/c++11)
29 #include <type_traits>
30 #include <vector>
31
32 #include "gtest/gtest.h"
33 #include "absl/base/attributes.h"
34 #include "absl/base/config.h"
35 #include "absl/base/internal/sysinfo.h"
36 #include "absl/log/check.h"
37 #include "absl/log/log.h"
38 #include "absl/memory/memory.h"
39 #include "absl/synchronization/internal/create_thread_identity.h"
40 #include "absl/synchronization/internal/thread_pool.h"
41 #include "absl/time/clock.h"
42 #include "absl/time/time.h"
43
44 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
45 #include <pthread.h>
46 #include <string.h>
47 #endif
48
49 namespace {
50
51 // TODO(dmauro): Replace with a commandline flag.
52 static constexpr bool kExtendedTest = false;
53
54 std::unique_ptr<absl::synchronization_internal::ThreadPool> CreatePool(
55 int threads) {
56 return absl::make_unique<absl::synchronization_internal::ThreadPool>(threads);
57 }
58
59 std::unique_ptr<absl::synchronization_internal::ThreadPool>
60 CreateDefaultPool() {
61 return CreatePool(kExtendedTest ? 32 : 10);
62 }
63
64 // Hack to schedule a function to run on a thread pool thread after a
65 // duration has elapsed.
66 static void ScheduleAfter(absl::synchronization_internal::ThreadPool *tp,
67 absl::Duration after,
68 const std::function<void()> &func) {
69 tp->Schedule([func, after] {
70 absl::SleepFor(after);
71 func();
72 });
73 }
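// A minimal usage sketch (hypothetical values): run a callback roughly 50ms
// from now on a pool thread:
//   ScheduleAfter(pool.get(), absl::Milliseconds(50), [] { /* ... */ });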
74
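// Shared state for the multi-threaded tests below. Several of the tests
// increment g0 and decrement g1 in tandem, so g0 == -g1 is the invariant
// checked by CheckSumG0G1() below.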
75 struct TestContext {
76 int iterations;
77 int threads;
78 int g0; // global 0
79 int g1; // global 1
80 absl::Mutex mu;
81 absl::CondVar cv;
82 };
83
84 // To test whether the invariant check call occurs
85 static std::atomic<bool> invariant_checked;
86
87 static bool GetInvariantChecked() {
88 return invariant_checked.load(std::memory_order_relaxed);
89 }
90
91 static void SetInvariantChecked(bool new_value) {
92 invariant_checked.store(new_value, std::memory_order_relaxed);
93 }
94
95 static void CheckSumG0G1(void *v) {
96 TestContext *cxt = static_cast<TestContext *>(v);
97 CHECK_EQ(cxt->g0, -cxt->g1) << "Error in CheckSumG0G1";
98 SetInvariantChecked(true);
99 }
100
101 static void TestMu(TestContext *cxt, int c) {
102 for (int i = 0; i != cxt->iterations; i++) {
103 absl::MutexLock l(&cxt->mu);
104 int a = cxt->g0 + 1;
105 cxt->g0 = a;
106 cxt->g1--;
107 }
108 }
109
110 static void TestTry(TestContext *cxt, int c) {
111 for (int i = 0; i != cxt->iterations; i++) {
112 do {
113 std::this_thread::yield();
114 } while (!cxt->mu.TryLock());
115 int a = cxt->g0 + 1;
116 cxt->g0 = a;
117 cxt->g1--;
118 cxt->mu.Unlock();
119 }
120 }
121
122 static void TestR20ms(TestContext *cxt, int c) {
123 for (int i = 0; i != cxt->iterations; i++) {
124 absl::ReaderMutexLock l(&cxt->mu);
125 absl::SleepFor(absl::Milliseconds(20));
126 cxt->mu.AssertReaderHeld();
127 }
128 }
129
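// Threads with even indices act as writers (maintaining g0 == -g1); threads
// with odd indices take the lock in shared mode and check that invariant.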
130 static void TestRW(TestContext *cxt, int c) {
131 if ((c & 1) == 0) {
132 for (int i = 0; i != cxt->iterations; i++) {
133 absl::WriterMutexLock l(&cxt->mu);
134 cxt->g0++;
135 cxt->g1--;
136 cxt->mu.AssertHeld();
137 cxt->mu.AssertReaderHeld();
138 }
139 } else {
140 for (int i = 0; i != cxt->iterations; i++) {
141 absl::ReaderMutexLock l(&cxt->mu);
142 CHECK_EQ(cxt->g0, -cxt->g1) << "Error in TestRW";
143 cxt->mu.AssertReaderHeld();
144 }
145 }
146 }
147
148 struct MyContext {
149 int target;
150 TestContext *cxt;
151 bool MyTurn();
152 };
153
154 bool MyContext::MyTurn() {
155 TestContext *cxt = this->cxt;
156 return cxt->g0 == this->target || cxt->g0 == cxt->iterations;
157 }
158
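// Each thread Await()s until g0 reaches its own target, increments g0, and
// then advances its target by the thread count, so the threads increment g0 in
// round-robin order.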
159 static void TestAwait(TestContext *cxt, int c) {
160 MyContext mc;
161 mc.target = c;
162 mc.cxt = cxt;
163 absl::MutexLock l(&cxt->mu);
164 cxt->mu.AssertHeld();
165 while (cxt->g0 < cxt->iterations) {
166 cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn));
167 CHECK(mc.MyTurn()) << "Error in TestAwait";
168 cxt->mu.AssertHeld();
169 if (cxt->g0 < cxt->iterations) {
170 int a = cxt->g0 + 1;
171 cxt->g0 = a;
172 mc.target += cxt->threads;
173 }
174 }
175 }
176
177 static void TestSignalAll(TestContext *cxt, int c) {
178 int target = c;
179 absl::MutexLock l(&cxt->mu);
180 cxt->mu.AssertHeld();
181 while (cxt->g0 < cxt->iterations) {
182 while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
183 cxt->cv.Wait(&cxt->mu);
184 }
185 if (cxt->g0 < cxt->iterations) {
186 int a = cxt->g0 + 1;
187 cxt->g0 = a;
188 cxt->cv.SignalAll();
189 target += cxt->threads;
190 }
191 }
192 }
193
194 static void TestSignal(TestContext *cxt, int c) {
195 CHECK_EQ(cxt->threads, 2) << "TestSignal should use 2 threads";
196 int target = c;
197 absl::MutexLock l(&cxt->mu);
198 cxt->mu.AssertHeld();
199 while (cxt->g0 < cxt->iterations) {
200 while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
201 cxt->cv.Wait(&cxt->mu);
202 }
203 if (cxt->g0 < cxt->iterations) {
204 int a = cxt->g0 + 1;
205 cxt->g0 = a;
206 cxt->cv.Signal();
207 target += cxt->threads;
208 }
209 }
210 }
211
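// Like TestSignalAll(), but each wait uses a long (100-second) timeout instead
// of an untimed CondVar::Wait().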
212 static void TestCVTimeout(TestContext *cxt, int c) {
213 int target = c;
214 absl::MutexLock l(&cxt->mu);
215 cxt->mu.AssertHeld();
216 while (cxt->g0 < cxt->iterations) {
217 while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
218 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
219 }
220 if (cxt->g0 < cxt->iterations) {
221 int a = cxt->g0 + 1;
222 cxt->g0 = a;
223 cxt->cv.SignalAll();
224 target += cxt->threads;
225 }
226 }
227 }
228
229 static bool G0GE2(TestContext *cxt) { return cxt->g0 >= 2; }
230
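// Thread 0 performs a series of timed waits and checks their measured
// durations; thread 1 does a single ~500ms wait; thread 2 waits (with a long
// timeout) for g0 >= 2; the remaining threads wait untimed for g0 >= 2.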
231 static void TestTime(TestContext *cxt, int c, bool use_cv) {
232 CHECK_EQ(cxt->iterations, 1) << "TestTime should only use 1 iteration";
233 CHECK_GT(cxt->threads, 2) << "TestTime should use more than 2 threads";
234 const bool kFalse = false;
235 absl::Condition false_cond(&kFalse);
236 absl::Condition g0ge2(G0GE2, cxt);
237 if (c == 0) {
238 absl::MutexLock l(&cxt->mu);
239
240 absl::Time start = absl::Now();
241 if (use_cv) {
242 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
243 } else {
244 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
245 << "TestTime failed";
246 }
247 absl::Duration elapsed = absl::Now() - start;
248 CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
249 << "TestTime failed";
250 CHECK_EQ(cxt->g0, 1) << "TestTime failed";
251
252 start = absl::Now();
253 if (use_cv) {
254 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
255 } else {
256 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
257 << "TestTime failed";
258 }
259 elapsed = absl::Now() - start;
260 CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
261 << "TestTime failed";
262 cxt->g0++;
263 if (use_cv) {
264 cxt->cv.Signal();
265 }
266
267 start = absl::Now();
268 if (use_cv) {
269 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(4));
270 } else {
271 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)))
272 << "TestTime failed";
273 }
274 elapsed = absl::Now() - start;
275 CHECK(absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0))
276 << "TestTime failed";
277 CHECK_GE(cxt->g0, 3) << "TestTime failed";
278
279 start = absl::Now();
280 if (use_cv) {
281 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
282 } else {
283 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
284 << "TestTime failed";
285 }
286 elapsed = absl::Now() - start;
287 CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
288 << "TestTime failed";
289 if (use_cv) {
290 cxt->cv.SignalAll();
291 }
292
293 start = absl::Now();
294 if (use_cv) {
295 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
296 } else {
297 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
298 << "TestTime failed";
299 }
300 elapsed = absl::Now() - start;
301 CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
302 << "TestTime failed";
303 CHECK_EQ(cxt->g0, cxt->threads) << "TestTime failed";
304
305 } else if (c == 1) {
306 absl::MutexLock l(&cxt->mu);
307 const absl::Time start = absl::Now();
308 if (use_cv) {
309 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500));
310 } else {
311 CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)))
312 << "TestTime failed";
313 }
314 const absl::Duration elapsed = absl::Now() - start;
315 CHECK(absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9))
316 << "TestTime failed";
317 cxt->g0++;
318 } else if (c == 2) {
319 absl::MutexLock l(&cxt->mu);
320 if (use_cv) {
321 while (cxt->g0 < 2) {
322 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
323 }
324 } else {
325 CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)))
326 << "TestTime failed";
327 }
328 cxt->g0++;
329 } else {
330 absl::MutexLock l(&cxt->mu);
331 if (use_cv) {
332 while (cxt->g0 < 2) {
333 cxt->cv.Wait(&cxt->mu);
334 }
335 } else {
336 cxt->mu.Await(g0ge2);
337 }
338 cxt->g0++;
339 }
340 }
341
342 static void TestMuTime(TestContext *cxt, int c) { TestTime(cxt, c, false); }
343
344 static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); }
345
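// Runs cb(thread index), then increments *c1 and signals cv so that
// RunTestCommon() below can wait for all test threads to finish.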
346 static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
347 const std::function<void(int)> &cb) {
348 mu->Lock();
349 int c = (*c0)++;
350 mu->Unlock();
351 cb(c);
352 absl::MutexLock l(mu);
353 (*c1)++;
354 cv->Signal();
355 }
356
357 // Code common to RunTest() and RunTestWithInvariantDebugging().
358 static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int),
359 int threads, int iterations, int operations) {
360 absl::Mutex mu2;
361 absl::CondVar cv2;
362 int c0 = 0;
363 int c1 = 0;
364 cxt->g0 = 0;
365 cxt->g1 = 0;
366 cxt->iterations = iterations;
367 cxt->threads = threads;
368 absl::synchronization_internal::ThreadPool tp(threads);
369 for (int i = 0; i != threads; i++) {
370 tp.Schedule(std::bind(
371 &EndTest, &c0, &c1, &mu2, &cv2,
372 std::function<void(int)>(std::bind(test, cxt, std::placeholders::_1))));
373 }
374 mu2.Lock();
375 while (c1 != threads) {
376 cv2.Wait(&mu2);
377 }
378 mu2.Unlock();
379 return cxt->g0;
380 }
381
382 // Basis for the parameterized tests configured below.
383 static int RunTest(void (*test)(TestContext *cxt, int), int threads,
384 int iterations, int operations) {
385 TestContext cxt;
386 return RunTestCommon(&cxt, test, threads, iterations, operations);
387 }
388
389 // Like RunTest(), but sets an invariant on the tested Mutex and
390 // verifies that the invariant check happened. The invariant function
391 // will be passed the TestContext* as its arg and must call
392 // SetInvariantChecked(true);
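// CheckSumG0G1() above is an example of such an invariant function: it checks
// g0 == -g1 and records the check via SetInvariantChecked(true).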
393 #if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
394 static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
395 int threads, int iterations,
396 int operations,
397 void (*invariant)(void *)) {
398 absl::EnableMutexInvariantDebugging(true);
399 SetInvariantChecked(false);
400 TestContext cxt;
401 cxt.mu.EnableInvariantDebugging(invariant, &cxt);
402 int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
403 CHECK(GetInvariantChecked()) << "Invariant not checked";
404 absl::EnableMutexInvariantDebugging(false); // Restore.
405 return ret;
406 }
407 #endif
408
409 // --------------------------------------------------------
410 // Test for fix of bug in TryRemove()
411 struct TimeoutBugStruct {
412 absl::Mutex mu;
413 bool a;
414 int a_waiter_count;
415 };
416
417 static void WaitForA(TimeoutBugStruct *x) {
418 x->mu.LockWhen(absl::Condition(&x->a));
419 x->a_waiter_count--;
420 x->mu.Unlock();
421 }
422
423 static bool NoAWaiters(TimeoutBugStruct *x) { return x->a_waiter_count == 0; }
424
425 // Test that a CondVar.Wait(&mutex) can un-block a call to mutex.Await() in
426 // another thread.
427 TEST(Mutex, CondVarWaitSignalsAwait) {
428 // Use a struct so the lock annotations apply.
429 struct {
430 absl::Mutex barrier_mu;
431 bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
432
433 absl::Mutex release_mu;
434 bool release ABSL_GUARDED_BY(release_mu) = false;
435 absl::CondVar released_cv;
436 } state;
437
438 auto pool = CreateDefaultPool();
439
440 // Thread A. Sets barrier, waits for release using Mutex::Await, then
441 // signals released_cv.
442 pool->Schedule([&state] {
443 state.release_mu.Lock();
444
445 state.barrier_mu.Lock();
446 state.barrier = true;
447 state.barrier_mu.Unlock();
448
449 state.release_mu.Await(absl::Condition(&state.release));
450 state.released_cv.Signal();
451 state.release_mu.Unlock();
452 });
453
454 state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
455 state.barrier_mu.Unlock();
456 state.release_mu.Lock();
457 // Thread A is now blocked on release by way of Mutex::Await().
458
459 // Set release. Calling released_cv.Wait() should un-block thread A,
460 // which will signal released_cv. If not, the test will hang.
461 state.release = true;
462 state.released_cv.Wait(&state.release_mu);
463 state.release_mu.Unlock();
464 }
465
466 // Test that a CondVar.WaitWithTimeout(&mutex) can un-block a call to
467 // mutex.Await() in another thread.
468 TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) {
469 // Use a struct so the lock annotations apply.
470 struct {
471 absl::Mutex barrier_mu;
472 bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
473
474 absl::Mutex release_mu;
475 bool release ABSL_GUARDED_BY(release_mu) = false;
476 absl::CondVar released_cv;
477 } state;
478
479 auto pool = CreateDefaultPool();
480
481 // Thread A. Sets barrier, waits for release using Mutex::Await, then
482 // signals released_cv.
483 pool->Schedule([&state] {
484 state.release_mu.Lock();
485
486 state.barrier_mu.Lock();
487 state.barrier = true;
488 state.barrier_mu.Unlock();
489
490 state.release_mu.Await(absl::Condition(&state.release));
491 state.released_cv.Signal();
492 state.release_mu.Unlock();
493 });
494
495 state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
496 state.barrier_mu.Unlock();
497 state.release_mu.Lock();
498 // Thread A is now blocked on release by way of Mutex::Await().
499
500 // Set release. Calling released_cv.Wait() should un-block thread A,
501 // which will signal released_cv. If not, the test will hang.
502 state.release = true;
503 EXPECT_TRUE(
504 !state.released_cv.WaitWithTimeout(&state.release_mu, absl::Seconds(10)))
505 << "; Unrecoverable test failure: CondVar::WaitWithTimeout did not "
506 "unblock the absl::Mutex::Await call in another thread.";
507
508 state.release_mu.Unlock();
509 }
510
511 // Test for regression of a bug in loop of TryRemove()
512 TEST(Mutex, MutexTimeoutBug) {
513 auto tp = CreateDefaultPool();
514
515 TimeoutBugStruct x;
516 x.a = false;
517 x.a_waiter_count = 2;
518 tp->Schedule(std::bind(&WaitForA, &x));
519 tp->Schedule(std::bind(&WaitForA, &x));
520 absl::SleepFor(absl::Seconds(1)); // Allow first two threads to hang.
521 // The skip field of the second will point to the first because there are
522 // only two.
523
524 // Now cause a thread waiting on an always-false condition to time out.
525 // This would deadlock when the bug was present.
526 bool always_false = false;
527 x.mu.LockWhenWithTimeout(absl::Condition(&always_false),
528 absl::Milliseconds(500));
529
530 // If we get here, the bug is not present. Clean up the state.
531
532 x.a = true; // wake up the two waiters on A
533 x.mu.Await(absl::Condition(&NoAWaiters, &x)); // wait for them to exit
534 x.mu.Unlock();
535 }
536
537 struct CondVarWaitDeadlock : testing::TestWithParam<int> {
538 absl::Mutex mu;
539 absl::CondVar cv;
540 bool cond1 = false;
541 bool cond2 = false;
542 bool read_lock1;
543 bool read_lock2;
544 bool signal_unlocked;
545
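  // The three low bits of the test parameter select whether each waiter uses a
  // reader or writer lock, and whether the CondVar is signaled before or after
  // unlocking mu.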
546 CondVarWaitDeadlock() {
547 read_lock1 = GetParam() & (1 << 0);
548 read_lock2 = GetParam() & (1 << 1);
549 signal_unlocked = GetParam() & (1 << 2);
550 }
551
552 void Waiter1() {
553 if (read_lock1) {
554 mu.ReaderLock();
555 while (!cond1) {
556 cv.Wait(&mu);
557 }
558 mu.ReaderUnlock();
559 } else {
560 mu.Lock();
561 while (!cond1) {
562 cv.Wait(&mu);
563 }
564 mu.Unlock();
565 }
566 }
567
568 void Waiter2() {
569 if (read_lock2) {
570 mu.ReaderLockWhen(absl::Condition(&cond2));
571 mu.ReaderUnlock();
572 } else {
573 mu.LockWhen(absl::Condition(&cond2));
574 mu.Unlock();
575 }
576 }
577 };
578
579 // Test for a deadlock bug in Mutex::Fer().
580 // The sequence of events that lead to the deadlock is:
581 // 1. waiter1 blocks on cv in read mode (mu bits = 0).
582 // 2. waiter2 blocks on mu in either mode (mu bits = kMuWait).
583 // 3. main thread locks mu, sets cond1, unlocks mu (mu bits = kMuWait).
584 // 4. main thread signals on cv and this eventually calls Mutex::Fer().
585 // Currently Fer wakes waiter1 since mu bits = kMuWait (mutex is unlocked).
586 // Before the bug fix Fer neither woke waiter1 nor queued it on mutex,
587 // which resulted in deadlock.
588 TEST_P(CondVarWaitDeadlock, Test) {
589 auto waiter1 = CreatePool(1);
590 auto waiter2 = CreatePool(1);
591 waiter1->Schedule([this] { this->Waiter1(); });
592 waiter2->Schedule([this] { this->Waiter2(); });
593
594 // Wait while threads block (best-effort is fine).
595 absl::SleepFor(absl::Milliseconds(100));
596
597 // Wake condwaiter.
598 mu.Lock();
599 cond1 = true;
600 if (signal_unlocked) {
601 mu.Unlock();
602 cv.Signal();
603 } else {
604 cv.Signal();
605 mu.Unlock();
606 }
607 waiter1.reset(); // "join" waiter1
608
609 // Wake waiter.
610 mu.Lock();
611 cond2 = true;
612 mu.Unlock();
613 waiter2.reset(); // "join" waiter2
614 }
615
616 INSTANTIATE_TEST_SUITE_P(CondVarWaitDeadlockTest, CondVarWaitDeadlock,
617 ::testing::Range(0, 8),
618 ::testing::PrintToStringParamName());
619
620 // --------------------------------------------------------
621 // Test for fix of bug in DequeueAllWakeable()
622 // Bug was that if there was more than one waiting reader
623 // and all should be woken, the most recently blocked one
624 // would not be.
625
626 struct DequeueAllWakeableBugStruct {
627 absl::Mutex mu;
628 absl::Mutex mu2; // protects all fields below
629 int unfinished_count; // count of unfinished readers; under mu2
630 bool done1; // unfinished_count == 0; under mu2
631 int finished_count; // count of finished readers, under mu2
632 bool done2; // finished_count == 0; under mu2
633 };
634
635 // Test for regression of a bug in loop of DequeueAllWakeable()
636 static void AcquireAsReader(DequeueAllWakeableBugStruct *x) {
637 x->mu.ReaderLock();
638 x->mu2.Lock();
639 x->unfinished_count--;
640 x->done1 = (x->unfinished_count == 0);
641 x->mu2.Unlock();
642 // make sure that both readers acquired mu before we release it.
643 absl::SleepFor(absl::Seconds(2));
644 x->mu.ReaderUnlock();
645
646 x->mu2.Lock();
647 x->finished_count--;
648 x->done2 = (x->finished_count == 0);
649 x->mu2.Unlock();
650 }
651
652 // Test for regression of a bug in loop of DequeueAllWakeable()
653 TEST(Mutex, MutexReaderWakeupBug) {
654 auto tp = CreateDefaultPool();
655
656 DequeueAllWakeableBugStruct x;
657 x.unfinished_count = 2;
658 x.done1 = false;
659 x.finished_count = 2;
660 x.done2 = false;
661 x.mu.Lock(); // acquire mu exclusively
662 // queue two threads that will block on reader locks on x.mu
663 tp->Schedule(std::bind(&AcquireAsReader, &x));
664 tp->Schedule(std::bind(&AcquireAsReader, &x));
665 absl::SleepFor(absl::Seconds(1)); // give time for reader threads to block
666 x.mu.Unlock(); // wake them up
667
668 // both readers should finish promptly
669 EXPECT_TRUE(
670 x.mu2.LockWhenWithTimeout(absl::Condition(&x.done1), absl::Seconds(10)));
671 x.mu2.Unlock();
672
673 EXPECT_TRUE(
674 x.mu2.LockWhenWithTimeout(absl::Condition(&x.done2), absl::Seconds(10)));
675 x.mu2.Unlock();
676 }
677
678 struct LockWhenTestStruct {
679 absl::Mutex mu1;
680 bool cond = false;
681
682 absl::Mutex mu2;
683 bool waiting = false;
684 };
685
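// Condition function with a deliberate side effect: it records (under mu2)
// that the waiter has started evaluating the condition before returning the
// actual condition value.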
686 static bool LockWhenTestIsCond(LockWhenTestStruct *s) {
687 s->mu2.Lock();
688 s->waiting = true;
689 s->mu2.Unlock();
690 return s->cond;
691 }
692
693 static void LockWhenTestWaitForIsCond(LockWhenTestStruct *s) {
694 s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s));
695 s->mu1.Unlock();
696 }
697
698 TEST(Mutex, LockWhen) {
699 LockWhenTestStruct s;
700
701 std::thread t(LockWhenTestWaitForIsCond, &s);
702 s.mu2.LockWhen(absl::Condition(&s.waiting));
703 s.mu2.Unlock();
704
705 s.mu1.Lock();
706 s.cond = true;
707 s.mu1.Unlock();
708
709 t.join();
710 }
711
712 TEST(Mutex, LockWhenGuard) {
713 absl::Mutex mu;
714 int n = 30;
715 bool done = false;
716
717 // We don't inline the lambda because the conversion is ambiguous in MSVC.
718 bool (*cond_eq_10)(int *) = [](int *p) { return *p == 10; };
719 bool (*cond_lt_10)(int *) = [](int *p) { return *p < 10; };
720
721 std::thread t1([&mu, &n, &done, cond_eq_10]() {
722 absl::ReaderMutexLock lock(&mu, absl::Condition(cond_eq_10, &n));
723 done = true;
724 });
725
726 std::thread t2[10];
727 for (std::thread &t : t2) {
728 t = std::thread([&mu, &n, cond_lt_10]() {
729 absl::WriterMutexLock lock(&mu, absl::Condition(cond_lt_10, &n));
730 ++n;
731 });
732 }
733
734 {
735 absl::MutexLock lock(&mu);
736 n = 0;
737 }
738
739 for (std::thread &t : t2) t.join();
740 t1.join();
741
742 EXPECT_TRUE(done);
743 EXPECT_EQ(n, 10);
744 }
745
746 // --------------------------------------------------------
747 // The following test requires Mutex::ReaderLock to be a real shared
748 // lock, which is not the case in all builds.
749 #if !defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
750
751 // Test for fix of bug in UnlockSlow() that incorrectly decremented the reader
752 // count when putting a thread to sleep waiting for a false condition when the
753 // lock was not held.
754
755 // For this bug to strike, we make a thread wait on a free mutex with no
756 // waiters by causing its wakeup condition to be false. Then the
757 // next two acquirers must be readers. The bug causes the lock
758 // to be released when one reader unlocks, rather than both.
759
760 struct ReaderDecrementBugStruct {
761 bool cond; // to delay first thread (under mu)
762 int done; // reference count (under mu)
763 absl::Mutex mu;
764
765 bool waiting_on_cond; // under mu2
766 bool have_reader_lock; // under mu2
767 bool complete; // under mu2
768 absl::Mutex mu2; // > mu
769 };
770
771 // L >= mu, L < mu_waiting_on_cond
772 static bool IsCond(void *v) {
773 ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
774 x->mu2.Lock();
775 x->waiting_on_cond = true;
776 x->mu2.Unlock();
777 return x->cond;
778 }
779
780 // L >= mu
781 static bool AllDone(void *v) {
782 ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
783 return x->done == 0;
784 }
785
786 // L={}
787 static void WaitForCond(ReaderDecrementBugStruct *x) {
788 absl::Mutex dummy;
789 absl::MutexLock l(&dummy);
790 x->mu.LockWhen(absl::Condition(&IsCond, x));
791 x->done--;
792 x->mu.Unlock();
793 }
794
795 // L={}
796 static void GetReadLock(ReaderDecrementBugStruct *x) {
797 x->mu.ReaderLock();
798 x->mu2.Lock();
799 x->have_reader_lock = true;
800 x->mu2.Await(absl::Condition(&x->complete));
801 x->mu2.Unlock();
802 x->mu.ReaderUnlock();
803 x->mu.Lock();
804 x->done--;
805 x->mu.Unlock();
806 }
807
808 // Test for reader counter being decremented incorrectly by waiter
809 // with false condition.
810 TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
811 ReaderDecrementBugStruct x;
812 x.cond = false;
813 x.waiting_on_cond = false;
814 x.have_reader_lock = false;
815 x.complete = false;
816 x.done = 2; // initial ref count
817
818 // Run WaitForCond() and wait for it to sleep
819 std::thread thread1(WaitForCond, &x);
820 x.mu2.LockWhen(absl::Condition(&x.waiting_on_cond));
821 x.mu2.Unlock();
822
823 // Run GetReadLock(), and wait for it to get the read lock
824 std::thread thread2(GetReadLock, &x);
825 x.mu2.LockWhen(absl::Condition(&x.have_reader_lock));
826 x.mu2.Unlock();
827
828 // Get the reader lock ourselves, and release it.
829 x.mu.ReaderLock();
830 x.mu.ReaderUnlock();
831
832 // The lock should be held in read mode by GetReadLock().
833 // If we have the bug, the lock will be free.
834 x.mu.AssertReaderHeld();
835
836 // Wake up all the threads.
837 x.mu2.Lock();
838 x.complete = true;
839 x.mu2.Unlock();
840
841 // TODO(delesley): turn on analysis once lock upgrading is supported.
842 // (This call upgrades the lock from shared to exclusive.)
843 x.mu.Lock();
844 x.cond = true;
845 x.mu.Await(absl::Condition(&AllDone, &x));
846 x.mu.Unlock();
847
848 thread1.join();
849 thread2.join();
850 }
851 #endif // !ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
852
853 // Test that we correctly handle the situation when a lock is
854 // held and then destroyed (w/o unlocking).
855 #ifdef ABSL_HAVE_THREAD_SANITIZER
856 // TSAN reports errors when locked Mutexes are destroyed.
857 TEST(Mutex, DISABLED_LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
858 #else
859 TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
860 #endif
861 for (int i = 0; i != 10; i++) {
862 // Create, lock and destroy 10 locks.
863 const int kNumLocks = 10;
864 auto mu = absl::make_unique<absl::Mutex[]>(kNumLocks);
865 for (int j = 0; j != kNumLocks; j++) {
866 if ((j % 2) == 0) {
867 mu[j].WriterLock();
868 } else {
869 mu[j].ReaderLock();
870 }
871 }
872 }
873 }
874
875 // Some functions taking pointers to non-const.
876 bool Equals42(int *p) { return *p == 42; }
877 bool Equals43(int *p) { return *p == 43; }
878
879 // Some functions taking pointers to const.
880 bool ConstEquals42(const int *p) { return *p == 42; }
881 bool ConstEquals43(const int *p) { return *p == 43; }
882
883 // Some function templates taking pointers. Note it's possible for `T` to be
884 // deduced as non-const or const, which creates the potential for ambiguity,
885 // but which the implementation is careful to avoid.
886 template <typename T>
887 bool TemplateEquals42(T *p) {
888 return *p == 42;
889 }
890 template <typename T>
891 bool TemplateEquals43(T *p) {
892 return *p == 43;
893 }
894
895 TEST(Mutex, FunctionPointerCondition) {
896 // Some arguments.
897 int x = 42;
898 const int const_x = 42;
899
900 // Parameter non-const, argument non-const.
901 EXPECT_TRUE(absl::Condition(Equals42, &x).Eval());
902 EXPECT_FALSE(absl::Condition(Equals43, &x).Eval());
903
904 // Parameter const, argument non-const.
905 EXPECT_TRUE(absl::Condition(ConstEquals42, &x).Eval());
906 EXPECT_FALSE(absl::Condition(ConstEquals43, &x).Eval());
907
908 // Parameter const, argument const.
909 EXPECT_TRUE(absl::Condition(ConstEquals42, &const_x).Eval());
910 EXPECT_FALSE(absl::Condition(ConstEquals43, &const_x).Eval());
911
912 // Parameter type deduced, argument non-const.
913 EXPECT_TRUE(absl::Condition(TemplateEquals42, &x).Eval());
914 EXPECT_FALSE(absl::Condition(TemplateEquals43, &x).Eval());
915
916 // Parameter type deduced, argument const.
917 EXPECT_TRUE(absl::Condition(TemplateEquals42, &const_x).Eval());
918 EXPECT_FALSE(absl::Condition(TemplateEquals43, &const_x).Eval());
919
920 // Parameter non-const, argument const is not well-formed.
921 EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(Equals42),
922 decltype(&const_x)>::value));
923 // Validate use of is_constructible by contrasting to a well-formed case.
924 EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(ConstEquals42),
925 decltype(&const_x)>::value));
926 }
927
928 // Example base and derived class for use in predicates and test below. Not a
929 // particularly realistic example, but it suffices for testing purposes.
930 struct Base {
931 explicit Base(int v) : value(v) {}
932 int value;
933 };
934 struct Derived : Base {
935 explicit Derived(int v) : Base(v) {}
936 };
937
938 // Some functions taking pointer to non-const `Base`.
939 bool BaseEquals42(Base *p) { return p->value == 42; }
940 bool BaseEquals43(Base *p) { return p->value == 43; }
941
942 // Some functions taking pointer to const `Base`.
943 bool ConstBaseEquals42(const Base *p) { return p->value == 42; }
944 bool ConstBaseEquals43(const Base *p) { return p->value == 43; }
945
946 TEST(Mutex, FunctionPointerConditionWithDerivedToBaseConversion) {
947 // Some arguments.
948 Derived derived(42);
949 const Derived const_derived(42);
950
951 // Parameter non-const base, argument derived non-const.
952 EXPECT_TRUE(absl::Condition(BaseEquals42, &derived).Eval());
953 EXPECT_FALSE(absl::Condition(BaseEquals43, &derived).Eval());
954
955 // Parameter const base, argument derived non-const.
956 EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &derived).Eval());
957 EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &derived).Eval());
958
959 // Parameter const base, argument derived const.
960 EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
961 EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
962
963 // Parameter const base, argument derived const.
964 EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
965 EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
966
967 // Parameter derived, argument base is not well-formed.
968 bool (*derived_pred)(const Derived *) = [](const Derived *) { return true; };
969 EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
970 Base *>::value));
971 EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
972 const Base *>::value));
973 // Validate use of is_constructible by contrasting to well-formed cases.
974 EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
975 Derived *>::value));
976 EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
977 const Derived *>::value));
978 }
979
980 struct True {
981 template <class... Args>
982 bool operator()(Args...) const {
983 return true;
984 }
985 };
986
987 struct DerivedTrue : True {};
988
989 TEST(Mutex, FunctorCondition) {
990 { // Variadic
991 True f;
992 EXPECT_TRUE(absl::Condition(&f).Eval());
993 }
994
995 { // Inherited
996 DerivedTrue g;
997 EXPECT_TRUE(absl::Condition(&g).Eval());
998 }
999
1000 { // lambda
1001 int value = 3;
1002 auto is_zero = [&value] { return value == 0; };
1003 absl::Condition c(&is_zero);
1004 EXPECT_FALSE(c.Eval());
1005 value = 0;
1006 EXPECT_TRUE(c.Eval());
1007 }
1008
1009 { // bind
1010 int value = 0;
1011 auto is_positive = std::bind(std::less<int>(), 0, std::cref(value));
1012 absl::Condition c(&is_positive);
1013 EXPECT_FALSE(c.Eval());
1014 value = 1;
1015 EXPECT_TRUE(c.Eval());
1016 }
1017
1018 { // std::function
1019 int value = 3;
1020 std::function<bool()> is_zero = [&value] { return value == 0; };
1021 absl::Condition c(&is_zero);
1022 EXPECT_FALSE(c.Eval());
1023 value = 0;
1024 EXPECT_TRUE(c.Eval());
1025 }
1026 }
1027
1028 TEST(Mutex, ConditionSwap) {
1029 // Ensure that Conditions can be swap'ed.
1030 bool b1 = true;
1031 absl::Condition c1(&b1);
1032 bool b2 = false;
1033 absl::Condition c2(&b2);
1034 EXPECT_TRUE(c1.Eval());
1035 EXPECT_FALSE(c2.Eval());
1036 std::swap(c1, c2);
1037 EXPECT_FALSE(c1.Eval());
1038 EXPECT_TRUE(c2.Eval());
1039 }
1040
1041 // --------------------------------------------------------
1042 // Test for bug with pattern of readers using a condvar. The bug was that if a
1043 // reader went to sleep on a condition variable while one or more other readers
1044 // held the lock, but there were no waiters, the reader count (held in the
1045 // mutex word) would be lost. (This is because Enqueue() had at one time
1046 // always placed the thread on the Mutex queue. Later (CL 4075610), to
1047 // tolerate re-entry into Mutex from a Condition predicate, Enqueue() was
1048 // changed so that it could also place a thread on a condition-variable. This
1049 // introduced the case where Enqueue() returned with an empty queue, and this
1050 // case was handled incorrectly in one place.)
1051
1052 static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv,
1053 int *running) {
1054 std::random_device dev;
1055 std::mt19937 gen(dev());
1056 std::uniform_int_distribution<int> random_millis(0, 15);
1057 mu->ReaderLock();
1058 while (*running == 3) {
1059 absl::SleepFor(absl::Milliseconds(random_millis(gen)));
1060 cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen)));
1061 }
1062 mu->ReaderUnlock();
1063 mu->Lock();
1064 (*running)--;
1065 mu->Unlock();
1066 }
1067
1068 static bool IntIsZero(int *x) { return *x == 0; }
1069
1070 // Test for reader waiting condition variable when there are other readers
1071 // but no waiters.
1072 TEST(Mutex, TestReaderOnCondVar) {
1073 auto tp = CreateDefaultPool();
1074 absl::Mutex mu;
1075 absl::CondVar cv;
1076 int running = 3;
1077 tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
1078 tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
1079 absl::SleepFor(absl::Seconds(2));
1080 mu.Lock();
1081 running--;
1082 mu.Await(absl::Condition(&IntIsZero, &running));
1083 mu.Unlock();
1084 }
1085
1086 // --------------------------------------------------------
1087 struct AcquireFromConditionStruct {
1088 absl::Mutex mu0; // protects value, done
1089 int value; // times condition function is called; under mu0,
1090 bool done; // done with test? under mu0
1091 absl::Mutex mu1; // used to attempt to mess up state of mu0
1092 absl::CondVar cv; // so the condition function can be invoked from
1093 // CondVar::Wait().
1094 };
1095
1096 static bool ConditionWithAcquire(AcquireFromConditionStruct *x) {
1097 x->value++; // count times this function is called
1098
1099 if (x->value == 2 || x->value == 3) {
1100 // On the second and third invocation of this function, sleep for 100ms,
1101 // but with the side effect of altering the state of a Mutex other than the
1102 // one for which this is a condition. The spec now explicitly allows this
1103 // side effect; previously it was illegal.
1104 bool always_false = false;
1105 x->mu1.LockWhenWithTimeout(absl::Condition(&always_false),
1106 absl::Milliseconds(100));
1107 x->mu1.Unlock();
1108 }
1109 CHECK_LT(x->value, 4) << "should not be invoked a fourth time";
1110
1111 // We arrange for the condition to return true on only the 2nd and 3rd calls.
1112 return x->value == 2 || x->value == 3;
1113 }
1114
1115 static void WaitForCond2(AcquireFromConditionStruct *x) {
1116 // wait for cond0 to become true
1117 x->mu0.LockWhen(absl::Condition(&ConditionWithAcquire, x));
1118 x->done = true;
1119 x->mu0.Unlock();
1120 }
1121
1122 // Test for Condition whose function acquires other Mutexes
1123 TEST(Mutex, AcquireFromCondition) {
1124 auto tp = CreateDefaultPool();
1125
1126 AcquireFromConditionStruct x;
1127 x.value = 0;
1128 x.done = false;
1129 tp->Schedule(
1130 std::bind(&WaitForCond2, &x)); // run WaitForCond2() in a thread T
1131 // T will hang because the first invocation of ConditionWithAcquire() will
1132 // return false.
1133 absl::SleepFor(absl::Milliseconds(500)); // allow T time to hang
1134
1135 x.mu0.Lock();
1136 x.cv.WaitWithTimeout(&x.mu0, absl::Milliseconds(500)); // wake T
1137 // T will be woken because the Wait() will call ConditionWithAcquire()
1138 // for the second time, and it will return true.
1139
1140 x.mu0.Unlock();
1141
1142 // T will then acquire the lock and recheck its own condition.
1143 // It will find the condition true, as this is the third invocation,
1144 // but the use of another Mutex by the calling function will
1145 // cause the old mutex implementation to think that the outer
1146 // LockWhen() has timed out because the inner LockWhenWithTimeout() did.
1147 // T will then check the condition a fourth time because it finds a
1148 // timeout occurred. This should not happen in the new
1149 // implementation that allows the Condition function to use Mutexes.
1150
1151 // It should also succeed, even though the Condition function
1152 // is being invoked from CondVar::Wait, and thus this thread
1153 // is conceptually waiting both on the condition variable, and on mu2.
1154
1155 x.mu0.LockWhen(absl::Condition(&x.done));
1156 x.mu0.Unlock();
1157 }
1158
1159 TEST(Mutex, DeadlockDetector) {
1160 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1161
1162 // check that we can call ForgetDeadlockInfo() on a lock with the lock held
1163 absl::Mutex m1;
1164 absl::Mutex m2;
1165 absl::Mutex m3;
1166 absl::Mutex m4;
1167
1168 m1.Lock(); // m1 gets ID1
1169 m2.Lock(); // m2 gets ID2
1170 m3.Lock(); // m3 gets ID3
1171 m3.Unlock();
1172 m2.Unlock();
1173 // m1 still held
1174 m1.ForgetDeadlockInfo(); // m1 loses ID
1175 m2.Lock(); // m2 gets ID2
1176 m3.Lock(); // m3 gets ID3
1177 m4.Lock(); // m4 gets ID4
1178 m3.Unlock();
1179 m2.Unlock();
1180 m4.Unlock();
1181 m1.Unlock();
1182 }
1183
1184 // Bazel has a test "warning" file that programs can write to if the
1185 // test should pass with a warning. This class disables the warning
1186 // file until it goes out of scope.
1187 class ScopedDisableBazelTestWarnings {
1188 public:
1189 ScopedDisableBazelTestWarnings() {
1190 #ifdef _WIN32
1191 char file[MAX_PATH];
1192 if (GetEnvironmentVariableA(kVarName, file, sizeof(file)) < sizeof(file)) {
1193 warnings_output_file_ = file;
1194 SetEnvironmentVariableA(kVarName, nullptr);
1195 }
1196 #else
1197 const char *file = getenv(kVarName);
1198 if (file != nullptr) {
1199 warnings_output_file_ = file;
1200 unsetenv(kVarName);
1201 }
1202 #endif
1203 }
1204
1205 ~ScopedDisableBazelTestWarnings() {
1206 if (!warnings_output_file_.empty()) {
1207 #ifdef _WIN32
1208 SetEnvironmentVariableA(kVarName, warnings_output_file_.c_str());
1209 #else
1210 setenv(kVarName, warnings_output_file_.c_str(), 0);
1211 #endif
1212 }
1213 }
1214
1215 private:
1216 static const char kVarName[];
1217 std::string warnings_output_file_;
1218 };
1219 const char ScopedDisableBazelTestWarnings::kVarName[] =
1220 "TEST_WARNINGS_OUTPUT_FILE";
1221
1222 #ifdef ABSL_HAVE_THREAD_SANITIZER
1223 // This test intentionally creates deadlocks to test the deadlock detector.
1224 TEST(Mutex, DISABLED_DeadlockDetectorBazelWarning) {
1225 #else
1226 TEST(Mutex, DeadlockDetectorBazelWarning) {
1227 #endif
1228 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
1229
1230 // Cause deadlock detection to detect something, if it's
1231 // compiled in and enabled. But turn off the bazel warning.
1232 ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
1233
1234 absl::Mutex mu0;
1235 absl::Mutex mu1;
1236 bool got_mu0 = mu0.TryLock();
1237 mu1.Lock(); // acquire mu1 while holding mu0
1238 if (got_mu0) {
1239 mu0.Unlock();
1240 }
1241 if (mu0.TryLock()) { // try lock shouldn't cause deadlock detector to fire
1242 mu0.Unlock();
1243 }
1244 mu0.Lock(); // acquire mu0 while holding mu1; should get one deadlock
1245 // report here
1246 mu0.Unlock();
1247 mu1.Unlock();
1248
1249 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1250 }
1251
1252 TEST(Mutex, DeadlockDetectorLongCycle) {
1253 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
1254
1255 // This test generates a warning if it passes, and crashes otherwise.
1256 // Cause bazel to ignore the warning.
1257 ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
1258
1259 // Check that we survive a deadlock with a lock cycle.
1260 std::vector<absl::Mutex> mutex(100);
1261 for (size_t i = 0; i != mutex.size(); i++) {
1262 mutex[i].Lock();
1263 mutex[(i + 1) % mutex.size()].Lock();
1264 mutex[i].Unlock();
1265 mutex[(i + 1) % mutex.size()].Unlock();
1266 }
1267
1268 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1269 }
1270
1271 // This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
1272 // annotation-based static thread-safety analysis is not currently
1273 // predicate-aware and cannot tell if the two for-loops that acquire and
1274 // release the locks have the same predicates.
1275 TEST(Mutex, DeadlockDetectorStressTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
1276 // Stress test: Here we create a large number of locks and use all of them.
1277 // If a deadlock detector keeps a full graph of lock acquisition order,
1278 // it will likely be too slow for this test to pass.
1279 const int n_locks = 1 << 17;
1280 auto array_of_locks = absl::make_unique<absl::Mutex[]>(n_locks);
1281 for (int i = 0; i < n_locks; i++) {
1282 int end = std::min(n_locks, i + 5);
1283 // acquire and then release locks i, i+1, ..., i+4
1284 for (int j = i; j < end; j++) {
1285 array_of_locks[j].Lock();
1286 }
1287 for (int j = i; j < end; j++) {
1288 array_of_locks[j].Unlock();
1289 }
1290 }
1291 }
1292
1293 #ifdef ABSL_HAVE_THREAD_SANITIZER
1294 // TSAN reports errors when locked Mutexes are destroyed.
1295 TEST(Mutex, DISABLED_DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
1296 #else
1297 TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
1298 #endif
1299 // Test a scenario where a cached deadlock graph node id in the
1300 // list of held locks is not invalidated when the corresponding
1301 // mutex is deleted.
1302 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1303 // Mutex that will be destroyed while being held
1304 absl::Mutex *a = new absl::Mutex;
1305 // Other mutexes needed by test
1306 absl::Mutex b, c;
1307
1308 // Hold mutex.
1309 a->Lock();
1310
1311 // Force deadlock id assignment by acquiring another lock.
1312 b.Lock();
1313 b.Unlock();
1314
1315 // Delete the mutex. The Mutex destructor tries to remove held locks,
1316 // but the attempt isn't foolproof. It can fail if:
1317 // (a) Deadlock detection is currently disabled.
1318 // (b) The destruction is from another thread.
1319 // We exploit (a) by temporarily disabling deadlock detection.
1320 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
1321 delete a;
1322 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1323
1324 // Now acquire another lock which will force a deadlock id assignment.
1325 // We should end up getting assigned the same deadlock id that was
1326 // freed up when "a" was deleted, which will cause a spurious deadlock
1327 // report if the held lock entry for "a" was not invalidated.
1328 c.Lock();
1329 c.Unlock();
1330 }
1331
1332 // --------------------------------------------------------
1333 // Test for timeouts/deadlines on condition waits that are specified using
1334 // absl::Duration and absl::Time. For each waiting function we test with
1335 // a timeout/deadline that has already expired/passed, one that is infinite
1336 // and so never expires/passes, and one that will expire/pass in the near
1337 // future.
1338
1339 static absl::Duration TimeoutTestAllowedSchedulingDelay() {
1340 // Note: we use a function here because Microsoft Visual Studio fails to
1341 // properly initialize constexpr static absl::Duration variables.
1342 return absl::Milliseconds(150);
1343 }
1344
1345 // Returns true if `actual_delay` is close enough to `expected_delay` to pass
1346 // the timeouts/deadlines test. Otherwise, logs warnings and returns false.
1347 ABSL_MUST_USE_RESULT
1348 static bool DelayIsWithinBounds(absl::Duration expected_delay,
1349 absl::Duration actual_delay) {
1350 bool pass = true;
1351 // Do not allow the observed delay to be less than expected. This may occur
1352 // in practice due to clock skew or when the synchronization primitives use a
1353 // different clock than absl::Now(), but these cases should be handled by
1354 // the retry mechanism in each TimeoutTest.
1355 if (actual_delay < expected_delay) {
1356 LOG(WARNING) << "Actual delay " << actual_delay
1357 << " was too short, expected " << expected_delay
1358 << " (difference " << actual_delay - expected_delay << ")";
1359 pass = false;
1360 }
1361 // If the expected delay is <= zero then allow a small error tolerance, since
1362 // we do not expect context switches to occur during test execution.
1363 // Otherwise, thread scheduling delays may be substantial in rare cases, so
1364 // tolerate up to TimeoutTestAllowedSchedulingDelay() of error.
1365 absl::Duration tolerance = expected_delay <= absl::ZeroDuration()
1366 ? absl::Milliseconds(10)
1367 : TimeoutTestAllowedSchedulingDelay();
1368 if (actual_delay > expected_delay + tolerance) {
1369 LOG(WARNING) << "Actual delay " << actual_delay
1370 << " was too long, expected " << expected_delay
1371 << " (difference " << actual_delay - expected_delay << ")";
1372 pass = false;
1373 }
1374 return pass;
1375 }
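// For example, with the `finite` delay used below (3 * 150ms = 450ms) the
// accepted window is [450ms, 600ms]; for expected delays <= 0 it is [0, 10ms].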
1376
1377 // Parameters for TimeoutTest, below.
1378 struct TimeoutTestParam {
1379 // The file and line number (used for logging purposes only).
1380 const char *from_file;
1381 int from_line;
1382
1383 // Should the absolute deadline API based on absl::Time be tested? If false,
1384 // the relative deadline API based on absl::Duration is tested.
1385 bool use_absolute_deadline;
1386
1387 // The deadline/timeout used when calling the API being tested
1388 // (e.g. Mutex::LockWhenWithDeadline).
1389 absl::Duration wait_timeout;
1390
1391 // The delay before the condition will be set true by the test code. If zero
1392 // or negative, the condition is set true immediately (before calling the API
1393 // being tested). Otherwise, if infinite, the condition is never set true.
1394 // Otherwise a closure is scheduled for the future that sets the condition
1395 // true.
1396 absl::Duration satisfy_condition_delay;
1397
1398 // The expected result of the condition after the call to the API being
1399 // tested. Generally `true` means the condition was true when the API
1400 // returned, while `false` indicates an expected timeout.
1401 bool expected_result;
1402
1403 // The expected delay before the API under test returns. This is inherently
1404 // flaky, so some slop is allowed (see `DelayIsWithinBounds` above), and the
1405 // test keeps trying indefinitely until this constraint passes.
1406 absl::Duration expected_delay;
1407 };
1408
1409 // Print a `TimeoutTestParam` to a debug log.
1410 std::ostream &operator<<(std::ostream &os, const TimeoutTestParam ¶m) {
1411 return os << "from: " << param.from_file << ":" << param.from_line
1412 << " use_absolute_deadline: "
1413 << (param.use_absolute_deadline ? "true" : "false")
1414 << " wait_timeout: " << param.wait_timeout
1415 << " satisfy_condition_delay: " << param.satisfy_condition_delay
1416 << " expected_result: "
1417 << (param.expected_result ? "true" : "false")
1418 << " expected_delay: " << param.expected_delay;
1419 }
1420
1421 // Like `thread::Executor::ScheduleAt` except:
1422 // a) Delays zero or negative are executed immediately in the current thread.
1423 // b) Infinite delays are never scheduled.
1424 // c) Calls this test's `ScheduleAfter` helper instead of using `pool` directly.
1425 static void RunAfterDelay(absl::Duration delay,
1426 absl::synchronization_internal::ThreadPool *pool,
1427 const std::function<void()> &callback) {
1428 if (delay <= absl::ZeroDuration()) {
1429 callback(); // immediate
1430 } else if (delay != absl::InfiniteDuration()) {
1431 ScheduleAfter(pool, delay, callback);
1432 }
1433 }
1434
1435 class TimeoutTest : public ::testing::Test,
1436 public ::testing::WithParamInterface<TimeoutTestParam> {};
1437
1438 std::vector<TimeoutTestParam> MakeTimeoutTestParamValues() {
1439 // The `finite` delay is a finite, relatively short, delay. We make it larger
1440 // than our allowed scheduling delay (slop factor) to avoid confusion when
1441 // diagnosing test failures. The other constants here have clear meanings.
1442 const absl::Duration finite = 3 * TimeoutTestAllowedSchedulingDelay();
1443 const absl::Duration never = absl::InfiniteDuration();
1444 const absl::Duration negative = -absl::InfiniteDuration();
1445 const absl::Duration immediate = absl::ZeroDuration();
1446
1447 // Every test case is run twice: once using the absolute deadline API and once
1448 // using the relative timeout API.
1449 std::vector<TimeoutTestParam> values;
1450 for (bool use_absolute_deadline : {false, true}) {
1451 // Tests with a negative timeout (deadline in the past), which should
1452 // immediately return current state of the condition.
1453
1454 // The condition is already true:
1455 values.push_back(TimeoutTestParam{
1456 __FILE__, __LINE__, use_absolute_deadline,
1457 negative, // wait_timeout
1458 immediate, // satisfy_condition_delay
1459 true, // expected_result
1460 immediate, // expected_delay
1461 });
1462
1463 // The condition becomes true, but the timeout has already expired:
1464 values.push_back(TimeoutTestParam{
1465 __FILE__, __LINE__, use_absolute_deadline,
1466 negative, // wait_timeout
1467 finite, // satisfy_condition_delay
1468 false, // expected_result
1469 immediate // expected_delay
1470 });
1471
1472 // The condition never becomes true:
1473 values.push_back(TimeoutTestParam{
1474 __FILE__, __LINE__, use_absolute_deadline,
1475 negative, // wait_timeout
1476 never, // satisfy_condition_delay
1477 false, // expected_result
1478 immediate // expected_delay
1479 });
1480
1481 // Tests with an infinite timeout (deadline in the infinite future), which
1482 // should only return when the condition becomes true.
1483
1484 // The condition is already true:
1485 values.push_back(TimeoutTestParam{
1486 __FILE__, __LINE__, use_absolute_deadline,
1487 never, // wait_timeout
1488 immediate, // satisfy_condition_delay
1489 true, // expected_result
1490 immediate // expected_delay
1491 });
1492
1493 // The condition becomes true before the (infinite) expiry:
1494 values.push_back(TimeoutTestParam{
1495 __FILE__, __LINE__, use_absolute_deadline,
1496 never, // wait_timeout
1497 finite, // satisfy_condition_delay
1498 true, // expected_result
1499 finite, // expected_delay
1500 });
1501
1502 // Tests with a (small) finite timeout (deadline soon), with the condition
1503 // becoming true both before and after its expiry.
1504
1505 // The condition is already true:
1506 values.push_back(TimeoutTestParam{
1507 __FILE__, __LINE__, use_absolute_deadline,
1508 never, // wait_timeout
1509 immediate, // satisfy_condition_delay
1510 true, // expected_result
1511 immediate // expected_delay
1512 });
1513
1514 // The condition becomes true before the expiry:
1515 values.push_back(TimeoutTestParam{
1516 __FILE__, __LINE__, use_absolute_deadline,
1517 finite * 2, // wait_timeout
1518 finite, // satisfy_condition_delay
1519 true, // expected_result
1520 finite // expected_delay
1521 });
1522
1523 // The condition becomes true, but the timeout has already expired:
1524 values.push_back(TimeoutTestParam{
1525 __FILE__, __LINE__, use_absolute_deadline,
1526 finite, // wait_timeout
1527 finite * 2, // satisfy_condition_delay
1528 false, // expected_result
1529 finite // expected_delay
1530 });
1531
1532 // The condition never becomes true:
1533 values.push_back(TimeoutTestParam{
1534 __FILE__, __LINE__, use_absolute_deadline,
1535 finite, // wait_timeout
1536 never, // satisfy_condition_delay
1537 false, // expected_result
1538 finite // expected_delay
1539 });
1540 }
1541 return values;
1542 }
1543
1544 // Instantiate `TimeoutTest` with `MakeTimeoutTestParamValues()`.
1545 INSTANTIATE_TEST_SUITE_P(All, TimeoutTest,
1546 testing::ValuesIn(MakeTimeoutTestParamValues()));
1547
1548 TEST_P(TimeoutTest, Await) {
1549 const TimeoutTestParam params = GetParam();
1550 LOG(INFO) << "Params: " << params;
1551
1552 // Because this test asserts bounds on scheduling delays it is flaky. To
1553 // compensate it loops forever until it passes. Failures express as test
1554 // timeouts, in which case the test log can be used to diagnose the issue.
1555 for (int attempt = 1;; ++attempt) {
1556 LOG(INFO) << "Attempt " << attempt;
1557
1558 absl::Mutex mu;
1559 bool value = false; // condition value (under mu)
1560
1561 std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
1562 CreateDefaultPool();
1563 RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
1564 absl::MutexLock l(&mu);
1565 value = true;
1566 });
1567
1568 absl::MutexLock lock(&mu);
1569 absl::Time start_time = absl::Now();
1570 absl::Condition cond(&value);
1571 bool result =
1572 params.use_absolute_deadline
1573 ? mu.AwaitWithDeadline(cond, start_time + params.wait_timeout)
1574 : mu.AwaitWithTimeout(cond, params.wait_timeout);
1575 if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
1576 EXPECT_EQ(params.expected_result, result);
1577 break;
1578 }
1579 }
1580 }
1581
1582 TEST_P(TimeoutTest, LockWhen) {
1583 const TimeoutTestParam params = GetParam();
1584 LOG(INFO) << "Params: " << params;
1585
1586 // Because this test asserts bounds on scheduling delays it is flaky. To
1587 // compensate it loops forever until it passes. Failures express as test
1588 // timeouts, in which case the test log can be used to diagnose the issue.
1589 for (int attempt = 1;; ++attempt) {
1590 LOG(INFO) << "Attempt " << attempt;
1591
1592 absl::Mutex mu;
1593 bool value = false; // condition value (under mu)
1594
1595 std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
1596 CreateDefaultPool();
1597 RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
1598 absl::MutexLock l(&mu);
1599 value = true;
1600 });
1601
1602 absl::Time start_time = absl::Now();
1603 absl::Condition cond(&value);
1604 bool result =
1605 params.use_absolute_deadline
1606 ? mu.LockWhenWithDeadline(cond, start_time + params.wait_timeout)
1607 : mu.LockWhenWithTimeout(cond, params.wait_timeout);
1608 mu.Unlock();
1609
1610 if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
1611 EXPECT_EQ(params.expected_result, result);
1612 break;
1613 }
1614 }
1615 }
1616
1617 TEST_P(TimeoutTest, ReaderLockWhen) {
1618 const TimeoutTestParam params = GetParam();
1619 LOG(INFO) << "Params: " << params;
1620
1621 // Because this test asserts bounds on scheduling delays it is flaky. To
1622 // compensate it loops forever until it passes. Failures express as test
1623 // timeouts, in which case the test log can be used to diagnose the issue.
  for (int attempt = 0;; ++attempt) {
    LOG(INFO) << "Attempt " << attempt;

    absl::Mutex mu;
    bool value = false;  // condition value (under mu)

    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
        CreateDefaultPool();
    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
      absl::MutexLock l(&mu);
      value = true;
    });

    absl::Time start_time = absl::Now();
    bool result =
        params.use_absolute_deadline
            ? mu.ReaderLockWhenWithDeadline(absl::Condition(&value),
                                            start_time + params.wait_timeout)
            : mu.ReaderLockWhenWithTimeout(absl::Condition(&value),
                                           params.wait_timeout);
    mu.ReaderUnlock();

    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
      EXPECT_EQ(params.expected_result, result);
      break;
    }
  }
}

TEST_P(TimeoutTest, Wait) {
  const TimeoutTestParam params = GetParam();
  LOG(INFO) << "Params: " << params;

  // Because this test asserts bounds on scheduling delays, it is inherently
  // flaky. To compensate, it loops forever until it passes. Failures manifest
  // as test timeouts, in which case the test log can be used to diagnose the
  // issue.
  for (int attempt = 0;; ++attempt) {
    LOG(INFO) << "Attempt " << attempt;

    absl::Mutex mu;
    bool value = false;  // condition value (under mu)
    absl::CondVar cv;    // signals a change of `value`

    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
        CreateDefaultPool();
    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
      absl::MutexLock l(&mu);
      value = true;
      cv.Signal();
    });

    absl::MutexLock lock(&mu);
    absl::Time start_time = absl::Now();
    absl::Duration timeout = params.wait_timeout;
    absl::Time deadline = start_time + timeout;
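    // Standard condition-variable wait loop: re-check the predicate after
    // every wakeup, and when using a relative timeout, recompute it from the
    // fixed deadline so repeated wakeups do not extend the overall wait.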
    while (!value) {
      if (params.use_absolute_deadline ? cv.WaitWithDeadline(&mu, deadline)
                                       : cv.WaitWithTimeout(&mu, timeout)) {
        break;  // deadline/timeout exceeded
      }
      timeout = deadline - absl::Now();  // recompute
    }
    bool result = value;  // note: `mu` is still held

    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
      EXPECT_EQ(params.expected_result, result);
      break;
    }
  }
}

TEST(Mutex, Logging) {
  // Allow user to look at logging output
  absl::Mutex logged_mutex;
  logged_mutex.EnableDebugLog("fido_mutex");
  absl::CondVar logged_cv;
  logged_cv.EnableDebugLog("rover_cv");
  logged_mutex.Lock();
  logged_cv.WaitWithTimeout(&logged_mutex, absl::Milliseconds(20));
  logged_mutex.Unlock();
  logged_mutex.ReaderLock();
  logged_mutex.ReaderUnlock();
  logged_mutex.Lock();
  logged_mutex.Unlock();
  logged_cv.Signal();
  logged_cv.SignalAll();
}

TEST(Mutex, LoggingAddressReuse) {
  // Repeatedly re-create a Mutex with debug logging at the same address.
  alignas(absl::Mutex) char storage[sizeof(absl::Mutex)];
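  // Each iteration below placement-news a Mutex into `storage`, so every
  // incarnation lives at the same address; `alive[i]` lets the invariant
  // callback verify it is only ever invoked for the currently live Mutex.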
  auto invariant =
      +[](void *alive) { EXPECT_TRUE(*static_cast<bool *>(alive)); };
  constexpr size_t kIters = 10;
  bool alive[kIters] = {};
  for (size_t i = 0; i < kIters; ++i) {
    absl::Mutex *mu = new (storage) absl::Mutex;
    alive[i] = true;
    mu->EnableDebugLog("Mutex");
    mu->EnableInvariantDebugging(invariant, &alive[i]);
    mu->Lock();
    mu->Unlock();
    mu->~Mutex();
    alive[i] = false;
  }
}

TEST(Mutex, LoggingBankrupcy) {
  // Test the case with too many live Mutexes with debug logging.
  std::vector<absl::Mutex> mus(1 << 20);
  for (auto &mu : mus) {
    mu.EnableDebugLog("Mutex");
  }
}

// --------------------------------------------------------

// Generate the vector of thread counts for tests parameterized on thread count.
static std::vector<int> AllThreadCountValues() {
  if (kExtendedTest) {
    return {2, 4, 8, 10, 16, 20, 24, 30, 32};
  }
  return {2, 4, 10};
}

// A test fixture parameterized by thread count.
class MutexVariableThreadCountTest : public ::testing::TestWithParam<int> {};

// Instantiate the above with `AllThreadCountValues()`.
INSTANTIATE_TEST_SUITE_P(ThreadCounts, MutexVariableThreadCountTest,
                         ::testing::ValuesIn(AllThreadCountValues()),
                         ::testing::PrintToStringParamName());

// Reduces iterations by some factor for slow platforms
// (determined empirically).
static int ScaleIterations(int x) {
  // ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE is set in the implementation
  // of Mutex that uses either std::mutex or pthread_mutex_t. Use
  // these as keys to determine the slow implementation.
#if defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
  return x / 10;
#else
  return x;
#endif
}

TEST_P(MutexVariableThreadCountTest, Mutex) {
  int threads = GetParam();
  int iterations = ScaleIterations(10000000) / threads;
  int operations = threads * iterations;
  EXPECT_EQ(RunTest(&TestMu, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
  iterations = std::min(iterations, 10);
  operations = threads * iterations;
  EXPECT_EQ(RunTestWithInvariantDebugging(&TestMu, threads, iterations,
                                          operations, CheckSumG0G1),
            operations);
#endif
}

TEST_P(MutexVariableThreadCountTest, Try) {
  int threads = GetParam();
  int iterations = 1000000 / threads;
  int operations = iterations * threads;
  EXPECT_EQ(RunTest(&TestTry, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
  iterations = std::min(iterations, 10);
  operations = threads * iterations;
  EXPECT_EQ(RunTestWithInvariantDebugging(&TestTry, threads, iterations,
                                          operations, CheckSumG0G1),
            operations);
#endif
}

TEST_P(MutexVariableThreadCountTest, R20ms) {
  int threads = GetParam();
  int iterations = 100;
  int operations = iterations * threads;
  EXPECT_EQ(RunTest(&TestR20ms, threads, iterations, operations), 0);
}

TEST_P(MutexVariableThreadCountTest, RW) {
  int threads = GetParam();
  int iterations = ScaleIterations(20000000) / threads;
  int operations = iterations * threads;
  EXPECT_EQ(RunTest(&TestRW, threads, iterations, operations), operations / 2);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
  iterations = std::min(iterations, 10);
  operations = threads * iterations;
  EXPECT_EQ(RunTestWithInvariantDebugging(&TestRW, threads, iterations,
                                          operations, CheckSumG0G1),
            operations / 2);
#endif
}

TEST_P(MutexVariableThreadCountTest, Await) {
  int threads = GetParam();
  int iterations = ScaleIterations(500000);
  int operations = iterations;
  EXPECT_EQ(RunTest(&TestAwait, threads, iterations, operations), operations);
}

TEST_P(MutexVariableThreadCountTest, SignalAll) {
  int threads = GetParam();
  int iterations = 200000 / threads;
  int operations = iterations;
  EXPECT_EQ(RunTest(&TestSignalAll, threads, iterations, operations),
            operations);
}

TEST(Mutex, Signal) {
  int threads = 2;  // TestSignal must use two threads
  int iterations = 200000;
  int operations = iterations;
  EXPECT_EQ(RunTest(&TestSignal, threads, iterations, operations), operations);
}

TEST(Mutex, Timed) {
  int threads = 10;  // Use a fixed thread count of 10
  int iterations = 1000;
  int operations = iterations;
  EXPECT_EQ(RunTest(&TestCVTimeout, threads, iterations, operations),
            operations);
}

TEST(Mutex, CVTime) {
  int threads = 10;  // Use a fixed thread count of 10
  int iterations = 1;
  EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1), threads * iterations);
}

TEST(Mutex, MuTime) {
  int threads = 10;  // Use a fixed thread count of 10
  int iterations = 1;
  EXPECT_EQ(RunTest(&TestMuTime, threads, iterations, 1), threads * iterations);
}

TEST(Mutex, SignalExitedThread) {
  // The test may expose a race when Mutex::Unlock signals a thread
  // that has already exited.
#if defined(__wasm__) || defined(__asmjs__)
  constexpr int kThreads = 1;  // OOMs under WASM
#else
  constexpr int kThreads = 100;
#endif
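  // Many outer threads each repeatedly create a short-lived inner thread that
  // contends on a fresh Mutex, maximizing the chance of hitting the
  // Unlock-vs-thread-exit race described above.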
  std::vector<std::thread> top;
  for (unsigned i = 0; i < 2 * std::thread::hardware_concurrency(); i++) {
    top.emplace_back([&]() {
      for (int i = 0; i < kThreads; i++) {
        absl::Mutex mu;
        std::thread t([&]() {
          mu.Lock();
          mu.Unlock();
        });
        mu.Lock();
        mu.Unlock();
        t.join();
      }
    });
  }
  for (auto &th : top) th.join();
}

TEST(Mutex, WriterPriority) {
  absl::Mutex mu;
  bool wrote = false;
  std::atomic<bool> saw_wrote{false};
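  // Two staggered readers each re-acquire the read lock and sleep for ~1s per
  // iteration; the writer must squeeze in between their critical sections, and
  // `saw_wrote` records that a reader then observed the write within its 10
  // iterations.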
  auto readfunc = [&]() {
    for (size_t i = 0; i < 10; ++i) {
      absl::ReaderMutexLock lock(&mu);
      if (wrote) {
        saw_wrote = true;
        break;
      }
      absl::SleepFor(absl::Seconds(1));
    }
  };
  std::thread t1(readfunc);
  absl::SleepFor(absl::Milliseconds(500));
  std::thread t2(readfunc);
  // Note: this test guards against a bug that was related to an uninit
  // PerThreadSynch::priority, so the writer intentionally runs on a new thread.
  std::thread t3([&]() {
    // The writer should be able to squeeze between the two alternating readers.
    absl::MutexLock lock(&mu);
    wrote = true;
  });
  t1.join();
  t2.join();
  t3.join();
  EXPECT_TRUE(saw_wrote.load());
}

#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
TEST(Mutex, CondVarPriority) {
  // A regression test for a bug in condition variable wait morphing, which
  // resulted in the waiting thread getting the priority of the waking thread.
  int err = 0;
  sched_param param;
  param.sched_priority = 7;
  std::thread test([&]() {
    err = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  });
  test.join();
  if (err) {
    // Setting priority usually requires special privileges.
    GTEST_SKIP() << "failed to set priority: " << strerror(err);
  }
  absl::Mutex mu;
  absl::CondVar cv;
  bool locked = false;
  bool notified = false;
  bool waiting = false;
  bool morph = false;
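  // Handshake with the helper thread below: it raises its own priority, awaits
  // `notified` while this thread holds the lock, then (once `waiting` is set)
  // signals `cv` while still holding `mu` and this thread is blocked in
  // cv.Wait(), exercising the wait-morphing path. The final EXPECT_NE checks
  // that this thread did not inherit the helper's elevated priority.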
  std::thread th([&]() {
    EXPECT_EQ(0, pthread_setschedparam(pthread_self(), SCHED_FIFO, &param));
    mu.Lock();
    locked = true;
    mu.Await(absl::Condition(&notified));
    mu.Unlock();
    EXPECT_EQ(absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()
                  ->per_thread_synch.priority,
              param.sched_priority);
    mu.Lock();
    mu.Await(absl::Condition(&waiting));
    morph = true;
    absl::SleepFor(absl::Seconds(1));
    cv.Signal();
    mu.Unlock();
  });
  mu.Lock();
  mu.Await(absl::Condition(&locked));
  notified = true;
  mu.Unlock();
  mu.Lock();
  waiting = true;
  while (!morph) {
    cv.Wait(&mu);
  }
  mu.Unlock();
  th.join();
  EXPECT_NE(absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()
                ->per_thread_synch.priority,
            param.sched_priority);
}
#endif

TEST(Mutex, LockWhenWithTimeoutResult) {
  // Check various corner cases for Await/LockWhen return value
  // with always true/always false conditions.
  absl::Mutex mu;
  const bool kAlwaysTrue = true, kAlwaysFalse = false;
  const absl::Condition kTrueCond(&kAlwaysTrue), kFalseCond(&kAlwaysFalse);
  EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
  mu.Unlock();
  EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
  EXPECT_TRUE(mu.AwaitWithTimeout(kTrueCond, absl::Milliseconds(1)));
  EXPECT_FALSE(mu.AwaitWithTimeout(kFalseCond, absl::Milliseconds(1)));
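  // At this point `mu` is still held (the timed Await/LockWhen calls above
  // return with the lock held regardless of the result). The two threads below
  // therefore block on the lock well past their 1ms timeouts; once this thread
  // releases it, they acquire the lock anyway, and the return value reflects
  // only whether the condition held.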
  std::thread th1([&]() {
    EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
    mu.Unlock();
  });
  std::thread th2([&]() {
    EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
    mu.Unlock();
  });
  absl::SleepFor(absl::Milliseconds(100));
  mu.Unlock();
  th1.join();
  th2.join();
}

}  // namespace