1 /*
2 This file is part of Valgrind, a dynamic binary instrumentation
3 framework.
4
5 Copyright (C) 2008-2008 Google Inc
6 opensource@google.com
7
8 This program is free software; you can redistribute it and/or
9 modify it under the terms of the GNU General Public License as
10 published by the Free Software Foundation; either version 2 of the
11 License, or (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
21 02111-1307, USA.
22
23 The GNU General Public License is contained in the file COPYING.
24 */
25
26 /* Author: Konstantin Serebryany <opensource@google.com>
27
28 This file contains a set of unit tests for a data race detection tool.
29
30 These tests can be compiled with pthreads (default) or
31 with any other library that supports threads, locks, cond vars, etc.
32
33 */
34
35 #include <fcntl.h>
36 #include <signal.h>
37 #include <stdlib.h>
38 #include <string.h>
39
40 #include <string>
41 #include <queue>
42 #include <vector>
43
44 #include "old_test_suite.h"
45 #include "test_utils.h"
46
47 #include <gtest/gtest.h>
48 #include "gtest_fixture_injection.h"
49
50 // The tests are
51 // - Stability tests (marked STAB)
52 // - Performance tests (marked PERF)
53 // - Feature tests
54 // - TN (true negative) : no race exists and the tool is silent.
55 // - TP (true positive) : a race exists and reported.
56 // - FN (false negative): a race exists but not reported.
57 // - FP (false positive): no race exists but the tool reports it.
58 //
59 // The feature tests are marked according to the behavior of ThreadSanitizer.
60 //
61 // TP and FP tests are annotated with ANNOTATE_EXPECT_RACE,
62 // so, no error reports should be seen when running under ThreadSanitizer.
63 //
64 // When some of the FP cases are fixed in helgrind we'll need
65 // to update these tests.
66 //
67 // Each test resides in its own namespace.
68 // Namespaces are named test01, test02, ...
69 // Please, *DO NOT* change the logic of existing tests nor rename them.
70 // Create a new test instead.
71 //
72 // Some tests use sleep()/usleep().
73 // This is not a synchronization, but a simple way to trigger
74 // some specific behaviour of the race detector's scheduler.
75
// Globals and utilities used by several tests. {{{1
// CV/COND form a classic condition-variable + predicate pair shared by many
// tests below. Every test that uses COND re-initializes it before starting
// its worker threads, so sharing is safe across sequentially-run tests.
static CondVar CV;
static int COND = 0;
79
// test00: {{{1
namespace test00 {
int GLOB = 0;
// Sanity test: single-threaded, no synchronization, no race possible.
void Run() {
  printf("test00: negative\n");
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 00)
} // namespace test00
89
90
// test01: TP. Simple race (write vs write). {{{1
namespace test01 {
int GLOB = 0;

// Writes GLOB with no synchronization; races with Worker2.
void Worker1() {
  GLOB = 1;
}

// Writes GLOB with no synchronization; races with Worker1.
void Worker2() {
  GLOB = 2;
}

void Run() {
  // The race is intentional: tell TSan to expect it (so the report is
  // consumed rather than flagged as a test failure).
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test01. TP.");
  ANNOTATE_TRACE_MEMORY(&GLOB);
  printf("test01: positive\n");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 1);
} // namespace test01
114
115
// test02: TN. Synchronization via CondVar. {{{1
namespace test02 {
int GLOB = 0;
// Two write accesses to GLOB are synchronized because
// the pair of CV.Signal() and CV.Wait() establish happens-before relation.
//
// Waiter:                      Waker:
// 1. COND = 0
// 2. Start(Waker)
// 3. MU.Lock()                 a. write(GLOB)
//                              b. MU.Lock()
//                              c. COND = 1
//                         /--- d. CV.Signal()
//  4. while(COND)        /     e. MU.Unlock()
//       CV.Wait(MU) <---/
//  5. MU.Unlock()
//  6. write(GLOB)
Mutex MU;

void Waker() {
  usleep(200000); // Make sure the waiter blocks.
  GLOB = 1;       // Unsynchronized write; ordered by the signal below.

  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();
}

void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  MU.Lock();
  // Standard condvar idiom: re-check the predicate after every wakeup.
  while(COND != 1)
    CV.Wait(&MU);
  MU.Unlock();
  GLOB = 2;       // Happens-after Waker's write via the Signal/Wait pair.
}
void Run() {
  printf("test02: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 2);
} // namespace test02
163
164
// test03: TN. Synchronization via LockWhen, signaller gets there first. {{{1
namespace test03 {
int GLOB = 0;
// Two write accesses to GLOB are synchronized via conditional critical section.
// Note that LockWhen() happens first (we use sleep(1) to make sure)!
//
// Waiter:                           Waker:
// 1. COND = 0
// 2. Start(Waker)
//                                   a. write(GLOB)
//                                   b. MU.Lock()
//                                   c. COND = 1
//                              /--- d. MU.Unlock()
// 3. MU.LockWhen(COND==1) <---/
// 4. MU.Unlock()
// 5. write(GLOB)
Mutex MU;

void Waker() {
  usleep(100000); // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  MU.LockWhen(Condition(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
  MU.Unlock(); // Waker is done!

  GLOB = 2;    // Ordered after Waker's write by LockWhen's wait semantics.
}
void Run() {
  printf("test03: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 3, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test03
208
// test04: TN. Synchronization via PCQ. {{{1
namespace test04 {
int GLOB = 0;
ProducerConsumerQueue Q(INT_MAX);
// Two write accesses to GLOB are separated by PCQ Put/Get.
//
// Putter:                        Getter:
// 1. write(GLOB)
// 2. Q.Put() ---------\          .
//                      \-------> a. Q.Get()
//                                b. write(GLOB)


// Writes GLOB, then publishes via the queue (Put is the release point).
void Putter() {
  GLOB = 1;
  Q.Put(NULL);
}

// Get blocks until Put has happened, so this write happens-after Putter's.
void Getter() {
  Q.Get();
  GLOB = 2;
}

void Run() {
  printf("test04: negative\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 4);
} // namespace test04
241
242
// test05: FP. Synchronization via CondVar, but waiter does not block. {{{1
// Since CondVar::Wait() is not called, we get a false positive.
namespace test05 {
int GLOB = 0;
// Two write accesses to GLOB are synchronized via CondVar.
// But race detector can not see it.
// See this for details:
// http://www.valgrind.org/docs/manual/hg-manual.html#hg-manual.effective-use.
//
// Waiter:                      Waker:
// 1. COND = 0
// 2. Start(Waker)
// 3. MU.Lock()                 a. write(GLOB)
//                              b. MU.Lock()
//                              c. COND = 1
//                              d. CV.Signal()
//  4. while(COND)              e. MU.Unlock()
//       CV.Wait(MU) <<< not called
//  5. MU.Unlock()
//  6. write(GLOB)
Mutex MU;

void Waker() {
  GLOB = 1;
  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();
}

void Waiter() {
  usleep(100000); // Make sure the signaller gets first.
  MU.Lock();
  // COND is already 1 by now, so Wait() is never entered -- a hybrid
  // detector sees no signal/wait edge and reports a (false) race.
  while(COND != 1)
    CV.Wait(&MU);
  MU.Unlock();
  GLOB = 2;
}
void Run() {
  printf("test05: unavoidable false positive\n");
  COND = 0;
  // Pure happens-before mode still sees the lock-based ordering; only the
  // hybrid state machine produces the FP, so the annotation is conditional.
  if (!Tsan_PureHappensBefore())
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test05. FP. Unavoidable in hybrid scheme.");
  MyThreadArray t(Waker, Waiter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 5);
} // namespace test05
293
294
// test06: TN. Synchronization via CondVar, but Waker gets there first. {{{1
namespace test06 {
int GLOB = 0;
// Same as test05 but we annotated the Wait() loop.
//
// Waiter:                                            Waker:
// 1. COND = 0
// 2. Start(Waker)
// 3. MU.Lock()                                       a. write(GLOB)
//                                                    b. MU.Lock()
//                                                    c. COND = 1
//                                           /------- d. CV.Signal()
//  4. while(COND)                          /         e. MU.Unlock()
//       CV.Wait(MU) <<< not called        /
//  6. ANNOTATE_CONDVAR_WAIT(CV, MU) <----/
//  5. MU.Unlock()
//  6. write(GLOB)

Mutex MU;

void Waker() {
  GLOB = 1;
  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();
}

void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  usleep(500000); // Make sure the signaller gets first.
  MU.Lock();
  while(COND != 1)
    CV.Wait(&MU);
  // The explicit annotation restores the happens-before edge that the
  // skipped Wait() would have created, fixing test05's false positive.
  ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);

  MU.Unlock();
  GLOB = 2;
}
void Run() {
  printf("test06: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 6, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test06
344
345
// test07: TN. Synchronization via LockWhen(), Signaller is observed first. {{{1
namespace test07 {
int GLOB = 0;
bool COND = 0;  // Shadows the file-level int COND; local to this test.
// Two write accesses to GLOB are synchronized via conditional critical section.
// LockWhen() is observed after COND has been set (due to sleep).
// Unlock() calls ANNOTATE_CONDVAR_SIGNAL().
//
// Waiter:                           Signaller:
// 1. COND = 0
// 2. Start(Signaller)
//                                   a. write(GLOB)
//                                   b. MU.Lock()
//                                   c. COND = 1
//                              /--- d. MU.Unlock calls ANNOTATE_CONDVAR_SIGNAL
// 3. MU.LockWhen(COND==1) <---/
// 4. MU.Unlock()
// 5. write(GLOB)

Mutex MU;
void Signaller() {
  GLOB = 1;
  MU.Lock();
  COND = true; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  COND = false;
  MyThread t(Signaller);
  t.Start();
  usleep(100000); // Make sure the signaller gets there first.

  MU.LockWhen(Condition(&ArgIsTrue, &COND));  // calls ANNOTATE_CONDVAR_WAIT
  MU.Unlock();  // Signaller is done!

  GLOB = 2;     // If LockWhen didn't catch the signal, a race may be reported here.
  t.Join();
}
void Run() {
  printf("test07: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 7, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test07
391
// test08: TN. Synchronization via thread start/join. {{{1
namespace test08 {
int GLOB = 0;
// Three accesses to GLOB are separated by thread start/join.
//
// Parent:                        Worker:
// 1. write(GLOB)
// 2. Start(Worker) ------------>
//                                a. write(GLOB)
// 3. Join(Worker) <------------
// 4. write(GLOB)
void Worker() {
  GLOB = 2;
}

void Parent() {
  MyThread t(Worker);
  GLOB = 1;    // Before Start(): happens-before the worker's write.
  t.Start();
  t.Join();
  GLOB = 3;    // After Join(): happens-after the worker's write.
}
void Run() {
  printf("test08: negative\n");
  Parent();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 8);
} // namespace test08
421
422
// test09: TP. Simple race (read vs write). {{{1
namespace test09 {
int GLOB = 0;
// A simple data race between writer and reader.
// Write happens after read (enforced by sleep).
// Usually, easily detectable by a race detector.
void Writer() {
  usleep(100000);
  GLOB = 3;
}
void Reader() {
  // The CHECK is just a way to read GLOB without the compiler dropping it.
  CHECK(GLOB != -777);
}

void Run() {
  ANNOTATE_TRACE_MEMORY(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test09. TP.");
  printf("test09: positive\n");
  MyThreadArray t(Writer, Reader);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 9);
} // namespace test09
448
449
// test10: FN. Simple race (write vs read). {{{1
namespace test10 {
int GLOB = 0;
// A simple data race between writer and reader.
// Write happens before Read (enforced by sleep),
// otherwise this test is the same as test09.
//
// Writer:                    Reader:
// 1. write(GLOB)             a. sleep(long enough so that GLOB
//                                is most likely initialized by Writer)
//                            b. read(GLOB)
//
//
// Eraser algorithm does not detect the race here,
// see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
//
void Writer() {
  GLOB = 3;
}
void Reader() {
  usleep(100000);  // Read almost certainly sees the write -- but it's still a race.
  CHECK(GLOB != -777);
}

void Run() {
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test10. TP. FN in MSMHelgrind.");
  printf("test10: positive\n");
  MyThreadArray t(Writer, Reader);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 10);
} // namespace test10
484
485
// test12: FP. Synchronization via Mutex, then via PCQ. {{{1
namespace test12 {
int GLOB = 0;
// This test is properly synchronized, but currently (Dec 2007)
// helgrind reports a false positive.
//
// First, we write to GLOB under MU, then we synchronize via PCQ,
// which is essentially a semaphore.
//
// Putter:                       Getter:
// 1. MU.Lock()                  a. MU.Lock()
// 2. write(GLOB) <---- MU ----> b. write(GLOB)
// 3. MU.Unlock()                c. MU.Unlock()
// 4. Q.Put()   --------------->    d. Q.Get()
//                                  e. write(GLOB)

ProducerConsumerQueue Q(INT_MAX);
Mutex MU;

void Putter() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  Q.Put(NULL);   // Release: orders the increment above before Getter's Get().
}

void Getter() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  Q.Get();       // Acquire: pairs with Putter's Put().
  GLOB++;        // Safe: both earlier increments happen-before this.
}

void Run() {
  // ANNOTATE_EXPECT_RACE(&GLOB, "test12. FP. Fixed by MSMProp1.");
  printf("test12: negative\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 12);
} // namespace test12
532
533
// test13: FP. Synchronization via Mutex, then via LockWhen. {{{1
namespace test13 {
int GLOB = 0;
// This test is essentially the same as test12, but uses LockWhen
// instead of PCQ.
//
// Waker:                                     Waiter:
// 1. MU.Lock()                               a. MU.Lock()
// 2. write(GLOB) <---------- MU ---------->  b. write(GLOB)
// 3. MU.Unlock()                             c. MU.Unlock()
// 4. MU.Lock()                               .
// 5. COND = 1                                .
// 6. ANNOTATE_CONDVAR_SIGNAL -------\        .
// 7. MU.Unlock()                     \       .
//                                     \----> d. MU.LockWhen(COND == 1)
//                                            e. MU.Unlock()
//                                            f. write(GLOB)
Mutex MU;

void Waker() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  MU.Lock();
  COND = 1;
  ANNOTATE_CONDVAR_SIGNAL(&MU);  // Explicit signal edge for the LockWhen below.
  MU.Unlock();
}

void Waiter() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  MU.LockWhen(Condition(&ArgIsOne, &COND));
  MU.Unlock();
  GLOB++;   // Safe: happens-after both earlier increments via the signal.
}

void Run() {
  // ANNOTATE_EXPECT_RACE(&GLOB, "test13. FP. Fixed by MSMProp1.");
  printf("test13: negative\n");
  COND = 0;

  MyThreadArray t(Waker, Waiter);
  t.Start();
  t.Join();

  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 13, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test13
587
588
// test14: FP. Synchronization via PCQ, reads, 2 workers. {{{1
namespace test14 {
int GLOB = 0;
// This test is properly synchronized, but currently (Dec 2007)
// helgrind reports a false positive.
//
// This test is similar to test11, but uses PCQ (semaphore).
//
// Putter2:                  Putter1:                 Getter:
// 1. read(GLOB)             a. read(GLOB)
// 2. Q2.Put() ----\         b. Q1.Put() -----\       .
//                  \                          \-----> A. Q1.Get()
//                   \----------------------------4--> B. Q2.Get()
//                                                     C. write(GLOB)
ProducerConsumerQueue Q1(INT_MAX), Q2(INT_MAX);

void Putter1() {
  CHECK(GLOB != 777);  // Read of GLOB before publishing via Q1.
  Q1.Put(NULL);
}
void Putter2() {
  CHECK(GLOB != 777);  // Read of GLOB before publishing via Q2.
  Q2.Put(NULL);
}
void Getter() {
  Q1.Get();
  Q2.Get();
  GLOB++;  // Safe: happens-after both readers via the two Get()s.
}
void Run() {
  // ANNOTATE_EXPECT_RACE(&GLOB, "test14. FP. Fixed by MSMProp1.");
  printf("test14: negative\n");
  MyThreadArray t(Getter, Putter1, Putter2);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 14);
} // namespace test14
628
629
// test15: TN. Synchronization via LockWhen. One waker and 2 waiters. {{{1
namespace test15 {
// Waker:                                   Waiter1, Waiter2:
// 1. write(GLOB)
// 2. MU.Lock()
// 3. COND = 1
// 4. ANNOTATE_CONDVAR_SIGNAL ------------> a. MU.LockWhen(COND == 1)
// 5. MU.Unlock()                           b. MU.Unlock()
//                                          c. read(GLOB)

int GLOB = 0;
Mutex MU;

void Waker() {
  GLOB = 2;

  MU.Lock();
  COND = 1;
  ANNOTATE_CONDVAR_SIGNAL(&MU);  // One signal serves both waiters.
  MU.Unlock();
};

void Waiter() {
  MU.LockWhen(Condition(&ArgIsOne, &COND));
  MU.Unlock();
  CHECK(GLOB != 777);  // Read of GLOB, ordered after Waker's write.
}


void Run() {
  COND = 0;
  printf("test15: negative\n");
  MyThreadArray t(Waker, Waiter, Waiter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 15);
} // namespace test15
669
670
// test16: FP. Barrier (emulated by CV), 2 threads. {{{1
namespace test16 {
// Worker1:                                     Worker2:
// 1. MU.Lock()                                 a. MU.Lock()
// 2. write(GLOB) <------------ MU ---------->  b. write(GLOB)
// 3. MU.Unlock()                               c. MU.Unlock()
// 4. MU2.Lock()                                d. MU2.Lock()
// 5. COND--                                    e. COND--
// 6. ANNOTATE_CONDVAR_SIGNAL(MU2) ---->V       .
// 7. MU2.Await(COND == 0) <------------+------ f. ANNOTATE_CONDVAR_SIGNAL(MU2)
// 8. MU2.Unlock()                      V-----> g. MU2.Await(COND == 0)
// 9. read(GLOB)                                h. MU2.Unlock()
//                                              i. read(GLOB)
//
//
// TODO: This way we may create too many edges in happens-before graph.
// Arndt Mühlenfeld in his PhD (TODO: link) suggests creating special nodes in
// happens-before graph to reduce the total number of edges.
// See figure 3.14.
//
//
int GLOB = 0;
Mutex MU;
Mutex MU2;

// Barrier-like rendezvous: each worker decrements COND, signals, then waits
// for COND to reach zero before reading GLOB.
void Worker() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  MU2.Lock();
  COND--;
  ANNOTATE_CONDVAR_SIGNAL(&MU2);
  MU2.Await(Condition(&ArgIsZero, &COND));
  MU2.Unlock();

  CHECK(GLOB == 2);  // Both increments are visible after the barrier.
}

void Run() {
  // ANNOTATE_EXPECT_RACE(&GLOB, "test16. FP. Fixed by MSMProp1 + Barrier support.");
  COND = 2;
  printf("test16: negative\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 16, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test16
721
722
// test17: FP. Barrier (emulated by CV), 3 threads. {{{1
namespace test17 {
// Same as test16, but with 3 threads.
int GLOB = 0;
Mutex MU;
Mutex MU2;

// See test16::Worker for the barrier scheme; here COND starts at 3.
void Worker() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  MU2.Lock();
  COND--;
  ANNOTATE_CONDVAR_SIGNAL(&MU2);
  MU2.Await(Condition(&ArgIsZero, &COND));
  MU2.Unlock();

  CHECK(GLOB == 3);  // All three increments are visible after the barrier.
}

void Run() {
  COND = 3;
  printf("test17: negative\n");
  MyThreadArray t(Worker, Worker, Worker);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 17, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test17
754
755
// test18: TN. Synchronization via Await(), signaller gets there first. {{{1
namespace test18 {
int GLOB = 0;
Mutex MU;
// Same as test03, but uses Mutex::Await() instead of Mutex::LockWhen().

void Waker() {
  usleep(100000); // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));

  MU.Lock();
  MU.Await(Condition(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
  MU.Unlock(); // Waker is done!

  GLOB = 2;    // Ordered after Waker's write by the Await.
}
void Run() {
  printf("test18: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 18, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test18
789
// test19: TN. Synchronization via AwaitWithTimeout(). {{{1
namespace test19 {
int GLOB = 0;
// Same as test18, but with AwaitWithTimeout. Do not timeout.
Mutex MU;
void Waker() {
  usleep(100000); // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));

  MU.Lock();
  // INT_MAX timeout => effectively never times out; CHECK asserts success.
  CHECK(MU.AwaitWithTimeout(Condition(&ArgIsOne, &COND), INT_MAX));
  MU.Unlock();

  GLOB = 2;
}
void Run() {
  printf("test19: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 19, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test19
822
// test20: TP. Incorrect synchronization via AwaitWithTimeout(), timeout. {{{1
namespace test20 {
int GLOB = 0;
Mutex MU;
// True race. We timeout in AwaitWithTimeout, so no happens-before edge is
// created between the two writes of GLOB.
void Waker() {
  GLOB = 1;
  usleep(100 * 1000);
}
void Waiter() {
  MU.Lock();
  // COND is never set, so the wait must time out (CHECK asserts failure).
  CHECK(!MU.AwaitWithTimeout(Condition(&ArgIsOne, &COND), 100));
  MU.Unlock();

  GLOB = 2;  // Races with Waker's write: a timed-out wait synchronizes nothing.
}
void Run() {
  printf("test20: positive\n");
  COND = 0;
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test20. TP.");
  MyThreadArray t(Waker, Waiter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 20, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test20
850
// test21: TP. Incorrect synchronization via LockWhenWithTimeout(). {{{1
namespace test21 {
int GLOB = 0;
// True race. We timeout in LockWhenWithTimeout().
Mutex MU;
void Waker() {
  GLOB = 1;
  usleep(100 * 1000);
}
void Waiter() {
  // COND is never set, so this must time out (CHECK asserts failure);
  // the mutex is still acquired, hence the Unlock below.
  CHECK(!MU.LockWhenWithTimeout(Condition(&ArgIsOne, &COND), 100));
  MU.Unlock();

  GLOB = 2;  // Races with Waker's write: timeout means no ordering.
}
void Run() {
  printf("test21: positive\n");
  COND = 0;
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test21. TP.");
  MyThreadArray t(Waker, Waiter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 21, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test21
877
// test22: TP. Incorrect synchronization via CondVar::WaitWithTimeout(). {{{1
namespace test22 {
int GLOB = 0;
Mutex MU;
// True race. We timeout in CondVar::WaitWithTimeout().
void Waker() {
  GLOB = 1;
  usleep(100 * 1000);
}
void Waiter() {
  int ms_left_to_wait = 100;
  int deadline_ms = GetTimeInMs() + ms_left_to_wait;
  MU.Lock();
  // Standard timed-wait loop: recompute the remaining budget after each
  // (possibly spurious) wakeup. COND is never set, so this always times out.
  while(COND != 1 && ms_left_to_wait > 0) {
    CV.WaitWithTimeout(&MU, ms_left_to_wait);
    ms_left_to_wait = deadline_ms - GetTimeInMs();
  }
  MU.Unlock();

  GLOB = 2;  // Races with Waker's write: the timed-out wait synchronized nothing.
}
void Run() {
  printf("test22: positive\n");
  COND = 0;
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test22. TP.");
  MyThreadArray t(Waker, Waiter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 22);
} // namespace test22
910
// test23: TN. TryLock, ReaderLock, ReaderTryLock. {{{1
namespace test23 {
// Correct synchronization with TryLock, Lock, ReaderTryLock, ReaderLock.
// Writers take the lock exclusively; readers take it shared. All four
// acquisition styles must be understood by the detector.
int GLOB = 0;
Mutex MU;

// Writer: spins on TryLock() until the exclusive lock is obtained.
void Worker_TryLock() {
  for (int i = 0; i < 20; i++) {
    while (true) {
      if (MU.TryLock()) {
        GLOB++;
        MU.Unlock();
        break;
      }
      usleep(1000);  // Back off before retrying.
    }
  }
}

// Reader: spins on ReaderTryLock() until the shared lock is obtained.
void Worker_ReaderTryLock() {
  for (int i = 0; i < 20; i++) {
    while (true) {
      if (MU.ReaderTryLock()) {
        CHECK(GLOB != 777);
        MU.ReaderUnlock();
        break;
      }
      usleep(1000);  // Back off before retrying.
    }
  }
}

// Reader: blocking shared lock.
void Worker_ReaderLock() {
  for (int i = 0; i < 20; i++) {
    MU.ReaderLock();
    CHECK(GLOB != 777);
    MU.ReaderUnlock();
    usleep(1000);
  }
}

// Writer: blocking exclusive lock.
void Worker_Lock() {
  for (int i = 0; i < 20; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
    usleep(1000);
  }
}

void Run() {
  printf("test23: negative\n");
  MyThreadArray t(Worker_TryLock,
                  Worker_ReaderTryLock,
                  Worker_ReaderLock,
                  Worker_Lock
                  );
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 23);
} // namespace test23
973
// test24: TN. Synchronization via ReaderLockWhen(). {{{1
namespace test24 {
int GLOB = 0;
Mutex MU;
// Same as test03, but uses ReaderLockWhen().

void Waker() {
  usleep(100000); // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  // Shared (reader) acquisition still establishes the happens-before edge.
  MU.ReaderLockWhen(Condition(&ArgIsOne, &COND));
  MU.ReaderUnlock();

  GLOB = 2;
}
void Run() {
  printf("test24: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 24, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test24
1005
// test25: TN. Synchronization via ReaderLockWhenWithTimeout(). {{{1
namespace test25 {
int GLOB = 0;
Mutex MU;
// Same as test24, but uses ReaderLockWhenWithTimeout().
// We do not timeout.

void Waker() {
  usleep(100000); // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  // INT_MAX timeout => effectively never times out; CHECK asserts success.
  CHECK(MU.ReaderLockWhenWithTimeout(Condition(&ArgIsOne, &COND), INT_MAX));
  MU.ReaderUnlock();

  GLOB = 2;
}
void Run() {
  printf("test25: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 25, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test25
1038
// test26: TP. Incorrect synchronization via ReaderLockWhenWithTimeout(). {{{1
namespace test26 {
int GLOB = 0;
Mutex MU;
// Same as test25, but we timeout and incorrectly assume happens-before.

void Waker() {
  GLOB = 1;
  usleep(10000);
}
void Waiter() {
  // COND is never set, so this must time out (CHECK asserts failure);
  // the reader lock is still held, hence the ReaderUnlock below.
  CHECK(!MU.ReaderLockWhenWithTimeout(Condition(&ArgIsOne, &COND), 100));
  MU.ReaderUnlock();

  GLOB = 2;  // Races with Waker's write: timeout means no ordering.
}
void Run() {
  printf("test26: positive\n");
  COND = 0;
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test26. TP");
  MyThreadArray t(Waker, Waiter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 26, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test26
1066
1067
1068 // test27: TN. Simple synchronization via SpinLock. {{{1
1069 namespace test27 {
1070 #ifndef NO_SPINLOCK
1071 int GLOB = 0;
1072 SpinLock MU;
Worker()1073 void Worker() {
1074 MU.Lock();
1075 GLOB++;
1076 MU.Unlock();
1077 usleep(10000);
1078 }
1079
Run()1080 void Run() {
1081 printf("test27: negative\n");
1082 MyThreadArray t(Worker, Worker, Worker, Worker);
1083 t.Start();
1084 t.Join();
1085 printf("\tGLOB=%d\n", GLOB);
1086 }
1087 REGISTER_TEST2(Run, 27, FEATURE|NEEDS_ANNOTATIONS);
1088 #endif
1089 } // namespace test27
1090
1091
// test28: TN. Synchronization via Mutex, then PCQ. 3 threads {{{1
namespace test28 {
// Putter1:                       Getter:                         Putter2:
// 1. MU.Lock()                                                   A. MU.Lock()
// 2. write(GLOB)                                                 B. write(GLOB)
// 3. MU.Unlock()                                                 C. MU.Unlock()
// 4. Q.Put() ---------\                                 /------- D. Q.Put()
// 5. MU.Lock()         \-------> a. Q.Get()            /         E. MU.Lock()
// 6. read(GLOB)                  b. Q.Get() <---------/          F. read(GLOB)
// 7. MU.Unlock()                   (sleep)                       G. MU.Unlock()
//                                c. read(GLOB)
ProducerConsumerQueue Q(INT_MAX);
int GLOB = 0;
Mutex MU;

// Both Putter instances run this: increment under MU, publish via Q,
// then read GLOB again under MU.
void Putter() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  Q.Put(NULL);

  MU.Lock();
  CHECK(GLOB != 777);
  MU.Unlock();
}

void Getter() {
  Q.Get();          // One Get per Putter.
  Q.Get();
  usleep(100000);
  CHECK(GLOB == 2); // Both increments happen-before the two Get()s.
}

void Run() {
  printf("test28: negative\n");
  MyThreadArray t(Getter, Putter, Putter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 28);
} // namespace test28
1135
1136
// test29: TN. Synchronization via Mutex, then PCQ. 4 threads. {{{1
namespace test29 {
// Similar to test28, but has two Getters and two PCQs.
ProducerConsumerQueue *Q1, *Q2;
Mutex MU;
int GLOB = 0;

// Shared body for both putters; each puts twice (one item per Getter).
void Putter(ProducerConsumerQueue *q) {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  q->Put(NULL);
  q->Put(NULL);

  MU.Lock();
  CHECK(GLOB != 777);
  MU.Unlock();

}

void Putter1() { Putter(Q1); }
void Putter2() { Putter(Q2); }

void Getter() {
  Q1->Get();
  Q2->Get();
  usleep(100000);
  CHECK(GLOB == 2);  // Both increments happen-before the Get()s.
  usleep(48000);  // TODO: remove this when FP in test32 is fixed.
}

void Run() {
  printf("test29: negative\n");
  Q1 = new ProducerConsumerQueue(INT_MAX);
  Q2 = new ProducerConsumerQueue(INT_MAX);
  MyThreadArray t(Getter, Getter, Putter1, Putter2);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
  delete Q1;
  delete Q2;
}
REGISTER_TEST(Run, 29);
} // namespace test29
1182
1183
// test30: TN. Synchronization via 'safe' race. Writer vs multiple Readers. {{{1
namespace test30 {
// This test shows a very risky kind of synchronization which is very easy
// to get wrong. Actually, I am not sure I've got it right.
//
// Writer:                                 Reader1, Reader2, ..., ReaderN:
// 1. write(GLOB[i]: i >= BOUNDARY)        a. n = BOUNDARY
// 2. HAPPENS_BEFORE(BOUNDARY+1)  ------->  b. HAPPENS_AFTER(n)
// 3. BOUNDARY++;                           c. read(GLOB[i]: i < n)
//
// Here we have a 'safe' race on accesses to BOUNDARY and
// no actual races on accesses to GLOB[]:
// Writer writes to GLOB[i] where i>=BOUNDARY and then increments BOUNDARY.
// Readers read BOUNDARY and read GLOB[i] where i<BOUNDARY.
//
// I am not completely sure that this scheme guarantees no race between
// accesses to GLOB since compilers and CPUs
// are free to rearrange memory operations.
// I am actually sure that this scheme is wrong unless we use
// some smart memory fencing...


const int N = 48;
static int GLOB[N];
volatile int BOUNDARY = 0;  // volatile: intentionally raced on; see above.

void Writer() {
  for (int i = 0; i < N; i++) {
    CHECK(BOUNDARY == i);
    for (int j = i; j < N; j++) {
      GLOB[j] = j;
    }
    // The integer value BOUNDARY+1 (cast to a pointer) is used as the
    // happens-before "tag" that readers pair with via HAPPENS_AFTER.
    ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1));
    BOUNDARY++;
    usleep(1000);
  }
}

void Reader() {
  int n;
  do {
    n = BOUNDARY;
    if (n == 0) continue;
    ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n));
    for (int i = 0; i < n; i++) {
      CHECK(GLOB[i] == i);  // Only indices below the observed boundary.
    }
    usleep(100);
  } while(n < N);
}

void Run() {
  ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test30. Sync via 'safe' race.");
  printf("test30: negative\n");
  MyThreadArray t(Writer, Reader, Reader, Reader);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB[N-1]);
}
REGISTER_TEST2(Run, 30, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test30
1245
1246
// test31: TN. Synchronization via 'safe' race. Writer vs Writer. {{{1
namespace test31 {
// This test is similar to test30, but
// it has one Writer instead of multiple Readers.
//
// Writer1:                                Writer2
// 1. write(GLOB[i]: i >= BOUNDARY)        a. n = BOUNDARY
// 2. HAPPENS_BEFORE(BOUNDARY+1)  ------->  b. HAPPENS_AFTER(n)
// 3. BOUNDARY++;                           c. write(GLOB[i]: i < n)
//

const int N = 48;
static int GLOB[N];
volatile int BOUNDARY = 0;  // volatile: intentionally raced on; see test30.

void Writer1() {
  for (int i = 0; i < N; i++) {
    CHECK(BOUNDARY == i);
    for (int j = i; j < N; j++) {
      GLOB[j] = j;
    }
    // BOUNDARY+1 (as a pointer) is the happens-before tag Writer2 pairs with.
    ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1));
    BOUNDARY++;
    usleep(1000);
  }
}

void Writer2() {
  int n;
  do {
    n = BOUNDARY;
    if (n == 0) continue;
    ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n));
    for (int i = 0; i < n; i++) {
      // Writes only below the observed boundary, so no conflict with Writer1.
      if(GLOB[i] == i) {
        GLOB[i]++;
      }
    }
    usleep(100);
  } while(n < N);
}

void Run() {
  ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test31. Sync via 'safe' race.");
  printf("test31: negative\n");
  MyThreadArray t(Writer1, Writer2);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB[N-1]);
}
REGISTER_TEST2(Run, 31, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test31
1299
1300
1301 // test32: FP. Synchronization via thread create/join. W/R. {{{1
1302 namespace test32 {
1303 // This test is well synchronized but helgrind 3.3.0 reports a race.
1304 //
1305 // Parent: Writer: Reader:
1306 // 1. Start(Reader) -----------------------\ .
1307 // \ .
1308 // 2. Start(Writer) ---\ \ .
1309 // \---> a. MU.Lock() \--> A. sleep(long enough)
1310 // b. write(GLOB)
1311 // /---- c. MU.Unlock()
1312 // 3. Join(Writer) <---/
1313 // B. MU.Lock()
1314 // C. read(GLOB)
1315 // /------------ D. MU.Unlock()
1316 // 4. Join(Reader) <----------------/
1317 // 5. write(GLOB)
1318 //
1319 //
1320 // The call to sleep() in Reader is not part of synchronization,
1321 // it is required to trigger the false positive in helgrind 3.3.0.
1322 //
1323 int GLOB = 0;
1324 Mutex MU;
1325
Writer()1326 void Writer() {
1327 MU.Lock();
1328 GLOB = 1;
1329 MU.Unlock();
1330 }
1331
Reader()1332 void Reader() {
1333 usleep(480000);
1334 MU.Lock();
1335 CHECK(GLOB != 777);
1336 MU.Unlock();
1337 }
1338
Parent()1339 void Parent() {
1340 MyThread r(Reader);
1341 MyThread w(Writer);
1342 r.Start();
1343 w.Start();
1344
1345 w.Join(); // 'w' joins first.
1346 r.Join();
1347
1348 GLOB = 2;
1349 }
1350
Run()1351 void Run() {
1352 // ANNOTATE_EXPECT_RACE(&GLOB, "test32. FP. Fixed by MSMProp1.");
1353 printf("test32: negative\n");
1354 Parent();
1355 printf("\tGLOB=%d\n", GLOB);
1356 }
1357
1358 REGISTER_TEST(Run, 32);
1359 } // namespace test32
1360
1361
1362 // test33: STAB. Stress test for the number of thread sets (TSETs). {{{1
1363 namespace test33 {
1364 int GLOB = 0;
1365 // Here we access N memory locations from within log(N) threads.
1366 // We do it in such a way that helgrind creates nearly all possible TSETs.
1367 // Then we join all threads and start again (N_iter times).
1368 const int N_iter = 48;
1369 const int Nlog = 15;
1370 const int N = 1 << Nlog;
1371 static int ARR[N];
1372 Mutex MU;
1373
Worker()1374 void Worker() {
1375 MU.Lock();
1376 int n = ++GLOB;
1377 MU.Unlock();
1378
1379 n %= Nlog;
1380 for (int i = 0; i < N; i++) {
1381 // ARR[i] is accessed by threads from i-th subset
1382 if (i & (1 << n)) {
1383 CHECK(ARR[i] == 0);
1384 }
1385 }
1386 }
1387
Run()1388 void Run() {
1389 printf("test33:\n");
1390
1391 std::vector<MyThread*> vec(Nlog);
1392
1393 for (int j = 0; j < N_iter; j++) {
1394 // Create and start Nlog threads
1395 for (int i = 0; i < Nlog; i++) {
1396 vec[i] = new MyThread(Worker);
1397 }
1398 for (int i = 0; i < Nlog; i++) {
1399 vec[i]->Start();
1400 }
1401 // Join all threads.
1402 for (int i = 0; i < Nlog; i++) {
1403 vec[i]->Join();
1404 delete vec[i];
1405 }
1406 printf("------------------\n");
1407 }
1408
1409 printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
1410 GLOB, ARR[1], ARR[7], ARR[N-1]);
1411 }
1412 REGISTER_TEST2(Run, 33, STABILITY|EXCLUDE_FROM_ALL);
1413 } // namespace test33
1414
1415
1416 // test34: STAB. Stress test for the number of locks sets (LSETs). {{{1
1417 namespace test34 {
1418 // Similar to test33, but for lock sets.
1419 int GLOB = 0;
1420 const int N_iter = 48;
1421 const int Nlog = 10;
1422 const int N = 1 << Nlog;
1423 static int ARR[N];
1424 static Mutex *MUs[Nlog];
1425
Worker()1426 void Worker() {
1427 for (int i = 0; i < N; i++) {
1428 // ARR[i] is protected by MUs from i-th subset of all MUs
1429 for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Lock();
1430 CHECK(ARR[i] == 0);
1431 for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Unlock();
1432 }
1433 }
1434
Run()1435 void Run() {
1436 printf("test34:\n");
1437 for (int iter = 0; iter < N_iter; iter++) {
1438 for (int i = 0; i < Nlog; i++) {
1439 MUs[i] = new Mutex;
1440 }
1441 MyThreadArray t(Worker, Worker);
1442 t.Start();
1443 t.Join();
1444 for (int i = 0; i < Nlog; i++) {
1445 delete MUs[i];
1446 }
1447 printf("------------------\n");
1448 }
1449 printf("\tGLOB=%d\n", GLOB);
1450 }
1451 REGISTER_TEST2(Run, 34, STABILITY|EXCLUDE_FROM_ALL);
1452 } // namespace test34
1453
1454
1455 // test35: PERF. Lots of mutexes and lots of call to free(). {{{1
1456 namespace test35 {
1457 // Helgrind 3.3.0 has very slow in shadow_mem_make_NoAccess(). Fixed locally.
1458 // With the fix helgrind runs this test about a minute.
1459 // Without the fix -- about 5 minutes. (on c2d 2.4GHz).
1460 //
1461 // TODO: need to figure out the best way for performance testing.
1462 int **ARR;
1463 const int N_mu = 25000;
1464 const int N_free = 48000;
1465
Worker()1466 void Worker() {
1467 for (int i = 0; i < N_free; i++)
1468 CHECK(777 == *ARR[i]);
1469 }
1470
Run()1471 void Run() {
1472 printf("test35:\n");
1473 std::vector<Mutex*> mus;
1474
1475 ARR = new int *[N_free];
1476 for (int i = 0; i < N_free; i++) {
1477 const int c = N_free / N_mu;
1478 if ((i % c) == 0) {
1479 mus.push_back(new Mutex);
1480 mus.back()->Lock();
1481 mus.back()->Unlock();
1482 }
1483 ARR[i] = new int(777);
1484 }
1485
1486 // Need to put all ARR[i] into shared state in order
1487 // to trigger the performance bug.
1488 MyThreadArray t(Worker, Worker);
1489 t.Start();
1490 t.Join();
1491
1492 for (int i = 0; i < N_free; i++) delete ARR[i];
1493 delete [] ARR;
1494
1495 for (size_t i = 0; i < mus.size(); i++) {
1496 delete mus[i];
1497 }
1498 }
1499 REGISTER_TEST2(Run, 35, PERFORMANCE|EXCLUDE_FROM_ALL);
1500 } // namespace test35
1501
1502
1503 // test36: TN. Synchronization via Mutex, then PCQ. 3 threads. W/W {{{1
1504 namespace test36 {
1505 // variation of test28 (W/W instead of W/R)
1506
1507 // Putter1: Getter: Putter2:
1508 // 1. MU.Lock(); A. MU.Lock()
1509 // 2. write(GLOB) B. write(GLOB)
1510 // 3. MU.Unlock() C. MU.Unlock()
1511 // 4. Q.Put() ---------\ /------- D. Q.Put()
1512 // 5. MU1.Lock() \-------> a. Q.Get() / E. MU1.Lock()
1513 // 6. MU.Lock() b. Q.Get() <---------/ F. MU.Lock()
1514 // 7. write(GLOB) G. write(GLOB)
1515 // 8. MU.Unlock() H. MU.Unlock()
1516 // 9. MU1.Unlock() (sleep) I. MU1.Unlock()
1517 // c. MU1.Lock()
1518 // d. write(GLOB)
1519 // e. MU1.Unlock()
1520 ProducerConsumerQueue Q(INT_MAX);
1521 int GLOB = 0;
1522 Mutex MU, MU1;
1523
Putter()1524 void Putter() {
1525 MU.Lock();
1526 GLOB++;
1527 MU.Unlock();
1528
1529 Q.Put(NULL);
1530
1531 MU1.Lock();
1532 MU.Lock();
1533 GLOB++;
1534 MU.Unlock();
1535 MU1.Unlock();
1536 }
1537
Getter()1538 void Getter() {
1539 Q.Get();
1540 Q.Get();
1541 usleep(100000);
1542 MU1.Lock();
1543 GLOB++;
1544 MU1.Unlock();
1545 }
1546
Run()1547 void Run() {
1548 printf("test36: negative \n");
1549 MyThreadArray t(Getter, Putter, Putter);
1550 t.Start();
1551 t.Join();
1552 printf("\tGLOB=%d\n", GLOB);
1553 }
1554 REGISTER_TEST(Run, 36);
1555 } // namespace test36
1556
1557
1558 // test37: TN. Simple synchronization (write vs read). {{{1
1559 namespace test37 {
1560 int GLOB = 0;
1561 Mutex MU;
1562 // Similar to test10, but properly locked.
1563 // Writer: Reader:
1564 // 1. MU.Lock()
1565 // 2. write
1566 // 3. MU.Unlock()
1567 // a. MU.Lock()
1568 // b. read
1569 // c. MU.Unlock();
1570
Writer()1571 void Writer() {
1572 MU.Lock();
1573 GLOB = 3;
1574 MU.Unlock();
1575 }
Reader()1576 void Reader() {
1577 usleep(100000);
1578 MU.Lock();
1579 CHECK(GLOB != -777);
1580 MU.Unlock();
1581 }
1582
Run()1583 void Run() {
1584 printf("test37: negative\n");
1585 MyThreadArray t(Writer, Reader);
1586 t.Start();
1587 t.Join();
1588 printf("\tGLOB=%d\n", GLOB);
1589 }
1590 REGISTER_TEST(Run, 37);
1591 } // namespace test37
1592
1593
1594 // test38: TN. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
1595 namespace test38 {
1596 // Fusion of test29 and test36.
1597
1598 // Putter1: Putter2: Getter1: Getter2:
1599 // MU1.Lock() MU1.Lock()
1600 // write(GLOB) write(GLOB)
1601 // MU1.Unlock() MU1.Unlock()
1602 // Q1.Put() Q2.Put()
1603 // Q1.Put() Q2.Put()
1604 // MU1.Lock() MU1.Lock()
1605 // MU2.Lock() MU2.Lock()
1606 // write(GLOB) write(GLOB)
1607 // MU2.Unlock() MU2.Unlock()
1608 // MU1.Unlock() MU1.Unlock() sleep sleep
1609 // Q1.Get() Q1.Get()
1610 // Q2.Get() Q2.Get()
1611 // MU2.Lock() MU2.Lock()
1612 // write(GLOB) write(GLOB)
1613 // MU2.Unlock() MU2.Unlock()
1614 //
1615
1616
1617 ProducerConsumerQueue *Q1, *Q2;
1618 int GLOB = 0;
1619 Mutex MU, MU1, MU2;
1620
Putter(ProducerConsumerQueue * q)1621 void Putter(ProducerConsumerQueue *q) {
1622 MU1.Lock();
1623 GLOB++;
1624 MU1.Unlock();
1625
1626 q->Put(NULL);
1627 q->Put(NULL);
1628
1629 MU1.Lock();
1630 MU2.Lock();
1631 GLOB++;
1632 MU2.Unlock();
1633 MU1.Unlock();
1634
1635 }
1636
Putter1()1637 void Putter1() { Putter(Q1); }
Putter2()1638 void Putter2() { Putter(Q2); }
1639
Getter()1640 void Getter() {
1641 usleep(100000);
1642 Q1->Get();
1643 Q2->Get();
1644
1645 MU2.Lock();
1646 GLOB++;
1647 MU2.Unlock();
1648
1649 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1650 }
1651
Run()1652 void Run() {
1653 printf("test38: negative\n");
1654 Q1 = new ProducerConsumerQueue(INT_MAX);
1655 Q2 = new ProducerConsumerQueue(INT_MAX);
1656 MyThreadArray t(Getter, Getter, Putter1, Putter2);
1657 t.Start();
1658 t.Join();
1659 printf("\tGLOB=%d\n", GLOB);
1660 delete Q1;
1661 delete Q2;
1662 }
1663 REGISTER_TEST(Run, 38);
1664 } // namespace test38
1665
1666 namespace NegativeTests_Barrier { // {{{1
1667 #ifndef NO_BARRIER
1668 // Same as test17 but uses Barrier class (pthread_barrier_t).
1669 int GLOB = 0;
1670 const int N_threads = 3;
1671 Barrier barrier(N_threads);
1672 Mutex MU;
1673
Worker()1674 void Worker() {
1675 MU.Lock();
1676 GLOB++;
1677 MU.Unlock();
1678 barrier.Block();
1679 CHECK(GLOB == N_threads);
1680 }
1681
TEST(NegativeTests,Barrier)1682 TEST(NegativeTests, Barrier) {
1683 ANNOTATE_TRACE_MEMORY(&GLOB);
1684 {
1685 ThreadPool pool(N_threads);
1686 pool.StartWorkers();
1687 for (int i = 0; i < N_threads; i++) {
1688 pool.Add(NewCallback(Worker));
1689 }
1690 } // all folks are joined here.
1691 CHECK(GLOB == 3);
1692 }
1693 #endif // NO_BARRIER
1694 } // namespace test39
1695
1696
1697 // test40: FP. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1
1698 namespace test40 {
1699 // Similar to test38 but with different order of events (due to sleep).
1700
1701 // Putter1: Putter2: Getter1: Getter2:
1702 // MU1.Lock() MU1.Lock()
1703 // write(GLOB) write(GLOB)
1704 // MU1.Unlock() MU1.Unlock()
1705 // Q1.Put() Q2.Put()
1706 // Q1.Put() Q2.Put()
1707 // Q1.Get() Q1.Get()
1708 // Q2.Get() Q2.Get()
1709 // MU2.Lock() MU2.Lock()
1710 // write(GLOB) write(GLOB)
1711 // MU2.Unlock() MU2.Unlock()
1712 //
1713 // MU1.Lock() MU1.Lock()
1714 // MU2.Lock() MU2.Lock()
1715 // write(GLOB) write(GLOB)
1716 // MU2.Unlock() MU2.Unlock()
1717 // MU1.Unlock() MU1.Unlock()
1718
1719
1720 ProducerConsumerQueue *Q1, *Q2;
1721 int GLOB = 0;
1722 Mutex MU, MU1, MU2;
1723
Putter(ProducerConsumerQueue * q)1724 void Putter(ProducerConsumerQueue *q) {
1725 MU1.Lock();
1726 GLOB++;
1727 MU1.Unlock();
1728
1729 q->Put(NULL);
1730 q->Put(NULL);
1731 usleep(100000);
1732
1733 MU1.Lock();
1734 MU2.Lock();
1735 GLOB++;
1736 MU2.Unlock();
1737 MU1.Unlock();
1738
1739 }
1740
Putter1()1741 void Putter1() { Putter(Q1); }
Putter2()1742 void Putter2() { Putter(Q2); }
1743
Getter()1744 void Getter() {
1745 Q1->Get();
1746 Q2->Get();
1747
1748 MU2.Lock();
1749 GLOB++;
1750 MU2.Unlock();
1751
1752 usleep(48000); // TODO: remove this when FP in test32 is fixed.
1753 }
1754
Run()1755 void Run() {
1756 // ANNOTATE_EXPECT_RACE(&GLOB, "test40. FP. Fixed by MSMProp1. Complex Stuff.");
1757 printf("test40: negative\n");
1758 Q1 = new ProducerConsumerQueue(INT_MAX);
1759 Q2 = new ProducerConsumerQueue(INT_MAX);
1760 MyThreadArray t(Getter, Getter, Putter1, Putter2);
1761 t.Start();
1762 t.Join();
1763 printf("\tGLOB=%d\n", GLOB);
1764 delete Q1;
1765 delete Q2;
1766 }
1767 REGISTER_TEST(Run, 40);
1768 } // namespace test40
1769
1770 // test41: TN. Test for race that appears when loading a dynamic symbol. {{{1
1771 namespace test41 {
Worker()1772 void Worker() {
1773 ANNOTATE_NO_OP(NULL); // An empty function, loaded from dll.
1774 }
Run()1775 void Run() {
1776 printf("test41: negative\n");
1777 MyThreadArray t(Worker, Worker, Worker);
1778 t.Start();
1779 t.Join();
1780 }
1781 REGISTER_TEST2(Run, 41, FEATURE|NEEDS_ANNOTATIONS);
1782 } // namespace test41
1783
1784
1785 // test42: TN. Using the same cond var several times. {{{1
1786 namespace test42 {
1787 int GLOB = 0;
1788 int COND = 0;
1789 int N_threads = 3;
1790 Mutex MU;
1791
Worker1()1792 void Worker1() {
1793 GLOB=1;
1794
1795 MU.Lock();
1796 COND = 1;
1797 CV.Signal();
1798 MU.Unlock();
1799
1800 MU.Lock();
1801 while (COND != 0)
1802 CV.Wait(&MU);
1803 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
1804 MU.Unlock();
1805
1806 GLOB=3;
1807
1808 }
1809
Worker2()1810 void Worker2() {
1811
1812 MU.Lock();
1813 while (COND != 1)
1814 CV.Wait(&MU);
1815 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
1816 MU.Unlock();
1817
1818 GLOB=2;
1819
1820 MU.Lock();
1821 COND = 0;
1822 CV.Signal();
1823 MU.Unlock();
1824
1825 }
1826
Run()1827 void Run() {
1828 // ANNOTATE_EXPECT_RACE(&GLOB, "test42. TN. debugging.");
1829 printf("test42: negative\n");
1830 MyThreadArray t(Worker1, Worker2);
1831 t.Start();
1832 t.Join();
1833 printf("\tGLOB=%d\n", GLOB);
1834 }
1835 REGISTER_TEST2(Run, 42, FEATURE|NEEDS_ANNOTATIONS);
1836 } // namespace test42
1837
1838
1839
1840 // test43: TN. {{{1
1841 namespace test43 {
1842 //
1843 // Putter: Getter:
1844 // 1. write
1845 // 2. Q.Put() --\ .
1846 // 3. read \--> a. Q.Get()
1847 // b. read
1848 int GLOB = 0;
1849 ProducerConsumerQueue Q(INT_MAX);
Putter()1850 void Putter() {
1851 GLOB = 1;
1852 Q.Put(NULL);
1853 CHECK(GLOB == 1);
1854 }
Getter()1855 void Getter() {
1856 Q.Get();
1857 usleep(100000);
1858 CHECK(GLOB == 1);
1859 }
Run()1860 void Run() {
1861 printf("test43: negative\n");
1862 MyThreadArray t(Putter, Getter);
1863 t.Start();
1864 t.Join();
1865 printf("\tGLOB=%d\n", GLOB);
1866 }
1867 REGISTER_TEST(Run, 43)
1868 } // namespace test43
1869
1870
1871 // test44: FP. {{{1
1872 namespace test44 {
1873 //
1874 // Putter: Getter:
1875 // 1. read
1876 // 2. Q.Put() --\ .
1877 // 3. MU.Lock() \--> a. Q.Get()
1878 // 4. write
1879 // 5. MU.Unlock()
1880 // b. MU.Lock()
1881 // c. write
1882 // d. MU.Unlock();
1883 int GLOB = 0;
1884 Mutex MU;
1885 ProducerConsumerQueue Q(INT_MAX);
Putter()1886 void Putter() {
1887 CHECK(GLOB == 0);
1888 Q.Put(NULL);
1889 MU.Lock();
1890 GLOB = 1;
1891 MU.Unlock();
1892 }
Getter()1893 void Getter() {
1894 Q.Get();
1895 usleep(100000);
1896 MU.Lock();
1897 GLOB = 1;
1898 MU.Unlock();
1899 }
Run()1900 void Run() {
1901 // ANNOTATE_EXPECT_RACE(&GLOB, "test44. FP. Fixed by MSMProp1.");
1902 printf("test44: negative\n");
1903 MyThreadArray t(Putter, Getter);
1904 t.Start();
1905 t.Join();
1906 printf("\tGLOB=%d\n", GLOB);
1907 }
1908 REGISTER_TEST(Run, 44)
1909 } // namespace test44
1910
1911
1912 // test45: TN. {{{1
1913 namespace test45 {
1914 //
1915 // Putter: Getter:
1916 // 1. read
1917 // 2. Q.Put() --\ .
1918 // 3. MU.Lock() \--> a. Q.Get()
1919 // 4. write
1920 // 5. MU.Unlock()
1921 // b. MU.Lock()
1922 // c. read
1923 // d. MU.Unlock();
1924 int GLOB = 0;
1925 Mutex MU;
1926 ProducerConsumerQueue Q(INT_MAX);
Putter()1927 void Putter() {
1928 CHECK(GLOB == 0);
1929 Q.Put(NULL);
1930 MU.Lock();
1931 GLOB++;
1932 MU.Unlock();
1933 }
Getter()1934 void Getter() {
1935 Q.Get();
1936 usleep(100000);
1937 MU.Lock();
1938 CHECK(GLOB <= 1);
1939 MU.Unlock();
1940 }
Run()1941 void Run() {
1942 printf("test45: negative\n");
1943 MyThreadArray t(Putter, Getter);
1944 t.Start();
1945 t.Join();
1946 printf("\tGLOB=%d\n", GLOB);
1947 }
1948 REGISTER_TEST(Run, 45)
1949 } // namespace test45
1950
1951
1952 // test46: FN. {{{1
1953 namespace test46 {
1954 //
1955 // First: Second:
1956 // 1. write
1957 // 2. MU.Lock()
1958 // 3. write
1959 // 4. MU.Unlock() (sleep)
1960 // a. MU.Lock()
1961 // b. write
1962 // c. MU.Unlock();
1963 int GLOB = 0;
1964 Mutex MU;
First()1965 void First() {
1966 GLOB++;
1967 MU.Lock();
1968 GLOB++;
1969 MU.Unlock();
1970 }
Second()1971 void Second() {
1972 usleep(480000);
1973 MU.Lock();
1974 GLOB++;
1975 MU.Unlock();
1976
1977 // just a print.
1978 // If we move it to Run() we will get report in MSMHelgrind
1979 // due to its false positive (test32).
1980 MU.Lock();
1981 printf("\tGLOB=%d\n", GLOB);
1982 MU.Unlock();
1983 }
Run()1984 void Run() {
1985 ANNOTATE_TRACE_MEMORY(&GLOB);
1986 MyThreadArray t(First, Second);
1987 t.Start();
1988 t.Join();
1989 }
1990 REGISTER_TEST(Run, 46)
1991 } // namespace test46
1992
1993
1994 // test47: TP. Not detected by pure happens-before detectors. {{{1
1995 namespace test47 {
1996 // A true race that can not be detected by a pure happens-before
1997 // race detector.
1998 //
1999 // First: Second:
2000 // 1. write
2001 // 2. MU.Lock()
2002 // 3. MU.Unlock() (sleep)
2003 // a. MU.Lock()
2004 // b. MU.Unlock();
2005 // c. write
2006 int GLOB = 0;
2007 Mutex MU;
First()2008 void First() {
2009 GLOB=1;
2010 MU.Lock();
2011 MU.Unlock();
2012 }
Second()2013 void Second() {
2014 usleep(480000);
2015 MU.Lock();
2016 MU.Unlock();
2017 GLOB++;
2018 }
Run()2019 void Run() {
2020 if (!Tsan_PureHappensBefore())
2021 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test47. TP. Not detected by pure HB.");
2022 printf("test47: positive\n");
2023 MyThreadArray t(First, Second);
2024 t.Start();
2025 t.Join();
2026 printf("\tGLOB=%d\n", GLOB);
2027 }
2028 REGISTER_TEST(Run, 47)
2029 } // namespace test47
2030
2031
2032 // test48: FN. Simple race (single write vs multiple reads). {{{1
2033 namespace test48 {
2034 int GLOB = 0;
2035 // same as test10 but with single writer and multiple readers
2036 // A simple data race between single writer and multiple readers.
2037 // Write happens before Reads (enforced by sleep(1)),
2038
2039 //
2040 // Writer: Readers:
2041 // 1. write(GLOB) a. sleep(long enough so that GLOB
2042 // is most likely initialized by Writer)
2043 // b. read(GLOB)
2044 //
2045 //
2046 // Eraser algorithm does not detect the race here,
2047 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2048 //
Writer()2049 void Writer() {
2050 GLOB = 3;
2051 }
Reader()2052 void Reader() {
2053 usleep(100000);
2054 CHECK(GLOB != -777);
2055 }
2056
Run()2057 void Run() {
2058 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test48. TP. FN in MSMHelgrind.");
2059 printf("test48: positive\n");
2060 MyThreadArray t(Writer, Reader,Reader,Reader);
2061 t.Start();
2062 t.Join();
2063 printf("\tGLOB=%d\n", GLOB);
2064 }
2065 REGISTER_TEST(Run, 48)
2066 } // namespace test48
2067
2068
2069 // test49: FN. Simple race (single write vs multiple reads). {{{1
2070 namespace test49 {
2071 int GLOB = 0;
2072 // same as test10 but with multiple read operations done by a single reader
2073 // A simple data race between writer and readers.
2074 // Write happens before Read (enforced by sleep(1)),
2075 //
2076 // Writer: Reader:
2077 // 1. write(GLOB) a. sleep(long enough so that GLOB
2078 // is most likely initialized by Writer)
2079 // b. read(GLOB)
2080 // c. read(GLOB)
2081 // d. read(GLOB)
2082 // e. read(GLOB)
2083 //
2084 //
2085 // Eraser algorithm does not detect the race here,
2086 // see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
2087 //
Writer()2088 void Writer() {
2089 GLOB = 3;
2090 }
Reader()2091 void Reader() {
2092 usleep(100000);
2093 CHECK(GLOB != -777);
2094 CHECK(GLOB != -777);
2095 CHECK(GLOB != -777);
2096 CHECK(GLOB != -777);
2097 }
2098
Run()2099 void Run() {
2100 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test49. TP. FN in MSMHelgrind.");
2101 printf("test49: positive\n");
2102 MyThreadArray t(Writer, Reader);
2103 t.Start();
2104 t.Join();
2105 printf("\tGLOB=%d\n", GLOB);
2106 }
2107 REGISTER_TEST(Run, 49);
2108 } // namespace test49
2109
2110
2111 // test50: TP. Synchronization via CondVar. {{{1
2112 namespace test50 {
2113 int GLOB = 0;
2114 Mutex MU;
2115 // Two last write accesses to GLOB are not synchronized
2116 //
2117 // Waiter: Waker:
2118 // 1. COND = 0
2119 // 2. Start(Waker)
2120 // 3. MU.Lock() a. write(GLOB)
2121 // b. MU.Lock()
2122 // c. COND = 1
2123 // /--- d. CV.Signal()
2124 // 4. while(COND != 1) / e. MU.Unlock()
2125 // CV.Wait(MU) <---/
2126 // 5. MU.Unlock()
2127 // 6. write(GLOB) f. MU.Lock()
2128 // g. write(GLOB)
2129 // h. MU.Unlock()
2130
2131
Waker()2132 void Waker() {
2133 usleep(100000); // Make sure the waiter blocks.
2134
2135 GLOB = 1;
2136
2137 MU.Lock();
2138 COND = 1;
2139 CV.Signal();
2140 MU.Unlock();
2141
2142 usleep(100000);
2143 MU.Lock();
2144 GLOB = 3;
2145 MU.Unlock();
2146 }
2147
Waiter()2148 void Waiter() {
2149 MU.Lock();
2150 while(COND != 1)
2151 CV.Wait(&MU);
2152 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
2153 MU.Unlock();
2154
2155 GLOB = 2;
2156 }
Run()2157 void Run() {
2158 printf("test50: positive\n");
2159 COND = 0;
2160 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test50. TP.");
2161 MyThreadArray t(Waker, Waiter);
2162 t.Start();
2163 t.Join();
2164 printf("\tGLOB=%d\n", GLOB);
2165 }
2166 REGISTER_TEST2(Run, 50, FEATURE|NEEDS_ANNOTATIONS);
2167 } // namespace test50
2168
2169
2170 // test51: TP. Synchronization via CondVar: problem with several signals. {{{1
2171 namespace test51 {
2172 int GLOB = 0;
2173 int COND = 0;
2174 Mutex MU;
2175 StealthNotification n1, n2;
2176
2177 // scheduler dependent results because of several signals
2178 // second signal will be lost
2179 //
2180 // Waiter: Waker:
2181 // 1. Start(Waker)
2182 // 2. MU.Lock()
2183 // 3. while(COND)
2184 // CV.Wait(MU)<-\ .
2185 // 4. MU.Unlock() \ .
2186 // 5. write(GLOB) \ a. write(GLOB)
2187 // \ b. MU.Lock()
2188 // \ c. COND = 1
2189 // \--- d. CV.Signal()
2190 // e. MU.Unlock()
2191 //
2192 // f. write(GLOB)
2193 //
2194 // g. MU.Lock()
2195 // h. COND = 1
2196 // LOST<---- i. CV.Signal()
2197 // j. MU.Unlock()
2198
Waker()2199 void Waker() {
2200 n1.wait(); // Make sure the waiter blocks.
2201
2202 GLOB = 1;
2203
2204 MU.Lock();
2205 COND = 1;
2206 CV.Signal();
2207 MU.Unlock();
2208
2209 n2.wait(); // Make sure the waiter continued.
2210
2211 GLOB = 2;
2212
2213 MU.Lock();
2214 COND = 1;
2215 CV.Signal(); //Lost Signal
2216 MU.Unlock();
2217 }
2218
Waiter()2219 void Waiter() {
2220 MU.Lock();
2221 n1.signal(); // Ready to get the first signal.
2222 while(COND != 1)
2223 CV.Wait(&MU);
2224 MU.Unlock();
2225
2226 GLOB = 3;
2227 n2.signal(); // Ready to miss the second signal.
2228 }
Run()2229 void Run() {
2230 ANNOTATE_EXPECT_RACE(&GLOB, "test51. TP.");
2231 printf("test51: positive\n");
2232 MyThreadArray t(Waiter, Waker);
2233 t.Start();
2234 t.Join();
2235 printf("\tGLOB=%d\n", GLOB);
2236 }
2237 REGISTER_TEST(Run, 51);
2238 } // namespace test51
2239
2240
2241 // test52: TP. Synchronization via CondVar: problem with several signals. {{{1
2242 namespace test52 {
2243 int GLOB = 0;
2244 int COND = 0;
2245 Mutex MU;
2246 StealthNotification n1, n2;
2247
2248 // same as test51 but the first signal will be lost
2249 // scheduler dependent results because of several signals
2250 //
2251 // Waiter: Waker:
2252 // 1. Start(Waker)
2253 // a. write(GLOB)
2254 // b. MU.Lock()
2255 // c. COND = 1
2256 // LOST<---- d. CV.Signal()
2257 // e. MU.Unlock()
2258 //
2259 // 2. MU.Lock()
2260 // 3. while(COND)
2261 // CV.Wait(MU)<-\ .
2262 // 4. MU.Unlock() \ f. write(GLOB)
2263 // 5. write(GLOB) \ .
2264 // \ g. MU.Lock()
2265 // \ h. COND = 1
2266 // \--- i. CV.Signal()
2267 // j. MU.Unlock()
2268
Waker()2269 void Waker() {
2270
2271 GLOB = 1;
2272
2273 MU.Lock();
2274 COND = 1;
2275 CV.Signal(); //lost signal
2276 MU.Unlock();
2277
2278 n1.signal(); // Ok, now we may block.
2279 n2.wait(); // We blocked.
2280
2281 GLOB = 2;
2282
2283 MU.Lock();
2284 COND = 1;
2285 CV.Signal();
2286 MU.Unlock();
2287 }
2288
Waiter()2289 void Waiter() {
2290 n1.wait(); // The first signal is lost.
2291
2292 MU.Lock();
2293 n2.signal(); // The 2-nd signal may go.
2294 while(COND != 1)
2295 CV.Wait(&MU);
2296 MU.Unlock();
2297
2298 GLOB = 3;
2299 }
Run()2300 void Run() {
2301 printf("test52: positive\n");
2302 ANNOTATE_EXPECT_RACE(&GLOB, "test52. TP.");
2303 MyThreadArray t(Waker, Waiter);
2304 t.Start();
2305 t.Join();
2306 printf("\tGLOB=%d\n", GLOB);
2307 }
2308 REGISTER_TEST(Run, 52);
2309 } // namespace test52
2310
2311
2312 // test53: FP. Synchronization via implicit semaphore. {{{1
2313 namespace test53 {
2314 // Correctly synchronized test, but the common lockset is empty.
2315 // The variable FLAG works as an implicit semaphore.
2316 // MSMHelgrind still does not complain since it does not maintain the lockset
2317 // at the exclusive state. But MSMProp1 does complain.
2318 // See also test54.
2319 //
2320 //
2321 // Initializer: Users
2322 // 1. MU1.Lock()
2323 // 2. write(GLOB)
2324 // 3. FLAG = true
2325 // 4. MU1.Unlock()
2326 // a. MU1.Lock()
2327 // b. f = FLAG;
2328 // c. MU1.Unlock()
2329 // d. if (!f) goto a.
2330 // e. MU2.Lock()
2331 // f. write(GLOB)
2332 // g. MU2.Unlock()
2333 //
2334
2335 int GLOB = 0;
2336 bool FLAG = false;
2337 Mutex MU1, MU2;
2338
Initializer()2339 void Initializer() {
2340 MU1.Lock();
2341 GLOB = 1000;
2342 FLAG = true;
2343 MU1.Unlock();
2344 usleep(100000); // just in case
2345 }
2346
User()2347 void User() {
2348 bool f = false;
2349 while(!f) {
2350 MU1.Lock();
2351 f = FLAG;
2352 MU1.Unlock();
2353 usleep(10000);
2354 }
2355 // at this point Initializer will not access GLOB again
2356 MU2.Lock();
2357 CHECK(GLOB >= 1000);
2358 GLOB++;
2359 MU2.Unlock();
2360 }
2361
Run()2362 void Run() {
2363 if (!Tsan_PureHappensBefore())
2364 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test53. FP. Implicit semaphore");
2365 printf("test53: FP. false positive, Implicit semaphore\n");
2366 MyThreadArray t(Initializer, User, User);
2367 t.Start();
2368 t.Join();
2369 printf("\tGLOB=%d\n", GLOB);
2370 }
2371 REGISTER_TEST(Run, 53)
2372 } // namespace test53
2373
2374
2375 // test54: TN. Synchronization via implicit semaphore. Annotated {{{1
2376 namespace test54 {
2377 // Same as test53, but annotated.
2378 int GLOB = 0;
2379 bool FLAG = false;
2380 Mutex MU1, MU2;
2381
Initializer()2382 void Initializer() {
2383 MU1.Lock();
2384 GLOB = 1000;
2385 FLAG = true;
2386 ANNOTATE_CONDVAR_SIGNAL(&GLOB);
2387 MU1.Unlock();
2388 usleep(100000); // just in case
2389 }
2390
User()2391 void User() {
2392 bool f = false;
2393 while(!f) {
2394 MU1.Lock();
2395 f = FLAG;
2396 MU1.Unlock();
2397 usleep(10000);
2398 }
2399 // at this point Initializer will not access GLOB again
2400 ANNOTATE_CONDVAR_WAIT(&GLOB);
2401 MU2.Lock();
2402 CHECK(GLOB >= 1000);
2403 GLOB++;
2404 MU2.Unlock();
2405 }
2406
Run()2407 void Run() {
2408 printf("test54: negative\n");
2409 MyThreadArray t(Initializer, User, User);
2410 t.Start();
2411 t.Join();
2412 printf("\tGLOB=%d\n", GLOB);
2413 }
2414 REGISTER_TEST2(Run, 54, FEATURE|NEEDS_ANNOTATIONS)
2415 } // namespace test54
2416
2417
2418 // test55: FP. Synchronization with TryLock. Not easy for race detectors {{{1
2419 namespace test55 {
2420 // "Correct" synchronization with TryLock and Lock.
2421 //
2422 // This scheme is actually very risky.
2423 // It is covered in detail in this video:
2424 // http://youtube.com/watch?v=mrvAqvtWYb4 (slide 36, near 50-th minute).
2425 int GLOB = 0;
2426 Mutex MU;
2427
Worker_Lock()2428 void Worker_Lock() {
2429 GLOB = 1;
2430 MU.Lock();
2431 }
2432
Worker_TryLock()2433 void Worker_TryLock() {
2434 while (true) {
2435 if (!MU.TryLock()) {
2436 MU.Unlock();
2437 break;
2438 }
2439 else
2440 MU.Unlock();
2441 usleep(100);
2442 }
2443 GLOB = 2;
2444 }
2445
Run()2446 void Run() {
2447 printf("test55:\n");
2448 MyThreadArray t(Worker_Lock, Worker_TryLock);
2449 t.Start();
2450 t.Join();
2451 printf("\tGLOB=%d\n", GLOB);
2452 }
2453 REGISTER_TEST2(Run, 55, FEATURE|EXCLUDE_FROM_ALL);
2454 } // namespace test55
2455
2456
2457
2458 // test56: TP. Use of ANNOTATE_BENIGN_RACE. {{{1
2459 namespace test56 {
2460 // For whatever reason the user wants to treat
2461 // a race on GLOB as a benign race.
2462 int GLOB = 0;
2463 int GLOB2 = 0;
2464
Worker()2465 void Worker() {
2466 GLOB++;
2467 }
2468
Run()2469 void Run() {
2470 ANNOTATE_BENIGN_RACE(&GLOB, "test56. Use of ANNOTATE_BENIGN_RACE.");
2471 ANNOTATE_BENIGN_RACE(&GLOB2, "No race. The tool should be silent");
2472 printf("test56: positive\n");
2473 MyThreadArray t(Worker, Worker, Worker, Worker);
2474 t.Start();
2475 t.Join();
2476 printf("\tGLOB=%d\n", GLOB);
2477 }
2478 REGISTER_TEST2(Run, 56, FEATURE|NEEDS_ANNOTATIONS)
2479 } // namespace test56
2480
2481
2482 // test57: TN: Correct use of atomics. {{{1
2483 namespace test57 {
2484 int GLOB = 0;
Writer()2485 void Writer() {
2486 for (int i = 0; i < 10; i++) {
2487 AtomicIncrement(&GLOB, 1);
2488 usleep(1000);
2489 }
2490 }
Reader()2491 void Reader() {
2492 while (GLOB < 20) usleep(1000);
2493 }
Run()2494 void Run() {
2495 printf("test57: negative\n");
2496 MyThreadArray t(Writer, Writer, Reader, Reader);
2497 t.Start();
2498 t.Join();
2499 CHECK(GLOB == 20);
2500 printf("\tGLOB=%d\n", GLOB);
2501 }
2502 REGISTER_TEST(Run, 57)
2503 } // namespace test57
2504
2505
2506 // test58: TN. User defined synchronization. {{{1
2507 namespace test58 {
2508 int GLOB1 = 1;
2509 int GLOB2 = 2;
2510 int FLAG1 = 0;
2511 int FLAG2 = 0;
2512
2513 // Correctly synchronized test, but the common lockset is empty.
2514 // The variables FLAG1 and FLAG2 used for synchronization and as
2515 // temporary variables for swapping two global values.
2516 // Such kind of synchronization is rarely used (Excluded from all tests??).
2517
Worker2()2518 void Worker2() {
2519 FLAG1=GLOB2;
2520
2521 while(!FLAG2)
2522 ;
2523 GLOB2=FLAG2;
2524 }
2525
Worker1()2526 void Worker1() {
2527 FLAG2=GLOB1;
2528
2529 while(!FLAG1)
2530 ;
2531 GLOB1=FLAG1;
2532 }
2533
Run()2534 void Run() {
2535 printf("test58:\n");
2536 MyThreadArray t(Worker1, Worker2);
2537 t.Start();
2538 t.Join();
2539 printf("\tGLOB1=%d\n", GLOB1);
2540 printf("\tGLOB2=%d\n", GLOB2);
2541 }
2542 REGISTER_TEST2(Run, 58, FEATURE|EXCLUDE_FROM_ALL)
2543 } // namespace test58
2544
2545
2546
// test59: TN. User defined synchronization. Annotated {{{1
namespace test59 {
int COND1 = 0;  // annotation cookie for the Worker1 -> Worker2 h-b arc.
int COND2 = 0;  // annotation cookie for the Worker2 -> Worker1 h-b arc.
int GLOB1 = 1;
int GLOB2 = 2;
int FLAG1 = 0;
int FLAG2 = 0;
// same as test 58 but annotated

// Publishes GLOB2 via FLAG1, then waits for Worker1's value in FLAG2.
// The SIGNAL/WAIT annotations create the happens-before arcs the tool
// cannot deduce from the raw flag spinning.
void Worker2() {
  FLAG1=GLOB2;
  ANNOTATE_CONDVAR_SIGNAL(&COND2);     // pairs with WAIT(&COND2) in Worker1.
  while(!FLAG2) usleep(1);
  ANNOTATE_CONDVAR_WAIT(&COND1);       // pairs with SIGNAL(&COND1) in Worker1.
  GLOB2=FLAG2;
}

// Mirror image of Worker2 (swaps the roles of the flags/cookies).
void Worker1() {
  FLAG2=GLOB1;
  ANNOTATE_CONDVAR_SIGNAL(&COND1);
  while(!FLAG1) usleep(1);
  ANNOTATE_CONDVAR_WAIT(&COND2);
  GLOB1=FLAG1;
}

// The races on FLAG1/FLAG2 themselves are declared benign up front;
// with the arcs above the accesses to GLOB1/GLOB2 are then clean.
void Run() {
  printf("test59: negative\n");
  ANNOTATE_BENIGN_RACE(&FLAG1, "synchronization via 'safe' race");
  ANNOTATE_BENIGN_RACE(&FLAG2, "synchronization via 'safe' race");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
  printf("\tGLOB1=%d\n", GLOB1);
  printf("\tGLOB2=%d\n", GLOB2);
}
REGISTER_TEST2(Run, 59, FEATURE|NEEDS_ANNOTATIONS)
}  // namespace test59
2585
2586
// test60: TN. Correct synchronization using signal-wait {{{1
namespace test60 {
int COND1 = 0;
int COND2 = 0;
int GLOB1 = 1;
int GLOB2 = 2;
int FLAG2 = 0;
int FLAG1 = 0;
Mutex MU;  // guards COND1/COND2; CV is a file-scope condvar declared earlier in this file.
// same as test 59 but synchronized with signal-wait.

// Sends GLOB2 through FLAG1, signals COND1, then waits (condvar) for
// COND2 before reading Worker1's value out of FLAG2.
void Worker2() {
  FLAG1=GLOB2;

  MU.Lock();
  COND1 = 1;
  CV.Signal();
  MU.Unlock();

  MU.Lock();
  while(COND2 != 1)
    CV.Wait(&MU);
  ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);  // h-b arc from the signaller ends here.
  MU.Unlock();

  GLOB2=FLAG2;
}

// Mirror image of Worker2: signals COND2, waits for COND1.
void Worker1() {
  FLAG2=GLOB1;

  MU.Lock();
  COND2 = 1;
  CV.Signal();
  MU.Unlock();

  MU.Lock();
  while(COND1 != 1)
    CV.Wait(&MU);
  ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
  MU.Unlock();

  GLOB1=FLAG1;
}

// Properly synchronized swap of GLOB1/GLOB2; the tool should be silent.
void Run() {
  printf("test60: negative\n");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
  printf("\tGLOB1=%d\n", GLOB1);
  printf("\tGLOB2=%d\n", GLOB2);
}
REGISTER_TEST2(Run, 60, FEATURE|NEEDS_ANNOTATIONS)
}  // namespace test60
2642
2643
// test61: TN. Synchronization via Mutex as in happens-before, annotated. {{{1
namespace test61 {
Mutex MU;
int GLOB = 0;
int *P1 = NULL, *P2 = NULL;  // P1: publication slot; P2: Getter's private copy of the pointer.

// In this test Mutex lock/unlock operations introduce happens-before relation.
// We annotate the code so that MU is treated as in pure happens-before detector.

// Publishes &GLOB through P1 (write of *P1 happens before MU.Unlock()).
void Putter() {
  ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&MU);  // treat MU's unlock->lock as a h-b arc.
  MU.Lock();
  if (P1 == NULL) {
    P1 = &GLOB;
    *P1 = 1;
  }
  MU.Unlock();
}

// Polls under MU until the pointer is published, takes it, and writes
// through it outside the lock — safe only because of the h-b arc above.
void Getter() {
  bool done = false;
  while (!done) {
    MU.Lock();
    if (P1) {
      done = true;
      P2 = P1;
      P1 = NULL;
    }
    MU.Unlock();
  }
  *P2 = 2;
}

// Expected: no report (true negative) when MU is annotated as pure h-b.
void Run() {
  printf("test61: negative\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 61, FEATURE|NEEDS_ANNOTATIONS)
}  // namespace test61
2688
2689
// test62: STAB. Create as many segments as possible. {{{1
namespace test62 {
// Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
// A better scheme is to implement garbage collection for segments.
ProducerConsumerQueue Q(INT_MAX);  // unbounded in practice: Put never blocks.
const int N = 1 << 22;             // number of queue operations (= segments created).

// Pushes N dummy items; each Put is a signal and so forces the detector
// to start a new segment. Progress is logged every N/8 iterations.
void Putter() {
  for (int i = 0; i < N; i++){
    if ((i % (N / 8)) == 0) {
      printf("i=%d\n", i);
    }
    Q.Put(NULL);
  }
}

// Drains exactly the N items produced by Putter.
void Getter() {
  for (int i = 0; i < N; i++)
    Q.Get();
}

// Stress test only; excluded from the default run because of its cost.
void Run() {
  printf("test62:\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 62, STABILITY|EXCLUDE_FROM_ALL)
}  // namespace test62
2719
2720
// test63: STAB. Create as many segments as possible and do it fast. {{{1
namespace test63 {
// Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
// A better scheme is to implement garbage collection for segments.
const int N = 1 << 24;  // number of SIGNAL annotations (= segments).
int C = 0;              // dummy cookie for the annotation; never read.

// Same goal as test62 but cheaper per iteration: a bare SIGNAL annotation
// creates a segment without any queue machinery.
void Putter() {
  for (int i = 0; i < N; i++){
    if ((i % (N / 8)) == 0) {
      printf("i=%d\n", i);
    }
    ANNOTATE_CONDVAR_SIGNAL(&C);
  }
}

// Intentionally empty: a second thread must exist for segments to matter.
void Getter() {
}

void Run() {
  printf("test63:\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 63, STABILITY|EXCLUDE_FROM_ALL)
}  // namespace test63
2748
2749
// test64: TP. T2 happens-before T3, but T1 is independent. Reads in T1/T2. {{{1
namespace test64 {
// True race between T1 and T3:
//
// T1:                   T2:                   T3:
//  1. read(GLOB)         (sleep)
//                        a. read(GLOB)
//                        b. Q.Put() ----->    A. Q.Get()
//                                             B. write(GLOB)
//
//

int GLOB = 0;
ProducerConsumerQueue Q(INT_MAX);

// Unsynchronized read of GLOB — races with T3's write.
void T1() {
  CHECK(GLOB == 0);
}

// Reads GLOB, then establishes a h-b arc to T3 via the queue.
// The sleep makes it likely T1 runs before T3's write.
void T2() {
  usleep(100000);
  CHECK(GLOB == 0);
  Q.Put(NULL);
}

// Writes GLOB only after receiving from T2, so T2/T3 do not race;
// T1/T3 do.
void T3() {
  Q.Get();
  GLOB = 1;
}


// Expected: a race report on GLOB (true positive).
void Run() {
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test64: TP.");
  printf("test64: positive\n");
  MyThreadArray t(T1, T2, T3);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 64)
}  // namespace test64
2791
2792
// test65: TP. T2 happens-before T3, but T1 is independent. Writes in T1/T2. {{{1
namespace test65 {
// Similar to test64.
// True race between T1 and T3:
//
// T1:                   T2:                   T3:
//  1. MU.Lock()
//  2. write(GLOB)
//  3. MU.Unlock()         (sleep)
//                        a. MU.Lock()
//                        b. write(GLOB)
//                        c. MU.Unlock()
//                        d. Q.Put() ----->    A. Q.Get()
//                                             B. write(GLOB)
//
//

int GLOB = 0;
Mutex MU;
ProducerConsumerQueue Q(INT_MAX);

// Locked increment; still races with T3's unlocked write.
void T1() {
  MU.Lock();
  GLOB++;
  MU.Unlock();
}

// Locked increment followed by a queue Put that synchronizes with T3.
void T2() {
  usleep(100000);
  MU.Lock();
  GLOB++;
  MU.Unlock();
  Q.Put(NULL);
}

// Unlocked write, ordered after T2 (via the queue) but not after T1.
void T3() {
  Q.Get();
  GLOB = 1;
}


// A pure happens-before detector sees T1's unlock -> T2's lock -> queue
// -> T3 as one chain and misses the race, hence the conditional EXPECT.
void Run() {
  if (!Tsan_PureHappensBefore())
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test65. TP.");
  printf("test65: positive\n");
  MyThreadArray t(T1, T2, T3);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 65)
}  // namespace test65
2845
2846
// test66: TN. Two separate pairs of signaller/waiter using the same CV. {{{1
namespace test66 {
int GLOB1 = 0;
int GLOB2 = 0;
int C1 = 0;  // condition for the Signaller1/Waiter1 pair.
int C2 = 0;  // condition for the Signaller2/Waiter2 pair.
Mutex MU;    // shared by both pairs; CV is the file-scope condvar.

// Writes GLOB1, then signals pair-1's condition.
void Signaller1() {
  GLOB1 = 1;
  MU.Lock();
  C1 = 1;
  CV.Signal();
  MU.Unlock();
}

// Writes GLOB2, then (after a delay to separate the pairs in time)
// signals pair-2's condition.
void Signaller2() {
  GLOB2 = 1;
  usleep(100000);
  MU.Lock();
  C2 = 1;
  CV.Signal();
  MU.Unlock();
}

// Waits for C1, then writes GLOB1 — ordered after Signaller1's write.
void Waiter1() {
  MU.Lock();
  while (C1 != 1) CV.Wait(&MU);
  ANNOTATE_CONDVAR_WAIT(&CV);
  MU.Unlock();
  GLOB1 = 2;
}

// Waits for C2, then writes GLOB2 — ordered after Signaller2's write.
void Waiter2() {
  MU.Lock();
  while (C2 != 1) CV.Wait(&MU);
  ANNOTATE_CONDVAR_WAIT(&CV);
  MU.Unlock();
  GLOB2 = 2;
}

// Both pairs are correctly synchronized even though they share one CV;
// expected: no reports.
void Run() {
  printf("test66: negative\n");
  MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2);
  t.Start();
  t.Join();
  printf("\tGLOB=%d/%d\n", GLOB1, GLOB2);
}
REGISTER_TEST2(Run, 66, FEATURE|NEEDS_ANNOTATIONS)
}  // namespace test66
2897
2898
// test67: FN. Race between Signaller1 and Waiter2 {{{1
namespace test67 {
// Similar to test66, but there is a real race here.
//
// Here we create a happens-before arc between Signaller1 and Waiter2
// even though there should be no such arc.
// However, it's probably impossible (or just very hard) to avoid it.
int GLOB = 0;
int C1 = 0;
int C2 = 0;
Mutex MU;

// Writes GLOB with no ordering w.r.t. Waiter2's write — the real race.
void Signaller1() {
  GLOB = 1;
  MU.Lock();
  C1 = 1;
  CV.Signal();
  MU.Unlock();
}

// Signals C2 only; does not touch GLOB.
void Signaller2() {
  usleep(100000);
  MU.Lock();
  C2 = 1;
  CV.Signal();
  MU.Unlock();
}

// Waits for C1; does not touch GLOB.
void Waiter1() {
  MU.Lock();
  while (C1 != 1) CV.Wait(&MU);
  ANNOTATE_CONDVAR_WAIT(&CV);
  MU.Unlock();
}

// Waits for C2, then writes GLOB. Because both pairs share CV, the tool
// may see a spurious arc Signaller1 -> Waiter2 and miss the race (FN).
void Waiter2() {
  MU.Lock();
  while (C2 != 1) CV.Wait(&MU);
  ANNOTATE_CONDVAR_WAIT(&CV);
  MU.Unlock();
  GLOB = 2;
}

void Run() {
  ANNOTATE_EXPECT_RACE(&GLOB, "test67. FN. Race between Signaller1 and Waiter2");
  printf("test67: positive\n");
  MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 67, FEATURE|NEEDS_ANNOTATIONS|EXCLUDE_FROM_ALL)
}  // namespace test67
2952
2953
// test68: TP. Writes are protected by MU, reads are not. {{{1
namespace test68 {
// In this test, all writes to GLOB are protected by a mutex
// but some reads go unprotected.
// This is certainly a race, but in some cases such code could occur in
// a correct program. For example, the unprotected reads may be used
// for showing statistics and are not required to be precise.
int GLOB = 0;
int COND = 0;               // count of finished writers, guarded by MU1.
const int N_writers = 3;
Mutex MU, MU1;              // MU guards GLOB's writes; MU1 guards COND.

// Increments GLOB 100 times under MU, then announces completion via COND.
void Writer() {
  for (int i = 0; i < 100; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
  }

  // we are done
  MU1.Lock();
  COND++;
  MU1.Unlock();
}

// Polls GLOB without any lock (the racy read) until all writers finish.
void Reader() {
  bool cont = true;
  while (cont) {
    CHECK(GLOB >= 0);  // unprotected read — this is the expected race.

    // are we done?
    MU1.Lock();
    if (COND == N_writers)
      cont = false;
    MU1.Unlock();
    usleep(100);
  }
}

// Expected: a race report on GLOB (true positive).
void Run() {
  ANNOTATE_EXPECT_RACE(&GLOB, "TP. Writes are protected, reads are not.");
  printf("test68: positive\n");
  MyThreadArray t(Reader, Writer, Writer, Writer);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 68)
}  // namespace test68
3003
3004
// test69: {{{1
namespace test69 {
// This is the same as test68, but annotated.
// We do not want to annotate GLOB as a benign race
// because we want to allow racy reads only in certain places.
//
// TODO:
int GLOB = 0;
int COND = 0;
const int N_writers = 3;
int FAKE_MU = 0;   // NOTE(review): declared but unused in this namespace.
Mutex MU, MU1;

// Same as test68's Writer, but only 10 iterations.
void Writer() {
  for (int i = 0; i < 10; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
  }

  // we are done
  MU1.Lock();
  COND++;
  MU1.Unlock();
}

// The racy read of GLOB is wrapped in IGNORE_READS_BEGIN/END so the tool
// skips exactly this read and nothing else.
void Reader() {
  bool cont = true;
  while (cont) {
    ANNOTATE_IGNORE_READS_BEGIN();
    CHECK(GLOB >= 0);
    ANNOTATE_IGNORE_READS_END();

    // are we done?
    MU1.Lock();
    if (COND == N_writers)
      cont = false;
    MU1.Unlock();
    usleep(100);
  }
}

// Expected: no report — the only racy access is annotated away.
void Run() {
  printf("test69: negative\n");
  MyThreadArray t(Reader, Writer, Writer, Writer);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 69)
}  // namespace test69
3056
// test70: STAB. Check that TRACE_MEMORY works. {{{1
namespace test70 {
int GLOB = 0;
// Single-threaded: just exercises the ANNOTATE_TRACE_MEMORY annotation,
// which asks the tool to log all accesses to GLOB.
void Run() {
  printf("test70: negative\n");
  ANNOTATE_TRACE_MEMORY(&GLOB);
  GLOB = 1;
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 70)
}  // namespace test70
3068
3069
3070
namespace NegativeTests_Strlen {  // {{{1
// This test is a reproducer for a benign race in strlen (as well as index, etc).
// Some implementations of strlen may read up to 7 bytes past the end of the string
// thus touching memory which may not belong to this string.
// Such race is benign because the data read past the end of the string is not used.
//
// Here, we allocate a 8-byte aligned string str and initialize first 5 bytes.
// Then one thread calls strlen(str) (as well as index & rindex)
// and another thread initializes str[5]..str[7].
//
// This can be fixed in Helgrind by intercepting strlen and replacing it
// with a simpler implementation.

char *str;   // 8 bytes; bytes [0..4] set by the test body, [5..7] by WorkerY.
char *tmp2;  // scratch destination for strcpy/stpcpy checks.

// Runs the whole zoo of str*/mem* functions over str[0..4] while WorkerY
// concurrently writes str[5..7] — the over-read race under test.
void WorkerX() {
  usleep(100000);
  ASSERT_TRUE(strlen(str) == 4);
#ifndef WIN32
  EXPECT_TRUE(index(str, 'X') == str);
  EXPECT_TRUE(index(str, 'x') == str+1);
  EXPECT_TRUE(index(str, 'Y') == NULL);
#ifndef ANDROID
  EXPECT_TRUE(rindex(str, 'X') == str+2);
  EXPECT_TRUE(rindex(str, 'x') == str+3);
  EXPECT_TRUE(rindex(str, 'Y') == NULL);
#endif
#else
  EXPECT_TRUE(lstrlenA(NULL) == 0);
  EXPECT_TRUE(lstrlenW(NULL) == 0);
#endif
  EXPECT_TRUE(strchr(str, 'X') == str);
  EXPECT_TRUE(strchr(str, 'x') == str+1);
  EXPECT_TRUE(strchr(str, 'Y') == NULL);
  EXPECT_TRUE(memchr(str, 'X', 8) == str);
  EXPECT_TRUE(memchr(str, 'x', 8) == str+1);
  char tmp[100] = "Zzz";
  EXPECT_TRUE(memmove(tmp, str, strlen(str) + 1) == tmp);
  EXPECT_TRUE(strcmp(tmp,str) == 0);
  EXPECT_TRUE(strncmp(tmp,str, 4) == 0);
  EXPECT_TRUE(memmove(str, tmp, strlen(tmp) + 1) == str);
#ifndef WIN32
#ifndef ANDROID
  EXPECT_TRUE(stpcpy(tmp2, str) == tmp2+4);
#endif
  EXPECT_TRUE(strcpy(tmp2, str) == tmp2);
  EXPECT_TRUE(strncpy(tmp, str, 4) == tmp);
  // These may not be properly intercepted since gcc -O1 may inline
  // strcpy/stpcpy in presence of a statically sized array. Damn.
  // EXPECT_TRUE(stpcpy(tmp, str) == tmp+4);
  // EXPECT_TRUE(strcpy(tmp, str) == tmp);
#endif
  EXPECT_TRUE(strrchr(str, 'X') == str+2);
  EXPECT_TRUE(strrchr(str, 'x') == str+3);
  EXPECT_TRUE(strrchr(str, 'Y') == NULL);
}
// Concurrently fills the tail bytes past the '\0' at str[4].
void WorkerY() {
  str[5] = 'Y';
  str[6] = 'Y';
  str[7] = '\0';
}

// Sets up str ("XxXx\0"), races WorkerX/WorkerY, then sanity-checks both
// halves of the buffer and (single-threaded) the signed-char corner cases
// of strchr/strrchr.
TEST(NegativeTests, StrlenAndFriends) {
  str = new char[8];
  tmp2 = new char[8];
  str[0] = 'X';
  str[1] = 'x';
  str[2] = 'X';
  str[3] = 'x';
  str[4] = '\0';
  MyThread t1(WorkerY);
  MyThread t2(WorkerX);
  t1.Start();
  t2.Start();
  t1.Join();
  t2.Join();
  ASSERT_STREQ("XxXx", str);
  ASSERT_STREQ("YY", str+5);

  // Bytes with the high bit set exercise the int-vs-char conversion in
  // strchr/strrchr interceptors.
  char foo[8] = {10, 20, 127, (char)128, (char)250, -50, 0};
  EXPECT_TRUE(strchr(foo, 10) != 0);
  EXPECT_TRUE(strchr(foo, 127) != 0);
  EXPECT_TRUE(strchr(foo, 128) != 0);
  EXPECT_TRUE(strchr(foo, 250) != 0);
  EXPECT_TRUE(strchr(foo, -50) != 0);
  EXPECT_TRUE(strchr(foo, -60) == 0);
  EXPECT_TRUE(strchr(foo, 0) != 0);
  EXPECT_TRUE(strchr(foo, 0) == foo + strlen(foo));
  EXPECT_TRUE(strrchr(foo, 10) != 0);
  EXPECT_TRUE(strrchr(foo, 0) != 0);
  EXPECT_TRUE(strrchr(foo, 0) == foo + strlen(foo));
  EXPECT_TRUE(strrchr(foo, 250) != 0);
  EXPECT_TRUE(strrchr(foo, -60) == 0);
  delete [] str;
  delete [] tmp2;
  // TODO(kcc): add more tests to check that interceptors are correct.
}
}  // namespace NegativeTests_Strlen
3169
namespace NegativeTests_EmptyRep {  // {{{1
// Each thread erases a default-constructed (empty) string, touching the
// shared std::string empty-rep object inside libstdc++.
void Worker() {
  string s;
  s.erase();
}

TEST(NegativeTests, DISABLED_EmptyRepTest) {
  // This is a test for the reports on an internal race in std::string implementation.
  // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=40518
  // ThreadSanitizer should be silent on this, but currently it is silent
  // only on Valgrind/Linux.
  MyThreadArray mta(Worker, Worker);
  mta.Start();
  mta.Join();
}
}  // namespace NegativeTests_EmptyRep
3186
namespace NegativeTests_StdStringDtor {  // {{{1
// Some implementations of std::string (including the one on Linux)
// are unfriendly to race detectors since they use atomic reference counting
// in a way that race detectors can not understand.
//
// See http://code.google.com/p/data-race-test/issues/detail?id=40
string *s = NULL;
BlockingCounter *counter = NULL;  // lets the main thread wait until all copies exist.

// Copies *s (ref count ++), reports readiness, then lets the copy die
// (ref count --) racing with the main thread's `delete s`.
void DestroyWorker() {
  string x = *s;  // force string copy (increments ref count).
  counter->DecrementCount();
  // x is destructed, ref count is decremented.
}

// Same, but the ref count drop happens via assignment instead of the dtor.
void AssignWorker() {
  string x = *s;  // force string copy (increments ref count).
  counter->DecrementCount();
  // x is assigned, the ref count is decremented.
  usleep(100000);
  x = "ZZZ";
}

// delete-s races with the workers' destructors of their copies; the
// COW ref counting is atomic, so no report is expected.
TEST(NegativeTests, StdStringDtorVsDtor) {
  MyThreadArray mta(DestroyWorker, DestroyWorker, DestroyWorker);
  counter = new BlockingCounter(3);
  s = new string ("foo");
  mta.Start();

  counter->Wait();  // all three workers hold a copy now.

  delete s;  // ref count becomes zero and the object is destroyed.
  mta.Join();
  delete counter;
}

// Variant where the workers drop their reference via assignment.
TEST(NegativeTests, DISABLED_StdStringDtorVsAssign) {
  MyThreadArray mta(AssignWorker, AssignWorker, AssignWorker);
  counter = new BlockingCounter(3);
  s = new string ("foo");
  mta.Start();

  counter->Wait();

  delete s;  // ref count becomes zero and the object is destroyed.
  mta.Join();
  delete counter;
}
}  // namespace NegativeTests_StdStringDtor
3236
namespace PositiveTests_MutexDtorNoSync {
// Check that Mutex::~Mutex() doesn't introduce h-b arcs.
int *GLOB = NULL;

// Writes *GLOB, then constructs and destroys a local Mutex; the dtor
// must NOT count as synchronization.
void WriteThenScopedLocalMutex() {
  *GLOB = 1;
  {
    Mutex l;
  }
}

// Mirror thread: local Mutex lifetime first, then the racing write.
void ScopedLocalMutexThenWrite() {
  {
    Mutex l;
  }
  *GLOB = 2;
}

// Expected: race on *GLOB (the two Mutex lifetimes are unrelated objects,
// even if they reuse the same stack address).
TEST(PositiveTests, MutexDtorNoSyncTest) {
  GLOB = new int(0);
  ANNOTATE_EXPECT_RACE(GLOB, "TP: PositiveTests.MutexDtorNoSyncTest");
  MyThreadArray t(WriteThenScopedLocalMutex,
                  ScopedLocalMutexThenWrite);
  t.Start();
  t.Join();
  delete GLOB;
}

// Same as above, but the local mutex is actually locked/unlocked —
// still no cross-thread h-b arc since each thread has its own mutex.
void WriteThenScopedLocalMutexLockUnlock() {
  *GLOB = 1;
  {
    Mutex l;
    l.Lock();
    l.Unlock();
  }
}

void ScopedLocalMutexLockUnlockThenWrite() {
  {
    Mutex l;
    l.Lock();
    l.Unlock();
  }
  *GLOB = 2;
}

TEST(PositiveTests, MutexDtorNoSyncTest2) {
  GLOB = new int(0);
  ANNOTATE_EXPECT_RACE(GLOB, "TP: PositiveTests.MutexDtorNoSyncTest2");
  MyThreadArray t(WriteThenScopedLocalMutexLockUnlock,
                  ScopedLocalMutexLockUnlockThenWrite);
  t.Start();
  t.Join();
  delete GLOB;
}

}  // namespace PositiveTests_MutexDtorNoSync
3294
namespace PositiveTests_FprintfThreadCreateTest {
// Check that fprintf doesn't introduce h-b with the start of the
// following thread
int *GLOB;
StealthNotification *n;  // orders t2's creation after Worker1's fprintf.

// Writes *GLOB, calls fprintf (which takes stdio's internal lock),
// then allows the main thread to start Worker2.
void Worker1() {
  *GLOB = 1;
  fprintf(stdout, "Hello, world!\n");
  n->signal();
}

// Racing write: ordered after the fprintf only through stdio internals,
// which must not be treated as synchronization.
void Worker2() {
  *GLOB = 2;
}

#if !defined(_MSC_VER)
// TODO(timurrrr): investigate Windows FN and un-#if
TEST(PositiveTests, FprintfThreadCreateTest) {
  GLOB = new int;
  ANNOTATE_EXPECT_RACE(GLOB, "TP: PositiveTests.FprintfThreadCreateTest");
  n = new StealthNotification;
  MyThread t1(Worker1);
  t1.Start();
  n->wait();           // ensure the fprintf happened before t2 exists.
  MyThread t2(Worker2);
  t2.Start();
  t2.Join();
  t1.Join();
  delete n;
  delete GLOB;
}
#endif

}  // namespace PositiveTests_FprintfThreadCreateTest
3330
// test72: STAB. Stress test for the number of segment sets (SSETs). {{{1
namespace test72 {
#ifndef NO_BARRIER
// Variation of test33.
// Instead of creating Nlog*N_iter threads,
// we create Nlog threads and do N_iter barriers.
int GLOB = 0;                 // used only to hand each thread a distinct id under MU.
const int N_iter = 30;
const int Nlog  = 16;
const int N     = 1 << Nlog;
static int64_t ARR1[N];
static int64_t ARR2[N];
Barrier *barriers[N_iter];    // one barrier per iteration, all Nlog threads block on each.
Mutex   MU;

// Each of the Nlog threads gets id n and, per iteration, reads the
// array elements whose index has bit n set; the barrier after each
// iteration forces new segments, stressing segment-set storage.
void Worker() {
  MU.Lock();
  int n = ++GLOB;
  MU.Unlock();

  n %= Nlog;

  for (int it = 0; it < N_iter; it++) {
    // Iterate N_iter times, block on barrier after each iteration.
    // This way Helgrind will create new segments after each barrier.

    for (int x = 0; x < 2; x++) {
      // run the inner loop twice.
      // When a memory location is accessed second time it is likely
      // that the state (SVal) will be unchanged.
      // The memory machine may optimize this case.
      for (int i = 0; i < N; i++) {
        // ARR1[i] and ARR2[N-1-i] are accessed by threads from i-th subset
        if (i & (1 << n)) {
          CHECK(ARR1[i] == 0);
          CHECK(ARR2[N-1-i] == 0);
        }
      }
    }
    barriers[it]->Block();
  }
}


// Creates the barriers and Nlog workers, joins them, and cleans up.
void Run() {
  printf("test72:\n");

  std::vector<MyThread*> vec(Nlog);

  for (int i = 0; i < N_iter; i++)
    barriers[i] = new Barrier(Nlog);

  // Create and start Nlog threads
  for (int i = 0; i < Nlog; i++) {
    vec[i] = new MyThread(Worker);
    vec[i]->Start();
  }

  // Join all threads.
  for (int i = 0; i < Nlog; i++) {
    vec[i]->Join();
    delete vec[i];
  }
  for (int i = 0; i < N_iter; i++)
    delete barriers[i];

  /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
         GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
}
REGISTER_TEST2(Run, 72, STABILITY|PERFORMANCE|EXCLUDE_FROM_ALL);
#endif // NO_BARRIER
}  // namespace test72
3403
3404
// test73: STAB. Stress test for the number of (SSETs), different access sizes. {{{1
namespace test73 {
#ifndef NO_BARRIER
// Variation of test72.
// We perform accesses of different sizes to the same location.
int GLOB = 0;
const int N_iter = 2;
const int Nlog  = 16;
const int N     = 1 << Nlog;
static int64_t ARR1[N];
static int     ARR2[N];
Barrier *barriers[N_iter];
Mutex   MU;

// Like test72's Worker, but re-reads the same bytes of ARR1/ARR2 as
// 8-, 4-, 2- and 1-byte accesses (x = 0..3 selects the width; the `off`
// loop covers all sub-elements so the byte range touched is identical).
void Worker() {
  MU.Lock();
  int n = ++GLOB;
  MU.Unlock();

  n %= Nlog;

  for (int it = 0; it < N_iter; it++) {
    // Iterate N_iter times, block on barrier after each iteration.
    // This way Helgrind will create new segments after each barrier.

    for (int x = 0; x < 4; x++) {
      for (int i = 0; i < N; i++) {
        // ARR1[i] are accessed by threads from i-th subset
        if (i & (1 << n)) {
          for (int off = 0; off < (1 << x); off++) {
            switch(x) {
              case 0: CHECK( ARR1         [i * (1<<x) + off] == 0); break;
              case 1: CHECK(((int*)  (ARR1))[i * (1<<x) + off] == 0); break;
              case 2: CHECK(((short*)(ARR1))[i * (1<<x) + off] == 0); break;
              case 3: CHECK(((char*) (ARR1))[i * (1<<x) + off] == 0); break;
            }
            // ARR2 has 4-byte elements, so the full-width case is x==1.
            switch(x) {
              case 1: CHECK(((int*)  (ARR2))[i * (1<<x) + off] == 0); break;
              case 2: CHECK(((short*)(ARR2))[i * (1<<x) + off] == 0); break;
              case 3: CHECK(((char*) (ARR2))[i * (1<<x) + off] == 0); break;
            }
          }
        }
      }
    }
    barriers[it]->Block();
  }
}



// Same driver as test72: barriers, Nlog workers, join, cleanup.
void Run() {
  printf("test73:\n");

  std::vector<MyThread*> vec(Nlog);

  for (int i = 0; i < N_iter; i++)
    barriers[i] = new Barrier(Nlog);

  // Create and start Nlog threads
  for (int i = 0; i < Nlog; i++) {
    vec[i] = new MyThread(Worker);
    vec[i]->Start();
  }

  // Join all threads.
  for (int i = 0; i < Nlog; i++) {
    vec[i]->Join();
    delete vec[i];
  }
  for (int i = 0; i < N_iter; i++)
    delete barriers[i];

  /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
         GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/
}
REGISTER_TEST2(Run, 73, STABILITY|PERFORMANCE|EXCLUDE_FROM_ALL);
#endif // NO_BARRIER
}  // namespace test73
3484
3485
// test74: PERF. A lot of lock/unlock calls. {{{1
namespace test74 {
const int N = 100000;
Mutex MU;
// Single-threaded lock/unlock churn; measures per-operation overhead
// of the tool's mutex interception.
TEST(StressTests, ManyLocksUnlocks) {
  for (int i = 0; i < N; i++ ) {
    MU.Lock();
    MU.Unlock();
  }
}
}  // namespace test74
3497
// RefCountedClass {{{1
// Mutex-based reference-counted object used by tests 76-78.
// data_ is guarded by the per-object mu_; ref_ is guarded by the
// class-wide static MU. The last Unref() deletes the object, which is
// invisible to lockset-based detectors unless annotated.
struct RefCountedClass {
 public:
  RefCountedClass() {
    annotate_unref_ = false;
    ref_ = 0;
    data_ = 0;
  }

  ~RefCountedClass() {
    CHECK(ref_ == 0);       // race may be reported here
    int data_val = data_;   // and here
                            // if MU is not annotated
    data_ = 0;
    ref_ = -1;              // poison so a late Ref()/Unref() trips the CHECKs.
    printf("\tRefCountedClass::data_ = %d\n", data_val);
  }

  // Mutates data_ under the per-object lock.
  void AccessData() {
    this->mu_.Lock();
    this->data_++;
    this->mu_.Unlock();
  }

  // Increments the reference count under MU.
  void Ref() {
    MU.Lock();
    CHECK(ref_ >= 0);
    ref_++;
    MU.Unlock();
  }

  // Decrements the reference count; the thread that drops it to zero
  // deletes the object. Optionally emits a HAPPENS_BEFORE/AFTER pair so
  // hybrid detectors see the deleter ordered after all other Unref()s.
  void Unref() {
    MU.Lock();
    CHECK(ref_ > 0);
    ref_--;
    bool do_delete = ref_ == 0;
    if (annotate_unref_) {
      ANNOTATE_HAPPENS_BEFORE(this);
    }
    MU.Unlock();
    if (do_delete) {
      if (annotate_unref_) {
        ANNOTATE_HAPPENS_AFTER(this);
      }
      delete this;
    }
  }

  // Treat MU as in a pure happens-before detector (affects all instances).
  static void Annotate_MU() {
    ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&MU);
  }
  // Enable the HAPPENS_BEFORE/AFTER pair inside Unref().
  void AnnotateUnref() {
    annotate_unref_ = true;
  }
  // Declare any race on this object benign (used by the FP test 76).
  void Annotate_Race() {
    ANNOTATE_BENIGN_RACE_SIZED(this, sizeof(*this), "needs annotation");
  }
 private:
  bool annotate_unref_;  // whether Unref() should emit h-b annotations.

  int data_;
  Mutex mu_;  // protects data_

  int ref_;
  static Mutex MU;  // protects ref_
};
3564
3565 Mutex RefCountedClass::MU;
3566
// test76: FP. Ref counting, no annotations. {{{1
namespace test76 {
#ifndef NO_BARRIER
int GLOB = 0;
Barrier barrier(4);  // makes all four workers hold a reference simultaneously.
RefCountedClass *object = NULL;

// Ref, rendezvous, touch the data, Unref; the last Unref deletes the
// object. Without annotations a lockset tool reports a false positive
// on the destructor's accesses.
void Worker() {
  object->Ref();
  barrier.Block();
  object->AccessData();
  object->Unref();
}
void Run() {
  printf("test76: false positive (ref counting)\n");
  object = new RefCountedClass;
  object->Annotate_Race();  // suppress the expected FP on the object's fields.
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 76, FEATURE)
#endif // NO_BARRIER
}  // namespace test76
3590
3591
3592
// test77: TN. Ref counting, MU is annotated. {{{1
namespace test77 {
#ifndef NO_BARRIER
// same as test76, but RefCountedClass::MU is annotated.
int GLOB = 0;
Barrier barrier(4);
RefCountedClass *object = NULL;

// Identical worker to test76; correctness now comes from Annotate_MU().
void Worker() {
  object->Ref();
  barrier.Block();
  object->AccessData();
  object->Unref();
}
void Run() {
  printf("test77: true negative (ref counting), mutex is annotated\n");
  RefCountedClass::Annotate_MU();  // MU's unlock->lock now counts as a h-b arc.
  object = new RefCountedClass;
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 77)
#endif // NO_BARRIER
}  // namespace test77
3617
3618
3619
// test78: TN. Ref counting, Unref is annotated. {{{1
namespace test78 {
#ifndef NO_BARRIER
// same as test76, but RefCountedClass::Unref is annotated.
int GLOB = 0;
Barrier barrier(4);
RefCountedClass *object = NULL;

// Same worker as tests 76/77.
void Worker() {
  object->Ref();
  barrier.Block();
  object->AccessData();
  object->Unref();
}
void Run() {
  printf("test78: true negative (ref counting), Unref is annotated\n");
  // NOTE(review): despite the test name/comment, this calls Annotate_MU()
  // (same as test77) and never object->AnnotateUnref(). Possibly a
  // copy-paste slip — confirm against upstream before changing, since it
  // alters what the detector is exercised on.
  RefCountedClass::Annotate_MU();
  object = new RefCountedClass;
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 78)
#endif // NO_BARRIER
}  // namespace test78
3644
3645
3646
// test79 TN. Swap. {{{1
namespace test79 {
#if 0
typedef __gnu_cxx::hash_map<int, int> map_t;
#else
typedef std::map<int, int> map_t;
#endif
map_t MAP;
Mutex MU;

// Here we use swap to pass MAP between threads.
// The synchronization is correct, but w/o ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX
// Helgrind will complain.

// Takes the whole MAP (by swapping in an empty one) under MU; the old
// contents are destroyed outside the lock.
void Worker1() {
  map_t tmp;
  MU.Lock();
  // We swap the new empty map 'tmp' with 'MAP'.
  MAP.swap(tmp);
  MU.Unlock();
  // tmp (which is the old version of MAP) is destroyed here.
}

// Updates MAP under MU.
void Worker2() {
  MU.Lock();
  MAP[1]++;  // Just update MAP under MU.
  MU.Unlock();
}

// Extra swapper/updater to create more interleavings.
void Worker3() { Worker1(); }
void Worker4() { Worker2(); }

void Run() {
  ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&MU);
  printf("test79: negative\n");
  MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 79)
}  // namespace test79
3688
3689
// AtomicRefCountedClass. {{{1
// Same as RefCountedClass, but using atomic ops instead of mutex.
// Used by tests 80-81.
struct AtomicRefCountedClass {
 public:
  AtomicRefCountedClass() {
    annotate_unref_ = false;
    ref_ = 0;
    data_ = 0;
  }

  ~AtomicRefCountedClass() {
    CHECK(ref_ == 0);       // race may be reported here
    int data_val = data_;   // and here
    data_ = 0;
    ref_ = -1;
    printf("\tRefCountedClass::data_ = %d\n", data_val);
  }

  // Mutates data_ under the per-object lock.
  void AccessData() {
    this->mu_.Lock();
    this->data_++;
    this->mu_.Unlock();
  }

  // Lock-free reference acquisition.
  void Ref() {
    AtomicIncrement(&ref_, 1);
  }

  // DISCLAIMER: I am not sure I've implemented this correctly
  // (might require some memory barrier, etc).
  // But this implementation of reference counting is enough for
  // the purpose of Helgrind demonstration.
  // NOTE(review): the plain read of ref_ after the decrement (rather than
  // using the decrement's return value) is part of that disclaimer.
  void Unref() {
    AtomicIncrement(&ref_, -1);
    if (annotate_unref_) { ANNOTATE_HAPPENS_BEFORE(this); }
    if (ref_ == 0) {
      if (annotate_unref_) { ANNOTATE_HAPPENS_AFTER(this); }
      delete this;
    }
  }

  // Enable the HAPPENS_BEFORE/AFTER pair inside Unref().
  void AnnotateUnref() {
    annotate_unref_ = true;
  }
  // Declare the expected race on data_ benign (used by the FP test 80).
  void Annotate_Race() {
    ANNOTATE_BENIGN_RACE(&this->data_, "needs annotation");
  }
 private:
  bool annotate_unref_;

  Mutex mu_;
  int data_;  // under mu_

  int ref_;   // used in atomic ops.
};
3745
// test80: FP. Ref counting with atomics, no annotations. {{{1
namespace test80 {
#ifndef NO_BARRIER
int GLOB = 0;
Barrier barrier(4);
AtomicRefCountedClass *object = NULL;

// Same shape as test76's worker, but the ref counting is atomic.
void Worker() {
  object->Ref();
  barrier.Block();
  object->AccessData();
  object->Unref();  // All the tricky stuff is here.
}
void Run() {
  printf("test80: false positive (ref counting)\n");
  object = new AtomicRefCountedClass;
  object->Annotate_Race();  // suppress the expected FP on data_.
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 80, FEATURE|EXCLUDE_FROM_ALL)
#endif // NO_BARRIER
}  // namespace test80
3769
3770
// test81: TN. Ref counting with atomics, Unref is annotated. {{{1
namespace test81 {
#ifndef NO_BARRIER
// same as test80, but Unref is annotated.
int GLOB = 0;
Barrier barrier(4);
AtomicRefCountedClass *object = NULL;

// Same worker as test80; the h-b pair inside Unref() makes it clean.
void Worker() {
  object->Ref();
  barrier.Block();
  object->AccessData();
  object->Unref();  // All the tricky stuff is here.
}
void Run() {
  printf("test81: negative (annotated ref counting)\n");
  object = new AtomicRefCountedClass;
  object->AnnotateUnref();  // Unref() will emit HAPPENS_BEFORE/AFTER.
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 81, FEATURE|EXCLUDE_FROM_ALL)
#endif // NO_BARRIER
}  // namespace test81
3795
3796
// test82: Object published w/o synchronization. {{{1
namespace test82 {

// Writer creates a new object and makes the pointer visible to the Reader.
// Reader waits until the object pointer is non-null and reads the object.
//
// On Core 2 Duo this test will sometimes (quite rarely) fail in
// the CHECK below, at least if compiled with -O2.
//
// The sequence of events:
//    Thread1:                  Thread2:
//      a. arr_[...] = ...
//      b. foo[i]    = ...
//                               A. ... = foo[i]; // non NULL
//                               B. ... = arr_[...];
//
// Since there is no proper synchronization, during the event (B)
// Thread2 may not see the result of the event (a).
// On x86 and x86_64 this happens due to compiler reordering instructions.
// On other architectures it may also happen due to cache inconsistency.

class FOO {
 public:
  FOO() {
    idx_ = rand() % 1024;
    arr_[idx_] = 77777;
    //   __asm__ __volatile__("" : : : "memory"); // this fixes!
  }
  // Verifies that the constructor's write to arr_ is visible; fails
  // (rarely) when the object was published without synchronization.
  static void check(volatile FOO *foo) {
    CHECK(foo->arr_[foo->idx_] == 77777);
  }
 private:
  int idx_;
  int arr_[1024];
};

const int N = 100000;
static volatile FOO *foo[N];
Mutex MU;

// Publishes N objects through plain (unsynchronized) pointer stores.
void Writer() {
  for (int i = 0; i < N; i++) {
    foo[i] = new FOO;
    usleep(100);
  }
}

// Spins until each pointer becomes non-null, then reads the object.
void Reader() {
  for (int i = 0; i < N; i++) {
    while (!foo[i]) {
      MU.Lock();   // this is NOT a synchronization,
      MU.Unlock(); // it just helps foo[i] to become visible in Reader.
    }
    if ((i % 100) == 0) {
      printf("rd %d\n", i);
    }
    // At this point Reader() sees the new value of foo[i]
    // but in very rare cases will not see the new value of foo[i]->arr_.
    // Thus this CHECK will sometimes fail.
    FOO::check(foo[i]);
  }
}

void Run() {
  printf("test82: positive\n");
  MyThreadArray t(Writer, Reader);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 82, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test82
3868
3869
// test83: Object published w/o synchronization (simple version){{{1
namespace test83 {
// A simplified version of test82 (example of a wrong code).
// This test, though incorrect, will almost never fail.
volatile static int *ptr = NULL;
Mutex MU;

// Publishes the object via a plain pointer store (no synchronization).
void Writer() {
  usleep(100);
  ptr = new int(777);
}

// Spins until the pointer becomes visible, then reads through it.
void Reader() {
  while(!ptr) {
    MU.Lock(); // Not a synchronization!
    MU.Unlock();
  }
  CHECK(*ptr == 777);
}

void Run() {
  // printf("test83: positive\n");
  MyThreadArray t(Writer, Reader);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 83, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test83
3898
3899
// test84: TP. True race (regression test for a bug related to atomics){{{1
namespace test84 {
// Helgrind should not create HB arcs for the bus lock even when
// --pure-happens-before=yes is used.
// Bug found in by Bart Van Assche, the test is taken from
// valgrind file drd/tests/atomic_var.c.
static int s_x = 0;
/* s_dummy[] ensures that s_x and s_y are not in the same cache line. */
static char s_dummy[512] = {0};
static int s_y;

// Writes s_y (plain store), then atomically increments s_x.
void thread_func_1()
{
  s_y = 1;
  AtomicIncrement(&s_x, 1);
}

// Spins on s_x via atomic ops, then reads s_y. The atomic ops on s_x
// must NOT be treated as a synchronization, so the read of s_y races.
void thread_func_2()
{
  while (AtomicIncrement(&s_x, 0) == 0)
    ;
  printf("y = %d\n", s_y);
}


void Run() {
  CHECK(s_dummy[0] == 0);  // Avoid compiler warning about 's_dummy unused'.
  printf("test84: positive\n");
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&s_y, "test84: TP. true race.");
  MyThreadArray t(thread_func_1, thread_func_2);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 84)
}  // namespace test84
3935
3936
3937 // Test for RunningOnValgrind(). {{{1
TEST(NegativeTests,RunningOnValgrindTest)3938 TEST(NegativeTests, RunningOnValgrindTest) {
3939 printf("RunningOnValgrind() = %d\n", RunningOnValgrind());
3940 }
3941
namespace NegativeTests_BenignRaceInDtor {  // {{{
// Test for race inside DTOR: racey write to vptr. Benign.
// This test shows a racey access to vptr (the pointer to vtbl).
// We have class A and class B derived from A.
// Both classes have a virtual function f() and a virtual DTOR.
// We create an object 'A *a = new B'
// and pass this object from Thread1 to Thread2.
// Thread2 calls a->f(). This call reads a->vtpr.
// Thread1 deletes the object. B::~B waits until the object can be destroyed
// (flag_stopped == true) but at the very beginning of B::~B
// a->vptr is written to.
// So, we have a race on a->vptr.
// On this particular test this race is benign, but HarmfulRaceInDtor shows
// how such race could harm.
//
//
//
// Thread1:                                            Thread2:
// 1. A a* = new B;
// 2. Q.Put(a); ------------\                          .
//                           \-------------------->   a. a = Q.Get();
//                                                    b. a->f();
//                                       /---------   c. flag_stopped = true;
// 3. delete a;                         /
//    waits until flag_stopped <-------/
//    inside the dtor
//

bool flag_stopped = false;
Mutex mu;

ProducerConsumerQueue Q(INT_MAX);  // Used to pass A* between threads.

struct A {
  A()  { printf("A::A()\n"); }
  virtual ~A() { printf("A::~A()\n"); }
  virtual void f() { }

  uintptr_t padding[15];
} ALIGNED(64);

struct B: A {
  B()  { printf("B::B()\n"); }
  virtual ~B() {
    // The race is here.    <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    // (the compiler-generated write of the vptr at dtor entry)
    printf("B::~B()\n");
    // wait until flag_stopped is true.
    mu.LockWhen(Condition(&ArgIsTrue, &flag_stopped));
    mu.Unlock();
    printf("B::~B() done\n");
  }
  virtual void f() { }
};

// Thread1 in the diagram above: creates, publishes and deletes the object.
void Waiter() {
  A *a = new B;
  printf("Waiter: B created\n");
  Q.Put(a);
  usleep(100000); // so that Worker calls a->f() first.
  printf("Waiter: deleting B\n");
  delete a;
  printf("Waiter: B deleted\n");
  usleep(100000);
  printf("Waiter: done\n");
}

// Thread2 in the diagram above: uses the object, then allows destruction.
void Worker() {
  A *a = reinterpret_cast<A*>(Q.Get());
  printf("Worker: got A\n");
  a->f();

  mu.Lock();
  flag_stopped = true;
  mu.Unlock();
  usleep(200000);
  printf("Worker: done\n");
}

TEST(NegativeTests, BenignRaceInDtor) {
  MyThreadArray t(Waiter, Worker);
  t.Start();
  t.Join();
}
}  // namespace
4026
4027
namespace PositiveTests_HarmfulRaceInDtor {  // {{{
// A variation of BenignRaceInDtor where the race is harmful.
// Race on vptr. Will run A::F() or B::F() depending on the timing.
class A {
 public:
  A() : done_(false) {
    // W/o this annotation tsan may produce additional warnings in hybrid mode.
    ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&mu_);
  }
  virtual void F() {
    printf ("A::F()\n");
  }
  void Done() {
    MutexLock lock(&mu_);
    done_ = true;
  }
  // Spins (holding mu_ only for the check) until Done() has been called.
  virtual ~A() {
    while (true) {
      {
        MutexLock lock(&mu_);
        if (done_) break;
      }
      usleep(10);  // yield.
    }
  }
 private:
  Mutex mu_;
  bool done_;
};

class B : public A {
 public:
  virtual void F() {
    // TODO(kcc): enable this printf back once issue 57 is fixed.
    // printf ("B::F()\n");
  }
};

static A *a;

// Calls F() and Done(); which F() runs depends on whether ~A has
// already rewritten the vptr — that is the harmful race.
void Thread1() {
  a->F();
  a->Done();
  sleep(1);
};

void Thread2() {
  delete a;
}
TEST(PositiveTests, HarmfulRaceInDtorB) {
  ANNOTATE_FLUSH_EXPECTED_RACES();
  // Will print B::F()
  a = new B;
  ANNOTATE_EXPECT_RACE(a, "HarmfulRaceInDtor #1: expected race on a->vptr");
  ANNOTATE_TRACE_MEMORY(a);
  MyThreadArray t(Thread1, Thread2);
  t.Start();
  t.Join();
  ANNOTATE_FLUSH_EXPECTED_RACES();
}

TEST(PositiveTests, HarmfulRaceInDtorA) {
  ANNOTATE_FLUSH_EXPECTED_RACES();
  // Will print A::F()
  a = new B;
  ANNOTATE_EXPECT_RACE(a, "HarmfulRaceInDtor #2: expected race on a->vptr");
  ANNOTATE_TRACE_MEMORY(a);
  MyThreadArray t(Thread2, Thread1);
  t.Start();
  t.Join();
  ANNOTATE_FLUSH_EXPECTED_RACES();
}

}  // namespace
4102
4103
namespace AnnotateIgnoreTests {  // {{{1

int racey_write = 0;

// The write is racy, but wrapped in IGNORE_WRITES so it must be silent.
void RaceyWriter() {
  ANNOTATE_IGNORE_WRITES_BEGIN();
  racey_write = 1;
  ANNOTATE_IGNORE_WRITES_END();
}

TEST(NegativeTests, AnnotateIgnoreWritesTest) {
  MyThread t(RaceyWriter);
  t.Start();
  racey_write = 1;
  t.Join();
}

int racey_read = 0;

// Racy read hidden with the scoped IGNORE_READS annotations.
void RaceyReader1() {
  ANNOTATE_IGNORE_READS_BEGIN();
  CHECK(racey_read != 777);
  ANNOTATE_IGNORE_READS_END();
}

// Same, but via the one-shot ANNOTATE_UNPROTECTED_READ wrapper.
void RaceyReader2() {
  CHECK(ANNOTATE_UNPROTECTED_READ(racey_read) != 777);
}

TEST(NegativeTests, AnnotateIgnoreReadsTest) {
  MyThreadArray t(RaceyReader1, RaceyReader2);
  t.Start();
  racey_read = 1;
  t.Join();
}

int incorrectly_annotated_racey_write = 0;

// IGNORE_READS does not cover a WRITE, so the race must still be reported.
void IncorrectlyAnnotatedRaceyWriter() {
  ANNOTATE_IGNORE_READS_BEGIN();
  incorrectly_annotated_racey_write = 1;
  ANNOTATE_IGNORE_READS_END();
}

TEST(PositiveTests, AnnotateIgnoreReadsOnWriteTest) {
  ANNOTATE_EXPECT_RACE(&incorrectly_annotated_racey_write, "expected race");
  MyThread t(IncorrectlyAnnotatedRaceyWriter);
  t.Start();
  incorrectly_annotated_racey_write = 1;
  t.Join();
  ANNOTATE_FLUSH_EXPECTED_RACES();
}

int incorrectly_annotated_racey_read = 0;

// IGNORE_WRITES does not cover a READ, so the race must still be reported.
void IncorrectlyAnnotatedRaceyReader() {
  ANNOTATE_IGNORE_WRITES_BEGIN();
  CHECK(incorrectly_annotated_racey_read != 777);
  ANNOTATE_IGNORE_WRITES_END();
}

TEST(PositiveTests, AnnotateIgnoreWritesOnReadTest) {
  ANNOTATE_EXPECT_RACE(&incorrectly_annotated_racey_read, "expected race");
  MyThread t(IncorrectlyAnnotatedRaceyReader);
  t.Start();
  incorrectly_annotated_racey_read = 1;
  t.Join();
  ANNOTATE_FLUSH_EXPECTED_RACES();
}

}  // namespace
4175
4176
// test89: Test for debug info. {{{1
namespace test89 {
// Simple races with different objects (stack, heap globals; scalars, structs).
// Also, if run with --trace-level=2 this test will show a sequence of
// CTOR and DTOR calls.
struct STRUCT {
  int a, b, c;
};

struct A {
  int a;
  A() {
    ANNOTATE_TRACE_MEMORY(&a);
    a = 1;
  }
  virtual ~A() {
    a = 4;
  }
};

struct B : A {
  B()  { CHECK(a == 1); }
  virtual ~B() { CHECK(a == 3); }
};
struct C : B {
  C()  { a = 2; }
  virtual ~C() { a = 3; }
};

int            GLOBAL = 0;
int           *STACK  = 0;
STRUCT         GLOB_STRUCT;
STRUCT        *STACK_STRUCT;
STRUCT        *HEAP_STRUCT;

// Two copies of this run concurrently; every write below races with
// the other thread's write to the same location.
void Worker() {
  GLOBAL = 1;
  *STACK = 1;
  GLOB_STRUCT.b = 1;
  STACK_STRUCT->b = 1;
  HEAP_STRUCT->b = 1;
}

void Run() {
  int stack_var = 0;
  STACK = &stack_var;

  STRUCT stack_struct;
  STACK_STRUCT = &stack_struct;

  HEAP_STRUCT = new STRUCT;

  printf("test89: negative\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();

  delete HEAP_STRUCT;

  // Exercise the A/B/C ctor-dtor chain (visible with --trace-level=2).
  A *a = new C;
  printf("Using 'a->a': %d\n", a->a);
  delete a;
}
REGISTER_TEST2(Run, 89, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test89
4242
4243
// test90: FP. Test for a safely-published pointer (read-only). {{{1
namespace test90 {
// The Publisher creates an object and safely publishes it under a mutex.
// Readers access the object read-only.
// See also test91.
//
// Without annotations Helgrind will issue a false positive in Reader().
//
// Choices for annotations:
//   -- ANNOTATE_CONDVAR_SIGNAL/ANNOTATE_CONDVAR_WAIT
//   -- ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX
//   -- ANNOTATE_PUBLISH_MEMORY_RANGE.

int     *GLOB = 0;
Mutex   MU;

StealthNotification n1;

// Initializes GLOB under MU, then lets the readers go.
void Publisher() {
  MU.Lock();
  GLOB = (int*)malloc(128 * sizeof(int));
  ANNOTATE_TRACE_MEMORY(&GLOB[42]);
  GLOB[42] = 777;
  if (!Tsan_PureHappensBefore())
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB[42], "test90. FP. This is a false positve");
  MU.Unlock();
  n1.signal();
  usleep(200000);
}

// Fetches the pointer under MU and reads the published value.
void Reader() {
  n1.wait();
  while (true) {
    MU.Lock();
    int *p = &GLOB[42];
    MU.Unlock();
    if (p) {
      CHECK(*p == 777);  // Race is reported here.
      break;
    }
  }
}

void Run() {
  printf("test90: false positive (safely published pointer).\n");
  MyThreadArray t(Publisher, Reader, Reader, Reader);
  t.Start();
  t.Join();
  free(GLOB);
}
REGISTER_TEST(Run, 90)
}  // namespace test90
4296
4297
// test91: FP. Test for a safely-published pointer (read-write). {{{1
namespace test91 {
// Similar to test90.
// The Publisher creates an object and safely publishes it under a mutex MU1.
// Accessors get the object under MU1 and access it (read/write) under MU2.
//
// Without annotations Helgrind will issue a false positive in Accessor().
//

int     *GLOB = 0;
Mutex   MU, MU1, MU2;

// Publishes GLOB under MU1.
void Publisher() {
  MU1.Lock();
  GLOB = (int*)malloc(128 * sizeof(int));
  GLOB[42] = 777;
  if (!Tsan_PureHappensBefore())
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB[42], "test91. FP. This is a false positve");
  MU1.Unlock();
}

// Fetches the pointer under MU1, then mutates the object under MU2.
void Accessor() {
  usleep(10000);
  while (true) {
    MU1.Lock();
    int *p = &GLOB[42];
    MU1.Unlock();
    if (p) {
      MU2.Lock();
      (*p)++;  // Race is reported here.
      CHECK(*p >  777);
      MU2.Unlock();
      break;
    }
  }
}

void Run() {
  printf("test91: false positive (safely published pointer, read/write).\n");
  MyThreadArray t(Publisher, Accessor, Accessor, Accessor);
  t.Start();
  t.Join();
  free(GLOB);
}
REGISTER_TEST(Run, 91)
}  // namespace test91
4344
4345
// test92: TN. Test for a safely-published pointer (read-write), annotated. {{{1
namespace test92 {
// Similar to test91, but annotated with ANNOTATE_PUBLISH_MEMORY_RANGE.
//
//
// Publisher:                                       Accessors:
//
// 1. MU1.Lock()
// 2. Create GLOB.
// 3. ANNOTATE_PUBLISH_...(GLOB) -------\           .
// 4. MU1.Unlock()                       \          .
//                                        \         a. MU1.Lock()
//                                         \        b. Get GLOB
//                                          \       c. MU1.Unlock()
//                                           \-->   d. Access GLOB
//
//  A happens-before arc is created between ANNOTATE_PUBLISH_MEMORY_RANGE and
//  accesses to GLOB.

struct ObjType {
  int arr[10];
};

ObjType *GLOB = 0;
Mutex MU, MU1, MU2;

void Publisher() {
  MU1.Lock();
  GLOB = new ObjType;
  for (int i = 0; i < 10; i++) {
    GLOB->arr[i] = 777;
  }
  // This annotation should go right before the object is published.
  ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB, sizeof(*GLOB));
  MU1.Unlock();
}

// Each accessor touches a distinct element, so MU2 alone suffices
// once the publication arc is in place.
void Accessor(int index) {
  while (true) {
    MU1.Lock();
    ObjType *p = GLOB;
    MU1.Unlock();
    if (p) {
      MU2.Lock();
      p->arr[index]++;  // W/o the annotations the race will be reported here.
      CHECK(p->arr[index] ==  778);
      MU2.Unlock();
      break;
    }
  }
}

void Accessor0() { Accessor(0); }
void Accessor5() { Accessor(5); }
void Accessor9() { Accessor(9); }

void Run() {
  printf("test92: safely published pointer, read/write, annotated.\n");
  MyThreadArray t(Publisher, Accessor0, Accessor5, Accessor9);
  t.Start();
  t.Join();
  printf("\t*GLOB=%d\n", GLOB->arr[0]);
}
REGISTER_TEST(Run, 92)
}  // namespace test92
4411
4412
// test93: TP. Test for incorrect usage of ANNOTATE_PUBLISH_MEMORY_RANGE. {{{1
namespace test93 {
int GLOB = 0;

// Reads GLOB before the (too late) publish annotation below.
void Reader() {
  CHECK(GLOB == 0);
}

void Publisher() {
  usleep(10000);
  // Incorrect, used after the memory has been accessed in another thread.
  ANNOTATE_PUBLISH_MEMORY_RANGE(&GLOB, sizeof(GLOB));
}

void Run() {
  printf("test93: positive, misuse of ANNOTATE_PUBLISH_MEMORY_RANGE\n");
  MyThreadArray t(Reader, Publisher);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 93, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test93
4436
4437
// test94: TP. Check do_cv_signal/fake segment logic {{{1
namespace test94 {
int     GLOB;

int COND  = 0;
int COND2 = 0;
Mutex MU, MU2;
CondVar CV, CV2;

StealthNotification n1, n2, n3;

// Writes GLOB, then signals CV. No HB arc may be created between this
// CV.Signal and CV2.Wait in Thr4, so Thr4's write to GLOB must race.
void Thr1() {

  n2.wait();   // Make sure the waiter blocks.
  GLOB = 1; // WRITE

  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();
  n1.signal();
}
void Thr2() {
  // Make sure CV2.Signal() "happens after" CV.Signal()
  n1.wait();
  // Make sure the waiter blocks.
  n3.wait();

  MU2.Lock();
  COND2 = 1;
  CV2.Signal();
  MU2.Unlock();
}
void Thr3() {
  MU.Lock();
  n2.signal();
  while(COND != 1)
    CV.Wait(&MU);
  MU.Unlock();
}
void Thr4() {
  MU2.Lock();
  n3.signal();
  while(COND2 != 1)
    CV2.Wait(&MU2);
  MU2.Unlock();
  GLOB = 2; // READ: no HB-relation between CV.Signal and CV2.Wait !
}
void Run() {
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test94: TP.");
  printf("test94: TP. Check do_cv_signal/fake segment logic\n");
  MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
  mta.Start();
  mta.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 94);
}  // namespace test94
4496
// test95: TP. Check do_cv_signal/fake segment logic {{{1
namespace test95 {
int     GLOB = 0;

int COND  = 0;
int COND2 = 0;
Mutex MU, MU2;
CondVar CV, CV2;

// Same scenario as test94 but the ordering between the two signals is
// arranged with sleeps instead of notifications (CV2 signals first).
void Thr1() {
  usleep(1000*1000); // Make sure CV2.Signal() "happens before" CV.Signal()
  usleep(10000);  // Make sure the waiter blocks.

  GLOB = 1; // WRITE

  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();
}
void Thr2() {
  usleep(10000);  // Make sure the waiter blocks.

  MU2.Lock();
  COND2 = 1;
  CV2.Signal();
  MU2.Unlock();
}
void Thr3() {
  MU.Lock();
  while(COND != 1)
    CV.Wait(&MU);
  MU.Unlock();
}
void Thr4() {
  MU2.Lock();
  while(COND2 != 1)
    CV2.Wait(&MU2);
  MU2.Unlock();
  GLOB = 2; // READ: no HB-relation between CV.Signal and CV2.Wait !
}
void Run() {
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test95: TP.");
  printf("test95: TP. Check do_cv_signal/fake segment logic\n");
  MyThreadArray mta(Thr1, Thr2, Thr3, Thr4);
  mta.Start();
  mta.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 95);
}  // namespace test95
4548
// test96: TN. tricky LockSet behaviour {{{1
// 3 threads access the same memory with three different
// locksets: {A, B}, {B, C}, {C, A}.
// These locksets have empty intersection
namespace test96 {
int     GLOB = 0;

Mutex A, B, C;

void Thread1() {
  MutexLock a(&A);
  MutexLock b(&B);
  GLOB++;
}

void Thread2() {
  MutexLock b(&B);
  MutexLock c(&C);
  GLOB++;
}

void Thread3() {
  MutexLock a(&A);
  MutexLock c(&C);
  GLOB++;
}

void Run() {
  // NOTE(review): the message says "FP" while the header says TN --
  // presumably a pure lockset detector reports here; confirm intent.
  printf("test96: FP. tricky LockSet behaviour\n");
  ANNOTATE_TRACE_MEMORY(&GLOB);
  MyThreadArray mta(Thread1, Thread2, Thread3);
  mta.Start();
  mta.Join();
  CHECK(GLOB == 3);
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 96);
}  // namespace test96
4587
namespace FalseNegativeOfFastModeTest {  // {{{1
// This test shows false negative with --fast-mode=yes.
const int HG_CACHELINE_SIZE = 64;

StealthNotification n1, n2;

const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
int array[ARRAY_SIZE];
int * GLOB = &array[ARRAY_SIZE/2];
/*
  We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
  to a memory inside a CacheLineZ which is inside array's memory range
 */

// Reads *GLOB after the main thread's unsynchronized write (only the
// stealth notifications order the accesses; they are not an HB arc).
void Reader() {
  n1.wait();
  CHECK(0 != *GLOB);
  n2.signal();
}

TEST(PositiveTests, FalseNegativeOfFastModeTest) {
  MyThreadArray t(Reader);
  ANNOTATE_TRACE_MEMORY(GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, __FUNCTION__);

  t.Start();
  *GLOB = 0x12345;
  n1.signal();
  n2.wait();
  t.Join();
}
}  // namespace
4620
// test99: TP. Unit test for a bug in LockWhen*. {{{1
namespace test99 {


bool GLOB = false;
Mutex mu;

// Repeatedly acquires mu via LockWhenWithTimeout; the timeout path is
// what the regression is about.
static void Thread1() {
  for (int i = 0; i < 100; i++) {
    mu.LockWhenWithTimeout(Condition(&ArgIsTrue, &GLOB), 5);
    GLOB = false;
    mu.Unlock();
    usleep(10000);
  }
}

// Contends on the same mutex with plain Lock/Unlock.
static void Thread2() {
  for (int i = 0; i < 100; i++) {
    mu.Lock();
    mu.Unlock();
    usleep(10000);
  }
}

void Run() {
  printf("test99: regression test for LockWhen*\n");
  MyThreadArray t(Thread1, Thread2);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 99);
}  // namespace test99
4653
4654
// test100: Test for initialization bit. {{{1
namespace test100 {
int     G1 = 0;
int     G2 = 0;
int     G3 = 0;
int     G4 = 0;

// First writer: G1/G3 are also read back (CHECK), G2/G4 are write-only.
void Creator() {
  G1 = 1; CHECK(G1);
  G2 = 1;
  G3 = 1; CHECK(G3);
  G4 = 1;
}

// Reads G1/G2 and overwrites G3/G4 after a delay, without synchronization.
void Worker1() {
  usleep(100000);
  CHECK(G1);
  CHECK(G2);
  G3 = 3;
  G4 = 3;
}

void Worker2() {

}


void Run() {
  printf("test100: test for initialization bit. \n");
  MyThreadArray t(Creator, Worker1, Worker2);
  ANNOTATE_TRACE_MEMORY(&G1);
  ANNOTATE_TRACE_MEMORY(&G2);
  ANNOTATE_TRACE_MEMORY(&G3);
  ANNOTATE_TRACE_MEMORY(&G4);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 100, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test100
4694
4695
// test101: TN. Two signals and two waits. {{{1
namespace test101 {
Mutex MU;
CondVar CV;
int     GLOB = 0;

int C1 = 0, C2 = 0;

// Signals twice: once before writing GLOB, once after; the second
// signal/wait pair makes Waiter's write to GLOB well-ordered.
void Signaller() {
  usleep(100000);
  MU.Lock();
  C1 = 1;
  CV.Signal();
  printf("signal\n");
  MU.Unlock();

  GLOB = 1;

  usleep(500000);
  MU.Lock();
  C2 = 1;
  CV.Signal();
  printf("signal\n");
  MU.Unlock();
}

void Waiter() {
  MU.Lock();
  while (!C1)
    CV.Wait(&MU);
  printf("wait\n");
  MU.Unlock();

  MU.Lock();
  while (!C2)
    CV.Wait(&MU);
  printf("wait\n");
  MU.Unlock();

  GLOB = 2;

}

void Run() {
  printf("test101: negative\n");
  MyThreadArray t(Waiter, Signaller);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 101)
}  // namespace test101
4748
// test102: --fast-mode=yes vs. --initialization-bit=yes {{{1
namespace test102 {
const int HG_CACHELINE_SIZE = 64;

Mutex MU;

const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int);
int array[ARRAY_SIZE + 1];
int * GLOB = &array[ARRAY_SIZE/2];
/*
  We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points
  to a memory inside a CacheLineZ which is inside array's memory range
*/

StealthNotification n1, n2, n3;

// Reads GLOB[0] and GLOB[1] after the main thread's unsynchronized
// writes; the notifications only order execution, not happens-before.
void Reader() {
  n1.wait();
  CHECK(777 == GLOB[0]);
  n2.signal();
  n3.wait();
  CHECK(777 == GLOB[1]);
}

void Run() {
  MyThreadArray t(Reader);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+0, "test102: TP. FN with --fast-mode=yes");
  ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+1, "test102: TP");
  printf("test102: --fast-mode=yes vs. --initialization-bit=yes\n");

  t.Start();
  GLOB[0] = 777;
  n1.signal();
  n2.wait();
  GLOB[1] = 777;
  n3.signal();
  t.Join();
}

REGISTER_TEST2(Run, 102, FEATURE)
}  // namespace test102
4790
// test103: Access different memory locations with different LockSets {{{1
namespace test103 {
const int N_MUTEXES = 6;
const int LOCKSET_INTERSECTION_SIZE = 3;

int data[1 << LOCKSET_INTERSECTION_SIZE] = {0};
Mutex MU[N_MUTEXES];

// Maps a lockset bitmask to a data index: the top
// LOCKSET_INTERSECTION_SIZE bits of the N_MUTEXES-bit mask.
inline int LS_to_idx (int ls) {
  return (ls >> (N_MUTEXES - LOCKSET_INTERSECTION_SIZE))
      & ((1 << LOCKSET_INTERSECTION_SIZE) - 1);
}

// For every non-trivial lockset, takes its mutexes in increasing index
// order (released in decreasing order, so workers cannot deadlock) and
// increments the data element guarded by that lockset.
void Worker() {
  for (int ls = 0; ls < (1 << N_MUTEXES); ls++) {
    if (LS_to_idx(ls) == 0)
      continue;
    for (int m = 0; m < N_MUTEXES; m++)
      if (ls & (1 << m))
        MU[m].Lock();

    data[LS_to_idx(ls)]++;

    for (int m = N_MUTEXES - 1; m >= 0; m--)
      if (ls & (1 << m))
        MU[m].Unlock();
  }
}

void Run() {
  printf("test103: Access different memory locations with different LockSets\n");
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 103, FEATURE)
}  // namespace test103
4828
// test104: TP. Simple race (write vs write). Heap mem. {{{1
namespace test104 {
int     *GLOB = NULL;
void Worker() {
  GLOB[42] = 1;
}

// Starts Worker, then writes the same heap cell without synchronization
// (the usleep only makes the interleaving likely, it is not an HB arc).
void Parent() {
  MyThread t(Worker);
  t.Start();
  usleep(100000);
  GLOB[42] = 2;
  t.Join();
}
void Run() {
  GLOB = (int*)malloc(128 * sizeof(int));
  GLOB[42] = 0;
  ANNOTATE_EXPECT_RACE(&GLOB[42], "test104. TP.");
  ANNOTATE_TRACE_MEMORY(&GLOB[42]);
  printf("test104: positive\n");
  Parent();
  printf("\tGLOB=%d\n", GLOB[42]);
  free(GLOB);
}
REGISTER_TEST(Run, 104);
}  // namespace test104
4855
4856
// test105: Checks how stack grows. {{{1
namespace test105 {
int     GLOB = 0;

// Touches both ends of a fresh stack frame's array.
void F1() {
  int ar[32];
//  ANNOTATE_TRACE_MEMORY(&ar[0]);
//  ANNOTATE_TRACE_MEMORY(&ar[31]);
  ar[0] = 1;
  ar[31] = 1;
  CHECK(ar[0] == 1);
}

// Same as F1 at a different stack depth; runs both on the main thread
// and on a new thread so the tool sees stack memory reused/regrown.
void Worker() {
  int ar[32];
//  ANNOTATE_TRACE_MEMORY(&ar[0]);
//  ANNOTATE_TRACE_MEMORY(&ar[31]);
  ar[0] = 1;
  ar[31] = 1;
  CHECK(ar[0] == 1);
  F1();
}

void Run() {
  printf("test105: negative\n");
  Worker();
  MyThread t(Worker);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 105)
}  // namespace test105
4890
4891
// test107: Test for ANNOTATE_EXPECT_RACE {{{1
namespace test107 {
int GLOB = 0;
// Declares an expected race that never happens; checks how the tool
// reports an unmatched ANNOTATE_EXPECT_RACE.
void Run() {
  printf("test107: negative\n");
  ANNOTATE_EXPECT_RACE(&GLOB, "No race in fact. Just checking the tool.");
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 107, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test107
4902
4903
// test108: TN. initialization of static object. {{{1
namespace test108 {
// Here we have a function-level static object.
// Starting from gcc 4 this is thread safe,
// but it is not thread safe with many other compilers.
//
// Helgrind/ThreadSanitizer supports this kind of initialization by
// intercepting __cxa_guard_acquire/__cxa_guard_release
// and ignoring all accesses between them.
// pthread_once is supported in the same manner.
class Foo {
 public:
  Foo() {
    ANNOTATE_TRACE_MEMORY(&a_);
    a_ = 42;
  }
  void Check() const { CHECK(a_ == 42); }
 private:
  int a_;
};

// Lazily constructs the singleton; thread safety relies on the
// compiler's static-init guard (__cxa_guard_*).
const Foo *GetFoo() {
  static const Foo *foo = new Foo();
  return foo;
}
void Worker0() {
  GetFoo();
}

void Worker() {
  usleep(200000);
  const Foo *foo = GetFoo();
  foo->Check();
}


void Run() {
  printf("test108: negative, initialization of static object\n");
  MyThreadArray t(Worker0, Worker, Worker);
  t.Start();
  t.Join();
}
#ifdef __GNUC__
REGISTER_TEST2(Run, 108, FEATURE)
#endif
}  // namespace test108
4950
4951
// test109: TN. Checking happens before between parent and child threads. {{{1
namespace test109 {
// Check that the detector correctly connects
//   pthread_create with the new thread
// and
//   thread exit with pthread_join
const int N = 32;
static int GLOB[N];

// Child thread: increments its private slot; ordered against the
// parent's write by Start() and against the parent's increment by Join().
void Worker(void *a) {
  usleep(10000);
  // printf("--Worker : %ld %p\n", (int*)a - GLOB, (void*)pthread_self());
  int *arg = (int*)a;
  (*arg)++;
}

void Run() {
  printf("test109: negative\n");
  MyThread *t[N];
  for (int i  = 0; i < N; i++) {
    t[i] = new MyThread(Worker, &GLOB[i]);
  }
  for (int i  = 0; i < N; i++) {
    ANNOTATE_TRACE_MEMORY(&GLOB[i]);
    GLOB[i] = 1;
    t[i]->Start();
    // printf("--Started: %p\n", (void*)t[i]->tid());
  }
  for (int i  = 0; i < N; i++) {
    // printf("--Joining: %p\n", (void*)t[i]->tid());
    t[i]->Join();
    // printf("--Joined : %p\n", (void*)t[i]->tid());
    GLOB[i]++;
  }
  for (int i  = 0; i < N; i++) delete t[i];

  printf("\tGLOB=%d\n", GLOB[13]);
}
REGISTER_TEST(Run, 109)
}  // namespace test109
4992
4993
// test111: TN. Unit test for a bug related to stack handling. {{{1
namespace test111 {
char     *GLOB = 0;
bool COND = false;
Mutex mu;
const int N = 3000;

// Fills N bytes starting at p with val.
void write_to_p(char *p, int val) {
  for (int i = 0; i < N; i++)
    p[i] = val;
}

// Touches a stack buffer, then blocks until Worker2 sets COND.
void f1() {
  char some_stack[N];
  write_to_p(some_stack, 1);
  mu.LockWhen(Condition(&ArgIsTrue, &COND));
  mu.Unlock();
}

// Touches two stack buffers at a deeper frame, so its stack overlaps
// memory that f1 used — the reuse pattern the regression exercised.
void f2() {
  char some_stack[N];
  char some_more_stack[N];
  write_to_p(some_stack, 2);
  write_to_p(some_more_stack, 2);
}

void f0() { f2(); }

void Worker1() {
  f0();
  f1();
  f2();
}

void Worker2() {
  usleep(100000);
  mu.Lock();
  COND = true;
  mu.Unlock();
}

void Run() {
  printf("test111: regression test\n");
  MyThreadArray t(Worker1, Worker1, Worker2);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 111, FEATURE)
}  // namespace test111
5043
5044 // test112: STAB. Test for ANNOTATE_PUBLISH_MEMORY_RANGE{{{1
namespace test112 {
// Stability test for ANNOTATE_PUBLISH_MEMORY_RANGE: publish many ranges,
// including overlapping, nested and adjacent ones, each consumed by two
// worker threads.
char *GLOB = 0;
const int N = 64 * 5;  // size of the shared buffer
Mutex mu;
bool ready = false; // under mu
int beg, end; // under mu

Mutex mu1;  // serializes the workers' increments of GLOB[b, e)

// Poll (under mu) until the main thread publishes a range [beg, end),
// then increment every byte of that range under mu1.
void Worker() {

  bool is_ready = false;
  int b, e;
  while (!is_ready) {
    mu.Lock();
    is_ready = ready;
    b = beg;
    e = end;
    mu.Unlock();
    usleep(1000);
  }

  mu1.Lock();
  for (int i = b; i < e; i++) {
    GLOB[i]++;
  }
  mu1.Unlock();
}

// Re-initialize GLOB[b, e), publish it, then hand it off to two workers
// through the (ready, beg, end) triple.
void PublishRange(int b, int e) {
  MyThreadArray t(Worker, Worker);
  ready = false; // runs before other threads
  t.Start();

  ANNOTATE_NEW_MEMORY(GLOB + b, e - b);
  ANNOTATE_TRACE_MEMORY(GLOB + b);
  for (int j = b; j < e; j++) {
    GLOB[j] = 0;
  }
  ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB + b, e - b);

  // hand off
  mu.Lock();
  ready = true;
  beg = b;
  end = e;
  mu.Unlock();

  t.Join();
}

void Run() {
  printf("test112: stability (ANNOTATE_PUBLISH_MEMORY_RANGE)\n");
  GLOB = new char [N];

  // Overlapping and nested ranges.
  PublishRange(0, 10);
  PublishRange(3, 5);

  PublishRange(12, 13);
  PublishRange(10, 14);

  PublishRange(15, 17);
  PublishRange(16, 18);

  // do few more random publishes.
  for (int i = 0; i < 20; i++) {
    const int begin = rand() % N;
    const int size = (rand() % (N - begin)) + 1;
    CHECK(size > 0);
    CHECK(begin + size <= N);
    PublishRange(begin, begin + size);
  }

  printf("GLOB = %d\n", (int)GLOB[0]);
}
REGISTER_TEST2(Run, 112, STABILITY)
} // namespace test112
5122
5123
5124 // test113: PERF. A lot of lock/unlock calls. Many locks {{{1
namespace test113 {
// Perf/stress: a large number of Lock/Unlock operations across several
// mutexes.  On iteration i the low kNumLocks bits of i select which
// mutexes participate; they are locked in ascending index order and
// unlocked in descending order, keeping the lock order globally consistent.
const int kNumIter = 100000;
const int kNumLocks = 7;
Mutex MU[kNumLocks];
TEST (StressTests, ManyLocksUnlocks2) {
  printf("test113: perf\n");
  for (int i = 0; i < kNumIter; i++ ) {
    for (int j = 0; j < kNumLocks; j++) {
      if (i & (1 << j)) MU[j].Lock();
    }
    for (int j = kNumLocks - 1; j >= 0; j--) {
      if (i & (1 << j)) MU[j].Unlock();
    }
  }
}
} // namespace test113
5141
5142
5143 // test114: STAB. Recursive static initialization. {{{1
namespace test114 {
// Stability: recursive function-scope static initialization (the static
// initializer of Foo() itself calls Bar(), which has its own static),
// performed concurrently from two threads.
int Bar() {
  static int bar = 1;
  return bar;
}
int Foo() {
  static int foo = Bar();  // nested static init: calls Bar() during init
  return foo;
}
void Worker() {
  static int x = Foo();
  CHECK(x == 1);
}
void Run() {
  printf("test114: stab\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}
#ifdef __GNUC__
// Only registered on gcc builds -- presumably relies on gcc's guarded
// (thread-safe) static initialization; verify before widening.
REGISTER_TEST(Run, 114)
#endif
} // namespace test114
5167
5168 // test116: TN. some operations with string<> objects. {{{1
namespace test116 {
// TN: heavy concurrent use of thread-local std::string objects
// (assignments, copies, swaps, clears).  Nothing is shared between the
// threads, so the tool must stay silent; this mainly exercises the
// allocator and string internals.

void Worker() {
  string A[10], B[10], C[10];
  for (int i = 0; i < 1000; i++) {
    for (int j = 0; j < 10; j++) {
      string &a = A[j];
      string &b = B[j];
      string &c = C[j];
      a = "sdl;fkjhasdflksj df";
      b = "sdf sdf;ljsd ";
      c = "'sfdf df";
      c = b;
      a = c;
      b = a;
      swap(a,b);
      swap(b,c);
    }
    for (int j = 0; j < 10; j++) {
      string &a = A[j];
      string &b = B[j];
      string &c = C[j];
      a.clear();
      b.clear();
      c.clear();
    }
  }
}

void Run() {
  printf("test116: negative (strings)\n");
  MyThreadArray t(Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 116, FEATURE|EXCLUDE_FROM_ALL)
} // namespace test116
5206
5207 // test117: TN. Many calls to function-scope static init. {{{1
namespace test117 {
// TN: many threads perform the same function-scope static initialization.
// Foo() sleeps so that several threads reach the initializer while it is
// still in progress.
const int N = 50;

int Foo() {
  usleep(20000);  // keep the static init window open for other threads
  return 1;
}

void Worker(void *a) {
  static int foo = Foo();
  CHECK(foo == 1);
}

void Run() {
  printf("test117: negative\n");
  MyThread *t[N];
  for (int i = 0; i < N; i++) {
    t[i] = new MyThread(Worker);
  }
  for (int i = 0; i < N; i++) {
    t[i]->Start();
  }
  for (int i = 0; i < N; i++) {
    t[i]->Join();
  }
  for (int i = 0; i < N; i++) delete t[i];
}
#ifndef WIN32
// This is racey on Windows!
REGISTER_TEST(Run, 117)
#endif
} // namespace test117
5240
5241
5242
5243 // test118 PERF: One signal, multiple waits. {{{1
namespace test118 {
// Perf: one ANNOTATE_CONDVAR_SIGNAL against millions of
// ANNOTATE_CONDVAR_WAIT calls on the same address.
int GLOB = 0;
const int kNumIter = 2000000;
void Signaller() {
  usleep(50000);
  ANNOTATE_CONDVAR_SIGNAL(&GLOB);
}
void Waiter() {
  for (int i = 0; i < kNumIter; i++) {
    ANNOTATE_CONDVAR_WAIT(&GLOB);
    if (i == kNumIter / 2)
      usleep(100000);  // let the signals land mid-stream
  }
}
TEST(StressTests, OneSignalManyWaits) {
  printf("test118: perf\n");
  MyThreadArray t(Signaller, Waiter, Signaller, Waiter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
} // namespace test118
5266
5267
5268 // test119: TP. Testing that malloc does not introduce any HB arc. {{{1
namespace test119 {
// TP: verifies that malloc/free in two threads do not by themselves
// create a happens-before arc hiding the race on GLOB.
int GLOB = 0;
void Worker1() {
  GLOB = 1;
  free(malloc(123));
}
void Worker2() {
  usleep(100000);
  free(malloc(345));
  GLOB = 2;
}
void Run() {
  printf("test119: positive (checking if malloc creates HB arcs)\n");
  // In pure happens-before mode a mutex inside malloc would legitimately
  // synchronize the threads, so the race is only expected otherwise.
  if (!(Tsan_PureHappensBefore() && kMallocUsesMutex))
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true race");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 119)
} // namespace test119
5291
5292
5293 // test120: TP. Thread1: write then read. Thread2: read. {{{1
namespace test120 {
// TP: Thread1 writes then reads GLOB; Thread2 only reads.
// The write in Thread1 races with the read in Thread2.
int GLOB = 0;

void Thread1() {
  GLOB = 1; // write
  CHECK(GLOB); // read
}

void Thread2() {
  usleep(100000);
  CHECK(GLOB >= 0); // read
}

void Run() {
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "TP (T1: write then read, T2: read)");
  printf("test120: positive\n");
  MyThreadArray t(Thread1, Thread2);
  GLOB = 1;  // pre-thread write, ordered by the create arc
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 120)
} // namespace test120
5318
5319
namespace DoubleCheckedLocking { // {{{1
// TP: classic broken double-checked locking.  The unsynchronized first
// check of 'foo' races with the assignment under the lock, and readers
// may observe 'foo' before 'foo->a' is initialized.
struct Foo {
  uintptr_t padding1[16];  // padding keeps 'a' away from other state
  uintptr_t a;
  uintptr_t padding2[16];
};

static Mutex mu;
static Foo *foo;  // lazily initialized; read outside the lock (the bug)

void InitMe() {
  if (!foo) {  // unsynchronized fast-path check: races with the store below
    MutexLock lock(&mu);
    if (!foo) {
      ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo, "Double-checked locking (ptr)");
      foo = new Foo;
      if (Tsan_PureHappensBefore()) {
        // A pure h-b detector may or may not detect this.
        ANNOTATE_BENIGN_RACE(&foo->a, "real race");
      } else {
        // ThreadSanitizer in full hybrid mode must detect it.
        ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo->a, "Double-checked locking (obj)");
      }
      foo->a = 42;  // published via 'foo' before this write is ordered
    }
  }
}

void UseMe() {
  InitMe();
  CHECK(foo);
  if (foo->a != 42) {
    printf("foo->a = %d (should be 42)\n", (int)foo->a);
  }
}

void Worker1() { UseMe(); }
void Worker2() { UseMe(); }
void Worker3() { UseMe(); }


TEST(PositiveTests, DoubleCheckedLocking1) {
  foo = NULL;
  MyThreadArray t1(Worker1, Worker2, Worker3);
  t1.Start();
  t1.Join();
  delete foo;
}
} // namespace DoubleCheckedLocking
5369
namespace DoubleCheckedLocking2 { // {{{1
// TP: a subtler double-checked-locking variant.  The object is fully
// constructed before being published under the lock, but the fast-path
// read of 'foo' is still an unsynchronized race with that publication.
struct Foo {
  uintptr_t padding1[16];
  uintptr_t a;
  uintptr_t padding2[16];
};

Foo *foo;
Mutex mu;

void InitMe() {
  if (foo) return;  // unsynchronized fast-path read: the expected race
  Foo *x = new Foo;
  ANNOTATE_BENIGN_RACE(&x->a, "may or may not detect this race");
  x->a = 42;  // initialized before publication, unlike DoubleCheckedLocking
  {
    MutexLock lock(&mu);
    if (!foo) {
      foo = x;   // publish; only the first thread wins
      x = NULL;
    }
  }
  if (x) delete x;  // losers discard their privately-built object
}

void Worker() {
  InitMe();
  CHECK(foo);
  CHECK(foo->a == 42);
}

TEST(PositiveTests, DoubleCheckedLocking2) {
  foo = NULL;
  ANNOTATE_EXPECT_RACE(&foo, "real race");
  MyThreadArray t1(Worker, Worker, Worker, Worker);
  t1.Start();
  t1.Join();
  delete foo;
}

} // namespace DoubleCheckedLocking2
5411
5412 namespace PositiveTests_DifferentSizeAccessTest { // {{{1
5413
5414 uint64_t arr[1000];
5415 size_t arr_index = 0;
5416 uint64_t *MEM;
5417 size_t size[3];
5418 size_t offset[3];
5419
GenericWrite(size_t s,size_t off)5420 void GenericWrite(size_t s, size_t off) {
5421 switch(s) {
5422 case 8:
5423 CHECK(off == 0);
5424 ((uint64_t*)MEM)[off] = 1;
5425 break;
5426 case 4:
5427 CHECK(off < 2);
5428 ((uint32_t*)MEM)[off] = 1;
5429 break;
5430 case 2:
5431 CHECK(off < 4);
5432 ((uint16_t*)MEM)[off] = 1;
5433 break;
5434 case 1:
5435 CHECK(off < 8);
5436 ((uint8_t*)MEM)[off] = 1;
5437 break;
5438 default: CHECK(0); break;
5439 }
5440 }
5441
Thread1()5442 void Thread1() { GenericWrite(size[0], offset[0]); }
Thread2()5443 void Thread2() { GenericWrite(size[1], offset[1]); }
5444
TwoRangesIntersect(size_t beg1,size_t end1,size_t beg2,size_t end2)5445 bool TwoRangesIntersect(size_t beg1, size_t end1, size_t beg2, size_t end2) {
5446 if (beg1 <= beg2 && end1 > beg2) return true;
5447 if (beg2 <= beg1 && end2 > beg1) return true;
5448 return false;
5449 }
5450
RunTwoThreads(size_t size1,size_t offset1,size_t size2,size_t offset2)5451 void RunTwoThreads(size_t size1, size_t offset1, size_t size2, size_t offset2) {
5452 size[0] = size1;
5453 size[1] = size2;
5454 offset[0] = offset1;
5455 offset[1] = offset2;
5456 long beg1 = offset1 * size1;
5457 long end1 = beg1 + size1;
5458 long beg2 = offset2 * size2;
5459 long end2 = beg2 + size2;
5460 bool have_intersection = TwoRangesIntersect(beg1, end1, beg2, end2);
5461 char descr[1024];
5462 MEM = &arr[arr_index++];
5463 sprintf(descr, "Testing: [%ld, %ld) vs [%ld, %ld] (%s intersection); p=%p",
5464 beg1, end1, beg2, end2, have_intersection ? "have" : "no", MEM);
5465 fprintf(stderr, "%s\n", descr);
5466 char *racey_addr_beg = (char*)MEM + max(beg1, beg2);
5467 char *racey_addr_end = (char*)MEM + min(end1, end2);
5468 if (have_intersection) {
5469 ANNOTATE_EXPECT_RACE(racey_addr_beg, descr);
5470 if (racey_addr_end - racey_addr_beg >= 2) {
5471 // We expect a race on the first racey byte, but we may also see some
5472 // races in other bytes (e.g. if a 8-byte store is implemented via two
5473 // 4-byte stores on a 32-bit arch). Ignore these extra races.
5474 ANNOTATE_BENIGN_RACE_SIZED(racey_addr_beg+1, racey_addr_end - racey_addr_beg - 1,
5475 "race");
5476 }
5477 }
5478 MyThreadArray t1(Thread1, Thread2);
5479 t1.Start();
5480 t1.Join();
5481 }
5482
TestTwoSizes(size_t size1,size_t offset1,size_t size2,size_t offset2)5483 void TestTwoSizes(size_t size1, size_t offset1, size_t size2, size_t offset2) {
5484 RunTwoThreads(size1, offset1, size2, offset2);
5485 RunTwoThreads(size2, offset2, size1, offset1);
5486 }
5487
TEST(PositiveTests,DifferentSizeAccessTest)5488 TEST(PositiveTests, DifferentSizeAccessTest) {
5489 for(int size1_log = 3; size1_log >= 0; size1_log--) {
5490 for (int size2_log = size1_log; size2_log >= 0; size2_log--) {
5491 for (int off1 = 0; off1 < (1 << (3-size1_log)); off1++) {
5492 for (int off2 = 0; off2 < (1 << (3-size2_log)); off2++) {
5493 RunTwoThreads(1 << size1_log, off1, 1 << size2_log, off2);
5494 }
5495 }
5496 }
5497 }
5498 }
5499
5500
5501 const int kStressArrSize = 100;
5502 char stress_arr[kStressArrSize];
5503
StressWorker()5504 void StressWorker() {
5505 const int n = 100000;
5506 char foo[kStressArrSize];
5507 memset(foo, 0, sizeof(foo));
5508 for (int i = 0; i < n; i++) {
5509 memcpy(stress_arr + i % (kStressArrSize / 2), foo, i % (kStressArrSize / 3));
5510 }
5511 }
5512
TEST(StressTests,DifferentSizeAccessStressTest)5513 TEST(StressTests, DifferentSizeAccessStressTest) {
5514 ANNOTATE_BENIGN_RACE_SIZED(stress_arr, sizeof(stress_arr), "race");
5515 MyThreadArray t(StressWorker, StressWorker, StressWorker);
5516 t.Start();
5517 t.Join();
5518 }
5519 } // namespace
5520
5521 // test124: What happens if we delete an unlocked lock? {{{1
namespace test124 {
// This test does not work with pthreads (you can't call
// pthread_mutex_destroy on a locked lock).
// Each worker locks N freshly-created mutexes and then destroys them
// while still locked; checks the tool survives lock-destruction of a
// held lock.
int GLOB = 0;
const int N = 1000;
void Worker() {
  Mutex *a_large_local_array_of_mutexes;
  a_large_local_array_of_mutexes = new Mutex[N];
  for (int i = 0; i < N; i++) {
    a_large_local_array_of_mutexes[i].Lock();
  }
  delete []a_large_local_array_of_mutexes;  // destroyed while locked
  GLOB = 1;
}

void Run() {
  printf("test124: negative\n");
  MyThreadArray t(Worker, Worker, Worker);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 124, FEATURE|EXCLUDE_FROM_ALL)
} // namespace test124
5546
5547
5548 // test126 TN: test for BlockingCounter {{{1
namespace test126 {
// TN: BlockingCounter as a synchronization barrier.  All workers read
// GLOB before decrementing the counter; the main thread writes GLOB only
// after Wait() returns, so there is no race.
BlockingCounter *blocking_counter;
int GLOB = 0;
void Worker() {
  CHECK(blocking_counter);
  CHECK(GLOB == 0);  // read: happens before the counter reaches zero
  blocking_counter->DecrementCount();
}
void Run() {
  printf("test126: negative\n");
  MyThreadArray t(Worker, Worker, Worker);
  blocking_counter = new BlockingCounter(3);
  t.Start();
  blocking_counter->Wait();  // blocks until all three workers decrement
  GLOB = 1;  // write: ordered after every worker's read
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 126)
} // namespace test126
5569
5570
5571 // test127. Bad code: unlocking a mutex locked by another thread. {{{1
namespace test127 {
// Bad-usage test: Thread2 unlocks a mutex that Thread1 locked (and never
// released).  Checks how the tool reports lock-ownership violations.
Mutex mu;
void Thread1() {
  mu.Lock();     // deliberately left locked
  usleep(1); // avoid tail call elimination
}
void Thread2() {
  usleep(100000);
  mu.Unlock();   // unlocking a lock held by Thread1
  usleep(1); // avoid tail call elimination
}
TEST(LockTests, UnlockingALockHeldByAnotherThread) {
  MyThreadArray t(Thread1, Thread2);
  t.Start();
  t.Join();
}
} // namespace test127
5589
5590 // test128. Suppressed code in concurrent accesses {{{1
5591 // Please use --suppressions=unittest.supp flag when running this test.
namespace test128 {
// The unprotected GLOB++ in ThisFunctionShouldBeSuppressed races with
// Worker's locked increment; the report is expected to be silenced by a
// suppressions file (run with --suppressions=unittest.supp).
Mutex mu;
int GLOB = 0;
void Worker() {
  usleep(100000);
  mu.Lock();
  GLOB++;
  mu.Unlock();
}
void ThisFunctionShouldBeSuppressed() {
  GLOB++;  // racy access; matched by name in the suppressions file
}
void Run() {
  printf("test128: Suppressed code in concurrent accesses.\n");
  MyThreadArray t(Worker, ThisFunctionShouldBeSuppressed);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 128, FEATURE | EXCLUDE_FROM_ALL)
} // namespace test128
5612
5613 // test129: TN. Synchronization via ReaderLockWhen(). {{{1
5614 namespace test129 {
5615 int GLOB = 0;
5616 Mutex MU;
WeirdCondition(int * param)5617 bool WeirdCondition(int* param) {
5618 *param = GLOB; // a write into Waiter's memory
5619 return GLOB > 0;
5620 }
Waiter()5621 void Waiter() {
5622 int param = 0;
5623 MU.ReaderLockWhen(Condition(WeirdCondition, ¶m));
5624 MU.ReaderUnlock();
5625 CHECK(GLOB > 0);
5626 CHECK(param > 0);
5627 }
Waker()5628 void Waker() {
5629 usleep(100000); // Make sure the waiter blocks.
5630 MU.Lock();
5631 GLOB++;
5632 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
5633 }
Run()5634 void Run() {
5635 printf("test129: Synchronization via ReaderLockWhen()\n");
5636 MyThread mt(Waiter, NULL, "Waiter Thread");
5637 mt.Start();
5638 Waker();
5639 mt.Join();
5640 printf("\tGLOB=%d\n", GLOB);
5641 }
5642 REGISTER_TEST2(Run, 129, FEATURE);
5643 } // namespace test129
5644
namespace NegativeTests_PerThreadTest { // {{{1
#ifdef TLS
// This test verifies that the race detector handles
// thread-local storage (TLS) correctly.
// As of 09-03-30 ThreadSanitizer has a bug:
// - Thread1 starts
// - Thread1 touches per_thread_global
// - Thread1 ends
// - Thread2 starts (and there is no happens-before relation between it and
//   Thread1)
// - Thread2 touches per_thread_global
// It may happen so that Thread2 will have per_thread_global in the same address
// as Thread1. Since there is no happens-before relation between threads,
// ThreadSanitizer reports a race.
//
// test131 does the same for stack.

static TLS int per_thread_global[10] = {0};

void RealWorker() {  // Touch per_thread_global.
  per_thread_global[1]++;
  per_thread_global[9]++;
  errno++;  // errno is itself thread-local; touch it too
}

void Worker() {  // Spawn few threads that touch per_thread_global.
  MyThreadArray t(RealWorker, RealWorker);
  t.Start();
  t.Join();
}
// Staggered starts increase the chance that a new thread reuses the TLS
// address of an already-finished one.
void Worker0() { usleep(0);      Worker(); }
void Worker1() { usleep(100000); Worker(); }
void Worker2() { usleep(200000); Worker(); }
void Worker3() { usleep(300000); Worker(); }

#ifdef WIN32
TEST(NegativeTests, DISABLED_PerThreadTest) {  // issue #23
#else
TEST(NegativeTests, PerThreadTest) {
#endif
  MyThreadArray t1(Worker0, Worker1, Worker2, Worker3);
  t1.Start();
  t1.Join();
}
#endif // TLS
} // namespace NegativeTests_PerThreadTest
5691
5692
namespace NegativeTests_StackReuseTest { // {{{1
// Same as PerThreadTest, but for stack.
// Finished threads' stacks may be reused by later threads with no
// happens-before arc between them; the tool must not report this.

void RealWorker() {  // Touch stack.
  int stack_var = 0;
  stack_var++;
}

void Worker() {  // Spawn few threads that touch stack.
  MyThreadArray t(RealWorker, RealWorker);
  t.Start();
  t.Join();
}
// Staggered starts increase the chance of stack-address reuse across
// non-overlapping thread lifetimes.
void Worker0() { usleep(0);      Worker(); }
void Worker1() { usleep(100000); Worker(); }
void Worker2() { usleep(200000); Worker(); }
void Worker3() { usleep(300000); Worker(); }

TEST(NegativeTests, StackReuseTest) {
  MyThreadArray t(Worker0, Worker1, Worker2, Worker3);
  t.Start();
  t.Join();
}

// Same, but with a detector state flush in the middle.
TEST(NegativeTests, StackReuseWithFlushTest) {
  MyThreadArray t1(Worker0, Worker1, Worker2, Worker3);
  MyThreadArray t2(Worker0, Worker1, Worker2, Worker3);
  t1.Start();
  ANNOTATE_FLUSH_STATE();
  usleep(400000);
  t2.Start();
  t2.Join();
  t1.Join();
}
} // namespace NegativeTests_StackReuseTest
5728
5729
5730 // test132: TP. Simple race (write vs write). Works in fast-mode. {{{1
namespace test132 {
// TP: trivial write/write race on a global; should be caught even in
// fast mode.
int GLOB = 0;
void Worker() { GLOB = 1; }

void Run1() {
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test132");
  printf("test132: positive; &GLOB=%p\n", &GLOB);
  ANNOTATE_TRACE_MEMORY(&GLOB);
  GLOB = 7;  // pre-thread write, ordered by the create arc
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}

void Run() {
  Run1();
}
REGISTER_TEST(Run, 132);
} // namespace test132
5750
5751
5752 // test133: TP. Simple race (write vs write). Works in fast mode. {{{1
namespace test133 {
// Same as test132, but everything is run from a separate thread spawned from
// the main thread (exercises detection when the racing threads are
// grandchildren of main).
int GLOB = 0;
void Worker() { GLOB = 1; }

void Run1() {
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test133");
  printf("test133: positive; &GLOB=%p\n", &GLOB);
  ANNOTATE_TRACE_MEMORY(&GLOB);
  GLOB = 7;
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}
void Run() {
  MyThread t(Run1);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 133);
} // namespace test133
5775
5776
5777 // test134 TN. Swap. Variant of test79. {{{1
namespace test134 {
// TN: passing a map between threads via swap() under a mutex.
#if 0
typedef __gnu_cxx::hash_map<int, int> map_t;
#else
typedef std::map<int, int> map_t;
#endif
map_t map;
Mutex mu;
// Here we use swap to pass map between threads.
// The synchronization is correct, but w/o the annotation
// any hybrid detector will complain.

// Swap is very unfriendly to the lock-set (and hybrid) race detectors.
// Since tmp is destructed outside the mutex, we need to have a happens-before
// arc between any prior access to map and here.
// Since the internals of tmp are created outside the mutex and are passed to
// other thread, we need to have a h-b arc between here and any future access.
// These arcs can be created by HAPPENS_{BEFORE,AFTER} annotations, but it is
// much simpler to apply pure-happens-before mode to the mutex mu.
void Swapper() {
  map_t tmp;
  MutexLock lock(&mu);
  ANNOTATE_HAPPENS_AFTER(&map);
  // We swap the new empty map 'tmp' with 'map'.
  map.swap(tmp);
  ANNOTATE_HAPPENS_BEFORE(&map);
  // tmp (which is the old version of map) is destroyed here.
}

void Worker() {
  MutexLock lock(&mu);
  ANNOTATE_HAPPENS_AFTER(&map);
  map[1]++;
  ANNOTATE_HAPPENS_BEFORE(&map);
}

void Run() {
  printf("test134: negative (swap)\n");
  // ********************** Shorter way: ***********************
  // ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&mu);
  MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 134)
} // namespace test134
5824
5825 // test137 TP. Races on stack variables. {{{1
namespace test137 {
// TP: races on stack variables.  Each worker publishes a pointer to its
// own stack variable through the queue; the next worker increments it,
// racing with the publisher's own increment.
int GLOB = 0;
ProducerConsumerQueue q(10);

void Worker() {
  int stack;
  int *tmp = (int*)q.Get();
  (*tmp)++;            // touch whatever the previous thread published
  int *racey = &stack;
  q.Put(racey);        // publish our own stack variable
  (*racey)++;          // races with the consumer's increment above
  usleep(150000);
  // We may miss the races if we sleep less due to die_memory events...
}

void Run() {
  int tmp = 0;
  printf("test137: TP. Races on stack variables.\n");
  q.Put(&tmp);  // seed the chain
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
  q.Get();      // drain the last published pointer
}

REGISTER_TEST2(Run, 137, FEATURE | EXCLUDE_FROM_ALL)
} // namespace test137
5853
namespace ThreadPoolFNTests { // {{{1

// When using thread pools, two concurrent callbacks might be scheduled
// onto the same executor thread. As a result, unnecessary happens-before
// relation may be introduced between callbacks.
// If we set the number of executor threads to 1, any known data
// race detector will be silent.
// However, a similar situation may happen with any number of
// executor threads (with some probability).

// Writes 42 into *var after a delay; both callbacks below hit the same
// RACEY variable.
void Worker(int *var) {
  usleep(100000);
  *var = 42;
}

// FN: both closures run on the single pool thread, serialized by it.
TEST(ThreadPoolFNTests, OneProducerOneConsumer) {
  int RACEY = 0;
  printf("FN. Two closures hit the same thread in ThreadPool.\n");

  ThreadPool tp(1);
  tp.StartWorkers();
  tp.Add(NewCallback(Worker, &RACEY));
  tp.Add(NewCallback(Worker, &RACEY));
}

// Enqueue a Worker(var) callback onto 'tp' after a delay.
void PutWorkerOn(ThreadPool *tp, int *var) {
  usleep(100000);
  tp->Add(NewCallback(Worker, var));
  usleep(100000);
}

// FN: two producers feed one single-threaded consumer pool; the racing
// callbacks are again serialized by that one thread.
TEST(ThreadPoolFNTests, TwoProducersOneConsumer) {
  int RACEY = 0;
  printf("FN. Two closures hit the same thread in ThreadPool.\n");

  ThreadPool consumers_tp(1);
  consumers_tp.StartWorkers();

  ThreadPool producers_tp(2);
  producers_tp.StartWorkers();
  producers_tp.Add(NewCallback(PutWorkerOn, &consumers_tp, &RACEY));
  producers_tp.Add(NewCallback(PutWorkerOn, &consumers_tp, &RACEY));
}
} // namespace ThreadPoolFNTests
5898
5899 // test139: FN. A true race hidden by reference counting annotation. {{{1
namespace test139 {
// FN: a true race on GLOB is hidden because the Unref() calls are
// annotated as reference counting, creating an (unwanted here) h-b arc
// between the two accesses.
int GLOB = 0;
RefCountedClass *obj;

void Worker1() {
  GLOB++; // First access.
  obj->Unref();
}

void Worker2() {
  usleep(100000);
  obj->Unref();
  GLOB++; // Second access.
}

void Run() {
  printf("test139: FN. A true race hidden by reference counting annotation.\n");

  obj = new RefCountedClass;
  obj->AnnotateUnref();  // marks Unref() as a sync point for the tool
  obj->Ref();
  obj->Ref();
  MyThreadArray mt(Worker1, Worker2);
  mt.Start();
  mt.Join();
}

REGISTER_TEST2(Run, 139, FEATURE)
} // namespace test139
5929
// Simple FIFO queue annotated with PCQ (producer-consumer queue)
// annotations.  Thread-safe; messages must be positive ints. {{{1
class FifoMessageQueue {
 public:
  FifoMessageQueue() { ANNOTATE_PCQ_CREATE(this); }
  ~FifoMessageQueue() { ANNOTATE_PCQ_DESTROY(this); }
  // Send a message. 'message' should be positive.
  void Put(int message) {
    CHECK(message);
    MutexLock lock(&mu_);
    ANNOTATE_PCQ_PUT(this);  // before push: pairs with PCQ_GET in Get()
    q_.push(message);
  }
  // Return the message from the queue and pop it
  // or return 0 if there are no messages.
  int Get() {
    MutexLock lock(&mu_);
    if (q_.empty()) return 0;
    int res = q_.front();
    q_.pop();
    ANNOTATE_PCQ_GET(this);  // after pop: completes the PCQ handoff
    return res;
  }
 private:
  Mutex mu_;      // protects q_
  queue<int> q_;
};
5956
5957
5958 // test142: TN. Check PCQ_* annotations. {{{1
5959 namespace test142 {
5960 // Putter writes to array[i] and sends a message 'i'.
5961 // Getters receive messages and read array[message].
5962 // PCQ_* annotations calm down the hybrid detectors.
5963
5964 const int N = 1000;
5965 int array[N+1];
5966
5967 FifoMessageQueue q;
5968
Putter()5969 void Putter() {
5970 for (int i = 1; i <= N; i++) {
5971 array[i] = i*i;
5972 q.Put(i);
5973 usleep(1000);
5974 }
5975 }
5976
Getter()5977 void Getter() {
5978 int non_zero_received = 0;
5979 for (int i = 1; i <= N; i++) {
5980 int res = q.Get();
5981 if (res > 0) {
5982 CHECK(array[res] = res * res);
5983 non_zero_received++;
5984 }
5985 usleep(1000);
5986 }
5987 #ifndef WIN32
5988 #ifdef OS_darwin
5989 printf("T=%p: non_zero_received=%d\n",
5990 (void*)pthread_self(), non_zero_received);
5991 #else
5992 printf("T=%d: non_zero_received=%d\n",
5993 (int)pthread_self(), non_zero_received);
5994 #endif
5995 #endif
5996 }
5997
Run()5998 void Run() {
5999 printf("test142: tests PCQ annotations\n");
6000 MyThreadArray t(Putter, Getter, Getter);
6001 t.Start();
6002 t.Join();
6003 }
6004 REGISTER_TEST(Run, 142)
6005 } // namespace test142
6006
6007
6008 // test143: TP. Check PCQ_* annotations. {{{1
namespace test143 {
// True positive.
// We have a race on GLOB between Putter and one of the Getters.
// Pure h-b will not see it.
// If FifoMessageQueue was annotated using HAPPENS_BEFORE/AFTER, the race would
// be missed too.
// PCQ_* annotations do not hide this race.
int GLOB = 0;
StealthNotification n;  // orders the threads without informing the tool

FifoMessageQueue q;

void Putter() {
  GLOB = 1;   // unsynchronized write
  q.Put(1);
  n.signal();
}

void Getter() {
  n.wait();
  q.Get();
  CHECK(GLOB == 1);  // Race here
}

void Run() {
  q.Put(1);  // pre-load so the second Getter also gets a message
  if (!Tsan_PureHappensBefore()) {
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true races");
  }
  printf("test143: tests PCQ annotations (true positive)\n");
  MyThreadArray t(Putter, Getter, Getter);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 143);
} // namespace test143
6045
6046 // test144: Unit-test for a bug in fast-mode {{{1
namespace test144 {
// Unit test for a fast-mode bug: ANNOTATE_NEW_MEMORY on one field of an
// aligned struct used to reset the creator thread id for the whole
// object, hiding the race on the neighboring field.
struct Foo {
  int a, b;
} ALIGNED(64);

struct Foo GLOB;
int &RACEY = GLOB.a;  // the field both workers race on

void Worker() {
  RACEY++;
}

void Run() {
  printf("test144: fast-mode bug\n");
  ANNOTATE_TRACE_MEMORY(&RACEY);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&RACEY, "Real race");

  // This line resets GLOB's creator_tid (bug).
  ANNOTATE_NEW_MEMORY(&GLOB.b, sizeof(GLOB.b));

  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}

REGISTER_TEST(Run, 144);
} // namespace test144
6074
6075 // test145: Unit-test for a bug in fast-mode {{{1
namespace test145 {
// A variation of test144 for dynamic memory: the same creator_tid-reset
// bug, but the struct lives on the heap.

struct Foo {
  int a, b;
} ALIGNED(64);

struct Foo *GLOB;
int *RACEY = NULL;  // points at GLOB->a once allocated

void Worker() {
  (*RACEY)++;
}

void Run() {
  printf("test145: fast-mode bug\n");

  GLOB = new Foo;
  RACEY = &(GLOB->a);
  ANNOTATE_TRACE_MEMORY(RACEY);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(RACEY, "Real race");

  // This line resets GLOB's creator_tid (bug).
  ANNOTATE_NEW_MEMORY(&(GLOB->b), sizeof(GLOB->b));

  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
  delete GLOB;
}

REGISTER_TEST(Run, 145);
} // namespace test145
6109
6110 // test147: allocating 1.5G of mem in one chunk. {{{1
6111 namespace test147 {
Run()6112 void Run() {
6113 printf("test147: malloc 1.5G\n");
6114 free(malloc((1 << 30) + (1 << 29)));
6115 }
6116 REGISTER_TEST(Run, 147)
6117 } // namespace test147
6118
6119 // test148: FN. 3 threads, h-b hides race between T1 and T3. {{{1
namespace test148 {
// FN: 3 threads; the condvar happens-before arc (Signaller -> Waiter)
// transitively hides the true race between Signaller's unprotected
// write to GLOB and Racer's locked write.
int GLOB = 0;
int COND = 0;  // guarded by mu
Mutex mu;
CondVar cv;

void Signaller() {
  usleep(1000000);
  GLOB = 1;  // unprotected write: the racy access
  mu.Lock();
  COND = 1;
  cv.Signal();
  mu.Unlock();
}

void Waiter() {
  mu.Lock();
  while (COND == 0)
    cv.Wait(&mu);
  ANNOTATE_CONDVAR_LOCK_WAIT(&cv, &mu);
  GLOB = 2;
  mu.Unlock();
}

void Racer() {
  usleep(2000000);
  mu.Lock();
  GLOB = 3;  // locked, but still races with Signaller's GLOB = 1
  mu.Unlock();
}

void Run() {
  printf("test148: FN. 3 threads, h-b hides race between T1 and T3.\n");
  MyThreadArray mta(Signaller, Waiter, Racer);
  mta.Start();
  mta.Join();
}
REGISTER_TEST(Run, 148)
} // namespace test148
6159
// test149: allocate and memset lots of mem in several chunks. {{{1
namespace test149 {
// Stability: allocate and memset eight 64M chunks, then free them all.
void Run() {
  int kChunkSize = 1 << 26;  // 64M per chunk
  printf("test149: malloc 8x%dM\n", kChunkSize / (1 << 20));
  void *mem[8];
  for (int i = 0; i < 8; i++) {
    mem[i] = malloc(kChunkSize);
    memset(mem[i], 0, kChunkSize);  // touch every byte, not just reserve
    printf("+");
  }
  for (int i = 0; i < 8; i++) {
    free(mem[i]);
    printf("-");
  }
  printf(" Done\n");
}
REGISTER_TEST2(Run, 149, EXCLUDE_FROM_ALL)  // TODO(kcc): enable it back
} // namespace test149
6179
6180 // test150: race which is detected after one of the thread has joined. {{{1
namespace test150 {
// TP: the racy access by Writer2 happens only after Writer1 (the other
// racing thread) has already been joined; the detector must still
// report the race.
int GLOB = 0;
StealthNotification n;  // orders the accesses without informing the tool
void Writer1() { GLOB++; }
void Writer2() {
  n.wait();   // released only after Writer1 is joined
  GLOB++;
}
TEST(PositiveTests, RaceDetectedAfterJoin) {
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "real race");
  MyThread t1(Writer1);
  MyThread t2(Writer2);
  t1.Start();
  t2.Start();
  t1.Join();
  n.signal();  // let Writer2 access GLOB after t1 is gone
  t2.Join();
  printf("\tGLOB=%d\n", GLOB);
}
} // namespace test150
6201
6202
6203 // test151: stress for the size of vector time clock. {{{1
namespace test151 {
// Stress the growth of the detector's vector time clock: many
// short-lived threads, then many segment-creating annotations.
int kNumThreads = 100;
int kNumSegments = 5000000;
void Void() { }
void Run() {
  printf("test151: stress\n");
  printf("Creating %d threads\n", kNumThreads);
  for (int i = 0; i < kNumThreads; i++) {
    MyThread t(Void);  // each thread adds an entry to the vector clock
    t.Start();
    t.Join();
  }
  printf("Creating %d segments\n", kNumSegments);
  for (int i = 0; i < kNumSegments; i++) {
    if (i % (kNumSegments / 50) == 0)
      printf(".");  // progress marker, 50 dots total
    ANNOTATE_HAPPENS_BEFORE(NULL);
  }
  printf(" done\n");
}
REGISTER_TEST2(Run, 151, PERFORMANCE | EXCLUDE_FROM_ALL)  // TODO(kcc): enable
} // namespace test151
6226
6227 // test152: atexit -> exit creates a h-b arc. {{{1
namespace test152 {
// Verifies that the arc atexit() -> exit() is treated as happens-before:
// both increments of GLOB are then ordered and no race is reported.
int GLOB = 0;
MyThread *t;  // intentionally leaked; the thread is never joined.

// Runs at process exit, after AtExitThread's increment.
void AtExitCallback() {
  GLOB++;
}

void AtExitThread() {
  GLOB++;
  atexit(AtExitCallback);
}

TEST(NegativeTests, AtExitTest) {
  t = new MyThread(AtExitThread);
  t->Start(); // We don't join it.
}
} // namespace test152
6246
6247 // test153: test for vanilla pthread_spinlock_t {{{1
6248 namespace test153 {
6249 #ifndef NO_SPINLOCK
6250 // pthread_spinlock_t is tricky because pthread_spin_unlock and
6251 // pthread_spin_init are the same symbol.
6252 int GLOB = 0;
6253 pthread_spinlock_t lock;
6254
Worker1()6255 void Worker1() {
6256 pthread_spin_lock(&lock);
6257 GLOB++;
6258 pthread_spin_unlock(&lock);
6259 }
6260
Worker2()6261 void Worker2() {
6262 while (pthread_spin_trylock(&lock) != 0) { }
6263 GLOB++;
6264 pthread_spin_unlock(&lock);
6265 }
6266
6267
Run()6268 void Run() {
6269 printf("test153: pthread_spin_t\n");
6270 for (int i = 0; i < 3; i++) {
6271 // test few times on the same lock to check how init/destroy are handled.
6272 pthread_spin_init(&lock, 0);
6273 MyThreadArray t(Worker1, Worker1, Worker2, Worker2);
6274 t.Start();
6275 t.Join();
6276 pthread_spin_destroy(&lock);
6277 }
6278 }
6279 REGISTER_TEST(Run, 153)
6280 #endif // NO_SPINLOCK
6281 } // namespace test153
6282
6283 // test154: long test with lots of races. {{{1
6284 namespace test154 {
6285 const int kNumIters = 100000;
6286 const int kArraySize = 100000;
6287 int *arr;
6288
RaceyAccess(int * a)6289 void RaceyAccess(int *a) {
6290 (*a)++;
6291 }
6292
RaceyLoop()6293 void RaceyLoop() {
6294 for (int j = 0; j < kArraySize; j++) {
6295 RaceyAccess(&arr[j]);
6296 }
6297 }
6298
Worker()6299 void Worker() {
6300 for (int i = 0; i < kNumIters; i++) {
6301 usleep(1);
6302 printf(".");
6303 if ((i % 40) == 39)
6304 printf("\n");
6305 RaceyLoop();
6306 }
6307 }
6308
Run()6309 void Run() {
6310 arr = new int[kArraySize];
6311 printf("test154: positive; long test with lots of races\n");
6312 MyThreadArray t(Worker, Worker);
6313 t.Start();
6314 t.Join();
6315 delete arr;
6316 }
6317 REGISTER_TEST2(Run, 154, EXCLUDE_FROM_ALL)
6318 } // namespace test154
6319
namespace PositiveTests_RaceInMemcpy { // {{{1
// Verifies that races are reported inside intercepted libc string/memory
// functions.  GLOB always holds the 3-char string "foo" (4 bytes with NUL).
char *GLOB;

void DoMemcpy() {
  memcpy(GLOB, GLOB + 1, 1);  // overlapping 1-byte copy; racey write to GLOB[0].
}

void DoMemmove() {
  memmove(GLOB, GLOB + 1, 1);
}

// The plain-store side of each read-vs-write pair below.
void Write0() {
  GLOB[0] = 'z';
}

void DoStrlen() {
  CHECK(strlen(GLOB) == 3);
}

void DoStrcpy() {
  CHECK(strcpy(GLOB, "zzz") == GLOB);
}

void DoStrchr() {
  CHECK(strchr(GLOB, 'o') == (GLOB + 1));  // first 'o' of "foo".
}

void DoMemchr() {
  CHECK(memchr(GLOB, 'o', 4) == (GLOB + 1));
}

void DoStrrchr() {
  CHECK(strrchr(GLOB, '!') == NULL);  // '!' never present; scans whole string.
}

void DoStrcmp() {
  CHECK(strcmp(GLOB, "xxx") != 0);
}

void DoStrncmp() {
  CHECK(strncmp(GLOB, "xxx", 3) != 0);
}


// Points GLOB at |mem|, seeds it with "foo", declares the expected race,
// and runs |f1| and |f2| concurrently.
void RunThreads(void (*f1)(void), void (*f2)(void), char *mem) {
  GLOB = mem;
  strcpy(GLOB, "foo");
  ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "expected race");
  MyThreadArray t(f1, f2);
  t.Start();
  t.Join();
}

TEST(PositiveTests, RaceInMemcpy) {
  static char mem[4];
  RunThreads(DoMemcpy, DoMemcpy, mem);
}

TEST(PositiveTests, RaceInMemmove) {
  static char mem[4];
  RunThreads(DoMemmove, DoMemmove, mem);
}

TEST(PositiveTests, RaceInStrlen1) {
  static char mem[4];
  RunThreads(DoStrlen, Write0, mem);
}

// Same pair as RaceInStrlen1 with the thread order swapped.
TEST(PositiveTests, RaceInStrlen2) {
  static char mem[4];
  RunThreads(Write0, DoStrlen, mem);
}

TEST(PositiveTests, RaceInStrcpy) {
  static char mem[4];
  RunThreads(Write0, DoStrcpy, mem);
}

TEST(PositiveTests, RaceInStrchr) {
  static char mem[4];
  RunThreads(Write0, DoStrchr, mem);
}

TEST(PositiveTests, RaceInMemchr) {
  static char mem[4];
  RunThreads(Write0, DoMemchr, mem);
}

TEST(PositiveTests, RaceInStrrchr) {
  static char mem[4];
  RunThreads(Write0, DoStrrchr, mem);
}

TEST(PositiveTests, RaceInStrcmp) {
  static char mem[4];
  RunThreads(Write0, DoStrcmp, mem);
}

TEST(PositiveTests, RaceInStrncmp) {
  static char mem[4];
  RunThreads(Write0, DoStrncmp, mem);
}

} // namespace
6424
6425 // test157: TN. Test for stack traces (using ANNOTATE_NO_OP). {{{1
namespace test157 {
// TN. Exercises stack-trace collection: ANNOTATE_NO_OP is sprinkled at
// different call depths so the tool records traces; no race exists.

void func3() {
  ANNOTATE_NO_OP((void*)__LINE__);
}
void func2() {
  func3();
}
void func1() {
  func2();
}
// Workers 1..3 enter the chain at depths 3, 2 and 1 respectively, giving
// the tool stack traces of different shapes.
void Worker1() {
  func1();
  ANNOTATE_NO_OP((void*)__LINE__);
}
void Worker2() {
  func2();
  ANNOTATE_NO_OP((void*)__LINE__);
}
void Worker3() {
  func3();
  ANNOTATE_NO_OP((void*)__LINE__);
}
void Run() {
  ANNOTATE_NO_OP((void*)__LINE__);
  printf("test157: negative\n");
  ANNOTATE_NO_OP((void*)__LINE__);
  MyThreadArray t(Worker1, Worker2, Worker3);
  ANNOTATE_NO_OP((void*)__LINE__);
  t.Start();
  ANNOTATE_NO_OP((void*)__LINE__);
  t.Join();
  ANNOTATE_NO_OP((void*)__LINE__);
}
REGISTER_TEST(Run, 157);
} // namespace test157
6462
6463
6464 namespace MemoryTypes { // {{{1
WriteChar(void * param)6465 void WriteChar(void *param) {
6466 *(char*)param = 1;
6467 usleep(500000); // let other threads hit this before exiting.
6468 }
6469
RaceOnMemory(void (* callback)(void *),char * mem)6470 void RaceOnMemory(void (*callback)(void *), char *mem) {
6471 ANNOTATE_FLUSH_EXPECTED_RACES();
6472 ANNOTATE_EXPECT_RACE(mem, "race");
6473 MyThread t1(callback, mem),
6474 t2(callback, mem);
6475 t1.Start();
6476 t2.Start();
6477 t1.Join();
6478 t2.Join();
6479 CHECK(*mem == 1);
6480 ANNOTATE_FLUSH_EXPECTED_RACES();
6481 }
6482
RaceOnLocalStack(void (* callback)(void *))6483 void RaceOnLocalStack(void (*callback)(void *)) {
6484 char object_on_stack = 0;
6485 // We may have had races on the main stack before -- forget about them.
6486 ANNOTATE_NEW_MEMORY(&object_on_stack, 1);
6487 RaceOnMemory(callback, &object_on_stack);
6488 }
6489
6490 // create a new function to make reports different.
WriteChar1(void * param)6491 void WriteChar1(void *param) { WriteChar(param); }
6492
TEST(MemoryTypes,RaceOnMainThreadStack)6493 TEST(MemoryTypes, RaceOnMainThreadStack) {
6494 RaceOnLocalStack(WriteChar1);
6495 }
6496
WriteChar2(void * param)6497 void WriteChar2(void *param) { WriteChar(param); }
6498
TEST(MemoryTypes,RaceOnNonMainThreadStack)6499 TEST(MemoryTypes, RaceOnNonMainThreadStack) {
6500 MyThread t((void (*)(void*))(RaceOnLocalStack), (void*)WriteChar2);
6501 t.Start();
6502 t.Join();
6503 }
6504
WriteChar3(void * param)6505 void WriteChar3(void *param) { WriteChar(param); }
6506
TEST(MemoryTypes,RaceOnMallocedMemory)6507 TEST(MemoryTypes, RaceOnMallocedMemory) {
6508 char *mem = (char*)malloc(100);
6509 RaceOnMemory(WriteChar3, mem+42);
6510 free(mem);
6511 }
6512
WriteChar4(void * param)6513 void WriteChar4(void *param) { WriteChar(param); }
6514
TEST(MemoryTypes,RaceOnCallocedMemory)6515 TEST(MemoryTypes, RaceOnCallocedMemory) {
6516 char *mem = (char*)calloc(30, 4);
6517 RaceOnMemory(WriteChar4, mem+42);
6518 free(mem);
6519 }
6520
WriteChar5(void * param)6521 void WriteChar5(void *param) { WriteChar(param); }
6522
TEST(MemoryTypes,RaceOnMemoryFromNew)6523 TEST(MemoryTypes, RaceOnMemoryFromNew) {
6524 char *mem = new char;
6525 RaceOnMemory(WriteChar5, mem);
6526 delete mem;
6527 }
6528
WriteChar6(void * param)6529 void WriteChar6(void *param) { WriteChar(param); }
6530
TEST(MemoryTypes,RaceOnMemoryFromNewA)6531 TEST(MemoryTypes, RaceOnMemoryFromNewA) {
6532 char *mem = new char [100];
6533 RaceOnMemory(WriteChar6, mem+42);
6534 delete [] mem;
6535 }
6536
WriteChar7(void * param)6537 void WriteChar7(void *param) { WriteChar(param); }
6538
TEST(MemoryTypes,RaceOnMemoryFromNewNoThrow)6539 TEST(MemoryTypes, RaceOnMemoryFromNewNoThrow) {
6540 char *mem = new (std::nothrow) char;
6541 RaceOnMemory(WriteChar7, mem);
6542 operator delete (mem, std::nothrow);
6543 }
WriteChar8(void * param)6544 void WriteChar8(void *param) { WriteChar(param); }
6545
TEST(MemoryTypes,RaceOnMemoryFromNewNoThrowA)6546 TEST(MemoryTypes, RaceOnMemoryFromNewNoThrowA) {
6547 char *mem = new (std::nothrow) char [100];
6548 RaceOnMemory(WriteChar8, mem+42);
6549 operator delete [] (mem, std::nothrow);
6550 }
6551
AllocateAndDeallocateUsingVariousAllocs()6552 void AllocateAndDeallocateUsingVariousAllocs() {
6553 for (int i = 0; i < 10000; i++) {
6554 char *p;
6555 switch (i % 5) {
6556 case 0:
6557 p = (char*)malloc(10);
6558 free(p);
6559 break;
6560 case 1:
6561 p = new char;
6562 delete p;
6563 break;
6564 case 2:
6565 p = new char [10];
6566 delete [] p;
6567 case 3:
6568 p = new (std::nothrow) char;
6569 operator delete (p, std::nothrow);
6570 break;
6571 case 4:
6572 p = new (std::nothrow) char[10];
6573 operator delete [](p, std::nothrow);
6574 break;
6575 }
6576 }
6577 }
TEST(MemoryTypes,VariousAllocs)6578 TEST(MemoryTypes, VariousAllocs) {
6579 void (*f)(void) = AllocateAndDeallocateUsingVariousAllocs;
6580 MyThreadArray t(f, f, f, f);
6581 t.Start();
6582 t.Join();
6583 }
6584
ReallocThread()6585 void ReallocThread() {
6586 void *ptr = NULL;
6587 for (int i = 8; i < 128; i++) {
6588 int size = (1 << (i / 8)) - 1;
6589 ptr = realloc(ptr, size);
6590 ANNOTATE_TRACE_MEMORY(ptr);
6591 memset(ptr, 42, size);
6592 }
6593 free(ptr);
6594 }
TEST(MemoryTypes,Reallocs)6595 TEST(MemoryTypes, Reallocs) {
6596 MyThreadArray t(ReallocThread, ReallocThread, ReallocThread, ReallocThread);
6597 t.Start();
6598 t.Join();
6599 }
6600 } // namespace
6601
6602
namespace StressTests_ThreadTree { //{{{1
int GLOB = 0;

// Worker(N) will do 2^N increments of GLOB, each increment in a separate thread
void Worker(int depth) {
  CHECK(depth >= 0);
  if (depth > 0) {
    // Spawn a binary tree of threads; each leaf performs one racey increment.
    MyThread t1((MyThread::worker_t)Worker, (void*)(intptr_t)(depth - 1));
    MyThread t2((MyThread::worker_t)Worker, (void*)(intptr_t)(depth - 1));
    t1.Start();
    t2.Start();
    t1.Join();
    t2.Join();
  } else {
    GLOB++; // Race here
  }
}

TEST(StressTests, ThreadTree3) {
  ANNOTATE_EXPECT_RACE(&GLOB, "StressTests.ThreadTree3 race");
  ANNOTATE_TRACE_MEMORY(&GLOB);
  Worker(3);  // 8 leaf threads.
}

TEST(StressTests, DISABLED_ThreadTree7) {
  ANNOTATE_EXPECT_RACE(&GLOB, "StressTests.ThreadTree7 race");
  ANNOTATE_TRACE_MEMORY(&GLOB);
  Worker(7);  // 128 leaf threads; disabled by default.
}
} // namespace StressTests_ThreadTree
6633
namespace StressTests_StartAndJoinManyThreads { //{{{1
// Stresses thread creation/teardown: 2200 threads created and joined in
// pairs, to exercise thread-id recycling in the tool.

void Worker() {
}

// Too slow. Need to run it separately.
TEST(StressTests, StartAndJoinManyThreads) {
  ANNOTATE_FLUSH_STATE();
  for (int i = 0; i < 1100; i++) {
    if ((i % 100) == 0)
      printf(".");  // progress marker.
    MyThread t1(Worker);
    MyThread t2(Worker);
    t1.Start();
    t2.Start();
    t1.Join();
    t2.Join();
  }
  printf("\n");
}
} // namespace
6655
namespace StressTests_ManyAccesses { // {{{1
#ifndef NO_BARRIER
// Throughput stress: each thread hammers its own disjoint slice of a
// shared array, so no race exists regardless of thread count.
const int kArrayLen = 128; // Small size, so that everything fits into cache.
const int kNumIter = 1024 * 1024 * 2;
int thread_id;      // handed out via atomic increment; -1 before start.
int *array = NULL;
Barrier *barrier;   // lines all workers up before the timed loop.

void IncrementMe(int *x) {
  (*x)++;
}

void NoRaceWorker() {
  int id = AtomicIncrement(&thread_id, 1);
  barrier->Block();
  int *ptr = array + id * (kArrayLen + 64); // pad to avoid false sharing.
  for (int it = 0; it < kNumIter; it++) {
    for (int i = 0; i < kArrayLen; i++) {
      IncrementMe(ptr + i);
    }
  }
}

// Allocates the shared array and runs |n_threads| copies of |f| to completion.
void RunThreads(int n_threads, void (*f)(void)) {
  thread_id = -1;  // first AtomicIncrement then yields id 0.
  barrier = new Barrier(n_threads);
  // Allocate a lot so that operator new uses mmap, unless forced to use brk.
  array = new int[(kArrayLen + 64) * n_threads + (1 << 22)];
  printf("ptr = %p\n", array);
  MyThread **t = new MyThread*[n_threads];
  for (int i = 0; i < n_threads; i++) t[i] = new MyThread(NoRaceWorker);
  for (int i = 0; i < n_threads; i++) t[i]->Start();
  for (int i = 0; i < n_threads; i++) t[i]->Join();
  for (int i = 0; i < n_threads; i++) delete t[i];
  delete [] t;
  delete [] array;
}

// Just one thread.
TEST(StressTests, DISABLED_ManyAccessesNoRace1Test) {
  RunThreads(1, NoRaceWorker);
}

// 2 threads accessing different memory.
TEST(StressTests, DISABLED_ManyAccessesNoRace2Test) {
  RunThreads(2, NoRaceWorker);
}
// 4 threads accessing different memory.
TEST(StressTests, DISABLED_ManyAccessesNoRace4Test) {
  RunThreads(4, NoRaceWorker);
}
// 8 threads accessing different memory.
TEST(StressTests, DISABLED_ManyAccessesNoRace8Test) {
  RunThreads(8, NoRaceWorker);
}
// 16 threads accessing different memory.
TEST(StressTests, DISABLED_ManyAccessesNoRace16Test) {
  RunThreads(16, NoRaceWorker);
}
#endif // NO_BARRIER
} // namespace
6717
namespace NegativeTests_EnableRaceDetectionTest { // {{{1
// Four threads race heavily on GLOB, but detection is switched off around
// the run, so no report is expected (and the run is fast).
const size_t size = 10000;
const size_t n_iter = 1000;
int GLOB[size];

void Worker() {
  for (size_t i = 0; i < n_iter; i++) {
    for (size_t j = 0; j < size; j++) {
      GLOB[j]++;  // racey, but detection is disabled by the caller.
    }
  }
}

TEST(NegativeTests, EnableRaceDetectionTest) {
  ANNOTATE_ENABLE_RACE_DETECTION(0);   // off...
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
  ANNOTATE_ENABLE_RACE_DETECTION(1);   // ...and back on for later tests.
}
}
6739
namespace PositiveTests_MopVsFree { // {{{1
// A read of heap memory racing against free() of that memory.
int *p;
const int kIdx = 77;
StealthNotification n;  // orders the read before the free.

void Read() {
  CHECK(p[kIdx] == 777);
  n.signal();
}
void Free() {
  n.wait();
  free(p);  // races with the read above (read vs free).
}

TEST(PositiveTests, ReadVsFree) {
  p = (int*)malloc(100 * sizeof(int));
  p[kIdx] = 777;
  ANNOTATE_EXPECT_RACE(&p[kIdx], "race: read vs free");
  MyThreadArray t(Read, Free);
  t.Start();
  t.Join();
}

} // namespace
6764
namespace ManySmallObjectsTest { // {{{1
// Allocates and frees ~2M small objects to stress the tool's heap
// bookkeeping; one- and two-thread variants below.
void Worker() {
  const int N = 1 << 21;
  struct T {
    int a, b, c, d;
    T() : a(1), b(2), c(3), d(4) { }
  };
  T **a = new T*[N];
  for (int i = 0; i < N; i++) {
    if ((i % (N / 16)) == 0)
      printf("+");  // progress marker, 16 per phase.
    a[i] = new T;
    CHECK(a[i]->a == 1);
  }
  printf("\n");
  for (int i = 0; i < N; i++) {
    if ((i % (N / 16)) == 0)
      printf("-");
    delete a[i];
  }
  printf("\n");
  delete [] a;
}

TEST(StressTests, DISABLED_ManySmallObjectsOneThreadTest) {
  Worker();
}

TEST(StressTests, DISABLED_ManySmallObjectsTwoThreadsTest) {
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}
} // namespace
6799
namespace RepPrefixedInstructionsTest { //{{{1
// Checks that the tool tracks the exact byte ranges touched by x86
// rep-prefixed string instructions: overlapping ranges race, disjoint or
// zero-length ranges do not.

#if defined (__GNUC__) && (defined(ARCH_x86) || defined(ARCH_amd64))
// Zeroes n bytes starting at s with a single "rep stosb"
// (RepSanityTest below verifies exactly this semantics).
void rep_clr_1(uint8_t *s, long n)
{
  intptr_t d0, d1;  // receive the clobbered ECX/EDI values.
  __asm__ __volatile__ (
      "rep ; stosb"
      : "=&c" (d0), "=&D" (d1)
      : "a" (0), "1" (s), "0" (n)
      : "memory");
}

uint8_t mem1[1000];

// Helpers named Clr1_<offset>_<length>: clear <length> bytes at mem1+<offset>.
void Clr1_0_10() { rep_clr_1(mem1+ 0, 10); }
void Clr1_10_10() { rep_clr_1(mem1+10, 10); }
void Clr1_10_0() { rep_clr_1(mem1+10, 0); }

void Clr1_25_1() { rep_clr_1(mem1+25, 1); }
void Clr1_25_0() { rep_clr_1(mem1+25, 0); }

void Clr1_50_30() { rep_clr_1(mem1+50, 30); }
void Clr1_60_0() { rep_clr_1(mem1+60, 0); }
void Clr1_60_1() { rep_clr_1(mem1+60, 1); }
void Clr1_70_10() { rep_clr_1(mem1+70, 10); }


void RunThreads(void (*f1)(void), void (*f2)(void)) {
  MyThreadArray t(f1, f2);
  t.Start();
  t.Join();
}

// Single-threaded check that rep_clr_1 clears exactly [s, s+n).
TEST(NegativeTests, RepSanityTest) {
  memset(mem1, 0xff, sizeof(mem1));
  rep_clr_1(mem1, 0);
  CHECK(mem1[0] != 0);   // n==0 must touch nothing.
  rep_clr_1(mem1, 1);
  CHECK(mem1[0] == 0);
  CHECK(mem1[1] != 0);
  rep_clr_1(mem1, 5);
  CHECK(mem1[4] == 0);
  CHECK(mem1[5] != 0);   // byte past the range stays intact.
}

// Disjoint or empty ranges: no race should be reported.
TEST(NegativeTests, RepNegativeTest) {
  memset(mem1, 0xff, sizeof(mem1));
  RunThreads(Clr1_0_10, Clr1_10_10);   // adjacent, non-overlapping.
  RunThreads(Clr1_10_0, Clr1_10_10);   // zero-length vs real write.
  RunThreads(Clr1_25_0, Clr1_25_1);
  RunThreads(Clr1_50_30, Clr1_60_0);
}

// Overlapping ranges: the first overlapping byte is the expected race;
// the remaining overlap bytes are marked benign to keep reports focused.
TEST(PositiveTests, RepPositive1Test) {
  memset(mem1, 0xff, sizeof(mem1));
  ANNOTATE_EXPECT_RACE(mem1+10, "real race");
  for (int i = 11; i < 20; i++) ANNOTATE_BENIGN_RACE(mem1 + i, "");
  RunThreads(Clr1_10_10, Clr1_10_10);
}
TEST(PositiveTests, RepPositive2Test) {
  memset(mem1, 0xff, sizeof(mem1));
  ANNOTATE_EXPECT_RACE(mem1+25, "real race");
  RunThreads(Clr1_25_1, Clr1_25_1);
}

TEST(PositiveTests, RepPositive3Test) {
  memset(mem1, 0xff, sizeof(mem1));
  ANNOTATE_EXPECT_RACE(mem1+60, "real race");
  RunThreads(Clr1_50_30, Clr1_60_1);   // [50,80) overlaps byte 60.
}

TEST(PositiveTests, RepPositive4Test) {
  memset(mem1, 0xff, sizeof(mem1));
  ANNOTATE_EXPECT_RACE(mem1+70, "real race");
  for (int i = 71; i < 80; i++) ANNOTATE_BENIGN_RACE(mem1 + i, "");
  RunThreads(Clr1_50_30, Clr1_70_10);  // [50,80) overlaps [70,80).
}
#endif // __GNUC__ ...
} // namespace
6880
6881 // test400: Demo of a simple false positive. {{{1
namespace test400 {
// Demo of a simple false positive: busy-polling a lock-protected size()
// until it reaches zero is a correct (if ugly) join, but a hybrid
// detector cannot see the resulting ordering.
static Mutex mu;
static vector<int> *vec; // GUARDED_BY(mu);

void InitAllBeforeStartingThreads() {
  vec = new vector<int>;
  vec->push_back(1);
  vec->push_back(2);
}

// Each thread pops one element under the lock.
void Thread1() {
  MutexLock lock(&mu);
  vec->pop_back();
}

void Thread2() {
  MutexLock lock(&mu);
  vec->pop_back();
}

//---- Sub-optimal code ---------
size_t NumberOfElementsLeft() {
  MutexLock lock(&mu);
  return vec->size();
}

void WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly() {
  while(NumberOfElementsLeft()) {
    ; // sleep or print or do nothing.
  }
  // It is now safe to access vec w/o lock.
  // But a hybrid detector (like ThreadSanitizer) can't see it.
  // Solutions:
  //   1. Use pure happens-before detector (e.g. "tsan --pure-happens-before")
  //   2. Call ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&mu)
  //      in InitAllBeforeStartingThreads()
  //   3. (preferred) Use WaitForAllThreadsToFinish_Good() (see below).
  CHECK(vec->empty());
  delete vec;
}

//----- Better code -----------

bool NoElementsLeft(vector<int> *v) {
  return v->empty();
}

// Correct alternative: LockWhen establishes the ordering the detector needs.
void WaitForAllThreadsToFinish_Good() {
  mu.LockWhen(Condition(NoElementsLeft, vec));
  mu.Unlock();

  // It is now safe to access vec w/o lock.
  CHECK(vec->empty());
  delete vec;
}


void Run() {
  MyThreadArray t(Thread1, Thread2);
  InitAllBeforeStartingThreads();
  t.Start();
  WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly();
//  WaitForAllThreadsToFinish_Good();
  t.Join();
}
REGISTER_TEST2(Run, 400, RACE_DEMO)
} // namespace test400
6949
6950 // test401: Demo of false positive caused by reference counting. {{{1
namespace test401 {
// A simplified example of reference counting.
// DecRef() does ref count increment in a way unfriendly to race detectors.
// DecRefAnnotated() does the same in a friendly way.

static vector<int> *vec;
static int ref_count;

void InitAllBeforeStartingThreads(int number_of_threads) {
  vec = new vector<int>;
  vec->push_back(1);
  ref_count = number_of_threads;  // one reference per worker thread.
}

// Correct, but unfriendly to race detectors.
int DecRef() {
  return AtomicIncrement(&ref_count, -1);
}

// Correct and friendly to race detectors.
int DecRefAnnotated() {
  // Signal before the decrement; the thread that observes zero pairs it
  // with HAPPENS_AFTER, making the deletion ordered for the detector.
  ANNOTATE_HAPPENS_BEFORE(&ref_count);
  int res = AtomicIncrement(&ref_count, -1);
  if (res == 0) {
    ANNOTATE_HAPPENS_AFTER(&ref_count);
  }
  return res;
}

void ThreadWorker() {
  CHECK(ref_count > 0);
  CHECK(vec->size() == 1);
  if (DecRef() == 0) { // Use DecRefAnnotated() instead!
    // No one uses vec now ==> delete it.
    delete vec; // A false race may be reported here.
    vec = NULL;
  }
}

void Run() {
  MyThreadArray t(ThreadWorker, ThreadWorker, ThreadWorker);
  InitAllBeforeStartingThreads(3 /*number of threads*/);
  t.Start();
  t.Join();
  CHECK(vec == 0);  // the last worker must have deleted and cleared vec.
}
REGISTER_TEST2(Run, 401, RACE_DEMO)
} // namespace test401
6999
7000
7001 // test502: produce lots of segments without cross-thread relations {{{1
namespace test502 {
// Memory-usage benchmark: two threads generate ~1.5M lock-protected
// increments, creating many segments with no cross-thread relations.

/*
 * This test produces ~1Gb of memory usage when run with the following options:
 *
 * --tool=helgrind
 * --trace-after-race=0
 * --num-callers=2
 * --more-context=no
 */

Mutex MU;
int GLOB = 0;

void TP() {
  for (int i = 0; i < 750000; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
  }
}

void Run() {
  MyThreadArray t(TP, TP);
  printf("test502: produce lots of segments without cross-thread relations\n");

  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 502, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL
    | PERFORMANCE)
} // namespace test502
7035
7036 // test503: produce lots of segments with simple HB-relations {{{1
7037 // HB cache-miss rate is ~55%
namespace test503 {
// Benchmark: a ring of threads passing a token through per-thread queues,
// producing a long chain of simple pairwise happens-before relations
// (diagram below: each thread signals its right neighbor).

//  |- | | | | |
//  | \| | | | |
//  | |- | | | |
//  | | \| | | |
//  | | |- | | |
//  | | | \| | |
//  | | | |- | |
//  | | | | \| |
//  | | | | |- |
//  | | | | | \|
//  | | | | | |----
//->| | | | | |
//  |- | | | | |
//  | \| | | | |
//  ...

const int N_threads = 32;
const int ARRAY_SIZE = 128;
int GLOB[ARRAY_SIZE];
ProducerConsumerQueue *Q[N_threads];  // Q[i] feeds thread i.
int GLOB_limit = 100000;
int count = -1;  // atomic thread-id dispenser; first worker gets id 0.

void Worker(){
  int myId = AtomicIncrement(&count, 1);

  ProducerConsumerQueue &myQ = *Q[myId], &nextQ = *Q[(myId+1) % N_threads];

  // this code produces a new SS with each new segment
  while (myQ.Get() != NULL) {
    for (int i = 0; i < ARRAY_SIZE; i++)
      GLOB[i]++;

    if (myId == 0 && GLOB[0] > GLOB_limit) {
      // Stop all threads
      for (int i = 0; i < N_threads; i++)
        Q[i]->Put(NULL);  // NULL is the shutdown token.
    } else
      nextQ.Put(GLOB);
  }
}

void Run() {
  printf("test503: produce lots of segments with simple HB-relations\n");
  for (int i = 0; i < N_threads; i++)
    Q[i] = new ProducerConsumerQueue(1);
  Q[0]->Put(GLOB);  // inject the token that starts the ring.

  {
    ThreadPool pool(N_threads);
    pool.StartWorkers();
    for (int i = 0; i < N_threads; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.

  for (int i = 0; i < N_threads; i++)
    delete Q[i];
}

REGISTER_TEST2(Run, 503, MEMORY_USAGE | PRINT_STATS
    | PERFORMANCE | EXCLUDE_FROM_ALL)
} // namespace test503
7103
7104 // test504: force massive cache fetch-wback (50% misses, mostly CacheLineZ) {{{1
namespace test504 {
#if !defined(WINE) and !defined(ANDROID) // Valgrind+wine hate large static objects
// Benchmark: strided writes over an array 16x the tool's cache size, so
// nearly every access misses (mostly CacheLineZ); no real sharing occurs.
const int N_THREADS = 2,
    HG_CACHELINE_COUNT = 1 << 16,
    HG_CACHELINE_SIZE = 1 << 6,
    HG_CACHE_SIZE = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;

// int gives us ~4x speed of the byte test
// 4x array size gives us
// total multiplier of 16x over the cachesize
// so we can neglect the cached-at-the-end memory
const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,
    ITERATIONS = 30;
int array[ARRAY_SIZE];

int count = 0;   // thread-id dispenser, protected by count_mu.
Mutex count_mu;

void Worker() {
  count_mu.Lock();
  int myId = ++count;  // ids start at 1.
  count_mu.Unlock();

  // all threads write to different memory locations,
  // so no synchronization mechanisms are needed
  int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int i = lower_bound; i < upper_bound;
         i += HG_CACHELINE_SIZE / sizeof(array[0])) {
      array[i] = i; // each array-write generates a cache miss
    }
}

void Run() {
  printf("test504: force massive CacheLineZ fetch-wback\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 504, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
#endif // WINE
} // namespace test504
7149
7150 // test505: force massive cache fetch-wback (60% misses) {{{1
7151 // modification of test504 - more threads, byte accesses and lots of mutexes
7152 // so it produces lots of CacheLineF misses (30-50% of CacheLineZ misses)
namespace test505 {
#if !defined(WINE) and !defined(ANDROID) // Valgrind+wine hate large static objects
// Variant of test504: 64-bit accesses under several short-lived local
// mutexes, producing lots of CacheLineF misses in addition to CacheLineZ.

const int N_THREADS = 2,
    HG_CACHELINE_COUNT = 1 << 16,
    HG_CACHELINE_SIZE = 1 << 6,
    HG_CACHE_SIZE = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;

const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,
    ITERATIONS = 3;
int64_t array[ARRAY_SIZE];

int count = 0;   // thread-id dispenser, protected by count_mu.
Mutex count_mu;

void Worker() {
  const int N_MUTEXES = 5;
  Mutex mu[N_MUTEXES];  // per-thread locks; held only to vary lockset state.
  count_mu.Lock();
  int myId = ++count;
  count_mu.Unlock();

  // all threads write to different memory locations,
  // so no synchronization mechanisms are needed
  int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int mutex_id = 0; mutex_id < N_MUTEXES; mutex_id++) {
      Mutex *m = & mu[mutex_id];
      m->Lock();
      for (int i = lower_bound + mutex_id, cnt = 0;
           i < upper_bound;
           i += HG_CACHELINE_SIZE / sizeof(array[0]), cnt++) {
        array[i] = i; // each array-write generates a cache miss
      }
      m->Unlock();
    }
}

void Run() {
  printf("test505: force massive CacheLineF fetch-wback\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 505, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
#endif // WINE
} // namespace test505
7202
7203 // test506: massive HB's using Barriers {{{1
7204 // HB cache miss is ~40%
7205 // segments consume 10x more memory than SSs
7206 // modification of test39
namespace test506 {
#ifndef NO_BARRIER
// Same as test17 but uses Barrier class (pthread_barrier_t).
// 64 threads rendezvous at 1000 successive barriers, generating massive
// numbers of happens-before relations.
int GLOB = 0;
const int N_threads = 64,
    ITERATIONS = 1000;
Barrier *barrier[ITERATIONS];  // one barrier object per round.
Mutex MU;

void Worker() {
  for (int i = 0; i < ITERATIONS; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
    barrier[i]->Block();  // wait for all 64 threads before next round.
  }
}
void Run() {
  printf("test506: massive HB's using Barriers\n");
  for (int i = 0; i < ITERATIONS; i++) {
    barrier[i] = new Barrier(N_threads);
  }
  {
    ThreadPool pool(N_threads);
    pool.StartWorkers();
    for (int i = 0; i < N_threads; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
  CHECK(GLOB == N_threads * ITERATIONS);
  for (int i = 0; i < ITERATIONS; i++) {
    delete barrier[i];
  }
}
REGISTER_TEST2(Run, 506, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL);
#endif // NO_BARRIER
} // namespace test506
7244
7245 // test507: vgHelgrind_initIterAtFM/stackClear benchmark {{{1
7246 // vgHelgrind_initIterAtFM/stackClear consume ~8.5%/5.5% CPU
namespace test507 {
// Benchmark targeting vgHelgrind_initIterAtFM/stackClear: creates and
// destroys an annotated rwlock on the stack ~1M times.
const int N_THREADS    = 1,
    BUFFER_SIZE = 1,
    ITERATIONS  = 1 << 20;

void Foo() {
  struct T {
    char temp;
    T() {
      ANNOTATE_RWLOCK_CREATE(&temp);  // register &temp as an rwlock.
    }
    ~T() {
      ANNOTATE_RWLOCK_DESTROY(&temp);
    }
  } s[BUFFER_SIZE];
  s->temp = '\0';
}

void Worker() {
  for (int j = 0; j < ITERATIONS; j++) {
    Foo();
  }
}

void Run() {
  printf("test507: vgHelgrind_initIterAtFM/stackClear benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 507, EXCLUDE_FROM_ALL);
} // namespace test507
7283
7284 // test508: cmp_WordVecs_for_FM benchmark {{{1
7285 // 50+% of CPU consumption by cmp_WordVecs_for_FM
namespace test508 {
// Benchmark targeting cmp_WordVecs_for_FM: same shape as test507 but with
// 1024 annotated rwlocks per stack frame and fewer iterations.
const int N_THREADS    = 1,
    BUFFER_SIZE = 1 << 10,
    ITERATIONS  = 1 << 9;

void Foo() {
  struct T {
    char temp;
    T() {
      ANNOTATE_RWLOCK_CREATE(&temp);
    }
    ~T() {
      ANNOTATE_RWLOCK_DESTROY(&temp);
    }
  } s[BUFFER_SIZE];
  s->temp = '\0';
}

void Worker() {
  for (int j = 0; j < ITERATIONS; j++) {
    Foo();
  }
}

void Run() {
  printf("test508: cmp_WordVecs_for_FM benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 508, EXCLUDE_FROM_ALL);
} // namespace test508
7322
7323 // test509: avl_find_node benchmark {{{1
7324 // 10+% of CPU consumption by avl_find_node
7325 namespace test509 {
7326 const int N_THREADS = 16,
7327 ITERATIONS = 1 << 8;
7328
Worker()7329 void Worker() {
7330 std::vector<Mutex*> mu_list;
7331 for (int i = 0; i < ITERATIONS; i++) {
7332 Mutex * mu = new Mutex();
7333 mu_list.push_back(mu);
7334 mu->Lock();
7335 }
7336 for (int i = ITERATIONS - 1; i >= 0; i--) {
7337 Mutex * mu = mu_list[i];
7338 mu->Unlock();
7339 delete mu;
7340 }
7341 }
7342
Run()7343 void Run() {
7344 printf("test509: avl_find_node benchmark\n");
7345 {
7346 ThreadPool pool(N_THREADS);
7347 pool.StartWorkers();
7348 for (int i = 0; i < N_THREADS; i++) {
7349 pool.Add(NewCallback(Worker));
7350 }
7351 } // all folks are joined here.
7352 }
7353 REGISTER_TEST2(Run, 509, EXCLUDE_FROM_ALL);
7354 } // namespace test509
7355
7356 // test510: SS-recycle test {{{1
// this test shows the case where only ~1% of SS are recycled
namespace test510 {
const int N_THREADS = 16,
          ITERATIONS = 1 << 10;
int GLOB = 0;  // racey counter; the unsynchronized increments drive segment churn

void Worker() {
  usleep(100000);  // let all workers start before the churn begins
  for (int i = 0; i < ITERATIONS; i++) {
    // Each SIGNAL forces the tool to start a new segment (see test512's
    // "Force new segment" usage of the same annotation).
    ANNOTATE_CONDVAR_SIGNAL((void*)0xDeadBeef);
    GLOB++;
    usleep(10);
  }
}

// Entry point: runs N_THREADS workers via a ThreadPool and joins them.
void Run() {
  //ANNOTATE_BENIGN_RACE(&GLOB, "Test");
  printf("test510: SS-recycle test\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 510, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
} // namespace test510
7385
7386 // test511: Segment refcounting test ('1' refcounting) {{{1
namespace test511 {
int GLOB = 0;

// Single-threaded SIGNAL/WAIT alternation on the same address,
// exercising segment reference counting ('1' refcounting, per the
// header comment). The usleep between the two keeps segments alive.
void Run () {
  for (int i = 0; i < 300; i++) {
    ANNOTATE_CONDVAR_SIGNAL(&GLOB);
    usleep(1000);
    GLOB++;
    ANNOTATE_CONDVAR_WAIT(&GLOB);
  }
}
REGISTER_TEST2(Run, 511, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
} // namespace test511
7400
7401 // test512: Access the same memory with big intersecting LockSets {{{1
namespace test512 {
const int N_MUTEXES = 128;
const int DATA_SIZE = 1024;

Mutex mu[N_MUTEXES];
int GLOB[DATA_SIZE];

// Both threads write all of GLOB while holding every one of the
// N_MUTEXES global mutexes (plus one thread-local mutex), so the
// accesses are fully protected, but the tool must track very large
// intersecting locksets for every access.
void TP() {
  Mutex thread_mu;
  thread_mu.Lock();
  for (int j = 0; j < 10; j++) {
    for (int m = 0; m < N_MUTEXES; m++)
      mu[m].Lock();
    for (int i = 0; i < 3000; i++) {
      ANNOTATE_CONDVAR_SIGNAL(&GLOB); // Force new segment
      for (int k = 0; k < DATA_SIZE; k++)
        GLOB[k] = 42;
    }
    for (int m = 0; m < N_MUTEXES; m++)
      mu[m].Unlock();
  }
  thread_mu.Unlock();
}

// Entry point: two TP threads hammer the same memory concurrently.
void Run() {
  MyThreadArray t(TP, TP);
  printf("test512: Access the same memory with big intersecting LockSets.\n");

  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 512, EXCLUDE_FROM_ALL | PERFORMANCE)
} // namespace test512
7436
7437 // test513: --fast-mode benchmark {{{1
7438 namespace test513 {
7439
7440 const int N_THREADS = 2,
7441 HG_CACHELINE_SIZE = 1 << 6,
7442 ARRAY_SIZE = HG_CACHELINE_SIZE * 512,
7443 MUTEX_ID_BITS = 8,
7444 MUTEX_ID_MASK = (1 << MUTEX_ID_BITS) - 1;
7445
7446 // Each thread has its own cacheline and tackles with it intensively
7447 const int ITERATIONS = 1024;
7448 int array[N_THREADS][ARRAY_SIZE];
7449
7450 int count = 0;
7451 Mutex count_mu;
7452 Mutex mutex_arr[N_THREADS][MUTEX_ID_BITS];
7453
Worker()7454 void Worker() {
7455 count_mu.Lock();
7456 int myId = count++;
7457 count_mu.Unlock();
7458
7459 // all threads write to different memory locations
7460 for (int j = 0; j < ITERATIONS; j++) {
7461 int mutex_mask = j & MUTEX_ID_BITS;
7462 for (int m = 0; m < MUTEX_ID_BITS; m++)
7463 if (mutex_mask & (1 << m))
7464 mutex_arr[myId][m].Lock();
7465
7466 for (int i = 0; i < ARRAY_SIZE; i++) {
7467 array[myId][i] = i;
7468 }
7469
7470 for (int m = 0; m < MUTEX_ID_BITS; m++)
7471 if (mutex_mask & (1 << m))
7472 mutex_arr[myId][m].Unlock();
7473 }
7474 }
7475
Run()7476 void Run() {
7477 printf("test513: --fast-mode benchmark\n");
7478 {
7479 ThreadPool pool(N_THREADS);
7480 pool.StartWorkers();
7481 for (int i = 0; i < N_THREADS; i++) {
7482 pool.Add(NewCallback(Worker));
7483 }
7484 } // all folks are joined here.
7485 }
7486
7487 REGISTER_TEST2(Run, 513, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
7488 } // namespace test513
7489
7490 namespace ThreadChainTest { // {{{1 Reg test for thread creation
Thread1()7491 void Thread1() { }
Thread2()7492 void Thread2() {
7493 MyThread t(Thread1);
7494 t.Start();
7495 t.Join();
7496 }
Thread3()7497 void Thread3() {
7498 MyThread t(Thread2);
7499 t.Start();
7500 t.Join();
7501 }
Thread4()7502 void Thread4() {
7503 MyThread t(Thread3);
7504 t.Start();
7505 t.Join();
7506 }
7507
TEST(RegTests,ThreadChainTest)7508 TEST(RegTests, ThreadChainTest) {
7509 Thread4();
7510 }
7511
7512 } // namespace
7513
#ifndef ANDROID // GTest does not support ASSERT_DEBUG_DEATH.
namespace SimpleDeathTest { // {{{1 Make sure that the tool handles death tests correctly
// Note: the TEST header is selected by the preprocessor so the test is
// disabled on Windows but otherwise identical.
#ifdef WIN32
TEST(DeathTests, DISABLED_SimpleDeathTest) {
#else
TEST(DeathTests, SimpleDeathTest) {
#endif
  // CHECK(false) aborts; the death-test harness forks and expects that.
  ASSERT_DEBUG_DEATH(CHECK(false), "");
}
} // namespace
#endif
7525
namespace IgnoreTests { // {{{1 Test how the tool works with indirect calls to fun_r functions
int GLOB = 0;
void (*f)() = NULL;  // set by the indirect-call test before starting threads

// Racey increment; this function itself is not in the ignore list.
void NotIgnoredRacey() {
  GLOB++;
}

// Presumably listed as a fun_r (recursively ignored) function in the
// tool's configuration — see the namespace comment; the race inside
// NotIgnoredRacey should then be ignored whether FunRFunction is
// reached directly or through a function pointer.
void FunRFunction() {
  NotIgnoredRacey();
  usleep(1); // avoid tail call elimination
}

void DoDirectCall() {
  FunRFunction();
  usleep(1); // avoid tail call elimination
}

void DoIndirectCall() {
  (*f)();  // calls FunRFunction via the pointer set in the test below
  usleep(1); // avoid tail call elimination
}

TEST(IgnoreTests, DirectCallToFunR) {
  MyThreadArray mta(DoDirectCall, DoDirectCall);
  mta.Start();
  mta.Join();
}

TEST(IgnoreTests, IndirectCallToFunR) {
  f = FunRFunction;
  MyThreadArray mta(DoIndirectCall, DoIndirectCall);
  mta.Start();
  mta.Join();
}
} // namespace
7562
namespace MutexNotPhbTests {

int GLOB = 0;
Mutex mu;
StealthNotification n;

// Writes GLOB, passes through mu, then signals. Because the test marks
// mu with ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX, the lock/unlock pair must
// NOT create a happens-before arc to WaitThread, so the GLOB accesses
// race (the test expects a true positive).
void SignalThread() {
  GLOB = 1;
  mu.Lock();
  mu.Unlock();
  n.signal();
}

void WaitThread() {
  // n orders the threads in real time but — judging by the expected
  // race below — apparently without telling the tool.
  n.wait();
  mu.Lock();
  mu.Unlock();
  GLOB = 2;
}

TEST(MutexNotPhbTests, MutexNotPhbTest) {
  ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(&mu);
  ANNOTATE_EXPECT_RACE(&GLOB, "MutexNotPhbTest. TP.");
  MyThreadArray mta(SignalThread, WaitThread);
  mta.Start();
  mta.Join();
}
} // namespace
7591
namespace RaceVerifierTests_Simple {
int GLOB = 0;

// Two unsynchronized writes to the same global: the simplest possible
// expected (and verifiable) race.
void Worker1() {
  GLOB = 1;
}

void Worker2() {
  GLOB = 2;
}

TEST(RaceVerifierTests, Simple) {
  ANNOTATE_EXPECT_RACE(&GLOB, "SimpleRace.");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
}
} // namespace
7610
namespace RaceVerifierTests_Unverifiable {
StealthNotification n;
int GLOB = 0;

// The first write is guarded by '!GLOB', so it happens only while GLOB
// is still 0; once Worker2 has run, replaying the accesses cannot
// reproduce the race — hence "UNVERIFIABLE" in the expectation string.
void Worker1() {
  if (!GLOB)
    GLOB = 1;
  n.signal();
}

void Worker2() {
  n.wait();
  GLOB = 2;
}

TEST(RaceVerifierTests, Unverifiable) {
  ANNOTATE_EXPECT_RACE(&GLOB, "SimpleRace. UNVERIFIABLE.");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
}
} // namespace
7633
7634
namespace RaceVerifierTests_ManyRacesInOneTrace {
StealthNotification n;  // NOTE(review): declared but not used below
int array[2];

// Worker1 touches both elements; Worker2's single statement reads
// array[0] and writes array[1], so one short trace contains two
// distinct races (one per element).
void Worker1() {
  array[0] = 1;
  array[1] = 2;
}

void Worker2() {
  array[1] = array[0];
}

TEST(RaceVerifierTests, ManyRacesInOneTrace) {
  ANNOTATE_EXPECT_RACE(array + 0, "RaceVerifierTests_ManyRacesInOneTrace: race 1.");
  ANNOTATE_EXPECT_RACE(array + 1, "RaceVerifierTests_ManyRacesInOneTrace: race 2.");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
}
} // namespace
7656
namespace PrintfTests_Simple {

// Two threads printing concurrently with no shared user data; no race
// is expected, only libc's internal stdio locking is exercised.
void Worker1() {
  // This one is a printf() => vfprintf()
  fprintf(stderr, "Hello from a thread: %d\n", 2);
  // This one is a puts()
  fprintf(stderr, "Hello from a thread\n");
  fprintf(stdout, "Hello from a thread: %d\n", 2);
  fprintf(stdout, "Hello from a thread\n");
}

TEST(PrintfTests, DISABLED_Simple) {
  MyThreadArray t(Worker1, Worker1);
  t.Start();
  t.Join();
}
} // namespace
7674
namespace PrintfTests_RaceOnFwriteArgument {

char s[] = "abracadabra\n";

// fwrite reads the buffer while Worker2 writes s[3]: the race is on the
// data passed INTO a stdio call, not on stdio internals.
void Worker1() {
  fwrite(s, 1, sizeof(s) - 1, stdout);
}

void Worker2() {
  s[3] = 'z';
}

TEST(PrintfTests, RaceOnFwriteArgument) {
  ANNOTATE_TRACE_MEMORY(s + 3);
  ANNOTATE_EXPECT_RACE(s + 3, "PrintfTests_RaceOnFwriteArgument.");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
}
} // namespace
7695
namespace PrintfTests_RaceOnPutsArgument {

char s[] = "abracadabra";

// Same idea as the fwrite test above, but through puts(): the library
// reads s while Worker2 mutates one of its bytes.
void Worker1() {
  puts(s);
}

void Worker2() {
  s[3] = 'z';
}

TEST(PrintfTests, RaceOnPutsArgument) {
  ANNOTATE_TRACE_MEMORY(s + 3);
  ANNOTATE_EXPECT_RACE(s + 3, "PrintfTests_RaceOnPutsArgument.");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
}
} // namespace
7716
namespace PrintfTests_RaceOnPrintfArgument {

// NOTE(review): passing a volatile array to %s drops the volatile
// qualifier (formally questionable), but the point here is only the
// racy read inside printf; the test is DISABLED anyway.
volatile char s[] = "abracadabra";
volatile char s2[] = "abracadabra";

void Worker1() {
  fprintf(stdout, "printing a string: %s\n", s);
  fprintf(stderr, "printing a string: %s\n", s2);
}

void Worker2() {
  s[3] = 'z';
  s2[3] = 'z';
}

TEST(PrintfTests, DISABLED_RaceOnPrintfArgument) {
  ANNOTATE_EXPECT_RACE(s + 3, "PrintfTests_RaceOnPrintfArgument (stdout).");
  ANNOTATE_EXPECT_RACE(s2 + 3, "PrintfTests_RaceOnPrintfArgument (stderr).");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
}
} // namespace
7740
7741 // Apparently, %n is not supported in windows
7742 #ifndef WIN32
namespace PrintfTests_RaceOnOutputArgument {

volatile char s[] = "abracadabra";
volatile int a = 0;  // written by printf via %n, read racily by the other thread

// %n makes printf WRITE (the character count so far) into 'a', so the
// race is on printf's output argument rather than its input.
void Worker1() {
  fprintf(stdout, "printing a string: %s%n\n", s, &a);
}

void Worker2() {
  fprintf(stdout, "the other thread have already printed %d characters\n", a);
}

TEST(PrintfTests, DISABLED_RaceOnOutputArgument) {
  ANNOTATE_EXPECT_RACE(&a, "PrintfTests_RaceOnOutputArgument:int.");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
}
} // namespace
7763 #endif
7764
7765 namespace PrintfTests_Fflush {
7766
7767 volatile char s[] = "abracadabra";
7768 volatile int a = 0;
7769
Worker1()7770 void Worker1() {
7771 fflush(NULL);
7772 }
7773
Worker2()7774 void Worker2() {
7775 fflush(NULL);
7776 }
7777
TEST(PrintfTests,DISABLED_Fflush)7778 TEST(PrintfTests, DISABLED_Fflush) {
7779 MyThreadArray t(Worker1, Worker2);
7780 t.Start();
7781 t.Join();
7782 }
7783 } // namespace
7784
namespace BenignRaceTest { // {{{1
const int kArrayLen = 97;
char X[kArrayLen];
char *P;  // current target byte inside X, raced on by all three workers

int counter;  // number of workers still running in the current iteration


// Increments the shared target byte (the deliberate race), then signals
// completion by atomically decrementing 'counter'.
void Worker() {
  (*P)++;
  ANNOTATE_HAPPENS_BEFORE(P);
  AtomicIncrement(&counter, -1);
}

// Each iteration picks a sub-range [beg, end) of X and a position P in
// it, declares the race there either EXPECTED or BENIGN (alternating),
// resets the shadow memory, lets three workers hit P, and waits for
// them before the next round.
TEST(NegativeTests, BenignRaceTest) {
  ThreadPool pool1(1);
  ThreadPool pool2(1);
  ThreadPool pool3(1);
  pool1.StartWorkers();
  pool2.StartWorkers();
  pool3.StartWorkers();

  ANNOTATE_BENIGN_RACE(&counter, "");
  const int kNIter = 1000;

  for (int i = 0; i < kNIter; i++) {
    counter = 3;  // three workers per iteration
    long len = (i % (kArrayLen / 3)) + 1;
    long beg = i % (kArrayLen - len);
    long end = beg + len;
    CHECK(beg < kArrayLen);
    CHECK(end <= kArrayLen);
    bool is_expected = i % 2;
    long pos = i % len;
    P = X + beg + pos;
    CHECK(P < X + kArrayLen);
    // printf("[%d] b=%ld e=%ld p=%ld is_expected=%d\n",
    //        i, beg, end, pos, is_expected);
    ANNOTATE_NEW_MEMORY(X, kArrayLen);  // reset shadow state for the whole array
    if (is_expected) {
      ANNOTATE_EXPECT_RACE(P, "expected race in BenignRaceTest");
    } else {
      ANNOTATE_BENIGN_RACE_SIZED(X + beg, len, "");
    }
    if ((i % (kNIter / 10)) == 0) {
      ANNOTATE_FLUSH_STATE();
    }
    pool1.Add(NewCallback(Worker));
    pool2.Add(NewCallback(Worker));
    pool3.Add(NewCallback(Worker));

    // Poll (atomic read) until all three workers have finished.
    while(AtomicIncrement(&counter, 0) != 0)
      usleep(1000);
    ANNOTATE_HAPPENS_AFTER(P);

    ANNOTATE_FLUSH_EXPECTED_RACES();
  }
}
}
7844
namespace StressTests_FlushStateTest { // {{{1
// Stress test for FlushState which happens in parallel with some work.
const int N = 1000;
int array[N];

// Periodically flushes the tool's state while the workers below keep
// touching 'array'.
void Flusher() {
  for (int i = 0; i < 10; i++) {
    usleep(1000);
    ANNOTATE_FLUSH_STATE();
  }
}

// Tiny access helpers; 'volatile' in the readers keeps the loads from
// being optimized away.
void Write1(int i) { array[i]++; }
void Write2(int i) { array[i]--; }
int Read1(int i) { volatile int z = array[i]; return z; }
int Read2(int i) { volatile int z = array[i]; return z; }

// Sweeps the whole array with two writes and two reads per element,
// repeatedly, so flushes land in the middle of active access streams.
void Worker() {
  for (int iter = 0; iter < 10; iter++) {
    usleep(1000);
    for (int i = 0; i < N; i++) {
      Write1(i);
      Write2(i);
      Read1(i);
      Read2(i);
    }
  }
}

TEST(StressTests, FlushStateTest) {
  MyThreadArray t(Flusher, Worker, Worker, Worker);
  t.Start();
  t.Join();
}

} // namespace
7881
7882 // End {{{1
7883 // vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=marker
7884