//===-- tsan_clock_test.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
#include <sys/time.h>
#include <time.h>

namespace __tsan {

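// Clock cache shared by all the tests in this file.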
ClockCache cache;

TEST(Clock, VectorBasic) {
  ThreadClock clk(0);
  ASSERT_EQ(clk.size(), 1U);
  clk.tick();
  ASSERT_EQ(clk.size(), 1U);
  ASSERT_EQ(clk.get(0), 1U);
  clk.set(&cache, 3, clk.get(3) + 1);
  ASSERT_EQ(clk.size(), 4U);
  ASSERT_EQ(clk.get(0), 1U);
  ASSERT_EQ(clk.get(1), 0U);
  ASSERT_EQ(clk.get(2), 0U);
  ASSERT_EQ(clk.get(3), 1U);
  clk.set(&cache, 3, clk.get(3) + 1);
  ASSERT_EQ(clk.get(3), 2U);
}

TEST(Clock, ChunkedBasic) {
  ThreadClock vector(0);
  SyncClock chunked;
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 0U);
  vector.acquire(&cache, &chunked);
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 0U);
  vector.release(&cache, &chunked);
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 1U);
  vector.acq_rel(&cache, &chunked);
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 1U);
  chunked.Reset(&cache);
}

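// Sizes chosen to exercise empty, single-element and multi-block clocks,
// straddling what are presumed to be SyncClock's internal block boundaries.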
static const uptr interesting_sizes[] = {0, 1, 2, 30, 61, 62, 63, 64, 65, 66,
    100, 124, 125, 126, 127, 128, 129, 130, 188, 189, 190, 191, 192, 193, 254,
    255};

TEST(Clock, Iter) {
  const uptr n = ARRAY_SIZE(interesting_sizes);
  for (uptr fi = 0; fi < n; fi++) {
    const uptr size = interesting_sizes[fi];
    SyncClock sync;
    ThreadClock vector(0);
    for (uptr i = 0; i < size; i++)
      vector.set(&cache, i, i + 1);
    if (size != 0)
      vector.release(&cache, &sync);
    uptr i = 0;
    for (ClockElem &ce : sync) {
      ASSERT_LT(i, size);
      ASSERT_EQ(sync.get_clean(i), ce.epoch);
      i++;
    }
    ASSERT_EQ(i, size);
    sync.Reset(&cache);
  }
}

TEST(Clock, AcquireRelease) {
  ThreadClock vector1(100);
  vector1.tick();
  SyncClock chunked;
  vector1.release(&cache, &chunked);
  ASSERT_EQ(chunked.size(), 101U);
  ThreadClock vector2(0);
  vector2.acquire(&cache, &chunked);
  ASSERT_EQ(vector2.size(), 101U);
  ASSERT_EQ(vector2.get(0), 0U);
  ASSERT_EQ(vector2.get(1), 0U);
  ASSERT_EQ(vector2.get(99), 0U);
  ASSERT_EQ(vector2.get(100), 1U);
  chunked.Reset(&cache);
}

TEST(Clock, RepeatedAcquire) {
  ThreadClock thr1(1);
  thr1.tick();
  ThreadClock thr2(2);
  thr2.tick();

  SyncClock sync;
  thr1.ReleaseStore(&cache, &sync);

  thr2.acquire(&cache, &sync);
  thr2.acquire(&cache, &sync);

  sync.Reset(&cache);
}

TEST(Clock, releaseStoreAcquire) {
  ThreadClock thr0(0);
  thr0.tick();
  ThreadClock thr1(1);
  thr1.tick();
  SyncClock syncA;
  SyncClock syncB;
  ASSERT_EQ(syncA.size(), 0U);
  ASSERT_EQ(syncB.size(), 0U);
  thr1.releaseStoreAcquire(&cache, &syncB);
  ASSERT_EQ(syncB.size(), 2U);  // T0 and T1
  // releaseStoreAcquire to an empty SyncClock.
  thr0.releaseStoreAcquire(&cache, &syncA);
  ASSERT_EQ(syncA.size(), 1U);
  // releaseStoreAcquire from a non-empty SyncClock:
  // T0 learns about T1.
  thr0.releaseStoreAcquire(&cache, &syncB);
  // releaseStoreAcquire to the originally empty SyncClock:
  // T0 deposits info about T1 into syncA.
  thr0.releaseStoreAcquire(&cache, &syncA);
  ASSERT_EQ(syncA.size(), 2U);
  syncA.Reset(&cache);
  syncB.Reset(&cache);
}

TEST(Clock, ManyThreads) {
  SyncClock chunked;
  for (unsigned i = 0; i < 200; i++) {
    ThreadClock vector(0);
    vector.tick();
    vector.set(&cache, i, i + 1);
    vector.release(&cache, &chunked);
    ASSERT_EQ(i + 1, chunked.size());
    vector.acquire(&cache, &chunked);
    ASSERT_EQ(i + 1, vector.size());
  }

  for (unsigned i = 0; i < 200; i++) {
    printf("i=%d\n", i);
    ASSERT_EQ(i + 1, chunked.get(i));
  }

  ThreadClock vector(1);
  vector.acquire(&cache, &chunked);
  ASSERT_EQ(200U, vector.size());
  for (unsigned i = 0; i < 200; i++)
    ASSERT_EQ(i + 1, vector.get(i));

  chunked.Reset(&cache);
}

TEST(Clock, DifferentSizes) {
  {
    ThreadClock vector1(10);
    vector1.tick();
    ThreadClock vector2(20);
    vector2.tick();
    {
      SyncClock chunked;
      vector1.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 11U);
      vector2.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 21U);
      chunked.Reset(&cache);
    }
    {
      SyncClock chunked;
      vector2.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 21U);
      vector1.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 21U);
      chunked.Reset(&cache);
    }
    {
      SyncClock chunked;
      vector1.release(&cache, &chunked);
      vector2.acquire(&cache, &chunked);
      ASSERT_EQ(vector2.size(), 21U);
      chunked.Reset(&cache);
    }
    {
      SyncClock chunked;
      vector2.release(&cache, &chunked);
      vector1.acquire(&cache, &chunked);
      ASSERT_EQ(vector1.size(), 21U);
      chunked.Reset(&cache);
    }
  }
}

TEST(Clock, Growth) {
  {
    ThreadClock vector(10);
    vector.tick();
    vector.set(&cache, 5, 42);
    SyncClock sync;
    vector.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 11U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(1), 0ULL);
    ASSERT_EQ(sync.get(5), 42ULL);
    ASSERT_EQ(sync.get(9), 0ULL);
    ASSERT_EQ(sync.get(10), 1ULL);
    sync.Reset(&cache);
  }
  {
    ThreadClock vector1(10);
    vector1.tick();
    ThreadClock vector2(20);
    vector2.tick();
    SyncClock sync;
    vector1.release(&cache, &sync);
    vector2.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 21U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(10), 1ULL);
    ASSERT_EQ(sync.get(19), 0ULL);
    ASSERT_EQ(sync.get(20), 1ULL);
    sync.Reset(&cache);
  }
  {
    ThreadClock vector(100);
    vector.tick();
    vector.set(&cache, 5, 42);
    vector.set(&cache, 90, 84);
    SyncClock sync;
    vector.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 101U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(1), 0ULL);
    ASSERT_EQ(sync.get(5), 42ULL);
    ASSERT_EQ(sync.get(60), 0ULL);
    ASSERT_EQ(sync.get(70), 0ULL);
    ASSERT_EQ(sync.get(90), 84ULL);
    ASSERT_EQ(sync.get(99), 0ULL);
    ASSERT_EQ(sync.get(100), 1ULL);
    sync.Reset(&cache);
  }
  {
    ThreadClock vector1(10);
    vector1.tick();
    ThreadClock vector2(100);
    vector2.tick();
    SyncClock sync;
    vector1.release(&cache, &sync);
    vector2.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 101U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(10), 1ULL);
    ASSERT_EQ(sync.get(99), 0ULL);
    ASSERT_EQ(sync.get(100), 1ULL);
    sync.Reset(&cache);
  }
}

TEST(Clock, Growth2) {
  // Test clock growth for every pair of sizes:
  const uptr n = ARRAY_SIZE(interesting_sizes);
  for (uptr fi = 0; fi < n; fi++) {
    for (uptr ti = fi + 1; ti < n; ti++) {
      const uptr from = interesting_sizes[fi];
      const uptr to = interesting_sizes[ti];
      SyncClock sync;
      ThreadClock vector(0);
      for (uptr i = 0; i < from; i++)
        vector.set(&cache, i, i + 1);
      if (from != 0)
        vector.release(&cache, &sync);
      ASSERT_EQ(sync.size(), from);
      for (uptr i = 0; i < from; i++)
        ASSERT_EQ(sync.get(i), i + 1);
      for (uptr i = 0; i < to; i++)
        vector.set(&cache, i, i + 1);
      vector.release(&cache, &sync);
      ASSERT_EQ(sync.size(), to);
      for (uptr i = 0; i < to; i++)
        ASSERT_EQ(sync.get(i), i + 1);
      vector.set(&cache, to + 1, to + 1);
      vector.release(&cache, &sync);
      ASSERT_EQ(sync.size(), to + 2);
      for (uptr i = 0; i < to; i++)
        ASSERT_EQ(sync.get(i), i + 1);
      ASSERT_EQ(sync.get(to), 0U);
      ASSERT_EQ(sync.get(to + 1), to + 1);
      sync.Reset(&cache);
    }
  }
}

const uptr kThreads = 4;
const uptr kClocks = 4;

// SimpleSyncClock and SimpleThreadClock implement the same thing as
// SyncClock and ThreadClock, but in a very simple way.
struct SimpleSyncClock {
  u64 clock[kThreads];
  uptr size;

  SimpleSyncClock() {
    Reset();
  }

  void Reset() {
    size = 0;
    for (uptr i = 0; i < kThreads; i++)
      clock[i] = 0;
  }

  bool verify(const SyncClock *other) const {
    for (uptr i = 0; i < min(size, other->size()); i++) {
      if (clock[i] != other->get(i))
        return false;
    }
    for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
      if (i < size && clock[i] != 0)
        return false;
      if (i < other->size() && other->get(i) != 0)
        return false;
    }
    return true;
  }
};

struct SimpleThreadClock {
  u64 clock[kThreads];
  uptr size;
  unsigned tid;

  explicit SimpleThreadClock(unsigned tid) {
    this->tid = tid;
    size = tid + 1;
    for (uptr i = 0; i < kThreads; i++)
      clock[i] = 0;
  }

  void tick() {
    clock[tid]++;
  }

  void acquire(const SimpleSyncClock *src) {
    if (size < src->size)
      size = src->size;
    for (uptr i = 0; i < kThreads; i++)
      clock[i] = max(clock[i], src->clock[i]);
  }

  void release(SimpleSyncClock *dst) const {
    if (dst->size < size)
      dst->size = size;
    for (uptr i = 0; i < kThreads; i++)
      dst->clock[i] = max(dst->clock[i], clock[i]);
  }

  void releaseStoreAcquire(SimpleSyncClock *sc) {
    if (sc->size < size)
      sc->size = size;
    else
      size = sc->size;
    for (uptr i = 0; i < kThreads; i++) {
      uptr tmp = clock[i];
      clock[i] = max(sc->clock[i], clock[i]);
      sc->clock[i] = tmp;
    }
  }

  void acq_rel(SimpleSyncClock *dst) {
    acquire(dst);
    release(dst);
  }

  void ReleaseStore(SimpleSyncClock *dst) const {
    if (dst->size < size)
      dst->size = size;
    for (uptr i = 0; i < kThreads; i++)
      dst->clock[i] = clock[i];
  }

  bool verify(const ThreadClock *other) const {
    for (uptr i = 0; i < min(size, other->size()); i++) {
      if (clock[i] != other->get(i))
        return false;
    }
    for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
      if (i < size && clock[i] != 0)
        return false;
      if (i < other->size() && other->get(i) != 0)
        return false;
    }
    return true;
  }
};

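// Performs a long random sequence of clock operations on the real
// ThreadClock/SyncClock and on the simple reference model above,
// cross-checking the affected thread and sync clocks after every step.
// Returns false on the first mismatch; when `printing` is set, each
// operation and the resulting clock states are logged.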
static bool ClockFuzzer(bool printing) {
  // Create kThreads thread clocks.
  SimpleThreadClock *thr0[kThreads];
  ThreadClock *thr1[kThreads];
  unsigned reused[kThreads];
  for (unsigned i = 0; i < kThreads; i++) {
    reused[i] = 0;
    thr0[i] = new SimpleThreadClock(i);
    thr1[i] = new ThreadClock(i, reused[i]);
  }

  // Create kClocks sync clocks.
  SimpleSyncClock *sync0[kClocks];
  SyncClock *sync1[kClocks];
  for (unsigned i = 0; i < kClocks; i++) {
    sync0[i] = new SimpleSyncClock();
    sync1[i] = new SyncClock();
  }

  // Do N random operations (acquire, release, etc) and compare results
  // for SimpleThread/SyncClock and real Thread/SyncClock.
  for (int i = 0; i < 10000; i++) {
    unsigned tid = rand() % kThreads;
    unsigned cid = rand() % kClocks;
    thr0[tid]->tick();
    thr1[tid]->tick();

    switch (rand() % 7) {
    case 0:
      if (printing)
        printf("acquire thr%d <- clk%d\n", tid, cid);
      thr0[tid]->acquire(sync0[cid]);
      thr1[tid]->acquire(&cache, sync1[cid]);
      break;
    case 1:
      if (printing)
        printf("release thr%d -> clk%d\n", tid, cid);
      thr0[tid]->release(sync0[cid]);
      thr1[tid]->release(&cache, sync1[cid]);
      break;
    case 2:
      if (printing)
        printf("acq_rel thr%d <> clk%d\n", tid, cid);
      thr0[tid]->acq_rel(sync0[cid]);
      thr1[tid]->acq_rel(&cache, sync1[cid]);
      break;
    case 3:
      if (printing)
        printf("rel_str thr%d >> clk%d\n", tid, cid);
      thr0[tid]->ReleaseStore(sync0[cid]);
      thr1[tid]->ReleaseStore(&cache, sync1[cid]);
      break;
    case 4:
      if (printing)
        printf("reset clk%d\n", cid);
      sync0[cid]->Reset();
      sync1[cid]->Reset(&cache);
      break;
    case 5:
      if (printing)
        printf("releaseStoreAcquire thr%d -> clk%d\n", tid, cid);
      thr0[tid]->releaseStoreAcquire(sync0[cid]);
      thr1[tid]->releaseStoreAcquire(&cache, sync1[cid]);
      break;
    case 6:
      if (printing)
        printf("reset thr%d\n", tid);
      u64 epoch = thr0[tid]->clock[tid] + 1;
      reused[tid]++;
      delete thr0[tid];
      thr0[tid] = new SimpleThreadClock(tid);
      thr0[tid]->clock[tid] = epoch;
      delete thr1[tid];
      thr1[tid] = new ThreadClock(tid, reused[tid]);
      thr1[tid]->set(epoch);
      break;
    }

    if (printing) {
      for (unsigned i = 0; i < kThreads; i++) {
        printf("thr%d: ", i);
        thr1[i]->DebugDump(printf);
        printf("\n");
      }
      for (unsigned i = 0; i < kClocks; i++) {
        printf("clk%d: ", i);
        sync1[i]->DebugDump(printf);
        printf("\n");
      }

      printf("\n");
    }

    if (!thr0[tid]->verify(thr1[tid]) || !sync0[cid]->verify(sync1[cid])) {
      if (!printing)
        return false;
      printf("differs with model:\n");
      for (unsigned i = 0; i < kThreads; i++) {
        printf("thr%d: clock=[", i);
        for (uptr j = 0; j < thr0[i]->size; j++)
          printf("%s%llu", j == 0 ? "" : ",", thr0[i]->clock[j]);
        printf("]\n");
      }
      for (unsigned i = 0; i < kClocks; i++) {
        printf("clk%d: clock=[", i);
        for (uptr j = 0; j < sync0[i]->size; j++)
          printf("%s%llu", j == 0 ? "" : ",", sync0[i]->clock[j]);
        printf("]\n");
      }
      return false;
    }
  }

  for (unsigned i = 0; i < kClocks; i++) {
    sync1[i]->Reset(&cache);
  }
  return true;
}
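// Runs ClockFuzzer with a time-derived seed; if a mismatch is reported, the
// run is repeated with the same seed and logging enabled before failing.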
TEST(Clock, Fuzzer) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  int seed = tv.tv_sec + tv.tv_usec;
  printf("seed=%d\n", seed);
  srand(seed);
  if (!ClockFuzzer(false)) {
    // Redo the test with the same seed, but logging operations.
    srand(seed);
    ClockFuzzer(true);
    ASSERT_TRUE(false);
  }
}

}  // namespace __tsan