//===-- tsan_clock_test.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"
#include "gtest/gtest.h"
#include <sys/time.h>
#include <time.h>

namespace __tsan {

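// Shared by all tests below: SyncClock operations take this cache when they
// allocate or free clock storage.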
ClockCache cache;

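// Basic ThreadClock operations: tick() bumps the owning thread's component,
// set() writes an arbitrary component and grows the clock as needed.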
TEST(Clock, VectorBasic) {
  ThreadClock clk(0);
  ASSERT_EQ(clk.size(), 1U);
  clk.tick();
  ASSERT_EQ(clk.size(), 1U);
  ASSERT_EQ(clk.get(0), 1U);
  clk.set(3, clk.get(3) + 1);
  ASSERT_EQ(clk.size(), 4U);
  ASSERT_EQ(clk.get(0), 1U);
  ASSERT_EQ(clk.get(1), 0U);
  ASSERT_EQ(clk.get(2), 0U);
  ASSERT_EQ(clk.get(3), 1U);
  clk.set(3, clk.get(3) + 1);
  ASSERT_EQ(clk.get(3), 2U);
}

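// A SyncClock starts empty; acquiring from an empty clock is a no-op, while
// release and acq_rel populate it.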
TEST(Clock, ChunkedBasic) {
  ThreadClock vector(0);
  SyncClock chunked;
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 0U);
  vector.acquire(&cache, &chunked);
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 0U);
  vector.release(&cache, &chunked);
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 1U);
  vector.acq_rel(&cache, &chunked);
  ASSERT_EQ(vector.size(), 1U);
  ASSERT_EQ(chunked.size(), 1U);
  chunked.Reset(&cache);
}

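// A release from thread 100 grows the SyncClock to 101 entries; a fresh thread
// that acquires it observes exactly the single non-zero component.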
TEST(Clock, AcquireRelease) {
  ThreadClock vector1(100);
  vector1.tick();
  SyncClock chunked;
  vector1.release(&cache, &chunked);
  ASSERT_EQ(chunked.size(), 101U);
  ThreadClock vector2(0);
  vector2.acquire(&cache, &chunked);
  ASSERT_EQ(vector2.size(), 101U);
  ASSERT_EQ(vector2.get(0), 0U);
  ASSERT_EQ(vector2.get(1), 0U);
  ASSERT_EQ(vector2.get(99), 0U);
  ASSERT_EQ(vector2.get(100), 1U);
  chunked.Reset(&cache);
}

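// Acquiring the same SyncClock twice in a row from one thread must be handled
// correctly.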
TEST(Clock, RepeatedAcquire) {
  ThreadClock thr1(1);
  thr1.tick();
  ThreadClock thr2(2);
  thr2.tick();

  SyncClock sync;
  thr1.ReleaseStore(&cache, &sync);

  thr2.acquire(&cache, &sync);
  thr2.acquire(&cache, &sync);

  sync.Reset(&cache);
}

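// One hundred threads release into a single SyncClock: it grows with each new
// releasing thread and every component ends up equal to 1.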
TEST(Clock, ManyThreads) {
  SyncClock chunked;
  for (unsigned i = 0; i < 100; i++) {
    ThreadClock vector(0);
    vector.tick();
    vector.set(i, 1);
    vector.release(&cache, &chunked);
    ASSERT_EQ(i + 1, chunked.size());
    vector.acquire(&cache, &chunked);
    ASSERT_EQ(i + 1, vector.size());
  }

  for (unsigned i = 0; i < 100; i++)
    ASSERT_EQ(1U, chunked.get(i));

  ThreadClock vector(1);
  vector.acquire(&cache, &chunked);
  ASSERT_EQ(100U, vector.size());
  for (unsigned i = 0; i < 100; i++)
    ASSERT_EQ(1U, vector.get(i));

  chunked.Reset(&cache);
}

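// Releases and acquires between clocks of different sizes: the destination
// grows to the larger size and never shrinks.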
TEST(Clock, DifferentSizes) {
  {
    ThreadClock vector1(10);
    vector1.tick();
    ThreadClock vector2(20);
    vector2.tick();
    {
      SyncClock chunked;
      vector1.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 11U);
      vector2.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 21U);
      chunked.Reset(&cache);
    }
    {
      SyncClock chunked;
      vector2.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 21U);
      vector1.release(&cache, &chunked);
      ASSERT_EQ(chunked.size(), 21U);
      chunked.Reset(&cache);
    }
    {
      SyncClock chunked;
      vector1.release(&cache, &chunked);
      vector2.acquire(&cache, &chunked);
      ASSERT_EQ(vector2.size(), 21U);
      chunked.Reset(&cache);
    }
    {
      SyncClock chunked;
      vector2.release(&cache, &chunked);
      vector1.acquire(&cache, &chunked);
      ASSERT_EQ(vector1.size(), 21U);
      chunked.Reset(&cache);
    }
  }
}

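// Growing a SyncClock via release preserves the individual component values
// that were set on the thread clock.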
TEST(Clock, Growth) {
  {
    ThreadClock vector(10);
    vector.tick();
    vector.set(5, 42);
    SyncClock sync;
    vector.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 11U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(1), 0ULL);
    ASSERT_EQ(sync.get(5), 42ULL);
    ASSERT_EQ(sync.get(9), 0ULL);
    ASSERT_EQ(sync.get(10), 1ULL);
    sync.Reset(&cache);
  }
  {
    ThreadClock vector1(10);
    vector1.tick();
    ThreadClock vector2(20);
    vector2.tick();
    SyncClock sync;
    vector1.release(&cache, &sync);
    vector2.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 21U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(10), 1ULL);
    ASSERT_EQ(sync.get(19), 0ULL);
    ASSERT_EQ(sync.get(20), 1ULL);
    sync.Reset(&cache);
  }
  {
    ThreadClock vector(100);
    vector.tick();
    vector.set(5, 42);
    vector.set(90, 84);
    SyncClock sync;
    vector.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 101U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(1), 0ULL);
    ASSERT_EQ(sync.get(5), 42ULL);
    ASSERT_EQ(sync.get(60), 0ULL);
    ASSERT_EQ(sync.get(70), 0ULL);
    ASSERT_EQ(sync.get(90), 84ULL);
    ASSERT_EQ(sync.get(99), 0ULL);
    ASSERT_EQ(sync.get(100), 1ULL);
    sync.Reset(&cache);
  }
  {
    ThreadClock vector1(10);
    vector1.tick();
    ThreadClock vector2(100);
    vector2.tick();
    SyncClock sync;
    vector1.release(&cache, &sync);
    vector2.release(&cache, &sync);
    ASSERT_EQ(sync.size(), 101U);
    ASSERT_EQ(sync.get(0), 0ULL);
    ASSERT_EQ(sync.get(10), 1ULL);
    ASSERT_EQ(sync.get(99), 0ULL);
    ASSERT_EQ(sync.get(100), 1ULL);
    sync.Reset(&cache);
  }
}

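// Dimensions for the randomized differential test below.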
const uptr kThreads = 4;
const uptr kClocks = 4;

// SimpleSyncClock and SimpleThreadClock implement the same thing as
// SyncClock and ThreadClock, but in a very simple way.
struct SimpleSyncClock {
  u64 clock[kThreads];
  uptr size;

  SimpleSyncClock() {
    Reset();
  }

  void Reset() {
    size = 0;
    for (uptr i = 0; i < kThreads; i++)
      clock[i] = 0;
  }

  bool verify(const SyncClock *other) const {
    for (uptr i = 0; i < min(size, other->size()); i++) {
      if (clock[i] != other->get(i))
        return false;
    }
    for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
      if (i < size && clock[i] != 0)
        return false;
      if (i < other->size() && other->get(i) != 0)
        return false;
    }
    return true;
  }
};

struct SimpleThreadClock {
  u64 clock[kThreads];
  uptr size;
  unsigned tid;

  explicit SimpleThreadClock(unsigned tid) {
    this->tid = tid;
    size = tid + 1;
    for (uptr i = 0; i < kThreads; i++)
      clock[i] = 0;
  }

  void tick() {
    clock[tid]++;
  }

  void acquire(const SimpleSyncClock *src) {
    if (size < src->size)
      size = src->size;
    for (uptr i = 0; i < kThreads; i++)
      clock[i] = max(clock[i], src->clock[i]);
  }

  void release(SimpleSyncClock *dst) const {
    if (dst->size < size)
      dst->size = size;
    for (uptr i = 0; i < kThreads; i++)
      dst->clock[i] = max(dst->clock[i], clock[i]);
  }

  void acq_rel(SimpleSyncClock *dst) {
    acquire(dst);
    release(dst);
  }

  void ReleaseStore(SimpleSyncClock *dst) const {
    if (dst->size < size)
      dst->size = size;
    for (uptr i = 0; i < kThreads; i++)
      dst->clock[i] = clock[i];
  }

  bool verify(const ThreadClock *other) const {
    for (uptr i = 0; i < min(size, other->size()); i++) {
      if (clock[i] != other->get(i))
        return false;
    }
    for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
      if (i < size && clock[i] != 0)
        return false;
      if (i < other->size() && other->get(i) != 0)
        return false;
    }
    return true;
  }
};

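// Differential test: apply the same random operation sequence to the real
// clocks and to the simple reference model above and check that they agree.
// Returns false on the first divergence.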
static bool ClockFuzzer(bool printing) {
  // Create kThreads thread clocks.
  SimpleThreadClock *thr0[kThreads];
  ThreadClock *thr1[kThreads];
  unsigned reused[kThreads];
  for (unsigned i = 0; i < kThreads; i++) {
    reused[i] = 0;
    thr0[i] = new SimpleThreadClock(i);
    thr1[i] = new ThreadClock(i, reused[i]);
  }

  // Create kClocks sync clocks.
  SimpleSyncClock *sync0[kClocks];
  SyncClock *sync1[kClocks];
  for (unsigned i = 0; i < kClocks; i++) {
    sync0[i] = new SimpleSyncClock();
    sync1[i] = new SyncClock();
  }

  // Do N random operations (acquire, release, etc) and compare results
  // for SimpleThread/SyncClock and real Thread/SyncClock.
  for (int i = 0; i < 10000; i++) {
    unsigned tid = rand() % kThreads;
    unsigned cid = rand() % kClocks;
    thr0[tid]->tick();
    thr1[tid]->tick();

    switch (rand() % 6) {
    case 0:
      if (printing)
        printf("acquire thr%d <- clk%d\n", tid, cid);
      thr0[tid]->acquire(sync0[cid]);
      thr1[tid]->acquire(&cache, sync1[cid]);
      break;
    case 1:
      if (printing)
        printf("release thr%d -> clk%d\n", tid, cid);
      thr0[tid]->release(sync0[cid]);
      thr1[tid]->release(&cache, sync1[cid]);
      break;
    case 2:
      if (printing)
        printf("acq_rel thr%d <> clk%d\n", tid, cid);
      thr0[tid]->acq_rel(sync0[cid]);
      thr1[tid]->acq_rel(&cache, sync1[cid]);
      break;
    case 3:
      if (printing)
        printf("rel_str thr%d >> clk%d\n", tid, cid);
      thr0[tid]->ReleaseStore(sync0[cid]);
      thr1[tid]->ReleaseStore(&cache, sync1[cid]);
      break;
    case 4:
      if (printing)
        printf("reset clk%d\n", cid);
      sync0[cid]->Reset();
      sync1[cid]->Reset(&cache);
      break;
    case 5:
      if (printing)
        printf("reset thr%d\n", tid);
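      // Simulate thread termination and tid reuse: bump the reuse count and
      // restart both clocks with the next epoch carried over.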
      u64 epoch = thr0[tid]->clock[tid] + 1;
      reused[tid]++;
      delete thr0[tid];
      thr0[tid] = new SimpleThreadClock(tid);
      thr0[tid]->clock[tid] = epoch;
      delete thr1[tid];
      thr1[tid] = new ThreadClock(tid, reused[tid]);
      thr1[tid]->set(epoch);
      break;
    }

    if (printing) {
      for (unsigned i = 0; i < kThreads; i++) {
        printf("thr%d: ", i);
        thr1[i]->DebugDump(printf);
        printf("\n");
      }
      for (unsigned i = 0; i < kClocks; i++) {
        printf("clk%d: ", i);
        sync1[i]->DebugDump(printf);
        printf("\n");
      }

      printf("\n");
    }

    if (!thr0[tid]->verify(thr1[tid]) || !sync0[cid]->verify(sync1[cid])) {
      if (!printing)
        return false;
      printf("differs with model:\n");
      for (unsigned i = 0; i < kThreads; i++) {
        printf("thr%d: clock=[", i);
        for (uptr j = 0; j < thr0[i]->size; j++)
          printf("%s%llu", j == 0 ? "" : ",", thr0[i]->clock[j]);
        printf("]\n");
      }
      for (unsigned i = 0; i < kClocks; i++) {
        printf("clk%d: clock=[", i);
        for (uptr j = 0; j < sync0[i]->size; j++)
          printf("%s%llu", j == 0 ? "" : ",", sync0[i]->clock[j]);
        printf("]\n");
      }
      return false;
    }
  }

  for (unsigned i = 0; i < kClocks; i++) {
    sync1[i]->Reset(&cache);
  }
  return true;
}

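// Seed from the wall clock so every run exercises a different operation
// sequence; the seed is printed to make a failing run reproducible.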
TEST(Clock, Fuzzer) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  int seed = tv.tv_sec + tv.tv_usec;
  printf("seed=%d\n", seed);
  srand(seed);
  if (!ClockFuzzer(false)) {
    // Redo the test with the same seed, but logging operations.
    srand(seed);
    ClockFuzzer(true);
    ASSERT_TRUE(false);
  }
}

}  // namespace __tsan