// RUN: %clangxx_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include <vector>
#include <algorithm>
#include <sys/time.h>

const int kThreads = 4;
const int kMutexes = 16 << 10;
const int kIters = 400 << 10;
const int kMaxPerThread = 10;

const int kStateInited = 0;
const int kStateNotInited = -1;
const int kStateLocked = -2;

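// mtx[i].state encodes both the lifetime and the reference count of each slot:
//   kStateNotInited (-1): the pthread_rwlock_t is not initialized;
//   kStateLocked    (-2): a thread transiently owns the slot to init or destroy it;
//   kStateInited (0) and above: the rwlock is initialized and the value counts
//   the threads currently referencing it.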
struct Mutex {
  int state;
  pthread_rwlock_t m;
};

Mutex mtx[kMutexes];

void check(int res) {
  if (res != 0) {
    printf("SOMETHING HAS FAILED\n");
    exit(1);
  }
}

bool cas(int *a, int oldval, int newval) {
  return __atomic_compare_exchange_n(a, &oldval, newval, false,
                                     __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}

void *Thread(void *seed) {
  unsigned rnd = (unsigned)(unsigned long)seed;
  int err;
  std::vector<int> locked;
  for (int i = 0; i < kIters; i++) {
    int what = rand_r(&rnd) % 10;
    if (what < 4 && locked.size() < kMaxPerThread) {
      // lock
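      // Always pick an id strictly greater than any mutex already held, so
      // every thread acquires locks in increasing index order and lock-order
      // inversion between threads is impossible.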
      int max_locked = -1;
      if (!locked.empty()) {
        max_locked = *std::max_element(locked.begin(), locked.end());
        if (max_locked == kMutexes - 1) {
          i--;
          continue;
        }
      }
      int id = (rand_r(&rnd) % (kMutexes - max_locked - 1)) + max_locked + 1;
      Mutex *m = &mtx[id];
      // init the mutex if necessary or acquire a reference
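      // Spin while another thread transiently owns the slot (kStateLocked);
      // otherwise either bump the reference count of an initialized mutex, or
      // claim an uninitialized slot, init the rwlock and publish it with a
      // reference count of 1.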
      for (;;) {
        int old = __atomic_load_n(&m->state, __ATOMIC_RELAXED);
        if (old == kStateLocked) {
          sched_yield();
          continue;
        }
        int newv = old + 1;
        if (old == kStateNotInited)
          newv = kStateLocked;
        if (cas(&m->state, old, newv)) {
          if (old == kStateNotInited) {
            if ((err = pthread_rwlock_init(&m->m, 0))) {
              fprintf(stderr, "pthread_rwlock_init failed with %d\n", err);
              exit(1);
            }
            if (!cas(&m->state, kStateLocked, 1)) {
              fprintf(stderr, "init commit failed\n");
              exit(1);
            }
          }
          break;
        }
      }
      // now we have an inited and referenced mutex, choose what to do
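      // The blocking lock variants must succeed; the try variants may return
      // EBUSY, in which case the reference taken above is dropped below and
      // the iteration is abandoned.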
      bool failed = false;
      switch (rand_r(&rnd) % 4) {
      case 0:
        if ((err = pthread_rwlock_wrlock(&m->m))) {
          fprintf(stderr, "pthread_rwlock_wrlock failed with %d\n", err);
          exit(1);
        }
        break;
      case 1:
        if ((err = pthread_rwlock_rdlock(&m->m))) {
          fprintf(stderr, "pthread_rwlock_rdlock failed with %d\n", err);
          exit(1);
        }
        break;
      case 2:
        err = pthread_rwlock_trywrlock(&m->m);
        if (err != 0 && err != EBUSY) {
          fprintf(stderr, "pthread_rwlock_trywrlock failed with %d\n", err);
          exit(1);
        }
        failed = err == EBUSY;
        break;
      case 3:
        err = pthread_rwlock_tryrdlock(&m->m);
        if (err != 0 && err != EBUSY) {
          fprintf(stderr, "pthread_rwlock_tryrdlock failed with %d\n", err);
          exit(1);
        }
        failed = err == EBUSY;
        break;
      }
      if (failed) {
        if (__atomic_fetch_sub(&m->state, 1, __ATOMIC_ACQ_REL) <= 0) {
          fprintf(stderr, "failed to unref after failed trylock\n");
          exit(1);
        }
        continue;
      }
      locked.push_back(id);
    } else if (what < 9 && !locked.empty()) {
      // unlock
      int pos = rand_r(&rnd) % locked.size();
      int id = locked[pos];
      locked[pos] = locked[locked.size() - 1];
      locked.pop_back();
      Mutex *m = &mtx[id];
      if ((err = pthread_rwlock_unlock(&m->m))) {
        fprintf(stderr, "pthread_rwlock_unlock failed with %d\n", err);
        exit(1);
      }
      if (__atomic_fetch_sub(&m->state, 1, __ATOMIC_ACQ_REL) <= 0) {
        fprintf(stderr, "failed to unref after unlock\n");
        exit(1);
      }
    } else {
      // Destroy a random mutex.
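      // Destruction is only attempted on an initialized, unreferenced mutex
      // (state == kStateInited); the CAS to kStateLocked keeps lockers and
      // other destroyers out until the slot is marked kStateNotInited again.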
      int id = rand_r(&rnd) % kMutexes;
      Mutex *m = &mtx[id];
      if (!cas(&m->state, kStateInited, kStateLocked)) {
        i--;
        continue;
      }
      if ((err = pthread_rwlock_destroy(&m->m))) {
        fprintf(stderr, "pthread_rwlock_destroy failed with %d\n", err);
        exit(1);
      }
      if (!cas(&m->state, kStateLocked, kStateNotInited)) {
        fprintf(stderr, "destroy commit failed\n");
        exit(1);
      }
    }
  }
  // Unlock all previously locked mutexes, otherwise other threads can deadlock.
  for (int i = 0; i < locked.size(); i++) {
    int id = locked[i];
    Mutex *m = &mtx[id];
    if ((err = pthread_rwlock_unlock(&m->m))) {
      fprintf(stderr, "pthread_rwlock_unlock failed with %d\n", err);
      exit(1);
    }
  }
  return 0;
}

int main() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  unsigned s = tv.tv_sec + tv.tv_usec;
  fprintf(stderr, "seed %u\n", s);
  srand(s);
  for (int i = 0; i < kMutexes; i++)
    mtx[i].state = kStateNotInited;
  pthread_t t[kThreads];
  for (int i = 0; i < kThreads; i++)
    pthread_create(&t[i], 0, Thread, (void*)(unsigned long)rand());
  for (int i = 0; i < kThreads; i++)
    pthread_join(t[i], 0);
  fprintf(stderr, "DONE\n");
  return 0;
}

// CHECK-NOT: WARNING: ThreadSanitizer
// CHECK: DONE