/*
 * rwlock8.c
 *
 * Hammer on a bunch of rwlocks to test robustness and fairness.
 * Printed stats should be roughly even for each thread.
 *
 * Yield during each access to exercise lock contention code paths
 * more than rwlock7.c does (particularly on uni-processor systems).
 */

#include "test.h"
#include <sys/timeb.h>

#ifdef __GNUC__
#include <stdlib.h>
#endif

#define THREADS         5
#define DATASIZE        7
#define ITERATIONS      100000

/*
 * Keep statistics for each thread.
 */
typedef struct thread_tag {
  int         thread_num;
  pthread_t   thread_id;
  int         updates;
  int         reads;
  int         changed;
  int         seed;
} thread_t;

/*
 * Read-write lock and shared data.
 */
typedef struct data_tag {
  pthread_rwlock_t    lock;
  int                 data;
  int                 updates;
} data_t;

static thread_t threads[THREADS];
static data_t data[DATASIZE];

/*
 * Thread start routine that uses read-write locks.
 */
void *thread_routine (void *arg)
{
  thread_t *self = (thread_t*)arg;
  int iteration;
  int element = 0;
  int seed = self->seed;
  int interval = 1 + rand_r (&seed) % 71;
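
  /*
   * Each thread advances its own copy of the seed, so rand_r stays
   * safe to call concurrently; the write interval is drawn from 1..71.
   */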
  self->changed = 0;

  for (iteration = 0; iteration < ITERATIONS; iteration++)
    {
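      /* Emit a progress dot at each 10% of the iterations. */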
      if (iteration % (ITERATIONS / 10) == 0)
        {
          putchar('.');
          fflush(stdout);
        }
      /*
       * Every "interval" iterations, perform an update
       * operation (write lock instead of read lock).
       */
      if ((iteration % interval) == 0)
        {
          pthread_rwlock_wrlock (&data[element].lock);
          data[element].data = self->thread_num;
          data[element].updates++;
          self->updates++;
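          /*
           * Redraw the interval, then yield while still holding the
           * write lock so other threads pile up behind it.
           */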
          interval = 1 + rand_r (&seed) % 71;
          sched_yield();
          pthread_rwlock_unlock (&data[element].lock);
        }
      else
        {
          /*
           * Look at the current data element to see whether
           * the current thread last updated it. Count the
           * times, to report later.
           */
          pthread_rwlock_rdlock (&data[element].lock);

          self->reads++;

          if (data[element].data != self->thread_num)
            {
              self->changed++;
              interval = 1 + self->changed % 71;
            }

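          /* Yield while still holding the read lock so any waiting writer sees contention. */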
          sched_yield();

          pthread_rwlock_unlock (&data[element].lock);
        }

      element = (element + 1) % DATASIZE;

    }

  return NULL;
}

int
main (int argc, char *argv[])
{
  int count;
  int data_count;
  int thread_updates = 0;
  int data_updates = 0;
  int seed = 1;

  struct _timeb currSysTime1;
  struct _timeb currSysTime2;
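  /*
   * _ftime and struct _timeb come from <sys/timeb.h>; .time holds whole
   * seconds and .millitm the millisecond remainder. The two samples
   * bracket the run from thread creation through the final join.
   */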

  //printf ("Skipped (pre-tested)\n");
  //return 0;

  /*
   * Initialize the shared data.
   */
  for (data_count = 0; data_count < DATASIZE; data_count++)
    {
      data[data_count].data = 0;
      data[data_count].updates = 0;

      assert(pthread_rwlock_init (&data[data_count].lock, NULL) == 0);
    }

  _ftime(&currSysTime1);

  /*
   * Create THREADS threads to access shared data.
   */
  for (count = 0; count < THREADS; count++)
    {
      threads[count].thread_num = count;
      threads[count].updates = 0;
      threads[count].reads = 0;
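      /* Derive a distinct small seed for each thread from the parent's sequence. */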
      threads[count].seed = 1 + rand_r (&seed) % 71;

      assert(pthread_create (&threads[count].thread_id,
                             NULL, thread_routine, (void*)&threads[count]) == 0);
    }

  /*
   * Wait for all threads to complete, and collect
   * statistics.
   */
  for (count = 0; count < THREADS; count++)
    {
      assert(pthread_join (threads[count].thread_id, NULL) == 0);
    }

  putchar('\n');
  fflush(stdout);

  for (count = 0; count < THREADS; count++)
    {
      if (threads[count].changed > 0)
        {
          printf ("Thread %d found changed elements %d times\n",
                  count, threads[count].changed);
        }
    }

  putchar('\n');
  fflush(stdout);

  for (count = 0; count < THREADS; count++)
    {
      thread_updates += threads[count].updates;
      printf ("%02d: seed %d, updates %d, reads %d\n",
              count, threads[count].seed,
              threads[count].updates, threads[count].reads);
    }

  putchar('\n');
  fflush(stdout);

  /*
   * Collect statistics for the data.
   */
  for (data_count = 0; data_count < DATASIZE; data_count++)
    {
      data_updates += data[data_count].updates;
      printf ("data %02d: value %d, %d updates\n",
              data_count, data[data_count].data, data[data_count].updates);
      assert(pthread_rwlock_destroy (&data[data_count].lock) == 0);
    }

  printf ("%d thread updates, %d data updates\n",
          thread_updates, data_updates);

  _ftime(&currSysTime2);

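  /* Elapsed wall-clock time in milliseconds: fold seconds and millitm together. */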
  printf( "\nstart: %ld/%d, stop: %ld/%d, duration:%ld\n",
          currSysTime1.time,currSysTime1.millitm,
          currSysTime2.time,currSysTime2.millitm,
          (currSysTime2.time*1000+currSysTime2.millitm) -
          (currSysTime1.time*1000+currSysTime1.millitm));

  return 0;
}