• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * rwlock7.c
3  *
4  * Hammer on a bunch of rwlocks to test robustness and fairness.
5  * Printed stats should be roughly even for each thread.
6  */
7 
8 #include "test.h"
9 #include <sys/timeb.h>
10 
11 #ifdef __GNUC__
12 #include <stdlib.h>
13 #endif
14 
#define THREADS         5        /* Number of worker threads to spawn */
#define DATASIZE        7        /* Number of shared, rwlock-protected elements */
#define ITERATIONS      1000000  /* Loop count per thread */
18 
/*
 * Per-thread identity and statistics, reported after the run.
 */
typedef struct thread_tag {
  int         thread_num;   /* Index of this thread (0 .. THREADS-1) */
  pthread_t   thread_id;    /* Handle returned by pthread_create */
  int         updates;      /* Write-lock update operations performed */
  int         reads;        /* Read-lock inspections performed */
  int         changed;      /* Times the element was last written by another thread */
  int         seed;         /* Initial rand_r seed for this thread */
} thread_t;
30 
31 /*
32  * Read-write lock and shared data
33  */
34 typedef struct data_tag {
35   pthread_rwlock_t    lock;
36   int                 data;
37   int                 updates;
38 } data_t;
39 
40 static thread_t threads[THREADS];
41 static data_t data[DATASIZE];
42 
43 /*
44  * Thread start routine that uses read-write locks
45  */
thread_routine(void * arg)46 void *thread_routine (void *arg)
47 {
48   thread_t *self = (thread_t*)arg;
49   int iteration;
50   int element = 0;
51   int seed = self->seed;
52   int interval = 1 + rand_r (&seed) % 71;
53 
54   self->changed = 0;
55 
56   for (iteration = 0; iteration < ITERATIONS; iteration++)
57     {
58       if (iteration % (ITERATIONS / 10) == 0)
59         {
60           putchar('.');
61           fflush(stdout);
62         }
63       /*
64        * Each "self->interval" iterations, perform an
65        * update operation (write lock instead of read
66        * lock).
67        */
68       if ((iteration % interval) == 0)
69         {
70           pthread_rwlock_wrlock (&data[element].lock);
71           data[element].data = self->thread_num;
72           data[element].updates++;
73           self->updates++;
74 	  interval = 1 + rand_r (&seed) % 71;
75           pthread_rwlock_unlock (&data[element].lock);
76         } else {
77           /*
78            * Look at the current data element to see whether
79            * the current thread last updated it. Count the
80            * times, to report later.
81            */
82           pthread_rwlock_rdlock (&data[element].lock);
83 
84           self->reads++;
85 
86           if (data[element].data != self->thread_num)
87             {
88               self->changed++;
89 	      interval = 1 + self->changed % 71;
90             }
91 
92           pthread_rwlock_unlock (&data[element].lock);
93         }
94 
95       element = (element + 1) % DATASIZE;
96 
97     }
98 
99   return NULL;
100 }
101 
102 int
main(int argc,char * argv[])103 main (int argc, char *argv[])
104 {
105   int count;
106   int data_count;
107   int thread_updates = 0;
108   int data_updates = 0;
109   int seed = 1;
110 
111   struct _timeb currSysTime1;
112   struct _timeb currSysTime2;
113 
114   /*
115    * Initialize the shared data.
116    */
117   for (data_count = 0; data_count < DATASIZE; data_count++)
118     {
119       data[data_count].data = 0;
120       data[data_count].updates = 0;
121 
122       assert(pthread_rwlock_init (&data[data_count].lock, NULL) == 0);
123     }
124 
125   _ftime(&currSysTime1);
126 
127   /*
128    * Create THREADS threads to access shared data.
129    */
130   for (count = 0; count < THREADS; count++)
131     {
132       threads[count].thread_num = count;
133       threads[count].updates = 0;
134       threads[count].reads = 0;
135       threads[count].seed = 1 + rand_r (&seed) % 71;
136 
137       assert(pthread_create (&threads[count].thread_id,
138                              NULL, thread_routine, (void*)&threads[count]) == 0);
139     }
140 
141   /*
142    * Wait for all threads to complete, and collect
143    * statistics.
144    */
145   for (count = 0; count < THREADS; count++)
146     {
147       assert(pthread_join (threads[count].thread_id, NULL) == 0);
148     }
149 
150   putchar('\n');
151   fflush(stdout);
152 
153   for (count = 0; count < THREADS; count++)
154     {
155       if (threads[count].changed > 0)
156         {
157           printf ("Thread %d found changed elements %d times\n",
158                   count, threads[count].changed);
159         }
160     }
161 
162   putchar('\n');
163   fflush(stdout);
164 
165   for (count = 0; count < THREADS; count++)
166     {
167       thread_updates += threads[count].updates;
168       printf ("%02d: seed %d, updates %d, reads %d\n",
169               count, threads[count].seed,
170               threads[count].updates, threads[count].reads);
171     }
172 
173   putchar('\n');
174   fflush(stdout);
175 
176   /*
177    * Collect statistics for the data.
178    */
179   for (data_count = 0; data_count < DATASIZE; data_count++)
180     {
181       data_updates += data[data_count].updates;
182       printf ("data %02d: value %d, %d updates\n",
183               data_count, data[data_count].data, data[data_count].updates);
184       assert(pthread_rwlock_destroy (&data[data_count].lock) == 0);
185     }
186 
187   printf ("%d thread updates, %d data updates\n",
188           thread_updates, data_updates);
189 
190   _ftime(&currSysTime2);
191 
192   printf( "\nstart: %ld/%d, stop: %ld/%d, duration:%ld\n",
193           currSysTime1.time,currSysTime1.millitm,
194           currSysTime2.time,currSysTime2.millitm,
195           (currSysTime2.time*1000+currSysTime2.millitm) -
196           (currSysTime1.time*1000+currSysTime1.millitm));
197 
198   return 0;
199 }
200