/* Simple POSIX threads program.
 *
 *
 * --------------------------------------------------------------------------
 *
 *      Pthreads-win32 - POSIX Threads Library for Win32
 *      Copyright(C) 1998 John E. Bossom
 *      Copyright(C) 1999,2005 Pthreads-win32 contributors
 *
 *      Contact Email: rpj@callisto.canberra.edu.au
 *
 *      The current list of contributors is contained
 *      in the file CONTRIBUTORS included with the source
 *      code distribution. The list can also be seen at the
 *      following World Wide Web location:
 *      http://sources.redhat.com/pthreads-win32/contributors.html
 *
 *      This library is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU Lesser General Public
 *      License as published by the Free Software Foundation; either
 *      version 2 of the License, or (at your option) any later version.
 *
 *      This library is distributed in the hope that it will be useful,
 *      but WITHOUT ANY WARRANTY; without even the implied warranty of
 *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *      Lesser General Public License for more details.
 *
 *      You should have received a copy of the GNU Lesser General Public
 *      License along with this library in the file COPYING.LIB;
 *      if not, write to the Free Software Foundation, Inc.,
 *      59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * --------------------------------------------------------------------------
 *
 * Author: Eyal Lebedinsky eyal@eyal.emu.id.au
 * Written: Sep 1998.
 * Version Date: 12 Sep 1998
 *
 * Do we need to lock stdout or is it thread safe?
 *
 * Used:
 *	pthread_t
 *	pthread_attr_t
 *	pthread_create()
 *	pthread_join()
 *	pthread_mutex_t
 *	PTHREAD_MUTEX_INITIALIZER
 *	pthread_mutex_init()
 *	pthread_mutex_destroy()
 *	pthread_mutex_lock()
 *	pthread_mutex_trylock()
 *	pthread_mutex_unlock()
 *
 * What this program does is establish a work queue (implemented using
 * four mutexes for each thread). It then schedules work (by storing
 * a number in 'todo') and releases the threads. When the work is done
 * the threads will block. The program then repeats the same thing once
 * more (just to test the logic) and when the work is done it destroys
 * the threads.
 *
 * The 'work' we do is simply burning CPU cycles in a loop.
 * The 'todo' work queue is trivial - each thread pops one element
 * off it by incrementing it; the popped number is the 'work' to do.
 * When 'todo' reaches the limit (nwork) the queue is considered
 * empty.
 *
 * The number displayed at the end is the amount of work each thread
 * did, so we can see whether the load was properly distributed.
 *
 * The program was written to test a threading setup (not seen here)
 * rather than to demonstrate correct usage of the pthread facilities.
 *
 * Note how each thread is given access to a thread control structure
 * (TC) which is used for communicating to/from the main program (e.g.
 * each thread knows its 'id' and also fills in the 'work' done).
*/

#include "test.h"

#include <stdlib.h>
#include <math.h>

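/*
 * Each thread owns four mutexes that implement a handshake with main():
 *   mutex_start   - the "go" gate; held by main and released to start a round
 *   mutex_started - held by the thread while idle, released while working,
 *                   so main can wait for a round to begin
 *   mutex_end     - held by main during a round; stops the thread from
 *                   completing the round until main is ready
 *   mutex_ended   - held by the thread while working, so main can wait
 *                   for the round to complete
 * See print_server() and dosync() for the two sides of the handshake.
 */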
struct thread_control {
  int		id;
  pthread_t	thread;		/* thread id */
  pthread_mutex_t	mutex_start;
  pthread_mutex_t	mutex_started;
  pthread_mutex_t	mutex_end;
  pthread_mutex_t	mutex_ended;
  long		work;		/* work done */
  int		stat;		/* pthread_create() status */
};

typedef struct thread_control	TC;

static TC		*tcs = NULL;
static int		nthreads = 10;
static int		nwork = 100;
static int		quiet = 0;

static int		todo = -1;

static pthread_mutex_t	mutex_todo = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t	mutex_stdout = PTHREAD_MUTEX_INITIALIZER;


static void
die (int ret)
{
  if (NULL != tcs)
    {
      free (tcs);
      tcs = NULL;
    }

  if (ret)
    exit (ret);
}


static double
waste_time (int n)
{
  int		i;
  double	f, g, h, s;

  s = 0.0;

  /*
   * Useless work: burn CPU cycles with rand() arithmetic. (Note that
   * rand() is not required to be thread-safe; this is a test harness,
   * not a model of correct usage.)
   */
  for (i = n*100; i > 0; --i)
    {
      f = rand ();
      g = rand ();
      h = rand ();
      s += 2.0 * f * g / (h != 0.0 ? (h * h) : 1.0);
    }
  return s;
}

static int
do_work_unit (int who, int n)
{
  int		i;
  static int	nchars = 0;
  double	f = 0.0;

  if (quiet)
    i = 0;
  else {
    /*
     * get lock on stdout
     */
    assert(pthread_mutex_lock (&mutex_stdout) == 0);

    /*
     * do our job
     */
    i = printf ("%c", "0123456789abcdefghijklmnopqrstuvwxyz"[who]);

    if (!(++nchars % 50))
      printf ("\n");

    fflush (stdout);

    /*
     * release lock on stdout
     */
    assert(pthread_mutex_unlock (&mutex_stdout) == 0);
  }

  n = rand () % 10000;	/* ignore incoming 'n' */
  f = waste_time (n);

  /* This prevents the statement above from being optimised out */
  if (f > 0.0)
    return (n);

  return (n);
}

static void *
print_server (void *ptr)
{
  int		mywork;
  int		n;
  TC		*tc = (TC *)ptr;

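  /*
   * Hold 'started' to tell main() that this thread is up; main() polls
   * it with pthread_mutex_trylock() until the call returns EBUSY.
   */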
  assert(pthread_mutex_lock (&tc->mutex_started) == 0);

  for (;;)
    {
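      /*
       * Wait for main() to open the 'start' gate, then mark the round
       * as in progress: hold 'ended' and release 'started'.
       */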
      assert(pthread_mutex_lock (&tc->mutex_start) == 0);
      assert(pthread_mutex_unlock (&tc->mutex_start) == 0);
      assert(pthread_mutex_lock (&tc->mutex_ended) == 0);
      assert(pthread_mutex_unlock (&tc->mutex_started) == 0);

      for (;;)
	{

	  /*
	   * get lock on todo list
	   */
	  assert(pthread_mutex_lock (&mutex_todo) == 0);

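	  /*
	   * Pop one unit by advancing the counter; -1 marks the queue
	   * empty and -2 asks the thread to terminate.
	   */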
	  mywork = todo;
	  if (todo >= 0)
	    {
	      ++todo;
	      if (todo >= nwork)
		todo = -1;
	    }
	  assert(pthread_mutex_unlock (&mutex_todo) == 0);

	  if (mywork < 0)
	    break;

	  assert((n = do_work_unit (tc->id, mywork)) >= 0);
	  tc->work += n;
	}

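      /*
       * Round complete: wait for main() to open the 'end' gate, then
       * return to the idle state - hold 'started' and release 'ended'.
       */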
      assert(pthread_mutex_lock (&tc->mutex_end) == 0);
      assert(pthread_mutex_unlock (&tc->mutex_end) == 0);
      assert(pthread_mutex_lock (&tc->mutex_started) == 0);
      assert(pthread_mutex_unlock (&tc->mutex_ended) == 0);

      if (-2 == mywork)
	break;
    }

  assert(pthread_mutex_unlock (&tc->mutex_started) == 0);

  return NULL;
}

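/*
 * Drive one work round: phase one releases every thread and waits for
 * it to start; phase two re-arms the gates and waits for it to finish.
 */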
static void
dosync (void)
{
  int		i;

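  /*
   * Release the threads: hold each thread's 'end' gate, open its
   * 'start' gate, then wait (via 'started') until it has begun.
   */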
  for (i = 0; i < nthreads; ++i)
    {
      assert(pthread_mutex_lock (&tcs[i].mutex_end) == 0);
      assert(pthread_mutex_unlock (&tcs[i].mutex_start) == 0);
      assert(pthread_mutex_lock (&tcs[i].mutex_started) == 0);
      assert(pthread_mutex_unlock (&tcs[i].mutex_started) == 0);
    }

  /*
   * Now the threads do their work. Re-arm each 'start' gate, open the
   * 'end' gate, then wait (via 'ended') until the thread has finished.
   */
  for (i = 0; i < nthreads; ++i)
    {
      assert(pthread_mutex_lock (&tcs[i].mutex_start) == 0);
      assert(pthread_mutex_unlock (&tcs[i].mutex_end) == 0);
      assert(pthread_mutex_lock (&tcs[i].mutex_ended) == 0);
      assert(pthread_mutex_unlock (&tcs[i].mutex_ended) == 0);
    }
}

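/*
 * Schedule the whole queue twice, just to exercise the handshake logic.
 */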
static void
dowork (void)
{
  todo = 0;
  dosync();

  todo = 0;
  dosync();
}

int
main (int argc, char *argv[])
{
  int		i;

  assert(NULL != (tcs = (TC *) calloc (nthreads, sizeof (*tcs))));

  /*
   * Launch threads
   */
  for (i = 0; i < nthreads; ++i)
    {
      tcs[i].id = i;

      assert(pthread_mutex_init (&tcs[i].mutex_start, NULL) == 0);
      assert(pthread_mutex_init (&tcs[i].mutex_started, NULL) == 0);
      assert(pthread_mutex_init (&tcs[i].mutex_end, NULL) == 0);
      assert(pthread_mutex_init (&tcs[i].mutex_ended, NULL) == 0);

      tcs[i].work = 0;

      assert(pthread_mutex_lock (&tcs[i].mutex_start) == 0);
      assert((tcs[i].stat =
	      pthread_create (&tcs[i].thread,
			      NULL,
			      print_server,
			      (void *) &tcs[i])
	      ) == 0);

      /*
       * Wait for thread initialisation: spin with trylock until the new
       * thread holds its 'started' mutex (trylock then returns EBUSY).
       */
      {
	int trylock = 0;

	while (trylock == 0)
	  {
	    trylock = pthread_mutex_trylock(&tcs[i].mutex_started);
	    assert(trylock == 0 || trylock == EBUSY);

	    if (trylock == 0)
	      {
		assert(pthread_mutex_unlock (&tcs[i].mutex_started) == 0);
	      }
	  }
      }
    }

  dowork ();

  /*
   * Terminate threads
   */
  todo = -2;	/* please terminate */
  dosync();

  for (i = 0; i < nthreads; ++i)
    {
      if (0 == tcs[i].stat)
	assert(pthread_join (tcs[i].thread, NULL) == 0);
    }

  /*
   * destroy locks
   */
  assert(pthread_mutex_destroy (&mutex_stdout) == 0);
  assert(pthread_mutex_destroy (&mutex_todo) == 0);

  /*
   * Cleanup
   */
  printf ("\n");

  /*
   * Show results
   */
  for (i = 0; i < nthreads; ++i)
    {
      printf ("%2d ", i);
      if (0 == tcs[i].stat)
	printf ("%10ld\n", tcs[i].work);
      else
	printf ("failed %d\n", tcs[i].stat);

      assert(pthread_mutex_unlock(&tcs[i].mutex_start) == 0);

      assert(pthread_mutex_destroy (&tcs[i].mutex_start) == 0);
      assert(pthread_mutex_destroy (&tcs[i].mutex_started) == 0);
      assert(pthread_mutex_destroy (&tcs[i].mutex_end) == 0);
      assert(pthread_mutex_destroy (&tcs[i].mutex_ended) == 0);
    }

  die (0);

  return (0);
}