/* Locking in multithreaded situations.
   Copyright (C) 2005-2012 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2005.
   Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
   gthr-win32.h.  */

#include <config.h>

#include "glthread/lock.h"
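/* Usage sketch: client code is expected to use the type-generic macros from
   "glthread/lock.h" rather than calling the glthread_* functions below
   directly.  The macro names here follow that header:

     gl_lock_define_initialized (static, my_lock)

     void
     critical_section (void)
     {
       gl_lock_lock (my_lock);
       ... access the shared data ...
       gl_lock_unlock (my_lock);
     }

   The macros dispatch to the POSIX, Pth, Solaris or Windows implementation
   selected by the USE_*_THREADS conditionals in this file.  */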

/* ========================================================================= */

#if USE_POSIX_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

# if HAVE_PTHREAD_RWLOCK

#  if !defined PTHREAD_RWLOCK_INITIALIZER

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_rwlock_init (&lock->rwlock, NULL);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_rdlock (&lock->rwlock);
}
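/* The pattern above implements on-demand initialization for rwlocks defined
   without a static initializer: lock->initialized is checked once without
   the guard as a fast path and once more while holding lock->guard, so that
   exactly one thread performs the initialization.  lock->guard is presumably
   a statically initialized pthread_mutex_t, provided by the gl_rwlock_define
   macros in "glthread/lock.h".  */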

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_rwlock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_rwlock_wrlock (&lock->rwlock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_rwlock_unlock (&lock->rwlock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_rwlock_destroy (&lock->rwlock);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else

int
glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->lock, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_readers, NULL);
  if (err != 0)
    return err;
  err = pthread_cond_init (&lock->waiting_writers, NULL);
  if (err != 0)
    return err;
  lock->waiting_writers_count = 0;
  lock->runcount = 0;
  return 0;
}
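/* In this fallback implementation the rwlock state lives in lock->runcount:
     runcount > 0   -- that many readers currently hold the lock,
     runcount == 0  -- the lock is free,
     runcount == -1 -- exactly one writer holds the lock.
   lock->waiting_writers_count both holds back newly arriving readers
   (writer priority) and tells the unlock code whether to signal
   waiting_writers or to broadcast waiting_readers.  */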

int
glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow.  */
  /* POSIX says: "It is implementation-defined whether the calling thread
     acquires the lock when a writer does not hold the lock and there are
     writers blocked on the lock."  Let's say, no: give the writers a higher
     priority.  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
      if (err != 0)
        {
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
    }
  lock->runcount++;
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      lock->waiting_writers_count++;
      err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
      if (err != 0)
        {
          lock->waiting_writers_count--;
          pthread_mutex_unlock (&lock->lock);
          return err;
        }
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_lock (&lock->lock);
  if (err != 0)
    return err;
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          pthread_mutex_unlock (&lock->lock);
          return EINVAL;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          err = pthread_cond_signal (&lock->waiting_writers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
      else
        {
          /* Wake up all waiting readers.  */
          err = pthread_cond_broadcast (&lock->waiting_readers);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->lock);
              return err;
            }
        }
    }
  return pthread_mutex_unlock (&lock->lock);
}

int
glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
{
  int err;

  err = pthread_mutex_destroy (&lock->lock);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_readers);
  if (err != 0)
    return err;
  err = pthread_cond_destroy (&lock->waiting_writers);
  if (err != 0)
    return err;
  return 0;
}

# endif

/* --------------------- gl_recursive_lock_t datatype --------------------- */

# if HAVE_PTHREAD_MUTEX_RECURSIVE

#  if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (lock, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  return 0;
}

#  else

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;
  int err;

  err = pthread_mutexattr_init (&attributes);
  if (err != 0)
    return err;
  err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutex_init (&lock->recmutex, &attributes);
  if (err != 0)
    {
      pthread_mutexattr_destroy (&attributes);
      return err;
    }
  err = pthread_mutexattr_destroy (&attributes);
  if (err != 0)
    return err;
  lock->initialized = 1;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    {
      int err;

      err = pthread_mutex_lock (&lock->guard);
      if (err != 0)
        return err;
      if (!lock->initialized)
        {
          err = glthread_recursive_lock_init_multithreaded (lock);
          if (err != 0)
            {
              pthread_mutex_unlock (&lock->guard);
              return err;
            }
        }
      err = pthread_mutex_unlock (&lock->guard);
      if (err != 0)
        return err;
    }
  return pthread_mutex_lock (&lock->recmutex);
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    return EINVAL;
  return pthread_mutex_unlock (&lock->recmutex);
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  if (!lock->initialized)
    return EINVAL;
  err = pthread_mutex_destroy (&lock->recmutex);
  if (err != 0)
    return err;
  lock->initialized = 0;
  return 0;
}

#  endif

# else

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = pthread_mutex_init (&lock->mutex, NULL);
  if (err != 0)
    return err;
  lock->owner = (pthread_t) 0;
  lock->depth = 0;
  return 0;
}
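/* Without PTHREAD_MUTEX_RECURSIVE, recursion is emulated on top of a normal
   mutex: lock->owner records the thread currently holding lock->mutex and
   lock->depth counts how many times that thread has acquired the lock.
   Only the outermost unlock releases the mutex; an overflow of the depth
   counter is reported as EAGAIN.  */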

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  pthread_t self = pthread_self ();
  if (lock->owner != self)
    {
      int err;

      err = pthread_mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != pthread_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (pthread_t) 0;
      return pthread_mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (pthread_t) 0)
    return EBUSY;
  return pthread_mutex_destroy (&lock->mutex);
}

# endif

/* -------------------------- gl_once_t datatype -------------------------- */

static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* We don't know whether pthread_once_t is an integer type, a floating-point
     type, a pointer type, or a structure type.  */
  char *firstbyte = (char *)once_control;
  if (*firstbyte == *(const char *)&fresh_once)
    {
      /* First time use of once_control.  Invert the first byte.  */
      *firstbyte = ~ *(const char *)&fresh_once;
      return 1;
    }
  else
    return 0;
}
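/* This function is presumably reached only when the gl_once macro has
   determined that the program is still single-threaded, so the plain,
   non-atomic byte flip above is enough to mark the once_control as already
   used and make the next call return 0.  */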

#endif

/* ========================================================================= */

#if USE_PTH_THREADS

/* Use the GNU Pth threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* --------------------- gl_recursive_lock_t datatype --------------------- */

/* -------------------------- gl_once_t datatype -------------------------- */

static void
glthread_once_call (void *arg)
{
  void (**gl_once_temp_addr) (void) = (void (**) (void)) arg;
  void (*initfunction) (void) = *gl_once_temp_addr;
  initfunction ();
}

int
glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
{
  void (*temp) (void) = initfunction;
  return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
}
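/* The glthread_once_call wrapper exists because pth_once passes a void *
   argument to its callback, while the gnulib API uses a plain
   void (*) (void) init function; a pointer to the local variable 'temp'
   carries the function pointer across the pth_once call.  */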

int
glthread_once_singlethreaded (pth_once_t *once_control)
{
  /* We know that pth_once_t is an integer type.  */
  if (*once_control == PTH_ONCE_INIT)
    {
      /* First time use of once_control.  Invert the marker.  */
      *once_control = ~ PTH_ONCE_INIT;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_SOLARIS_THREADS

/* Use the old Solaris threads library.  */

/* -------------------------- gl_lock_t datatype -------------------------- */

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* --------------------- gl_recursive_lock_t datatype --------------------- */

int
glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
{
  int err;

  err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
  if (err != 0)
    return err;
  lock->owner = (thread_t) 0;
  lock->depth = 0;
  return 0;
}

int
glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
{
  thread_t self = thr_self ();
  if (lock->owner != self)
    {
      int err;

      err = mutex_lock (&lock->mutex);
      if (err != 0)
        return err;
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    {
      lock->depth--;
      return EAGAIN;
    }
  return 0;
}

int
glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != thr_self ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = (thread_t) 0;
      return mutex_unlock (&lock->mutex);
    }
  else
    return 0;
}

int
glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
{
  if (lock->owner != (thread_t) 0)
    return EBUSY;
  return mutex_destroy (&lock->mutex);
}

/* -------------------------- gl_once_t datatype -------------------------- */

int
glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
{
  if (!once_control->inited)
    {
      int err;

      /* Use the mutex to guarantee that if another thread is already calling
         the initfunction, this thread waits until it's finished.  */
      err = mutex_lock (&once_control->mutex);
      if (err != 0)
        return err;
      if (!once_control->inited)
        {
          once_control->inited = 1;
          initfunction ();
        }
      return mutex_unlock (&once_control->mutex);
    }
  else
    return 0;
}

int
glthread_once_singlethreaded (gl_once_t *once_control)
{
  /* We know that gl_once_t contains an integer type.  */
  if (!once_control->inited)
    {
      /* First time use of once_control.  Invert the marker.  */
      once_control->inited = ~ 0;
      return 1;
    }
  else
    return 0;
}

#endif

/* ========================================================================= */

#if USE_WINDOWS_THREADS

/* -------------------------- gl_lock_t datatype -------------------------- */

void
glthread_lock_init_func (gl_lock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}

int
glthread_lock_lock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  return 0;
}
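/* The same lazy-initialization guard recurs in the rwlock and recursive-lock
   code below.  guard.started presumably starts out as -1 (from the
   gl_*_define macros in "glthread/lock.h"), so the one thread whose
   InterlockedIncrement returns 0 performs the initialization while every
   other thread spins on guard.done.  */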

int
glthread_lock_unlock_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_lock_destroy_func (gl_lock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}

/* ------------------------- gl_rwlock_t datatype ------------------------- */

/* In this file, the waitqueues are implemented as circular arrays.  */
#define gl_waitqueue_t gl_carray_waitqueue_t

static void
gl_waitqueue_init (gl_waitqueue_t *wq)
{
  wq->array = NULL;
  wq->count = 0;
  wq->alloc = 0;
  wq->offset = 0;
}

/* Enqueues the current thread, represented by an event, in a wait queue.
   Returns INVALID_HANDLE_VALUE if an allocation failure occurs.  */
static HANDLE
gl_waitqueue_add (gl_waitqueue_t *wq)
{
  HANDLE event;
  unsigned int index;

  if (wq->count == wq->alloc)
    {
      unsigned int new_alloc = 2 * wq->alloc + 1;
      HANDLE *new_array =
        (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
      if (new_array == NULL)
        /* No more memory.  */
        return INVALID_HANDLE_VALUE;
      /* Now is a good opportunity to rotate the array so that its contents
         starts at offset 0.  */
      if (wq->offset > 0)
        {
          unsigned int old_count = wq->count;
          unsigned int old_alloc = wq->alloc;
          unsigned int old_offset = wq->offset;
          unsigned int i;
          if (old_offset + old_count > old_alloc)
            {
              unsigned int limit = old_offset + old_count - old_alloc;
              for (i = 0; i < limit; i++)
                new_array[old_alloc + i] = new_array[i];
            }
          for (i = 0; i < old_count; i++)
            new_array[i] = new_array[old_offset + i];
          wq->offset = 0;
        }
      wq->array = new_array;
      wq->alloc = new_alloc;
    }
  /* Whether the created event is a manual-reset one or an auto-reset one
     does not matter, since we will wait on it only once.  */
  event = CreateEvent (NULL, TRUE, FALSE, NULL);
  if (event == NULL)
    /* No way to allocate an event.  CreateEvent returns NULL, not
       INVALID_HANDLE_VALUE, on failure; report it with the value the
       callers test for.  */
    return INVALID_HANDLE_VALUE;
  index = wq->offset + wq->count;
  if (index >= wq->alloc)
    index -= wq->alloc;
  wq->array[index] = event;
  wq->count++;
  return event;
}
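/* Worked example of the rotation above: with alloc = 4, offset = 3 and
   count = 3, the live entries sit at indices 3, 0 and 1 (the queue wraps).
   On the next gl_waitqueue_add the array grows to 2 * 4 + 1 = 9 slots; the
   wrapped entries at indices 0 and 1 are first copied to indices 4 and 5,
   making the queue contiguous at 3..5, then the whole block is shifted down
   to 0..2 and offset is reset to 0, so the new event lands at index 3.  */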

/* Notifies the first thread from a wait queue and dequeues it.  */
static void
gl_waitqueue_notify_first (gl_waitqueue_t *wq)
{
  SetEvent (wq->array[wq->offset + 0]);
  wq->offset++;
  wq->count--;
  if (wq->count == 0 || wq->offset == wq->alloc)
    wq->offset = 0;
}

/* Notifies all threads from a wait queue and dequeues them all.  */
static void
gl_waitqueue_notify_all (gl_waitqueue_t *wq)
{
  unsigned int i;

  for (i = 0; i < wq->count; i++)
    {
      unsigned int index = wq->offset + i;
      if (index >= wq->alloc)
        index -= wq->alloc;
      SetEvent (wq->array[index]);
    }
  wq->count = 0;
  wq->offset = 0;
}

void
glthread_rwlock_init_func (gl_rwlock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  gl_waitqueue_init (&lock->waiting_readers);
  gl_waitqueue_init (&lock->waiting_writers);
  lock->runcount = 0;
  lock->guard.done = 1;
}

int
glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow.  */
  if (!(lock->runcount + 1 > 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount.  */
          if (!(lock->runcount > 0))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1.  */
          if (!(lock->runcount == -1))
            abort ();
          return 0;
        }
      else
        {
          /* Allocation failure.  Weird.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
  return 0;
}

int
glthread_rwlock_unlock_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        {
          LeaveCriticalSection (&lock->lock);
          return EPERM;
        }
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers.  */
          lock->runcount--;
          gl_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers.  */
          lock->runcount += lock->waiting_readers.count;
          gl_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
  return 0;
}
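/* Unlike the POSIX condition-variable implementation above, the unlocking
   thread here does the bookkeeping on behalf of the threads it wakes: it
   sets runcount to -1 before signalling one waiting writer, or adds
   waiting_readers.count before waking all waiting readers.  The woken
   threads therefore only close their event handle and return, without
   re-entering the critical section.  */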

int
glthread_rwlock_destroy_func (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    return EINVAL;
  if (lock->runcount != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  if (lock->waiting_readers.array != NULL)
    free (lock->waiting_readers.array);
  if (lock->waiting_writers.array != NULL)
    free (lock->waiting_writers.array);
  lock->guard.done = 0;
  return 0;
}

/* --------------------- gl_recursive_lock_t datatype --------------------- */

void
glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
{
  lock->owner = 0;
  lock->depth = 0;
  InitializeCriticalSection (&lock->lock);
  lock->guard.done = 1;
}

int
glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_recursive_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  {
    DWORD self = GetCurrentThreadId ();
    if (lock->owner != self)
      {
        EnterCriticalSection (&lock->lock);
        lock->owner = self;
      }
    if (++(lock->depth) == 0) /* wraparound? */
      {
        lock->depth--;
        return EAGAIN;
      }
  }
  return 0;
}

int
glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != GetCurrentThreadId ())
    return EPERM;
  if (lock->depth == 0)
    return EINVAL;
  if (--(lock->depth) == 0)
    {
      lock->owner = 0;
      LeaveCriticalSection (&lock->lock);
    }
  return 0;
}

int
glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
{
  if (lock->owner != 0)
    return EBUSY;
  DeleteCriticalSection (&lock->lock);
  lock->guard.done = 0;
  return 0;
}

/* -------------------------- gl_once_t datatype -------------------------- */

void
glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
{
  if (once_control->inited <= 0)
    {
      if (InterlockedIncrement (&once_control->started) == 0)
        {
          /* This thread is the first one to come to this once_control.  */
          InitializeCriticalSection (&once_control->lock);
          EnterCriticalSection (&once_control->lock);
          once_control->inited = 0;
          initfunction ();
          once_control->inited = 1;
          LeaveCriticalSection (&once_control->lock);
        }
      else
        {
          /* Undo last operation.  */
          InterlockedDecrement (&once_control->started);
          /* Some other thread has already started the initialization.
             Yield the CPU while waiting for the other thread to finish
             initializing and taking the lock.  */
          while (once_control->inited < 0)
            Sleep (0);
          if (once_control->inited <= 0)
            {
              /* Take the lock.  This blocks until the other thread has
                 finished calling the initfunction.  */
              EnterCriticalSection (&once_control->lock);
              LeaveCriticalSection (&once_control->lock);
              if (!(once_control->inited > 0))
                abort ();
            }
        }
    }
}
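/* once_control->started and once_control->inited presumably start out
   negative (via gl_once_define in "glthread/lock.h").  The first thread, the
   one whose InterlockedIncrement on 'started' yields 0, creates and enters
   the critical section and moves 'inited' from negative to 0 and finally to
   1 around the initfunction call.  Late arrivals spin until 'inited' is no
   longer negative, which guarantees that the critical section exists, and
   then block on it until the initialization is complete.  */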

#endif

/* ========================================================================= */