1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
3 *
4 * gthread.c: posix thread system implementation
5 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 /*
22 * Modified by the GLib Team and others 1997-2000. See the AUTHORS
23 * file for a list of people on the GLib Team. See the ChangeLog
24 * files for a list of changes. These files are distributed with
25 * GLib at ftp://ftp.gtk.org/pub/gtk/.
26 */
27
28 /* The GMutex, GCond and GPrivate implementations in this file are some
29 * of the lowest-level code in GLib. All other parts of GLib (messages,
30 * memory, slices, etc) assume that they can freely use these facilities
31 * without risking recursion.
32 *
33 * As such, these functions are NOT permitted to call any other part of
34 * GLib.
35 *
36 * The thread manipulation functions (create, exit, join, etc.) have
37 * more freedom -- they can do as they please.
38 */
39
40 #include "config.h"
41
42 #include "gthread.h"
43
44 #include "gthreadprivate.h"
45 #include "gslice.h"
46 #include "gmessages.h"
47 #include "gstrfuncs.h"
48 #include "gmain.h"
49 #include "gutils.h"
50
51 #include <stdlib.h>
52 #include <stdio.h>
53 #include <string.h>
54 #include <errno.h>
55 #include <pthread.h>
56
57 #include <sys/time.h>
58 #include <unistd.h>
59
60 #ifdef HAVE_PTHREAD_SET_NAME_NP
61 #include <pthread_np.h>
62 #endif
63 #ifdef HAVE_SCHED_H
64 #include <sched.h>
65 #endif
66 #ifdef G_OS_WIN32
67 #include <windows.h>
68 #endif
69
70 /* clang defines __ATOMIC_SEQ_CST but doesn't support the GCC extension */
71 #if defined(HAVE_FUTEX) && defined(__ATOMIC_SEQ_CST) && !defined(__clang__)
72 #define USE_NATIVE_MUTEX
73 #endif
74
75 static void
g_thread_abort(gint status,const gchar * function)76 g_thread_abort (gint status,
77 const gchar *function)
78 {
79 fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s. Aborting.\n",
80 function, strerror (status));
81 g_abort ();
82 }
83
84 /* {{{1 GMutex */
85
86 #if !defined(USE_NATIVE_MUTEX)
87
88 static pthread_mutex_t *
g_mutex_impl_new(void)89 g_mutex_impl_new (void)
90 {
91 pthread_mutexattr_t *pattr = NULL;
92 pthread_mutex_t *mutex;
93 gint status;
94 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
95 pthread_mutexattr_t attr;
96 #endif
97
98 mutex = malloc (sizeof (pthread_mutex_t));
99 if G_UNLIKELY (mutex == NULL)
100 g_thread_abort (errno, "malloc");
101
102 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
103 pthread_mutexattr_init (&attr);
104 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
105 pattr = &attr;
106 #endif
107
108 if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
109 g_thread_abort (status, "pthread_mutex_init");
110
111 #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
112 pthread_mutexattr_destroy (&attr);
113 #endif
114
115 return mutex;
116 }
117
/* Destroy a pthread mutex created by g_mutex_impl_new() and release
 * its malloc()ed storage. */
static void
g_mutex_impl_free (pthread_mutex_t *impl)
{
  pthread_mutex_destroy (impl);
  free (impl);
}
124
/* Return the pthread mutex backing @mutex, lazily creating it on first
 * use.  This is what makes a zero-initialised (statically allocated)
 * GMutex work without an explicit g_mutex_init() call. */
static inline pthread_mutex_t *
g_mutex_get_impl (GMutex *mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      /* First use: allocate an implementation and try to publish it
       * atomically.  If another thread won the race, discard ours and
       * re-read the winner's pointer. */
      impl = g_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
        g_mutex_impl_free (impl);
      impl = mutex->p;
    }

  return impl;
}
140
141
142 /**
143 * g_mutex_init:
144 * @mutex: an uninitialized #GMutex
145 *
146 * Initializes a #GMutex so that it can be used.
147 *
148 * This function is useful to initialize a mutex that has been
149 * allocated on the stack, or as part of a larger structure.
150 * It is not necessary to initialize a mutex that has been
151 * statically allocated.
152 *
153 * |[<!-- language="C" -->
154 * typedef struct {
155 * GMutex m;
156 * ...
157 * } Blob;
158 *
159 * Blob *b;
160 *
161 * b = g_new (Blob, 1);
162 * g_mutex_init (&b->m);
163 * ]|
164 *
165 * To undo the effect of g_mutex_init() when a mutex is no longer
166 * needed, use g_mutex_clear().
167 *
168 * Calling g_mutex_init() on an already initialized #GMutex leads
169 * to undefined behaviour.
170 *
171 * Since: 2.32
172 */
void
g_mutex_init (GMutex *mutex)
{
  /* Eagerly allocate the implementation; g_mutex_clear() releases it. */
  mutex->p = g_mutex_impl_new ();
}
178
179 /**
180 * g_mutex_clear:
181 * @mutex: an initialized #GMutex
182 *
183 * Frees the resources allocated to a mutex with g_mutex_init().
184 *
185 * This function should not be used with a #GMutex that has been
186 * statically allocated.
187 *
188 * Calling g_mutex_clear() on a locked mutex leads to undefined
189 * behaviour.
190 *
 * Since: 2.32
192 */
void
g_mutex_clear (GMutex *mutex)
{
  /* Frees the backing pthread mutex.  The GMutex must not be used again
   * unless re-initialised with g_mutex_init(). */
  g_mutex_impl_free (mutex->p);
}
198
199 /**
200 * g_mutex_lock:
201 * @mutex: a #GMutex
202 *
203 * Locks @mutex. If @mutex is already locked by another thread, the
204 * current thread will block until @mutex is unlocked by the other
205 * thread.
206 *
207 * #GMutex is neither guaranteed to be recursive nor to be
208 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
209 * already been locked by the same thread results in undefined behaviour
210 * (including but not limited to deadlocks).
211 */
212 void
g_mutex_lock(GMutex * mutex)213 g_mutex_lock (GMutex *mutex)
214 {
215 gint status;
216
217 if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
218 g_thread_abort (status, "pthread_mutex_lock");
219 }
220
221 /**
222 * g_mutex_unlock:
223 * @mutex: a #GMutex
224 *
225 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
226 * call for @mutex, it will become unblocked and can lock @mutex itself.
227 *
228 * Calling g_mutex_unlock() on a mutex that is not locked by the
229 * current thread leads to undefined behaviour.
230 */
231 void
g_mutex_unlock(GMutex * mutex)232 g_mutex_unlock (GMutex *mutex)
233 {
234 gint status;
235
236 if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
237 g_thread_abort (status, "pthread_mutex_unlock");
238 }
239
240 /**
241 * g_mutex_trylock:
242 * @mutex: a #GMutex
243 *
244 * Tries to lock @mutex. If @mutex is already locked by another thread,
245 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
246 * %TRUE.
247 *
248 * #GMutex is neither guaranteed to be recursive nor to be
249 * non-recursive. As such, calling g_mutex_lock() on a #GMutex that has
250 * already been locked by the same thread results in undefined behaviour
251 * (including but not limited to deadlocks or arbitrary return values).
252 *
253 * Returns: %TRUE if @mutex could be locked
254 */
255 gboolean
g_mutex_trylock(GMutex * mutex)256 g_mutex_trylock (GMutex *mutex)
257 {
258 gint status;
259
260 if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
261 return TRUE;
262
263 if G_UNLIKELY (status != EBUSY)
264 g_thread_abort (status, "pthread_mutex_trylock");
265
266 return FALSE;
267 }
268
269 #endif /* !defined(USE_NATIVE_MUTEX) */
270
271 /* {{{1 GRecMutex */
272
273 static pthread_mutex_t *
g_rec_mutex_impl_new(void)274 g_rec_mutex_impl_new (void)
275 {
276 pthread_mutexattr_t attr;
277 pthread_mutex_t *mutex;
278
279 mutex = malloc (sizeof (pthread_mutex_t));
280 if G_UNLIKELY (mutex == NULL)
281 g_thread_abort (errno, "malloc");
282
283 pthread_mutexattr_init (&attr);
284 pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
285 pthread_mutex_init (mutex, &attr);
286 pthread_mutexattr_destroy (&attr);
287
288 return mutex;
289 }
290
/* Destroy a recursive pthread mutex created by g_rec_mutex_impl_new()
 * and release its storage. */
static void
g_rec_mutex_impl_free (pthread_mutex_t *impl)
{
  pthread_mutex_destroy (impl);
  free (impl);
}
297
/* Return the pthread mutex backing @rec_mutex, lazily creating it on
 * first use so that statically allocated GRecMutexes work without an
 * explicit g_rec_mutex_init(). */
static inline pthread_mutex_t *
g_rec_mutex_get_impl (GRecMutex *rec_mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      /* Race to publish a new implementation; the loser frees its copy
       * and adopts the winner's. */
      impl = g_rec_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
        g_rec_mutex_impl_free (impl);
      impl = rec_mutex->p;
    }

  return impl;
}
313
314 /**
315 * g_rec_mutex_init:
316 * @rec_mutex: an uninitialized #GRecMutex
317 *
318 * Initializes a #GRecMutex so that it can be used.
319 *
320 * This function is useful to initialize a recursive mutex
321 * that has been allocated on the stack, or as part of a larger
322 * structure.
323 *
324 * It is not necessary to initialise a recursive mutex that has been
325 * statically allocated.
326 *
327 * |[<!-- language="C" -->
328 * typedef struct {
329 * GRecMutex m;
330 * ...
331 * } Blob;
332 *
333 * Blob *b;
334 *
335 * b = g_new (Blob, 1);
336 * g_rec_mutex_init (&b->m);
337 * ]|
338 *
339 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
340 * leads to undefined behaviour.
341 *
342 * To undo the effect of g_rec_mutex_init() when a recursive mutex
343 * is no longer needed, use g_rec_mutex_clear().
344 *
345 * Since: 2.32
346 */
void
g_rec_mutex_init (GRecMutex *rec_mutex)
{
  /* Eagerly allocate the implementation; g_rec_mutex_clear() frees it. */
  rec_mutex->p = g_rec_mutex_impl_new ();
}
352
353 /**
354 * g_rec_mutex_clear:
355 * @rec_mutex: an initialized #GRecMutex
356 *
357 * Frees the resources allocated to a recursive mutex with
358 * g_rec_mutex_init().
359 *
360 * This function should not be used with a #GRecMutex that has been
361 * statically allocated.
362 *
363 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
364 * to undefined behaviour.
365 *
 * Since: 2.32
367 */
void
g_rec_mutex_clear (GRecMutex *rec_mutex)
{
  /* Frees the backing recursive mutex; the GRecMutex is unusable until
   * re-initialised with g_rec_mutex_init(). */
  g_rec_mutex_impl_free (rec_mutex->p);
}
373
374 /**
375 * g_rec_mutex_lock:
376 * @rec_mutex: a #GRecMutex
377 *
378 * Locks @rec_mutex. If @rec_mutex is already locked by another
379 * thread, the current thread will block until @rec_mutex is
380 * unlocked by the other thread. If @rec_mutex is already locked
381 * by the current thread, the 'lock count' of @rec_mutex is increased.
382 * The mutex will only become available again when it is unlocked
383 * as many times as it has been locked.
384 *
385 * Since: 2.32
386 */
void
g_rec_mutex_lock (GRecMutex *mutex)
{
  /* The return value is deliberately ignored here; the recursive type
   * permits re-locking by the owner, the common failure mode of plain
   * mutexes. */
  pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
}
392
393 /**
394 * g_rec_mutex_unlock:
395 * @rec_mutex: a #GRecMutex
396 *
397 * Unlocks @rec_mutex. If another thread is blocked in a
398 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
399 * and can lock @rec_mutex itself.
400 *
401 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
402 * locked by the current thread leads to undefined behaviour.
403 *
404 * Since: 2.32
405 */
void
g_rec_mutex_unlock (GRecMutex *rec_mutex)
{
  /* The caller must hold the lock, so it was necessarily initialised
   * already: read 'p' directly instead of going through get_impl(). */
  pthread_mutex_unlock (rec_mutex->p);
}
411
412 /**
413 * g_rec_mutex_trylock:
414 * @rec_mutex: a #GRecMutex
415 *
416 * Tries to lock @rec_mutex. If @rec_mutex is already locked
417 * by another thread, it immediately returns %FALSE. Otherwise
418 * it locks @rec_mutex and returns %TRUE.
419 *
420 * Returns: %TRUE if @rec_mutex could be locked
421 *
422 * Since: 2.32
423 */
424 gboolean
g_rec_mutex_trylock(GRecMutex * rec_mutex)425 g_rec_mutex_trylock (GRecMutex *rec_mutex)
426 {
427 if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
428 return FALSE;
429
430 return TRUE;
431 }
432
433 /* {{{1 GRWLock */
434
435 static pthread_rwlock_t *
g_rw_lock_impl_new(void)436 g_rw_lock_impl_new (void)
437 {
438 pthread_rwlock_t *rwlock;
439 gint status;
440
441 rwlock = malloc (sizeof (pthread_rwlock_t));
442 if G_UNLIKELY (rwlock == NULL)
443 g_thread_abort (errno, "malloc");
444
445 if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
446 g_thread_abort (status, "pthread_rwlock_init");
447
448 return rwlock;
449 }
450
451 static void
g_rw_lock_impl_free(pthread_rwlock_t * rwlock)452 g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
453 {
454 pthread_rwlock_destroy (rwlock);
455 free (rwlock);
456 }
457
/* Return the pthread rwlock backing @lock, lazily creating it on first
 * use so statically allocated GRWLocks need no explicit init. */
static inline pthread_rwlock_t *
g_rw_lock_get_impl (GRWLock *lock)
{
  pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);

  if G_UNLIKELY (impl == NULL)
    {
      /* Race to install a new implementation; the loser frees its copy
       * and uses the pointer that actually got installed. */
      impl = g_rw_lock_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
        g_rw_lock_impl_free (impl);
      impl = lock->p;
    }

  return impl;
}
473
474 /**
475 * g_rw_lock_init:
476 * @rw_lock: an uninitialized #GRWLock
477 *
478 * Initializes a #GRWLock so that it can be used.
479 *
480 * This function is useful to initialize a lock that has been
481 * allocated on the stack, or as part of a larger structure. It is not
482 * necessary to initialise a reader-writer lock that has been statically
483 * allocated.
484 *
485 * |[<!-- language="C" -->
486 * typedef struct {
487 * GRWLock l;
488 * ...
489 * } Blob;
490 *
491 * Blob *b;
492 *
493 * b = g_new (Blob, 1);
494 * g_rw_lock_init (&b->l);
495 * ]|
496 *
497 * To undo the effect of g_rw_lock_init() when a lock is no longer
498 * needed, use g_rw_lock_clear().
499 *
500 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
501 * to undefined behaviour.
502 *
503 * Since: 2.32
504 */
void
g_rw_lock_init (GRWLock *rw_lock)
{
  /* Eagerly allocate the implementation; g_rw_lock_clear() frees it. */
  rw_lock->p = g_rw_lock_impl_new ();
}
510
511 /**
512 * g_rw_lock_clear:
513 * @rw_lock: an initialized #GRWLock
514 *
515 * Frees the resources allocated to a lock with g_rw_lock_init().
516 *
517 * This function should not be used with a #GRWLock that has been
518 * statically allocated.
519 *
520 * Calling g_rw_lock_clear() when any thread holds the lock
521 * leads to undefined behaviour.
522 *
 * Since: 2.32
524 */
void
g_rw_lock_clear (GRWLock *rw_lock)
{
  /* Frees the backing rwlock; the GRWLock is unusable until
   * re-initialised with g_rw_lock_init(). */
  g_rw_lock_impl_free (rw_lock->p);
}
530
531 /**
532 * g_rw_lock_writer_lock:
533 * @rw_lock: a #GRWLock
534 *
535 * Obtain a write lock on @rw_lock. If any thread already holds
536 * a read or write lock on @rw_lock, the current thread will block
537 * until all other threads have dropped their locks on @rw_lock.
538 *
539 * Since: 2.32
540 */
541 void
g_rw_lock_writer_lock(GRWLock * rw_lock)542 g_rw_lock_writer_lock (GRWLock *rw_lock)
543 {
544 int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));
545
546 if (retval != 0)
547 g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
548 }
549
550 /**
551 * g_rw_lock_writer_trylock:
552 * @rw_lock: a #GRWLock
553 *
554 * Tries to obtain a write lock on @rw_lock. If any other thread holds
555 * a read or write lock on @rw_lock, it immediately returns %FALSE.
556 * Otherwise it locks @rw_lock and returns %TRUE.
557 *
558 * Returns: %TRUE if @rw_lock could be locked
559 *
560 * Since: 2.32
561 */
562 gboolean
g_rw_lock_writer_trylock(GRWLock * rw_lock)563 g_rw_lock_writer_trylock (GRWLock *rw_lock)
564 {
565 if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
566 return FALSE;
567
568 return TRUE;
569 }
570
571 /**
572 * g_rw_lock_writer_unlock:
573 * @rw_lock: a #GRWLock
574 *
575 * Release a write lock on @rw_lock.
576 *
577 * Calling g_rw_lock_writer_unlock() on a lock that is not held
578 * by the current thread leads to undefined behaviour.
579 *
580 * Since: 2.32
581 */
void
g_rw_lock_writer_unlock (GRWLock *rw_lock)
{
  /* POSIX uses a single unlock call for both reader and writer holds. */
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
587
588 /**
589 * g_rw_lock_reader_lock:
590 * @rw_lock: a #GRWLock
591 *
592 * Obtain a read lock on @rw_lock. If another thread currently holds
593 * the write lock on @rw_lock, the current thread will block. If another thread
594 * does not hold the write lock, but is waiting for it, it is implementation
595 * defined whether the reader or writer will block. Read locks can be taken
596 * recursively.
597 *
598 * It is implementation-defined how many threads are allowed to
599 * hold read locks on the same lock simultaneously. If the limit is hit,
600 * or if a deadlock is detected, a critical warning will be emitted.
601 *
602 * Since: 2.32
603 */
604 void
g_rw_lock_reader_lock(GRWLock * rw_lock)605 g_rw_lock_reader_lock (GRWLock *rw_lock)
606 {
607 int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));
608
609 if (retval != 0)
610 g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
611 }
612
613 /**
614 * g_rw_lock_reader_trylock:
615 * @rw_lock: a #GRWLock
616 *
617 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
618 * the read lock was successfully obtained. Otherwise it
619 * returns %FALSE.
620 *
621 * Returns: %TRUE if @rw_lock could be locked
622 *
623 * Since: 2.32
624 */
625 gboolean
g_rw_lock_reader_trylock(GRWLock * rw_lock)626 g_rw_lock_reader_trylock (GRWLock *rw_lock)
627 {
628 if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
629 return FALSE;
630
631 return TRUE;
632 }
633
634 /**
635 * g_rw_lock_reader_unlock:
636 * @rw_lock: a #GRWLock
637 *
638 * Release a read lock on @rw_lock.
639 *
640 * Calling g_rw_lock_reader_unlock() on a lock that is not held
641 * by the current thread leads to undefined behaviour.
642 *
643 * Since: 2.32
644 */
void
g_rw_lock_reader_unlock (GRWLock *rw_lock)
{
  /* POSIX uses a single unlock call for both reader and writer holds. */
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}
650
651 /* {{{1 GCond */
652
653 #if !defined(USE_NATIVE_MUTEX)
654
/* Allocate and initialise the pthread condition variable backing a
 * GCond.  Aborts on any failure; returns the new cond. */
static pthread_cond_t *
g_cond_impl_new (void)
{
  pthread_condattr_t attr;
  pthread_cond_t *cond;
  gint status;

  pthread_condattr_init (&attr);

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* Relative timed waits don't depend on which clock the condattr
   * uses, so nothing to configure here. */
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  /* Use the monotonic clock so that g_cond_wait_until() deadlines are
   * unaffected by wall-clock adjustments. */
  if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
    g_thread_abort (status, "pthread_condattr_setclock");
#else
#error Cannot support GCond on your platform.
#endif

  /* malloc() rather than GLib allocators: GLib depends on GCond. */
  cond = malloc (sizeof (pthread_cond_t));
  if G_UNLIKELY (cond == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
    g_thread_abort (status, "pthread_cond_init");

  pthread_condattr_destroy (&attr);

  return cond;
}
683
/* Destroy a pthread condition variable created by g_cond_impl_new()
 * and release its storage. */
static void
g_cond_impl_free (pthread_cond_t *impl)
{
  pthread_cond_destroy (impl);
  free (impl);
}
690
/* Return the pthread cond backing @cond, lazily creating it on first
 * use so statically allocated GConds work without g_cond_init(). */
static inline pthread_cond_t *
g_cond_get_impl (GCond *cond)
{
  pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);

  if G_UNLIKELY (impl == NULL)
    {
      /* Race to install a new implementation; the loser frees its copy
       * and adopts the winner's pointer. */
      impl = g_cond_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
        g_cond_impl_free (impl);
      impl = cond->p;
    }

  return impl;
}
706
707 /**
708 * g_cond_init:
709 * @cond: an uninitialized #GCond
710 *
711 * Initialises a #GCond so that it can be used.
712 *
713 * This function is useful to initialise a #GCond that has been
714 * allocated as part of a larger structure. It is not necessary to
715 * initialise a #GCond that has been statically allocated.
716 *
717 * To undo the effect of g_cond_init() when a #GCond is no longer
718 * needed, use g_cond_clear().
719 *
720 * Calling g_cond_init() on an already-initialised #GCond leads
721 * to undefined behaviour.
722 *
723 * Since: 2.32
724 */
void
g_cond_init (GCond *cond)
{
  /* Eagerly allocate the implementation; g_cond_clear() frees it. */
  cond->p = g_cond_impl_new ();
}
730
731 /**
732 * g_cond_clear:
733 * @cond: an initialised #GCond
734 *
735 * Frees the resources allocated to a #GCond with g_cond_init().
736 *
737 * This function should not be used with a #GCond that has been
738 * statically allocated.
739 *
740 * Calling g_cond_clear() for a #GCond on which threads are
741 * blocking leads to undefined behaviour.
742 *
743 * Since: 2.32
744 */
void
g_cond_clear (GCond *cond)
{
  /* Frees the backing cond; the GCond is unusable until re-initialised
   * with g_cond_init(). */
  g_cond_impl_free (cond->p);
}
750
751 /**
752 * g_cond_wait:
753 * @cond: a #GCond
754 * @mutex: a #GMutex that is currently locked
755 *
756 * Atomically releases @mutex and waits until @cond is signalled.
757 * When this function returns, @mutex is locked again and owned by the
758 * calling thread.
759 *
760 * When using condition variables, it is possible that a spurious wakeup
761 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
762 * not called). It's also possible that a stolen wakeup may occur.
763 * This is when g_cond_signal() is called, but another thread acquires
764 * @mutex before this thread and modifies the state of the program in
765 * such a way that when g_cond_wait() is able to return, the expected
766 * condition is no longer met.
767 *
768 * For this reason, g_cond_wait() must always be used in a loop. See
769 * the documentation for #GCond for a complete example.
770 **/
771 void
g_cond_wait(GCond * cond,GMutex * mutex)772 g_cond_wait (GCond *cond,
773 GMutex *mutex)
774 {
775 gint status;
776
777 if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
778 g_thread_abort (status, "pthread_cond_wait");
779 }
780
781 /**
782 * g_cond_signal:
783 * @cond: a #GCond
784 *
785 * If threads are waiting for @cond, at least one of them is unblocked.
786 * If no threads are waiting for @cond, this function has no effect.
787 * It is good practice to hold the same lock as the waiting thread
788 * while calling this function, though not required.
789 */
790 void
g_cond_signal(GCond * cond)791 g_cond_signal (GCond *cond)
792 {
793 gint status;
794
795 if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
796 g_thread_abort (status, "pthread_cond_signal");
797 }
798
799 /**
800 * g_cond_broadcast:
801 * @cond: a #GCond
802 *
803 * If threads are waiting for @cond, all of them are unblocked.
804 * If no threads are waiting for @cond, this function has no effect.
805 * It is good practice to lock the same mutex as the waiting threads
806 * while calling this function, though not required.
807 */
808 void
g_cond_broadcast(GCond * cond)809 g_cond_broadcast (GCond *cond)
810 {
811 gint status;
812
813 if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
814 g_thread_abort (status, "pthread_cond_broadcast");
815 }
816
817 /**
818 * g_cond_wait_until:
819 * @cond: a #GCond
820 * @mutex: a #GMutex that is currently locked
821 * @end_time: the monotonic time to wait until
822 *
823 * Waits until either @cond is signalled or @end_time has passed.
824 *
825 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
826 * could occur. For that reason, waiting on a condition variable should
827 * always be in a loop, based on an explicitly-checked predicate.
828 *
829 * %TRUE is returned if the condition variable was signalled (or in the
830 * case of a spurious wakeup). %FALSE is returned if @end_time has
831 * passed.
832 *
833 * The following code shows how to correctly perform a timed wait on a
834 * condition variable (extending the example presented in the
835 * documentation for #GCond):
836 *
837 * |[<!-- language="C" -->
838 * gpointer
839 * pop_data_timed (void)
840 * {
841 * gint64 end_time;
842 * gpointer data;
843 *
844 * g_mutex_lock (&data_mutex);
845 *
846 * end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
847 * while (!current_data)
848 * if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
849 * {
850 * // timeout has passed.
851 * g_mutex_unlock (&data_mutex);
852 * return NULL;
853 * }
854 *
855 * // there is data for us
856 * data = current_data;
857 * current_data = NULL;
858 *
859 * g_mutex_unlock (&data_mutex);
860 *
861 * return data;
862 * }
863 * ]|
864 *
865 * Notice that the end time is calculated once, before entering the
866 * loop and reused. This is the motivation behind the use of absolute
867 * time on this API -- if a relative time of 5 seconds were passed
868 * directly to the call and a spurious wakeup occurred, the program would
869 * have to start over waiting again (which would lead to a total wait
870 * time of more than 5 seconds).
871 *
872 * Returns: %TRUE on a signal, %FALSE on a timeout
873 * Since: 2.32
874 **/
gboolean
g_cond_wait_until (GCond  *cond,
                   GMutex *mutex,
                   gint64  end_time)
{
  struct timespec ts;
  gint status;

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* end_time is given relative to the monotonic clock as returned by
   * g_get_monotonic_time().
   *
   * Since this pthreads wants the relative time, convert it back again.
   */
  {
    gint64 now = g_get_monotonic_time ();
    gint64 relative;

    /* Deadline already passed: report timeout without waiting. */
    if (end_time <= now)
      return FALSE;

    relative = end_time - now;

    /* Monotonic time is in microseconds; timespec wants seconds plus
     * nanoseconds. */
    ts.tv_sec = relative / 1000000;
    ts.tv_nsec = (relative % 1000000) * 1000;

    if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  /* This is the exact check we used during init to set the clock to
   * monotonic, so if we're in this branch, timedwait() will already be
   * expecting a monotonic clock.
   */
  {
    /* Absolute deadline, converted from microseconds to timespec. */
    ts.tv_sec = end_time / 1000000;
    ts.tv_nsec = (end_time % 1000000) * 1000;

    if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#else
#error Cannot support GCond on your platform.
#endif

  /* ETIMEDOUT is the one expected failure; anything else is fatal. */
  if G_UNLIKELY (status != ETIMEDOUT)
    g_thread_abort (status, "pthread_cond_timedwait");

  return FALSE;
}
925
926 #endif /* defined(USE_NATIVE_MUTEX) */
927
928 /* {{{1 GPrivate */
929
930 /**
931 * GPrivate:
932 *
933 * The #GPrivate struct is an opaque data structure to represent a
934 * thread-local data key. It is approximately equivalent to the
935 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
936 * TlsSetValue()/TlsGetValue() on Windows.
937 *
938 * If you don't already know why you might want this functionality,
939 * then you probably don't need it.
940 *
941 * #GPrivate is a very limited resource (as far as 128 per program,
942 * shared between all libraries). It is also not possible to destroy a
943 * #GPrivate after it has been used. As such, it is only ever acceptable
944 * to use #GPrivate in static scope, and even then sparingly so.
945 *
946 * See G_PRIVATE_INIT() for a couple of examples.
947 *
948 * The #GPrivate structure should be considered opaque. It should only
949 * be accessed via the g_private_ functions.
950 */
951
952 /**
953 * G_PRIVATE_INIT:
954 * @notify: a #GDestroyNotify
955 *
956 * A macro to assist with the static initialisation of a #GPrivate.
957 *
958 * This macro is useful for the case that a #GDestroyNotify function
959 * should be associated with the key. This is needed when the key will be
960 * used to point at memory that should be deallocated when the thread
961 * exits.
962 *
963 * Additionally, the #GDestroyNotify will also be called on the previous
964 * value stored in the key when g_private_replace() is used.
965 *
966 * If no #GDestroyNotify is needed, then use of this macro is not
967 * required -- if the #GPrivate is declared in static scope then it will
968 * be properly initialised by default (ie: to all zeros). See the
969 * examples below.
970 *
971 * |[<!-- language="C" -->
972 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
973 *
974 * // return value should not be freed
975 * const gchar *
976 * get_local_name (void)
977 * {
978 * return g_private_get (&name_key);
979 * }
980 *
981 * void
982 * set_local_name (const gchar *name)
983 * {
984 * g_private_replace (&name_key, g_strdup (name));
985 * }
986 *
987 *
988 * static GPrivate count_key; // no free function
989 *
990 * gint
991 * get_local_count (void)
992 * {
993 * return GPOINTER_TO_INT (g_private_get (&count_key));
994 * }
995 *
996 * void
997 * set_local_count (gint count)
998 * {
999 * g_private_set (&count_key, GINT_TO_POINTER (count));
1000 * }
1001 * ]|
1002 *
1003 * Since: 2.32
1004 **/
1005
1006 static pthread_key_t *
g_private_impl_new(GDestroyNotify notify)1007 g_private_impl_new (GDestroyNotify notify)
1008 {
1009 pthread_key_t *key;
1010 gint status;
1011
1012 key = malloc (sizeof (pthread_key_t));
1013 if G_UNLIKELY (key == NULL)
1014 g_thread_abort (errno, "malloc");
1015 status = pthread_key_create (key, notify);
1016 if G_UNLIKELY (status != 0)
1017 g_thread_abort (status, "pthread_key_create");
1018
1019 return key;
1020 }
1021
1022 static void
g_private_impl_free(pthread_key_t * key)1023 g_private_impl_free (pthread_key_t *key)
1024 {
1025 gint status;
1026
1027 status = pthread_key_delete (*key);
1028 if G_UNLIKELY (status != 0)
1029 g_thread_abort (status, "pthread_key_delete");
1030 free (key);
1031 }
1032
/* Return the pthread TLS key backing @key, lazily creating it on first
 * use so statically initialised GPrivates (including G_PRIVATE_INIT)
 * work without an explicit setup call. */
static inline pthread_key_t *
g_private_get_impl (GPrivate *key)
{
  pthread_key_t *impl = g_atomic_pointer_get (&key->p);

  if G_UNLIKELY (impl == NULL)
    {
      /* Race to install a new key; the loser deletes its key and
       * adopts the winner's. */
      impl = g_private_impl_new (key->notify);
      if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
        {
          g_private_impl_free (impl);
          impl = key->p;
        }
    }

  return impl;
}
1050
1051 /**
1052 * g_private_get:
1053 * @key: a #GPrivate
1054 *
1055 * Returns the current value of the thread local variable @key.
1056 *
1057 * If the value has not yet been set in this thread, %NULL is returned.
1058 * Values are never copied between threads (when a new thread is
1059 * created, for example).
1060 *
1061 * Returns: the thread-local value
1062 */
gpointer
g_private_get (GPrivate *key)
{
  /* Returns NULL if no value has been set in this thread yet. */
  /* quote POSIX: No errors are returned from pthread_getspecific(). */
  return pthread_getspecific (*g_private_get_impl (key));
}
1069
1070 /**
1071 * g_private_set:
1072 * @key: a #GPrivate
1073 * @value: the new value
1074 *
1075 * Sets the thread local variable @key to have the value @value in the
1076 * current thread.
1077 *
1078 * This function differs from g_private_replace() in the following way:
1079 * the #GDestroyNotify for @key is not called on the old value.
1080 */
1081 void
g_private_set(GPrivate * key,gpointer value)1082 g_private_set (GPrivate *key,
1083 gpointer value)
1084 {
1085 gint status;
1086
1087 if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0)
1088 g_thread_abort (status, "pthread_setspecific");
1089 }
1090
1091 /**
1092 * g_private_replace:
1093 * @key: a #GPrivate
1094 * @value: the new value
1095 *
1096 * Sets the thread local variable @key to have the value @value in the
1097 * current thread.
1098 *
1099 * This function differs from g_private_set() in the following way: if
1100 * the previous value was non-%NULL then the #GDestroyNotify handler for
1101 * @key is run on it.
1102 *
1103 * Since: 2.32
1104 **/
1105 void
g_private_replace(GPrivate * key,gpointer value)1106 g_private_replace (GPrivate *key,
1107 gpointer value)
1108 {
1109 pthread_key_t *impl = g_private_get_impl (key);
1110 gpointer old;
1111 gint status;
1112
1113 old = pthread_getspecific (*impl);
1114 if (old && key->notify)
1115 key->notify (old);
1116
1117 if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0)
1118 g_thread_abort (status, "pthread_setspecific");
1119 }
1120
1121 /* {{{1 GThread */
1122
/* Abort with a descriptive g_error() if a pthreads call failed.
 * @err is the call's return value (0 on success, an errno code on
 * failure); @name is a string naming the call for the message. */
#define posix_check_err(err, name) G_STMT_START{			\
  int error = (err); 							\
  if (error)	 		 		 			\
    g_error ("file %s: line %d (%s): error '%s' during '%s'",		\
           __FILE__, __LINE__, G_STRFUNC,				\
           g_strerror (error), name);					\
  }G_STMT_END

/* Run @cmd, aborting on failure with the command text as the name. */
#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)
1132
/* Per-thread record for threads GLib creates on top of pthreads.
 * The GRealThread member comes first so a GThreadPosix* may be cast
 * to/from GRealThread*, as g_system_thread_free() etc. do. */
typedef struct
{
  GRealThread thread;

  pthread_t system_thread; /* underlying pthread handle */
  gboolean joined;         /* TRUE once pthread_join() has been called */
  GMutex lock;             /* serialises concurrent join attempts */
} GThreadPosix;
1141
1142 void
g_system_thread_free(GRealThread * thread)1143 g_system_thread_free (GRealThread *thread)
1144 {
1145 GThreadPosix *pt = (GThreadPosix *) thread;
1146
1147 if (!pt->joined)
1148 pthread_detach (pt->system_thread);
1149
1150 g_mutex_clear (&pt->lock);
1151
1152 g_slice_free (GThreadPosix, pt);
1153 }
1154
1155 GRealThread *
g_system_thread_new(GThreadFunc proxy,gulong stack_size,const char * name,GThreadFunc func,gpointer data,GError ** error)1156 g_system_thread_new (GThreadFunc proxy,
1157 gulong stack_size,
1158 const char *name,
1159 GThreadFunc func,
1160 gpointer data,
1161 GError **error)
1162 {
1163 GThreadPosix *thread;
1164 GRealThread *base_thread;
1165 pthread_attr_t attr;
1166 gint ret;
1167
1168 thread = g_slice_new0 (GThreadPosix);
1169 base_thread = (GRealThread*)thread;
1170 base_thread->ref_count = 2;
1171 base_thread->ours = TRUE;
1172 base_thread->thread.joinable = TRUE;
1173 base_thread->thread.func = func;
1174 base_thread->thread.data = data;
1175 base_thread->name = g_strdup (name);
1176
1177 posix_check_cmd (pthread_attr_init (&attr));
1178
1179 #ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
1180 if (stack_size)
1181 {
1182 #ifdef _SC_THREAD_STACK_MIN
1183 long min_stack_size = sysconf (_SC_THREAD_STACK_MIN);
1184 if (min_stack_size >= 0)
1185 stack_size = MAX ((gulong) min_stack_size, stack_size);
1186 #endif /* _SC_THREAD_STACK_MIN */
1187 /* No error check here, because some systems can't do it and
1188 * we simply don't want threads to fail because of that. */
1189 pthread_attr_setstacksize (&attr, stack_size);
1190 }
1191 #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */
1192
1193 ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))proxy, thread);
1194
1195 posix_check_cmd (pthread_attr_destroy (&attr));
1196
1197 if (ret == EAGAIN)
1198 {
1199 g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
1200 "Error creating thread: %s", g_strerror (ret));
1201 g_slice_free (GThreadPosix, thread);
1202 return NULL;
1203 }
1204
1205 posix_check_err (ret, "pthread_create");
1206
1207 g_mutex_init (&thread->lock);
1208
1209 return (GRealThread *) thread;
1210 }
1211
1212 /**
1213 * g_thread_yield:
1214 *
1215 * Causes the calling thread to voluntarily relinquish the CPU, so
1216 * that other threads can run.
1217 *
1218 * This function is often used as a method to make busy wait less evil.
1219 */
void
g_thread_yield (void)
{
  /* Hand the CPU back to the scheduler; sched_yield() has no failure
   * mode we could usefully report. */
  sched_yield ();
}
1225
/* Blocks until @thread has terminated.  Safe to call from several
 * threads: pt->lock ensures pthread_join() runs at most once, and
 * later callers simply observe pt->joined already set. */
void
g_system_thread_wait (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  g_mutex_lock (&pt->lock);

  if (!pt->joined)
    {
      posix_check_cmd (pthread_join (pt->system_thread, NULL));
      pt->joined = TRUE;
    }

  g_mutex_unlock (&pt->lock);
}
1241
/* Terminates the calling thread with a NULL exit value. */
void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}
1247
/* Sets the OS-level name of the calling thread to @name (for top(1),
 * debuggers, etc.).  Each platform spells this differently, so the
 * variant is selected at configure time; on platforms with none of
 * these, the call is a silent no-op. */
void
g_system_thread_set_name (const gchar *name)
{
#if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
  pthread_setname_np (name); /* on OS X and iOS */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
  pthread_setname_np (pthread_self (), name); /* on Linux and Solaris */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG)
  pthread_setname_np (pthread_self (), "%s", (gchar *) name); /* on NetBSD */
#elif defined(HAVE_PTHREAD_SET_NAME_NP)
  pthread_set_name_np (pthread_self (), name); /* on FreeBSD, DragonFlyBSD, OpenBSD */
#endif
}
1261
1262 /* {{{1 GMutex and GCond futex implementation */
1263
1264 #if defined(USE_NATIVE_MUTEX)
1265
1266 #include <linux/futex.h>
1267 #include <sys/syscall.h>
1268
/* Kernel headers that predate process-private futexes don't define the
 * _PRIVATE variants; fall back to the shared (cross-process) ops, which
 * are functionally equivalent but slightly slower. */
#ifndef FUTEX_WAIT_PRIVATE
#define FUTEX_WAIT_PRIVATE FUTEX_WAIT
#define FUTEX_WAKE_PRIVATE FUTEX_WAKE
#endif
1273
1274 /* We should expand the set of operations available in gatomic once we
1275 * have better C11 support in GCC in common distributions (ie: 4.9).
1276 *
1277 * Before then, let's define a couple of useful things for our own
1278 * purposes...
1279 */
1280
/* Thin wrappers over GCC's __atomic builtins on 4-byte values.  The
 * ACQUIRE ordering on the lock-taking side pairs with RELEASE on the
 * unlocking side to order the critical section correctly. */
#define exchange_acquire(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)
1290
1291 /* Our strategy for the mutex is pretty simple:
1292 *
1293 * 0: not in use
1294 *
1295 * 1: acquired by one thread only, no contention
1296 *
1297 * > 1: contended
1298 *
1299 *
1300 * As such, attempting to acquire the lock should involve an increment.
1301 * If we find that the previous value was 0 then we can return
1302 * immediately.
1303 *
1304 * On unlock, we always store 0 to indicate that the lock is available.
1305 * If the value there was 1 before then we didn't have contention and
1306 * can return immediately. If the value was something other than 1 then
1307 * we have the contended case and need to wake a waiter.
1308 *
1309 * If it was not 0 then there is another thread holding it and we must
1310 * wait. We must always ensure that we mark a value >1 while we are
1311 * waiting in order to instruct the holder to do a wake operation on
1312 * unlock.
1313 */
1314
void
g_mutex_init (GMutex *mutex)
{
  /* 0 == unlocked; see the state description above.  A plain store is
   * fine: the mutex must not be visible to other threads yet. */
  mutex->i[0] = 0;
}
1320
1321 void
g_mutex_clear(GMutex * mutex)1322 g_mutex_clear (GMutex *mutex)
1323 {
1324 if G_UNLIKELY (mutex->i[0] != 0)
1325 {
1326 fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n");
1327 g_abort ();
1328 }
1329 }
1330
/* Slow path for g_mutex_lock(), entered when the fast-path increment
 * found the lock already held. */
static void __attribute__((noinline))
g_mutex_lock_slowpath (GMutex *mutex)
{
  /* Set to 2 to indicate contention.  If it was zero before then we
   * just acquired the lock.
   *
   * Otherwise, sleep for as long as the 2 remains...
   * (FUTEX_WAIT returns immediately if the word no longer equals 2,
   * so a wakeup between the exchange and the syscall is not lost.)
   */
  while (exchange_acquire (&mutex->i[0], 2) != 0)
    syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) 2, NULL);
}
1342
/* Slow path for g_mutex_unlock(): @prev is the pre-unlock value of the
 * futex word.  prev > 1 means waiters recorded contention, so wake one;
 * prev == 0 means the mutex wasn't locked at all — a caller bug. */
static void __attribute__((noinline))
g_mutex_unlock_slowpath (GMutex *mutex,
                         guint   prev)
{
  /* We seem to get better code for the uncontended case by splitting
   * this out...
   */
  if G_UNLIKELY (prev == 0)
    {
      fprintf (stderr, "Attempt to unlock mutex that was not locked\n");
      g_abort ();
    }

  /* Wake exactly one waiter; it will re-mark the lock contended. */
  syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
1358
void
g_mutex_lock (GMutex *mutex)
{
  /* 0 -> 1 and we're done.  Anything else, and we need to wait...
   * (the atomic add doubles as the acquire operation on success) */
  if G_UNLIKELY (g_atomic_int_add (&mutex->i[0], 1) != 0)
    g_mutex_lock_slowpath (mutex);
}
1366
void
g_mutex_unlock (GMutex *mutex)
{
  guint prev;

  /* Release-exchange publishes the critical section's writes and tells
   * us whether anyone was waiting. */
  prev = exchange_release (&mutex->i[0], 0);

  /* 1-> 0 and we're done.  Anything else and we need to signal... */
  if G_UNLIKELY (prev != 1)
    g_mutex_unlock_slowpath (mutex, prev);
}
1378
/* Returns TRUE if the lock was acquired, FALSE if it was already held.
 * Never blocks and never marks the lock contended. */
gboolean
g_mutex_trylock (GMutex *mutex)
{
  guint zero = 0;

  /* We don't want to touch the value at all unless we can move it from
   * exactly 0 to 1.
   */
  return compare_exchange_acquire (&mutex->i[0], &zero, 1);
}
1389
1390 /* Condition variables are implemented in a rather simple way as well.
1391 * In many ways, futex() as an abstraction is even more ideally suited
1392 * to condition variables than it is to mutexes.
1393 *
1394 * We store a generation counter. We sample it with the lock held and
1395 * unlock before sleeping on the futex.
1396 *
1397 * Signalling simply involves increasing the counter and making the
1398 * appropriate futex call.
1399 *
1400 * The only thing that is the slightest bit complicated is timed waits
1401 * because we must convert our absolute time to relative.
1402 */
1403
void
g_cond_init (GCond *cond)
{
  /* i[0] is the generation counter described above; start at zero.
   * A plain store is fine: the cond isn't shared yet. */
  cond->i[0] = 0;
}
1409
void
g_cond_clear (GCond *cond)
{
  /* Nothing to release: the futex-based cond is just an integer
   * generation counter with no associated kernel object. */
}
1414
void
g_cond_wait (GCond  *cond,
             GMutex *mutex)
{
  /* Sample the generation counter while the mutex is still held.  The
   * futex wait below only blocks if the counter still equals @sampled,
   * so a signal arriving between the unlock and the sleep cannot be
   * missed (spurious wakeups are permitted by the GCond contract). */
  guint sampled = g_atomic_int_get (&cond->i[0]);

  g_mutex_unlock (mutex);
  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL);
  g_mutex_lock (mutex);
}
1425
void
g_cond_signal (GCond *cond)
{
  /* Bump the generation so any concurrent waiter's sampled value is
   * stale, then wake at most one sleeper. */
  g_atomic_int_inc (&cond->i[0]);

  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}
1433
void
g_cond_broadcast (GCond *cond)
{
  /* Same as g_cond_signal(), but wake every sleeper (INT_MAX). */
  g_atomic_int_inc (&cond->i[0]);

  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL);
}
1441
1442 gboolean
g_cond_wait_until(GCond * cond,GMutex * mutex,gint64 end_time)1443 g_cond_wait_until (GCond *cond,
1444 GMutex *mutex,
1445 gint64 end_time)
1446 {
1447 struct timespec now;
1448 struct timespec span;
1449 guint sampled;
1450 int res;
1451 gboolean success;
1452
1453 if (end_time < 0)
1454 return FALSE;
1455
1456 clock_gettime (CLOCK_MONOTONIC, &now);
1457 span.tv_sec = (end_time / 1000000) - now.tv_sec;
1458 span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec;
1459 if (span.tv_nsec < 0)
1460 {
1461 span.tv_nsec += 1000000000;
1462 span.tv_sec--;
1463 }
1464
1465 if (span.tv_sec < 0)
1466 return FALSE;
1467
1468 sampled = cond->i[0];
1469 g_mutex_unlock (mutex);
1470 res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span);
1471 success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
1472 g_mutex_lock (mutex);
1473
1474 return success;
1475 }
1476
1477 #endif
1478
1479 /* {{{1 Epilogue */
1480 /* vim:set foldmethod=marker: */
1481