/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * gthread.c: posix thread system implementation
 * Copyright 1998 Sebastian Wilhelmi; University of Karlsruhe
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by the GLib Team and others 1997-2000.  See the AUTHORS
 * file for a list of people on the GLib Team.  See the ChangeLog
 * files for a list of changes.  These files are distributed with
 * GLib at ftp://ftp.gtk.org/pub/gtk/.
 */

/* The GMutex, GCond and GPrivate implementations in this file are some
 * of the lowest-level code in GLib.  All other parts of GLib (messages,
 * memory, slices, etc) assume that they can freely use these facilities
 * without risking recursion.
 *
 * As such, these functions are NOT permitted to call any other part of
 * GLib.
 *
 * The thread manipulation functions (create, exit, join, etc.) have
 * more freedom -- they can do as they please.
 */

#include "config.h"

#include "gthread.h"

#include "gmain.h"
#include "gmessages.h"
#include "gslice.h"
#include "gstrfuncs.h"
#include "gtestutils.h"
#include "gthreadprivate.h"
#include "gutils.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>

#include <sys/time.h>
#include <unistd.h>

#ifdef HAVE_PTHREAD_SET_NAME_NP
#include <pthread_np.h>
#endif
#ifdef HAVE_SCHED_H
#include <sched.h>
#endif
#ifdef G_OS_WIN32
#include <windows.h>
#endif

#if defined(HAVE_SYS_SCHED_GETATTR)
#include <sys/syscall.h>
#endif
/* OHOS_GLIB_COMPATIBLE
 * ohos.glib.compatible.001: updating glib from 2.62.5 to 2.68.1 is
 * incompatible with gstreamer 1.16.2, which still uses patterns like
 *   static volatile gsize _init_once = 0;
 *   if (g_once_init_enter (&_init_once))
 * The volatile qualifier conflicts with the atomics used under clang,
 * so "&& !defined(__clang__)" is added to the condition below.
 */
#if defined(HAVE_FUTEX) && \
    (defined(HAVE_STDATOMIC_H) || defined(__ATOMIC_SEQ_CST)) && !defined(__clang__)
#define USE_NATIVE_MUTEX
#endif

static void
g_thread_abort (gint         status,
                const gchar *function)
{
  fprintf (stderr, "GLib (gthread-posix.c): Unexpected error from C library during '%s': %s.  Aborting.\n",
           function, strerror (status));
  g_abort ();
}

/* {{{1 GMutex */

#if !defined(USE_NATIVE_MUTEX)

static pthread_mutex_t *
g_mutex_impl_new (void)
{
  pthread_mutexattr_t *pattr = NULL;
  pthread_mutex_t *mutex;
  gint status;
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
  pthread_mutexattr_t attr;
#endif

  mutex = malloc (sizeof (pthread_mutex_t));
  if G_UNLIKELY (mutex == NULL)
    g_thread_abort (errno, "malloc");

#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
  pattr = &attr;
#endif

  if G_UNLIKELY ((status = pthread_mutex_init (mutex, pattr)) != 0)
    g_thread_abort (status, "pthread_mutex_init");

#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
  pthread_mutexattr_destroy (&attr);
#endif

  return mutex;
}

static void
g_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  free (mutex);
}

static inline pthread_mutex_t *
g_mutex_get_impl (GMutex *mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&mutex->p, NULL, impl))
        g_mutex_impl_free (impl);
      impl = mutex->p;
    }

  return impl;
}


/**
 * g_mutex_init:
 * @mutex: an uninitialized #GMutex
 *
 * Initializes a #GMutex so that it can be used.
 *
 * This function is useful to initialize a mutex that has been
 * allocated on the stack, or as part of a larger structure.
 * It is not necessary to initialize a mutex that has been
 * statically allocated.
 *
 * |[<!-- language="C" -->
 *   typedef struct {
 *     GMutex m;
 *     ...
 *   } Blob;
 *
 * Blob *b;
 *
 * b = g_new (Blob, 1);
 * g_mutex_init (&b->m);
 * ]|
 *
 * To undo the effect of g_mutex_init() when a mutex is no longer
 * needed, use g_mutex_clear().
 *
 * Calling g_mutex_init() on an already initialized #GMutex leads
 * to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_mutex_init (GMutex *mutex)
{
  mutex->p = g_mutex_impl_new ();
}

/**
 * g_mutex_clear:
 * @mutex: an initialized #GMutex
 *
 * Frees the resources allocated to a mutex with g_mutex_init().
 *
 * This function should not be used with a #GMutex that has been
 * statically allocated.
 *
 * Calling g_mutex_clear() on a locked mutex leads to undefined
 * behaviour.
 *
 * Since: 2.32
 */
void
g_mutex_clear (GMutex *mutex)
{
  g_mutex_impl_free (mutex->p);
}

/**
 * g_mutex_lock:
 * @mutex: a #GMutex
 *
 * Locks @mutex. If @mutex is already locked by another thread, the
 * current thread will block until @mutex is unlocked by the other
 * thread.
 *
 * #GMutex is neither guaranteed to be recursive nor to be
 * non-recursive.  As such, calling g_mutex_lock() on a #GMutex that has
 * already been locked by the same thread results in undefined behaviour
 * (including but not limited to deadlocks).
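 *
 * A minimal usage sketch (illustrative only; the mutex, counter and
 * function names are hypothetical, not part of the original
 * documentation):
 *
 * |[<!-- language="C" -->
 * static GMutex counter_mutex;   // statically allocated, no init needed
 * static gint counter;
 *
 * void
 * increment_counter (void)
 * {
 *   g_mutex_lock (&counter_mutex);
 *   counter++;                   // counter is protected while locked
 *   g_mutex_unlock (&counter_mutex);
 * }
 * ]|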
 */
void
g_mutex_lock (GMutex *mutex)
{
  gint status;

  if G_UNLIKELY ((status = pthread_mutex_lock (g_mutex_get_impl (mutex))) != 0)
    g_thread_abort (status, "pthread_mutex_lock");
}

/**
 * g_mutex_unlock:
 * @mutex: a #GMutex
 *
 * Unlocks @mutex. If another thread is blocked in a g_mutex_lock()
 * call for @mutex, it will become unblocked and can lock @mutex itself.
 *
 * Calling g_mutex_unlock() on a mutex that is not locked by the
 * current thread leads to undefined behaviour.
 */
void
g_mutex_unlock (GMutex *mutex)
{
  gint status;

  if G_UNLIKELY ((status = pthread_mutex_unlock (g_mutex_get_impl (mutex))) != 0)
    g_thread_abort (status, "pthread_mutex_unlock");
}

/**
 * g_mutex_trylock:
 * @mutex: a #GMutex
 *
 * Tries to lock @mutex. If @mutex is already locked by another thread,
 * it immediately returns %FALSE. Otherwise it locks @mutex and returns
 * %TRUE.
 *
 * #GMutex is neither guaranteed to be recursive nor to be
 * non-recursive.  As such, calling g_mutex_lock() on a #GMutex that has
 * already been locked by the same thread results in undefined behaviour
 * (including but not limited to deadlocks or arbitrary return values).
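 *
 * An illustrative sketch (reusing the hypothetical counter example
 * from g_mutex_lock() above):
 *
 * |[<!-- language="C" -->
 * // Do optional bookkeeping only if the lock happens to be free
 * if (g_mutex_trylock (&counter_mutex))
 *   {
 *     counter++;
 *     g_mutex_unlock (&counter_mutex);
 *   }
 * ]|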
 *
 * Returns: %TRUE if @mutex could be locked
 */
gboolean
g_mutex_trylock (GMutex *mutex)
{
  gint status;

  if G_LIKELY ((status = pthread_mutex_trylock (g_mutex_get_impl (mutex))) == 0)
    return TRUE;

  if G_UNLIKELY (status != EBUSY)
    g_thread_abort (status, "pthread_mutex_trylock");

  return FALSE;
}

#endif /* !defined(USE_NATIVE_MUTEX) */

/* {{{1 GRecMutex */

static pthread_mutex_t *
g_rec_mutex_impl_new (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t *mutex;

  mutex = malloc (sizeof (pthread_mutex_t));
  if G_UNLIKELY (mutex == NULL)
    g_thread_abort (errno, "malloc");

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (mutex, &attr);
  pthread_mutexattr_destroy (&attr);

  return mutex;
}

static void
g_rec_mutex_impl_free (pthread_mutex_t *mutex)
{
  pthread_mutex_destroy (mutex);
  free (mutex);
}

static inline pthread_mutex_t *
g_rec_mutex_get_impl (GRecMutex *rec_mutex)
{
  pthread_mutex_t *impl = g_atomic_pointer_get (&rec_mutex->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_rec_mutex_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&rec_mutex->p, NULL, impl))
        g_rec_mutex_impl_free (impl);
      impl = rec_mutex->p;
    }

  return impl;
}

/**
 * g_rec_mutex_init:
 * @rec_mutex: an uninitialized #GRecMutex
 *
 * Initializes a #GRecMutex so that it can be used.
 *
 * This function is useful to initialize a recursive mutex
 * that has been allocated on the stack, or as part of a larger
 * structure.
 *
 * It is not necessary to initialise a recursive mutex that has been
 * statically allocated.
 *
 * |[<!-- language="C" -->
 *   typedef struct {
 *     GRecMutex m;
 *     ...
 *   } Blob;
 *
 * Blob *b;
 *
 * b = g_new (Blob, 1);
 * g_rec_mutex_init (&b->m);
 * ]|
 *
 * Calling g_rec_mutex_init() on an already initialized #GRecMutex
 * leads to undefined behaviour.
 *
 * To undo the effect of g_rec_mutex_init() when a recursive mutex
 * is no longer needed, use g_rec_mutex_clear().
 *
 * Since: 2.32
 */
void
g_rec_mutex_init (GRecMutex *rec_mutex)
{
  rec_mutex->p = g_rec_mutex_impl_new ();
}

/**
 * g_rec_mutex_clear:
 * @rec_mutex: an initialized #GRecMutex
 *
 * Frees the resources allocated to a recursive mutex with
 * g_rec_mutex_init().
 *
 * This function should not be used with a #GRecMutex that has been
 * statically allocated.
 *
 * Calling g_rec_mutex_clear() on a locked recursive mutex leads
 * to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rec_mutex_clear (GRecMutex *rec_mutex)
{
  g_rec_mutex_impl_free (rec_mutex->p);
}

/**
 * g_rec_mutex_lock:
 * @rec_mutex: a #GRecMutex
 *
 * Locks @rec_mutex. If @rec_mutex is already locked by another
 * thread, the current thread will block until @rec_mutex is
 * unlocked by the other thread. If @rec_mutex is already locked
 * by the current thread, the 'lock count' of @rec_mutex is increased.
 * The mutex will only become available again when it is unlocked
 * as many times as it has been locked.
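 *
 * An illustrative sketch (the names here are hypothetical, not part of
 * the original documentation): every lock call, including recursive
 * ones, must be paired with an unlock.
 *
 * |[<!-- language="C" -->
 * static GRecMutex rec_mutex;
 *
 * void
 * inner (void)
 * {
 *   g_rec_mutex_lock (&rec_mutex);    // lock count goes to 2
 *   // ...
 *   g_rec_mutex_unlock (&rec_mutex);  // back to 1
 * }
 *
 * void
 * outer (void)
 * {
 *   g_rec_mutex_lock (&rec_mutex);    // lock count goes to 1
 *   inner ();
 *   g_rec_mutex_unlock (&rec_mutex);  // mutex is available again
 * }
 * ]|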
 *
 * Since: 2.32
 */
void
g_rec_mutex_lock (GRecMutex *mutex)
{
  pthread_mutex_lock (g_rec_mutex_get_impl (mutex));
}

/**
 * g_rec_mutex_unlock:
 * @rec_mutex: a #GRecMutex
 *
 * Unlocks @rec_mutex. If another thread is blocked in a
 * g_rec_mutex_lock() call for @rec_mutex, it will become unblocked
 * and can lock @rec_mutex itself.
 *
 * Calling g_rec_mutex_unlock() on a recursive mutex that is not
 * locked by the current thread leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rec_mutex_unlock (GRecMutex *rec_mutex)
{
  pthread_mutex_unlock (rec_mutex->p);
}

/**
 * g_rec_mutex_trylock:
 * @rec_mutex: a #GRecMutex
 *
 * Tries to lock @rec_mutex. If @rec_mutex is already locked
 * by another thread, it immediately returns %FALSE. Otherwise
 * it locks @rec_mutex and returns %TRUE.
 *
 * Returns: %TRUE if @rec_mutex could be locked
 *
 * Since: 2.32
 */
gboolean
g_rec_mutex_trylock (GRecMutex *rec_mutex)
{
  if (pthread_mutex_trylock (g_rec_mutex_get_impl (rec_mutex)) != 0)
    return FALSE;

  return TRUE;
}

/* {{{1 GRWLock */

static pthread_rwlock_t *
g_rw_lock_impl_new (void)
{
  pthread_rwlock_t *rwlock;
  gint status;

  rwlock = malloc (sizeof (pthread_rwlock_t));
  if G_UNLIKELY (rwlock == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_rwlock_init (rwlock, NULL)) != 0)
    g_thread_abort (status, "pthread_rwlock_init");

  return rwlock;
}

static void
g_rw_lock_impl_free (pthread_rwlock_t *rwlock)
{
  pthread_rwlock_destroy (rwlock);
  free (rwlock);
}

static inline pthread_rwlock_t *
g_rw_lock_get_impl (GRWLock *lock)
{
  pthread_rwlock_t *impl = g_atomic_pointer_get (&lock->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_rw_lock_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&lock->p, NULL, impl))
        g_rw_lock_impl_free (impl);
      impl = lock->p;
    }

  return impl;
}

/**
 * g_rw_lock_init:
 * @rw_lock: an uninitialized #GRWLock
 *
 * Initializes a #GRWLock so that it can be used.
 *
 * This function is useful to initialize a lock that has been
 * allocated on the stack, or as part of a larger structure.  It is not
 * necessary to initialise a reader-writer lock that has been statically
 * allocated.
 *
 * |[<!-- language="C" -->
 *   typedef struct {
 *     GRWLock l;
 *     ...
 *   } Blob;
 *
 * Blob *b;
 *
 * b = g_new (Blob, 1);
 * g_rw_lock_init (&b->l);
 * ]|
 *
 * To undo the effect of g_rw_lock_init() when a lock is no longer
 * needed, use g_rw_lock_clear().
 *
 * Calling g_rw_lock_init() on an already initialized #GRWLock leads
 * to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rw_lock_init (GRWLock *rw_lock)
{
  rw_lock->p = g_rw_lock_impl_new ();
}

/**
 * g_rw_lock_clear:
 * @rw_lock: an initialized #GRWLock
 *
 * Frees the resources allocated to a lock with g_rw_lock_init().
 *
 * This function should not be used with a #GRWLock that has been
 * statically allocated.
 *
 * Calling g_rw_lock_clear() when any thread holds the lock
 * leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rw_lock_clear (GRWLock *rw_lock)
{
  g_rw_lock_impl_free (rw_lock->p);
}

/**
 * g_rw_lock_writer_lock:
 * @rw_lock: a #GRWLock
 *
 * Obtain a write lock on @rw_lock. If another thread currently holds
 * a read or write lock on @rw_lock, the current thread will block
 * until all other threads have dropped their locks on @rw_lock.
 *
 * Calling g_rw_lock_writer_lock() while the current thread already
 * owns a read or write lock on @rw_lock leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rw_lock_writer_lock (GRWLock *rw_lock)
{
  int retval = pthread_rwlock_wrlock (g_rw_lock_get_impl (rw_lock));

  if (retval != 0)
    g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
}

/**
 * g_rw_lock_writer_trylock:
 * @rw_lock: a #GRWLock
 *
 * Tries to obtain a write lock on @rw_lock. If another thread
 * currently holds a read or write lock on @rw_lock, it immediately
 * returns %FALSE.
 * Otherwise it locks @rw_lock and returns %TRUE.
 *
 * Returns: %TRUE if @rw_lock could be locked
 *
 * Since: 2.32
 */
gboolean
g_rw_lock_writer_trylock (GRWLock *rw_lock)
{
  if (pthread_rwlock_trywrlock (g_rw_lock_get_impl (rw_lock)) != 0)
    return FALSE;

  return TRUE;
}

/**
 * g_rw_lock_writer_unlock:
 * @rw_lock: a #GRWLock
 *
 * Release a write lock on @rw_lock.
 *
 * Calling g_rw_lock_writer_unlock() on a lock that is not held
 * by the current thread leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rw_lock_writer_unlock (GRWLock *rw_lock)
{
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}

/**
 * g_rw_lock_reader_lock:
 * @rw_lock: a #GRWLock
 *
 * Obtain a read lock on @rw_lock. If another thread currently holds
 * the write lock on @rw_lock, the current thread will block until the
 * write lock was (held and) released. If another thread does not hold
 * the write lock, but is waiting for it, it is implementation defined
 * whether the reader or writer will block. Read locks can be taken
 * recursively.
 *
 * Calling g_rw_lock_reader_lock() while the current thread already
 * owns a write lock leads to undefined behaviour. Read locks however
 * can be taken recursively, in which case you need to make sure to
 * call g_rw_lock_reader_unlock() the same amount of times.
 *
 * It is implementation-defined how many read locks are allowed to be
 * held on the same lock simultaneously. If the limit is hit,
 * or if a deadlock is detected, a critical warning will be emitted.
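 *
 * An illustrative sketch (the array, lock and function names are
 * hypothetical, not part of the original documentation):
 *
 * |[<!-- language="C" -->
 * static GRWLock shared_lock;
 * static GPtrArray *shared_array;
 *
 * gpointer
 * lookup_item (guint index)
 * {
 *   gpointer item;
 *
 *   g_rw_lock_reader_lock (&shared_lock);    // many readers may enter
 *   item = g_ptr_array_index (shared_array, index);
 *   g_rw_lock_reader_unlock (&shared_lock);
 *
 *   return item;
 * }
 *
 * void
 * add_item (gpointer item)
 * {
 *   g_rw_lock_writer_lock (&shared_lock);    // writer gets exclusive access
 *   g_ptr_array_add (shared_array, item);
 *   g_rw_lock_writer_unlock (&shared_lock);
 * }
 * ]|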
 *
 * Since: 2.32
 */
void
g_rw_lock_reader_lock (GRWLock *rw_lock)
{
  int retval = pthread_rwlock_rdlock (g_rw_lock_get_impl (rw_lock));

  if (retval != 0)
    g_critical ("Failed to get RW lock %p: %s", rw_lock, g_strerror (retval));
}

/**
 * g_rw_lock_reader_trylock:
 * @rw_lock: a #GRWLock
 *
 * Tries to obtain a read lock on @rw_lock and returns %TRUE if
 * the read lock was successfully obtained. Otherwise it
 * returns %FALSE.
 *
 * Returns: %TRUE if @rw_lock could be locked
 *
 * Since: 2.32
 */
gboolean
g_rw_lock_reader_trylock (GRWLock *rw_lock)
{
  if (pthread_rwlock_tryrdlock (g_rw_lock_get_impl (rw_lock)) != 0)
    return FALSE;

  return TRUE;
}

/**
 * g_rw_lock_reader_unlock:
 * @rw_lock: a #GRWLock
 *
 * Release a read lock on @rw_lock.
 *
 * Calling g_rw_lock_reader_unlock() on a lock that is not held
 * by the current thread leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_rw_lock_reader_unlock (GRWLock *rw_lock)
{
  pthread_rwlock_unlock (g_rw_lock_get_impl (rw_lock));
}

/* {{{1 GCond */

#if !defined(USE_NATIVE_MUTEX)

static pthread_cond_t *
g_cond_impl_new (void)
{
  pthread_condattr_t attr;
  pthread_cond_t *cond;
  gint status;

  pthread_condattr_init (&attr);

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  if G_UNLIKELY ((status = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC)) != 0)
    g_thread_abort (status, "pthread_condattr_setclock");
#else
#error Cannot support GCond on your platform.
#endif

  cond = malloc (sizeof (pthread_cond_t));
  if G_UNLIKELY (cond == NULL)
    g_thread_abort (errno, "malloc");

  if G_UNLIKELY ((status = pthread_cond_init (cond, &attr)) != 0)
    g_thread_abort (status, "pthread_cond_init");

  pthread_condattr_destroy (&attr);

  return cond;
}

static void
g_cond_impl_free (pthread_cond_t *cond)
{
  pthread_cond_destroy (cond);
  free (cond);
}

static inline pthread_cond_t *
g_cond_get_impl (GCond *cond)
{
  pthread_cond_t *impl = g_atomic_pointer_get (&cond->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_cond_impl_new ();
      if (!g_atomic_pointer_compare_and_exchange (&cond->p, NULL, impl))
        g_cond_impl_free (impl);
      impl = cond->p;
    }

  return impl;
}

/**
 * g_cond_init:
 * @cond: an uninitialized #GCond
 *
 * Initialises a #GCond so that it can be used.
 *
 * This function is useful to initialise a #GCond that has been
 * allocated as part of a larger structure.  It is not necessary to
 * initialise a #GCond that has been statically allocated.
 *
 * To undo the effect of g_cond_init() when a #GCond is no longer
 * needed, use g_cond_clear().
 *
 * Calling g_cond_init() on an already-initialised #GCond leads
 * to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_cond_init (GCond *cond)
{
  cond->p = g_cond_impl_new ();
}

/**
 * g_cond_clear:
 * @cond: an initialised #GCond
 *
 * Frees the resources allocated to a #GCond with g_cond_init().
 *
 * This function should not be used with a #GCond that has been
 * statically allocated.
 *
 * Calling g_cond_clear() for a #GCond on which threads are
 * blocking leads to undefined behaviour.
 *
 * Since: 2.32
 */
void
g_cond_clear (GCond *cond)
{
  g_cond_impl_free (cond->p);
}

/**
 * g_cond_wait:
 * @cond: a #GCond
 * @mutex: a #GMutex that is currently locked
 *
 * Atomically releases @mutex and waits until @cond is signalled.
 * When this function returns, @mutex is locked again and owned by the
 * calling thread.
 *
 * When using condition variables, it is possible that a spurious wakeup
 * may occur (ie: g_cond_wait() returns even though g_cond_signal() was
 * not called).  It's also possible that a stolen wakeup may occur.
 * This is when g_cond_signal() is called, but another thread acquires
 * @mutex before this thread and modifies the state of the program in
 * such a way that when g_cond_wait() is able to return, the expected
 * condition is no longer met.
 *
 * For this reason, g_cond_wait() must always be used in a loop.  See
 * the documentation for #GCond for a complete example.
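 *
 * A minimal sketch of the required loop (data_mutex, data_cond and
 * current_data are the hypothetical names also used in the
 * g_cond_wait_until() example below):
 *
 * |[<!-- language="C" -->
 * g_mutex_lock (&data_mutex);
 * while (!current_data)
 *   g_cond_wait (&data_cond, &data_mutex);
 * // current_data is now guaranteed to be set
 * g_mutex_unlock (&data_mutex);
 * ]|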
 **/
void
g_cond_wait (GCond  *cond,
             GMutex *mutex)
{
  gint status;

  if G_UNLIKELY ((status = pthread_cond_wait (g_cond_get_impl (cond), g_mutex_get_impl (mutex))) != 0)
    g_thread_abort (status, "pthread_cond_wait");
}

/**
 * g_cond_signal:
 * @cond: a #GCond
 *
 * If threads are waiting for @cond, at least one of them is unblocked.
 * If no threads are waiting for @cond, this function has no effect.
 * It is good practice to hold the same lock as the waiting thread
 * while calling this function, though not required.
 */
void
g_cond_signal (GCond *cond)
{
  gint status;

  if G_UNLIKELY ((status = pthread_cond_signal (g_cond_get_impl (cond))) != 0)
    g_thread_abort (status, "pthread_cond_signal");
}

/**
 * g_cond_broadcast:
 * @cond: a #GCond
 *
 * If threads are waiting for @cond, all of them are unblocked.
 * If no threads are waiting for @cond, this function has no effect.
 * It is good practice to lock the same mutex as the waiting threads
 * while calling this function, though not required.
 */
void
g_cond_broadcast (GCond *cond)
{
  gint status;

  if G_UNLIKELY ((status = pthread_cond_broadcast (g_cond_get_impl (cond))) != 0)
    g_thread_abort (status, "pthread_cond_broadcast");
}

/**
 * g_cond_wait_until:
 * @cond: a #GCond
 * @mutex: a #GMutex that is currently locked
 * @end_time: the monotonic time to wait until
 *
 * Waits until either @cond is signalled or @end_time has passed.
 *
 * As with g_cond_wait() it is possible that a spurious or stolen wakeup
 * could occur.  For that reason, waiting on a condition variable should
 * always be in a loop, based on an explicitly-checked predicate.
 *
 * %TRUE is returned if the condition variable was signalled (or in the
 * case of a spurious wakeup).  %FALSE is returned if @end_time has
 * passed.
 *
 * The following code shows how to correctly perform a timed wait on a
 * condition variable (extending the example presented in the
 * documentation for #GCond):
 *
 * |[<!-- language="C" -->
 * gpointer
 * pop_data_timed (void)
 * {
 *   gint64 end_time;
 *   gpointer data;
 *
 *   g_mutex_lock (&data_mutex);
 *
 *   end_time = g_get_monotonic_time () + 5 * G_TIME_SPAN_SECOND;
 *   while (!current_data)
 *     if (!g_cond_wait_until (&data_cond, &data_mutex, end_time))
 *       {
 *         // timeout has passed.
 *         g_mutex_unlock (&data_mutex);
 *         return NULL;
 *       }
 *
 *   // there is data for us
 *   data = current_data;
 *   current_data = NULL;
 *
 *   g_mutex_unlock (&data_mutex);
 *
 *   return data;
 * }
 * ]|
 *
 * Notice that the end time is calculated once, before entering the
 * loop and reused.  This is the motivation behind the use of absolute
 * time on this API -- if a relative time of 5 seconds were passed
 * directly to the call and a spurious wakeup occurred, the program would
 * have to start over waiting again (which would lead to a total wait
 * time of more than 5 seconds).
 *
 * Returns: %TRUE on a signal, %FALSE on a timeout
 * Since: 2.32
 **/
gboolean
g_cond_wait_until (GCond  *cond,
                   GMutex *mutex,
                   gint64  end_time)
{
  struct timespec ts;
  gint status;

#ifdef HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
  /* end_time is given as an absolute time on the monotonic clock, as
   * returned by g_get_monotonic_time().
   *
   * Since this pthreads function wants a relative time, convert it
   * back again.
   */
  {
    gint64 now = g_get_monotonic_time ();
    gint64 relative;

    if (end_time <= now)
      return FALSE;

    relative = end_time - now;

    ts.tv_sec = relative / 1000000;
    ts.tv_nsec = (relative % 1000000) * 1000;

    if ((status = pthread_cond_timedwait_relative_np (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#elif defined (HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined (CLOCK_MONOTONIC)
  /* This is the exact check we used during init to set the clock to
   * monotonic, so if we're in this branch, timedwait() will already be
   * expecting a monotonic clock.
   */
  {
    ts.tv_sec = end_time / 1000000;
    ts.tv_nsec = (end_time % 1000000) * 1000;

    if ((status = pthread_cond_timedwait (g_cond_get_impl (cond), g_mutex_get_impl (mutex), &ts)) == 0)
      return TRUE;
  }
#else
#error Cannot support GCond on your platform.
#endif

  if G_UNLIKELY (status != ETIMEDOUT)
    g_thread_abort (status, "pthread_cond_timedwait");

  return FALSE;
}

#endif /* !defined(USE_NATIVE_MUTEX) */

/* {{{1 GPrivate */

/**
 * GPrivate:
 *
 * The #GPrivate struct is an opaque data structure to represent a
 * thread-local data key. It is approximately equivalent to the
 * pthread_setspecific()/pthread_getspecific() APIs on POSIX and to
 * TlsSetValue()/TlsGetValue() on Windows.
 *
 * If you don't already know why you might want this functionality,
 * then you probably don't need it.
 *
 * #GPrivate is a very limited resource (as far as 128 per program,
 * shared between all libraries). It is also not possible to destroy a
 * #GPrivate after it has been used. As such, it is only ever acceptable
 * to use #GPrivate in static scope, and even then sparingly so.
 *
 * See G_PRIVATE_INIT() for a couple of examples.
 *
 * The #GPrivate structure should be considered opaque.  It should only
 * be accessed via the g_private_ functions.
 */

/**
 * G_PRIVATE_INIT:
 * @notify: a #GDestroyNotify
 *
 * A macro to assist with the static initialisation of a #GPrivate.
 *
 * This macro is useful for the case that a #GDestroyNotify function
 * should be associated with the key.  This is needed when the key will be
 * used to point at memory that should be deallocated when the thread
 * exits.
 *
 * Additionally, the #GDestroyNotify will also be called on the previous
 * value stored in the key when g_private_replace() is used.
 *
 * If no #GDestroyNotify is needed, then use of this macro is not
 * required -- if the #GPrivate is declared in static scope then it will
 * be properly initialised by default (ie: to all zeros).  See the
 * examples below.
 *
 * |[<!-- language="C" -->
 * static GPrivate name_key = G_PRIVATE_INIT (g_free);
 *
 * // return value should not be freed
 * const gchar *
 * get_local_name (void)
 * {
 *   return g_private_get (&name_key);
 * }
 *
 * void
 * set_local_name (const gchar *name)
 * {
 *   g_private_replace (&name_key, g_strdup (name));
 * }
 *
 *
 * static GPrivate count_key;   // no free function
 *
 * gint
 * get_local_count (void)
 * {
 *   return GPOINTER_TO_INT (g_private_get (&count_key));
 * }
 *
 * void
 * set_local_count (gint count)
 * {
 *   g_private_set (&count_key, GINT_TO_POINTER (count));
 * }
 * ]|
 *
 * Since: 2.32
 **/

static pthread_key_t *
g_private_impl_new (GDestroyNotify notify)
{
  pthread_key_t *key;
  gint status;

  key = malloc (sizeof (pthread_key_t));
  if G_UNLIKELY (key == NULL)
    g_thread_abort (errno, "malloc");
  status = pthread_key_create (key, notify);
  if G_UNLIKELY (status != 0)
    g_thread_abort (status, "pthread_key_create");

  return key;
}

static void
g_private_impl_free (pthread_key_t *key)
{
  gint status;

  status = pthread_key_delete (*key);
  if G_UNLIKELY (status != 0)
    g_thread_abort (status, "pthread_key_delete");
  free (key);
}

static inline pthread_key_t *
g_private_get_impl (GPrivate *key)
{
  pthread_key_t *impl = g_atomic_pointer_get (&key->p);

  if G_UNLIKELY (impl == NULL)
    {
      impl = g_private_impl_new (key->notify);
      if (!g_atomic_pointer_compare_and_exchange (&key->p, NULL, impl))
        {
          g_private_impl_free (impl);
          impl = key->p;
        }
    }

  return impl;
}

/**
 * g_private_get:
 * @key: a #GPrivate
 *
 * Returns the current value of the thread local variable @key.
 *
 * If the value has not yet been set in this thread, %NULL is returned.
 * Values are never copied between threads (when a new thread is
 * created, for example).
 *
 * Returns: the thread-local value
 */
gpointer
g_private_get (GPrivate *key)
{
  /* quote POSIX: No errors are returned from pthread_getspecific(). */
  return pthread_getspecific (*g_private_get_impl (key));
}

/**
 * g_private_set:
 * @key: a #GPrivate
 * @value: the new value
 *
 * Sets the thread local variable @key to have the value @value in the
 * current thread.
 *
 * This function differs from g_private_replace() in the following way:
 * the #GDestroyNotify for @key is not called on the old value.
 */
void
g_private_set (GPrivate *key,
               gpointer  value)
{
  gint status;

  if G_UNLIKELY ((status = pthread_setspecific (*g_private_get_impl (key), value)) != 0)
    g_thread_abort (status, "pthread_setspecific");
}

/**
 * g_private_replace:
 * @key: a #GPrivate
 * @value: the new value
 *
 * Sets the thread local variable @key to have the value @value in the
 * current thread.
 *
 * This function differs from g_private_set() in the following way: if
 * the previous value was non-%NULL then the #GDestroyNotify handler for
 * @key is run on it.
 *
 * Since: 2.32
 **/
void
g_private_replace (GPrivate *key,
                   gpointer  value)
{
  pthread_key_t *impl = g_private_get_impl (key);
  gpointer old;
  gint status;

  old = pthread_getspecific (*impl);

  if G_UNLIKELY ((status = pthread_setspecific (*impl, value)) != 0)
    g_thread_abort (status, "pthread_setspecific");

  if (old && key->notify)
    key->notify (old);
}

/* {{{1 GThread */

#define posix_check_err(err, name) G_STMT_START{			\
  int error = (err); 							\
  if (error)	 		 		 			\
    g_error ("file %s: line %d (%s): error '%s' during '%s'",		\
           __FILE__, __LINE__, G_STRFUNC,				\
           g_strerror (error), name);					\
  }G_STMT_END

#define posix_check_cmd(cmd) posix_check_err (cmd, #cmd)

typedef struct
{
  GRealThread thread;

  pthread_t system_thread;
  gboolean  joined;
  GMutex    lock;

  void *(*proxy) (void *);

  /* Must be statically allocated and valid forever */
  const GThreadSchedulerSettings *scheduler_settings;
} GThreadPosix;

void
g_system_thread_free (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  if (!pt->joined)
    pthread_detach (pt->system_thread);

  g_mutex_clear (&pt->lock);

  g_slice_free (GThreadPosix, pt);
}

gboolean
g_system_thread_get_scheduler_settings (GThreadSchedulerSettings *scheduler_settings)
{
  /* FIXME: Implement the same for macOS and the BSDs so it doesn't go through
   * the fallback code using an additional thread. */
#if defined(HAVE_SYS_SCHED_GETATTR)
  pid_t tid;
  int res;
  /* FIXME: The struct definition does not seem to be possible to pull in
   * via any of the normal system headers and it's only declared in the
   * kernel headers. That's why we hardcode 56 here right now. */
  guint size = 56; /* Size as of Linux 5.3.9 */
  guint flags = 0;

  tid = (pid_t) syscall (SYS_gettid);

  scheduler_settings->attr = g_malloc0 (size);

  do
    {
      int errsv;

      res = syscall (SYS_sched_getattr, tid, scheduler_settings->attr, size, flags);
      errsv = errno;
      if (res == -1)
        {
          if (errsv == EAGAIN)
            {
              continue;
            }
          else if (errsv == E2BIG)
            {
              g_assert (size < G_MAXINT);
              size *= 2;
              scheduler_settings->attr = g_realloc (scheduler_settings->attr, size);
              /* Needs to be zero-initialized */
              memset (scheduler_settings->attr, 0, size);
            }
          else
            {
              g_debug ("Failed to get thread scheduler attributes: %s", g_strerror (errsv));
              g_free (scheduler_settings->attr);

              return FALSE;
            }
        }
    }
  while (res == -1);

  /* Try setting them on the current thread to see if any system policies are
   * in place that would disallow doing so */
  res = syscall (SYS_sched_setattr, tid, scheduler_settings->attr, flags);
  if (res == -1)
    {
      int errsv = errno;

      g_debug ("Failed to set thread scheduler attributes: %s", g_strerror (errsv));
      g_free (scheduler_settings->attr);

      return FALSE;
    }

  return TRUE;
#else
  return FALSE;
#endif
}

#if defined(HAVE_SYS_SCHED_GETATTR)
static void *
linux_pthread_proxy (void *data)
{
  GThreadPosix *thread = data;
  static gboolean printed_scheduler_warning = FALSE;  /* (atomic) */

  /* Set scheduler settings first if requested */
  if (thread->scheduler_settings)
    {
      pid_t tid = 0;
      guint flags = 0;
      int res;
      int errsv;

      tid = (pid_t) syscall (SYS_gettid);
      res = syscall (SYS_sched_setattr, tid, thread->scheduler_settings->attr, flags);
      errsv = errno;
      if (res == -1 && g_atomic_int_compare_and_exchange (&printed_scheduler_warning, FALSE, TRUE))
        g_critical ("Failed to set scheduler settings: %s", g_strerror (errsv));
      else if (res == -1)
        g_debug ("Failed to set scheduler settings: %s", g_strerror (errsv));
    }

  return thread->proxy (data);
}
#endif

GRealThread *
g_system_thread_new (GThreadFunc proxy,
                     gulong stack_size,
                     const GThreadSchedulerSettings *scheduler_settings,
                     const char *name,
                     GThreadFunc func,
                     gpointer data,
                     GError **error)
{
  GThreadPosix *thread;
  GRealThread *base_thread;
  pthread_attr_t attr;
  gint ret;

  thread = g_slice_new0 (GThreadPosix);
  base_thread = (GRealThread*)thread;
  base_thread->ref_count = 2;
  base_thread->ours = TRUE;
  base_thread->thread.joinable = TRUE;
  base_thread->thread.func = func;
  base_thread->thread.data = data;
  base_thread->name = g_strdup (name);
  thread->scheduler_settings = scheduler_settings;
  thread->proxy = proxy;

  posix_check_cmd (pthread_attr_init (&attr));

#ifdef HAVE_PTHREAD_ATTR_SETSTACKSIZE
  if (stack_size)
    {
#ifdef _SC_THREAD_STACK_MIN
      long min_stack_size = sysconf (_SC_THREAD_STACK_MIN);
      if (min_stack_size >= 0)
        stack_size = MAX ((gulong) min_stack_size, stack_size);
#endif /* _SC_THREAD_STACK_MIN */
      /* No error check here, because some systems can't do it and
       * we simply don't want threads to fail because of that. */
      pthread_attr_setstacksize (&attr, stack_size);
    }
#endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
  if (!scheduler_settings)
    {
      /* While this is the default, better be explicit about it */
      pthread_attr_setinheritsched (&attr, PTHREAD_INHERIT_SCHED);
    }
#endif /* HAVE_PTHREAD_ATTR_SETINHERITSCHED */

#if defined(HAVE_SYS_SCHED_GETATTR)
  ret = pthread_create (&thread->system_thread, &attr, linux_pthread_proxy, thread);
#else
  ret = pthread_create (&thread->system_thread, &attr, (void* (*)(void*))proxy, thread);
#endif

  posix_check_cmd (pthread_attr_destroy (&attr));

  if (ret == EAGAIN)
    {
      g_set_error (error, G_THREAD_ERROR, G_THREAD_ERROR_AGAIN,
                   "Error creating thread: %s", g_strerror (ret));
      g_slice_free (GThreadPosix, thread);
      return NULL;
    }

  posix_check_err (ret, "pthread_create");

  g_mutex_init (&thread->lock);

  return (GRealThread *) thread;
}

/**
 * g_thread_yield:
 *
 * Causes the calling thread to voluntarily relinquish the CPU, so
 * that other threads can run.
 *
 * This function is often used as a method to make busy wait less evil.
 */
void
g_thread_yield (void)
{
  sched_yield ();
}

void
g_system_thread_wait (GRealThread *thread)
{
  GThreadPosix *pt = (GThreadPosix *) thread;

  g_mutex_lock (&pt->lock);

  if (!pt->joined)
    {
      posix_check_cmd (pthread_join (pt->system_thread, NULL));
      pt->joined = TRUE;
    }

  g_mutex_unlock (&pt->lock);
}

void
g_system_thread_exit (void)
{
  pthread_exit (NULL);
}

void
g_system_thread_set_name (const gchar *name)
{
#if defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
  pthread_setname_np (name); /* on OS X and iOS */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
  pthread_setname_np (pthread_self (), name); /* on Linux and Solaris */
#elif defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG)
  pthread_setname_np (pthread_self (), "%s", (gchar *) name); /* on NetBSD */
#elif defined(HAVE_PTHREAD_SET_NAME_NP)
  pthread_set_name_np (pthread_self (), name); /* on FreeBSD, DragonFlyBSD, OpenBSD */
#endif
}

/* {{{1 GMutex and GCond futex implementation */

#if defined(USE_NATIVE_MUTEX)

#include <linux/futex.h>
#include <sys/syscall.h>

#ifndef FUTEX_WAIT_PRIVATE
#define FUTEX_WAIT_PRIVATE FUTEX_WAIT
#define FUTEX_WAKE_PRIVATE FUTEX_WAKE
#endif

/* We should expand the set of operations available in gatomic once we
 * have better C11 support in GCC in common distributions (ie: 4.9).
 *
 * Before then, let's define a couple of useful things for our own
 * purposes...
 */

#ifdef HAVE_STDATOMIC_H

#include <stdatomic.h>

#define exchange_acquire(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  atomic_compare_exchange_strong_explicit((atomic_uint *) (ptr), (old), (new), \
                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  atomic_exchange_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  atomic_store_explicit((atomic_uint *) (ptr), (new), __ATOMIC_RELEASE)

#else

#define exchange_acquire(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_ACQUIRE)
#define compare_exchange_acquire(ptr, old, new) \
  __atomic_compare_exchange_4((ptr), (old), (new), 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)

#define exchange_release(ptr, new) \
  __atomic_exchange_4((ptr), (new), __ATOMIC_RELEASE)
#define store_release(ptr, new) \
  __atomic_store_4((ptr), (new), __ATOMIC_RELEASE)

#endif

/* Our strategy for the mutex is pretty simple:
 *
 *  0: not in use
 *
 *  1: acquired by one thread only, no contention
 *
 *  > 1: contended
 *
 *
 * As such, attempting to acquire the lock should involve an increment.
 * If we find that the previous value was 0 then we can return
 * immediately.
 *
 * On unlock, we always store 0 to indicate that the lock is available.
 * If the value there was 1 before then we didn't have contention and
 * can return immediately.  If the value was something other than 1 then
 * we have the contended case and need to wake a waiter.
 *
 * If it was not 0 then there is another thread holding it and we must
 * wait.  We must always ensure that we mark a value >1 while we are
 * waiting in order to instruct the holder to do a wake operation on
 * unlock.
 */
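
/* Illustrative trace (an editorial addition, not from the original
 * sources) of how two threads A and B move through the states above:
 *
 *   A locks:   atomic add moves 0 -> 1; previous value 0, A owns the lock
 *   B locks:   atomic add moves 1 -> 2; previous value 1, so B enters the
 *              slow path, exchanges in 2 and sleeps in FUTEX_WAIT while
 *              the value is still 2
 *   A unlocks: exchange stores 0; previous value was 2 (not 1), so A
 *              issues a FUTEX_WAKE
 *   B wakes:   exchange moves 0 -> 2; previous value 0, B owns the lock
 *              (the value stays 2, so B's own unlock will also wake)
 */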

void
g_mutex_init (GMutex *mutex)
{
  mutex->i[0] = 0;
}

void
g_mutex_clear (GMutex *mutex)
{
  if G_UNLIKELY (mutex->i[0] != 0)
    {
      fprintf (stderr, "g_mutex_clear() called on uninitialised or locked mutex\n");
      g_abort ();
    }
}

static void __attribute__((noinline))
g_mutex_lock_slowpath (GMutex *mutex)
{
  /* Set to 2 to indicate contention.  If it was zero before then we
   * just acquired the lock.
   *
   * Otherwise, sleep for as long as the 2 remains...
   */
  while (exchange_acquire (&mutex->i[0], 2) != 0)
    syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) 2, NULL);
}

static void __attribute__((noinline))
g_mutex_unlock_slowpath (GMutex *mutex,
                         guint   prev)
{
  /* We seem to get better code for the uncontended case by splitting
   * this out...
   */
  if G_UNLIKELY (prev == 0)
    {
      fprintf (stderr, "Attempt to unlock mutex that was not locked\n");
      g_abort ();
    }

  syscall (__NR_futex, &mutex->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}

void
g_mutex_lock (GMutex *mutex)
{
  /* 0 -> 1 and we're done.  Anything else, and we need to wait... */
  if G_UNLIKELY (g_atomic_int_add (&mutex->i[0], 1) != 0)
    g_mutex_lock_slowpath (mutex);
}

void
g_mutex_unlock (GMutex *mutex)
{
  guint prev;

  prev = exchange_release (&mutex->i[0], 0);

  /* 1 -> 0 and we're done.  Anything else and we need to signal... */
  if G_UNLIKELY (prev != 1)
    g_mutex_unlock_slowpath (mutex, prev);
}

gboolean
g_mutex_trylock (GMutex *mutex)
{
  guint zero = 0;

  /* We don't want to touch the value at all unless we can move it from
   * exactly 0 to 1.
   */
  return compare_exchange_acquire (&mutex->i[0], &zero, 1);
}

/* Condition variables are implemented in a rather simple way as well.
 * In many ways, futex() as an abstraction is even more ideally suited
 * to condition variables than it is to mutexes.
 *
 * We store a generation counter.  We sample it with the lock held and
 * unlock before sleeping on the futex.
 *
 * Signalling simply involves increasing the counter and making the
 * appropriate futex call.
 *
 * The only thing that is the slightest bit complicated is timed waits
 * because we must convert our absolute time to relative.
 */
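
/* Illustrative sequence (an editorial addition, not from the original
 * sources) showing why the generation counter avoids lost wakeups:
 *
 *   waiter:    samples i[0] == 5 with the mutex held, unlocks, then
 *              calls FUTEX_WAIT expecting to find the value 5
 *   signaller: increments i[0] to 6 and calls FUTEX_WAKE
 *
 * If the increment lands before the waiter reaches FUTEX_WAIT, the
 * kernel sees i[0] != 5 and the wait returns immediately; either way
 * the waiter re-checks its predicate under the mutex, so the signal
 * cannot be lost between the unlock and the sleep.
 */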

void
g_cond_init (GCond *cond)
{
  cond->i[0] = 0;
}

void
g_cond_clear (GCond *cond)
{
}

void
g_cond_wait (GCond  *cond,
             GMutex *mutex)
{
  guint sampled = (guint) g_atomic_int_get (&cond->i[0]);

  g_mutex_unlock (mutex);
  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, NULL);
  g_mutex_lock (mutex);
}

void
g_cond_signal (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);

  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) 1, NULL);
}

void
g_cond_broadcast (GCond *cond)
{
  g_atomic_int_inc (&cond->i[0]);

  syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAKE_PRIVATE, (gsize) INT_MAX, NULL);
}

gboolean
g_cond_wait_until (GCond  *cond,
                   GMutex *mutex,
                   gint64  end_time)
{
  struct timespec now;
  struct timespec span;
  guint sampled;
  int res;
  gboolean success;

  if (end_time < 0)
    return FALSE;

  clock_gettime (CLOCK_MONOTONIC, &now);
  span.tv_sec = (end_time / 1000000) - now.tv_sec;
  span.tv_nsec = ((end_time % 1000000) * 1000) - now.tv_nsec;
  if (span.tv_nsec < 0)
    {
      span.tv_nsec += 1000000000;
      span.tv_sec--;
    }

  if (span.tv_sec < 0)
    return FALSE;

  sampled = cond->i[0];
  g_mutex_unlock (mutex);
  res = syscall (__NR_futex, &cond->i[0], (gsize) FUTEX_WAIT_PRIVATE, (gsize) sampled, &span);
  success = (res < 0 && errno == ETIMEDOUT) ? FALSE : TRUE;
  g_mutex_lock (mutex);

  return success;
}

#endif

/* {{{1 Epilogue */
/* vim:set foldmethod=marker: */