1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include "uv.h"
23 #include "internal.h"
24 
25 #include <pthread.h>
26 #include <assert.h>
27 #include <errno.h>
28 
29 #include <sys/time.h>
30 #include <sys/resource.h>  /* getrlimit() */
31 #include <unistd.h>  /* getpagesize() */
32 
33 #include <limits.h>
34 
35 #ifdef __MVS__
36 #include <sys/ipc.h>
37 #include <sys/sem.h>
38 #endif
39 
40 #if defined(__GLIBC__) && !defined(__UCLIBC__)
41 #include <gnu/libc-version.h>  /* gnu_get_libc_version() */
42 #endif
43 
44 #undef NANOSEC
45 #define NANOSEC ((uint64_t) 1e9)
46 
47 #if defined(PTHREAD_BARRIER_SERIAL_THREAD)
48 STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
49 #endif
50 
51 /* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
52 #if defined(_AIX) || \
53     defined(__OpenBSD__) || \
54     !defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* Custom barrier for platforms without (conformant) pthread barriers.
 * Allocates shared state and initializes its mutex and condition variable.
 * Returns 0 on success, UV_EINVAL on bad arguments, UV_ENOMEM on OOM, or
 * the error from the mutex/cond initializer.
 */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  struct _uv_barrier* b;
  int err;

  if (barrier == NULL || count == 0)
    return UV_EINVAL;

  b = uv__malloc(sizeof(*b));
  if (b == NULL)
    return UV_ENOMEM;

  b->in = 0;
  b->out = 0;
  b->threshold = count;

  err = uv_mutex_init(&b->mutex);
  if (err != 0)
    goto fail_free;

  err = uv_cond_init(&b->cond);
  if (err != 0)
    goto fail_destroy_mutex;

  barrier->b = b;
  return 0;

fail_destroy_mutex:
  uv_mutex_destroy(&b->mutex);
fail_free:
  uv__free(b);
  return err;
}
87 
88 
/* Block until `threshold` threads have reached the barrier.
 * Returns 1 in exactly one of the released threads, 0 in the others,
 * and UV_EINVAL if the barrier was not initialized.
 */
int uv_barrier_wait(uv_barrier_t* barrier) {
  struct _uv_barrier* b;
  int last;

  if (barrier == NULL || barrier->b == NULL)
    return UV_EINVAL;

  b = barrier->b;
  uv_mutex_lock(&b->mutex);

  if (++b->in == b->threshold) {
    /* Last arrival: reset the entry count, open the exit gate, and start
     * the wake-up chain with a single signal.
     */
    b->in = 0;
    b->out = b->threshold;
    uv_cond_signal(&b->cond);
  } else {
    /* Sleep until the last arrival resets b->in to zero; the loop guards
     * against spurious wakeups.
     */
    do
      uv_cond_wait(&b->cond, &b->mutex);
    while (b->in != 0);
  }

  /* Each departing thread signals once more, chaining wakeups to the next
   * waiter and also unblocking a destroyer waiting for b->out to drain.
   */
  last = (--b->out == 0);
  uv_cond_signal(&b->cond);

  uv_mutex_unlock(&b->mutex);
  return last;
}
115 
116 
/* Destroy a custom barrier.  Must not be called while threads are still
 * entering uv_barrier_wait(); waits for threads that were released but
 * have not yet left the barrier (b->out > 0) before tearing down.
 */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  struct _uv_barrier* b;

  b = barrier->b;
  uv_mutex_lock(&b->mutex);

  assert(b->in == 0);  /* No thread may still be arriving. */
  while (b->out != 0)
    uv_cond_wait(&b->cond, &b->mutex);

  /* Release-build counterpart of the assert above: a new arrival during
   * destruction is a hard usage error.
   */
  if (b->in != 0)
    abort();

  uv_mutex_unlock(&b->mutex);
  uv_mutex_destroy(&b->mutex);
  uv_cond_destroy(&b->cond);

  uv__free(barrier->b);
  barrier->b = NULL;
}
137 
138 #else
139 
/* Thin wrapper over the native pthread barrier. */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int rc;

  rc = pthread_barrier_init(barrier, NULL, count);
  return UV__ERR(rc);
}
143 
144 
/* Wait on the native pthread barrier; returns 1 only in the thread that
 * received PTHREAD_BARRIER_SERIAL_THREAD.  Any other nonzero result is a
 * usage error and aborts.
 */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int rc;

  rc = pthread_barrier_wait(barrier);
  if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD)
    abort();

  return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}
155 
156 
/* Destroy the native pthread barrier; failure means misuse, so abort. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  int rc;

  rc = pthread_barrier_destroy(barrier);
  if (rc != 0)
    abort();
}
161 
162 #endif
163 
164 
165 /* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
166  * too small to safely receive signals on.
167  *
168  * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
169  * the largest MINSIGSTKSZ of the architectures that musl supports) so
170  * let's use that as a lower bound.
171  *
172  * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
173  * is between 28 and 133 KB when compiling against glibc, depending
174  * on the architecture.
175  */
/* Smallest thread stack we consider safe (see the rationale above),
 * bumped to PTHREAD_STACK_MIN where the platform's minimum is larger.
 */
static size_t uv__min_stack_size(void) {
  static const size_t floor_size = 8192;

#ifdef PTHREAD_STACK_MIN  /* Not defined on NetBSD. */
  if (floor_size < (size_t) PTHREAD_STACK_MIN)
    return PTHREAD_STACK_MIN;
#endif  /* PTHREAD_STACK_MIN */

  return floor_size;
}
186 
187 
188 /* On Linux, threads created by musl have a much smaller stack than threads
189  * created by glibc (80 vs. 2048 or 4096 kB.)  Follow glibc for consistency.
190  */
/* Default thread stack size: 0 (platform default) off Linux; on Linux,
 * follow glibc's defaults (4 MB on PowerPC, 2 MB elsewhere).
 */
static size_t uv__default_stack_size(void) {
#if !defined(__linux__)
  return 0;
#elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
  return 4096 * 1024;  /* glibc default on PowerPC. */
#else
  return 2048 * 1024;  /* glibc default. */
#endif
}
200 
201 
202 /* On MacOS, threads other than the main thread are created with a reduced
203  * stack size by default.  Adjust to RLIMIT_STACK aligned to the page size.
204  */
/* Pick the stack size for new threads.  On macOS and Linux, use
 * RLIMIT_STACK rounded down to a page boundary; otherwise (or when the
 * limit is unusable) fall back to uv__default_stack_size().
 */
size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
  struct rlimit lim;

  /* getrlimit() can fail on some aarch64 systems due to a glibc bug where
   * the system call wrapper invokes the wrong system call. Don't treat
   * that as fatal, just use the default stack size instead.
   */
  if (getrlimit(RLIMIT_STACK, &lim))
    return uv__default_stack_size();

  if (lim.rlim_cur == RLIM_INFINITY)
    return uv__default_stack_size();

  /* pthread_attr_setstacksize() expects page-aligned values. */
  lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();

  /* Reject limits too small to safely receive signals on. */
  if (lim.rlim_cur >= (rlim_t) uv__min_stack_size())
    return lim.rlim_cur;
#endif

  return uv__default_stack_size();
}
228 
229 
/* Create a thread with default options (no custom stack size). */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t options;

  options.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &options, entry, arg);
}
235 
/* Create a thread.  When params requests a stack size, round it up to a
 * page boundary and clamp it to the safe minimum; otherwise derive a size
 * from uv__thread_stack_size().  Returns 0 or a UV_* error code.
 */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  pthread_attr_t attr_storage;
  pthread_attr_t* attr;
  size_t stack_size;
  int err;

  /* Used to squelch a -Wcast-function-type warning. */
  union {
    void (*in)(void*);
    void* (*out)(void*);
  } f;

  stack_size = 0;
  if (params->flags & UV_THREAD_HAS_STACK_SIZE)
    stack_size = params->stack_size;

  attr = NULL;
  if (stack_size == 0) {
    stack_size = uv__thread_stack_size();
  } else {
    size_t pagesize;
    size_t min_stack_size;

    /* Round up to the nearest page boundary. */
    pagesize = (size_t) getpagesize();
    stack_size = (stack_size + pagesize - 1) & ~(pagesize - 1);

    min_stack_size = uv__min_stack_size();
    if (stack_size < min_stack_size)
      stack_size = min_stack_size;
  }

  if (stack_size > 0) {
    attr = &attr_storage;

    if (pthread_attr_init(attr))
      abort();

    if (pthread_attr_setstacksize(attr, stack_size))
      abort();
  }

  f.in = entry;
  err = pthread_create(tid, attr, f.out, arg);

  if (attr != NULL)
    pthread_attr_destroy(attr);

  return UV__ERR(err);
}
286 
287 
uv_thread_self(void)288 uv_thread_t uv_thread_self(void) {
289   return pthread_self();
290 }
291 
/* Wait for *tid to terminate, discarding its exit value. */
int uv_thread_join(uv_thread_t *tid) {
  int rc;

  rc = pthread_join(*tid, NULL);
  return UV__ERR(rc);
}
295 
296 
/* Nonzero iff both handles refer to the same thread. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  int same;

  same = pthread_equal(*t1, *t2);
  return same;
}
300 
301 
/* Initialize a mutex.  Release builds (or platforms without it) use a
 * plain mutex; debug builds use PTHREAD_MUTEX_ERRORCHECK so misuse such
 * as recursive locking or unlock-by-non-owner fails loudly instead of
 * deadlocking silently.
 */
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  return UV__ERR(pthread_mutex_init(mutex, NULL));
#else
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr) != 0)
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK) != 0)
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr) != 0)
    abort();

  return UV__ERR(err);
#endif
}
323 
324 
/* Initialize a recursive mutex (same thread may lock it repeatedly). */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr) != 0)
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) != 0)
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr) != 0)
    abort();

  return UV__ERR(err);
}
342 
343 
/* Destroy a mutex; failure means misuse, so abort. */
void uv_mutex_destroy(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_destroy(mutex);
  if (rc != 0)
    abort();
}
348 
349 
/* Lock a mutex; failure means misuse, so abort. */
void uv_mutex_lock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_lock(mutex);
  if (rc != 0)
    abort();
}
354 
355 
/* Try to lock without blocking.  Returns 0 on success, UV_EBUSY when the
 * mutex is held (or the recursion limit was hit); any other error aborts.
 */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  int err;

  err = pthread_mutex_trylock(mutex);
  if (err == 0)
    return 0;

  if (err != EBUSY && err != EAGAIN)
    abort();

  return UV_EBUSY;
}
368 
369 
/* Unlock a mutex; failure means misuse, so abort. */
void uv_mutex_unlock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_unlock(mutex);
  if (rc != 0)
    abort();
}
374 
375 
/* Initialize a read-write lock with default attributes. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_init(rwlock, NULL);
  return UV__ERR(rc);
}
379 
380 
/* Destroy a read-write lock; failure means misuse, so abort. */
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_destroy(rwlock);
  if (rc != 0)
    abort();
}
385 
386 
/* Acquire a shared (reader) lock; failure means misuse, so abort. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_rdlock(rwlock);
  if (rc != 0)
    abort();
}
391 
392 
/* Try a shared (reader) lock without blocking.  Returns 0 on success,
 * UV_EBUSY when held exclusively or over the reader limit; anything else
 * aborts.
 */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_tryrdlock(rwlock);
  if (err == 0)
    return 0;

  if (err != EBUSY && err != EAGAIN)
    abort();

  return UV_EBUSY;
}
405 
406 
/* Release a shared (reader) lock; failure means misuse, so abort. */
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_unlock(rwlock);
  if (rc != 0)
    abort();
}
411 
412 
/* Acquire an exclusive (writer) lock; failure means misuse, so abort. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_wrlock(rwlock);
  if (rc != 0)
    abort();
}
417 
418 
/* Try an exclusive (writer) lock without blocking.  Returns 0 on success,
 * UV_EBUSY when the lock is held; anything else aborts.
 */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_trywrlock(rwlock);
  if (err == 0)
    return 0;

  if (err != EBUSY && err != EAGAIN)
    abort();

  return UV_EBUSY;
}
431 
432 
/* Release an exclusive (writer) lock; failure means misuse, so abort. */
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_unlock(rwlock);
  if (rc != 0)
    abort();
}
437 
438 
/* Run `callback` exactly once across all threads using this guard. */
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  int rc;

  rc = pthread_once(guard, callback);
  if (rc != 0)
    abort();
}
443 
444 #if defined(__APPLE__) && defined(__MACH__)
445 
/* macOS: create a Mach semaphore with the given initial count. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t err;

  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);

  switch (err) {
    case KERN_SUCCESS:
      return 0;
    case KERN_INVALID_ARGUMENT:
      return UV_EINVAL;
    case KERN_RESOURCE_SHORTAGE:
      return UV_ENOMEM;
    default:
      abort();
      return UV_EINVAL;  /* Satisfy the compiler. */
  }
}
460 
461 
/* macOS: destroy a Mach semaphore; failure means misuse, so abort. */
void uv_sem_destroy(uv_sem_t* sem) {
  kern_return_t rc;

  rc = semaphore_destroy(mach_task_self(), *sem);
  if (rc)
    abort();
}
466 
467 
/* macOS: increment the semaphore, waking one waiter if any. */
void uv_sem_post(uv_sem_t* sem) {
  kern_return_t rc;

  rc = semaphore_signal(*sem);
  if (rc)
    abort();
}
472 
473 
/* macOS: decrement the semaphore, blocking until it is positive.
 * Retries when the wait is aborted (e.g. by a signal).
 */
void uv_sem_wait(uv_sem_t* sem) {
  int r;

  for (;;) {
    r = semaphore_wait(*sem);
    if (r != KERN_ABORTED)
      break;
  }

  if (r != KERN_SUCCESS)
    abort();
}
484 
485 
/* macOS: non-blocking decrement, implemented as a timed wait with a zero
 * timeout.  Returns 0 on success, UV_EAGAIN when the count is zero.
 */
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t interval;
  kern_return_t err;

  interval.tv_sec = 0;
  interval.tv_nsec = 0;  /* Zero timeout == poll. */

  err = semaphore_timedwait(*sem, interval);

  switch (err) {
    case KERN_SUCCESS:
      return 0;
    case KERN_OPERATION_TIMED_OUT:
      return UV_EAGAIN;
    default:
      abort();
      return UV_EINVAL;  /* Satisfy the compiler. */
  }
}
502 
503 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
504 
505 #if defined(__GLIBC__) && !defined(__UCLIBC__)
506 
507 /* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
508  * by providing a custom implementation for glibc < 2.21 in terms of other
509  * concurrency primitives.
510  * Refs: https://github.com/nodejs/node/issues/19903 */
511 
512 /* To preserve ABI compatibility, we treat the uv_sem_t as storage for
513  * a pointer to the actual struct we're using underneath. */
514 
515 static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
516 static int platform_needs_custom_semaphore = 0;
517 
glibc_version_check(void)518 static void glibc_version_check(void) {
519   const char* version = gnu_get_libc_version();
520   platform_needs_custom_semaphore =
521       version[0] == '2' && version[1] == '.' &&
522       atoi(version + 2) < 21;
523 }
524 
525 #elif defined(__MVS__)
526 
527 #define platform_needs_custom_semaphore 1
528 
529 #else /* !defined(__GLIBC__) && !defined(__MVS__) */
530 
531 #define platform_needs_custom_semaphore 0
532 
533 #endif
534 
/* Custom semaphore: a counter guarded by a mutex, with a condition
 * variable to wake waiters when the count becomes positive.  Per the ABI
 * note above, the caller's uv_sem_t stores a pointer to one of these.
 */
typedef struct uv_semaphore_s {
  uv_mutex_t mutex;     /* Protects `value`. */
  uv_cond_t cond;       /* Signalled when `value` becomes nonzero. */
  unsigned int value;   /* Current semaphore count. */
} uv_semaphore_t;
540 
541 #if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \
542     platform_needs_custom_semaphore
543 STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
544 #endif
545 
/* Allocate and initialize a custom semaphore, storing its address in the
 * caller's uv_sem_t.  Returns 0, UV_ENOMEM, or the initializer's error.
 */
static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
  uv_semaphore_t* sem;
  int err;

  sem = uv__malloc(sizeof(*sem));
  if (sem == NULL)
    return UV_ENOMEM;

  err = uv_mutex_init(&sem->mutex);
  if (err != 0) {
    uv__free(sem);
    return err;
  }

  err = uv_cond_init(&sem->cond);
  if (err != 0) {
    uv_mutex_destroy(&sem->mutex);
    uv__free(sem);
    return err;
  }

  sem->value = value;
  *(uv_semaphore_t**) sem_ = sem;
  return 0;
}
569 
570 
/* Tear down and free a semaphore created by uv__custom_sem_init(). */
static void uv__custom_sem_destroy(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_cond_destroy(&sem->cond);
  uv_mutex_destroy(&sem->mutex);
  uv__free(sem);
}
579 
580 
/* Increment the semaphore and wake a waiter.
 *
 * The previous code signalled only on the 0 -> 1 transition.  That can
 * lose wakeups when several waiters are blocked and several posts arrive
 * before the first woken waiter runs: the count goes 0 -> 1 (one signal)
 * -> 2 (no signal), yet only one waiter was ever woken.  Signal on every
 * post instead; uv__custom_sem_wait() re-checks the count in a loop, so a
 * signal with no waiter is harmless.
 */
static void uv__custom_sem_post(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  sem->value++;
  uv_cond_signal(&sem->cond);
  uv_mutex_unlock(&sem->mutex);
}
591 
592 
/* Block until the count is positive, then consume one unit.  The while
 * loop guards against spurious condvar wakeups and lost races with other
 * waiters.
 */
static void uv__custom_sem_wait(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  while (sem->value == 0)
    uv_cond_wait(&sem->cond, &sem->mutex);
  sem->value--;
  uv_mutex_unlock(&sem->mutex);
}
603 
604 
/* Non-blocking decrement.  Returns 0 on success, UV_EAGAIN when the count
 * is zero or the internal mutex is contended (trylock keeps this function
 * from ever blocking).
 */
static int uv__custom_sem_trywait(uv_sem_t* sem_) {
  uv_semaphore_t* sem;
  int err;

  sem = *(uv_semaphore_t**)sem_;
  if (uv_mutex_trylock(&sem->mutex) != 0)
    return UV_EAGAIN;

  err = UV_EAGAIN;
  if (sem->value != 0) {
    sem->value--;
    err = 0;
  }

  uv_mutex_unlock(&sem->mutex);
  return err;
}
622 
/* Initialize a plain POSIX semaphore (process-private). */
static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
  int rc;

  rc = sem_init(sem, 0 /* not shared between processes */, value);
  if (rc != 0)
    return UV__ERR(errno);

  return 0;
}
628 
629 
/* Destroy a POSIX semaphore; failure means misuse, so abort. */
static void uv__sem_destroy(uv_sem_t* sem) {
  int rc;

  rc = sem_destroy(sem);
  if (rc != 0)
    abort();
}
634 
635 
/* Increment a POSIX semaphore; failure means misuse, so abort. */
static void uv__sem_post(uv_sem_t* sem) {
  int rc;

  rc = sem_post(sem);
  if (rc != 0)
    abort();
}
640 
641 
/* Decrement a POSIX semaphore, blocking until it is positive.  Restarts
 * the wait when interrupted by a signal.
 */
static void uv__sem_wait(uv_sem_t* sem) {
  int r;

  for (;;) {
    r = sem_wait(sem);
    if (r == 0 || errno != EINTR)
      break;
  }

  if (r != 0)
    abort();
}
652 
653 
/* Non-blocking decrement of a POSIX semaphore.  Returns 0 on success,
 * UV_EAGAIN when the count is zero; retries on EINTR, aborts otherwise.
 */
static int uv__sem_trywait(uv_sem_t* sem) {
  int r;

  do
    r = sem_trywait(sem);
  while (r == -1 && errno == EINTR);

  if (r == 0)
    return 0;

  if (errno != EAGAIN)
    abort();

  return UV_EAGAIN;
}
669 
/* Create a semaphore, choosing the custom implementation when the
 * platform's sem_t is unusable (old glibc, z/OS).
 */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
#if defined(__GLIBC__) && !defined(__UCLIBC__)
  /* Decide once, lazily, whether the glibc workaround is needed. */
  uv_once(&glibc_version_check_once, glibc_version_check);
#endif

  if (!platform_needs_custom_semaphore)
    return uv__sem_init(sem, value);

  return uv__custom_sem_init(sem, value);
}
680 
681 
/* Destroy a semaphore via whichever backend created it. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    uv__sem_destroy(sem);
  else
    uv__custom_sem_destroy(sem);
}
688 
689 
/* Increment a semaphore via whichever backend created it. */
void uv_sem_post(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    uv__sem_post(sem);
  else
    uv__custom_sem_post(sem);
}
696 
697 
/* Blocking decrement via whichever backend created the semaphore. */
void uv_sem_wait(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    uv__sem_wait(sem);
  else
    uv__custom_sem_wait(sem);
}
704 
705 
/* Non-blocking decrement via whichever backend created the semaphore. */
int uv_sem_trywait(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    return uv__sem_trywait(sem);

  return uv__custom_sem_trywait(sem);
}
712 
713 #endif /* defined(__APPLE__) && defined(__MACH__) */
714 
715 
716 #if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)
717 
/* macOS / z/OS: plain condvar (no condattr clock support needed here). */
int uv_cond_init(uv_cond_t* cond) {
  int rc;

  rc = pthread_cond_init(cond, NULL);
  return UV__ERR(rc);
}
721 
722 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
723 
/* Initialize a condition variable bound to the monotonic clock so that
 * uv_cond_timedwait() is immune to wall-clock adjustments.
 */
int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;

  err = pthread_condattr_init(&attr);
  if (err)
    return UV__ERR(err);

  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err)
    goto out_attr;

  err = pthread_cond_init(cond, &attr);
  if (err)
    goto out_attr;

  err = pthread_condattr_destroy(&attr);
  if (err)
    goto out_cond;

  return 0;

out_cond:
  pthread_cond_destroy(cond);
out_attr:
  pthread_condattr_destroy(&attr);
  return UV__ERR(err);
}
752 
753 #endif /* defined(__APPLE__) && defined(__MACH__) */
754 
/* Destroy a condition variable.
 *
 * On macOS, first perform a 1 ns timed wait on a throwaway mutex: it has
 * been reported that destroying condition variables that have been
 * signalled but not waited on can sometimes result in application
 * crashes, and the dummy wait drains any such pending signal.
 * See https://codereview.chromium.org/1323293005.
 */
void uv_cond_destroy(uv_cond_t* cond) {
#if defined(__APPLE__) && defined(__MACH__)
  pthread_mutex_t mutex;
  struct timespec ts;
  int err;

  if (pthread_mutex_init(&mutex, NULL))
    abort();

  if (pthread_mutex_lock(&mutex))
    abort();

  /* Shortest possible wait; we only need to enter the wait once. */
  ts.tv_sec = 0;
  ts.tv_nsec = 1;

  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
  if (err != 0 && err != ETIMEDOUT)
    abort();

  if (pthread_mutex_unlock(&mutex))
    abort();

  if (pthread_mutex_destroy(&mutex))
    abort();
#endif /* defined(__APPLE__) && defined(__MACH__) */

  if (pthread_cond_destroy(cond))
    abort();
}
788 
/* Wake one waiter; failure means misuse, so abort. */
void uv_cond_signal(uv_cond_t* cond) {
  int rc;

  rc = pthread_cond_signal(cond);
  if (rc != 0)
    abort();
}
793 
/* Wake all waiters; failure means misuse, so abort. */
void uv_cond_broadcast(uv_cond_t* cond) {
  int rc;

  rc = pthread_cond_broadcast(cond);
  if (rc != 0)
    abort();
}
798 
/* Wait on `cond` with `mutex` held; failure means misuse, so abort. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  int rc;

  rc = pthread_cond_wait(cond, mutex);
  if (rc != 0)
    abort();
}
803 
804 
/* Wait on `cond` with `mutex` held for at most `timeout` nanoseconds
 * (relative).  Returns 0 when signalled, UV_ETIMEDOUT on timeout; any
 * other error aborts.
 *
 * Fix: the z/OS path computed the absolute deadline with `tv_usec * 1e3`,
 * which promotes the whole expression to double and can lose precision on
 * large 64-bit timestamps before converting back to uint64_t.  Use pure
 * integer arithmetic instead.
 */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;
#if defined(__MVS__)
  struct timeval tv;
#endif

#if defined(__APPLE__) && defined(__MACH__)
  /* macOS offers a relative-timeout wait, so no clock base is needed. */
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
  if (gettimeofday(&tv, NULL))
    abort();
  timeout += tv.tv_sec * NANOSEC + (uint64_t) tv.tv_usec * UINT64_C(1000);
#else
  /* uv_cond_init() bound the condvar to CLOCK_MONOTONIC; use the same
   * clock for the deadline.
   */
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif

  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return UV_ETIMEDOUT;

  abort();
#ifndef __SUNPRO_C
  return UV_EINVAL;  /* Satisfy the compiler. */
#endif
}
841 
842 
/* Create a thread-local storage key (no destructor callback). */
int uv_key_create(uv_key_t* key) {
  int rc;

  rc = pthread_key_create(key, NULL);
  return UV__ERR(rc);
}
846 
847 
/* Delete a thread-local storage key; failure means misuse, so abort. */
void uv_key_delete(uv_key_t* key) {
  int rc;

  rc = pthread_key_delete(*key);
  if (rc != 0)
    abort();
}
852 
853 
/* Fetch the calling thread's value for `key` (NULL if never set). */
void* uv_key_get(uv_key_t* key) {
  void* value;

  value = pthread_getspecific(*key);
  return value;
}
857 
858 
/* Set the calling thread's value for `key`; failure aborts. */
void uv_key_set(uv_key_t* key, void* value) {
  int rc;

  rc = pthread_setspecific(*key, value);
  if (rc != 0)
    abort();
}
863