• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include <assert.h>
23 #include <limits.h>
24 #include <stdlib.h>
25 
26 #if defined(__MINGW64_VERSION_MAJOR)
27 /* MemoryBarrier expands to __mm_mfence in some cases (x86+sse2), which may
28  * require this header in some versions of mingw64. */
29 #include <intrin.h>
30 #endif
31 
32 #include "uv.h"
33 #include "internal.h"
34 
/* Slow path of uv_once(): arbitrates which of several racing threads runs
 * `callback`. The winner publishes an event handle into guard->event with an
 * interlocked compare-exchange; losers wait on that event until the winner
 * signals completion. */
static void uv__once_inner(uv_once_t* guard, void (*callback)(void)) {
  DWORD result;
  HANDLE existing_event, created_event;

  /* Manual-reset (bManualReset=1), initially non-signaled event. */
  created_event = CreateEvent(NULL, 1, 0, NULL);
  if (created_event == 0) {
    /* Could fail in a low-memory situation? */
    uv_fatal_error(GetLastError(), "CreateEvent");
  }

  /* Atomically install our event iff no other thread got there first. */
  existing_event = InterlockedCompareExchangePointer(&guard->event,
                                                     created_event,
                                                     NULL);

  if (existing_event == NULL) {
    /* We won the race */
    callback();

    /* Wake the losers, then set the flag that lets future uv_once() calls
     * take the fast path and skip this function entirely. */
    result = SetEvent(created_event);
    assert(result);
    guard->ran = 1;

  } else {
    /* We lost the race. Destroy the event we created and wait for the existing
     * one to become signaled. */
    CloseHandle(created_event);
    result = WaitForSingleObject(existing_event, INFINITE);
    assert(result == WAIT_OBJECT_0);
  }
}
65 
66 
/* Runs `callback` exactly once per `guard`, no matter how many threads call
 * this concurrently. A completed guard is detected cheaply without touching
 * any kernel object; only contended/first calls enter the slow path. */
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  if (!guard->ran)
    uv__once_inner(guard, callback);
}
75 
76 
/* Verify that uv_thread_t can be stored in a TLS slot. */
STATIC_ASSERT(sizeof(uv_thread_t) <= sizeof(void*));

/* TLS slot caching each thread's uv_thread_t, and the uv_once guard that
 * makes the slot's lazy creation thread-safe. */
static uv_key_t uv__current_thread_key;
static uv_once_t uv__current_thread_init_guard = UV_ONCE_INIT;
82 
83 
uv__init_current_thread_key(void)84 static void uv__init_current_thread_key(void) {
85   if (uv_key_create(&uv__current_thread_key))
86     abort();
87 }
88 
89 
/* Heap-allocated bundle handed from uv_thread_create_ex() to the new
 * thread's trampoline (uv__thread_start), which copies and frees it. */
struct thread_ctx {
  void (*entry)(void* arg);  /* User entry point to run on the new thread. */
  void* arg;                 /* Opaque argument forwarded to `entry`. */
  uv_thread_t self;          /* The new thread's own handle. */
};
95 
96 
uv__thread_start(void * arg)97 static UINT __stdcall uv__thread_start(void* arg) {
98   struct thread_ctx *ctx_p;
99   struct thread_ctx ctx;
100 
101   ctx_p = arg;
102   ctx = *ctx_p;
103   uv__free(ctx_p);
104 
105   uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
106   uv_key_set(&uv__current_thread_key, ctx.self);
107 
108   ctx.entry(ctx.arg);
109 
110   return 0;
111 }
112 
113 
/* Convenience wrapper: create a thread with default options (no custom
 * stack size). See uv_thread_create_ex() for the full version. */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t options;

  options.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &options, entry, arg);
}
119 
/* Creates a thread running `entry(arg)` and stores its handle in `*tid`.
 * `params` may request a specific stack size via UV_THREAD_HAS_STACK_SIZE;
 * a size of 0 means "use the executable's default stack size".
 * Returns 0 on success or a UV_E* code mapped from errno. */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  struct thread_ctx* ctx;
  int err;
  HANDLE thread;
  SYSTEM_INFO sysinfo;
  size_t stack_size;
  size_t pagesize;

  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;

  if (stack_size != 0) {
    GetNativeSystemInfo(&sysinfo);
    pagesize = (size_t)sysinfo.dwPageSize;
    /* Round up to the nearest page boundary. */
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);

    /* _beginthreadex takes an `unsigned` stack size; reject values that
     * would be silently truncated. */
    if ((unsigned)stack_size != stack_size)
      return UV_EINVAL;
  }

  ctx = uv__malloc(sizeof(*ctx));
  if (ctx == NULL)
    return UV_ENOMEM;

  ctx->entry = entry;
  ctx->arg = arg;

  /* Create the thread in suspended state so we have a chance to pass
   * its own creation handle to it */
  thread = (HANDLE) _beginthreadex(NULL,
                                   (unsigned)stack_size,
                                   uv__thread_start,
                                   ctx,
                                   CREATE_SUSPENDED,
                                   NULL);
  if (thread == NULL) {
    /* _beginthreadex reports failures through errno. */
    err = errno;
    uv__free(ctx);
  } else {
    err = 0;
    *tid = thread;
    /* Writing ctx->self after creation is safe: the thread is still
     * suspended and cannot have read the context yet. */
    ctx->self = thread;
    ResumeThread(thread);
  }

  /* Map the CRT errno values _beginthreadex can produce onto UV_E* codes. */
  switch (err) {
    case 0:
      return 0;
    case EACCES:
      return UV_EACCES;
    case EAGAIN:
      return UV_EAGAIN;
    case EINVAL:
      return UV_EINVAL;
  }

  return UV_EIO;
}
182 
/* Sets the CPU affinity of thread `tid` from `cpumask` (one 0/1 byte per
 * CPU). If `oldmask` is non-NULL the previous affinity is written there.
 * `mask_size` must be at least uv_cpumask_size() bytes.
 * Fix: the shifts used to build/test the mask were `1 << i`, an `int`-width
 * shift that is undefined behavior for i >= 31 and truncates the mask to
 * 32 bits on 64-bit builds with more than 32 processors. Use DWORD_PTR-wide
 * shifts instead. */
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  int i;
  HANDLE hproc;
  DWORD_PTR procmask;
  DWORD_PTR sysmask;
  DWORD_PTR threadmask;
  DWORD_PTR oldthreadmask;
  int cpumasksize;

  cpumasksize = uv_cpumask_size();
  assert(cpumasksize > 0);
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;

  hproc = GetCurrentProcess();
  if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
    return uv_translate_sys_error(GetLastError());

  threadmask = 0;
  for (i = 0; i < cpumasksize; i++) {
    if (cpumask[i]) {
      /* Requested CPUs must be a subset of the process affinity mask. */
      if (procmask & (((DWORD_PTR)1) << i))
        threadmask |= ((DWORD_PTR)1) << i;
      else
        return UV_EINVAL;
    }
  }

  oldthreadmask = SetThreadAffinityMask(*tid, threadmask);
  if (oldthreadmask == 0)
    return uv_translate_sys_error(GetLastError());

  if (oldmask != NULL) {
    for (i = 0; i < cpumasksize; i++)
      oldmask[i] = (oldthreadmask >> i) & 1;
  }

  return 0;
}
225 
/* Fills `cpumask` (one 0/1 byte per CPU) with the affinity of thread `tid`.
 * `mask_size` must be at least uv_cpumask_size() bytes.
 * NOTE(review): Windows has no direct "query thread affinity" API, so this
 * sets the affinity to the full process mask and immediately restores the
 * previous value that SetThreadAffinityMask returns. The target thread's
 * affinity is therefore briefly widened — a visible, racy side effect. */
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  int i;
  HANDLE hproc;
  DWORD_PTR procmask;
  DWORD_PTR sysmask;
  DWORD_PTR threadmask;
  int cpumasksize;

  cpumasksize = uv_cpumask_size();
  assert(cpumasksize > 0);
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;

  hproc = GetCurrentProcess();
  if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
    return uv_translate_sys_error(GetLastError());

  /* First call reads the old mask (by setting a new one); the second call
   * restores it. Either call failing is reported to the caller. */
  threadmask = SetThreadAffinityMask(*tid, procmask);
  if (threadmask == 0 || SetThreadAffinityMask(*tid, threadmask) == 0)
    return uv_translate_sys_error(GetLastError());

  for (i = 0; i < cpumasksize; i++)
    cpumask[i] = (threadmask >> i) & 1;

  return 0;
}
254 
uv_thread_getcpu(void)255 int uv_thread_getcpu(void) {
256   return GetCurrentProcessorNumber();
257 }
258 
/* Returns the uv_thread_t (a real HANDLE) identifying the calling thread.
 * Threads started by uv_thread_create() had their handle cached in TLS by
 * uv__thread_start(); foreign threads (e.g. the main thread) get a handle
 * created and cached on first call. DuplicateHandle is needed because
 * GetCurrentThread() returns only a pseudo-handle.
 * NOTE(review): the handle duplicated here for foreign threads is never
 * closed — it lives until process exit; confirm this is intentional. */
uv_thread_t uv_thread_self(void) {
  uv_thread_t key;
  uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
  key = uv_key_get(&uv__current_thread_key);
  if (key == NULL) {
      /* If the thread wasn't started by uv_thread_create (such as the main
       * thread), we assign an id to it now. */
      if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                           GetCurrentProcess(), &key, 0,
                           FALSE, DUPLICATE_SAME_ACCESS)) {
          uv_fatal_error(GetLastError(), "DuplicateHandle");
      }
      uv_key_set(&uv__current_thread_key, key);
  }
  return key;
}
275 
276 
/* Blocks until thread `*tid` exits, then closes and clears its handle.
 * Returns 0 on success or a translated system error. */
int uv_thread_join(uv_thread_t *tid) {
  /* Any status other than WAIT_OBJECT_0 (e.g. WAIT_FAILED) means failure. */
  if (WaitForSingleObject(*tid, INFINITE) != WAIT_OBJECT_0)
    return uv_translate_sys_error(GetLastError());

  CloseHandle(*tid);
  *tid = 0;
  MemoryBarrier();  /* For feature parity with pthread_join(). */
  return 0;
}
287 
288 
/* Compares two thread identifiers (handle values) for equality. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return *t1 == *t2 ? 1 : 0;
}
292 
293 
/* Initializes `mutex` (a CRITICAL_SECTION). Always succeeds, so the
 * return value exists only for cross-platform API symmetry. */
int uv_mutex_init(uv_mutex_t* mutex) {
  InitializeCriticalSection(mutex);
  return 0;
}
298 
299 
/* Windows critical sections already permit recursive acquisition by the
 * owning thread, so this is identical to plain uv_mutex_init(). */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  return uv_mutex_init(mutex);
}
303 
304 
/* Releases the resources held by an initialized, unowned mutex. */
void uv_mutex_destroy(uv_mutex_t* mutex) {
  DeleteCriticalSection(mutex);
}
308 
309 
/* Acquires `mutex`, blocking until it becomes available. */
void uv_mutex_lock(uv_mutex_t* mutex) {
  EnterCriticalSection(mutex);
}
313 
314 
/* Attempts to take `mutex` without blocking.
 * Returns 0 on success, UV_EBUSY when another thread holds it. */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  if (!TryEnterCriticalSection(mutex))
    return UV_EBUSY;
  return 0;
}
321 
322 
/* Releases `mutex`; the caller must currently own it. */
void uv_mutex_unlock(uv_mutex_t* mutex) {
  LeaveCriticalSection(mutex);
}
326 
/* Ensure that the ABI for this type remains stable in v1.x */
/* The expected size differs per pointer width of the target. */
#ifdef _WIN64
STATIC_ASSERT(sizeof(uv_rwlock_t) == 80);
#else
STATIC_ASSERT(sizeof(uv_rwlock_t) == 48);
#endif
333 
/* Initializes `rwlock`. Clears the whole structure before setting up the
 * embedded SRW lock. Always succeeds; returns 0. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  memset(rwlock, 0, sizeof(*rwlock));
  InitializeSRWLock(&rwlock->read_write_lock_);
  return 0;
}
340 
341 
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  /* Intentionally empty: an SRW lock needs no explicit teardown as long as
   * no threads are still waiting on it. See the InitializeSRWLock remarks:
   * https://docs.microsoft.com/windows/win32/api/synchapi/nf-synchapi-initializesrwlock#remarks */
}
346 
347 
/* Acquires `rwlock` for shared (read) access, blocking if necessary. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  AcquireSRWLockShared(&rwlock->read_write_lock_);
}
351 
352 
/* Non-blocking shared acquisition.
 * Returns 0 when the read lock was taken, UV_EBUSY otherwise. */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  if (TryAcquireSRWLockShared(&rwlock->read_write_lock_))
    return 0;
  return UV_EBUSY;
}
359 
360 
/* Releases a shared (read) hold on `rwlock`. */
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  ReleaseSRWLockShared(&rwlock->read_write_lock_);
}
364 
365 
/* Acquires `rwlock` for exclusive (write) access, blocking if necessary. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  AcquireSRWLockExclusive(&rwlock->read_write_lock_);
}
369 
370 
/* Non-blocking exclusive acquisition.
 * Returns 0 when the write lock was taken, UV_EBUSY otherwise. */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  if (TryAcquireSRWLockExclusive(&rwlock->read_write_lock_))
    return 0;
  return UV_EBUSY;
}
377 
378 
/* Releases an exclusive (write) hold on `rwlock`. */
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  ReleaseSRWLockExclusive(&rwlock->read_write_lock_);
}
382 
383 
/* Creates a counting semaphore with initial count `value`.
 * Returns 0 on success or a translated system error. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  HANDLE h;

  h = CreateSemaphore(NULL, value, INT_MAX, NULL);
  if (h == NULL)
    return uv_translate_sys_error(GetLastError());

  *sem = h;
  return 0;
}
391 
392 
/* Destroys the semaphore; a failing CloseHandle indicates corruption, so
 * there is nothing sensible to do but abort. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (CloseHandle(*sem) == 0)
    abort();
}
397 
398 
/* Increments the semaphore count by one, waking one waiter if any. */
void uv_sem_post(uv_sem_t* sem) {
  if (ReleaseSemaphore(*sem, 1, NULL) == 0)
    abort();
}
403 
404 
/* Blocks until the semaphore count can be decremented. */
void uv_sem_wait(uv_sem_t* sem) {
  DWORD status;

  status = WaitForSingleObject(*sem, INFINITE);
  if (status != WAIT_OBJECT_0)
    abort();
}
409 
410 
/* Non-blocking decrement: returns 0 on success, UV_EAGAIN when the count
 * is zero. Any other wait status is fatal. */
int uv_sem_trywait(uv_sem_t* sem) {
  switch (WaitForSingleObject(*sem, 0)) {
    case WAIT_OBJECT_0:
      return 0;
    case WAIT_TIMEOUT:
      return UV_EAGAIN;
    default:
      abort();
  }
  return -1; /* Satisfy the compiler. */
}
423 
424 
/* Initializes `cond`. Condition-variable setup cannot fail on Windows;
 * the return value exists for cross-platform API symmetry. */
int uv_cond_init(uv_cond_t* cond) {
  InitializeConditionVariable(&cond->cond_var);
  return 0;
}
429 
430 
void uv_cond_destroy(uv_cond_t* cond) {
  /* Windows condition variables need no teardown; the cast below only
   * silences the unused-parameter warning. */
  (void) &cond;
}
435 
436 
/* Wakes one thread waiting on `cond`, if any. */
void uv_cond_signal(uv_cond_t* cond) {
  WakeConditionVariable(&cond->cond_var);
}
440 
441 
/* Wakes every thread currently waiting on `cond`. */
void uv_cond_broadcast(uv_cond_t* cond) {
  WakeAllConditionVariable(&cond->cond_var);
}
445 
446 
/* Atomically releases `mutex` and waits on `cond`; reacquires the mutex
 * before returning. An INFINITE wait can only fail catastrophically. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (SleepConditionVariableCS(&cond->cond_var, mutex, INFINITE) == 0)
    abort();
}
451 
452 
/* Waits on `cond` (releasing/reacquiring `mutex`) for at most `timeout`
 * nanoseconds. Returns 0 when signaled, UV_ETIMEDOUT on timeout; any other
 * failure aborts.
 * Fix: convert ns -> ms with integer division instead of the former
 * `timeout / 1e6`. Routing a uint64_t through `double` loses precision for
 * timeouts >= 2^53 ns and drags in floating-point arithmetic for nothing;
 * the truncating integer result is otherwise identical. */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  if (SleepConditionVariableCS(&cond->cond_var,
                               mutex,
                               (DWORD)(timeout / 1000000)))
    return 0;
  if (GetLastError() != ERROR_TIMEOUT)
    abort();
  return UV_ETIMEDOUT;
}
460 
461 
/* Allocates a TLS slot for `key`.
 * Returns 0 on success, UV_ENOMEM when no slots are left. */
int uv_key_create(uv_key_t* key) {
  DWORD index;

  index = TlsAlloc();
  if (index == TLS_OUT_OF_INDEXES)
    return UV_ENOMEM;

  key->tls_index = index;
  return 0;
}
468 
469 
/* Frees the TLS slot and poisons the index so reuse of a deleted key is
 * detectable. */
void uv_key_delete(uv_key_t* key) {
  if (!TlsFree(key->tls_index))
    abort();
  key->tls_index = TLS_OUT_OF_INDEXES;
}
475 
476 
/* Reads the calling thread's value for `key`.
 * TlsGetValue returns NULL both when the stored value is NULL and on
 * failure; GetLastError() disambiguates, and a real failure aborts. */
void* uv_key_get(uv_key_t* key) {
  void* value;

  value = TlsGetValue(key->tls_index);
  if (value == NULL && GetLastError() != ERROR_SUCCESS)
    abort();

  return value;
}
487 
488 
/* Stores `value` in the calling thread's slot for `key`. */
void uv_key_set(uv_key_t* key, void* value) {
  if (!TlsSetValue(key->tls_index, value))
    abort();
}
493