/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)
#include <crtdbg.h>
#endif

#include "uv.h"
#include "internal.h"
#include "queue.h"
#include "handle-inl.h"
#include "heap-inl.h"
#include "req-inl.h"

/* uv_once initialization guards */
static uv_once_t uv_init_guard_ = UV_ONCE_INIT;


#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
/* Our CRT debug report handler allows us to temporarily disable asserts
 * just for the current thread.
 */

UV_THREAD_LOCAL int uv__crt_assert_enabled = TRUE;

static int uv__crt_dbg_report_handler(int report_type, char* message, int* ret_val) {
  if (uv__crt_assert_enabled || report_type != _CRT_ASSERT)
    return FALSE;

  if (ret_val) {
    /* Set ret_val to 0 to continue with normal execution.
     * Set ret_val to 1 to trigger a breakpoint.
     */

    if (IsDebuggerPresent())
      *ret_val = 1;
    else
      *ret_val = 0;
  }

  /* Don't call _CrtDbgReport. */
  return TRUE;
}
#else
UV_THREAD_LOCAL int uv__crt_assert_enabled = FALSE;
#endif


#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
static void uv__crt_invalid_parameter_handler(const wchar_t* expression,
    const wchar_t* function, const wchar_t* file, unsigned int line,
    uintptr_t reserved) {
  /* No-op. */
}
#endif

static uv_loop_t** uv__loops;
static int uv__loops_size;
static int uv__loops_capacity;
#define UV__LOOPS_CHUNK_SIZE 8
static uv_mutex_t uv__loops_lock;


static void uv__loops_init(void) {
  uv_mutex_init(&uv__loops_lock);
}


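/* Register a loop in the process-wide registry so uv__wake_all_loops() can
 * reach it. The array grows in UV__LOOPS_CHUNK_SIZE increments; returns
 * ERROR_OUTOFMEMORY if the reallocation fails.
 */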
static int uv__loops_add(uv_loop_t* loop) {
  uv_loop_t** new_loops;
  int new_capacity, i;

  uv_mutex_lock(&uv__loops_lock);

  if (uv__loops_size == uv__loops_capacity) {
    new_capacity = uv__loops_capacity + UV__LOOPS_CHUNK_SIZE;
    new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * new_capacity);
    if (!new_loops)
      goto failed_loops_realloc;
    uv__loops = new_loops;
    for (i = uv__loops_capacity; i < new_capacity; ++i)
      uv__loops[i] = NULL;
    uv__loops_capacity = new_capacity;
  }
  uv__loops[uv__loops_size] = loop;
  ++uv__loops_size;

  uv_mutex_unlock(&uv__loops_lock);
  return 0;

failed_loops_realloc:
  uv_mutex_unlock(&uv__loops_lock);
  return ERROR_OUTOFMEMORY;
}


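/* Remove a loop from the registry, compacting the array by moving the last
 * entry into the vacated slot. The array is shrunk only once it has grown
 * past 4 * UV__LOOPS_CHUNK_SIZE and more than half of it is free.
 */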
static void uv__loops_remove(uv_loop_t* loop) {
  int loop_index;
  int smaller_capacity;
  uv_loop_t** new_loops;

  uv_mutex_lock(&uv__loops_lock);

  for (loop_index = 0; loop_index < uv__loops_size; ++loop_index) {
    if (uv__loops[loop_index] == loop)
      break;
  }
  /* If loop was not found, ignore */
  if (loop_index == uv__loops_size)
    goto loop_removed;

  uv__loops[loop_index] = uv__loops[uv__loops_size - 1];
  uv__loops[uv__loops_size - 1] = NULL;
  --uv__loops_size;

  if (uv__loops_size == 0) {
    uv__loops_capacity = 0;
    uv__free(uv__loops);
    uv__loops = NULL;
    goto loop_removed;
  }

  /* If we didn't grow too big, skip downsizing */
  if (uv__loops_capacity < 4 * UV__LOOPS_CHUNK_SIZE)
    goto loop_removed;

  /* Downsize only if more than half of the buffer is free */
  smaller_capacity = uv__loops_capacity / 2;
  if (uv__loops_size >= smaller_capacity)
    goto loop_removed;
  new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * smaller_capacity);
  if (!new_loops)
    goto loop_removed;
  uv__loops = new_loops;
  uv__loops_capacity = smaller_capacity;

loop_removed:
  uv_mutex_unlock(&uv__loops_lock);
}
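/* Post an empty completion packet to every registered loop's IOCP so that a
 * blocked GetQueuedCompletionStatus[Ex] call returns and the loop wakes up
 * (used, for example, by the system-wakeup detection code).
 */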
void uv__wake_all_loops(void) {
  int i;
  uv_loop_t* loop;

  uv_mutex_lock(&uv__loops_lock);
  for (i = 0; i < uv__loops_size; ++i) {
    loop = uv__loops[i];
    assert(loop);
    if (loop->iocp != INVALID_HANDLE_VALUE)
      PostQueuedCompletionStatus(loop->iocp, 0, 0, NULL);
  }
  uv_mutex_unlock(&uv__loops_lock);
}

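/* One-time process-global initialization, run via uv__once_init(). Winapi
 * function pointers are loaded before the subsystems that may need them.
 */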
static void uv__init(void) {
  /* Tell Windows that we will handle critical errors. */
  SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX |
               SEM_NOOPENFILEERRORBOX);

  /* Tell the CRT to not exit the application when an invalid parameter is
   * passed. The main issue is that invalid FDs will trigger this behavior.
   */
#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
  _set_invalid_parameter_handler(uv__crt_invalid_parameter_handler);
#endif

  /* We also need to set up our debug report handler because some CRT
   * functions (e.g. _get_osfhandle) raise an assert when called with invalid
   * FDs even though they return the proper error code in the release build.
   */
#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
  _CrtSetReportHook(uv__crt_dbg_report_handler);
#endif

  /* Initialize tracking of all uv loops */
  uv__loops_init();

  /* Fetch winapi function pointers. This must be done first because other
   * initialization code might need these function pointers to be loaded.
   */
  uv__winapi_init();

  /* Initialize winsock */
  uv__winsock_init();

  /* Initialize FS */
  uv__fs_init();

  /* Initialize signal stuff */
  uv__signals_init();

  /* Initialize console */
  uv__console_init();

  /* Initialize utilities */
  uv__util_init();

  /* Initialize system wakeup detection */
  uv__init_detect_system_wakeup();
}


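/* Initialize a loop: create its I/O completion port, internal fields, timer
 * heap, work queue and internal wq_async handle, and register it in the
 * global loop registry. On failure, every partially initialized resource is
 * torn down again before the error is returned.
 *
 * A minimal caller-side sketch (illustrative only, not part of this file):
 *
 *   uv_loop_t loop;
 *   int err = uv_loop_init(&loop);
 *   if (err != 0)
 *     fprintf(stderr, "uv_loop_init: %s\n", uv_strerror(err));
 *   // ... start handles, then: uv_run(&loop, UV_RUN_DEFAULT);
 *   uv_loop_close(&loop);
 */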
int uv_loop_init(uv_loop_t* loop) {
  uv__loop_internal_fields_t* lfields;
  struct heap* timer_heap;
  int err;

  /* Initialize libuv itself first */
  uv__once_init();

  /* Create an I/O completion port */
  loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
  if (loop->iocp == NULL)
    return uv_translate_sys_error(GetLastError());

  lfields = (uv__loop_internal_fields_t*) uv__calloc(1, sizeof(*lfields));
  if (lfields == NULL)
    return UV_ENOMEM;
  loop->internal_fields = lfields;

  err = uv_mutex_init(&lfields->loop_metrics.lock);
  if (err)
    goto fail_metrics_mutex_init;
  memset(&lfields->loop_metrics.metrics,
         0,
         sizeof(lfields->loop_metrics.metrics));

  /* To prevent uninitialized memory access, loop->time must be initialized
   * to zero before calling uv_update_time for the first time.
   */
  loop->time = 0;
  uv_update_time(loop);

  uv__queue_init(&loop->wq);
  uv__queue_init(&loop->handle_queue);
  loop->active_reqs.count = 0;
  loop->active_handles = 0;

  loop->pending_reqs_tail = NULL;

  loop->endgame_handles = NULL;

  loop->timer_heap = timer_heap = uv__malloc(sizeof(*timer_heap));
  if (timer_heap == NULL) {
    err = UV_ENOMEM;
    goto fail_timers_alloc;
  }

  heap_init(timer_heap);

  loop->check_handles = NULL;
  loop->prepare_handles = NULL;
  loop->idle_handles = NULL;

  loop->next_prepare_handle = NULL;
  loop->next_check_handle = NULL;
  loop->next_idle_handle = NULL;

  memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);

  loop->timer_counter = 0;
  loop->stop_flag = 0;

  err = uv_mutex_init(&loop->wq_mutex);
  if (err)
    goto fail_mutex_init;

  err = uv_async_init(loop, &loop->wq_async, uv__work_done);
  if (err)
    goto fail_async_init;

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV_HANDLE_INTERNAL;

  err = uv__loops_add(loop);
  if (err)
    goto fail_async_init;

  loop->magic = UV_LOOP_MAGIC;
  return 0;

fail_async_init:
  uv_mutex_destroy(&loop->wq_mutex);

fail_mutex_init:
  uv__free(timer_heap);
  loop->timer_heap = NULL;

fail_timers_alloc:
  uv_mutex_destroy(&lfields->loop_metrics.lock);

fail_metrics_mutex_init:
  uv__free(lfields);
  loop->internal_fields = NULL;
  CloseHandle(loop->iocp);
  loop->iocp = INVALID_HANDLE_VALUE;

  return err;
}


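/* Refresh the cached loop time from the high-resolution clock, in
 * milliseconds. The loop time never moves backwards.
 */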
void uv_update_time(uv_loop_t* loop) {
  uint64_t new_time = uv__hrtime(1000);
  assert(new_time >= loop->time);
  loop->time = new_time;
}


void uv__once_init(void) {
  uv_once(&uv_init_guard_, uv__init);
}


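/* Tear down a loop previously set up by uv_loop_init(): unregister it, close
 * the internal wq_async handle, close the cached poll peer sockets, and free
 * the timer heap and internal fields before closing the IOCP handle.
 */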
void uv__loop_close(uv_loop_t* loop) {
  uv__loop_internal_fields_t* lfields;
  size_t i;

  uv__loops_remove(loop);

  /* Close the async handle without needing an extra loop iteration.
   * We might have a pending message, but we're just going to destroy the IOCP
   * soon, so we can just discard it now without the usual risk of getting
   * another notification from GetQueuedCompletionStatusEx after calling the
   * close_cb (which we also skip defining). We'll assert later that the queue
   * was actually empty and all reqs handled. */
  loop->wq_async.async_sent = 0;
  loop->wq_async.close_cb = NULL;
  uv__handle_closing(&loop->wq_async);
  uv__handle_close(&loop->wq_async);

  for (i = 0; i < ARRAY_SIZE(loop->poll_peer_sockets); i++) {
    SOCKET sock = loop->poll_peer_sockets[i];
    if (sock != 0 && sock != INVALID_SOCKET)
      closesocket(sock);
  }

  uv_mutex_lock(&loop->wq_mutex);
  assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!");
  assert(!uv__has_active_reqs(loop));
  uv_mutex_unlock(&loop->wq_mutex);
  uv_mutex_destroy(&loop->wq_mutex);

  uv__free(loop->timer_heap);
  loop->timer_heap = NULL;

  lfields = uv__get_internal_fields(loop);
  uv_mutex_destroy(&lfields->loop_metrics.lock);
  uv__free(lfields);
  loop->internal_fields = NULL;

  CloseHandle(loop->iocp);
  loop->magic = ~UV_LOOP_MAGIC;
}


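/* Handle uv_loop_configure() options. On Windows only UV_METRICS_IDLE_TIME
 * is supported; any other option yields UV_ENOSYS.
 */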
int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
  uv__loop_internal_fields_t* lfields;

  lfields = uv__get_internal_fields(loop);
  if (option == UV_METRICS_IDLE_TIME) {
    lfields->flags |= UV_METRICS_IDLE_TIME;
    return 0;
  }

  return UV_ENOSYS;
}


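/* There is no pollable backend file descriptor on Windows; the backend is an
 * I/O completion port.
 */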
int uv_backend_fd(const uv_loop_t* loop) {
  return -1;
}


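/* fork() does not exist on Windows, so re-initializing a loop in a forked
 * child is not supported.
 */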
int uv_loop_fork(uv_loop_t* loop) {
  return UV_ENOSYS;
}


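/* A loop is alive while it has active handles or requests, pending reqs to
 * process, or handles waiting for their endgame (close) phase.
 */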
static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         loop->pending_reqs_tail != NULL ||
         loop->endgame_handles != NULL;
}


int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}


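/* Compute how long the next poll may block. Returns 0 when there is work
 * that must run immediately (stop requested, nothing active, pending reqs,
 * idle or endgame handles); otherwise defers to uv__next_timeout(), i.e. the
 * delay until the next timer expires.
 */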
int uv_backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag == 0 &&
      /* uv__loop_alive(loop) && */
      (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
      loop->pending_reqs_tail == NULL &&
      loop->idle_handles == NULL &&
      loop->endgame_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
}


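/* Poll fallback that dequeues one completion packet at a time with
 * GetQueuedCompletionStatus. Used when GetQueuedCompletionStatusEx is not
 * available (notably under older Wine, hence the name; see the dispatch in
 * uv_run()).
 */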
static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
  uv__loop_internal_fields_t* lfields;
  DWORD bytes;
  ULONG_PTR key;
  OVERLAPPED* overlapped;
  uv_req_t* req;
  int repeat;
  uint64_t timeout_time;
  uint64_t user_timeout;
  int reset_timeout;

  lfields = uv__get_internal_fields(loop);
  timeout_time = loop->time + timeout;

  if (lfields->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (repeat = 0; ; repeat++) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    /* Store the current timeout in a location that's globally accessible so
     * other locations like uv__work_done() can determine whether the queue
     * of events in the callback were waiting when poll was called.
     */
    lfields->current_timeout = timeout;

    GetQueuedCompletionStatus(loop->iocp,
                              &bytes,
                              &key,
                              &overlapped,
                              timeout);

    if (reset_timeout != 0) {
      if (overlapped && timeout == 0)
        uv__metrics_inc_events_waiting(loop, 1);
      timeout = user_timeout;
      reset_timeout = 0;
    }

    /* Placed here because on success the loop will break whether there is an
     * empty package or not; if GetQueuedCompletionStatus returned early, the
     * timeout will be updated and the loop will run again. In either case
     * the idle time will need to be updated.
     */
    uv__metrics_update_idle_time(loop);

    if (overlapped) {
      uv__metrics_inc_events(loop, 1);

      /* Package was dequeued */
      req = uv__overlapped_to_req(overlapped);
      uv__insert_pending_req(loop, req);

      /* Some time might have passed waiting for I/O,
       * so update the loop time here.
       */
      uv_update_time(loop);
    } else if (GetLastError() != WAIT_TIMEOUT) {
      /* Serious error */
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
    } else if (timeout > 0) {
      /* GetQueuedCompletionStatus can occasionally return a little early.
       * Make sure that the desired timeout target time is reached.
       */
      uv_update_time(loop);
      if (timeout_time > loop->time) {
        timeout = (DWORD)(timeout_time - loop->time);
        /* The first call to GetQueuedCompletionStatus should return very
         * close to the target time and the second should reach it, but
         * this is not stated in the documentation. To make sure a busy
         * loop cannot happen, the timeout is increased exponentially
         * starting on the third round.
         */
        timeout += repeat ? (1 << (repeat - 1)) : 0;
        continue;
      }
    }
    break;
  }
}


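/* Main poll path: dequeue up to 128 completion packets in a single
 * GetQueuedCompletionStatusEx call and queue the corresponding reqs for
 * processing. Entries with a NULL lpOverlapped are empty packets posted just
 * to wake the loop (e.g. by uv__wake_all_loops()) and carry no req.
 */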
static void uv__poll(uv_loop_t* loop, DWORD timeout) {
  uv__loop_internal_fields_t* lfields;
  BOOL success;
  uv_req_t* req;
  OVERLAPPED_ENTRY overlappeds[128];
  ULONG count;
  ULONG i;
  int repeat;
  uint64_t timeout_time;
  uint64_t user_timeout;
  uint64_t actual_timeout;
  int reset_timeout;

  lfields = uv__get_internal_fields(loop);
  timeout_time = loop->time + timeout;

  if (lfields->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (repeat = 0; ; repeat++) {
    actual_timeout = timeout;

    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    /* Store the current timeout in a location that's globally accessible so
     * other locations like uv__work_done() can determine whether the queue
     * of events in the callback were waiting when poll was called.
     */
    lfields->current_timeout = timeout;

    success = pGetQueuedCompletionStatusEx(loop->iocp,
                                           overlappeds,
                                           ARRAY_SIZE(overlappeds),
                                           &count,
                                           timeout,
                                           FALSE);

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    /* Placed here because on success the loop will break whether there is an
     * empty package or not; if pGetQueuedCompletionStatusEx returned early,
     * the timeout will be updated and the loop will run again. In either
     * case the idle time will need to be updated.
     */
    uv__metrics_update_idle_time(loop);

    if (success) {
      for (i = 0; i < count; i++) {
        /* Package was dequeued, but see if it is not an empty package
         * meant only to wake us up.
         */
        if (overlappeds[i].lpOverlapped) {
          uv__metrics_inc_events(loop, 1);
          if (actual_timeout == 0)
            uv__metrics_inc_events_waiting(loop, 1);

          req = uv__overlapped_to_req(overlappeds[i].lpOverlapped);
          uv__insert_pending_req(loop, req);
        }
      }

      /* Some time might have passed waiting for I/O,
       * so update the loop time here.
       */
      uv_update_time(loop);
    } else if (GetLastError() != WAIT_TIMEOUT) {
      /* Serious error */
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
    } else if (timeout > 0) {
      /* GetQueuedCompletionStatus can occasionally return a little early.
       * Make sure that the desired timeout target time is reached.
       */
      uv_update_time(loop);
      if (timeout_time > loop->time) {
        timeout = (DWORD)(timeout_time - loop->time);
        /* The first call to GetQueuedCompletionStatus should return very
         * close to the target time and the second should reach it, but
         * this is not stated in the documentation. To make sure a busy
         * loop cannot happen, the timeout is increased exponentially
         * starting on the third round.
         */
        timeout += repeat ? (1 << (repeat - 1)) : 0;
        continue;
      }
    }
    break;
  }
}


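/* Drive the event loop: run due timers, process pending reqs, invoke idle,
 * prepare and check handles, poll the IOCP, and process endgames, repeating
 * until the loop is no longer alive, uv_stop() is called, or the run mode
 * (UV_RUN_ONCE / UV_RUN_NOWAIT) limits it to a single iteration.
 *
 * Typical caller-side usage (illustrative sketch only):
 *
 *   uv_loop_t* loop = uv_default_loop();
 *   // ... create and start handles/requests on the loop ...
 *   uv_run(loop, UV_RUN_DEFAULT);  // returns when nothing is left to do
 */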
int uv_run(uv_loop_t *loop, uv_run_mode mode) {
  DWORD timeout;
  int r;
  int can_sleep;

  r = uv__loop_alive(loop);
  if (!r)
    uv_update_time(loop);

  /* Maintain backwards compatibility by processing timers before entering the
   * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
   * once, which should be done after polling in order to maintain proper
   * execution order of the conceptual event loop. */
  if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
    uv_update_time(loop);
    uv__run_timers(loop);
  }

  while (r != 0 && loop->stop_flag == 0) {
    can_sleep = loop->pending_reqs_tail == NULL && loop->idle_handles == NULL;

    uv__process_reqs(loop);
    uv__idle_invoke(loop);
    uv__prepare_invoke(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
      timeout = uv_backend_timeout(loop);

    uv__metrics_inc_loop_count(loop);

    if (pGetQueuedCompletionStatusEx)
      uv__poll(loop, timeout);
    else
      uv__poll_wine(loop, timeout);

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation. */
    for (r = 0; r < 8 && loop->pending_reqs_tail != NULL; r++)
      uv__process_reqs(loop);

    /* Run one final update on the provider_idle_time in case uv__poll*
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated because an event was received.
     */
    uv__metrics_update_idle_time(loop);

    uv__check_invoke(loop);
    uv__process_endgames(loop);

    uv_update_time(loop);
    uv__run_timers(loop);

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets the compiler compile it to a conditional store.
   * Avoids dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}


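/* Map a handle to its underlying OS handle or socket. Only handle types that
 * wrap a kernel object are supported; closing handles report UV_EBADF.
 */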
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  uv_os_fd_t fd_out;

  switch (handle->type) {
  case UV_TCP:
    fd_out = (uv_os_fd_t)((uv_tcp_t*) handle)->socket;
    break;

  case UV_NAMED_PIPE:
    fd_out = ((uv_pipe_t*) handle)->handle;
    break;

  case UV_TTY:
    fd_out = ((uv_tty_t*) handle)->handle;
    break;

  case UV_UDP:
    fd_out = (uv_os_fd_t)((uv_udp_t*) handle)->socket;
    break;

  case UV_POLL:
    fd_out = (uv_os_fd_t)((uv_poll_t*) handle)->socket;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv_is_closing(handle) || fd_out == INVALID_HANDLE_VALUE)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}


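/* Get or set a SOL_SOCKET option on a TCP or UDP handle's socket. If *value
 * is 0 this performs a getsockopt() and writes the result back through
 * value; otherwise *value is passed to setsockopt().
 */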
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int len;
  SOCKET socket;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP)
    socket = ((uv_tcp_t*) handle)->socket;
  else if (handle->type == UV_UDP)
    socket = ((uv_udp_t*) handle)->socket;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(socket, SOL_SOCKET, optname, (char*) value, &len);
  else
    r = setsockopt(socket, SOL_SOCKET, optname, (const char*) value, len);

  if (r == SOCKET_ERROR)
    return uv_translate_sys_error(WSAGetLastError());

  return 0;
}

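/* The Windows affinity mask is a DWORD_PTR, so the usable CPU mask size is
 * its width in bits.
 */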
int uv_cpumask_size(void) {
  return (int)(sizeof(DWORD_PTR) * 8);
}

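/* Shared helper for the getsockname/getpeername-style queries: resolve the
 * handle's socket with uv_fileno(), surface any stored delayed error, then
 * invoke the supplied function on the socket.
 */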
int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen,
                        int delayed_error) {

  int result;
  uv_os_fd_t fd;

  result = uv_fileno(handle, &fd);
  if (result != 0)
    return result;

  if (delayed_error)
    return uv_translate_sys_error(delayed_error);

  result = func((SOCKET) fd, name, namelen);
  if (result != 0)
    return uv_translate_sys_error(WSAGetLastError());

  return 0;
}