/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>  /* O_CLOEXEC */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>
#include <sys/utsname.h>
#include <sys/time.h>

#ifdef __sun
# include <sys/filio.h>
# include <sys/types.h>
# include <sys/wait.h>
#endif

#if defined(__APPLE__)
# include <sys/filio.h>
#endif /* defined(__APPLE__) */


#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <crt_externs.h>
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# define environ (*_NSGetEnviron())
#else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
extern char** environ;
#endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */


#if defined(__DragonFly__)      || \
    defined(__FreeBSD__)        || \
    defined(__FreeBSD_kernel__) || \
    defined(__NetBSD__)         || \
    defined(__OpenBSD__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
# if defined(__FreeBSD__) || defined(__linux__)
#  define uv__accept4 accept4
# endif
# if defined(__NetBSD__)
#  define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
# endif
#endif

#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
# include <dlfcn.h>  /* for dlsym */
#endif

#if defined(__MVS__)
#include <sys/ioctl.h>
#endif

#if defined(__linux__)
#include <sys/syscall.h>
#endif

static int uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));


uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}


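/* Start closing a handle: mark it as closing, run the type-specific close
 * routine, and (for most handle types) queue it so its close callback runs
 * on a later loop iteration via uv__run_closing_handles().
 */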
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));

  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__stream_close((uv_stream_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    break;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}

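/* Read or write a SOL_SOCKET option on the handle's fd: if *value is zero the
 * current option value is read back into *value, otherwise *value is applied
 * with setsockopt().
 */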
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return UV__ERR(errno);

  return 0;
}

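/* Push the handle onto the loop's closing_handles list so that
 * uv__run_closing_handles() invokes its close callback on the next iteration.
 */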
void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

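/* Return the maximum number of iovec entries accepted by readv()/writev()
 * on this platform.
 */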
int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static int iovmax = -1;
  if (iovmax == -1) {
    iovmax = sysconf(_SC_IOV_MAX);
    /* On some embedded devices (arm-linux-uclibc based ip camera),
     * sysconf(_SC_IOV_MAX) can not get the correct value. The return
     * value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
     */
    if (iovmax == -1) iovmax = 1;
  }
  return iovmax;
#else
  return 1024;
#endif
}


static void uv__finish_close(uv_handle_t* handle) {
  uv_signal_t* sh;

  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
    case UV_PREPARE:
    case UV_CHECK:
    case UV_IDLE:
    case UV_ASYNC:
    case UV_TIMER:
    case UV_PROCESS:
    case UV_FS_EVENT:
    case UV_FS_POLL:
    case UV_POLL:
      break;

    case UV_SIGNAL:
      /* If there are any caught signals "trapped" in the signal pipe,
       * we can't call the close callback yet. Reinserting the handle
       * into the closing queue makes the event loop spin but that's
       * okay because we only need to deliver the pending events.
       */
      sh = (uv_signal_t*) handle;
      if (sh->caught_signals > sh->dispatched_signals) {
        handle->flags ^= UV_HANDLE_CLOSED;
        uv__make_close_pending(handle);  /* Back into the queue. */
        return;
      }
      break;

    case UV_NAMED_PIPE:
    case UV_TCP:
    case UV_TTY:
      uv__stream_destroy((uv_stream_t*)handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*)handle);
      break;

    default:
      assert(0);
      break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}


static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}


int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}


int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}


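/* Return the timeout, in milliseconds, that the backend poll call should use:
 * 0 when there is work that must run without blocking, otherwise the time
 * until the next timer expires (-1 when there are no timers).
 */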
int uv_backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag != 0)
    return 0;

  if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
    return 0;

  if (!QUEUE_EMPTY(&loop->idle_handles))
    return 0;

  if (!QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  if (loop->closing_handles)
    return 0;

  return uv__next_timeout(loop);
}


static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         loop->closing_handles != NULL;
}


int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}


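/* Drive the event loop. Each iteration updates the loop time, runs due timers,
 * pending, idle and prepare callbacks, polls for I/O with the timeout from
 * uv_backend_timeout(), then runs check callbacks and close callbacks. The
 * mode argument controls whether the loop keeps iterating (UV_RUN_DEFAULT),
 * performs one blocking iteration (UV_RUN_ONCE) or one non-blocking iteration
 * (UV_RUN_NOWAIT).
 */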
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int ran_pending;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
    ran_pending = uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
      timeout = uv_backend_timeout(loop);

    uv__io_poll(loop, timeout);
    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}


void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}


int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}


/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}

/* get a file pointer to a file in read-only and close-on-exec mode */
FILE* uv__open_file(const char* path) {
  int fd;
  FILE* fp;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  fp = fdopen(fd, "r");
  if (fp == NULL)
    uv__close(fd);

  return fp;
}


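/* Accept a pending connection on sockfd, retrying on EINTR. Where accept4()
 * (or an equivalent) is available the new descriptor is made non-blocking and
 * close-on-exec atomically; otherwise the flags are set afterwards.
 */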
int uv__accept(int sockfd) {
  int peerfd;
  int err;

  (void) &err;
  assert(sockfd >= 0);

  do
#ifdef uv__accept4
    peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
#else
    peerfd = accept(sockfd, NULL, NULL);
#endif
  while (peerfd == -1 && errno == EINTR);

  if (peerfd == -1)
    return UV__ERR(errno);

#ifndef uv__accept4
  err = uv__cloexec(peerfd, 1);
  if (err == 0)
    err = uv__nonblock(peerfd, 1);

  if (err != 0) {
    uv__close(peerfd);
    return err;
  }
#endif

  return peerfd;
}


/* close() on macos has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__) || defined(TARGET_OS_IPHONE)
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__)
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}


int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0;    /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}


int uv__close(int fd) {
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
#if defined(__MVS__)
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}


int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__HAIKU__)
int uv__cloexec_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}
#endif


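/* fcntl()-based fallback for toggling O_NONBLOCK on an fd; the F_SETFL call
 * is skipped when the flag already has the requested value.
 */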
int uv__nonblock_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


int uv__cloexec_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFD);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & FD_CLOEXEC) == !!set)
    return 0;

  if (set)
    flags = r | FD_CLOEXEC;
  else
    flags = r & ~FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


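/* recvmsg() wrapper that marks any file descriptors received via SCM_RIGHTS
 * control messages as close-on-exec. On Linux it first tries the
 * MSG_CMSG_CLOEXEC flag so the descriptors arrive with the flag already set.
 */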
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
  struct cmsghdr* cmsg;
  ssize_t rc;
  int* pfd;
  int* end;
#if defined(__linux__)
  static int no_msg_cmsg_cloexec;
  if (no_msg_cmsg_cloexec == 0) {
    rc = recvmsg(fd, msg, flags | 0x40000000);  /* MSG_CMSG_CLOEXEC */
    if (rc != -1)
      return rc;
    if (errno != EINVAL)
      return UV__ERR(errno);
    rc = recvmsg(fd, msg, flags);
    if (rc == -1)
      return UV__ERR(errno);
    no_msg_cmsg_cloexec = 1;
  } else {
    rc = recvmsg(fd, msg, flags);
  }
#else
  rc = recvmsg(fd, msg, flags);
#endif
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
}


int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  if (buffer == scratch) {
    *size += 1;
    return UV_ENOBUFS;
  }

  return 0;
}


int uv_chdir(const char* dir) {
  if (chdir(dir))
    return UV__ERR(errno);

  return 0;
}


void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}


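/* Return the file descriptor backing a handle. Only stream (TCP, pipe, TTY),
 * UDP and poll handles are backed by a descriptor; other types get UV_EINVAL.
 */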
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}


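/* Run the I/O callbacks queued on loop->pending_queue. Returns 1 if any
 * callback was invoked, 0 if the queue was empty.
 */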
static int uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  QUEUE pq;
  uv__io_t* w;

  if (QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  QUEUE_MOVE(&loop->pending_queue, &pq);

  while (!QUEUE_EMPTY(&pq)) {
    q = QUEUE_HEAD(&pq);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }

  return 1;
}


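/* Round val up to the next power of two (val is returned unchanged if it is
 * already a power of two).
 */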
static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}

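/* Grow loop->watchers so it can be indexed by at least `len` file descriptors.
 * The two slots past loop->nwatchers hold the fake watcher list and count that
 * the poll backend stashes there; they are preserved across the resize.
 */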
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__realloc(loop->watchers,
                         (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}


void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;

#if defined(UV_HAVE_KQUEUE)
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}


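/* Add `events` to the watcher's interest set and register it with the loop so
 * the change is picked up by the next uv__io_poll() call.
 */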
void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}


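/* Remove `events` from the watcher's interest set. When no events remain the
 * watcher is detached from the loop; otherwise it is re-queued so the backend
 * updates its registration.
 */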
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);

    if (loop->watchers[w->fd] != NULL) {
      assert(loop->watchers[w->fd] == w);
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
      w->events = 0;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}


void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  QUEUE_REMOVE(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}


void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}


int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}


int uv__fd_exists(uv_loop_t* loop, int fd) {
  return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
}


int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__) && !defined(__HAIKU__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  return 0;
}


int uv__open_cloexec(const char* path, int flags) {
#if defined(O_CLOEXEC)
  int fd;

  fd = open(path, flags | O_CLOEXEC);
  if (fd == -1)
    return UV__ERR(errno);

  return fd;
#else  /* O_CLOEXEC */
  int err;
  int fd;

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
#endif  /* O_CLOEXEC */
}


int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
  int r;

  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);

  return r;
#else
  int err;
  int r;

  r = dup2(oldfd, newfd);  /* Never retry. */
  if (r == -1)
    return UV__ERR(errno);

  err = uv__cloexec(newfd, 1);
  if (err != 0) {
    uv__close(newfd);
    return err;
  }

  return r;
#endif
}


int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  size_t len;
  int r;

  /* Check if the HOME environment variable is set first. The task of
     performing input validation on buffer and size is taken care of by
     uv_os_getenv(). */
  r = uv_os_getenv("HOME", buffer, size);

  if (r != UV_ENOENT)
    return r;

  /* HOME is not set, so call uv__getpwuid_r() */
  r = uv__getpwuid_r(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return UV_ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}


int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

#define CHECK_ENV_VAR(name)                                                   \
  do {                                                                        \
    buf = getenv(name);                                                       \
    if (buf != NULL)                                                          \
      goto return_buffer;                                                     \
  }                                                                           \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
  #if defined(__ANDROID__)
    buf = "/data/local/tmp";
  #else
    buf = "/tmp";
  #endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  /* The returned directory should not have a trailing slash. */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}


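/* Look up the current effective user with getpwuid_r(), growing the scratch
 * buffer on ERANGE. The username, home directory and shell are copied into a
 * single allocation owned by pwd->username; release it with
 * uv_os_free_passwd().
 */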
int uv__getpwuid_r(uv_passwd_t* pwd) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  uid_t uid;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  long initsize;
  int r;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
  int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**);

  getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r");
  if (getpwuid_r == NULL)
    return UV_ENOSYS;
#endif

  if (pwd == NULL)
    return UV_EINVAL;

  initsize = sysconf(_SC_GETPW_R_SIZE_MAX);

  if (initsize <= 0)
    bufsize = 4096;
  else
    bufsize = (size_t) initsize;

  uid = geteuid();
  buf = NULL;

  for (;;) {
    uv__free(buf);
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    r = getpwuid_r(uid, &pw, buf, bufsize, &result);

    if (r != ERANGE)
      break;

    bufsize *= 2;
  }

  if (r != 0) {
    uv__free(buf);
    return -r;
  }

  if (result == NULL) {
    uv__free(buf);
    return UV_ENOENT;
  }

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}


void uv_os_free_passwd(uv_passwd_t* pwd) {
  if (pwd == NULL)
    return;

  /*
    The memory for the name, shell, and homedir is allocated in a single
    uv__malloc() call. The base pointer is stored in pwd->username, so that is
    the field that needs to be freed.
  */
  uv__free(pwd->username);
  pwd->username = NULL;
  pwd->shell = NULL;
  pwd->homedir = NULL;
}


int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd);
}


int uv_translate_sys_error(int sys_errno) {
  /* If < 0 then it's already a libuv error. */
  return sys_errno <= 0 ? sys_errno : -sys_errno;
}


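/* Copy the process environment into a heap-allocated array of uv_env_item_t.
 * Each entry's name and value share one uv__strdup()'d buffer split at the
 * '=' separator; the caller releases the array with uv_os_free_environ().
 */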
int uv_os_environ(uv_env_item_t** envitems, int* count) {
  int i, j, cnt;
  uv_env_item_t* envitem;

  *envitems = NULL;
  *count = 0;

  for (i = 0; environ[i] != NULL; i++);

  *envitems = uv__calloc(i, sizeof(**envitems));

  if (*envitems == NULL)
    return UV_ENOMEM;

  for (j = 0, cnt = 0; j < i; j++) {
    char* buf;
    char* ptr;

    if (environ[j] == NULL)
      break;

    buf = uv__strdup(environ[j]);
    if (buf == NULL)
      goto fail;

    ptr = strchr(buf, '=');
    if (ptr == NULL) {
      uv__free(buf);
      continue;
    }

    *ptr = '\0';

    envitem = &(*envitems)[cnt];
    envitem->name = buf;
    envitem->value = ptr + 1;

    cnt++;
  }

  *count = cnt;
  return 0;

fail:
  for (i = 0; i < cnt; i++) {
    envitem = &(*envitems)[i];
    uv__free(envitem->name);
  }
  uv__free(*envitems);

  *envitems = NULL;
  *count = 0;
  return UV_ENOMEM;
}


int uv_os_getenv(const char* name, char* buffer, size_t* size) {
  char* var;
  size_t len;

  if (name == NULL || buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  var = getenv(name);

  if (var == NULL)
    return UV_ENOENT;

  len = strlen(var);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, var, len + 1);
  *size = len;

  return 0;
}


int uv_os_setenv(const char* name, const char* value) {
  if (name == NULL || value == NULL)
    return UV_EINVAL;

  if (setenv(name, value, 1) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_unsetenv(const char* name) {
  if (name == NULL)
    return UV_EINVAL;

  if (unsetenv(name) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_gethostname(char* buffer, size_t* size) {
  /*
    On some platforms, if the input buffer is not large enough, gethostname()
    succeeds, but truncates the result. libuv can detect this and return ENOBUFS
    instead by creating a large enough buffer and comparing the hostname length
    to the size input.
  */
  char buf[UV_MAXHOSTNAMESIZE];
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (gethostname(buf, sizeof(buf)) != 0)
    return UV__ERR(errno);

  buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, buf, len + 1);
  *size = len;
  return 0;
}


uv_os_fd_t uv_get_osfhandle(int fd) {
  return fd;
}

int uv_open_osfhandle(uv_os_fd_t os_fd) {
  return os_fd;
}

uv_pid_t uv_os_getpid(void) {
  return getpid();
}


uv_pid_t uv_os_getppid(void) {
  return getppid();
}


int uv_os_getpriority(uv_pid_t pid, int* priority) {
  int r;

  if (priority == NULL)
    return UV_EINVAL;

  errno = 0;
  r = getpriority(PRIO_PROCESS, (int) pid);

  if (r == -1 && errno != 0)
    return UV__ERR(errno);

  *priority = r;
  return 0;
}


int uv_os_setpriority(uv_pid_t pid, int priority) {
  if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
    return UV_EINVAL;

  if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_uname(uv_utsname_t* buffer) {
  struct utsname buf;
  int r;

  if (buffer == NULL)
    return UV_EINVAL;

  if (uname(&buf) == -1) {
    r = UV__ERR(errno);
    goto error;
  }

  r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
  if (r == UV_E2BIG)
    goto error;

#ifdef _AIX
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
  if (r >= sizeof(buffer->release)) {
    r = UV_E2BIG;
    goto error;
  }
#else
  r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
  if (r == UV_E2BIG)
    goto error;
#endif

  r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
  if (r == UV_E2BIG)
    goto error;

#if defined(_AIX) || defined(__PASE__)
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
#else
  r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
#endif

  if (r == UV_E2BIG)
    goto error;

  return 0;

error:
  buffer->sysname[0] = '\0';
  buffer->release[0] = '\0';
  buffer->version[0] = '\0';
  buffer->machine[0] = '\0';
  return r;
}

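/* Shared helper for the getsockname()/getpeername() style wrappers: `func` is
 * the underlying libc call to invoke on the handle's file descriptor.
 */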
int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen) {
  socklen_t socklen;
  uv_os_fd_t fd;
  int r;

  r = uv_fileno(handle, &fd);
  if (r < 0)
    return r;

  /* sizeof(socklen_t) != sizeof(int) on some systems. */
  socklen = (socklen_t) *namelen;

  if (func(fd, name, &socklen))
    return UV__ERR(errno);

  *namelen = (int) socklen;
  return 0;
}

int uv_gettimeofday(uv_timeval64_t* tv) {
  struct timeval time;

  if (tv == NULL)
    return UV_EINVAL;

  if (gettimeofday(&time, NULL) != 0)
    return UV__ERR(errno);

  tv->tv_sec = (int64_t) time.tv_sec;
  tv->tv_usec = (int32_t) time.tv_usec;
  return 0;
}

void uv_sleep(unsigned int msec) {
  struct timespec timeout;
  int rc;

  timeout.tv_sec = msec / 1000;
  timeout.tv_nsec = (msec % 1000) * 1000 * 1000;

  do
    rc = nanosleep(&timeout, &timeout);
  while (rc == -1 && errno == EINTR);

  assert(rc == 0);
}