1  /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2   * Permission is hereby granted, free of charge, to any person obtaining a copy
3   * of this software and associated documentation files (the "Software"), to
4   * deal in the Software without restriction, including without limitation the
5   * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6   * sell copies of the Software, and to permit persons to whom the Software is
7   * furnished to do so, subject to the following conditions:
8   *
9   * The above copyright notice and this permission notice shall be included in
10   * all copies or substantial portions of the Software.
11   *
12   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15   * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16   * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17   * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
18   * IN THE SOFTWARE.
19   */
20  
21  #include "uv.h"
22  #include "internal.h"
23  
24  #include <stddef.h> /* NULL */
25  #include <stdio.h> /* printf */
26  #include <stdlib.h>
27  #include <string.h> /* strerror */
28  #include <errno.h>
29  #include <assert.h>
30  #include <unistd.h>
31  #include <sys/types.h>
32  #include <sys/stat.h>
33  #include <fcntl.h>  /* O_CLOEXEC */
34  #include <sys/ioctl.h>
35  #include <sys/socket.h>
36  #include <sys/un.h>
37  #include <netinet/in.h>
38  #include <arpa/inet.h>
39  #include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
40  #include <sys/uio.h> /* writev */
41  #include <sys/resource.h> /* getrusage */
42  #include <pwd.h>
43  #include <sys/utsname.h>
44  #include <sys/time.h>
45  
46  #ifdef __sun
47  # include <sys/filio.h>
48  # include <sys/types.h>
49  # include <sys/wait.h>
50  #endif
51  
52  #if defined(__APPLE__)
53  # include <sys/filio.h>
54  #endif /* defined(__APPLE__) */
55  
56  
57  #if defined(__APPLE__) && !TARGET_OS_IPHONE
58  # include <crt_externs.h>
59  # include <mach-o/dyld.h> /* _NSGetExecutablePath */
60  # define environ (*_NSGetEnviron())
61  #else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
62  extern char** environ;
63  #endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */
64  
65  
66  #if defined(__DragonFly__)      || \
67      defined(__FreeBSD__)        || \
68      defined(__FreeBSD_kernel__) || \
69      defined(__NetBSD__)         || \
70      defined(__OpenBSD__)
71  # include <sys/sysctl.h>
72  # include <sys/filio.h>
73  # include <sys/wait.h>
74  # if defined(__FreeBSD__)
75  #  define uv__accept4 accept4
76  # endif
77  # if defined(__NetBSD__)
78  #  define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
79  # endif
80  #endif
81  
82  #if defined(__MVS__)
83  #include <sys/ioctl.h>
84  #endif
85  
86  #if defined(__linux__)
87  # include <sched.h>
88  # include <sys/syscall.h>
89  # define uv__accept4 accept4
90  #endif
91  
92  #if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
93  # include <sanitizer/linux_syscall_hooks.h>
94  #endif
95  
96  static int uv__run_pending(uv_loop_t* loop);
97  
98  /* Verify that uv_buf_t is ABI-compatible with struct iovec. */
99  STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
100  STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
101                sizeof(((struct iovec*) 0)->iov_base));
102  STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
103                sizeof(((struct iovec*) 0)->iov_len));
104  STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
105  STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
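/* A minimal sketch of why these checks matter (hypothetical caller code, not
 * taken from this file; assumes only writev() from <sys/uio.h>): because the
 * layouts match, a uv_buf_t array can be handed to vectored I/O by casting,
 * without copying, e.g.
 *
 *   ssize_t n = writev(fd, (struct iovec*) bufs, nbufs);
 *
 * where `bufs` and `nbufs` stand for any uv_buf_t array and its length.
 */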
106  
107  
108  uint64_t uv_hrtime(void) {
109    return uv__hrtime(UV_CLOCK_PRECISE);
110  }
111  
112  
113  void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
114    assert(!uv__is_closing(handle));
115  
116    handle->flags |= UV_HANDLE_CLOSING;
117    handle->close_cb = close_cb;
118  
119    switch (handle->type) {
120    case UV_NAMED_PIPE:
121      uv__pipe_close((uv_pipe_t*)handle);
122      break;
123  
124    case UV_TTY:
125      uv__stream_close((uv_stream_t*)handle);
126      break;
127  
128    case UV_TCP:
129      uv__tcp_close((uv_tcp_t*)handle);
130      break;
131  
132    case UV_UDP:
133      uv__udp_close((uv_udp_t*)handle);
134      break;
135  
136    case UV_PREPARE:
137      uv__prepare_close((uv_prepare_t*)handle);
138      break;
139  
140    case UV_CHECK:
141      uv__check_close((uv_check_t*)handle);
142      break;
143  
144    case UV_IDLE:
145      uv__idle_close((uv_idle_t*)handle);
146      break;
147  
148    case UV_ASYNC:
149      uv__async_close((uv_async_t*)handle);
150      break;
151  
152    case UV_TIMER:
153      uv__timer_close((uv_timer_t*)handle);
154      break;
155  
156    case UV_PROCESS:
157      uv__process_close((uv_process_t*)handle);
158      break;
159  
160    case UV_FS_EVENT:
161      uv__fs_event_close((uv_fs_event_t*)handle);
162      break;
163  
164    case UV_POLL:
165      uv__poll_close((uv_poll_t*)handle);
166      break;
167  
168    case UV_FS_POLL:
169      uv__fs_poll_close((uv_fs_poll_t*)handle);
170      /* Poll handles use file system requests, and one of them may still be
171       * running. The poll code will call uv__make_close_pending() for us. */
172      return;
173  
174    case UV_SIGNAL:
175      uv__signal_close((uv_signal_t*) handle);
176      break;
177  
178    default:
179      assert(0);
180    }
181  
182    uv__make_close_pending(handle);
183  }
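/* A minimal usage sketch (hypothetical caller code, not taken from this file;
 * `tcp` stands for a heap-allocated uv_tcp_t): the close callback is the
 * earliest point at which the handle's memory may safely be released.
 *
 *   static void on_close(uv_handle_t* handle) {
 *     free(handle);
 *   }
 *
 *   uv_close((uv_handle_t*) tcp, on_close);
 */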
184  
185  int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
186    int r;
187    int fd;
188    socklen_t len;
189  
190    if (handle == NULL || value == NULL)
191      return UV_EINVAL;
192  
193    if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
194      fd = uv__stream_fd((uv_stream_t*) handle);
195    else if (handle->type == UV_UDP)
196      fd = ((uv_udp_t *) handle)->io_watcher.fd;
197    else
198      return UV_ENOTSUP;
199  
200    len = sizeof(*value);
201  
202    if (*value == 0)
203      r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
204    else
205      r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);
206  
207    if (r < 0)
208      return UV__ERR(errno);
209  
210    return 0;
211  }
212  
213  void uv__make_close_pending(uv_handle_t* handle) {
214    assert(handle->flags & UV_HANDLE_CLOSING);
215    assert(!(handle->flags & UV_HANDLE_CLOSED));
216    handle->next_closing = handle->loop->closing_handles;
217    handle->loop->closing_handles = handle;
218  }
219  
220  int uv__getiovmax(void) {
221  #if defined(IOV_MAX)
222    return IOV_MAX;
223  #elif defined(_SC_IOV_MAX)
224    static int iovmax_cached = -1;
225    int iovmax;
226  
227    iovmax = uv__load_relaxed(&iovmax_cached);
228    if (iovmax != -1)
229      return iovmax;
230  
231    /* On some embedded devices (arm-linux-uclibc based IP cameras),
232     * sysconf(_SC_IOV_MAX) cannot return the correct value; it returns -1
233     * and sets errno to EINPROGRESS. Degrade the value to 1 in that case.
234     */
235    iovmax = sysconf(_SC_IOV_MAX);
236    if (iovmax == -1)
237      iovmax = 1;
238  
239    uv__store_relaxed(&iovmax_cached, iovmax);
240  
241    return iovmax;
242  #else
243    return 1024;
244  #endif
245  }
246  
247  
248  static void uv__finish_close(uv_handle_t* handle) {
249    uv_signal_t* sh;
250  
251    /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
252     * possible for it to be active in the sense that uv__is_active() returns
253     * true.
254     *
255     * A good example is when the user calls uv_shutdown(), immediately followed
256     * by uv_close(). The handle is considered active at this point because the
257     * completion of the shutdown req is still pending.
258     */
259    assert(handle->flags & UV_HANDLE_CLOSING);
260    assert(!(handle->flags & UV_HANDLE_CLOSED));
261    handle->flags |= UV_HANDLE_CLOSED;
262  
263    switch (handle->type) {
264      case UV_PREPARE:
265      case UV_CHECK:
266      case UV_IDLE:
267      case UV_ASYNC:
268      case UV_TIMER:
269      case UV_PROCESS:
270      case UV_FS_EVENT:
271      case UV_FS_POLL:
272      case UV_POLL:
273        break;
274  
275      case UV_SIGNAL:
276        /* If there are any caught signals "trapped" in the signal pipe,
277         * we can't call the close callback yet. Reinserting the handle
278         * into the closing queue makes the event loop spin but that's
279         * okay because we only need to deliver the pending events.
280         */
281        sh = (uv_signal_t*) handle;
282        if (sh->caught_signals > sh->dispatched_signals) {
283          handle->flags ^= UV_HANDLE_CLOSED;
284          uv__make_close_pending(handle);  /* Back into the queue. */
285          return;
286        }
287        break;
288  
289      case UV_NAMED_PIPE:
290      case UV_TCP:
291      case UV_TTY:
292        uv__stream_destroy((uv_stream_t*)handle);
293        break;
294  
295      case UV_UDP:
296        uv__udp_finish_close((uv_udp_t*)handle);
297        break;
298  
299      default:
300        assert(0);
301        break;
302    }
303  
304    uv__handle_unref(handle);
305    QUEUE_REMOVE(&handle->handle_queue);
306  
307    if (handle->close_cb) {
308      handle->close_cb(handle);
309    }
310  }
311  
312  
313  static void uv__run_closing_handles(uv_loop_t* loop) {
314    uv_handle_t* p;
315    uv_handle_t* q;
316  
317    p = loop->closing_handles;
318    loop->closing_handles = NULL;
319  
320    while (p) {
321      q = p->next_closing;
322      uv__finish_close(p);
323      p = q;
324    }
325  }
326  
327  
328  int uv_is_closing(const uv_handle_t* handle) {
329    return uv__is_closing(handle);
330  }
331  
332  
333  int uv_backend_fd(const uv_loop_t* loop) {
334    return loop->backend_fd;
335  }
336  
337  
338  static int uv__loop_alive(const uv_loop_t* loop) {
339    return uv__has_active_handles(loop) ||
340           uv__has_active_reqs(loop) ||
341           !QUEUE_EMPTY(&loop->pending_queue) ||
342           loop->closing_handles != NULL;
343  }
344  
345  
346  static int uv__backend_timeout(const uv_loop_t* loop) {
347    if (loop->stop_flag == 0 &&
348        /* uv__loop_alive(loop) && */
349        (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
350        QUEUE_EMPTY(&loop->pending_queue) &&
351        QUEUE_EMPTY(&loop->idle_handles) &&
352        loop->closing_handles == NULL)
353      return uv__next_timeout(loop);
354    return 0;
355  }
356  
357  
358  int uv_backend_timeout(const uv_loop_t* loop) {
359    if (QUEUE_EMPTY(&loop->watcher_queue))
360      return uv__backend_timeout(loop);
361    /* Need to call uv_run to update the backend fd state. */
362    return 0;
363  }
364  
365  
366  int uv_loop_alive(const uv_loop_t* loop) {
367    return uv__loop_alive(loop);
368  }
369  
370  
371  int uv_run(uv_loop_t* loop, uv_run_mode mode) {
372    int timeout;
373    int r;
374    int ran_pending;
375  
376    r = uv__loop_alive(loop);
377    if (!r)
378      uv__update_time(loop);
379  
380    while (r != 0 && loop->stop_flag == 0) {
381      uv__update_time(loop);
382      uv__run_timers(loop);
383      ran_pending = uv__run_pending(loop);
384      uv__run_idle(loop);
385      uv__run_prepare(loop);
386  
387      timeout = 0;
388      if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
389        timeout = uv__backend_timeout(loop);
390  
391      uv__io_poll(loop, timeout);
392  
393      /* Run one final update on the provider_idle_time in case uv__io_poll
394       * returned because the timeout expired but no events were received. This
395       * call is ignored if the provider_entry_time was either never set (when
396       * the timeout == 0) or was already updated because an event was received.
397       */
398      uv__metrics_update_idle_time(loop);
399  
400      uv__run_check(loop);
401      uv__run_closing_handles(loop);
402  
403      if (mode == UV_RUN_ONCE) {
404        /* UV_RUN_ONCE implies forward progress: at least one callback must have
405         * been invoked when it returns. uv__io_poll() can return without doing
406         * I/O (meaning: no callbacks) when its timeout expires - which means we
407         * have pending timers that satisfy the forward progress constraint.
408         *
409         * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
410         * the check.
411         */
412        uv__update_time(loop);
413        uv__run_timers(loop);
414      }
415  
416      r = uv__loop_alive(loop);
417      if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
418        break;
419    }
420  
421    /* The if statement lets gcc compile it to a conditional store. Avoids
422     * dirtying a cache line.
423     */
424    if (loop->stop_flag != 0)
425      loop->stop_flag = 0;
426  
427    return r;
428  }
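/* A minimal usage sketch of the loop API (hypothetical caller code, assuming
 * only the public declarations in uv.h):
 *
 *   uv_loop_t loop;
 *   uv_loop_init(&loop);
 *   // ... create and start handles here ...
 *   uv_run(&loop, UV_RUN_DEFAULT);  // returns once no active handles/requests remain
 *   uv_loop_close(&loop);
 */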
429  
430  
431  void uv_update_time(uv_loop_t* loop) {
432    uv__update_time(loop);
433  }
434  
435  
436  int uv_is_active(const uv_handle_t* handle) {
437    return uv__is_active(handle);
438  }
439  
440  
441  /* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
442  int uv__socket(int domain, int type, int protocol) {
443    int sockfd;
444    int err;
445  
446  #if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
447    sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
448    if (sockfd != -1)
449      return sockfd;
450  
451    if (errno != EINVAL)
452      return UV__ERR(errno);
453  #endif
454  
455    sockfd = socket(domain, type, protocol);
456    if (sockfd == -1)
457      return UV__ERR(errno);
458  
459    err = uv__nonblock(sockfd, 1);
460    if (err == 0)
461      err = uv__cloexec(sockfd, 1);
462  
463    if (err) {
464      uv__close(sockfd);
465      return err;
466    }
467  
468  #if defined(SO_NOSIGPIPE)
469    {
470      int on = 1;
471      setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
472    }
473  #endif
474  
475    return sockfd;
476  }
477  
478  /* Get a FILE pointer to a file opened in read-only, close-on-exec mode. */
479  FILE* uv__open_file(const char* path) {
480    int fd;
481    FILE* fp;
482  
483    fd = uv__open_cloexec(path, O_RDONLY);
484    if (fd < 0)
485      return NULL;
486  
487    fp = fdopen(fd, "r");
488    if (fp == NULL)
489      uv__close(fd);
490  
491    return fp;
492  }
493  
494  
495  int uv__accept(int sockfd) {
496    int peerfd;
497    int err;
498  
499    (void) &err;
500    assert(sockfd >= 0);
501  
502    do
503  #ifdef uv__accept4
504      peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
505  #else
506      peerfd = accept(sockfd, NULL, NULL);
507  #endif
508    while (peerfd == -1 && errno == EINTR);
509  
510    if (peerfd == -1)
511      return UV__ERR(errno);
512  
513  #ifndef uv__accept4
514    err = uv__cloexec(peerfd, 1);
515    if (err == 0)
516      err = uv__nonblock(peerfd, 1);
517  
518    if (err != 0) {
519      uv__close(peerfd);
520      return err;
521    }
522  #endif
523  
524    return peerfd;
525  }
526  
527  
528  /* close() on macOS has the "interesting" quirk that it fails with EINTR
529   * without closing the file descriptor when a thread is in the cancel state.
530   * That's why libuv calls close$NOCANCEL() instead.
531   *
532   * glibc on Linux has a similar issue: close() is a cancellation point and
533   * will unwind the thread when it's in the cancel state. Work around that
534   * by making the system call directly. musl libc is unaffected.
535   */
536  int uv__close_nocancel(int fd) {
537  #if defined(__APPLE__)
538  #pragma GCC diagnostic push
539  #pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
540  #if defined(__LP64__) || TARGET_OS_IPHONE
541    extern int close$NOCANCEL(int);
542    return close$NOCANCEL(fd);
543  #else
544    extern int close$NOCANCEL$UNIX2003(int);
545    return close$NOCANCEL$UNIX2003(fd);
546  #endif
547  #pragma GCC diagnostic pop
548  #elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
549    long rc;
550    __sanitizer_syscall_pre_close(fd);
551    rc = syscall(SYS_close, fd);
552    __sanitizer_syscall_post_close(rc, fd);
553    return rc;
554  #elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
555    return syscall(SYS_close, fd);
556  #else
557    return close(fd);
558  #endif
559  }
560  
561  
562  int uv__close_nocheckstdio(int fd) {
563    int saved_errno;
564    int rc;
565  
566    assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */
567  
568    saved_errno = errno;
569    rc = uv__close_nocancel(fd);
570    if (rc == -1) {
571      rc = UV__ERR(errno);
572      if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
573        rc = 0;    /* The close is in progress, not an error. */
574      errno = saved_errno;
575    }
576  
577    return rc;
578  }
579  
580  
581  int uv__close(int fd) {
582    assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
583  #if defined(__MVS__)
584    SAVE_ERRNO(epoll_file_close(fd));
585  #endif
586    return uv__close_nocheckstdio(fd);
587  }
588  
589  #if UV__NONBLOCK_IS_IOCTL
590  int uv__nonblock_ioctl(int fd, int set) {
591    int r;
592  
593    do
594      r = ioctl(fd, FIONBIO, &set);
595    while (r == -1 && errno == EINTR);
596  
597    if (r)
598      return UV__ERR(errno);
599  
600    return 0;
601  }
602  #endif
603  
604  
605  int uv__nonblock_fcntl(int fd, int set) {
606    int flags;
607    int r;
608  
609    do
610      r = fcntl(fd, F_GETFL);
611    while (r == -1 && errno == EINTR);
612  
613    if (r == -1)
614      return UV__ERR(errno);
615  
616    /* Bail out now if already set/clear. */
617    if (!!(r & O_NONBLOCK) == !!set)
618      return 0;
619  
620    if (set)
621      flags = r | O_NONBLOCK;
622    else
623      flags = r & ~O_NONBLOCK;
624  
625    do
626      r = fcntl(fd, F_SETFL, flags);
627    while (r == -1 && errno == EINTR);
628  
629    if (r)
630      return UV__ERR(errno);
631  
632    return 0;
633  }
634  
635  
636  int uv__cloexec(int fd, int set) {
637    int flags;
638    int r;
639  
640    flags = 0;
641    if (set)
642      flags = FD_CLOEXEC;
643  
644    do
645      r = fcntl(fd, F_SETFD, flags);
646    while (r == -1 && errno == EINTR);
647  
648    if (r)
649      return UV__ERR(errno);
650  
651    return 0;
652  }
653  
654  
655  ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
656    struct cmsghdr* cmsg;
657    ssize_t rc;
658    int* pfd;
659    int* end;
660  #if defined(__linux__)
661    static int no_msg_cmsg_cloexec;
662    if (0 == uv__load_relaxed(&no_msg_cmsg_cloexec)) {
663      rc = recvmsg(fd, msg, flags | 0x40000000);  /* MSG_CMSG_CLOEXEC */
664      if (rc != -1)
665        return rc;
666      if (errno != EINVAL)
667        return UV__ERR(errno);
668      rc = recvmsg(fd, msg, flags);
669      if (rc == -1)
670        return UV__ERR(errno);
671      uv__store_relaxed(&no_msg_cmsg_cloexec, 1);
672    } else {
673      rc = recvmsg(fd, msg, flags);
674    }
675  #else
676    rc = recvmsg(fd, msg, flags);
677  #endif
678    if (rc == -1)
679      return UV__ERR(errno);
680    if (msg->msg_controllen == 0)
681      return rc;
682    for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
683      if (cmsg->cmsg_type == SCM_RIGHTS)
684        for (pfd = (int*) CMSG_DATA(cmsg),
685             end = (int*) ((char*) cmsg + cmsg->cmsg_len);
686             pfd < end;
687             pfd += 1)
688          uv__cloexec(*pfd, 1);
689    return rc;
690  }
691  
692  
693  int uv_cwd(char* buffer, size_t* size) {
694    char scratch[1 + UV__PATH_MAX];
695  
696    if (buffer == NULL || size == NULL)
697      return UV_EINVAL;
698  
699    /* Try to read directly into the user's buffer first... */
700    if (getcwd(buffer, *size) != NULL)
701      goto fixup;
702  
703    if (errno != ERANGE)
704      return UV__ERR(errno);
705  
706    /* ...or into scratch space if the user's buffer is too small
707     * so we can report how much space to provide on the next try.
708     */
709    if (getcwd(scratch, sizeof(scratch)) == NULL)
710      return UV__ERR(errno);
711  
712    buffer = scratch;
713  
714  fixup:
715  
716    *size = strlen(buffer);
717  
718    if (*size > 1 && buffer[*size - 1] == '/') {
719      *size -= 1;
720      buffer[*size] = '\0';
721    }
722  
723    if (buffer == scratch) {
724      *size += 1;
725      return UV_ENOBUFS;
726    }
727  
728    return 0;
729  }
730  
731  
732  int uv_chdir(const char* dir) {
733    if (chdir(dir))
734      return UV__ERR(errno);
735  
736    return 0;
737  }
738  
739  
740  void uv_disable_stdio_inheritance(void) {
741    int fd;
742  
743    /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
744     * first 16 file descriptors. After that, bail out after the first error.
745     */
746    for (fd = 0; ; fd++)
747      if (uv__cloexec(fd, 1) && fd > 15)
748        break;
749  }
750  
751  
752  int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
753    int fd_out;
754  
755    switch (handle->type) {
756    case UV_TCP:
757    case UV_NAMED_PIPE:
758    case UV_TTY:
759      fd_out = uv__stream_fd((uv_stream_t*) handle);
760      break;
761  
762    case UV_UDP:
763      fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
764      break;
765  
766    case UV_POLL:
767      fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
768      break;
769  
770    default:
771      return UV_EINVAL;
772    }
773  
774    if (uv__is_closing(handle) || fd_out == -1)
775      return UV_EBADF;
776  
777    *fd = fd_out;
778    return 0;
779  }
780  
781  
782  static int uv__run_pending(uv_loop_t* loop) {
783    QUEUE* q;
784    QUEUE pq;
785    uv__io_t* w;
786  
787    if (QUEUE_EMPTY(&loop->pending_queue))
788      return 0;
789  
790    QUEUE_MOVE(&loop->pending_queue, &pq);
791  
792    while (!QUEUE_EMPTY(&pq)) {
793      q = QUEUE_HEAD(&pq);
794      QUEUE_REMOVE(q);
795      QUEUE_INIT(q);
796      w = QUEUE_DATA(q, uv__io_t, pending_queue);
797      w->cb(loop, w, POLLOUT);
798    }
799  
800    return 1;
801  }
802  
803  
804  static unsigned int next_power_of_two(unsigned int val) {
805    val -= 1;
806    val |= val >> 1;
807    val |= val >> 2;
808    val |= val >> 4;
809    val |= val >> 8;
810    val |= val >> 16;
811    val += 1;
812    return val;
813  }
814  
815  static void maybe_resize(uv_loop_t* loop, unsigned int len) {
816    uv__io_t** watchers;
817    void* fake_watcher_list;
818    void* fake_watcher_count;
819    unsigned int nwatchers;
820    unsigned int i;
821  
822    if (len <= loop->nwatchers)
823      return;
824  
825    /* Preserve fake watcher list and count at the end of the watchers */
826    if (loop->watchers != NULL) {
827      fake_watcher_list = loop->watchers[loop->nwatchers];
828      fake_watcher_count = loop->watchers[loop->nwatchers + 1];
829    } else {
830      fake_watcher_list = NULL;
831      fake_watcher_count = NULL;
832    }
833  
834    nwatchers = next_power_of_two(len + 2) - 2;
835    watchers = uv__reallocf(loop->watchers,
836                            (nwatchers + 2) * sizeof(loop->watchers[0]));
837  
838    if (watchers == NULL)
839      abort();
840    for (i = loop->nwatchers; i < nwatchers; i++)
841      watchers[i] = NULL;
842    watchers[nwatchers] = fake_watcher_list;
843    watchers[nwatchers + 1] = fake_watcher_count;
844  
845    loop->watchers = watchers;
846    loop->nwatchers = nwatchers;
847  }
848  
849  
850  void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
851    assert(cb != NULL);
852    assert(fd >= -1);
853    QUEUE_INIT(&w->pending_queue);
854    QUEUE_INIT(&w->watcher_queue);
855    w->cb = cb;
856    w->fd = fd;
857    w->events = 0;
858    w->pevents = 0;
859  
860  #if defined(UV_HAVE_KQUEUE)
861    w->rcount = 0;
862    w->wcount = 0;
863  #endif /* defined(UV_HAVE_KQUEUE) */
864  }
865  
866  
867  void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
868    assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
869    assert(0 != events);
870    assert(w->fd >= 0);
871    assert(w->fd < INT_MAX);
872  
873    w->pevents |= events;
874    maybe_resize(loop, w->fd + 1);
875  
876  #if !defined(__sun)
877    /* The event ports backend needs to rearm all file descriptors on each and
878     * every tick of the event loop but the other backends allow us to
879     * short-circuit here if the event mask is unchanged.
880     */
881    if (w->events == w->pevents)
882      return;
883  #endif
884  
885    if (QUEUE_EMPTY(&w->watcher_queue))
886      QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
887  
888    if (loop->watchers[w->fd] == NULL) {
889      loop->watchers[w->fd] = w;
890      loop->nfds++;
891    }
892  }
893  
894  
895  void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
896    assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
897    assert(0 != events);
898  
899    if (w->fd == -1)
900      return;
901  
902    assert(w->fd >= 0);
903  
904    /* Happens when uv__io_stop() is called on a handle that was never started. */
905    if ((unsigned) w->fd >= loop->nwatchers)
906      return;
907  
908    w->pevents &= ~events;
909  
910    if (w->pevents == 0) {
911      QUEUE_REMOVE(&w->watcher_queue);
912      QUEUE_INIT(&w->watcher_queue);
913      w->events = 0;
914  
915      if (w == loop->watchers[w->fd]) {
916        assert(loop->nfds > 0);
917        loop->watchers[w->fd] = NULL;
918        loop->nfds--;
919      }
920    }
921    else if (QUEUE_EMPTY(&w->watcher_queue))
922      QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
923  }
924  
925  
926  void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
927    uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
928    QUEUE_REMOVE(&w->pending_queue);
929  
930    /* Remove stale events for this file descriptor */
931    if (w->fd != -1)
932      uv__platform_invalidate_fd(loop, w->fd);
933  }
934  
935  
936  void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
937    if (QUEUE_EMPTY(&w->pending_queue))
938      QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
939  }
940  
941  
942  int uv__io_active(const uv__io_t* w, unsigned int events) {
943    assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
944    assert(0 != events);
945    return 0 != (w->pevents & events);
946  }
947  
948  
949  int uv__fd_exists(uv_loop_t* loop, int fd) {
950    return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
951  }
952  
953  
954  int uv_getrusage(uv_rusage_t* rusage) {
955    struct rusage usage;
956  
957    if (getrusage(RUSAGE_SELF, &usage))
958      return UV__ERR(errno);
959  
960    rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
961    rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;
962  
963    rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
964    rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;
965  
966  #if !defined(__MVS__) && !defined(__HAIKU__)
967    rusage->ru_maxrss = usage.ru_maxrss;
968    rusage->ru_ixrss = usage.ru_ixrss;
969    rusage->ru_idrss = usage.ru_idrss;
970    rusage->ru_isrss = usage.ru_isrss;
971    rusage->ru_minflt = usage.ru_minflt;
972    rusage->ru_majflt = usage.ru_majflt;
973    rusage->ru_nswap = usage.ru_nswap;
974    rusage->ru_inblock = usage.ru_inblock;
975    rusage->ru_oublock = usage.ru_oublock;
976    rusage->ru_msgsnd = usage.ru_msgsnd;
977    rusage->ru_msgrcv = usage.ru_msgrcv;
978    rusage->ru_nsignals = usage.ru_nsignals;
979    rusage->ru_nvcsw = usage.ru_nvcsw;
980    rusage->ru_nivcsw = usage.ru_nivcsw;
981  #endif
982  
983    return 0;
984  }
985  
986  
987  int uv__open_cloexec(const char* path, int flags) {
988  #if defined(O_CLOEXEC)
989    int fd;
990  
991    fd = open(path, flags | O_CLOEXEC);
992    if (fd == -1)
993      return UV__ERR(errno);
994  
995    return fd;
996  #else  /* O_CLOEXEC */
997    int err;
998    int fd;
999  
1000    fd = open(path, flags);
1001    if (fd == -1)
1002      return UV__ERR(errno);
1003  
1004    err = uv__cloexec(fd, 1);
1005    if (err) {
1006      uv__close(fd);
1007      return err;
1008    }
1009  
1010    return fd;
1011  #endif  /* O_CLOEXEC */
1012  }
1013  
1014  
1015  int uv__slurp(const char* filename, char* buf, size_t len) {
1016    ssize_t n;
1017    int fd;
1018  
1019    assert(len > 0);
1020  
1021    fd = uv__open_cloexec(filename, O_RDONLY);
1022    if (fd < 0)
1023      return fd;
1024  
1025    do
1026      n = read(fd, buf, len - 1);
1027    while (n == -1 && errno == EINTR);
1028  
1029    if (uv__close_nocheckstdio(fd))
1030      abort();
1031  
1032    if (n < 0)
1033      return UV__ERR(errno);
1034  
1035    buf[n] = '\0';
1036  
1037    return 0;
1038  }
1039  
1040  
1041  int uv__dup2_cloexec(int oldfd, int newfd) {
1042  #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
1043    int r;
1044  
1045    r = dup3(oldfd, newfd, O_CLOEXEC);
1046    if (r == -1)
1047      return UV__ERR(errno);
1048  
1049    return r;
1050  #else
1051    int err;
1052    int r;
1053  
1054    r = dup2(oldfd, newfd);  /* Never retry. */
1055    if (r == -1)
1056      return UV__ERR(errno);
1057  
1058    err = uv__cloexec(newfd, 1);
1059    if (err != 0) {
1060      uv__close(newfd);
1061      return err;
1062    }
1063  
1064    return r;
1065  #endif
1066  }
1067  
1068  
1069  int uv_os_homedir(char* buffer, size_t* size) {
1070    uv_passwd_t pwd;
1071    size_t len;
1072    int r;
1073  
1074    /* Check if the HOME environment variable is set first. The task of
1075       performing input validation on buffer and size is taken care of by
1076       uv_os_getenv(). */
1077    r = uv_os_getenv("HOME", buffer, size);
1078  
1079    if (r != UV_ENOENT)
1080      return r;
1081  
1082    /* HOME is not set, so call uv__getpwuid_r() */
1083    r = uv__getpwuid_r(&pwd);
1084  
1085    if (r != 0) {
1086      return r;
1087    }
1088  
1089    len = strlen(pwd.homedir);
1090  
1091    if (len >= *size) {
1092      *size = len + 1;
1093      uv_os_free_passwd(&pwd);
1094      return UV_ENOBUFS;
1095    }
1096  
1097    memcpy(buffer, pwd.homedir, len + 1);
1098    *size = len;
1099    uv_os_free_passwd(&pwd);
1100  
1101    return 0;
1102  }
1103  
1104  
1105  int uv_os_tmpdir(char* buffer, size_t* size) {
1106    const char* buf;
1107    size_t len;
1108  
1109    if (buffer == NULL || size == NULL || *size == 0)
1110      return UV_EINVAL;
1111  
1112  #define CHECK_ENV_VAR(name)                                                   \
1113    do {                                                                        \
1114      buf = getenv(name);                                                       \
1115      if (buf != NULL)                                                          \
1116        goto return_buffer;                                                     \
1117    }                                                                           \
1118    while (0)
1119  
1120    /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
1121    CHECK_ENV_VAR("TMPDIR");
1122    CHECK_ENV_VAR("TMP");
1123    CHECK_ENV_VAR("TEMP");
1124    CHECK_ENV_VAR("TEMPDIR");
1125  
1126  #undef CHECK_ENV_VAR
1127  
1128    /* No temp environment variables defined */
1129    #if defined(__ANDROID__)
1130      buf = "/data/local/tmp";
1131    #else
1132      buf = "/tmp";
1133    #endif
1134  
1135  return_buffer:
1136    len = strlen(buf);
1137  
1138    if (len >= *size) {
1139      *size = len + 1;
1140      return UV_ENOBUFS;
1141    }
1142  
1143    /* The returned directory should not have a trailing slash. */
1144    if (len > 1 && buf[len - 1] == '/') {
1145      len--;
1146    }
1147  
1148    memcpy(buffer, buf, len + 1);
1149    buffer[len] = '\0';
1150    *size = len;
1151  
1152    return 0;
1153  }
1154  
1155  
1156  int uv__getpwuid_r(uv_passwd_t* pwd) {
1157    struct passwd pw;
1158    struct passwd* result;
1159    char* buf;
1160    uid_t uid;
1161    size_t bufsize;
1162    size_t name_size;
1163    size_t homedir_size;
1164    size_t shell_size;
1165    long initsize;
1166    int r;
1167  
1168    if (pwd == NULL)
1169      return UV_EINVAL;
1170  
1171    initsize = sysconf(_SC_GETPW_R_SIZE_MAX);
1172  
1173    if (initsize <= 0)
1174      bufsize = 4096;
1175    else
1176      bufsize = (size_t) initsize;
1177  
1178    uid = geteuid();
1179    buf = NULL;
1180  
1181    for (;;) {
1182      uv__free(buf);
1183      buf = uv__malloc(bufsize);
1184  
1185      if (buf == NULL)
1186        return UV_ENOMEM;
1187  
1188      do
1189        r = getpwuid_r(uid, &pw, buf, bufsize, &result);
1190      while (r == EINTR);
1191  
1192      if (r != ERANGE)
1193        break;
1194  
1195      bufsize *= 2;
1196    }
1197  
1198    if (r != 0) {
1199      uv__free(buf);
1200      return UV__ERR(r);
1201    }
1202  
1203    if (result == NULL) {
1204      uv__free(buf);
1205      return UV_ENOENT;
1206    }
1207  
1208    /* Allocate memory for the username, shell, and home directory */
1209    name_size = strlen(pw.pw_name) + 1;
1210    homedir_size = strlen(pw.pw_dir) + 1;
1211    shell_size = strlen(pw.pw_shell) + 1;
1212    pwd->username = uv__malloc(name_size + homedir_size + shell_size);
1213  
1214    if (pwd->username == NULL) {
1215      uv__free(buf);
1216      return UV_ENOMEM;
1217    }
1218  
1219    /* Copy the username */
1220    memcpy(pwd->username, pw.pw_name, name_size);
1221  
1222    /* Copy the home directory */
1223    pwd->homedir = pwd->username + name_size;
1224    memcpy(pwd->homedir, pw.pw_dir, homedir_size);
1225  
1226    /* Copy the shell */
1227    pwd->shell = pwd->homedir + homedir_size;
1228    memcpy(pwd->shell, pw.pw_shell, shell_size);
1229  
1230    /* Copy the uid and gid */
1231    pwd->uid = pw.pw_uid;
1232    pwd->gid = pw.pw_gid;
1233  
1234    uv__free(buf);
1235  
1236    return 0;
1237  }
1238  
1239  
1240  void uv_os_free_passwd(uv_passwd_t* pwd) {
1241    if (pwd == NULL)
1242      return;
1243  
1244    /*
1245      The memory for name, shell, and homedir is allocated in a single
1246      uv__malloc() call. The base pointer is stored in pwd->username, so that
1247      is the only field that needs to be freed.
1248    */
1249    uv__free(pwd->username);
1250    pwd->username = NULL;
1251    pwd->shell = NULL;
1252    pwd->homedir = NULL;
1253  }
1254  
1255  
1256  int uv_os_get_passwd(uv_passwd_t* pwd) {
1257    return uv__getpwuid_r(pwd);
1258  }
1259  
1260  
1261  int uv_translate_sys_error(int sys_errno) {
1262    /* If < 0 then it's already a libuv error. */
1263    return sys_errno <= 0 ? sys_errno : -sys_errno;
1264  }
1265  
1266  
1267  int uv_os_environ(uv_env_item_t** envitems, int* count) {
1268    int i, j, cnt;
1269    uv_env_item_t* envitem;
1270  
1271    *envitems = NULL;
1272    *count = 0;
1273  
1274    for (i = 0; environ[i] != NULL; i++);
1275  
1276    *envitems = uv__calloc(i, sizeof(**envitems));
1277  
1278    if (*envitems == NULL)
1279      return UV_ENOMEM;
1280  
1281    for (j = 0, cnt = 0; j < i; j++) {
1282      char* buf;
1283      char* ptr;
1284  
1285      if (environ[j] == NULL)
1286        break;
1287  
1288      buf = uv__strdup(environ[j]);
1289      if (buf == NULL)
1290        goto fail;
1291  
1292      ptr = strchr(buf, '=');
1293      if (ptr == NULL) {
1294        uv__free(buf);
1295        continue;
1296      }
1297  
1298      *ptr = '\0';
1299  
1300      envitem = &(*envitems)[cnt];
1301      envitem->name = buf;
1302      envitem->value = ptr + 1;
1303  
1304      cnt++;
1305    }
1306  
1307    *count = cnt;
1308    return 0;
1309  
1310  fail:
1311    for (i = 0; i < cnt; i++) {
1312      envitem = &(*envitems)[i];
1313      uv__free(envitem->name);
1314    }
1315    uv__free(*envitems);
1316  
1317    *envitems = NULL;
1318    *count = 0;
1319    return UV_ENOMEM;
1320  }
1321  
1322  
1323  int uv_os_getenv(const char* name, char* buffer, size_t* size) {
1324    char* var;
1325    size_t len;
1326  
1327    if (name == NULL || buffer == NULL || size == NULL || *size == 0)
1328      return UV_EINVAL;
1329  
1330    var = getenv(name);
1331  
1332    if (var == NULL)
1333      return UV_ENOENT;
1334  
1335    len = strlen(var);
1336  
1337    if (len >= *size) {
1338      *size = len + 1;
1339      return UV_ENOBUFS;
1340    }
1341  
1342    memcpy(buffer, var, len + 1);
1343    *size = len;
1344  
1345    return 0;
1346  }
1347  
1348  
1349  int uv_os_setenv(const char* name, const char* value) {
1350    if (name == NULL || value == NULL)
1351      return UV_EINVAL;
1352  
1353    if (setenv(name, value, 1) != 0)
1354      return UV__ERR(errno);
1355  
1356    return 0;
1357  }
1358  
1359  
1360  int uv_os_unsetenv(const char* name) {
1361    if (name == NULL)
1362      return UV_EINVAL;
1363  
1364    if (unsetenv(name) != 0)
1365      return UV__ERR(errno);
1366  
1367    return 0;
1368  }
1369  
1370  
1371  int uv_os_gethostname(char* buffer, size_t* size) {
1372    /*
1373      On some platforms, if the input buffer is not large enough, gethostname()
1374      succeeds, but truncates the result. libuv can detect this and return ENOBUFS
1375      instead by creating a large enough buffer and comparing the hostname length
1376      to the size input.
1377    */
1378    char buf[UV_MAXHOSTNAMESIZE];
1379    size_t len;
1380  
1381    if (buffer == NULL || size == NULL || *size == 0)
1382      return UV_EINVAL;
1383  
1384    if (gethostname(buf, sizeof(buf)) != 0)
1385      return UV__ERR(errno);
1386  
1387    buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
1388    len = strlen(buf);
1389  
1390    if (len >= *size) {
1391      *size = len + 1;
1392      return UV_ENOBUFS;
1393    }
1394  
1395    memcpy(buffer, buf, len + 1);
1396    *size = len;
1397    return 0;
1398  }
1399  
1400  
1401  uv_os_fd_t uv_get_osfhandle(int fd) {
1402    return fd;
1403  }
1404  
1405  int uv_open_osfhandle(uv_os_fd_t os_fd) {
1406    return os_fd;
1407  }
1408  
1409  uv_pid_t uv_os_getpid(void) {
1410    return getpid();
1411  }
1412  
1413  
1414  uv_pid_t uv_os_getppid(void) {
1415    return getppid();
1416  }
1417  
1418  
1419  int uv_os_getpriority(uv_pid_t pid, int* priority) {
1420    int r;
1421  
1422    if (priority == NULL)
1423      return UV_EINVAL;
1424  
1425    errno = 0;
1426    r = getpriority(PRIO_PROCESS, (int) pid);
1427  
1428    if (r == -1 && errno != 0)
1429      return UV__ERR(errno);
1430  
1431    *priority = r;
1432    return 0;
1433  }
1434  
1435  
1436  int uv_os_setpriority(uv_pid_t pid, int priority) {
1437    if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
1438      return UV_EINVAL;
1439  
1440    if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
1441      return UV__ERR(errno);
1442  
1443    return 0;
1444  }
1445  
1446  
1447  int uv_os_uname(uv_utsname_t* buffer) {
1448    struct utsname buf;
1449    int r;
1450  
1451    if (buffer == NULL)
1452      return UV_EINVAL;
1453  
1454    if (uname(&buf) == -1) {
1455      r = UV__ERR(errno);
1456      goto error;
1457    }
1458  
1459    r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
1460    if (r == UV_E2BIG)
1461      goto error;
1462  
1463  #ifdef _AIX
1464    r = snprintf(buffer->release,
1465                 sizeof(buffer->release),
1466                 "%s.%s",
1467                 buf.version,
1468                 buf.release);
1469    if (r >= sizeof(buffer->release)) {
1470      r = UV_E2BIG;
1471      goto error;
1472    }
1473  #else
1474    r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
1475    if (r == UV_E2BIG)
1476      goto error;
1477  #endif
1478  
1479    r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
1480    if (r == UV_E2BIG)
1481      goto error;
1482  
1483  #if defined(_AIX) || defined(__PASE__)
1484    r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
1485  #else
1486    r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
1487  #endif
1488  
1489    if (r == UV_E2BIG)
1490      goto error;
1491  
1492    return 0;
1493  
1494  error:
1495    buffer->sysname[0] = '\0';
1496    buffer->release[0] = '\0';
1497    buffer->version[0] = '\0';
1498    buffer->machine[0] = '\0';
1499    return r;
1500  }
1501  
1502  int uv__getsockpeername(const uv_handle_t* handle,
1503                          uv__peersockfunc func,
1504                          struct sockaddr* name,
1505                          int* namelen) {
1506    socklen_t socklen;
1507    uv_os_fd_t fd;
1508    int r;
1509  
1510    r = uv_fileno(handle, &fd);
1511    if (r < 0)
1512      return r;
1513  
1514    /* sizeof(socklen_t) != sizeof(int) on some systems. */
1515    socklen = (socklen_t) *namelen;
1516  
1517    if (func(fd, name, &socklen))
1518      return UV__ERR(errno);
1519  
1520    *namelen = (int) socklen;
1521    return 0;
1522  }
1523  
1524  int uv_gettimeofday(uv_timeval64_t* tv) {
1525    struct timeval time;
1526  
1527    if (tv == NULL)
1528      return UV_EINVAL;
1529  
1530    if (gettimeofday(&time, NULL) != 0)
1531      return UV__ERR(errno);
1532  
1533    tv->tv_sec = (int64_t) time.tv_sec;
1534    tv->tv_usec = (int32_t) time.tv_usec;
1535    return 0;
1536  }
1537  
1538  void uv_sleep(unsigned int msec) {
1539    struct timespec timeout;
1540    int rc;
1541  
1542    timeout.tv_sec = msec / 1000;
1543    timeout.tv_nsec = (msec % 1000) * 1000 * 1000;
1544  
1545    do
1546      rc = nanosleep(&timeout, &timeout);
1547    while (rc == -1 && errno == EINTR);
1548  
1549    assert(rc == 0);
1550  }
1551  
1552  int uv__search_path(const char* prog, char* buf, size_t* buflen) {
1553    char abspath[UV__PATH_MAX];
1554    size_t abspath_size;
1555    char trypath[UV__PATH_MAX];
1556    char* cloned_path;
1557    char* path_env;
1558    char* token;
1559  
1560    if (buf == NULL || buflen == NULL || *buflen == 0)
1561      return UV_EINVAL;
1562  
1563    /*
1564     * Possibilities for prog:
1565     * i) an absolute path such as: /home/user/myprojects/nodejs/node
1566     * ii) a relative path such as: ./node or ../myprojects/nodejs/node
1567     * iii) a bare filename such as "node", resolved by searching the
1568     *     directories listed in the PATH environment variable.
1569     */
1570  
1571    /* Case i) and ii) absolute or relative paths */
1572    if (strchr(prog, '/') != NULL) {
1573      if (realpath(prog, abspath) != abspath)
1574        return UV__ERR(errno);
1575  
1576      abspath_size = strlen(abspath);
1577  
1578      *buflen -= 1;
1579      if (*buflen > abspath_size)
1580        *buflen = abspath_size;
1581  
1582      memcpy(buf, abspath, *buflen);
1583      buf[*buflen] = '\0';
1584  
1585      return 0;
1586    }
1587  
1588    /* Case iii). Search PATH environment variable */
1589    cloned_path = NULL;
1590    token = NULL;
1591    path_env = getenv("PATH");
1592  
1593    if (path_env == NULL)
1594      return UV_EINVAL;
1595  
1596    cloned_path = uv__strdup(path_env);
1597    if (cloned_path == NULL)
1598      return UV_ENOMEM;
1599  
1600    token = strtok(cloned_path, ":");
1601    while (token != NULL) {
1602      snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
1603      if (realpath(trypath, abspath) == abspath) {
1604        /* Check the match is executable */
1605        if (access(abspath, X_OK) == 0) {
1606          abspath_size = strlen(abspath);
1607  
1608          *buflen -= 1;
1609          if (*buflen > abspath_size)
1610            *buflen = abspath_size;
1611  
1612          memcpy(buf, abspath, *buflen);
1613          buf[*buflen] = '\0';
1614  
1615          uv__free(cloned_path);
1616          return 0;
1617        }
1618      }
1619      token = strtok(NULL, ":");
1620    }
1621    uv__free(cloned_path);
1622  
1623    /* Out of tokens (path entries), and no match found */
1624    return UV_EINVAL;
1625  }
1626  
1627  
1628  unsigned int uv_available_parallelism(void) {
1629  #ifdef __linux__
1630    cpu_set_t set;
1631    long rc;
1632  
1633    memset(&set, 0, sizeof(set));
1634  
1635    /* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
1636     * glibc it's... complicated... so for consistency try sched_getaffinity()
1637     * before falling back to sysconf(_SC_NPROCESSORS_ONLN).
1638     */
1639    if (0 == sched_getaffinity(0, sizeof(set), &set))
1640      rc = CPU_COUNT(&set);
1641    else
1642      rc = sysconf(_SC_NPROCESSORS_ONLN);
1643  
1644    if (rc < 1)
1645      rc = 1;
1646  
1647    return (unsigned) rc;
1648  #elif defined(__MVS__)
1649    return 1;  /* TODO(bnoordhuis) Read from CSD_NUMBER_ONLINE_CPUS? */
1650  #else  /* __linux__ */
1651    long rc;
1652  
1653    rc = sysconf(_SC_NPROCESSORS_ONLN);
1654    if (rc < 1)
1655      rc = 1;
1656  
1657    return (unsigned) rc;
1658  #endif  /* __linux__ */
1659  }
1660