/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv_log.h"
#include "internal.h"
#include "strtok.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>  /* O_CLOEXEC */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>
#include <grp.h>
#include <sys/utsname.h>
#include <sys/time.h>
#include <time.h> /* clock_gettime */

#ifdef __sun
# include <sys/filio.h>
# include <sys/wait.h>
#endif

#if defined(__APPLE__)
# include <sys/filio.h>
#endif /* defined(__APPLE__) */


#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <crt_externs.h>
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# define environ (*_NSGetEnviron())
#else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
extern char** environ;
#endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */


#if defined(__DragonFly__)      || \
    defined(__FreeBSD__)        || \
    defined(__NetBSD__)         || \
    defined(__OpenBSD__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
# include <sys/param.h>
# if defined(__FreeBSD__)
#  include <sys/cpuset.h>
#  define uv__accept4 accept4
# endif
# if defined(__NetBSD__)
#  define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
# endif
#endif

#if defined(__MVS__)
# include <sys/ioctl.h>
# include "zos-sys-info.h"
#endif

#if defined(__linux__)
# include <sched.h>
# include <sys/syscall.h>
# define gettid() syscall(SYS_gettid)
# define uv__accept4 accept4
#endif

#if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
# include <sanitizer/linux_syscall_hooks.h>
#endif

static void uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));


/* https://github.com/libuv/libuv/issues/1674 */
int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
  struct timespec t;
  int r;

  if (ts == NULL)
    return UV_EFAULT;

  switch (clock_id) {
    default:
      return UV_EINVAL;
    case UV_CLOCK_MONOTONIC:
      r = clock_gettime(CLOCK_MONOTONIC, &t);
      break;
    case UV_CLOCK_REALTIME:
      r = clock_gettime(CLOCK_REALTIME, &t);
      break;
  }

  if (r)
    return UV__ERR(errno);

  ts->tv_sec = t.tv_sec;
  ts->tv_nsec = t.tv_nsec;

  return 0;
}
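
/* Illustrative usage sketch (not part of this file): read the monotonic
 * clock through the public wrapper. uv_timespec64_t carries a 64-bit
 * tv_sec and a 32-bit tv_nsec.
 *
 *   uv_timespec64_t ts;
 *   if (uv_clock_gettime(UV_CLOCK_MONOTONIC, &ts) == 0)
 *     printf("%lld.%09d\n", (long long) ts.tv_sec, (int) ts.tv_nsec);
 */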


uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}


void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));
#if defined(USE_OHOS_DFX) && defined(__aarch64__)
  uv__multi_thread_check_unify(handle->loop, __func__);
#endif
  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__stream_close((uv_stream_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
#if defined(__sun) || defined(__MVS__)
    /*
     * On Solaris, illumos, and z/OS we will not be able to dissociate the
     * watcher for an event which is pending delivery, so we cannot always call
     * uv__make_close_pending() straight away. The backend will call the
     * function once the event has cleared.
     */
    return;
#endif
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    break;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}
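
/* Illustrative usage sketch (not part of this file): the close callback is
 * the earliest point at which a heap-allocated handle may be released,
 * since the handle stays queued on loop->closing_handles until the next
 * loop iteration.
 *
 *   static void on_close(uv_handle_t* handle) {
 *     free(handle);                          // handle was malloc()'d
 *   }
 *
 *   uv_close((uv_handle_t*) tcp, on_close);  // tcp: a malloc()'d uv_tcp_t*
 */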

int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return UV__ERR(errno);

  return 0;
}

void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static _Atomic int iovmax_cached = -1;
  int iovmax;

  iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
  if (iovmax != -1)
    return iovmax;

  /* On some embedded devices (e.g. arm-linux-uclibc based IP cameras),
   * sysconf(_SC_IOV_MAX) does not return the correct value; it fails with
   * -1 and errno set to EINPROGRESS. Degrade gracefully to 1 in that case.
   */
  iovmax = sysconf(_SC_IOV_MAX);
  if (iovmax == -1)
    iovmax = 1;

  atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);

  return iovmax;
#else
  return 1024;
#endif
}


static void uv__finish_close(uv_handle_t* handle) {
  uv_signal_t* sh;

  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
    case UV_PREPARE:
    case UV_CHECK:
    case UV_IDLE:
    case UV_ASYNC:
    case UV_TIMER:
    case UV_PROCESS:
    case UV_FS_EVENT:
    case UV_FS_POLL:
    case UV_POLL:
      break;

    case UV_SIGNAL:
      /* If there are any caught signals "trapped" in the signal pipe,
       * we can't call the close callback yet. Reinserting the handle
       * into the closing queue makes the event loop spin but that's
       * okay because we only need to deliver the pending events.
       */
      sh = (uv_signal_t*) handle;
      if (sh->caught_signals > sh->dispatched_signals) {
        handle->flags ^= UV_HANDLE_CLOSED;
        uv__make_close_pending(handle);  /* Back into the queue. */
        return;
      }
      break;

    case UV_NAMED_PIPE:
    case UV_TCP:
    case UV_TTY:
      uv__stream_destroy((uv_stream_t*)handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*)handle);
      break;

    default:
      assert(0);
      break;
  }

  uv__handle_unref(handle);
  uv__queue_remove(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}


static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}


int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}


int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}


static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         !uv__queue_empty(&loop->pending_queue) ||
         loop->closing_handles != NULL;
}


static int uv__backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag == 0 &&
      /* uv__loop_alive(loop) && */
      (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
      uv__queue_empty(&loop->pending_queue) &&
      uv__queue_empty(&loop->idle_handles) &&
      (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
      loop->closing_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
}


int uv_backend_timeout(const uv_loop_t* loop) {
  if (uv__queue_empty(&loop->watcher_queue))
    return uv__backend_timeout(loop);
  /* Need to call uv_run to update the backend fd state. */
  return 0;
}

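/* Illustrative embedding sketch (not part of this file): an outer event
 * loop can wait on libuv's backend fd, using uv_backend_timeout() as the
 * poll timeout, and then pump libuv without blocking:
 *
 *   struct pollfd p = { uv_backend_fd(loop), POLLIN, 0 };
 *   poll(&p, 1, uv_backend_timeout(loop));
 *   uv_run(loop, UV_RUN_NOWAIT);
 */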

int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}


int uv_loop_alive_taskpool(const uv_loop_t* loop, int initial_handles) {
  return loop->active_handles > initial_handles ||
         uv__has_active_reqs(loop) ||
         !uv__queue_empty(&loop->pending_queue) ||
         loop->closing_handles != NULL;
}


int is_uv_loop_good_magic(const uv_loop_t* loop);


int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int can_sleep;
#if defined(USE_OHOS_DFX) && defined(__aarch64__)
  uv__set_thread_id(loop);
#endif

  if (!is_uv_loop_good_magic(loop)) {
    return 0;
  }

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    if (!is_uv_loop_good_magic(loop)) {
      return 0;
    }

    uv__update_time(loop);
    uv__run_timers(loop);

    can_sleep =
        uv__queue_empty(&loop->pending_queue) &&
        uv__queue_empty(&loop->idle_handles);

    uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
      timeout = uv__backend_timeout(loop);

    uv__metrics_inc_loop_count(loop);

    uv__io_poll(loop, timeout);

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation. */
    for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++)
      uv__run_pending(loop);

    /* Run one final update on the provider_idle_time in case uv__io_poll
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated b/c an event was received.
     */
    uv__metrics_update_idle_time(loop);

    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}
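
/* Illustrative usage sketch (not part of this file): the three run modes.
 *
 *   uv_run(loop, UV_RUN_DEFAULT);  // run until no active handles/requests
 *   uv_run(loop, UV_RUN_ONCE);     // block for one iteration of the loop
 *   uv_run(loop, UV_RUN_NOWAIT);   // poll once without blocking
 *
 * A non-zero return value means the loop is still alive and, for the last
 * two modes, that uv_run() should be called again at some point.
 */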


void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}


int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}


/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}

/* Get a FILE pointer to a file opened read-only in close-on-exec mode. */
FILE* uv__open_file(const char* path) {
  int fd;
  FILE* fp;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  fp = fdopen(fd, "r");
  if (fp == NULL)
    uv__close(fd);

  return fp;
}


int uv__accept(int sockfd) {
  int peerfd;
  int err;

  (void) &err;
  assert(sockfd >= 0);

  do
#ifdef uv__accept4
    peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
#else
    peerfd = accept(sockfd, NULL, NULL);
#endif
  while (peerfd == -1 && errno == EINTR);

  if (peerfd == -1)
    return UV__ERR(errno);

#ifndef uv__accept4
  err = uv__cloexec(peerfd, 1);
  if (err == 0)
    err = uv__nonblock(peerfd, 1);

  if (err != 0) {
    uv__close(peerfd);
    return err;
  }
#endif

  return peerfd;
}


/* close() on macos has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__) || TARGET_OS_IPHONE
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
  long rc;
  __sanitizer_syscall_pre_close(fd);
  rc = syscall(SYS_close, fd);
  __sanitizer_syscall_post_close(rc, fd);
  return rc;
#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}


int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0;    /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}


int uv__close(int fd) {
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
#if defined(__MVS__)
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}

#if UV__NONBLOCK_IS_IOCTL
int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}
#endif


int uv__nonblock_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


int uv__cloexec(int fd, int set) {
  int flags;
  int r;

  flags = 0;
  if (set)
    flags = FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


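/* recvmsg() wrapper that marks any file descriptors received as SCM_RIGHTS
 * ancillary data close-on-exec: atomically via MSG_CMSG_CLOEXEC on platforms
 * that have it, otherwise by walking the control messages and applying
 * uv__cloexec() to each descriptor after the fact.
 */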
ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
#if defined(__ANDROID__)   || \
    defined(__DragonFly__) || \
    defined(__FreeBSD__)   || \
    defined(__NetBSD__)    || \
    defined(__OpenBSD__)   || \
    defined(__linux__)
  ssize_t rc;
  rc = recvmsg(fd, msg, flags | MSG_CMSG_CLOEXEC);
  if (rc == -1)
    return UV__ERR(errno);
  return rc;
#else
  struct cmsghdr* cmsg;
  int* pfd;
  int* end;
  ssize_t rc;
  rc = recvmsg(fd, msg, flags);
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
#endif
}


int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  if (buffer == scratch) {
    *size += 1;
    return UV_ENOBUFS;
  }

  return 0;
}
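
/* Illustrative usage sketch (not part of this file), showing the two-call
 * protocol: on UV_ENOBUFS, *size has been updated to the required length,
 * including the terminating NUL byte.
 *
 *   char small[8];
 *   size_t size = sizeof(small);
 *   if (uv_cwd(small, &size) == UV_ENOBUFS) {
 *     char* big = malloc(size);
 *     if (big != NULL && uv_cwd(big, &size) == 0)
 *       printf("%s\n", big);    // 'size' now holds strlen(big)
 *     free(big);
 *   }
 */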


int uv_chdir(const char* dir) {
  if (chdir(dir))
    return UV__ERR(errno);

  return 0;
}


void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}


int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}


static void uv__run_pending(uv_loop_t* loop) {
  struct uv__queue* q;
  struct uv__queue pq;
  uv__io_t* w;

  uv__queue_move(&loop->pending_queue, &pq);

  while (!uv__queue_empty(&pq)) {
    q = uv__queue_head(&pq);
    uv__queue_remove(q);
    uv__queue_init(q);
    w = uv__queue_data(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }
}


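/* Round val up to the next power of two by bit smearing: after the
 * decrement, each shift-and-or propagates the highest set bit into every
 * lower position, leaving a string of ones that the final increment turns
 * into a power of two. For example, 33 -> 32 (0b100000) after the decrement
 * -> 0b111111 (63) after the shifts -> 64; an exact power of two such as 32
 * maps to itself.
 */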
static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}

static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__reallocf(loop->watchers,
                          (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}


void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  uv__queue_init(&w->pending_queue);
  uv__queue_init(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;
}


void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}


void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    uv__queue_remove(&w->watcher_queue);
    uv__queue_init(&w->watcher_queue);
    w->events = 0;

    if (w == loop->watchers[w->fd]) {
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
    }
  }
  else if (uv__queue_empty(&w->watcher_queue))
    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}


void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  uv__queue_remove(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}


void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (uv__queue_empty(&w->pending_queue))
    uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue);
}


int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}


int uv__fd_exists(uv_loop_t* loop, int fd) {
  return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
}


int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__) && !defined(__HAIKU__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
   * the outliers because of course they are.
   */
#if defined(__APPLE__)
  rusage->ru_maxrss /= 1024;                  /* macOS and iOS report bytes. */
#elif defined(__sun)
  rusage->ru_maxrss /= getpagesize() / 1024;  /* Solaris reports pages. */
#endif

  return 0;
}


int uv__open_cloexec(const char* path, int flags) {
#if defined(O_CLOEXEC)
  int fd;

  fd = open(path, flags | O_CLOEXEC);
  if (fd == -1)
    return UV__ERR(errno);

  return fd;
#else  /* O_CLOEXEC */
  int err;
  int fd;

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
#endif  /* O_CLOEXEC */
}


int uv__slurp(const char* filename, char* buf, size_t len) {
  ssize_t n;
  int fd;

  assert(len > 0);

  fd = uv__open_cloexec(filename, O_RDONLY);
  if (fd < 0)
    return fd;

  do
    n = read(fd, buf, len - 1);
  while (n == -1 && errno == EINTR);

  if (uv__close_nocheckstdio(fd))
    abort();

  if (n < 0)
    return UV__ERR(errno);

  buf[n] = '\0';

  return 0;
}


int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
  int r;

  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);

  return r;
#else
  int err;
  int r;

  r = dup2(oldfd, newfd);  /* Never retry. */
  if (r == -1)
    return UV__ERR(errno);

  err = uv__cloexec(newfd, 1);
  if (err != 0) {
    uv__close(newfd);
    return err;
  }

  return r;
#endif
}


int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  size_t len;
  int r;

  /* Check if the HOME environment variable is set first. The task of
     performing input validation on buffer and size is taken care of by
     uv_os_getenv(). */
  r = uv_os_getenv("HOME", buffer, size);

  if (r != UV_ENOENT)
    return r;

  /* HOME is not set, so call uv_os_get_passwd() */
  r = uv_os_get_passwd(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return UV_ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}


int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

#define CHECK_ENV_VAR(name)                                                   \
  do {                                                                        \
    buf = getenv(name);                                                       \
    if (buf != NULL)                                                          \
      goto return_buffer;                                                     \
  }                                                                           \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
  #if defined(__ANDROID__)
    buf = "/data/local/tmp";
  #else
    buf = "/tmp";
  #endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  /* The returned directory should not have a trailing slash. */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}


static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  int r;

  if (pwd == NULL)
    return UV_EINVAL;

  /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The pwent
   * will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getpwuid_r(uid, &pw, buf, bufsize, &result);
    while (r == EINTR);

    if (r != 0 || result == NULL)
      uv__free(buf);

    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}


int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
#if defined(__ANDROID__) && __ANDROID_API__ < 24
  /* getgrgid_r() was added in Android N (API level 24). */
  return UV_ENOSYS;
#else
  struct group gp;
  struct group* result;
  char* buf;
  char* gr_mem;
  size_t bufsize;
  size_t name_size;
  long members;
  size_t mem_size;
  int r;

  if (grp == NULL)
    return UV_EINVAL;

  /* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would get the suggested size, but it
   * is frequently 1024 or 4096, so we can just use that directly. The group
   * entry will not usually be large. */
  for (bufsize = 2000;; bufsize *= 2) {
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    do
      r = getgrgid_r(gid, &gp, buf, bufsize, &result);
    while (r == EINTR);

    if (r != 0 || result == NULL)
      uv__free(buf);

    if (r != ERANGE)
      break;
  }

  if (r != 0)
    return UV__ERR(r);

  if (result == NULL)
    return UV_ENOENT;

  /* Allocate memory for the groupname and members. */
  name_size = strlen(gp.gr_name) + 1;
  members = 0;
  mem_size = sizeof(char*);
  for (r = 0; gp.gr_mem[r] != NULL; r++) {
    mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*);
    members++;
  }

  gr_mem = uv__malloc(name_size + mem_size);
  if (gr_mem == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the members */
  grp->members = (char**) gr_mem;
  grp->members[members] = NULL;
  gr_mem = (char*) &grp->members[members + 1];
  for (r = 0; r < members; r++) {
    grp->members[r] = gr_mem;
    strcpy(gr_mem, gp.gr_mem[r]);
    gr_mem += strlen(gr_mem) + 1;
  }
  assert(gr_mem == (char*)grp->members + mem_size);

  /* Copy the groupname */
  grp->groupname = gr_mem;
  memcpy(grp->groupname, gp.gr_name, name_size);
  gr_mem += name_size;

  /* Copy the gid */
  grp->gid = gp.gr_gid;

  uv__free(buf);

  return 0;
#endif
}


int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd, geteuid());
}


int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
  return uv__getpwuid_r(pwd, uid);
}


int uv_translate_sys_error(int sys_errno) {
  /* If < 0 then it's already a libuv error. */
  return sys_errno <= 0 ? sys_errno : -sys_errno;
}


int uv_os_environ(uv_env_item_t** envitems, int* count) {
  int i, j, cnt;
  uv_env_item_t* envitem;

  *envitems = NULL;
  *count = 0;

  for (i = 0; environ[i] != NULL; i++);

  *envitems = uv__calloc(i, sizeof(**envitems));

  if (*envitems == NULL)
    return UV_ENOMEM;

  for (j = 0, cnt = 0; j < i; j++) {
    char* buf;
    char* ptr;

    if (environ[j] == NULL)
      break;

    buf = uv__strdup(environ[j]);
    if (buf == NULL)
      goto fail;

    ptr = strchr(buf, '=');
    if (ptr == NULL) {
      uv__free(buf);
      continue;
    }

    *ptr = '\0';

    envitem = &(*envitems)[cnt];
    envitem->name = buf;
    envitem->value = ptr + 1;

    cnt++;
  }

  *count = cnt;
  return 0;

fail:
  /* Free each name that was actually populated; indexing with i rather
   * than cnt so every strdup'd entry is released. */
  for (i = 0; i < cnt; i++) {
    envitem = &(*envitems)[i];
    uv__free(envitem->name);
  }
  uv__free(*envitems);

  *envitems = NULL;
  *count = 0;
  return UV_ENOMEM;
}


int uv_os_getenv(const char* name, char* buffer, size_t* size) {
  char* var;
  size_t len;

  if (name == NULL || buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  var = getenv(name);

  if (var == NULL)
    return UV_ENOENT;

  len = strlen(var);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, var, len + 1);
  *size = len;

  return 0;
}


int uv_os_setenv(const char* name, const char* value) {
  if (name == NULL || value == NULL)
    return UV_EINVAL;

  if (setenv(name, value, 1) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_unsetenv(const char* name) {
  if (name == NULL)
    return UV_EINVAL;

  if (unsetenv(name) != 0)
    return UV__ERR(errno);

  return 0;
}
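
/* Illustrative usage sketch (not part of this file): like uv_cwd(), the
 * getters report the required buffer size through UV_ENOBUFS, so a caller
 * can retry with a heap buffer of the reported size.
 *
 *   char small[8];
 *   size_t size = sizeof(small);
 *   if (uv_os_getenv("PATH", small, &size) == UV_ENOBUFS) {
 *     char* big = malloc(size);  // 'size' includes the NUL byte
 *     if (big != NULL && uv_os_getenv("PATH", big, &size) == 0)
 *       printf("PATH=%s\n", big);
 *     free(big);
 *   }
 */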


int uv_os_gethostname(char* buffer, size_t* size) {
  /*
    On some platforms, if the input buffer is not large enough, gethostname()
    succeeds, but truncates the result. libuv can detect this and return ENOBUFS
    instead by creating a large enough buffer and comparing the hostname length
    to the size input.
  */
  char buf[UV_MAXHOSTNAMESIZE];
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (gethostname(buf, sizeof(buf)) != 0)
    return UV__ERR(errno);

  buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, buf, len + 1);
  *size = len;
  return 0;
}


uv_os_fd_t uv_get_osfhandle(int fd) {
  return fd;
}

int uv_open_osfhandle(uv_os_fd_t os_fd) {
  return os_fd;
}

uv_pid_t uv_os_getpid(void) {
  return getpid();
}


uv_pid_t uv_os_getppid(void) {
  return getppid();
}

int uv_cpumask_size(void) {
#if UV__CPU_AFFINITY_SUPPORTED
  return CPU_SETSIZE;
#else
  return UV_ENOTSUP;
#endif
}

int uv_os_getpriority(uv_pid_t pid, int* priority) {
  int r;

  if (priority == NULL)
    return UV_EINVAL;

  errno = 0;
  r = getpriority(PRIO_PROCESS, (int) pid);

  if (r == -1 && errno != 0)
    return UV__ERR(errno);

  *priority = r;
  return 0;
}


int uv_os_setpriority(uv_pid_t pid, int priority) {
  if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
    return UV_EINVAL;

  if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
    return UV__ERR(errno);

  return 0;
}

/**
 * If the function succeeds, the return value is 0.
 * If the function fails, the return value is non-zero.
 * On Linux, when the scheduling policy is SCHED_OTHER (the default), the
 * thread priority is always 0, so the output parameter priority is actually
 * the nice value.
 */
int uv_thread_getpriority(uv_thread_t tid, int* priority) {
  int r;
  int policy;
  struct sched_param param;
#ifdef __linux__
  pid_t pid = gettid();
#endif

  if (priority == NULL)
    return UV_EINVAL;

  r = pthread_getschedparam(tid, &policy, &param);
  if (r != 0)
    return UV__ERR(errno);

#ifdef __linux__
  if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self())) {
    errno = 0;
    r = getpriority(PRIO_PROCESS, pid);
    if (r == -1 && errno != 0)
      return UV__ERR(errno);
    *priority = r;
    return 0;
  }
#endif

  *priority = param.sched_priority;
  return 0;
}

#ifdef __linux__
static int set_nice_for_calling_thread(int priority) {
  int r;
  int nice;

  if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
    return UV_EINVAL;

  pid_t pid = gettid();
  nice = 0 - priority * 2;
  r = setpriority(PRIO_PROCESS, pid, nice);
  if (r != 0)
    return UV__ERR(errno);
  return 0;
}
#endif

/**
 * If the function succeeds, the return value is 0.
 * If the function fails, the return value is non-zero.
 */
int uv_thread_setpriority(uv_thread_t tid, int priority) {
  int r;
  int min;
  int max;
  int range;
  int prio;
  int policy;
  struct sched_param param;

  if (priority < UV_THREAD_PRIORITY_LOWEST || priority > UV_THREAD_PRIORITY_HIGHEST)
    return UV_EINVAL;

  r = pthread_getschedparam(tid, &policy, &param);
  if (r != 0)
    return UV__ERR(errno);

#ifdef __linux__
/**
 * On Linux, when the scheduling policy is SCHED_OTHER (the default), the
 * priority must be 0, so we set the nice value instead.
 */
  if (SCHED_OTHER == policy && pthread_equal(tid, pthread_self()))
    return set_nice_for_calling_thread(priority);
#endif

#ifdef __PASE__
  min = 1;
  max = 127;
#else
  min = sched_get_priority_min(policy);
  max = sched_get_priority_max(policy);
#endif

  if (min == -1 || max == -1)
    return UV__ERR(errno);

  range = max - min;

  switch (priority) {
    case UV_THREAD_PRIORITY_HIGHEST:
      prio = max;
      break;
    case UV_THREAD_PRIORITY_ABOVE_NORMAL:
      prio = min + range * 3 / 4;
      break;
    case UV_THREAD_PRIORITY_NORMAL:
      prio = min + range / 2;
      break;
    case UV_THREAD_PRIORITY_BELOW_NORMAL:
      prio = min + range / 4;
      break;
    case UV_THREAD_PRIORITY_LOWEST:
      prio = min;
      break;
    default:
      return 0;
  }

  if (param.sched_priority != prio) {
    param.sched_priority = prio;
    r = pthread_setschedparam(tid, policy, &param);
    if (r != 0)
      return UV__ERR(errno);
  }

  return 0;
}
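
/* Worked example of the mapping in uv_thread_setpriority() above: on Linux
 * with SCHED_RR, where sched_get_priority_min()/max() commonly return 1 and
 * 99, range is 98 and the five levels map to 99 (highest),
 * 74 (above normal, 1 + 98 * 3 / 4), 50 (normal), 25 (below normal), and
 * 1 (lowest).
 */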

int uv_os_uname(uv_utsname_t* buffer) {
  struct utsname buf;
  int r;

  if (buffer == NULL)
    return UV_EINVAL;

  if (uname(&buf) == -1) {
    r = UV__ERR(errno);
    goto error;
  }

  r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
  if (r == UV_E2BIG)
    goto error;

#ifdef _AIX
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
  if (r >= sizeof(buffer->release)) {
    r = UV_E2BIG;
    goto error;
  }
#else
  r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
  if (r == UV_E2BIG)
    goto error;
#endif

  r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
  if (r == UV_E2BIG)
    goto error;

#if defined(_AIX) || defined(__PASE__)
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
#else
  r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
#endif

  if (r == UV_E2BIG)
    goto error;

  return 0;

error:
  buffer->sysname[0] = '\0';
  buffer->release[0] = '\0';
  buffer->version[0] = '\0';
  buffer->machine[0] = '\0';
  return r;
}

int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen) {
  socklen_t socklen;
  uv_os_fd_t fd;
  int r;

  r = uv_fileno(handle, &fd);
  if (r < 0)
    return r;

  /* sizeof(socklen_t) != sizeof(int) on some systems. */
  socklen = (socklen_t) *namelen;

  if (func(fd, name, &socklen))
    return UV__ERR(errno);

  *namelen = (int) socklen;
  return 0;
}

int uv_gettimeofday(uv_timeval64_t* tv) {
  struct timeval time;

  if (tv == NULL)
    return UV_EINVAL;

  if (gettimeofday(&time, NULL) != 0)
    return UV__ERR(errno);

  tv->tv_sec = (int64_t) time.tv_sec;
  tv->tv_usec = (int32_t) time.tv_usec;
  return 0;
}

void uv_sleep(unsigned int msec) {
  struct timespec timeout;
  int rc;

  timeout.tv_sec = msec / 1000;
  timeout.tv_nsec = (msec % 1000) * 1000 * 1000;

  do
    rc = nanosleep(&timeout, &timeout);
  while (rc == -1 && errno == EINTR);

  assert(rc == 0);
}

int uv__search_path(const char* prog, char* buf, size_t* buflen) {
  char abspath[UV__PATH_MAX];
  size_t abspath_size;
  char trypath[UV__PATH_MAX];
  char* cloned_path;
  char* path_env;
  char* token;
  char* itr;

  if (buf == NULL || buflen == NULL || *buflen == 0)
    return UV_EINVAL;

  /*
   * Possibilities for prog:
   * i) an absolute path such as: /home/user/myprojects/nodejs/node
   * ii) a relative path such as: ./node or ../myprojects/nodejs/node
   * iii) a bare filename such as "node", after exporting PATH variable
   *     to its location.
   */

  /* Case i) and ii) absolute or relative paths */
  if (strchr(prog, '/') != NULL) {
    if (realpath(prog, abspath) != abspath)
      return UV__ERR(errno);

    abspath_size = strlen(abspath);

    *buflen -= 1;
    if (*buflen > abspath_size)
      *buflen = abspath_size;

    memcpy(buf, abspath, *buflen);
    buf[*buflen] = '\0';

    return 0;
  }

  /* Case iii). Search PATH environment variable */
  cloned_path = NULL;
  token = NULL;
  path_env = getenv("PATH");

  if (path_env == NULL)
    return UV_EINVAL;

  cloned_path = uv__strdup(path_env);
  if (cloned_path == NULL)
    return UV_ENOMEM;

  token = uv__strtok(cloned_path, ":", &itr);
  while (token != NULL) {
    snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
    if (realpath(trypath, abspath) == abspath) {
      /* Check the match is executable */
      if (access(abspath, X_OK) == 0) {
        abspath_size = strlen(abspath);

        *buflen -= 1;
        if (*buflen > abspath_size)
          *buflen = abspath_size;

        memcpy(buf, abspath, *buflen);
        buf[*buflen] = '\0';

        uv__free(cloned_path);
        return 0;
      }
    }
    token = uv__strtok(NULL, ":", &itr);
  }
  uv__free(cloned_path);

  /* Out of tokens (path entries), and no match found */
  return UV_EINVAL;
}
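
/* Illustrative usage sketch (not part of this file): resolve a bare program
 * name against $PATH, as an exepath-style lookup would.
 *
 *   char buf[UV__PATH_MAX];
 *   size_t len = sizeof(buf);
 *   if (uv__search_path("node", buf, &len) == 0)
 *     printf("%.*s\n", (int) len, buf);  // resolved absolute path
 */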


unsigned int uv_available_parallelism(void) {
#ifdef __linux__
  cpu_set_t set;
  long rc;

  memset(&set, 0, sizeof(set));

  /* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
   * glibc it's... complicated... so for consistency try sched_getaffinity()
   * before falling back to sysconf(_SC_NPROCESSORS_ONLN).
   */
  if (0 == sched_getaffinity(0, sizeof(set), &set))
    rc = CPU_COUNT(&set);
  else
    rc = sysconf(_SC_NPROCESSORS_ONLN);

  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#elif defined(__MVS__)
  int rc;

  rc = __get_num_online_cpus();
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#else  /* __linux__ */
  long rc;

  rc = sysconf(_SC_NPROCESSORS_ONLN);
  if (rc < 1)
    rc = 1;

  return (unsigned) rc;
#endif  /* __linux__ */
}

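/* The task-event helpers below stash a magic tag in the otherwise-unused
 * high bits of the loop->data pointer (aarch64 user-space addresses leave
 * them clear, which uv_register_task_to_event() verifies before tagging).
 * Consumers such as uv_check_data_valid() can then tell a pointer installed
 * here apart from arbitrary user data before stripping the tag and
 * dereferencing. UV_EVENT_MAGIC_OFFSET and UV_EVENT_MAGIC_OFFSETBITS are
 * defined elsewhere in this tree.
 */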
int uv_register_task_to_event(struct uv_loop_s* loop, uv_post_task func, void* handler)
{
#if defined(__aarch64__)
  if (loop == NULL)
    return -1;

  struct uv_loop_data* data = (struct uv_loop_data*)malloc(sizeof(struct uv_loop_data));
  if (data == NULL)
    return -1;
  if ((uint64_t)data >> UV_EVENT_MAGIC_OFFSETBITS != 0x0) {
    UV_LOGE("malloc address error");
    free(data);
    return -1;
  }

  (void)memset(data, 0, sizeof(struct uv_loop_data));
  data->post_task_func = func;
  data->event_handler = handler;
  data = (struct uv_loop_data*)((uint64_t)data | (UV_EVENT_MAGIC_OFFSET << UV_EVENT_MAGIC_OFFSETBITS));
  loop->data = (void *)data;
  return 0;
#else
  return -1;
#endif
}


int uv_unregister_task_to_event(struct uv_loop_s* loop)
{
#if defined(__aarch64__)
  if (loop == NULL || loop->data == NULL ||
    ((uint64_t)loop->data >> UV_EVENT_MAGIC_OFFSETBITS) != (uint64_t)(UV_EVENT_MAGIC_OFFSET))
    return -1;
  loop->data = (struct uv_loop_data*)((uint64_t)loop->data -
    (UV_EVENT_MAGIC_OFFSET << UV_EVENT_MAGIC_OFFSETBITS));
  free(loop->data);
  loop->data = NULL;
  return 0;
#else
  return -1;
#endif
}


int uv_check_data_valid(struct uv_loop_data* data) {
#if defined(__aarch64__)
  if (data == NULL || ((uint64_t)data >> UV_EVENT_MAGIC_OFFSETBITS) != (uint64_t)(UV_EVENT_MAGIC_OFFSET)) {
    return -1;
  }
  struct uv_loop_data* addr = (struct uv_loop_data*)((uint64_t)data -
    (UV_EVENT_MAGIC_OFFSET << UV_EVENT_MAGIC_OFFSETBITS));
  if (addr->post_task_func == NULL) {
    UV_LOGE("post_task_func NULL");
    return -1;
  }
  return 0;
#else
  return -1;
#endif
}