/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#ifndef SUNOS_NO_IFADDRS
# include <ifaddrs.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_arp.h>
#include <sys/sockio.h>

#include <sys/loadavg.h>
#include <sys/time.h>
#include <unistd.h>
#include <kstat.h>
#include <fcntl.h>

#include <sys/port.h>
#include <port.h>

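/* Sentinel values kept in handle->fd by the fs event code below to track the
 * state of a watched file's event port association.
 */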
#define PORT_FIRED 0x69
#define PORT_UNUSED 0x0
#define PORT_LOADED 0x99
#define PORT_DELETED -1

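/* <procfs.h> refuses to compile in a 32-bit large-file environment, so
 * temporarily drop _FILE_OFFSET_BITS around the include and restore it after.
 */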
#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
#define PROCFS_FILE_OFFSET_BITS_HACK 1
#undef _FILE_OFFSET_BITS
#else
#define PROCFS_FILE_OFFSET_BITS_HACK 0
#endif

#include <procfs.h>

#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
#define _FILE_OFFSET_BITS 64
#endif

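/* Per-loop initialization: the poll backend on SunOS is a Solaris event port,
 * created here with its close-on-exec flag set.
 */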
int uv__platform_loop_init(uv_loop_t* loop) {
  int err;
  int fd;

  loop->fs_fd = -1;
  loop->backend_fd = -1;

  fd = port_create();
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }
  loop->backend_fd = fd;

  return 0;
}


void uv__platform_loop_delete(uv_loop_t* loop) {
  if (loop->fs_fd != -1) {
    uv__close(loop->fs_fd);
    loop->fs_fd = -1;
  }

  if (loop->backend_fd != -1) {
    uv__close(loop->backend_fd);
    loop->backend_fd = -1;
  }
}

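/* After fork() the child rebuilds the loop's event ports from scratch: stop
 * the fs event watcher (if any), close both ports and open a new backend port.
 */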
int uv__io_fork(uv_loop_t* loop) {
#if defined(PORT_SOURCE_FILE)
  if (loop->fs_fd != -1) {
    /* stop the watcher before we blow away its fileno */
    uv__io_stop(loop, &loop->fs_event_watcher, POLLIN);
  }
#endif
  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}

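/* uv__io_poll() stashes the event array it is currently processing in
 * loop->watchers[nwatchers] (and its length in the slot after it). When a fd
 * is closed mid-iteration, mark its pending events here so they are skipped.
 */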
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct port_event* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct port_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].portev_object == fd)
      events[i].portev_object = -1;
}

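/* Check that a fd can be watched at all: try to associate it with the backend
 * port and immediately dissociate it again.
 */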
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0))
    return UV__ERR(errno);

  if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) {
    perror("(libuv) port_dissociate()");
    abort();
  }

  return 0;
}

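/* Poll for I/O. Event ports are one-shot: every fd that fires and is still
 * being watched is queued again so it gets re-associated on the next pass.
 */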
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct port_event events[1024];
  struct port_event* pe;
  struct timespec spec;
  QUEUE* q;
  uv__io_t* w;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  uint64_t idle_poll;
  unsigned int nfds;
  unsigned int i;
  int saved_errno;
  int have_signals;
  int nevents;
  int count;
  int err;
  int fd;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);

    if (port_associate(loop->backend_fd,
                       PORT_SOURCE_FD,
                       w->fd,
                       w->pevents,
                       0)) {
      perror("(libuv) port_associate()");
      abort();
    }

    w->events = w->pevents;
  }

  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    /* Work around a kernel bug where nfds is not updated. */
    events[0].portev_source = 0;

    nfds = 1;
    saved_errno = 0;

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    err = port_getn(loop->backend_fd,
                    events,
                    ARRAY_SIZE(events),
                    &nfds,
                    timeout == -1 ? NULL : &spec);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    if (err) {
      /* Work around another kernel bug: port_getn() may return events even
       * on error.
       */
      if (errno == EINTR || errno == ETIME) {
        saved_errno = errno;
      } else {
        perror("(libuv) port_getn()");
        abort();
      }
    }

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (events[0].portev_source == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      goto update_timeout;
    }

    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->portev_object;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      /* File descriptor that we've stopped watching, ignore. */
      if (w == NULL)
        continue;

      /* Run signal watchers last.  This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, pe->portev_events);
      }

      nevents++;

      if (w != loop->watchers[fd])
        continue;  /* Disabled by callback. */

      /* Events Ports operates in oneshot mode, rearm timer on next run. */
      if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
        QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (saved_errno == ETIME) {
      assert(timeout != -1);
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}

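/* gethrtime() already returns nanoseconds from an arbitrary fixed point, so
 * both the precise and fast clock types map to the same call.
 */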
uint64_t uv__hrtime(uv_clocktype_t type) {
  return gethrtime();
}


/*
 * We could use a static buffer for the path manipulations that we need outside
 * of the function, but this function could be called by multiple consumers and
 * we don't want to potentially create a race condition in the use of snprintf.
 */
int uv_exepath(char* buffer, size_t* size) {
  ssize_t res;
  char buf[128];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid());

  res = *size - 1;
  if (res > 0)
    res = readlink(buf, buffer, res);

  if (res == -1)
    return UV__ERR(errno);

  buffer[res] = '\0';
  *size = res;
  return 0;
}


uint64_t uv_get_free_memory(void) {
  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}


uint64_t uv_get_total_memory(void) {
  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
}


uint64_t uv_get_constrained_memory(void) {
  return 0;  /* Memory constraints are unknown. */
}


void uv_loadavg(double avg[3]) {
  (void) getloadavg(avg, 3);
}


#if defined(PORT_SOURCE_FILE)

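/* File watching uses the PORT_SOURCE_FILE event source. Associations are
 * one-shot, so the file_obj has to be re-registered after every event; the
 * handle's fd field doubles as a state marker (PORT_LOADED, PORT_FIRED, ...).
 */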
static int uv__fs_event_rearm(uv_fs_event_t *handle) {
  if (handle->fd == -1)
    return UV_EBADF;

  if (port_associate(handle->loop->fs_fd,
                     PORT_SOURCE_FILE,
                     (uintptr_t) &handle->fo,
                     FILE_ATTRIB | FILE_MODIFIED,
                     handle) == -1) {
    return UV__ERR(errno);
  }
  handle->fd = PORT_LOADED;

  return 0;
}

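/* Drain pending file events from loop->fs_fd and invoke the handle callbacks,
 * re-arming each handle unless it was stopped from inside its callback.
 */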
static void uv__fs_event_read(uv_loop_t* loop,
                              uv__io_t* w,
                              unsigned int revents) {
  uv_fs_event_t *handle = NULL;
  timespec_t timeout;
  port_event_t pe;
  int events;
  int r;

  (void) w;
  (void) revents;

  do {
    uint_t n = 1;

    /*
     * Note that our use of port_getn() here (and not port_get()) is deliberate:
     * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
     * causes port_get() to return success instead of ETIME when there aren't
     * actually any events (!); by using port_getn() in lieu of port_get(),
     * we can at least workaround the bug by checking for zero returned events
     * and treating it as we would ETIME.
     */
    do {
      memset(&timeout, 0, sizeof timeout);
      r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
    }
    while (r == -1 && errno == EINTR);

    if ((r == -1 && errno == ETIME) || n == 0)
      break;

    handle = (uv_fs_event_t*) pe.portev_user;
    assert((r == 0) && "unexpected port_get() error");

    events = 0;
    if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_CHANGE;
    if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_RENAME;
    assert(events != 0);
    handle->fd = PORT_FIRED;
    handle->cb(handle, NULL, events, 0);

    if (handle->fd != PORT_DELETED) {
      r = uv__fs_event_rearm(handle);
      if (r != 0)
        handle->cb(handle, NULL, 0, r);
    }
  }
  while (handle->fd != PORT_DELETED);
}


int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}

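/* The fs event port (loop->fs_fd) is created lazily here on first use; on
 * that first run its fd is also registered with the poll backend so that
 * pending file events wake up the loop.
 */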
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int portfd;
  int first_run;
  int err;

  if (uv__is_active(handle))
    return UV_EINVAL;

  first_run = 0;
  if (handle->loop->fs_fd == -1) {
    portfd = port_create();
    if (portfd == -1)
      return UV__ERR(errno);
    handle->loop->fs_fd = portfd;
    first_run = 1;
  }

  uv__handle_start(handle);
  handle->path = uv__strdup(path);
  handle->fd = PORT_UNUSED;
  handle->cb = cb;

  memset(&handle->fo, 0, sizeof handle->fo);
  handle->fo.fo_name = handle->path;
  err = uv__fs_event_rearm(handle);
  if (err != 0) {
    uv_fs_event_stop(handle);
    return err;
  }

  if (first_run) {
    uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
    uv__io_start(handle->loop, &handle->loop->fs_event_watcher, POLLIN);
  }

  return 0;
}


int uv_fs_event_stop(uv_fs_event_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) {
    port_dissociate(handle->loop->fs_fd,
                    PORT_SOURCE_FILE,
                    (uintptr_t) &handle->fo);
  }

  handle->fd = PORT_DELETED;
  uv__free(handle->path);
  handle->path = NULL;
  handle->fo.fo_name = NULL;
  uv__handle_stop(handle);

  return 0;
}

void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}

#else /* !defined(PORT_SOURCE_FILE) */

int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  return UV_ENOSYS;
}


int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* filename,
                      unsigned int flags) {
  return UV_ENOSYS;
}


int uv_fs_event_stop(uv_fs_event_t* handle) {
  return UV_ENOSYS;
}


void uv__fs_event_close(uv_fs_event_t* handle) {
  UNREACHABLE();
}

#endif /* defined(PORT_SOURCE_FILE) */

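/* Resident set size comes from /proc/self/psinfo; pr_rssize is reported in
 * kilobytes, hence the conversion to bytes below.
 */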
int uv_resident_set_memory(size_t* rss) {
  psinfo_t psinfo;
  int err;
  int fd;

  fd = open("/proc/self/psinfo", O_RDONLY);
  if (fd == -1)
    return UV__ERR(errno);

  /* FIXME(bnoordhuis) Handle EINTR. */
  err = UV_EINVAL;
  if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
    *rss = (size_t)psinfo.pr_rssize * 1024;
    err = 0;
  }
  uv__close(fd);

  return err;
}

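/* Uptime is derived from the "clk_intr" counter in the unix:system_misc kstat,
 * which counts clock interrupts since boot, divided by the tick rate.
 */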
int uv_uptime(double* uptime) {
  kstat_ctl_t   *kc;
  kstat_t       *ksp;
  kstat_named_t *knp;

  long hz = sysconf(_SC_CLK_TCK);

  kc = kstat_open();
  if (kc == NULL)
    return UV_EPERM;

  ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc");
  if (kstat_read(kc, ksp, NULL) == -1) {
    *uptime = -1;
  } else {
    knp = (kstat_named_t*)  kstat_data_lookup(ksp, (char*) "clk_intr");
    *uptime = knp->value.ul / hz;
  }
  kstat_close(kc);

  return 0;
}

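/* CPU model and clock speed come from the per-CPU "cpu_info" kstats; the
 * user/sys/idle/irq tick counters come from the matching "cpu:<n>:sys" kstats.
 */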
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  int           lookup_instance;
  kstat_ctl_t   *kc;
  kstat_t       *ksp;
  kstat_named_t *knp;
  uv_cpu_info_t* cpu_info;

  kc = kstat_open();
  if (kc == NULL)
    return UV_EPERM;

  /* Get count of cpus */
  lookup_instance = 0;
  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
    lookup_instance++;
  }

  *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos));
  if (!(*cpu_infos)) {
    kstat_close(kc);
    return UV_ENOMEM;
  }

  *count = lookup_instance;

  cpu_info = *cpu_infos;
  lookup_instance = 0;
  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
    if (kstat_read(kc, ksp, NULL) == -1) {
      cpu_info->speed = 0;
      cpu_info->model = NULL;
    } else {
      knp = kstat_data_lookup(ksp, (char*) "clock_MHz");
      assert(knp->data_type == KSTAT_DATA_INT32 ||
             knp->data_type == KSTAT_DATA_INT64);
      cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
                                                             : knp->value.i64;

      knp = kstat_data_lookup(ksp, (char*) "brand");
      assert(knp->data_type == KSTAT_DATA_STRING);
      cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp));
    }

    lookup_instance++;
    cpu_info++;
  }

  cpu_info = *cpu_infos;
  lookup_instance = 0;
  for (;;) {
    ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys");

    if (ksp == NULL)
      break;

    if (kstat_read(kc, ksp, NULL) == -1) {
      cpu_info->cpu_times.user = 0;
      cpu_info->cpu_times.nice = 0;
      cpu_info->cpu_times.sys = 0;
      cpu_info->cpu_times.idle = 0;
      cpu_info->cpu_times.irq = 0;
    } else {
      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.user = knp->value.ui64;

      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.sys = knp->value.ui64;

      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.idle = knp->value.ui64;

      knp = kstat_data_lookup(ksp, (char*) "intr");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.irq = knp->value.ui64;
      cpu_info->cpu_times.nice = 0;
    }

    lookup_instance++;
    cpu_info++;
  }

  kstat_close(kc);

  return 0;
}


#ifdef SUNOS_NO_IFADDRS
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  *count = 0;
  *addresses = NULL;
  return UV_ENOSYS;
}
#else  /* SUNOS_NO_IFADDRS */
/*
 * Inspired By:
 * https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris
 * http://www.pauliesworld.org/project/getmac.c
 */
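/* Fill in the interface's MAC address: prefer the link-layer address that
 * getifaddrs() returned; if that is all zeroes (typical for unprivileged
 * users), fall back to querying the ARP cache with SIOCGARP.
 */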
static int uv__set_phys_addr(uv_interface_address_t* address,
                             struct ifaddrs* ent) {

  struct sockaddr_dl* sa_addr;
  int sockfd;
  size_t i;
  struct arpreq arpreq;

  /* This appears to only work as root */
  sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
  memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
  for (i = 0; i < sizeof(address->phys_addr); i++) {
    /* Check that all bytes of phys_addr are zero. */
    if (address->phys_addr[i] != 0)
      return 0;
  }
  memset(&arpreq, 0, sizeof(arpreq));
  if (address->address.address4.sin_family == AF_INET) {
    struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa);
    sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr;
  } else if (address->address.address4.sin_family == AF_INET6) {
    struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa);
    memcpy(sin->sin6_addr.s6_addr,
           address->address.address6.sin6_addr.s6_addr,
           sizeof(address->address.address6.sin6_addr.s6_addr));
  } else {
    return 0;
  }

  sockfd = socket(AF_INET, SOCK_DGRAM, 0);
  if (sockfd < 0)
    return UV__ERR(errno);

  if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) {
    uv__close(sockfd);
    return UV__ERR(errno);
  }
  memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr));
  uv__close(sockfd);
  return 0;
}


static int uv__ifaddr_exclude(struct ifaddrs *ent) {
  if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
    return 1;
  if (ent->ifa_addr == NULL)
    return 1;
  if (ent->ifa_addr->sa_family != AF_INET &&
      ent->ifa_addr->sa_family != AF_INET6)
    return 1;
  return 0;
}

int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  struct ifaddrs* addrs;
  struct ifaddrs* ent;

  *count = 0;
  *addresses = NULL;

  if (getifaddrs(&addrs))
    return UV__ERR(errno);

  /* Count the number of interfaces */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent))
      continue;
    (*count)++;
  }

  if (*count == 0) {
    freeifaddrs(addrs);
    return 0;
  }

  *addresses = uv__malloc(*count * sizeof(**addresses));
  if (!(*addresses)) {
    freeifaddrs(addrs);
    return UV_ENOMEM;
  }

  address = *addresses;

  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent))
      continue;

    address->name = uv__strdup(ent->ifa_name);

    if (ent->ifa_addr->sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
    }

    if (ent->ifa_netmask->sa_family == AF_INET6) {
      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
    } else {
      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
    }

    address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) ||
                           (ent->ifa_flags & IFF_LOOPBACK));

    uv__set_phys_addr(address, ent);
    address++;
  }

  freeifaddrs(addrs);

  return 0;
}
#endif  /* SUNOS_NO_IFADDRS */

void uv_free_interface_addresses(uv_interface_address_t* addresses,
  int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(addresses[i].name);
  }

  uv__free(addresses);
}


#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
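/* Fallback for SunOS releases whose libc predates POSIX.1-2008 and therefore
 * lacks strnlen().
 */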
size_t strnlen(const char* s, size_t maxlen) {
  const char* end;
  end = memchr(s, '\0', maxlen);
  if (end == NULL)
    return maxlen;
  return end - s;
}
#endif