1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 #include "uv.h"
23 #include "uv-common.h"
24
25 #include <assert.h>
26 #include <errno.h>
27 #include <stdarg.h>
28 #include <stddef.h> /* NULL */
29 #include <stdio.h>
30 #include <stdlib.h> /* malloc */
31 #include <string.h> /* memset */
32
33 #if defined(_WIN32)
34 # include <malloc.h> /* malloc */
35 #else
36 # include <net/if.h> /* if_nametoindex */
37 # include <sys/un.h> /* AF_UNIX, sockaddr_un */
38 #endif
39
40
/* The active allocator quartet. Defaults to the C runtime allocator and can
 * be replaced wholesale via uv_replace_allocator(). All uv__malloc() et al.
 * calls in this file route through this table.
 */
typedef struct {
  uv_malloc_func local_malloc;
  uv_realloc_func local_realloc;
  uv_calloc_func local_calloc;
  uv_free_func local_free;
} uv__allocator_t;

static uv__allocator_t uv__allocator = {
  malloc,
  realloc,
  calloc,
  free,
};
54
/* Duplicate the NUL-terminated string s using the libuv allocator.
 * Returns NULL on allocation failure; free the result with uv__free().
 */
char* uv__strdup(const char* s) {
  size_t size;
  char* copy;

  size = strlen(s) + 1;  /* include the terminator */
  copy = uv__malloc(size);
  if (copy == NULL)
    return NULL;

  return memcpy(copy, s, size);
}
62
/* Duplicate at most n bytes of s, always NUL-terminating the copy.
 * Uses a bounded memchr() scan so no more than n bytes of s are read;
 * per POSIX strndup() semantics s need not be NUL-terminated when it
 * holds at least n bytes (the previous strlen() scan could read past n).
 * Returns NULL on allocation failure; free the result with uv__free().
 */
char* uv__strndup(const char* s, size_t n) {
  char* m;
  const char* end;
  size_t len;

  /* Find the terminator within the first n bytes only. */
  end = memchr(s, '\0', n);
  len = end != NULL ? (size_t) (end - s) : n;

  m = uv__malloc(len + 1);
  if (m == NULL)
    return NULL;

  m[len] = '\0';
  return memcpy(m, s, len);
}
74
uv__malloc(size_t size)75 void* uv__malloc(size_t size) {
76 if (size > 0)
77 return uv__allocator.local_malloc(size);
78 return NULL;
79 }
80
uv__free(void * ptr)81 void uv__free(void* ptr) {
82 int saved_errno;
83
84 /* Libuv expects that free() does not clobber errno. The system allocator
85 * honors that assumption but custom allocators may not be so careful.
86 */
87 saved_errno = errno;
88 uv__allocator.local_free(ptr);
89 errno = saved_errno;
90 }
91
/* Zero-initialized allocation through the active (possibly replaced)
 * allocator.
 */
void* uv__calloc(size_t count, size_t size) {
  return uv__allocator.local_calloc(count, size);
}
95
uv__realloc(void * ptr,size_t size)96 void* uv__realloc(void* ptr, size_t size) {
97 if (size > 0)
98 return uv__allocator.local_realloc(ptr, size);
99 uv__free(ptr);
100 return NULL;
101 }
102
/* Free-on-failure variant of uv__realloc() ("f" as in BSD reallocf()):
 * when the resize fails, the original block is released so the caller
 * cannot leak it.
 */
void* uv__reallocf(void* ptr, size_t size) {
  void* newptr;

  newptr = uv__realloc(ptr, size);
  if (newptr == NULL)
    if (size > 0)
      uv__free(ptr);  /* size == 0 means uv__realloc() already freed ptr. */

  return newptr;
}
113
/* Install a replacement malloc/realloc/calloc/free quartet. All four
 * functions must be supplied; returns UV_EINVAL when any is NULL, 0 on
 * success.
 */
int uv_replace_allocator(uv_malloc_func malloc_func,
                         uv_realloc_func realloc_func,
                         uv_calloc_func calloc_func,
                         uv_free_func free_func) {
  /* Reject partial replacement: the four functions work as a set. */
  if (malloc_func == NULL ||
      realloc_func == NULL ||
      calloc_func == NULL ||
      free_func == NULL)
    return UV_EINVAL;

  uv__allocator.local_malloc = malloc_func;
  uv__allocator.local_realloc = realloc_func;
  uv__allocator.local_calloc = calloc_func;
  uv__allocator.local_free = free_func;

  return 0;
}
130
/* Expands to one switch case per handle/req type returning its size. */
#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);

/* Return sizeof(uv_<type>_t) for the given handle type, or (size_t)-1
 * (SIZE_MAX, via the implicit conversion) for an unknown type.
 */
size_t uv_handle_size(uv_handle_type type) {
  switch (type) {
    UV_HANDLE_TYPE_MAP(XX)
    default:
      return -1;
  }
}
140
/* Return sizeof(uv_<type>_t) for the given request type, or (size_t)-1
 * (SIZE_MAX, via the implicit conversion) for an unknown type.
 */
size_t uv_req_size(uv_req_type type) {
  switch(type) {
    UV_REQ_TYPE_MAP(XX)
    default:
      return -1;
  }
}

#undef XX
150
151
/* Return sizeof(uv_loop_t) so callers can allocate loops themselves. */
size_t uv_loop_size(void) {
  return sizeof(uv_loop_t);
}
155
156
uv_buf_init(char * base,unsigned int len)157 uv_buf_t uv_buf_init(char* base, unsigned int len) {
158 uv_buf_t buf;
159 buf.base = base;
160 buf.len = len;
161 return buf;
162 }
163
164
/* Build a "Unknown system error %d" string for an unmapped error code.
 * The heap copy is handed to the caller and never freed here (callers
 * such as uv_err_name()/uv_strerror() treat the result as static); if
 * allocation fails a static fallback string is returned instead.
 */
static const char* uv__unknown_err_code(int err) {
  char buf[32];
  char* copy;

  snprintf(buf, sizeof(buf), "Unknown system error %d", err);
  copy = uv__strdup(buf);

  return copy != NULL ? copy : "Unknown system error";
}
174
/* Expands to a case that copies the stringified error name into buf. */
#define UV_ERR_NAME_GEN_R(name, _) \
case UV_## name: \
  uv__strscpy(buf, #name, buflen); break;
/* Buffer-based variant of uv_err_name(): writes the error constant's
 * name (e.g. "EINVAL") into the caller-supplied buffer and returns buf.
 * Uses no static or heap state. Unknown codes produce
 * "Unknown system error %d".
 */
char* uv_err_name_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_ERR_NAME_GEN_R
186
187
/* Expands to a case returning the stringified error constant name. */
#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
/* Return the constant name for err (e.g. "EAGAIN"). Unknown codes fall
 * through to a heap-allocated fallback from uv__unknown_err_code().
 */
const char* uv_err_name(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_ERR_NAME_GEN
196
197
/* Expands to a case that formats the error's message into buf. */
#define UV_STRERROR_GEN_R(name, msg) \
case UV_ ## name: \
  snprintf(buf, buflen, "%s", msg); break;
/* Buffer-based variant of uv_strerror(): writes the human-readable
 * message for err into the caller-supplied buffer and returns buf.
 * Uses no static or heap state. Unknown codes produce
 * "Unknown system error %d".
 */
char* uv_strerror_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_STRERROR_GEN_R
209
210
/* Expands to a case returning the error's message literal. */
#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
/* Return the human-readable message for err. Unknown codes fall through
 * to a heap-allocated fallback from uv__unknown_err_code().
 */
const char* uv_strerror(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_STRERROR_GEN
219
220
/* Fill addr from a dotted-quad string and host-order port. Returns 0 or
 * the error from uv_inet_pton().
 */
int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
  memset(addr, 0, sizeof(*addr));
  addr->sin_family = AF_INET;
  addr->sin_port = htons(port);
#ifdef SIN6_LEN
  /* BSD-derived stacks carry an explicit sockaddr length field. */
  addr->sin_len = sizeof(*addr);
#endif
  return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
}
230
231
/* Fill addr from an IPv6 address string -- optionally carrying a
 * "%zone" suffix, e.g. "fe80::1%eth0" -- and a host-order port.
 * Returns 0 or the error from uv_inet_pton().
 */
int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
  char address_part[40];  /* Holds the address portion before the '%'. */
  size_t address_part_size;
  const char* zone_index;

  memset(addr, 0, sizeof(*addr));
  addr->sin6_family = AF_INET6;
  addr->sin6_port = htons(port);
#ifdef SIN6_LEN
  /* BSD-derived stacks carry an explicit sockaddr length field. */
  addr->sin6_len = sizeof(*addr);
#endif

  zone_index = strchr(ip, '%');
  if (zone_index != NULL) {
    /* Split "addr%zone": copy the address part into a local buffer
     * (clamped to the buffer size) and parse the zone separately. */
    address_part_size = zone_index - ip;
    if (address_part_size >= sizeof(address_part))
      address_part_size = sizeof(address_part) - 1;

    memcpy(address_part, ip, address_part_size);
    address_part[address_part_size] = '\0';
    ip = address_part;

    zone_index++; /* skip '%' */
    /* NOTE: unknown interface (id=0) is silently ignored */
#ifdef _WIN32
    /* Windows zone indices are numeric interface ids. */
    addr->sin6_scope_id = atoi(zone_index);
#else
    /* Elsewhere the zone is an interface name, e.g. "eth0". */
    addr->sin6_scope_id = if_nametoindex(zone_index);
#endif
  }

  return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
}
265
266
/* Format an IPv4 sockaddr as a dotted-quad string via uv_inet_ntop(). */
int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
}
270
271
/* Format an IPv6 sockaddr as a text address via uv_inet_ntop(). */
int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
}
275
276
uv_ip_name(const struct sockaddr * src,char * dst,size_t size)277 int uv_ip_name(const struct sockaddr *src, char *dst, size_t size) {
278 switch (src->sa_family) {
279 case AF_INET:
280 return uv_inet_ntop(AF_INET, &((struct sockaddr_in *)src)->sin_addr,
281 dst, size);
282 case AF_INET6:
283 return uv_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)src)->sin6_addr,
284 dst, size);
285 default:
286 return UV_EAFNOSUPPORT;
287 }
288 }
289
290
/* Validate the handle type and address family, derive the sockaddr
 * length, then delegate to the platform-specific uv__tcp_bind().
 * Only AF_INET and AF_INET6 addresses are accepted.
 */
int uv_tcp_bind(uv_tcp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int namelen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;

  switch (addr->sa_family) {
    case AF_INET:
      namelen = sizeof(struct sockaddr_in);
      break;
    case AF_INET6:
      namelen = sizeof(struct sockaddr_in6);
      break;
    default:
      return UV_EINVAL;
  }

  return uv__tcp_bind(handle, addr, namelen, flags);
}
308
309
/* Initialize a UDP handle. The low byte of |flags| selects the socket
 * domain (AF_INET, AF_INET6, or AF_UNSPEC); the remaining bits may only
 * contain UV_UDP_RECVMMSG. Returns 0 or UV_EINVAL on bad flags, or the
 * error from uv__udp_init_ex().
 */
int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned flags) {
  unsigned extra_flags;
  int domain;
  int rc;

  /* Use the lower 8 bits for the domain. */
  domain = flags & 0xFF;
  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
    return UV_EINVAL;

  /* Use the higher bits for extra flags. */
  extra_flags = flags & ~0xFF;
  if (extra_flags & ~UV_UDP_RECVMMSG)
    return UV_EINVAL;

  rc = uv__udp_init_ex(loop, handle, flags, domain);

  /* Record the recvmmsg request on the handle only if init succeeded. */
  if (rc == 0)
    if (extra_flags & UV_UDP_RECVMMSG)
      handle->flags |= UV_HANDLE_UDP_RECVMMSG;

  return rc;
}
333
334
/* Initialize a UDP handle with an unspecified domain and no extra flags. */
int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
  return uv_udp_init_ex(loop, handle, AF_UNSPEC);
}
338
339
/* Validate the handle type and address family, derive the sockaddr
 * length, then delegate to the platform-specific uv__udp_bind().
 * Only AF_INET and AF_INET6 addresses are accepted.
 */
int uv_udp_bind(uv_udp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int namelen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  switch (addr->sa_family) {
    case AF_INET:
      namelen = sizeof(struct sockaddr_in);
      break;
    case AF_INET6:
      namelen = sizeof(struct sockaddr_in6);
      break;
    default:
      return UV_EINVAL;
  }

  return uv__udp_bind(handle, addr, namelen, flags);
}
357
358
/* Validate the handle type and address family, derive the sockaddr
 * length, then delegate to the platform-specific uv__tcp_connect().
 * Only AF_INET and AF_INET6 destinations are accepted.
 */
int uv_tcp_connect(uv_connect_t* req,
                   uv_tcp_t* handle,
                   const struct sockaddr* addr,
                   uv_connect_cb cb) {
  unsigned int namelen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;

  switch (addr->sa_family) {
    case AF_INET:
      namelen = sizeof(struct sockaddr_in);
      break;
    case AF_INET6:
      namelen = sizeof(struct sockaddr_in6);
      break;
    default:
      return UV_EINVAL;
  }

  return uv__tcp_connect(req, handle, addr, namelen, cb);
}
377
378
/* Associate (or, with addr == NULL, dissociate) a default peer address
 * with a UDP handle. Errors: UV_EINVAL for a non-UDP handle or unknown
 * family, UV_ENOTCONN when disconnecting an unconnected handle,
 * UV_EISCONN when connecting an already-connected one.
 */
int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int namelen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  /* A NULL address means "disconnect". */
  if (addr == NULL) {
    if (handle->flags & UV_HANDLE_UDP_CONNECTED)
      return uv__udp_disconnect(handle);

    return UV_ENOTCONN;
  }

  switch (addr->sa_family) {
    case AF_INET:
      namelen = sizeof(struct sockaddr_in);
      break;
    case AF_INET6:
      namelen = sizeof(struct sockaddr_in6);
      break;
    default:
      return UV_EINVAL;
  }

  if (handle->flags & UV_HANDLE_UDP_CONNECTED)
    return UV_EISCONN;

  return uv__udp_connect(handle, addr, namelen);
}
405
406
/* Report whether the UDP handle has a peer: true only when
 * uv_udp_getpeername() succeeds and yields a non-empty address.
 */
int uv__udp_is_connected(uv_udp_t* handle) {
  struct sockaddr_storage peer;
  int peerlen;

  if (handle->type != UV_UDP)
    return 0;

  peerlen = sizeof(peer);
  if (uv_udp_getpeername(handle, (struct sockaddr*) &peer, &peerlen) != 0)
    return 0;

  return peerlen > 0;
}
419
420
/* Validate a handle/addr combination ahead of a send. Returns the
 * sockaddr length to pass down (0 when a connected handle sends with
 * addr == NULL), or a negative error:
 *   UV_EINVAL      - not a UDP handle, or unsupported address family
 *   UV_EISCONN     - explicit addr given but handle already connected
 *   UV_EDESTADDRREQ - no addr given and handle not connected
 */
int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (addr != NULL && (handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EISCONN;

  if (addr == NULL && !(handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EDESTADDRREQ;

  if (addr != NULL) {
    if (addr->sa_family == AF_INET)
      addrlen = sizeof(struct sockaddr_in);
    else if (addr->sa_family == AF_INET6)
      addrlen = sizeof(struct sockaddr_in6);
#if defined(AF_UNIX) && !defined(_WIN32)
    /* Unix domain datagram sockets are supported off-Windows. */
    else if (addr->sa_family == AF_UNIX)
      addrlen = sizeof(struct sockaddr_un);
#endif
    else
      return UV_EINVAL;
  } else {
    addrlen = 0;
  }

  return addrlen;
}
450
451
/* Queue an asynchronous UDP send after validating the destination via
 * uv__udp_check_before_send(); its negative results are returned as-is.
 */
int uv_udp_send(uv_udp_send_t* req,
                uv_udp_t* handle,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                const struct sockaddr* addr,
                uv_udp_send_cb send_cb) {
  int namelen;

  namelen = uv__udp_check_before_send(handle, addr);
  if (namelen < 0)
    return namelen;  /* propagate validation error */

  return uv__udp_send(req, handle, bufs, nbufs, addr, namelen, send_cb);
}
466
467
/* Validate the destination via uv__udp_check_before_send(), then
 * delegate to uv__udp_try_send(); negative results are returned as-is.
 */
int uv_udp_try_send(uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr) {
  int namelen;

  namelen = uv__udp_check_before_send(handle, addr);
  if (namelen < 0)
    return namelen;  /* propagate validation error */

  return uv__udp_try_send(handle, bufs, nbufs, addr, namelen);
}
480
481
/* Begin receiving datagrams. Both callbacks are mandatory and the
 * handle must be a UDP handle; otherwise UV_EINVAL.
 */
int uv_udp_recv_start(uv_udp_t* handle,
                      uv_alloc_cb alloc_cb,
                      uv_udp_recv_cb recv_cb) {
  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (alloc_cb == NULL || recv_cb == NULL)
    return UV_EINVAL;

  return uv__udp_recv_start(handle, alloc_cb, recv_cb);
}
490
491
/* Stop receiving datagrams; UV_EINVAL for a non-UDP handle. */
int uv_udp_recv_stop(uv_udp_t* handle) {
  if (handle->type == UV_UDP)
    return uv__udp_recv_stop(handle);

  return UV_EINVAL;
}
498
499
/* Invoke walk_cb(h, arg) for every non-internal handle in the loop.
 * The handle queue is first moved onto a local queue so that handles
 * walk_cb adds to the loop are not visited during this walk; each
 * visited handle is re-inserted at the tail of the loop's queue.
 */
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
  QUEUE queue;
  QUEUE* q;
  uv_handle_t* h;

  QUEUE_MOVE(&loop->handle_queue, &queue);
  while (!QUEUE_EMPTY(&queue)) {
    q = QUEUE_HEAD(&queue);
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);

    /* Put the handle back on the loop's queue before the callback runs. */
    QUEUE_REMOVE(q);
    QUEUE_INSERT_TAIL(&loop->handle_queue, q);

    /* Internal handles are hidden from the embedder. */
    if (h->flags & UV_HANDLE_INTERNAL) continue;
    walk_cb(h, arg);
  }
}
517
518
/* Debug helper: dump one line per handle to stream in the form
 * "[RAI] type address", where the flag column shows R (referenced),
 * A (active) and I (internal), each replaced by '-' when unset.
 * With only_active set, inactive handles are skipped. A NULL loop
 * means the default loop.
 */
static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
  const char* type;
  QUEUE* q;
  uv_handle_t* h;

  if (loop == NULL)
    loop = uv_default_loop();

  QUEUE_FOREACH(q, &loop->handle_queue) {
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);

    if (only_active && !uv__is_active(h))
      continue;

    /* Map the handle type enum to its lowercase name. */
    switch (h->type) {
#define X(uc, lc) case UV_##uc: type = #lc; break;
      UV_HANDLE_TYPE_MAP(X)
#undef X
      default: type = "<unknown>";
    }

    fprintf(stream,
            "[%c%c%c] %-8s %p\n",
            "R-"[!(h->flags & UV_HANDLE_REF)],
            "A-"[!(h->flags & UV_HANDLE_ACTIVE)],
            "I-"[!(h->flags & UV_HANDLE_INTERNAL)],
            type,
            (void*)h);
  }
}
549
550
/* Dump every handle in the loop (active or not) to stream. */
void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 0, stream);
}
554
555
/* Dump only the active handles in the loop to stream. */
void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 1, stream);
}
559
560
/* Public wrapper over the internal handle-reference primitive. */
void uv_ref(uv_handle_t* handle) {
  uv__handle_ref(handle);
}
564
565
/* Public wrapper over the internal handle-unreference primitive. */
void uv_unref(uv_handle_t* handle) {
  uv__handle_unref(handle);
}
569
570
/* Public wrapper: non-zero when the handle is referenced. */
int uv_has_ref(const uv_handle_t* handle) {
  return uv__has_ref(handle);
}
574
575
/* Request that the event loop stop: sets the flag the loop driver polls. */
void uv_stop(uv_loop_t* loop) {
  loop->stop_flag = 1;
}
579
580
/* Return loop->time, the loop's cached timestamp (no clock query here). */
uint64_t uv_now(const uv_loop_t* loop) {
  return loop->time;
}
584
585
586
uv__count_bufs(const uv_buf_t bufs[],unsigned int nbufs)587 size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
588 unsigned int i;
589 size_t bytes;
590
591 bytes = 0;
592 for (i = 0; i < nbufs; i++)
593 bytes += (size_t) bufs[i].len;
594
595 return bytes;
596 }
597
/* Query/update SO_RCVBUF through uv__socket_sockopt(); *value semantics
 * (presumably 0 means "get") are handled there -- confirm in the
 * platform layer.
 */
int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
  return uv__socket_sockopt(handle, SO_RCVBUF, value);
}
601
/* Query/update SO_SNDBUF through uv__socket_sockopt(); *value semantics
 * (presumably 0 means "get") are handled there -- confirm in the
 * platform layer.
 */
int uv_send_buffer_size(uv_handle_t* handle, int *value) {
  return uv__socket_sockopt(handle, SO_SNDBUF, value);
}
605
/* Copy the watched path into buffer, NUL-terminated. On entry *size is
 * the buffer capacity; on success it becomes the path length (without
 * the NUL). Errors: UV_EINVAL when the watcher is inactive (*size set
 * to 0), UV_ENOBUFS when the buffer is too small (*size set to the
 * required capacity, including the NUL).
 */
int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
  size_t len;

  if (!uv__is_active(handle)) {
    *size = 0;
    return UV_EINVAL;
  }

  len = strlen(handle->path);
  if (*size <= len) {
    /* Tell the caller how much room is needed, terminator included. */
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, handle->path, len);
  buffer[len] = '\0';
  *size = len;

  return 0;
}
626
/* The windows implementation does not have the same structure layout as
 * the unix implementation (nbufs is not directly inside req but is
 * contained in a nested union/struct) so this function locates it.
 */
static unsigned int* uv__get_nbufs(uv_fs_t* req) {
#ifdef _WIN32
  /* Windows nests the counter inside req->fs.info. */
  return &req->fs.info.nbufs;
#else
  return &req->nbufs;
#endif
}
638
639 /* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
640 * systems. So, the memory should be released using free(). On Windows,
641 * uv__malloc() is used, so use uv__free() to free memory.
642 */
643 #ifdef _WIN32
644 # define uv__fs_scandir_free uv__free
645 #else
646 # define uv__fs_scandir_free free
647 #endif
648
/* Release the dirent array left in req->ptr by a scandir request.
 * *nbufs records how far uv_fs_scandir_next() iterated; the entry it
 * returned most recently (index *nbufs - 1) has not been freed yet, so
 * back up one before freeing the remaining entries, then release the
 * array itself. req->result holds the total entry count.
 */
void uv__fs_scandir_cleanup(uv_fs_t* req) {
  uv__dirent_t** dents;

  unsigned int* nbufs = uv__get_nbufs(req);

  dents = req->ptr;
  /* Rewind to include the last entry handed out (not yet freed). */
  if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
    (*nbufs)--;
  for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
    uv__fs_scandir_free(dents[*nbufs]);

  uv__fs_scandir_free(req->ptr);
  req->ptr = NULL;
}
663
664
/* Yield the next directory entry from a completed scandir request into
 * *ent. Returns 0 on success, the request's (negative) result when the
 * request failed, or UV_EOF when all entries have been consumed (at
 * which point the dirent array is freed and req->ptr cleared).
 * *nbufs is the iteration cursor; the previously returned entry is
 * freed lazily on the following call.
 */
int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
  uv__dirent_t** dents;
  uv__dirent_t* dent;
  unsigned int* nbufs;

  /* Check to see if req passed */
  if (req->result < 0)
    return req->result;

  /* Ptr will be null if req was canceled or no files found */
  if (!req->ptr)
    return UV_EOF;

  nbufs = uv__get_nbufs(req);
  assert(nbufs);

  dents = req->ptr;

  /* Free previous entity */
  if (*nbufs > 0)
    uv__fs_scandir_free(dents[*nbufs - 1]);

  /* End was already reached */
  if (*nbufs == (unsigned int) req->result) {
    uv__fs_scandir_free(dents);
    req->ptr = NULL;
    return UV_EOF;
  }

  dent = dents[(*nbufs)++];

  /* ent->name aliases the dirent's storage; valid until the next call. */
  ent->name = dent->d_name;
  ent->type = uv__fs_get_dirent_type(dent);

  return 0;
}
701
/* Map a platform dirent's d_type to the libuv uv_dirent_type_t enum.
 * On platforms without dirent type support (HAVE_DIRENT_TYPES unset),
 * always reports UV_DIRENT_UNKNOWN.
 */
uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
  uv_dirent_type_t type;

#ifdef HAVE_DIRENT_TYPES
  switch (dent->d_type) {
    case UV__DT_DIR:
      type = UV_DIRENT_DIR;
      break;
    case UV__DT_FILE:
      type = UV_DIRENT_FILE;
      break;
    case UV__DT_LINK:
      type = UV_DIRENT_LINK;
      break;
    case UV__DT_FIFO:
      type = UV_DIRENT_FIFO;
      break;
    case UV__DT_SOCKET:
      type = UV_DIRENT_SOCKET;
      break;
    case UV__DT_CHAR:
      type = UV_DIRENT_CHAR;
      break;
    case UV__DT_BLOCK:
      type = UV_DIRENT_BLOCK;
      break;
    default:
      type = UV_DIRENT_UNKNOWN;
  }
#else
  type = UV_DIRENT_UNKNOWN;
#endif

  return type;
}
737
/* Release the entry names populated by a readdir request. The uv_dir_t
 * and its dirents array are caller-owned; only the per-entry name
 * strings (req->result of them) are freed here. req->ptr is cleared.
 */
void uv__fs_readdir_cleanup(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* entries;
  int idx;

  if (req->ptr == NULL)
    return;

  dir = req->ptr;
  entries = dir->dirents;
  req->ptr = NULL;

  if (entries == NULL)
    return;

  for (idx = 0; idx < req->result; ++idx) {
    uv__free((char*) entries[idx].name);
    entries[idx].name = NULL;
  }
}
758
759
/* Apply a loop option. Variadic, option-specific arguments are passed
 * through as a va_list to uv__loop_configure(), whose result is
 * returned unchanged.
 */
int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
  va_list ap;
  int err;

  va_start(ap, option);
  /* Any platform-agnostic options should be handled here. */
  err = uv__loop_configure(loop, option, ap);
  va_end(ap);

  return err;
}
771
772
/* Storage for the lazily-initialized default loop; the pointer is NULL
 * until uv_default_loop() first succeeds and is reset by uv_loop_close(). */
static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;
775
776
/* Return the lazily-initialized process-wide default loop (static
 * storage), or NULL when uv_loop_init() fails. Note: no synchronization
 * around the static state here.
 */
uv_loop_t* uv_default_loop(void) {
  if (default_loop_ptr != NULL)
    return default_loop_ptr;

  if (uv_loop_init(&default_loop_struct))
    return NULL;

  default_loop_ptr = &default_loop_struct;
  return default_loop_ptr;
}
787
788
uv_loop_new(void)789 uv_loop_t* uv_loop_new(void) {
790 uv_loop_t* loop;
791
792 loop = uv__malloc(sizeof(*loop));
793 if (loop == NULL)
794 return NULL;
795
796 if (uv_loop_init(loop)) {
797 uv__free(loop);
798 return NULL;
799 }
800
801 return loop;
802 }
803
804
/* Tear down a loop. Fails with UV_EBUSY while any request is in flight
 * or any non-internal handle remains registered. On success, resets the
 * default-loop pointer when applicable; in debug builds the loop memory
 * is poisoned (preserving only the user data pointer) to surface
 * use-after-close bugs.
 */
int uv_loop_close(uv_loop_t* loop) {
  QUEUE* q;
  uv_handle_t* h;
#ifndef NDEBUG
  void* saved_data;
#endif

  if (uv__has_active_reqs(loop))
    return UV_EBUSY;

  /* Any handle the embedder still owns blocks the close. */
  QUEUE_FOREACH(q, &loop->handle_queue) {
    h = QUEUE_DATA(q, uv_handle_t, handle_queue);
    if (!(h->flags & UV_HANDLE_INTERNAL))
      return UV_EBUSY;
  }

  uv__loop_close(loop);

#ifndef NDEBUG
  /* Poison the loop but keep the embedder's data pointer intact. */
  saved_data = loop->data;
  memset(loop, -1, sizeof(*loop));
  loop->data = saved_data;
#endif
  if (loop == default_loop_ptr)
    default_loop_ptr = NULL;

  return 0;
}
833
834
/* Close the loop (asserting success in debug builds) and free it --
 * unless it is the default loop, which lives in static storage and must
 * not be freed.
 */
void uv_loop_delete(uv_loop_t* loop) {
  uv_loop_t* default_loop;
  int err;

  /* Capture before uv_loop_close(), which clears default_loop_ptr. */
  default_loop = default_loop_ptr;

  err = uv_loop_close(loop);
  (void) err;    /* Squelch compiler warnings. */
  assert(err == 0);
  if (loop != default_loop)
    uv__free(loop);
}
847
848
/* Begin reading from a stream. Validates arguments and stream state
 * before delegating to uv__read_start(). Errors: UV_EINVAL for NULL
 * arguments or a closing stream, UV_EALREADY when already reading,
 * UV_ENOTCONN when the stream is not readable.
 */
int uv_read_start(uv_stream_t* stream,
                  uv_alloc_cb alloc_cb,
                  uv_read_cb read_cb) {
  if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
    return UV_EINVAL;

  /* Reject streams that are shutting down. */
  if (stream->flags & UV_HANDLE_CLOSING)
    return UV_EINVAL;

  /* Only one read may be active at a time. */
  if (stream->flags & UV_HANDLE_READING)
    return UV_EALREADY;

  /* The stream must be in a readable state. */
  if (!(stream->flags & UV_HANDLE_READABLE))
    return UV_ENOTCONN;

  return uv__read_start(stream, alloc_cb, read_cb);
}
866
867
/* Free the environment snapshot: each item's name buffer first, then
 * the item array itself.
 */
void uv_os_free_environ(uv_env_item_t* envitems, int count) {
  int idx;

  for (idx = 0; idx < count; idx++)
    uv__free(envitems[idx].name);

  uv__free(envitems);
}
877
878
/* Free the CPU info array: each entry's model string first, then the
 * array itself.
 */
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
  int idx;

  for (idx = 0; idx < count; idx++)
    uv__free(cpu_infos[idx].model);

  uv__free(cpu_infos);
}
887
888
889 /* Also covers __clang__ and __INTEL_COMPILER. Disabled on Windows because
890 * threads have already been forcibly terminated by the operating system
891 * by the time destructors run, ergo, it's not safe to try to clean them up.
892 */
#if defined(__GNUC__) && !defined(_WIN32)
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
  /* Guard ensures the cleanup runs at most once, whether invoked
   * explicitly or by the GCC destructor attribute at unload. */
  static int was_shutdown;

  if (uv__load_relaxed(&was_shutdown))
    return;

  uv__process_title_cleanup();
  uv__signal_cleanup();
#ifdef __MVS__
  /* TODO(itodorov) - zos: revisit when Woz compiler is available. */
  uv__os390_cleanup();
#else
  uv__threadpool_cleanup();
#endif
  uv__store_relaxed(&was_shutdown, 1);
}
912
913
/* Fold the interval since provider_entry_time into the loop's
 * accumulated idle time and clear the entry mark. No-op unless
 * UV_METRICS_IDLE_TIME is enabled or when no entry time is pending.
 */
void uv__metrics_update_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t exit_time;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  loop_metrics = uv__get_loop_metrics(loop);

  /* The thread running uv__metrics_update_idle_time() is always the same
   * thread that sets provider_entry_time. So it's unnecessary to lock before
   * retrieving this value.
   */
  if (loop_metrics->provider_entry_time == 0)
    return;

  exit_time = uv_hrtime();

  /* The lock protects readers such as uv_metrics_idle_time(). */
  uv_mutex_lock(&loop_metrics->lock);
  entry_time = loop_metrics->provider_entry_time;
  loop_metrics->provider_entry_time = 0;
  loop_metrics->provider_idle_time += exit_time - entry_time;
  uv_mutex_unlock(&loop_metrics->lock);
}
939
940
/* Record the timestamp at which the loop entered its I/O provider;
 * later consumed by uv__metrics_update_idle_time(). No-op unless
 * UV_METRICS_IDLE_TIME is enabled.
 */
void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t now;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  now = uv_hrtime();
  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  loop_metrics->provider_entry_time = now;
  uv_mutex_unlock(&loop_metrics->lock);
}
954
955
/* Return the cumulative time the loop has spent idle in its provider.
 * If an idle period is currently in progress (entry_time non-zero), the
 * elapsed portion of that period is included.
 */
uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t idle_time;

  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  idle_time = loop_metrics->provider_idle_time;
  entry_time = loop_metrics->provider_entry_time;
  uv_mutex_unlock(&loop_metrics->lock);

  /* Account for an in-progress idle period. */
  if (entry_time > 0)
    idle_time += uv_hrtime() - entry_time;
  return idle_time;
}
971