/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv-common.h"
#include "uv_log.h"

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h> /* NULL */
#include <stdio.h>
#include <stdlib.h> /* malloc */
#include <string.h> /* memset */

#if defined(_WIN32)
# include <malloc.h> /* malloc */
#else
# include <net/if.h> /* if_nametoindex */
# include <sys/un.h> /* AF_UNIX, sockaddr_un */
#endif

#if defined(USE_OHOS_DFX) && defined(__aarch64__)
#include "parameter.h"
#include <unistd.h>
#include <sys/types.h>
static unsigned int g_multi_thread_check = 0;
#endif

#ifdef USE_FFRT
#include "ffrt_inner.h"
#endif

typedef struct {
  uv_malloc_func local_malloc;
  uv_realloc_func local_realloc;
  uv_calloc_func local_calloc;
  uv_free_func local_free;
} uv__allocator_t;

static uv__allocator_t uv__allocator = {
  malloc,
  realloc,
  calloc,
  free,
};

char* uv__strdup(const char* s) {
  size_t len = strlen(s) + 1;
  char* m = uv__malloc(len);
  if (m == NULL)
    return NULL;
  return memcpy(m, s, len);
}

char* uv__strndup(const char* s, size_t n) {
  char* m;
  size_t len = strlen(s);
  if (n < len)
    len = n;
  m = uv__malloc(len + 1);
  if (m == NULL)
    return NULL;
  m[len] = '\0';
  return memcpy(m, s, len);
}

void* uv__malloc(size_t size) {
  if (size > 0)
    return uv__allocator.local_malloc(size);
  return NULL;
}

void uv__free(void* ptr) {
  int saved_errno;

  /* Libuv expects that free() does not clobber errno. The system allocator
   * honors that assumption but custom allocators may not be so careful.
   */
  saved_errno = errno;
  uv__allocator.local_free(ptr);
  errno = saved_errno;
}

void* uv__calloc(size_t count, size_t size) {
  return uv__allocator.local_calloc(count, size);
}

void* uv__realloc(void* ptr, size_t size) {
  if (size > 0)
    return uv__allocator.local_realloc(ptr, size);
  uv__free(ptr);
  return NULL;
}

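/* Like uv__realloc(), but modeled on BSD reallocf(): if the reallocation
 * fails, the original block is freed so the caller cannot leak it.
 */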
void* uv__reallocf(void* ptr, size_t size) {
  void* newptr;

  newptr = uv__realloc(ptr, size);
  if (newptr == NULL)
    if (size > 0)
      uv__free(ptr);

  return newptr;
}

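/* Illustrative sketch (not part of this file): the allocator is expected to be
 * replaced before any other libuv call so that every block libuv frees was
 * allocated by the same allocator. The my_* callbacks are hypothetical; any
 * functions matching the uv_*_func signatures will do.
 *
 *   static void* my_malloc(size_t size) { return malloc(size); }
 *   static void* my_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
 *   static void* my_calloc(size_t count, size_t size) { return calloc(count, size); }
 *   static void my_free(void* ptr) { free(ptr); }
 *
 *   if (uv_replace_allocator(my_malloc, my_realloc, my_calloc, my_free) != 0)
 *     abort();
 */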
int uv_replace_allocator(uv_malloc_func malloc_func,
                         uv_realloc_func realloc_func,
                         uv_calloc_func calloc_func,
                         uv_free_func free_func) {
  if (malloc_func == NULL || realloc_func == NULL ||
      calloc_func == NULL || free_func == NULL) {
    return UV_EINVAL;
  }

  uv__allocator.local_malloc = malloc_func;
  uv__allocator.local_realloc = realloc_func;
  uv__allocator.local_calloc = calloc_func;
  uv__allocator.local_free = free_func;

  return 0;
}


void uv_os_free_passwd(uv_passwd_t* pwd) {
  if (pwd == NULL)
    return;

  /* On unix, the memory for name, shell, and homedir is allocated in a single
   * uv__malloc() call. The base of the pointer is stored in pwd->username, so
   * that is the field that needs to be freed.
   */
  uv__free(pwd->username);
#ifdef _WIN32
  uv__free(pwd->homedir);
#endif
  pwd->username = NULL;
  pwd->shell = NULL;
  pwd->homedir = NULL;
}


void uv_os_free_group(uv_group_t *grp) {
  if (grp == NULL)
    return;

  /* The memory for the members list and group name is allocated in a single
   * uv__malloc() call. The base of the pointer is stored in grp->members, so
   * that is the only field that needs to be freed.
   */
  uv__free(grp->members);
  grp->members = NULL;
  grp->groupname = NULL;
}


#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);

size_t uv_handle_size(uv_handle_type type) {
  switch (type) {
    UV_HANDLE_TYPE_MAP(XX)
    default:
      return -1;
  }
}

size_t uv_req_size(uv_req_type type) {
  switch (type) {
    UV_REQ_TYPE_MAP(XX)
    default:
      return -1;
  }
}

#undef XX


size_t uv_loop_size(void) {
  return sizeof(uv_loop_t);
}


uv_buf_t uv_buf_init(char* base, unsigned int len) {
  uv_buf_t buf;
  buf.base = base;
  buf.len = len;
  return buf;
}


static const char* uv__unknown_err_code(int err) {
  char buf[32];
  char* copy;

  snprintf(buf, sizeof(buf), "Unknown system error %d", err);
  copy = uv__strdup(buf);

  return copy != NULL ? copy : "Unknown system error";
}

#define UV_ERR_NAME_GEN_R(name, _) \
case UV_## name: \
  uv__strscpy(buf, #name, buflen); break;
char* uv_err_name_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_ERR_NAME_GEN_R


#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
const char* uv_err_name(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_ERR_NAME_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_ERR_NAME_GEN


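/* Illustrative sketch (not part of this file): unlike uv_err_name() and
 * uv_strerror(), which may allocate for unknown codes, uv_err_name_r() above
 * and uv_strerror_r() below write into caller-supplied buffers, so they are
 * the thread-safe choice. UV_ENOENT is just an example code.
 *
 *   char name[32];
 *   char msg[80];
 *   uv_err_name_r(UV_ENOENT, name, sizeof(name));
 *   uv_strerror_r(UV_ENOENT, msg, sizeof(msg));
 *   fprintf(stderr, "%s: %s\n", name, msg);
 */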
#define UV_STRERROR_GEN_R(name, msg) \
case UV_ ## name: \
  snprintf(buf, buflen, "%s", msg); break;
char* uv_strerror_r(int err, char* buf, size_t buflen) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN_R)
    default: snprintf(buf, buflen, "Unknown system error %d", err);
  }
  return buf;
}
#undef UV_STRERROR_GEN_R


#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
const char* uv_strerror(int err) {
  switch (err) {
    UV_ERRNO_MAP(UV_STRERROR_GEN)
  }
  return uv__unknown_err_code(err);
}
#undef UV_STRERROR_GEN


int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
  memset(addr, 0, sizeof(*addr));
  addr->sin_family = AF_INET;
  addr->sin_port = htons(port);
#ifdef SIN6_LEN
  addr->sin_len = sizeof(*addr);
#endif
  return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
}


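/* Illustrative sketch (not part of this file): a link-local IPv6 address may
 * carry a zone index after '%'. The interface name "eth0" and the port are
 * hypothetical; on Windows the zone is given as a numeric interface id.
 *
 *   struct sockaddr_in6 addr;
 *   int r = uv_ip6_addr("fe80::1%eth0", 9123, &addr);
 *   if (r != 0)
 *     fprintf(stderr, "uv_ip6_addr: %s\n", uv_err_name(r));
 */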
int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
  char address_part[40];
  size_t address_part_size;
  const char* zone_index;

  memset(addr, 0, sizeof(*addr));
  addr->sin6_family = AF_INET6;
  addr->sin6_port = htons(port);
#ifdef SIN6_LEN
  addr->sin6_len = sizeof(*addr);
#endif

  zone_index = strchr(ip, '%');
  if (zone_index != NULL) {
    address_part_size = zone_index - ip;
    if (address_part_size >= sizeof(address_part))
      address_part_size = sizeof(address_part) - 1;

    memcpy(address_part, ip, address_part_size);
    address_part[address_part_size] = '\0';
    ip = address_part;

    zone_index++; /* skip '%' */
    /* NOTE: unknown interface (id=0) is silently ignored */
#ifdef _WIN32
    addr->sin6_scope_id = atoi(zone_index);
#else
    addr->sin6_scope_id = if_nametoindex(zone_index);
#endif
  }

  return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
}


int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
}


int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
  return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
}


int uv_ip_name(const struct sockaddr *src, char *dst, size_t size) {
  switch (src->sa_family) {
  case AF_INET:
    return uv_inet_ntop(AF_INET, &((struct sockaddr_in *)src)->sin_addr,
                        dst, size);
  case AF_INET6:
    return uv_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)src)->sin6_addr,
                        dst, size);
  default:
    return UV_EAFNOSUPPORT;
  }
}


int uv_tcp_bind(uv_tcp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int addrlen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;
  if (uv__is_closing(handle)) {
    return UV_EINVAL;
  }
  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__tcp_bind(handle, addr, addrlen, flags);
}


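/* Illustrative sketch (not part of this file): the flags argument packs an
 * address family into the low 8 bits and feature flags into the rest, so a
 * UDP handle that uses recvmmsg() where available can be requested with:
 *
 *   uv_udp_t udp;
 *   int r = uv_udp_init_ex(uv_default_loop(), &udp, AF_INET | UV_UDP_RECVMMSG);
 *   if (r != 0)
 *     fprintf(stderr, "uv_udp_init_ex: %s\n", uv_err_name(r));
 */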
int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned flags) {
  unsigned extra_flags;
  int domain;
  int rc;

  /* Use the lower 8 bits for the domain. */
  domain = flags & 0xFF;
  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
    return UV_EINVAL;

  /* Use the higher bits for extra flags. */
  extra_flags = flags & ~0xFF;
  if (extra_flags & ~UV_UDP_RECVMMSG)
    return UV_EINVAL;

  rc = uv__udp_init_ex(loop, handle, flags, domain);

  if (rc == 0)
    if (extra_flags & UV_UDP_RECVMMSG)
      handle->flags |= UV_HANDLE_UDP_RECVMMSG;

  return rc;
}


int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
  return uv_udp_init_ex(loop, handle, AF_UNSPEC);
}


int uv_udp_bind(uv_udp_t* handle,
                const struct sockaddr* addr,
                unsigned int flags) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__udp_bind(handle, addr, addrlen, flags);
}


int uv_tcp_connect(uv_connect_t* req,
                   uv_tcp_t* handle,
                   const struct sockaddr* addr,
                   uv_connect_cb cb) {
  unsigned int addrlen;

  if (handle->type != UV_TCP)
    return UV_EINVAL;

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  return uv__tcp_connect(req, handle, addr, addrlen, cb);
}


int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  /* Disconnect the handle */
  if (addr == NULL) {
    if (!(handle->flags & UV_HANDLE_UDP_CONNECTED))
      return UV_ENOTCONN;

    return uv__udp_disconnect(handle);
  }

  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;

  if (handle->flags & UV_HANDLE_UDP_CONNECTED)
    return UV_EISCONN;

  return uv__udp_connect(handle, addr, addrlen);
}


int uv__udp_is_connected(uv_udp_t* handle) {
  struct sockaddr_storage addr;
  int addrlen;
  if (handle->type != UV_UDP)
    return 0;

  addrlen = sizeof(addr);
  if (uv_udp_getpeername(handle, (struct sockaddr*) &addr, &addrlen) != 0)
    return 0;

  return addrlen > 0;
}


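/* Validates the handle state and the optional destination address for a send.
 * Returns the sockaddr length to pass down on success (0 when the handle is
 * already connected and addr is NULL), or a negative UV_* error code.
 */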
int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
  unsigned int addrlen;

  if (handle->type != UV_UDP)
    return UV_EINVAL;

  if (addr != NULL && (handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EISCONN;

  if (addr == NULL && !(handle->flags & UV_HANDLE_UDP_CONNECTED))
    return UV_EDESTADDRREQ;

  if (addr != NULL) {
    if (addr->sa_family == AF_INET)
      addrlen = sizeof(struct sockaddr_in);
    else if (addr->sa_family == AF_INET6)
      addrlen = sizeof(struct sockaddr_in6);
#if defined(AF_UNIX) && !defined(_WIN32)
    else if (addr->sa_family == AF_UNIX)
      addrlen = sizeof(struct sockaddr_un);
#endif
    else
      return UV_EINVAL;
  } else {
    addrlen = 0;
  }

  return addrlen;
}


int uv_udp_send(uv_udp_send_t* req,
                uv_udp_t* handle,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                const struct sockaddr* addr,
                uv_udp_send_cb send_cb) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
}


int uv_udp_try_send(uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr) {
  int addrlen;

  addrlen = uv__udp_check_before_send(handle, addr);
  if (addrlen < 0)
    return addrlen;

  return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
}


int uv_udp_recv_start(uv_udp_t* handle,
                      uv_alloc_cb alloc_cb,
                      uv_udp_recv_cb recv_cb) {
  if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
    return UV_EINVAL;
  else
    return uv__udp_recv_start(handle, alloc_cb, recv_cb);
}


int uv_udp_recv_stop(uv_udp_t* handle) {
  if (handle->type != UV_UDP)
    return UV_EINVAL;
  else
    return uv__udp_recv_stop(handle);
}


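/* Illustrative sketch (not part of this file): uv_walk() is commonly used to
 * close every remaining handle before tearing down a loop; the callback name
 * is hypothetical.
 *
 *   static void close_walk_cb(uv_handle_t* handle, void* arg) {
 *     if (!uv_is_closing(handle))
 *       uv_close(handle, NULL);
 *   }
 *
 *   uv_walk(loop, close_walk_cb, NULL);
 *   uv_run(loop, UV_RUN_DEFAULT);  // let the close callbacks run
 *   uv_loop_close(loop);
 */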
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_handle_t* h;

  UV_LOGI("clean up handles in loop(%{public}zu)", (size_t)loop);
  uv__queue_move(&loop->handle_queue, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_handle_t, handle_queue);

    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->handle_queue, q);

    if (h->flags & UV_HANDLE_INTERNAL) continue;
    walk_cb(h, arg);
  }
}


static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
  const char* type;
  struct uv__queue* q;
  uv_handle_t* h;

  if (loop == NULL)
    loop = uv_default_loop();

  if (stream == NULL)
    stream = stderr;

  uv__queue_foreach(q, &loop->handle_queue) {
    h = uv__queue_data(q, uv_handle_t, handle_queue);

    if (only_active && !uv__is_active(h))
      continue;

    switch (h->type) {
#define X(uc, lc) case UV_##uc: type = #lc; break;
      UV_HANDLE_TYPE_MAP(X)
#undef X
      default: type = "<unknown>";
    }

    fprintf(stream,
            "[%c%c%c] %-8s %p\n",
            "R-"[!(h->flags & UV_HANDLE_REF)],
            "A-"[!(h->flags & UV_HANDLE_ACTIVE)],
            "I-"[!(h->flags & UV_HANDLE_INTERNAL)],
            type,
            (void*)h);
  }
}


void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 0, stream);
}


void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
  uv__print_handles(loop, 1, stream);
}


void uv_ref(uv_handle_t* handle) {
  uv__handle_ref(handle);
}


void uv_unref(uv_handle_t* handle) {
  uv__handle_unref(handle);
}


int uv_has_ref(const uv_handle_t* handle) {
  return uv__has_ref(handle);
}


void uv_stop(uv_loop_t* loop) {
  loop->stop_flag = 1;
}


uint64_t uv_now(const uv_loop_t* loop) {
  return loop->time;
}


size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
  unsigned int i;
  size_t bytes;

  bytes = 0;
  for (i = 0; i < nbufs; i++)
    bytes += (size_t) bufs[i].len;

  return bytes;
}

int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
  return uv__socket_sockopt(handle, SO_RCVBUF, value);
}

int uv_send_buffer_size(uv_handle_t* handle, int *value) {
  return uv__socket_sockopt(handle, SO_SNDBUF, value);
}

int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
  size_t required_len;

  if (!uv__is_active(handle)) {
    *size = 0;
    return UV_EINVAL;
  }

  required_len = strlen(handle->path);
  if (required_len >= *size) {
    *size = required_len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, handle->path, required_len);
  *size = required_len;
  buffer[required_len] = '\0';

  return 0;
}

/* The windows implementation does not have the same structure layout as
 * the unix implementation (nbufs is not directly inside req but is
 * contained in a nested union/struct) so this function locates it.
 */
static unsigned int* uv__get_nbufs(uv_fs_t* req) {
#ifdef _WIN32
  return &req->fs.info.nbufs;
#else
  return &req->nbufs;
#endif
}

/* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
 * systems. So, the memory should be released using free(). On Windows,
 * uv__malloc() is used, so use uv__free() to free memory.
 */
#ifdef _WIN32
# define uv__fs_scandir_free uv__free
#else
# define uv__fs_scandir_free free
#endif

void uv__fs_scandir_cleanup(uv_fs_t* req) {
  uv__dirent_t** dents;
  unsigned int* nbufs;
  unsigned int i;
  unsigned int n;

  if (req->result >= 0) {
    dents = req->ptr;
    nbufs = uv__get_nbufs(req);

    i = 0;
    if (*nbufs > 0)
      i = *nbufs - 1;

    n = (unsigned int) req->result;
    for (; i < n; i++)
      uv__fs_scandir_free(dents[i]);
  }

  uv__fs_scandir_free(req->ptr);
  req->ptr = NULL;
}


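/* Illustrative sketch (not part of this file): synchronous directory listing
 * with uv_fs_scandir() followed by uv_fs_scandir_next() until UV_EOF; the
 * "/tmp" path is arbitrary.
 *
 *   uv_fs_t req;
 *   uv_dirent_t ent;
 *   if (uv_fs_scandir(uv_default_loop(), &req, "/tmp", 0, NULL) >= 0) {
 *     while (uv_fs_scandir_next(&req, &ent) != UV_EOF)
 *       printf("%s\n", ent.name);
 *   }
 *   uv_fs_req_cleanup(&req);
 */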
int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
  uv__dirent_t** dents;
  uv__dirent_t* dent;
  unsigned int* nbufs;

  /* Check to see if req passed */
  if (req->result < 0)
    return req->result;

  /* Ptr will be null if req was canceled or no files found */
  if (!req->ptr)
    return UV_EOF;

  nbufs = uv__get_nbufs(req);
  assert(nbufs);

  dents = req->ptr;

  /* Free previous entity */
  if (*nbufs > 0)
    uv__fs_scandir_free(dents[*nbufs - 1]);

  /* End was already reached */
  if (*nbufs == (unsigned int) req->result) {
    uv__fs_scandir_free(dents);
    req->ptr = NULL;
    return UV_EOF;
  }

  dent = dents[(*nbufs)++];

  ent->name = dent->d_name;
  ent->type = uv__fs_get_dirent_type(dent);

  return 0;
}

uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
  uv_dirent_type_t type;

#ifdef HAVE_DIRENT_TYPES
  switch (dent->d_type) {
    case UV__DT_DIR:
      type = UV_DIRENT_DIR;
      break;
    case UV__DT_FILE:
      type = UV_DIRENT_FILE;
      break;
    case UV__DT_LINK:
      type = UV_DIRENT_LINK;
      break;
    case UV__DT_FIFO:
      type = UV_DIRENT_FIFO;
      break;
    case UV__DT_SOCKET:
      type = UV_DIRENT_SOCKET;
      break;
    case UV__DT_CHAR:
      type = UV_DIRENT_CHAR;
      break;
    case UV__DT_BLOCK:
      type = UV_DIRENT_BLOCK;
      break;
    default:
      type = UV_DIRENT_UNKNOWN;
  }
#else
  type = UV_DIRENT_UNKNOWN;
#endif

  return type;
}

void uv__fs_readdir_cleanup(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirents;
  int i;

  if (req->ptr == NULL)
    return;

  dir = req->ptr;
  dirents = dir->dirents;
  req->ptr = NULL;

  if (dirents == NULL)
    return;

  for (i = 0; i < req->result; ++i) {
    uv__free((char*) dirents[i].name);
    dirents[i].name = NULL;
  }
}


int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
  va_list ap;
  int err;

  va_start(ap, option);
  /* Any platform-agnostic options should be handled here. */
  err = uv__loop_configure(loop, option, ap);
  va_end(ap);

  return err;
}


static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;


uv_loop_t* uv_default_loop(void) {
  if (default_loop_ptr != NULL)
    return default_loop_ptr;

  if (uv_loop_init(&default_loop_struct))
    return NULL;

  default_loop_ptr = &default_loop_struct;
  return default_loop_ptr;
}


uv_loop_t* uv_loop_new(void) {
  uv_loop_t* loop;

  loop = uv__malloc(sizeof(*loop));
  if (loop == NULL)
    return NULL;

  if (uv_loop_init(loop)) {
    uv__free(loop);
    return NULL;
  }

  return loop;
}


void on_uv_loop_close(uv_loop_t* loop);
int uv_loop_close(uv_loop_t* loop) {
  struct uv__queue* q;
  uv_handle_t* h;
#ifndef NDEBUG
  void* saved_data;
#endif

  if (uv__has_active_reqs(loop)) {
#ifdef USE_OHOS_DFX
    UV_LOGI("loop:%{public}zu, active reqs:%{public}u", (size_t)loop, loop->active_reqs.count);
#endif
    return UV_EBUSY;
  }
  uv__queue_foreach(q, &loop->handle_queue) {
    h = uv__queue_data(q, uv_handle_t, handle_queue);
    if (!(h->flags & UV_HANDLE_INTERNAL)) {
#ifdef USE_OHOS_DFX
      UV_LOGI("loop:%{public}zu, active handle:%{public}zu", (size_t)loop, (size_t)h);
#endif
      return UV_EBUSY;
    }
  }

  on_uv_loop_close(loop);
  uv__loop_close(loop);

#ifndef NDEBUG
  saved_data = loop->data;
  memset(loop, -1, sizeof(*loop));
  loop->data = saved_data;
#endif
  if (loop == default_loop_ptr)
    default_loop_ptr = NULL;

  return 0;
}


void uv_loop_delete(uv_loop_t* loop) {
  uv_loop_t* default_loop;
  int err;

  default_loop = default_loop_ptr;

  err = uv_loop_close(loop);
  (void) err; /* Squelch compiler warnings. */
  assert(err == 0);
#ifdef USE_OHOS_DFX
  if (err != 0)
    on_uv_loop_close(loop);
#endif
  if (loop != default_loop)
    uv__free(loop);
}


int uv_read_start(uv_stream_t* stream,
                  uv_alloc_cb alloc_cb,
                  uv_read_cb read_cb) {
  if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
    return UV_EINVAL;

  if (stream->flags & UV_HANDLE_CLOSING)
    return UV_EINVAL;

  if (stream->flags & UV_HANDLE_READING)
    return UV_EALREADY;

  if (!(stream->flags & UV_HANDLE_READABLE))
    return UV_ENOTCONN;

  return uv__read_start(stream, alloc_cb, read_cb);
}


void uv_os_free_environ(uv_env_item_t* envitems, int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(envitems[i].name);
  }

  uv__free(envitems);
}


void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
#ifdef __linux__
  (void) &count;
  uv__free(cpu_infos);
#else
  int i;

  for (i = 0; i < count; i++)
    uv__free(cpu_infos[i].model);

  uv__free(cpu_infos);
#endif /* __linux__ */
}


/* Also covers __clang__ and __INTEL_COMPILER. Disabled on Windows because
 * threads have already been forcibly terminated by the operating system
 * by the time destructors run, ergo, it's not safe to try to clean them up.
 */
#if defined(__GNUC__) && !defined(_WIN32)
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
  static int was_shutdown;

  if (uv__exchange_int_relaxed(&was_shutdown, 1))
    return;

  uv__process_title_cleanup();
  uv__signal_cleanup();
#ifdef __MVS__
  /* TODO(itodorov) - zos: revisit when Woz compiler is available. */
  uv__os390_cleanup();
#else
  uv__threadpool_cleanup();
#endif
}


void uv__metrics_update_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t exit_time;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  loop_metrics = uv__get_loop_metrics(loop);

  /* The thread running uv__metrics_update_idle_time() is always the same
   * thread that sets provider_entry_time. So it's unnecessary to lock before
   * retrieving this value.
   */
  if (loop_metrics->provider_entry_time == 0)
    return;

  exit_time = uv_hrtime();

  uv_mutex_lock(&loop_metrics->lock);
  entry_time = loop_metrics->provider_entry_time;
  loop_metrics->provider_entry_time = 0;
  loop_metrics->provider_idle_time += exit_time - entry_time;
  uv_mutex_unlock(&loop_metrics->lock);
}


void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t now;

  if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
    return;

  now = uv_hrtime();
  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  loop_metrics->provider_entry_time = now;
  uv_mutex_unlock(&loop_metrics->lock);
}


int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics) {
  memcpy(metrics,
         &uv__get_loop_metrics(loop)->metrics,
         sizeof(*metrics));

  return 0;
}


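/* Illustrative sketch (not part of this file): idle-time accounting is off by
 * default and must be enabled per loop before it can be queried.
 *
 *   uv_loop_t* loop = uv_default_loop();
 *   uv_loop_configure(loop, UV_METRICS_IDLE_TIME);
 *   uv_run(loop, UV_RUN_DEFAULT);
 *   printf("idle: %llu ns\n", (unsigned long long) uv_metrics_idle_time(loop));
 */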
uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
  uv__loop_metrics_t* loop_metrics;
  uint64_t entry_time;
  uint64_t idle_time;

  loop_metrics = uv__get_loop_metrics(loop);
  uv_mutex_lock(&loop_metrics->lock);
  idle_time = loop_metrics->provider_idle_time;
  entry_time = loop_metrics->provider_entry_time;
  uv_mutex_unlock(&loop_metrics->lock);

  if (entry_time > 0)
    idle_time += uv_hrtime() - entry_time;
  return idle_time;
}


uint64_t uv__get_addr_tag(void* addr) {
  uint64_t tag = 0;

#ifdef USE_OHOS_DFX
  if (addr != NULL) {
    tag = fdsan_create_owner_tag(FDSAN_OWNER_TYPE_FILE, (uint64_t)addr);
  }
#endif
  return tag;
}


#if defined(USE_OHOS_DFX) && defined(__aarch64__)
static uv_once_t thread_check_guard = UV_ONCE_INIT;
void init_param_once() {
  unsigned int param_value = GetIntParameter("persist.libuv.properties", -1);
  if (param_value == 1) {
    g_multi_thread_check = 1;
  }
}


int uv__is_multi_thread_open(void) {
  uv_once(&thread_check_guard, init_param_once);
  if (g_multi_thread_check == 0) {
    return 0;
  }
#ifdef USE_FFRT
  if (ffrt_get_cur_task() != NULL) {
    return 0;
  }
#endif
  return 1;
}


void uv__init_thread_id(uv_loop_t* loop) {
  if (uv__is_multi_thread_open()) {
    uv__loop_internal_fields_t* lfields_tid = uv__get_internal_fields(loop);
    lfields_tid->thread_id = 0;
    lfields_tid->thread_id |= MULTI_THREAD_CHECK_LOOP_INIT;
  }
}


void uv__set_thread_id(uv_loop_t* loop) {
  if (uv__is_multi_thread_open()) {
    uv__loop_internal_fields_t* lfields_tid = uv__get_internal_fields(loop);
    lfields_tid->thread_id |= (unsigned int)gettid();
  }
}


static unsigned int uv__get_thread_id(const uv_loop_t* loop) {
  if (uv__is_multi_thread_open()) {
    uv__loop_internal_fields_t* lfields_tid = uv__get_internal_fields(loop);
    unsigned int thread_id = lfields_tid->thread_id & ~MULTI_THREAD_CHECK_LOOP_INIT;
    return thread_id;
  } else {
    return 0;
  }
}


void uv__multi_thread_check_unify(const uv_loop_t* loop, const char* funcName) {
  if (!uv__is_multi_thread_open()) {
    return;
  }

  unsigned int thread_id = uv__get_thread_id(loop);
  if (thread_id == 0) {
    return;
  }
  if (thread_id != (unsigned int)gettid()) {
    UV_LOGF("multi-thread occurred in function %{public}s!", funcName);
    abort();
  }
}
#endif