1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 /* Caveat emptor: this file deviates from the libuv convention of returning
23 * negated errno codes. Most uv_fs_*() functions map directly to the system
24 * call of the same name. For more complex wrappers, it's easier to just
25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26 * getting the errno to the right place (req->result or as the return value.)
27 */
28
29 #include "uv.h"
30 #include "internal.h"
31
32 #include <errno.h>
33 #include <dlfcn.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <limits.h> /* PATH_MAX */
38
39 #include <sys/types.h>
40 #include <sys/socket.h>
41 #include <sys/stat.h>
42 #include <sys/time.h>
43 #include <sys/uio.h>
44 #include <pthread.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <poll.h>
48
49 #if defined(__DragonFly__) || \
50 defined(__FreeBSD__) || \
51 defined(__FreeBSD_kernel__) || \
52 defined(__OpenBSD__) || \
53 defined(__NetBSD__)
54 # define HAVE_PREADV 1
55 #else
56 # define HAVE_PREADV 0
57 #endif
58
59 #if defined(__linux__)
60 # include "sys/utsname.h"
61 #endif
62
63 #if defined(__linux__) || defined(__sun)
64 # include <sys/sendfile.h>
65 # include <sys/sysmacros.h>
66 #endif
67
68 #if defined(__APPLE__)
69 # include <sys/sysctl.h>
70 #elif defined(__linux__) && !defined(FICLONE)
71 # include <sys/ioctl.h>
72 # define FICLONE _IOW(0x94, 9, int)
73 #endif
74
75 #if defined(_AIX) && !defined(_AIX71)
76 # include <utime.h>
77 #endif
78
79 #if defined(__APPLE__) || \
80 defined(__DragonFly__) || \
81 defined(__FreeBSD__) || \
82 defined(__FreeBSD_kernel__) || \
83 defined(__OpenBSD__) || \
84 defined(__NetBSD__)
85 # include <sys/param.h>
86 # include <sys/mount.h>
87 #elif defined(__sun) || \
88 defined(__MVS__) || \
89 defined(__NetBSD__) || \
90 defined(__HAIKU__) || \
91 defined(__QNX__)
92 # include <sys/statvfs.h>
93 #else
94 # include <sys/statfs.h>
95 #endif
96
97 #if defined(_AIX) && _XOPEN_SOURCE <= 600
98 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
99 #endif
100
101 #define INIT(subtype) \
102 do { \
103 if (req == NULL) \
104 return UV_EINVAL; \
105 UV_REQ_INIT(req, UV_FS); \
106 req->fs_type = UV_FS_ ## subtype; \
107 req->result = 0; \
108 req->ptr = NULL; \
109 req->loop = loop; \
110 req->path = NULL; \
111 req->new_path = NULL; \
112 req->bufs = NULL; \
113 req->cb = cb; \
114 } \
115 while (0)
116
117 #define PATH \
118 do { \
119 assert(path != NULL); \
120 if (cb == NULL) { \
121 req->path = path; \
122 } else { \
123 req->path = uv__strdup(path); \
124 if (req->path == NULL) \
125 return UV_ENOMEM; \
126 } \
127 } \
128 while (0)
129
130 #define PATH2 \
131 do { \
132 if (cb == NULL) { \
133 req->path = path; \
134 req->new_path = new_path; \
135 } else { \
136 size_t path_len; \
137 size_t new_path_len; \
138 path_len = strlen(path) + 1; \
139 new_path_len = strlen(new_path) + 1; \
140 req->path = uv__malloc(path_len + new_path_len); \
141 if (req->path == NULL) \
142 return UV_ENOMEM; \
143 req->new_path = req->path + path_len; \
144 memcpy((void*) req->path, path, path_len); \
145 memcpy((void*) req->new_path, new_path, new_path_len); \
146 } \
147 } \
148 while (0)
149
150 #ifdef USE_FFRT
151 #define POST \
152 do { \
153 if (cb != NULL) { \
154 uv__req_register(loop, req); \
155 uv__work_submit(loop, \
156 (uv_req_t*)req, \
157 &req->work_req, \
158 UV__WORK_FAST_IO, \
159 uv__fs_work, \
160 uv__fs_done); \
161 return 0; \
162 } \
163 else { \
164 uv__fs_work(&req->work_req); \
165 return req->result; \
166 } \
167 } \
168 while (0)
169 #else
170 #define POST \
171 do { \
172 if (cb != NULL) { \
173 uv__req_register(loop, req); \
174 uv__work_submit(loop, \
175 &req->work_req, \
176 UV__WORK_FAST_IO, \
177 uv__fs_work, \
178 uv__fs_done); \
179 return 0; \
180 } \
181 else { \
182 uv__fs_work(&req->work_req); \
183 return req->result; \
184 } \
185 } \
186 while (0)
187 #endif
188
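/* A note on the helpers above (explanatory sketch, not part of the upstream
 * comments): every public uv_fs_*() wrapper below is composed from INIT,
 * PATH or PATH2, and POST.  Roughly, uv_fs_unlink() expands to:
 *
 *   INIT(UNLINK);   -- validate req, set fs_type/loop/cb, clear result fields
 *   PATH;           -- sync: borrow the caller's path; async: uv__strdup() it
 *   POST;           -- async: register the req and submit uv__fs_work to the
 *                      thread pool, returning 0; sync: run uv__fs_work inline
 *                      and return req->result
 */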
189
static int uv__fs_close(int fd) {
191 int rc;
192
193 rc = uv__close_nocancel(fd);
194 if (rc == -1)
195 if (errno == EINTR || errno == EINPROGRESS)
196 rc = 0; /* The close is in progress, not an error. */
197
198 return rc;
199 }
200
201
static ssize_t uv__fs_fsync(uv_fs_t* req) {
203 #if defined(__APPLE__)
204 /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
205 * to the drive platters. This is in contrast to Linux's fdatasync and fsync
206 * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
207 * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
208 * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
209 * This is the same approach taken by sqlite, except sqlite does not issue
210 * an F_BARRIERFSYNC call.
211 */
212 int r;
213
214 r = fcntl(req->file, F_FULLFSYNC);
215 if (r != 0)
216 r = fcntl(req->file, 85 /* F_BARRIERFSYNC */); /* fsync + barrier */
217 if (r != 0)
218 r = fsync(req->file);
219 return r;
220 #else
221 return fsync(req->file);
222 #endif
223 }
224
225
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
227 #if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
228 return fdatasync(req->file);
229 #elif defined(__APPLE__)
230 /* See the comment in uv__fs_fsync. */
231 return uv__fs_fsync(req);
232 #else
233 return fsync(req->file);
234 #endif
235 }
236
237
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
239 struct timespec ts;
240 ts.tv_sec = time;
241 ts.tv_nsec = (time - ts.tv_sec) * 1e9;
242
/* TODO(bnoordhuis) Remove this. utimensat() has nanosecond resolution but we
244 * stick to microsecond resolution for the sake of consistency with other
245 * platforms. I'm the original author of this compatibility hack but I'm
246 * less convinced it's useful nowadays.
247 */
248 ts.tv_nsec -= ts.tv_nsec % 1000;
249
250 if (ts.tv_nsec < 0) {
251 ts.tv_nsec += 1e9;
252 ts.tv_sec -= 1;
253 }
254 return ts;
255 }
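/* Worked example (illustrative): for time = -0.25 the assignments above give
 * tv_sec = 0 and tv_nsec = -250000000; the final branch then normalizes this
 * to tv_sec = -1, tv_nsec = 750000000, i.e. the same instant expressed with a
 * non-negative nanosecond field.
 */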
256
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
258 struct timeval tv;
259 tv.tv_sec = time;
260 tv.tv_usec = (time - tv.tv_sec) * 1e6;
261 if (tv.tv_usec < 0) {
262 tv.tv_usec += 1e6;
263 tv.tv_sec -= 1;
264 }
265 return tv;
266 }
267
static ssize_t uv__fs_futime(uv_fs_t* req) {
269 #if defined(__linux__) \
270 || defined(_AIX71) \
271 || defined(__HAIKU__) \
272 || defined(__GNU__)
273 struct timespec ts[2];
274 ts[0] = uv__fs_to_timespec(req->atime);
275 ts[1] = uv__fs_to_timespec(req->mtime);
276 return futimens(req->file, ts);
277 #elif defined(__APPLE__) \
278 || defined(__DragonFly__) \
279 || defined(__FreeBSD__) \
280 || defined(__FreeBSD_kernel__) \
281 || defined(__NetBSD__) \
282 || defined(__OpenBSD__) \
283 || defined(__sun)
284 struct timeval tv[2];
285 tv[0] = uv__fs_to_timeval(req->atime);
286 tv[1] = uv__fs_to_timeval(req->mtime);
287 # if defined(__sun)
288 return futimesat(req->file, NULL, tv);
289 # else
290 return futimes(req->file, tv);
291 # endif
292 #elif defined(__MVS__)
293 attrib_t atr;
294 memset(&atr, 0, sizeof(atr));
295 atr.att_mtimechg = 1;
296 atr.att_atimechg = 1;
297 atr.att_mtime = req->mtime;
298 atr.att_atime = req->atime;
299 return __fchattr(req->file, &atr, sizeof(atr));
300 #else
301 errno = ENOSYS;
302 return -1;
303 #endif
304 }
305
306
static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
308 return mkdtemp((char*) req->path) ? 0 : -1;
309 }
310
311
312 static int (*uv__mkostemp)(char*, int);
313
314
static void uv__mkostemp_initonce(void) {
/* z/OS doesn't have RTLD_DEFAULT but that's okay
317 * because it doesn't have mkostemp(O_CLOEXEC) either.
318 */
319 #ifdef RTLD_DEFAULT
320 uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");
321
322 /* We don't care about errors, but we do want to clean them up.
323 * If there has been no error, then dlerror() will just return
324 * NULL.
325 */
326 dlerror();
327 #endif /* RTLD_DEFAULT */
328 }
329
330
static int uv__fs_mkstemp(uv_fs_t* req) {
332 static uv_once_t once = UV_ONCE_INIT;
333 int r;
334 #ifdef O_CLOEXEC
335 static int no_cloexec_support;
336 #endif
337 static const char pattern[] = "XXXXXX";
338 static const size_t pattern_size = sizeof(pattern) - 1;
339 char* path;
340 size_t path_length;
341
342 path = (char*) req->path;
343 path_length = strlen(path);
344
345 /* EINVAL can be returned for 2 reasons:
346 1. The template's last 6 characters were not XXXXXX
347 2. open() didn't support O_CLOEXEC
348 We want to avoid going to the fallback path in case
349 of 1, so it's manually checked before. */
350 if (path_length < pattern_size ||
351 strcmp(path + path_length - pattern_size, pattern)) {
352 errno = EINVAL;
353 r = -1;
354 goto clobber;
355 }
356
357 uv_once(&once, uv__mkostemp_initonce);
358
359 #ifdef O_CLOEXEC
360 if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
361 r = uv__mkostemp(path, O_CLOEXEC);
362
363 if (r >= 0)
364 return r;
365
366 /* If mkostemp() returns EINVAL, it means the kernel doesn't
367 support O_CLOEXEC, so we just fallback to mkstemp() below. */
368 if (errno != EINVAL)
369 goto clobber;
370
371 /* We set the static variable so that next calls don't even
372 try to use mkostemp. */
373 uv__store_relaxed(&no_cloexec_support, 1);
374 }
375 #endif /* O_CLOEXEC */
376
377 if (req->cb != NULL)
378 uv_rwlock_rdlock(&req->loop->cloexec_lock);
379
380 r = mkstemp(path);
381
382 /* In case of failure `uv__cloexec` will leave error in `errno`,
383 * so it is enough to just set `r` to `-1`.
384 */
385 if (r >= 0 && uv__cloexec(r, 1) != 0) {
386 r = uv__close(r);
387 if (r != 0)
388 abort();
389 r = -1;
390 }
391
392 if (req->cb != NULL)
393 uv_rwlock_rdunlock(&req->loop->cloexec_lock);
394
395 clobber:
396 if (r < 0)
397 path[0] = '\0';
398 return r;
399 }
400
401
static ssize_t uv__fs_open(uv_fs_t* req) {
403 #ifdef O_CLOEXEC
404 return open(req->path, req->flags | O_CLOEXEC, req->mode);
405 #else /* O_CLOEXEC */
406 int r;
407
408 if (req->cb != NULL)
409 uv_rwlock_rdlock(&req->loop->cloexec_lock);
410
411 r = open(req->path, req->flags, req->mode);
412
413 /* In case of failure `uv__cloexec` will leave error in `errno`,
414 * so it is enough to just set `r` to `-1`.
415 */
416 if (r >= 0 && uv__cloexec(r, 1) != 0) {
417 r = uv__close(r);
418 if (r != 0)
419 abort();
420 r = -1;
421 }
422
423 if (req->cb != NULL)
424 uv_rwlock_rdunlock(&req->loop->cloexec_lock);
425
426 return r;
427 #endif /* O_CLOEXEC */
428 }
429
430
431 #if !HAVE_PREADV
static ssize_t uv__fs_preadv(uv_file fd,
                             uv_buf_t* bufs,
                             unsigned int nbufs,
                             off_t off) {
436 uv_buf_t* buf;
437 uv_buf_t* end;
438 ssize_t result;
439 ssize_t rc;
440 size_t pos;
441
442 assert(nbufs > 0);
443
444 result = 0;
445 pos = 0;
446 buf = bufs + 0;
447 end = bufs + nbufs;
448
449 for (;;) {
450 do
451 rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
452 while (rc == -1 && errno == EINTR);
453
454 if (rc == 0)
455 break;
456
457 if (rc == -1 && result == 0)
458 return UV__ERR(errno);
459
460 if (rc == -1)
461 break; /* We read some data so return that, ignore the error. */
462
463 pos += rc;
464 result += rc;
465
466 if (pos < buf->len)
467 continue;
468
469 pos = 0;
470 buf += 1;
471
472 if (buf == end)
473 break;
474 }
475
476 return result;
477 }
478 #endif
479
480
static ssize_t uv__fs_read(uv_fs_t* req) {
482 #if defined(__linux__)
483 static int no_preadv;
484 #endif
485 unsigned int iovmax;
486 ssize_t result;
487
488 iovmax = uv__getiovmax();
489 if (req->nbufs > iovmax)
490 req->nbufs = iovmax;
491
492 if (req->off < 0) {
493 if (req->nbufs == 1)
494 result = read(req->file, req->bufs[0].base, req->bufs[0].len);
495 else
496 result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
497 } else {
498 if (req->nbufs == 1) {
499 result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
500 goto done;
501 }
502
503 #if HAVE_PREADV
504 result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
505 #else
506 # if defined(__linux__)
507 if (uv__load_relaxed(&no_preadv)) retry:
508 # endif
509 {
510 result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
511 }
512 # if defined(__linux__)
513 else {
514 result = uv__preadv(req->file,
515 (struct iovec*)req->bufs,
516 req->nbufs,
517 req->off);
518 if (result == -1 && errno == ENOSYS) {
519 uv__store_relaxed(&no_preadv, 1);
520 goto retry;
521 }
522 }
523 # endif
524 #endif
525 }
526
527 done:
528 /* Early cleanup of bufs allocation, since we're done with it. */
529 if (req->bufs != req->bufsml)
530 uv__free(req->bufs);
531
532 req->bufs = NULL;
533 req->nbufs = 0;
534
535 #ifdef __PASE__
536 /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
537 if (result == -1 && errno == EOPNOTSUPP) {
538 struct stat buf;
539 ssize_t rc;
540 rc = fstat(req->file, &buf);
541 if (rc == 0 && S_ISDIR(buf.st_mode)) {
542 errno = EISDIR;
543 }
544 }
545 #endif
546
547 return result;
548 }
549
550
551 #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
552 #define UV_CONST_DIRENT uv__dirent_t
553 #else
554 #define UV_CONST_DIRENT const uv__dirent_t
555 #endif
556
557
static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
559 return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
560 }
561
562
static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
564 return strcmp((*a)->d_name, (*b)->d_name);
565 }
566
567
static ssize_t uv__fs_scandir(uv_fs_t* req) {
569 uv__dirent_t** dents;
570 int n;
571
572 dents = NULL;
573 n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);
574
575 /* NOTE: We will use nbufs as an index field */
576 req->nbufs = 0;
577
578 if (n == 0) {
579 /* OS X still needs to deallocate some memory.
580 * Memory was allocated using the system allocator, so use free() here.
581 */
582 free(dents);
583 dents = NULL;
584 } else if (n == -1) {
585 return n;
586 }
587
588 req->ptr = dents;
589
590 return n;
591 }
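/* Illustrative synchronous use of the scandir result via the public API
 * (sketch only; uv_fs_scandir() and uv_fs_scandir_next() are declared in
 * uv.h):
 *
 *   uv_fs_t req;
 *   uv_dirent_t ent;
 *   uv_fs_scandir(NULL, &req, ".", 0, NULL);
 *   while (uv_fs_scandir_next(&req, &ent) != UV_EOF)
 *     printf("%s\n", ent.name);
 *   uv_fs_req_cleanup(&req);
 */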
592
static int uv__fs_opendir(uv_fs_t* req) {
594 uv_dir_t* dir;
595
596 dir = uv__malloc(sizeof(*dir));
597 if (dir == NULL)
598 goto error;
599
600 dir->dir = opendir(req->path);
601 if (dir->dir == NULL)
602 goto error;
603
604 req->ptr = dir;
605 return 0;
606
607 error:
608 uv__free(dir);
609 req->ptr = NULL;
610 return -1;
611 }
612
static int uv__fs_readdir(uv_fs_t* req) {
614 uv_dir_t* dir;
615 uv_dirent_t* dirent;
616 struct dirent* res;
617 unsigned int dirent_idx;
618 unsigned int i;
619
620 dir = req->ptr;
621 dirent_idx = 0;
622
623 while (dirent_idx < dir->nentries) {
624 /* readdir() returns NULL on end of directory, as well as on error. errno
625 is used to differentiate between the two conditions. */
626 errno = 0;
627 res = readdir(dir->dir);
628
629 if (res == NULL) {
630 if (errno != 0)
631 goto error;
632 break;
633 }
634
635 if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
636 continue;
637
638 dirent = &dir->dirents[dirent_idx];
639 dirent->name = uv__strdup(res->d_name);
640
641 if (dirent->name == NULL)
642 goto error;
643
644 dirent->type = uv__fs_get_dirent_type(res);
645 ++dirent_idx;
646 }
647
648 return dirent_idx;
649
650 error:
651 for (i = 0; i < dirent_idx; ++i) {
652 uv__free((char*) dir->dirents[i].name);
653 dir->dirents[i].name = NULL;
654 }
655
656 return -1;
657 }
658
static int uv__fs_closedir(uv_fs_t* req) {
660 uv_dir_t* dir;
661
662 dir = req->ptr;
663
664 if (dir->dir != NULL) {
665 closedir(dir->dir);
666 dir->dir = NULL;
667 }
668
669 uv__free(req->ptr);
670 req->ptr = NULL;
671 return 0;
672 }
673
static int uv__fs_statfs(uv_fs_t* req) {
675 uv_statfs_t* stat_fs;
676 #if defined(__sun) || \
677 defined(__MVS__) || \
678 defined(__NetBSD__) || \
679 defined(__HAIKU__) || \
680 defined(__QNX__)
681 struct statvfs buf;
682
683 if (0 != statvfs(req->path, &buf))
684 #else
685 struct statfs buf;
686
687 if (0 != statfs(req->path, &buf))
688 #endif /* defined(__sun) */
689 return -1;
690
691 stat_fs = uv__malloc(sizeof(*stat_fs));
692 if (stat_fs == NULL) {
693 errno = ENOMEM;
694 return -1;
695 }
696
697 #if defined(__sun) || \
698 defined(__MVS__) || \
699 defined(__OpenBSD__) || \
700 defined(__NetBSD__) || \
701 defined(__HAIKU__) || \
702 defined(__QNX__)
703 stat_fs->f_type = 0; /* f_type is not supported. */
704 #else
705 stat_fs->f_type = buf.f_type;
706 #endif
707 stat_fs->f_bsize = buf.f_bsize;
708 stat_fs->f_blocks = buf.f_blocks;
709 stat_fs->f_bfree = buf.f_bfree;
710 stat_fs->f_bavail = buf.f_bavail;
711 stat_fs->f_files = buf.f_files;
712 stat_fs->f_ffree = buf.f_ffree;
713 req->ptr = stat_fs;
714 return 0;
715 }
716
static ssize_t uv__fs_pathmax_size(const char* path) {
718 ssize_t pathmax;
719
720 pathmax = pathconf(path, _PC_PATH_MAX);
721
722 if (pathmax == -1)
723 pathmax = UV__PATH_MAX;
724
725 return pathmax;
726 }
727
static ssize_t uv__fs_readlink(uv_fs_t* req) {
729 ssize_t maxlen;
730 ssize_t len;
731 char* buf;
732
733 #if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
734 maxlen = uv__fs_pathmax_size(req->path);
735 #else
736 /* We may not have a real PATH_MAX. Read size of link. */
737 struct stat st;
738 int ret;
739 ret = lstat(req->path, &st);
740 if (ret != 0)
741 return -1;
742 if (!S_ISLNK(st.st_mode)) {
743 errno = EINVAL;
744 return -1;
745 }
746
747 maxlen = st.st_size;
748
749 /* According to readlink(2) lstat can report st_size == 0
750 for some symlinks, such as those in /proc or /sys. */
751 if (maxlen == 0)
752 maxlen = uv__fs_pathmax_size(req->path);
753 #endif
754
755 buf = uv__malloc(maxlen);
756
757 if (buf == NULL) {
758 errno = ENOMEM;
759 return -1;
760 }
761
762 #if defined(__MVS__)
763 len = os390_readlink(req->path, buf, maxlen);
764 #else
765 len = readlink(req->path, buf, maxlen);
766 #endif
767
768 if (len == -1) {
769 uv__free(buf);
770 return -1;
771 }
772
773 /* Uncommon case: resize to make room for the trailing nul byte. */
774 if (len == maxlen) {
775 buf = uv__reallocf(buf, len + 1);
776
777 if (buf == NULL)
778 return -1;
779 }
780
781 buf[len] = '\0';
782 req->ptr = buf;
783
784 return 0;
785 }
786
static ssize_t uv__fs_realpath(uv_fs_t* req) {
788 char* buf;
789
790 #if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
791 buf = realpath(req->path, NULL);
792 if (buf == NULL)
793 return -1;
794 #else
795 ssize_t len;
796
797 len = uv__fs_pathmax_size(req->path);
798 buf = uv__malloc(len + 1);
799
800 if (buf == NULL) {
801 errno = ENOMEM;
802 return -1;
803 }
804
805 if (realpath(req->path, buf) == NULL) {
806 uv__free(buf);
807 return -1;
808 }
809 #endif
810
811 req->ptr = buf;
812
813 return 0;
814 }
815
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
817 struct pollfd pfd;
818 int use_pread;
819 off_t offset;
820 ssize_t nsent;
821 ssize_t nread;
822 ssize_t nwritten;
823 size_t buflen;
824 size_t len;
825 ssize_t n;
826 int in_fd;
827 int out_fd;
828 char buf[8192];
829
830 len = req->bufsml[0].len;
831 in_fd = req->flags;
832 out_fd = req->file;
833 offset = req->off;
834 use_pread = 1;
835
836 /* Here are the rules regarding errors:
837 *
838 * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
839 * The user needs to know that some data has already been sent, to stop
840 * them from sending it twice.
841 *
842 * 2. Write errors are always reported. Write errors are bad because they
843 * mean data loss: we've read data but now we can't write it out.
844 *
845 * We try to use pread() and fall back to regular read() if the source fd
846 * doesn't support positional reads, for example when it's a pipe fd.
847 *
848 * If we get EAGAIN when writing to the target fd, we poll() on it until
849 * it becomes writable again.
850 *
851 * FIXME: If we get a write error when use_pread==1, it should be safe to
852 * return the number of sent bytes instead of an error because pread()
853 * is, in theory, idempotent. However, special files in /dev or /proc
854 * may support pread() but not necessarily return the same data on
855 * successive reads.
856 *
857 * FIXME: There is no way now to signal that we managed to send *some* data
858 * before a write error.
859 */
860 for (nsent = 0; (size_t) nsent < len; ) {
861 buflen = len - nsent;
862
863 if (buflen > sizeof(buf))
864 buflen = sizeof(buf);
865
866 do
867 if (use_pread)
868 nread = pread(in_fd, buf, buflen, offset);
869 else
870 nread = read(in_fd, buf, buflen);
871 while (nread == -1 && errno == EINTR);
872
873 if (nread == 0)
874 goto out;
875
876 if (nread == -1) {
877 if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
878 use_pread = 0;
879 continue;
880 }
881
882 if (nsent == 0)
883 nsent = -1;
884
885 goto out;
886 }
887
888 for (nwritten = 0; nwritten < nread; ) {
889 do
890 n = write(out_fd, buf + nwritten, nread - nwritten);
891 while (n == -1 && errno == EINTR);
892
893 if (n != -1) {
894 nwritten += n;
895 continue;
896 }
897
898 if (errno != EAGAIN && errno != EWOULDBLOCK) {
899 nsent = -1;
900 goto out;
901 }
902
903 pfd.fd = out_fd;
904 pfd.events = POLLOUT;
905 pfd.revents = 0;
906
907 do
908 n = poll(&pfd, 1, -1);
909 while (n == -1 && errno == EINTR);
910
911 if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
912 errno = EIO;
913 nsent = -1;
914 goto out;
915 }
916 }
917
918 offset += nread;
919 nsent += nread;
920 }
921
922 out:
923 if (nsent != -1)
924 req->off = offset;
925
926 return nsent;
927 }
928
929
930 #ifdef __linux__
static unsigned uv__kernel_version(void) {
932 static unsigned cached_version;
933 struct utsname u;
934 unsigned version;
935 unsigned major;
936 unsigned minor;
937 unsigned patch;
938
939 version = uv__load_relaxed(&cached_version);
940 if (version != 0)
941 return version;
942
943 if (-1 == uname(&u))
944 return 0;
945
946 if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
947 return 0;
948
949 version = major * 65536 + minor * 256 + patch;
950 uv__store_relaxed(&cached_version, version);
951
952 return version;
953 }
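/* Example (illustrative): a 5.4.10 kernel encodes as
 * 5 * 65536 + 4 * 256 + 10 = 328714, so comparisons against packed constants
 * such as 0x041400 (4.20.0) below work on this value.
 */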
954
955
956 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
957 * in copy_file_range() when it shouldn't. There is no workaround except to
958 * fall back to a regular copy.
959 */
static int uv__is_buggy_cephfs(int fd) {
961 struct statfs s;
962
963 if (-1 == fstatfs(fd, &s))
964 return 0;
965
966 if (s.f_type != /* CephFS */ 0xC36400)
967 return 0;
968
969 return uv__kernel_version() < /* 4.20.0 */ 0x041400;
970 }
971
972
static int uv__is_cifs_or_smb(int fd) {
974 struct statfs s;
975
976 if (-1 == fstatfs(fd, &s))
977 return 0;
978
979 switch ((unsigned) s.f_type) {
980 case 0x0000517Bu: /* SMB */
981 case 0xFE534D42u: /* SMB2 */
982 case 0xFF534D42u: /* CIFS */
983 return 1;
984 }
985
986 return 0;
987 }
988
989
static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
                                          int out_fd, size_t len) {
992 static int no_copy_file_range_support;
993 ssize_t r;
994
995 if (uv__load_relaxed(&no_copy_file_range_support)) {
996 errno = ENOSYS;
997 return -1;
998 }
999
1000 r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);
1001
1002 if (r != -1)
1003 return r;
1004
1005 switch (errno) {
1006 case EACCES:
1007 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
1008 * copy-from command when it shouldn't.
1009 */
1010 if (uv__is_buggy_cephfs(in_fd))
1011 errno = ENOSYS; /* Use fallback. */
1012 break;
1013 case ENOSYS:
1014 uv__store_relaxed(&no_copy_file_range_support, 1);
1015 break;
1016 case EPERM:
1017 /* It's been reported that CIFS spuriously fails.
1018 * Consider it a transient error.
1019 */
1020 if (uv__is_cifs_or_smb(out_fd))
1021 errno = ENOSYS; /* Use fallback. */
1022 break;
1023 case ENOTSUP:
1024 case EXDEV:
1025 /* ENOTSUP - it could work on another file system type.
1026 * EXDEV - it will not work when in_fd and out_fd are not on the same
1027 * mounted filesystem (pre Linux 5.3)
1028 */
1029 errno = ENOSYS; /* Use fallback. */
1030 break;
1031 }
1032
1033 return -1;
1034 }
1035
1036 #endif /* __linux__ */
1037
1038
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
1040 int in_fd;
1041 int out_fd;
1042
1043 in_fd = req->flags;
1044 out_fd = req->file;
1045
1046 #if defined(__linux__) || defined(__sun)
1047 {
1048 off_t off;
1049 ssize_t r;
1050 size_t len;
1051 int try_sendfile;
1052
1053 off = req->off;
1054 len = req->bufsml[0].len;
1055 try_sendfile = 1;
1056
1057 #ifdef __linux__
1058 r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
1059 try_sendfile = (r == -1 && errno == ENOSYS);
1060 #endif
1061
1062 if (try_sendfile)
1063 r = sendfile(out_fd, in_fd, &off, len);
1064
1065 /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
1066 * it still writes out data. Fortunately, we can detect it by checking if
1067 * the offset has been updated.
1068 */
1069 if (r != -1 || off > req->off) {
1070 r = off - req->off;
1071 req->off = off;
1072 return r;
1073 }
1074
1075 if (errno == EINVAL ||
1076 errno == EIO ||
1077 errno == ENOTSOCK ||
1078 errno == EXDEV) {
1079 errno = 0;
1080 return uv__fs_sendfile_emul(req);
1081 }
1082
1083 return -1;
1084 }
1085 #elif defined(__APPLE__) || \
1086 defined(__DragonFly__) || \
1087 defined(__FreeBSD__) || \
1088 defined(__FreeBSD_kernel__)
1089 {
1090 off_t len;
1091 ssize_t r;
1092
1093 /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
1094 * non-blocking mode and not all data could be written. If a non-zero
1095 * number of bytes have been sent, we don't consider it an error.
1096 */
1097
1098 #if defined(__FreeBSD__) || defined(__DragonFly__)
1099 #if defined(__FreeBSD__)
1100 off_t off;
1101
1102 off = req->off;
1103 r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
1104 if (r >= 0) {
1105 r = off - req->off;
1106 req->off = off;
1107 return r;
1108 }
1109 #endif
1110 len = 0;
1111 r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
1112 #elif defined(__FreeBSD_kernel__)
1113 len = 0;
1114 r = bsd_sendfile(in_fd,
1115 out_fd,
1116 req->off,
1117 req->bufsml[0].len,
1118 NULL,
1119 &len,
1120 0);
1121 #else
1122 /* The darwin sendfile takes len as an input for the length to send,
1123 * so make sure to initialize it with the caller's value. */
1124 len = req->bufsml[0].len;
1125 r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
1126 #endif
1127
1128 /*
1129 * The man page for sendfile(2) on DragonFly states that `len` contains
1130 * a meaningful value ONLY in case of EAGAIN and EINTR.
 * Nothing is said about its value in case of other errors, so it is better
 * not to rely on the assumption that it was not modified by the syscall.
1134 */
1135 if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
1136 req->off += len;
1137 return (ssize_t) len;
1138 }
1139
1140 if (errno == EINVAL ||
1141 errno == EIO ||
1142 errno == ENOTSOCK ||
1143 errno == EXDEV) {
1144 errno = 0;
1145 return uv__fs_sendfile_emul(req);
1146 }
1147
1148 return -1;
1149 }
1150 #else
1151 /* Squelch compiler warnings. */
1152 (void) &in_fd;
1153 (void) &out_fd;
1154
1155 return uv__fs_sendfile_emul(req);
1156 #endif
1157 }
1158
1159
static ssize_t uv__fs_utime(uv_fs_t* req) {
1161 #if defined(__linux__) \
1162 || defined(_AIX71) \
1163 || defined(__sun) \
1164 || defined(__HAIKU__)
1165 struct timespec ts[2];
1166 ts[0] = uv__fs_to_timespec(req->atime);
1167 ts[1] = uv__fs_to_timespec(req->mtime);
1168 return utimensat(AT_FDCWD, req->path, ts, 0);
1169 #elif defined(__APPLE__) \
1170 || defined(__DragonFly__) \
1171 || defined(__FreeBSD__) \
1172 || defined(__FreeBSD_kernel__) \
1173 || defined(__NetBSD__) \
1174 || defined(__OpenBSD__)
1175 struct timeval tv[2];
1176 tv[0] = uv__fs_to_timeval(req->atime);
1177 tv[1] = uv__fs_to_timeval(req->mtime);
1178 return utimes(req->path, tv);
1179 #elif defined(_AIX) \
1180 && !defined(_AIX71)
1181 struct utimbuf buf;
1182 buf.actime = req->atime;
1183 buf.modtime = req->mtime;
1184 return utime(req->path, &buf);
1185 #elif defined(__MVS__)
1186 attrib_t atr;
1187 memset(&atr, 0, sizeof(atr));
1188 atr.att_mtimechg = 1;
1189 atr.att_atimechg = 1;
1190 atr.att_mtime = req->mtime;
1191 atr.att_atime = req->atime;
1192 return __lchattr((char*) req->path, &atr, sizeof(atr));
1193 #else
1194 errno = ENOSYS;
1195 return -1;
1196 #endif
1197 }
1198
1199
static ssize_t uv__fs_lutime(uv_fs_t* req) {
1201 #if defined(__linux__) || \
1202 defined(_AIX71) || \
1203 defined(__sun) || \
1204 defined(__HAIKU__) || \
1205 defined(__GNU__)
1206 struct timespec ts[2];
1207 ts[0] = uv__fs_to_timespec(req->atime);
1208 ts[1] = uv__fs_to_timespec(req->mtime);
1209 return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
1210 #elif defined(__APPLE__) || \
1211 defined(__DragonFly__) || \
1212 defined(__FreeBSD__) || \
1213 defined(__FreeBSD_kernel__) || \
1214 defined(__NetBSD__)
1215 struct timeval tv[2];
1216 tv[0] = uv__fs_to_timeval(req->atime);
1217 tv[1] = uv__fs_to_timeval(req->mtime);
1218 return lutimes(req->path, tv);
1219 #else
1220 errno = ENOSYS;
1221 return -1;
1222 #endif
1223 }
1224
1225
static ssize_t uv__fs_write(uv_fs_t* req) {
1227 #if defined(__linux__)
1228 static int no_pwritev;
1229 #endif
1230 ssize_t r;
1231
1232 /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
1233 * data loss. We can't use a per-file descriptor lock, the descriptor may be
1234 * a dup().
1235 */
1236 #if defined(__APPLE__)
1237 static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
1238
1239 if (pthread_mutex_lock(&lock))
1240 abort();
1241 #endif
1242
1243 if (req->off < 0) {
1244 if (req->nbufs == 1)
1245 r = write(req->file, req->bufs[0].base, req->bufs[0].len);
1246 else
1247 r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
1248 } else {
1249 if (req->nbufs == 1) {
1250 r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
1251 goto done;
1252 }
1253 #if HAVE_PREADV
1254 r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
1255 #else
1256 # if defined(__linux__)
1257 if (no_pwritev) retry:
1258 # endif
1259 {
1260 r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
1261 }
1262 # if defined(__linux__)
1263 else {
1264 r = uv__pwritev(req->file,
1265 (struct iovec*) req->bufs,
1266 req->nbufs,
1267 req->off);
1268 if (r == -1 && errno == ENOSYS) {
1269 no_pwritev = 1;
1270 goto retry;
1271 }
1272 }
1273 # endif
1274 #endif
1275 }
1276
1277 done:
1278 #if defined(__APPLE__)
1279 if (pthread_mutex_unlock(&lock))
1280 abort();
1281 #endif
1282
1283 return r;
1284 }
1285
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
1287 uv_fs_t fs_req;
1288 uv_file srcfd;
1289 uv_file dstfd;
1290 struct stat src_statsbuf;
1291 struct stat dst_statsbuf;
1292 int dst_flags;
1293 int result;
1294 int err;
1295 off_t bytes_to_send;
1296 off_t in_offset;
1297 off_t bytes_written;
1298 size_t bytes_chunk;
1299
1300 dstfd = -1;
1301 err = 0;
1302
1303 /* Open the source file. */
1304 srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
1305 uv_fs_req_cleanup(&fs_req);
1306
1307 if (srcfd < 0)
1308 return srcfd;
1309
1310 /* Get the source file's mode. */
1311 if (fstat(srcfd, &src_statsbuf)) {
1312 err = UV__ERR(errno);
1313 goto out;
1314 }
1315
1316 dst_flags = O_WRONLY | O_CREAT;
1317
1318 if (req->flags & UV_FS_COPYFILE_EXCL)
1319 dst_flags |= O_EXCL;
1320
1321 /* Open the destination file. */
1322 dstfd = uv_fs_open(NULL,
1323 &fs_req,
1324 req->new_path,
1325 dst_flags,
1326 src_statsbuf.st_mode,
1327 NULL);
1328 uv_fs_req_cleanup(&fs_req);
1329
1330 if (dstfd < 0) {
1331 err = dstfd;
1332 goto out;
1333 }
1334
1335 /* If the file is not being opened exclusively, verify that the source and
1336 destination are not the same file. If they are the same, bail out early. */
1337 if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
1338 /* Get the destination file's mode. */
1339 if (fstat(dstfd, &dst_statsbuf)) {
1340 err = UV__ERR(errno);
1341 goto out;
1342 }
1343
1344 /* Check if srcfd and dstfd refer to the same file */
1345 if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
1346 src_statsbuf.st_ino == dst_statsbuf.st_ino) {
1347 goto out;
1348 }
1349
1350 /* Truncate the file in case the destination already existed. */
1351 if (ftruncate(dstfd, 0) != 0) {
1352 err = UV__ERR(errno);
1353 goto out;
1354 }
1355 }
1356
1357 if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
1358 err = UV__ERR(errno);
1359 #ifdef __linux__
1360 /* fchmod() on CIFS shares always fails with EPERM unless the share is
1361 * mounted with "noperm". As fchmod() is a meaningless operation on such
1362 * shares anyway, detect that condition and squelch the error.
1363 */
1364 if (err != UV_EPERM)
1365 goto out;
1366
1367 if (!uv__is_cifs_or_smb(dstfd))
1368 goto out;
1369
1370 err = 0;
1371 #else /* !__linux__ */
1372 goto out;
1373 #endif /* !__linux__ */
1374 }
1375
1376 #ifdef FICLONE
1377 if (req->flags & UV_FS_COPYFILE_FICLONE ||
1378 req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1379 if (ioctl(dstfd, FICLONE, srcfd) == 0) {
1380 /* ioctl() with FICLONE succeeded. */
1381 goto out;
1382 }
1383 /* If an error occurred and force was set, return the error to the caller;
1384 * fall back to sendfile() when force was not set. */
1385 if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1386 err = UV__ERR(errno);
1387 goto out;
1388 }
1389 }
1390 #else
1391 if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1392 err = UV_ENOSYS;
1393 goto out;
1394 }
1395 #endif
1396
1397 bytes_to_send = src_statsbuf.st_size;
1398 in_offset = 0;
1399 while (bytes_to_send != 0) {
1400 bytes_chunk = SSIZE_MAX;
1401 if (bytes_to_send < (off_t) bytes_chunk)
1402 bytes_chunk = bytes_to_send;
1403 uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
1404 bytes_written = fs_req.result;
1405 uv_fs_req_cleanup(&fs_req);
1406
1407 if (bytes_written < 0) {
1408 err = bytes_written;
1409 break;
1410 }
1411
1412 bytes_to_send -= bytes_written;
1413 in_offset += bytes_written;
1414 }
1415
1416 out:
1417 if (err < 0)
1418 result = err;
1419 else
1420 result = 0;
1421
1422 /* Close the source file. */
1423 err = uv__close_nocheckstdio(srcfd);
1424
1425 /* Don't overwrite any existing errors. */
1426 if (err != 0 && result == 0)
1427 result = err;
1428
1429 /* Close the destination file if it is open. */
1430 if (dstfd >= 0) {
1431 err = uv__close_nocheckstdio(dstfd);
1432
1433 /* Don't overwrite any existing errors. */
1434 if (err != 0 && result == 0)
1435 result = err;
1436
1437 /* Remove the destination file if something went wrong. */
1438 if (result != 0) {
1439 uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
1440 /* Ignore the unlink return value, as an error already happened. */
1441 uv_fs_req_cleanup(&fs_req);
1442 }
1443 }
1444
1445 if (result == 0)
1446 return 0;
1447
1448 errno = UV__ERR(result);
1449 return -1;
1450 }
1451
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
1453 dst->st_dev = src->st_dev;
1454 dst->st_mode = src->st_mode;
1455 dst->st_nlink = src->st_nlink;
1456 dst->st_uid = src->st_uid;
1457 dst->st_gid = src->st_gid;
1458 dst->st_rdev = src->st_rdev;
1459 dst->st_ino = src->st_ino;
1460 dst->st_size = src->st_size;
1461 dst->st_blksize = src->st_blksize;
1462 dst->st_blocks = src->st_blocks;
1463
1464 #if defined(__APPLE__)
1465 dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
1466 dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
1467 dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
1468 dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
1469 dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
1470 dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
1471 dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
1472 dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
1473 dst->st_flags = src->st_flags;
1474 dst->st_gen = src->st_gen;
1475 #elif defined(__ANDROID__)
1476 dst->st_atim.tv_sec = src->st_atime;
1477 dst->st_atim.tv_nsec = src->st_atimensec;
1478 dst->st_mtim.tv_sec = src->st_mtime;
1479 dst->st_mtim.tv_nsec = src->st_mtimensec;
1480 dst->st_ctim.tv_sec = src->st_ctime;
1481 dst->st_ctim.tv_nsec = src->st_ctimensec;
1482 dst->st_birthtim.tv_sec = src->st_ctime;
1483 dst->st_birthtim.tv_nsec = src->st_ctimensec;
1484 dst->st_flags = 0;
1485 dst->st_gen = 0;
1486 #elif !defined(_AIX) && \
1487 !defined(__MVS__) && ( \
1488 defined(__DragonFly__) || \
1489 defined(__FreeBSD__) || \
1490 defined(__OpenBSD__) || \
1491 defined(__NetBSD__) || \
1492 defined(_GNU_SOURCE) || \
1493 defined(_BSD_SOURCE) || \
1494 defined(_SVID_SOURCE) || \
1495 defined(_XOPEN_SOURCE) || \
1496 defined(_DEFAULT_SOURCE))
1497 dst->st_atim.tv_sec = src->st_atim.tv_sec;
1498 dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
1499 dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
1500 dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
1501 dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
1502 dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
1503 # if defined(__FreeBSD__) || \
1504 defined(__NetBSD__)
1505 dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
1506 dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
1507 dst->st_flags = src->st_flags;
1508 dst->st_gen = src->st_gen;
1509 # else
1510 dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
1511 dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
1512 dst->st_flags = 0;
1513 dst->st_gen = 0;
1514 # endif
1515 #else
1516 dst->st_atim.tv_sec = src->st_atime;
1517 dst->st_atim.tv_nsec = 0;
1518 dst->st_mtim.tv_sec = src->st_mtime;
1519 dst->st_mtim.tv_nsec = 0;
1520 dst->st_ctim.tv_sec = src->st_ctime;
1521 dst->st_ctim.tv_nsec = 0;
1522 dst->st_birthtim.tv_sec = src->st_ctime;
1523 dst->st_birthtim.tv_nsec = 0;
1524 dst->st_flags = 0;
1525 dst->st_gen = 0;
1526 #endif
1527 }
1528
1529
static int uv__fs_statx(int fd,
                        const char* path,
                        int is_fstat,
                        int is_lstat,
                        uv_stat_t* buf) {
1535 STATIC_ASSERT(UV_ENOSYS != -1);
1536 #ifdef __linux__
1537 static int no_statx;
1538 struct uv__statx statxbuf;
1539 int dirfd;
1540 int flags;
1541 int mode;
1542 int rc;
1543
1544 if (uv__load_relaxed(&no_statx))
1545 return UV_ENOSYS;
1546
1547 dirfd = AT_FDCWD;
1548 flags = 0; /* AT_STATX_SYNC_AS_STAT */
1549 mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */
1550
1551 if (is_fstat) {
1552 dirfd = fd;
1553 flags |= 0x1000; /* AT_EMPTY_PATH */
1554 }
1555
1556 if (is_lstat)
1557 flags |= AT_SYMLINK_NOFOLLOW;
1558
1559 rc = uv__statx(dirfd, path, flags, mode, &statxbuf);
1560
1561 switch (rc) {
1562 case 0:
1563 break;
1564 case -1:
1565 /* EPERM happens when a seccomp filter rejects the system call.
1566 * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
1567 * EOPNOTSUPP is used on DVS exported filesystems
1568 */
1569 if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
1570 return -1;
1571 /* Fall through. */
1572 default:
/* Normally zero is returned on success and -1 on error. However, on S390
 * RHEL running in a docker container with statx not implemented, rc has
 * been observed to be 1 with the error code set to 0, in which case we
 * return ENOSYS.
 */
1578 uv__store_relaxed(&no_statx, 1);
1579 return UV_ENOSYS;
1580 }
1581
1582 buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
1583 buf->st_mode = statxbuf.stx_mode;
1584 buf->st_nlink = statxbuf.stx_nlink;
1585 buf->st_uid = statxbuf.stx_uid;
1586 buf->st_gid = statxbuf.stx_gid;
1587 buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
1588 buf->st_ino = statxbuf.stx_ino;
1589 buf->st_size = statxbuf.stx_size;
1590 buf->st_blksize = statxbuf.stx_blksize;
1591 buf->st_blocks = statxbuf.stx_blocks;
1592 buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
1593 buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
1594 buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
1595 buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
1596 buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
1597 buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
1598 buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
1599 buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
1600 buf->st_flags = 0;
1601 buf->st_gen = 0;
1602
1603 return 0;
1604 #else
1605 return UV_ENOSYS;
1606 #endif /* __linux__ */
1607 }
1608
1609
static int uv__fs_stat(const char *path, uv_stat_t *buf) {
1611 struct stat pbuf;
1612 int ret;
1613
1614 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
1615 if (ret != UV_ENOSYS)
1616 return ret;
1617
1618 ret = stat(path, &pbuf);
1619 if (ret == 0)
1620 uv__to_stat(&pbuf, buf);
1621
1622 return ret;
1623 }
1624
1625
static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
1627 struct stat pbuf;
1628 int ret;
1629
1630 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
1631 if (ret != UV_ENOSYS)
1632 return ret;
1633
1634 ret = lstat(path, &pbuf);
1635 if (ret == 0)
1636 uv__to_stat(&pbuf, buf);
1637
1638 return ret;
1639 }
1640
1641
static int uv__fs_fstat(int fd, uv_stat_t *buf) {
1643 struct stat pbuf;
1644 int ret;
1645
1646 ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
1647 if (ret != UV_ENOSYS)
1648 return ret;
1649
1650 ret = fstat(fd, &pbuf);
1651 if (ret == 0)
1652 uv__to_stat(&pbuf, buf);
1653
1654 return ret;
1655 }
1656
static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
1658 size_t offset;
1659 /* Figure out which bufs are done */
1660 for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
1661 size -= bufs[offset].len;
1662
1663 /* Fix a partial read/write */
1664 if (size > 0) {
1665 bufs[offset].base += size;
1666 bufs[offset].len -= size;
1667 }
1668 return offset;
1669 }
1670
static ssize_t uv__fs_write_all(uv_fs_t* req) {
1672 unsigned int iovmax;
1673 unsigned int nbufs;
1674 uv_buf_t* bufs;
1675 ssize_t total;
1676 ssize_t result;
1677
1678 iovmax = uv__getiovmax();
1679 nbufs = req->nbufs;
1680 bufs = req->bufs;
1681 total = 0;
1682
1683 while (nbufs > 0) {
1684 req->nbufs = nbufs;
1685 if (req->nbufs > iovmax)
1686 req->nbufs = iovmax;
1687
1688 do
1689 result = uv__fs_write(req);
1690 while (result < 0 && errno == EINTR);
1691
1692 if (result <= 0) {
1693 if (total == 0)
1694 total = result;
1695 break;
1696 }
1697
1698 if (req->off >= 0)
1699 req->off += result;
1700
1701 req->nbufs = uv__fs_buf_offset(req->bufs, result);
1702 req->bufs += req->nbufs;
1703 nbufs -= req->nbufs;
1704 total += result;
1705 }
1706
1707 if (bufs != req->bufsml)
1708 uv__free(bufs);
1709
1710 req->bufs = NULL;
1711 req->nbufs = 0;
1712
1713 return total;
1714 }
1715
1716
static void uv__fs_work(struct uv__work* w) {
1718 int retry_on_eintr;
1719 uv_fs_t* req;
1720 ssize_t r;
1721
1722 req = container_of(w, uv_fs_t, work_req);
1723 retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
1724 req->fs_type == UV_FS_READ);
1725
1726 do {
1727 errno = 0;
1728
1729 #define X(type, action) \
1730 case UV_FS_ ## type: \
1731 r = action; \
1732 break;
1733
1734 switch (req->fs_type) {
1735 X(ACCESS, access(req->path, req->flags));
1736 X(CHMOD, chmod(req->path, req->mode));
1737 X(CHOWN, chown(req->path, req->uid, req->gid));
1738 X(CLOSE, uv__fs_close(req->file));
1739 X(COPYFILE, uv__fs_copyfile(req));
1740 X(FCHMOD, fchmod(req->file, req->mode));
1741 X(FCHOWN, fchown(req->file, req->uid, req->gid));
1742 X(LCHOWN, lchown(req->path, req->uid, req->gid));
1743 X(FDATASYNC, uv__fs_fdatasync(req));
1744 X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
1745 X(FSYNC, uv__fs_fsync(req));
1746 X(FTRUNCATE, ftruncate(req->file, req->off));
1747 X(FUTIME, uv__fs_futime(req));
1748 X(LUTIME, uv__fs_lutime(req));
1749 X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
1750 X(LINK, link(req->path, req->new_path));
1751 X(MKDIR, mkdir(req->path, req->mode));
1752 X(MKDTEMP, uv__fs_mkdtemp(req));
1753 X(MKSTEMP, uv__fs_mkstemp(req));
1754 X(OPEN, uv__fs_open(req));
1755 X(READ, uv__fs_read(req));
1756 X(SCANDIR, uv__fs_scandir(req));
1757 X(OPENDIR, uv__fs_opendir(req));
1758 X(READDIR, uv__fs_readdir(req));
1759 X(CLOSEDIR, uv__fs_closedir(req));
1760 X(READLINK, uv__fs_readlink(req));
1761 X(REALPATH, uv__fs_realpath(req));
1762 X(RENAME, rename(req->path, req->new_path));
1763 X(RMDIR, rmdir(req->path));
1764 X(SENDFILE, uv__fs_sendfile(req));
1765 X(STAT, uv__fs_stat(req->path, &req->statbuf));
1766 X(STATFS, uv__fs_statfs(req));
1767 X(SYMLINK, symlink(req->path, req->new_path));
1768 X(UNLINK, unlink(req->path));
1769 X(UTIME, uv__fs_utime(req));
1770 X(WRITE, uv__fs_write_all(req));
1771 default: abort();
1772 }
1773 #undef X
1774 } while (r == -1 && errno == EINTR && retry_on_eintr);
1775
1776 if (r == -1)
1777 req->result = UV__ERR(errno);
1778 else
1779 req->result = r;
1780
1781 if (r == 0 && (req->fs_type == UV_FS_STAT ||
1782 req->fs_type == UV_FS_FSTAT ||
1783 req->fs_type == UV_FS_LSTAT)) {
1784 req->ptr = &req->statbuf;
1785 }
1786 }
1787
1788
static void uv__fs_done(struct uv__work* w, int status) {
1790 uv_fs_t* req;
1791
1792 req = container_of(w, uv_fs_t, work_req);
1793 uv__req_unregister(req->loop, req);
1794
1795 if (status == UV_ECANCELED) {
1796 assert(req->result == 0);
1797 req->result = UV_ECANCELED;
1798 }
1799
1800 req->cb(req);
1801 }
1802
1803
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
1809 INIT(ACCESS);
1810 PATH;
1811 req->flags = flags;
1812 POST;
1813 }
1814
1815
int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
1821 INIT(CHMOD);
1822 PATH;
1823 req->mode = mode;
1824 POST;
1825 }
1826
1827
int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
1834 INIT(CHOWN);
1835 PATH;
1836 req->uid = uid;
1837 req->gid = gid;
1838 POST;
1839 }
1840
1841
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1843 INIT(CLOSE);
1844 req->file = file;
1845 POST;
1846 }
1847
1848
int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
1854 INIT(FCHMOD);
1855 req->file = file;
1856 req->mode = mode;
1857 POST;
1858 }
1859
1860
int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
1867 INIT(FCHOWN);
1868 req->file = file;
1869 req->uid = uid;
1870 req->gid = gid;
1871 POST;
1872 }
1873
1874
int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
1881 INIT(LCHOWN);
1882 PATH;
1883 req->uid = uid;
1884 req->gid = gid;
1885 POST;
1886 }
1887
1888
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1890 INIT(FDATASYNC);
1891 req->file = file;
1892 POST;
1893 }
1894
1895
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1897 INIT(FSTAT);
1898 req->file = file;
1899 POST;
1900 }
1901
1902
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1904 INIT(FSYNC);
1905 req->file = file;
1906 POST;
1907 }
1908
1909
int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
1915 INIT(FTRUNCATE);
1916 req->file = file;
1917 req->off = off;
1918 POST;
1919 }
1920
1921
int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
1928 INIT(FUTIME);
1929 req->file = file;
1930 req->atime = atime;
1931 req->mtime = mtime;
1932 POST;
1933 }
1934
int uv_fs_lutime(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
1941 INIT(LUTIME);
1942 PATH;
1943 req->atime = atime;
1944 req->mtime = mtime;
1945 POST;
1946 }
1947
1948
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
1950 INIT(LSTAT);
1951 PATH;
1952 POST;
1953 }
1954
1955
int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
1961 INIT(LINK);
1962 PATH2;
1963 POST;
1964 }
1965
1966
int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
1972 INIT(MKDIR);
1973 PATH;
1974 req->mode = mode;
1975 POST;
1976 }
1977
1978
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
1983 INIT(MKDTEMP);
1984 req->path = uv__strdup(tpl);
1985 if (req->path == NULL)
1986 return UV_ENOMEM;
1987 POST;
1988 }
1989
1990
int uv_fs_mkstemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
1995 INIT(MKSTEMP);
1996 req->path = uv__strdup(tpl);
1997 if (req->path == NULL)
1998 return UV_ENOMEM;
1999 POST;
2000 }
2001
2002
int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
2009 INIT(OPEN);
2010 PATH;
2011 req->flags = flags;
2012 req->mode = mode;
2013 POST;
2014 }
2015
2016
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
               uv_file file,
               const uv_buf_t bufs[],
               unsigned int nbufs,
               int64_t off,
               uv_fs_cb cb) {
2023 INIT(READ);
2024
2025 if (bufs == NULL || nbufs == 0)
2026 return UV_EINVAL;
2027
2028 req->file = file;
2029
2030 req->nbufs = nbufs;
2031 req->bufs = req->bufsml;
2032 if (nbufs > ARRAY_SIZE(req->bufsml))
2033 req->bufs = uv__malloc(nbufs * sizeof(*bufs));
2034
2035 if (req->bufs == NULL)
2036 return UV_ENOMEM;
2037
2038 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
2039
2040 req->off = off;
2041 POST;
2042 }
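/* Illustrative synchronous read (sketch only; `file` is a descriptor obtained
 * e.g. from a previous uv_fs_open() call, error handling omitted):
 *
 *   char data[64];
 *   uv_buf_t iov = uv_buf_init(data, sizeof(data));
 *   uv_fs_t req;
 *   int n = uv_fs_read(NULL, &req, file, &iov, 1, -1, NULL);
 *   uv_fs_req_cleanup(&req);
 *
 * With a NULL loop and callback the request runs synchronously and n is the
 * number of bytes read or a negated UV_* error code.
 */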
2043
2044
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
2050 INIT(SCANDIR);
2051 PATH;
2052 req->flags = flags;
2053 POST;
2054 }
2055
int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
2060 INIT(OPENDIR);
2061 PATH;
2062 POST;
2063 }
2064
int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
2069 INIT(READDIR);
2070
2071 if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
2072 return UV_EINVAL;
2073
2074 req->ptr = dir;
2075 POST;
2076 }
2077
int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
2082 INIT(CLOSEDIR);
2083
2084 if (dir == NULL)
2085 return UV_EINVAL;
2086
2087 req->ptr = dir;
2088 POST;
2089 }
2090
int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
2095 INIT(READLINK);
2096 PATH;
2097 POST;
2098 }
2099
2100
int uv_fs_realpath(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
2105 INIT(REALPATH);
2106 PATH;
2107 POST;
2108 }
2109
2110
int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
2116 INIT(RENAME);
2117 PATH2;
2118 POST;
2119 }
2120
2121
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
2123 INIT(RMDIR);
2124 PATH;
2125 POST;
2126 }
2127
2128
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
2136 INIT(SENDFILE);
2137 req->flags = in_fd; /* hack */
2138 req->file = out_fd;
2139 req->off = off;
2140 req->bufsml[0].len = len;
2141 POST;
2142 }
2143
2144
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
2146 INIT(STAT);
2147 PATH;
2148 POST;
2149 }
2150
2151
int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
2158 INIT(SYMLINK);
2159 PATH2;
2160 req->flags = flags;
2161 POST;
2162 }
2163
2164
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
2166 INIT(UNLINK);
2167 PATH;
2168 POST;
2169 }
2170
2171
int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
2178 INIT(UTIME);
2179 PATH;
2180 req->atime = atime;
2181 req->mtime = mtime;
2182 POST;
2183 }
2184
2185
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
2193 INIT(WRITE);
2194
2195 if (bufs == NULL || nbufs == 0)
2196 return UV_EINVAL;
2197
2198 req->file = file;
2199
2200 req->nbufs = nbufs;
2201 req->bufs = req->bufsml;
2202 if (nbufs > ARRAY_SIZE(req->bufsml))
2203 req->bufs = uv__malloc(nbufs * sizeof(*bufs));
2204
2205 if (req->bufs == NULL)
2206 return UV_ENOMEM;
2207
2208 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
2209
2210 req->off = off;
2211 POST;
2212 }
2213
2214
void uv_fs_req_cleanup(uv_fs_t* req) {
2216 if (req == NULL)
2217 return;
2218
/* Only necessary for asynchronous requests, i.e., requests with a callback.
2220 * Synchronous ones don't copy their arguments and have req->path and
2221 * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
2222 * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
2223 */
2224 if (req->path != NULL &&
2225 (req->cb != NULL ||
2226 req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
2227 uv__free((void*) req->path); /* Memory is shared with req->new_path. */
2228
2229 req->path = NULL;
2230 req->new_path = NULL;
2231
2232 if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
2233 uv__fs_readdir_cleanup(req);
2234
2235 if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
2236 uv__fs_scandir_cleanup(req);
2237
2238 if (req->bufs != req->bufsml)
2239 uv__free(req->bufs);
2240 req->bufs = NULL;
2241
2242 if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
2243 uv__free(req->ptr);
2244 req->ptr = NULL;
2245 }
2246
2247
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
2254 INIT(COPYFILE);
2255
2256 if (flags & ~(UV_FS_COPYFILE_EXCL |
2257 UV_FS_COPYFILE_FICLONE |
2258 UV_FS_COPYFILE_FICLONE_FORCE)) {
2259 return UV_EINVAL;
2260 }
2261
2262 PATH2;
2263 req->flags = flags;
2264 POST;
2265 }
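/* Illustrative synchronous copy (sketch only): clone when the filesystem
 * supports it, otherwise fall back to a regular copy, and fail if "dst.txt"
 * already exists:
 *
 *   uv_fs_t req;
 *   int r = uv_fs_copyfile(NULL, &req, "src.txt", "dst.txt",
 *                          UV_FS_COPYFILE_FICLONE | UV_FS_COPYFILE_EXCL,
 *                          NULL);
 *   uv_fs_req_cleanup(&req);
 */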
2266
2267
int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
2272 INIT(STATFS);
2273 PATH;
2274 POST;
2275 }
2276
int uv_fs_get_system_error(const uv_fs_t* req) {
2278 return -req->result;
2279 }
2280