1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 /* Caveat emptor: this file deviates from the libuv convention of returning
23 * negated errno codes. Most uv_fs_*() functions map directly to the system
24 * call of the same name. For more complex wrappers, it's easier to just
25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26 * getting the errno to the right place (req->result or as the return value.)
27 */
28
29 #include "uv.h"
30 #include "internal.h"
31
32 #include <errno.h>
33 #include <dlfcn.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <limits.h> /* PATH_MAX */
38
39 #include <sys/types.h>
40 #include <sys/socket.h>
41 #include <sys/stat.h>
42 #include <sys/time.h>
43 #include <sys/uio.h>
44 #include <unistd.h>
45 #include <fcntl.h>
46 #include <poll.h>
47
48 #if defined(__linux__)
49 # include <sys/sendfile.h>
50 #endif
51
52 #if defined(__sun)
53 # include <sys/sendfile.h>
54 # include <sys/sysmacros.h>
55 #endif
56
57 #if defined(__APPLE__)
58 # include <sys/sysctl.h>
59 #elif defined(__linux__) && !defined(FICLONE)
60 # include <sys/ioctl.h>
61 # define FICLONE _IOW(0x94, 9, int)
62 #endif
63
64 #if defined(_AIX) && !defined(_AIX71)
65 # include <utime.h>
66 #endif
67
68 #if defined(__APPLE__) || \
69 defined(__DragonFly__) || \
70 defined(__FreeBSD__) || \
71 defined(__OpenBSD__) || \
72 defined(__NetBSD__)
73 # include <sys/param.h>
74 # include <sys/mount.h>
75 #elif defined(__sun) || \
76 defined(__MVS__) || \
77 defined(__NetBSD__) || \
78 defined(__HAIKU__) || \
79 defined(__QNX__)
80 # include <sys/statvfs.h>
81 #else
82 # include <sys/statfs.h>
83 #endif
84
85 #if defined(__CYGWIN__) || \
86 (defined(__HAIKU__) && B_HAIKU_VERSION < B_HAIKU_VERSION_1_PRE_BETA_5) || \
87 (defined(__sun) && !defined(__illumos__)) || \
88 (defined(__APPLE__) && !TARGET_OS_IPHONE && \
89 MAC_OS_X_VERSION_MIN_REQUIRED < 110000)
90 #define preadv(fd, bufs, nbufs, off) \
91 pread(fd, (bufs)->iov_base, (bufs)->iov_len, off)
92 #define pwritev(fd, bufs, nbufs, off) \
93 pwrite(fd, (bufs)->iov_base, (bufs)->iov_len, off)
94 #endif
95
96 #if defined(_AIX) && _XOPEN_SOURCE <= 600
97 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
98 #endif
99
/* Common initialization for every uv_fs_*() entry point: validates the
 * request pointer, tags the request with its operation subtype and resets
 * all result/ownership fields. `loop' and `cb' come from the caller's scope.
 */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)
115
/* Stores `path' on the request. Synchronous requests (cb == NULL) borrow the
 * caller's string; asynchronous requests take a heap copy because the
 * caller's buffer may be gone by the time the work thread runs.
 */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
128
/* Stores `path' and `new_path' on the request. For asynchronous requests
 * both strings (including their NUL terminators) live in one allocation;
 * req->new_path points into that buffer just past `path', so cleanup only
 * has to free req->path.
 */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)
148
/* Dispatches the prepared request: with a callback it is registered on the
 * loop and submitted to the thread pool (returning 0 immediately); without
 * one the work runs inline and the result is returned synchronously. The
 * USE_FFRT build additionally passes the request pointer itself to
 * uv__work_submit().
 */
#ifdef USE_FFRT
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      (uv_req_t*)req,                                         \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
#else
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
#endif
187
188
static int uv__fs_close(int fd) {
  int rc = uv__close_nocancel(fd);

  /* An interrupted close, or one that is still completing asynchronously,
   * is not treated as a failure: the descriptor is gone either way.
   */
  if (rc == -1 && (errno == EINTR || errno == EINPROGRESS))
    return 0;

  return rc;
}
199
200
/* Flushes the open file in req->file to stable storage, with extra care on
 * macOS where plain fsync() does not flush the drive's write cache.
 */
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
   * to the drive platters. This is in contrast to Linux's fdatasync and fsync
   * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
   * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
   * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
   * This is the same approach taken by sqlite, except sqlite does not issue
   * an F_BARRIERFSYNC call.
   */
  int r;

  r = fcntl(req->file, F_FULLFSYNC);
  if (r != 0)
    r = fcntl(req->file, 85 /* F_BARRIERFSYNC */);  /* fsync + barrier */
  if (r != 0)
    r = fsync(req->file);
  return r;
#else
  return fsync(req->file);
#endif
}
223
224
/* Flushes the data (not necessarily all metadata) of req->file; platforms
 * without fdatasync() fall back to a full fsync.
 */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  return fsync(req->file);
#endif
}
235
236
/* Converts seconds-since-epoch (as a double) into a struct timespec,
 * truncated to microsecond resolution (see the TODO below) and normalized
 * so that tv_nsec is non-negative.
 */
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
  struct timespec ts;
  ts.tv_sec  = time;
  ts.tv_nsec = (time - ts.tv_sec) * 1e9;

 /* TODO(bnoordhuis) Remove this. utimesat() has nanosecond resolution but we
  * stick to microsecond resolution for the sake of consistency with other
  * platforms. I'm the original author of this compatibility hack but I'm
  * less convinced it's useful nowadays.
  */
  ts.tv_nsec -= ts.tv_nsec % 1000;

  /* The fractional part can come out negative for pre-epoch times;
   * re-normalize by borrowing one second. */
  if (ts.tv_nsec < 0) {
    ts.tv_nsec += 1e9;
    ts.tv_sec -= 1;
  }
  return ts;
}
255
/* Converts seconds-since-epoch (as a double) into a struct timeval,
 * normalized so that tv_usec is non-negative.
 */
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
  struct timeval tv;
  tv.tv_sec  = time;
  tv.tv_usec = (time - tv.tv_sec) * 1e6;
  /* Borrow one second when the fractional part came out negative. */
  if (tv.tv_usec < 0) {
    tv.tv_usec += 1e6;
    tv.tv_sec -= 1;
  }
  return tv;
}
266
/* Sets the access/modification times of the open file in req->file to
 * req->atime / req->mtime (seconds as doubles), using whatever per-fd
 * interface the platform offers; ENOSYS where there is none.
 */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__HAIKU__)                                                     \
    || defined(__GNU__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return futimens(req->file, ts);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__sun)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
# if defined(__sun)
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
303
304
uv__fs_mkdtemp(uv_fs_t * req)305 static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
306 return mkdtemp((char*) req->path) ? 0 : -1;
307 }
308
309
/* Resolved at runtime via dlsym(): the libc's mkostemp() when available,
 * NULL otherwise (see uv__fs_mkstemp for the fallback path). */
static int (*uv__mkostemp)(char*, int);


/* One-time lookup of mkostemp(); runs under uv_once() from uv__fs_mkstemp. */
static void uv__mkostemp_initonce(void) {
  /* z/os doesn't have RTLD_DEFAULT but that's okay
   * because it doesn't have mkostemp(O_CLOEXEC) either.
   */
#ifdef RTLD_DEFAULT
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");

  /* We don't care about errors, but we do want to clean them up.
   * If there has been no error, then dlerror() will just return
   * NULL.
   */
  dlerror();
#endif  /* RTLD_DEFAULT */
}
327
328
/* Creates and opens a unique temporary file from the "XXXXXX" template in
 * req->path. Prefers mkostemp(O_CLOEXEC) when libc and kernel support it;
 * otherwise falls back to mkstemp() plus uv__cloexec() under the loop's
 * cloexec lock (async requests only). On failure the template string is
 * clobbered to "".
 */
static int uv__fs_mkstemp(uv_fs_t* req) {
  static uv_once_t once = UV_ONCE_INIT;
  int r;
#ifdef O_CLOEXEC
  /* Sticky flag: set once mkostemp() has reported EINVAL so later calls
   * skip straight to the fallback. */
  static _Atomic int no_cloexec_support;
#endif
  static const char pattern[] = "XXXXXX";
  static const size_t pattern_size = sizeof(pattern) - 1;
  char* path;
  size_t path_length;

  path = (char*) req->path;
  path_length = strlen(path);

  /* EINVAL can be returned for 2 reasons:
      1. The template's last 6 characters were not XXXXXX
      2. open() didn't support O_CLOEXEC
     We want to avoid going to the fallback path in case
     of 1, so it's manually checked before. */
  if (path_length < pattern_size ||
      strcmp(path + path_length - pattern_size, pattern)) {
    errno = EINVAL;
    r = -1;
    goto clobber;
  }

  uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
  if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 &&
      uv__mkostemp != NULL) {
    r = uv__mkostemp(path, O_CLOEXEC);

    if (r >= 0)
      return r;

    /* If mkostemp() returns EINVAL, it means the kernel doesn't
       support O_CLOEXEC, so we just fallback to mkstemp() below. */
    if (errno != EINVAL)
      goto clobber;

    /* We set the static variable so that next calls don't even
       try to use mkostemp. */
    atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed);
  }
#endif  /* O_CLOEXEC */

  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = mkstemp(path);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

clobber:
  if (r < 0)
    path[0] = '\0';
  return r;
}
399
400
/* Opens req->path with req->flags / req->mode, making sure the resulting fd
 * is close-on-exec. Without native O_CLOEXEC the flag is applied after
 * open(); async requests additionally hold the loop's cloexec read lock
 * while doing so.
 */
static ssize_t uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
  return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else  /* O_CLOEXEC */
  int r;

  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
#endif  /* O_CLOEXEC */
}
428
429
/* Reads into req->bufs at offset req->off, or at the current file position
 * when the offset is negative. At most uv__getiovmax() buffers are consumed
 * per call. The buffer list is released here for async requests; synchronous
 * callers keep ownership of their array.
 */
static ssize_t uv__fs_read(uv_fs_t* req) {
  const struct iovec* bufs;
  unsigned int iovmax;
  size_t nbufs;
  ssize_t r;
  off_t off;
  int fd;

  fd = req->file;
  off = req->off;
  bufs = (const struct iovec*) req->bufs;
  nbufs = req->nbufs;

  /* Clamp to the platform's iovec limit; the caller retries for the rest. */
  iovmax = uv__getiovmax();
  if (nbufs > iovmax)
    nbufs = iovmax;

  r = 0;
  if (off < 0) {
    if (nbufs == 1)
      r = read(fd, bufs->iov_base, bufs->iov_len);
    else if (nbufs > 1)
      r = readv(fd, bufs, nbufs);
  } else {
    if (nbufs == 1)
      r = pread(fd, bufs->iov_base, bufs->iov_len, off);
    else if (nbufs > 1)
      r = preadv(fd, bufs, nbufs, off);
  }

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (r == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = uv__fstat(fd, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  /* We don't own the buffer list in the synchronous case. */
  if (req->cb != NULL)
    if (req->bufs != req->bufsml)
      uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return r;
}
482
483
uv__fs_scandir_filter(const uv__dirent_t * dent)484 static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
485 return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
486 }
487
488
uv__fs_scandir_sort(const uv__dirent_t ** a,const uv__dirent_t ** b)489 static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
490 return strcmp((*a)->d_name, (*b)->d_name);
491 }
492
493
uv__fs_scandir(uv_fs_t * req)494 static ssize_t uv__fs_scandir(uv_fs_t* req) {
495 uv__dirent_t** dents;
496 int n;
497
498 dents = NULL;
499 n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);
500
501 /* NOTE: We will use nbufs as an index field */
502 req->nbufs = 0;
503
504 if (n == 0) {
505 /* OS X still needs to deallocate some memory.
506 * Memory was allocated using the system allocator, so use free() here.
507 */
508 free(dents);
509 dents = NULL;
510 } else if (n == -1) {
511 return n;
512 }
513
514 req->ptr = dents;
515
516 return n;
517 }
518
/* Allocates a uv_dir_t, opens req->path as a directory stream inside it and
 * stores it in req->ptr. Returns 0 on success, -1 (errno set by the failing
 * call) otherwise.
 */
static int uv__fs_opendir(uv_fs_t* req) {
  uv_dir_t* dir = uv__malloc(sizeof(*dir));

  if (dir != NULL) {
    dir->dir = opendir(req->path);
    if (dir->dir != NULL) {
      req->ptr = dir;
      return 0;
    }
  }

  /* Either the allocation or opendir() failed; uv__free(NULL) is a no-op. */
  uv__free(dir);
  req->ptr = NULL;
  return -1;
}
538
/* Reads up to dir->nentries entries from the open directory stream into
 * dir->dirents, skipping "." and "..". Returns the number of entries stored
 * (0 on end of directory) or -1 on error, in which case every name allocated
 * so far is released first.
 */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL on end of directory, as well as on error.  errno
       is used to differentiate between the two conditions. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;
    }

    /* Skipped entries do not consume a dirents slot. */
    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  /* Undo partial progress so the caller does not see half-filled entries. */
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}
584
/* Closes the directory stream held by the uv_dir_t in req->ptr and frees
 * the uv_dir_t itself. Always succeeds.
 */
static int uv__fs_closedir(uv_fs_t* req) {
  uv_dir_t* dir = req->ptr;

  /* The stream may already have been shut down by an earlier call. */
  if (dir->dir != NULL) {
    closedir(dir->dir);
    dir->dir = NULL;
  }

  uv__free(dir);
  req->ptr = NULL;
  return 0;
}
599
/* stat()s the file system containing req->path into a freshly allocated
 * uv_statfs_t stored in req->ptr. Platforms without statfs() use statvfs();
 * f_type is reported as 0 where the underlying struct has no such field.
 */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
#if defined(__sun)      || \
    defined(__MVS__)    || \
    defined(__NetBSD__) || \
    defined(__HAIKU__)  || \
    defined(__QNX__)
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif  /* defined(__sun) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__sun)        || \
    defined(__MVS__)      || \
    defined(__OpenBSD__)  || \
    defined(__NetBSD__)   || \
    defined(__HAIKU__)    || \
    defined(__QNX__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;
  req->ptr = stat_fs;
  return 0;
}
642
uv__fs_pathmax_size(const char * path)643 static ssize_t uv__fs_pathmax_size(const char* path) {
644 ssize_t pathmax;
645
646 pathmax = pathconf(path, _PC_PATH_MAX);
647
648 if (pathmax == -1)
649 pathmax = UV__PATH_MAX;
650
651 return pathmax;
652 }
653
/* Reads the target of the symlink req->path into a NUL-terminated heap
 * buffer stored in req->ptr. The buffer is sized from pathconf() when the
 * platform has a PATH_MAX notion, otherwise from lstat()'s st_size.
 */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX.  Read size of link.  */
  struct stat st;
  int ret;
  ret = uv__lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys.  */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte. */
  if (len == maxlen) {
    buf = uv__reallocf(buf, len + 1);

    /* uv__reallocf() already freed the old buffer on failure. */
    if (buf == NULL)
      return -1;
  }

  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
712
/* Canonicalizes req->path into a heap buffer stored in req->ptr.
 * POSIX.1-2008 realpath() allocates the buffer itself; older systems get a
 * buffer sized by uv__fs_pathmax_size().
 */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  buf = realpath(req->path, NULL);
  if (buf == NULL)
    return -1;
#else
  ssize_t len;

  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;

  return 0;
}
741
/* Userland fallback for sendfile(): copies req->bufsml[0].len bytes from
 * the source fd (stashed in req->flags) to req->file through an 8 kB stack
 * buffer. See the block comment inside for the exact error-reporting rules.
 */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  len = req->bufsml[0].len;
  in_fd = req->flags;
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   *        return the number of sent bytes instead of an error because pread()
   *        is, in theory, idempotent. However, special files in /dev or /proc
   *        may support pread() but not necessarily return the same data on
   *        successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   *        before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;

    if (nread == -1) {
      /* First read failed with "not seekable": retry once with read(). */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Drain the chunk we just read, polling through short/blocked writes. */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      /* Any revents bit other than POLLOUT means the fd is in trouble. */
      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
854
855
856 #ifdef __linux__
857 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
858 * in copy_file_range() when it shouldn't. There is no workaround except to
859 * fall back to a regular copy.
860 */
static int uv__is_buggy_cephfs(int fd) {
  struct statfs fs;

  if (fstatfs(fd, &fs) == -1)
    return 0;

  /* Only CephFS mounts on kernels older than 4.20 are affected. */
  if (fs.f_type != /* CephFS */ 0xC36400)
    return 0;

  return uv__kernel_version() < /* 4.20.0 */ 0x041400;
}
872
873
/* Returns 1 when fd lives on an SMB/SMB2/CIFS mount, 0 otherwise (including
 * when fstatfs() itself fails).
 */
static int uv__is_cifs_or_smb(int fd) {
  struct statfs fs;

  if (fstatfs(fd, &fs) == -1)
    return 0;

  if ((unsigned) fs.f_type == 0x0000517Bu)  /* SMB */
    return 1;
  if ((unsigned) fs.f_type == 0xFE534D42u)  /* SMB2 */
    return 1;
  if ((unsigned) fs.f_type == 0xFF534D42u)  /* CIFS */
    return 1;

  return 0;
}
889
890
/* Attempts copy_file_range() and translates the various "this will never
 * work here" failures into ENOSYS so the caller falls back to sendfile().
 * Genuine transient errors are passed through unchanged.
 */
static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
                                          int out_fd, size_t len) {
  /* Sticky flag: once the kernel reports ENOSYS we stop retrying. */
  static _Atomic int no_copy_file_range_support;
  ssize_t r;

  if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) {
    errno = ENOSYS;
    return -1;
  }

  r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);

  if (r != -1)
    return r;

  switch (errno) {
  case EACCES:
    /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
     * copy-from command when it shouldn't.
     */
    if (uv__is_buggy_cephfs(in_fd))
      errno = ENOSYS;  /* Use fallback. */
    break;
  case ENOSYS:
    atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed);
    break;
  case EPERM:
    /* It's been reported that CIFS spuriously fails.
     * Consider it a transient error.
     */
    if (uv__is_cifs_or_smb(out_fd))
      errno = ENOSYS;  /* Use fallback. */
    break;
  case ENOTSUP:
  case EXDEV:
    /* ENOTSUP - it could work on another file system type.
     * EXDEV - it will not work when in_fd and out_fd are not on the same
     *         mounted filesystem (pre Linux 5.3)
     */
    errno = ENOSYS;  /* Use fallback. */
    break;
  }

  return -1;
}
936
937 #endif /* __linux__ */
938
939
/* Copies req->bufsml[0].len bytes from the fd stashed in req->flags to
 * req->file starting at req->off, preferring the platform's in-kernel
 * primitive (copy_file_range / sendfile) and falling back to
 * uv__fs_sendfile_emul() for fd types or errors those cannot handle.
 */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;
    size_t len;
    int try_sendfile;

    off = req->off;
    len = req->bufsml[0].len;
    try_sendfile = 1;

#ifdef __linux__
    /* Only fall through to sendfile() when copy_file_range() reported
     * (or was translated to) ENOSYS. */
    r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
    try_sendfile = (r == -1 && errno == ENOSYS);
#endif

    if (try_sendfile)
      r = sendfile(out_fd, in_fd, &off, len);

    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#elif defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
#if defined(__FreeBSD__)
    off_t off;

    off = req->off;
    r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
    if (r >= 0) {
        r = off - req->off;
        req->off = off;
        return r;
    }
#endif
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

     /*
     * The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about it's value in case of other errors, so better
     * not depend on the potential wrong assumption that is was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
1047
1048
/* Sets the access/modification times of the path in req->path (following
 * symlinks) with whatever interface the platform provides; ENOSYS where
 * there is none.
 */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__sun)                                                         \
    || defined(__HAIKU__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return utimes(req->path, tv);
#elif defined(_AIX)                                                           \
    && !defined(_AIX71)
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
1086
1087
/* Like uv__fs_utime() but operates on the symlink itself
 * (AT_SYMLINK_NOFOLLOW / lutimes) on platforms that support it.
 */
static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__linux__)            ||                                           \
    defined(_AIX71)               ||                                           \
    defined(__sun)                ||                                           \
    defined(__HAIKU__)            ||                                           \
    defined(__GNU__)              ||                                           \
    defined(__OpenBSD__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
#elif defined(__APPLE__)          ||                                          \
      defined(__DragonFly__)      ||                                          \
      defined(__FreeBSD__)        ||                                          \
      defined(__NetBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return lutimes(req->path, tv);
#else
  errno = ENOSYS;
  return -1;
#endif
}
1112
1113
/* Writes req->bufs to req->file at offset req->off, or at the current file
 * position when the offset is negative. Returns the byte count from the
 * underlying syscall, or 0 when there are no buffers.
 */
static ssize_t uv__fs_write(uv_fs_t* req) {
  const struct iovec* iov;
  size_t cnt;
  off_t offset;
  int fd;

  fd = req->file;
  offset = req->off;
  iov = (const struct iovec*) req->bufs;
  cnt = req->nbufs;

  if (cnt == 0)
    return 0;

  /* A negative offset means "use the current file position". */
  if (offset < 0) {
    if (cnt == 1)
      return write(fd, iov->iov_base, iov->iov_len);
    return writev(fd, iov, cnt);
  }

  if (cnt == 1)
    return pwrite(fd, iov->iov_base, iov->iov_len, offset);
  return pwritev(fd, iov, cnt, offset);
}
1141
1142
uv__fs_copyfile(uv_fs_t * req)1143 static ssize_t uv__fs_copyfile(uv_fs_t* req) {
1144 uv_fs_t fs_req;
1145 uv_file srcfd;
1146 uv_file dstfd;
1147 struct stat src_statsbuf;
1148 struct stat dst_statsbuf;
1149 int dst_flags;
1150 int result;
1151 int err;
1152 off_t bytes_to_send;
1153 off_t in_offset;
1154 off_t bytes_written;
1155 size_t bytes_chunk;
1156
1157 dstfd = -1;
1158 err = 0;
1159
1160 /* Open the source file. */
1161 srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
1162 uv_fs_req_cleanup(&fs_req);
1163
1164 if (srcfd < 0)
1165 return srcfd;
1166
1167 /* Get the source file's mode. */
1168 if (uv__fstat(srcfd, &src_statsbuf)) {
1169 err = UV__ERR(errno);
1170 goto out;
1171 }
1172
1173 dst_flags = O_WRONLY | O_CREAT;
1174
1175 if (req->flags & UV_FS_COPYFILE_EXCL)
1176 dst_flags |= O_EXCL;
1177
1178 /* Open the destination file. */
1179 dstfd = uv_fs_open(NULL,
1180 &fs_req,
1181 req->new_path,
1182 dst_flags,
1183 src_statsbuf.st_mode,
1184 NULL);
1185 uv_fs_req_cleanup(&fs_req);
1186
1187 if (dstfd < 0) {
1188 err = dstfd;
1189 goto out;
1190 }
1191
1192 /* If the file is not being opened exclusively, verify that the source and
1193 destination are not the same file. If they are the same, bail out early. */
1194 if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
1195 /* Get the destination file's mode. */
1196 if (uv__fstat(dstfd, &dst_statsbuf)) {
1197 err = UV__ERR(errno);
1198 goto out;
1199 }
1200
1201 /* Check if srcfd and dstfd refer to the same file */
1202 if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
1203 src_statsbuf.st_ino == dst_statsbuf.st_ino) {
1204 goto out;
1205 }
1206
1207 /* Truncate the file in case the destination already existed. */
1208 if (ftruncate(dstfd, 0) != 0) {
1209 err = UV__ERR(errno);
1210
1211 /* ftruncate() on ceph-fuse fails with EACCES when the file is created
1212 * with read only permissions. Since ftruncate() on a newly created
1213 * file is a meaningless operation anyway, detect that condition
1214 * and squelch the error.
1215 */
1216 if (err != UV_EACCES)
1217 goto out;
1218
1219 if (dst_statsbuf.st_size > 0)
1220 goto out;
1221
1222 err = 0;
1223 }
1224 }
1225
1226 if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
1227 err = UV__ERR(errno);
1228 #ifdef __linux__
1229 /* fchmod() on CIFS shares always fails with EPERM unless the share is
1230 * mounted with "noperm". As fchmod() is a meaningless operation on such
1231 * shares anyway, detect that condition and squelch the error.
1232 */
1233 if (err != UV_EPERM)
1234 goto out;
1235
1236 if (!uv__is_cifs_or_smb(dstfd))
1237 goto out;
1238
1239 err = 0;
1240 #else /* !__linux__ */
1241 goto out;
1242 #endif /* !__linux__ */
1243 }
1244
1245 #ifdef FICLONE
1246 if (req->flags & UV_FS_COPYFILE_FICLONE ||
1247 req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1248 if (ioctl(dstfd, FICLONE, srcfd) == 0) {
1249 /* ioctl() with FICLONE succeeded. */
1250 goto out;
1251 }
1252 /* If an error occurred and force was set, return the error to the caller;
1253 * fall back to sendfile() when force was not set. */
1254 if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1255 err = UV__ERR(errno);
1256 goto out;
1257 }
1258 }
1259 #else
1260 if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1261 err = UV_ENOSYS;
1262 goto out;
1263 }
1264 #endif
1265
1266 bytes_to_send = src_statsbuf.st_size;
1267 in_offset = 0;
1268 while (bytes_to_send != 0) {
1269 bytes_chunk = SSIZE_MAX;
1270 if (bytes_to_send < (off_t) bytes_chunk)
1271 bytes_chunk = bytes_to_send;
1272 uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
1273 bytes_written = fs_req.result;
1274 uv_fs_req_cleanup(&fs_req);
1275
1276 if (bytes_written < 0) {
1277 err = bytes_written;
1278 break;
1279 }
1280
1281 bytes_to_send -= bytes_written;
1282 in_offset += bytes_written;
1283 }
1284
1285 out:
1286 if (err < 0)
1287 result = err;
1288 else
1289 result = 0;
1290
1291 /* Close the source file. */
1292 err = uv__close_nocheckstdio(srcfd);
1293
1294 /* Don't overwrite any existing errors. */
1295 if (err != 0 && result == 0)
1296 result = err;
1297
1298 /* Close the destination file if it is open. */
1299 if (dstfd >= 0) {
1300 err = uv__close_nocheckstdio(dstfd);
1301
1302 /* Don't overwrite any existing errors. */
1303 if (err != 0 && result == 0)
1304 result = err;
1305
1306 /* Remove the destination file if something went wrong. */
1307 if (result != 0) {
1308 uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
1309 /* Ignore the unlink return value, as an error already happened. */
1310 uv_fs_req_cleanup(&fs_req);
1311 }
1312 }
1313
1314 if (result == 0)
1315 return 0;
1316
1317 errno = UV__ERR(result);
1318 return -1;
1319 }
1320
/* Copy the fields of a platform `struct stat` into libuv's portable
 * uv_stat_t. The timestamp member names and the availability of
 * st_birthtim / st_flags / st_gen differ per platform, hence the
 * #ifdef ladder: each branch fills the same uv_stat_t members from
 * whatever the host's struct stat actually provides, substituting the
 * ctime (or zero) when a field does not exist.
 */
uv__to_stat(struct stat * src,uv_stat_t * dst)1321 static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
1322 dst->st_dev = src->st_dev;
1323 dst->st_mode = src->st_mode;
1324 dst->st_nlink = src->st_nlink;
1325 dst->st_uid = src->st_uid;
1326 dst->st_gid = src->st_gid;
1327 dst->st_rdev = src->st_rdev;
1328 dst->st_ino = src->st_ino;
1329 dst->st_size = src->st_size;
1330 dst->st_blksize = src->st_blksize;
1331 dst->st_blocks = src->st_blocks;
1332
/* macOS spells the nanosecond timestamps st_*timespec and has real
 * birth time, flags and generation number. */
1333 #if defined(__APPLE__)
1334 dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
1335 dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
1336 dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
1337 dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
1338 dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
1339 dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
1340 dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
1341 dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
1342 dst->st_flags = src->st_flags;
1343 dst->st_gen = src->st_gen;
/* Android (Bionic) exposes split st_*time / st_*timensec fields and has
 * no birth time; ctime is used as a stand-in. */
1344 #elif defined(__ANDROID__)
1345 dst->st_atim.tv_sec = src->st_atime;
1346 dst->st_atim.tv_nsec = src->st_atimensec;
1347 dst->st_mtim.tv_sec = src->st_mtime;
1348 dst->st_mtim.tv_nsec = src->st_mtimensec;
1349 dst->st_ctim.tv_sec = src->st_ctime;
1350 dst->st_ctim.tv_nsec = src->st_ctimensec;
1351 dst->st_birthtim.tv_sec = src->st_ctime;
1352 dst->st_birthtim.tv_nsec = src->st_ctimensec;
1353 dst->st_flags = 0;
1354 dst->st_gen = 0;
/* Platforms/feature-test-macro combinations that expose the POSIX.1-2008
 * struct timespec members (st_atim et al.) directly. */
1355 #elif !defined(_AIX) && \
1356 !defined(__MVS__) && ( \
1357 defined(__DragonFly__) || \
1358 defined(__FreeBSD__) || \
1359 defined(__OpenBSD__) || \
1360 defined(__NetBSD__) || \
1361 defined(_GNU_SOURCE) || \
1362 defined(_BSD_SOURCE) || \
1363 defined(_SVID_SOURCE) || \
1364 defined(_XOPEN_SOURCE) || \
1365 defined(_DEFAULT_SOURCE))
1366 dst->st_atim.tv_sec = src->st_atim.tv_sec;
1367 dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
1368 dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
1369 dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
1370 dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
1371 dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
1372 # if defined(__FreeBSD__) || \
1373 defined(__NetBSD__)
1374 dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
1375 dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
1376 dst->st_flags = src->st_flags;
1377 dst->st_gen = src->st_gen;
1378 # else
/* No birth time on this platform; fall back to ctime. */
1379 dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
1380 dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
1381 dst->st_flags = 0;
1382 dst->st_gen = 0;
1383 # endif
/* Last resort: only whole-second st_*time fields are available, so the
 * nanosecond parts are reported as zero. */
1384 #else
1385 dst->st_atim.tv_sec = src->st_atime;
1386 dst->st_atim.tv_nsec = 0;
1387 dst->st_mtim.tv_sec = src->st_mtime;
1388 dst->st_mtim.tv_nsec = 0;
1389 dst->st_ctim.tv_sec = src->st_ctime;
1390 dst->st_ctim.tv_nsec = 0;
1391 dst->st_birthtim.tv_sec = src->st_ctime;
1392 dst->st_birthtim.tv_nsec = 0;
1393 dst->st_flags = 0;
1394 dst->st_gen = 0;
1395 #endif
1396 }
1397
1398
/* Stat via the Linux statx(2) system call when available.
 *
 * Serves all three flavors: fstat (is_fstat, uses |fd| with AT_EMPTY_PATH),
 * lstat (is_lstat, adds AT_SYMLINK_NOFOLLOW) and plain stat on |path|.
 * Returns 0 on success, -1 with errno set on a real stat failure, or
 * UV_ENOSYS when statx() is unusable on this system; callers fall back to
 * the classic stat()/lstat()/fstat() path on UV_ENOSYS. Once statx() is
 * found to be unusable, that fact is cached in |no_statx| so subsequent
 * calls skip the syscall entirely.
 */
uv__fs_statx(int fd,const char * path,int is_fstat,int is_lstat,uv_stat_t * buf)1399 static int uv__fs_statx(int fd,
1400 const char* path,
1401 int is_fstat,
1402 int is_lstat,
1403 uv_stat_t* buf) {
/* The -1 "unsupported" sentinel from the raw syscall must be
 * distinguishable from UV_ENOSYS. */
1404 STATIC_ASSERT(UV_ENOSYS != -1);
1405 #ifdef __linux__
1406 static _Atomic int no_statx;
1407 struct uv__statx statxbuf;
1408 int dirfd;
1409 int flags;
1410 int mode;
1411 int rc;
1412
/* Relaxed ordering is sufficient: this is a monotonic one-way flag. */
1413 if (atomic_load_explicit(&no_statx, memory_order_relaxed))
1414 return UV_ENOSYS;
1415
1416 dirfd = AT_FDCWD;
1417 flags = 0; /* AT_STATX_SYNC_AS_STAT */
1418 mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */
1419
1420 if (is_fstat) {
/* fstat flavor: stat the fd itself via an empty path. */
1421 dirfd = fd;
1422 flags |= 0x1000; /* AT_EMPTY_PATH */
1423 }
1424
1425 if (is_lstat)
1426 flags |= AT_SYMLINK_NOFOLLOW;
1427
1428 rc = uv__statx(dirfd, path, flags, mode, &statxbuf);
1429
1430 switch (rc) {
1431 case 0:
1432 break;
1433 case -1:
1434 /* EPERM happens when a seccomp filter rejects the system call.
1435 * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
1436 * EOPNOTSUPP is used on DVS exported filesystems
1437 */
1438 if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
1439 return -1;
1440 /* Fall through. */
1441 default:
1442 /* Normally on success, zero is returned and On error, -1 is returned.
1443 * Observed on S390 RHEL running in a docker container with statx not
1444 * implemented, rc might return 1 with 0 set as the error code in which
1445 * case we return ENOSYS.
1446 */
1447 atomic_store_explicit(&no_statx, 1, memory_order_relaxed);
1448 return UV_ENOSYS;
1449 }
1450
1451 uv__statx_to_stat(&statxbuf, buf);
1452
1453 return 0;
1454 #else
/* Non-Linux: no statx(); callers use the portable fallback. */
1455 return UV_ENOSYS;
1456 #endif /* __linux__ */
1457 }
1458
1459
/* Stat |path|, following symlinks.
 *
 * Prefers the statx()-based fast path; when that reports UV_ENOSYS,
 * falls back to uv__stat() and converts the result into |buf| with
 * uv__to_stat(). Returns the underlying call's result (0 on success).
 */
static int uv__fs_stat(const char *path, uv_stat_t *buf) {
  struct stat sb;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
  if (rc == UV_ENOSYS) {
    rc = uv__stat(path, &sb);
    if (rc == 0)
      uv__to_stat(&sb, buf);
  }

  return rc;
}
1474
1475
/* Stat |path| without following a trailing symlink.
 *
 * Prefers the statx()-based fast path; when that reports UV_ENOSYS,
 * falls back to uv__lstat() and converts the result into |buf| with
 * uv__to_stat(). Returns the underlying call's result (0 on success).
 */
static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
  struct stat sb;
  int rc;

  rc = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
  if (rc == UV_ENOSYS) {
    rc = uv__lstat(path, &sb);
    if (rc == 0)
      uv__to_stat(&sb, buf);
  }

  return rc;
}
1490
1491
/* Stat the open file descriptor |fd|.
 *
 * Prefers the statx()-based fast path (empty path + AT_EMPTY_PATH); when
 * that reports UV_ENOSYS, falls back to uv__fstat() and converts the
 * result into |buf| with uv__to_stat(). Returns the underlying call's
 * result (0 on success).
 */
static int uv__fs_fstat(int fd, uv_stat_t *buf) {
  struct stat sb;
  int rc;

  rc = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
  if (rc == UV_ENOSYS) {
    rc = uv__fstat(fd, &sb);
    if (rc == 0)
      uv__to_stat(&sb, buf);
  }

  return rc;
}
1506
/* Advance a uv_buf_t array past |size| consumed bytes after a partial
 * read/write. Fully consumed buffers are skipped; a partially consumed
 * buffer has its base/len adjusted in place. Returns the index of the
 * first buffer that still has data left (i.e. how many entries the
 * caller should advance its bufs pointer by).
 */
static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
  size_t idx = 0;

  /* Skip the buffers that were completely consumed. */
  while (size > 0 && bufs[idx].len <= size) {
    size -= bufs[idx].len;
    idx++;
  }

  /* Trim the front of a partially consumed buffer. */
  if (size > 0) {
    bufs[idx].base += size;
    bufs[idx].len -= size;
  }

  return idx;
}
1520
/* Write all of req->bufs, batching at most IOV_MAX buffers per
 * uv__fs_write() call and retrying on EINTR. Advances req->off for
 * positioned writes and uses uv__fs_buf_offset() to step past buffers
 * consumed by partial writes. Frees a heap-allocated bufs array on exit
 * (req->bufsml is the inline small-array case). Returns the total number
 * of bytes written, or the first error when nothing was written yet.
 */
uv__fs_write_all(uv_fs_t * req)1521 static ssize_t uv__fs_write_all(uv_fs_t* req) {
1522 unsigned int iovmax;
1523 unsigned int nbufs;
1524 uv_buf_t* bufs;
1525 ssize_t total;
1526 ssize_t result;
1527
1528 iovmax = uv__getiovmax();
/* Remember the original array and count; req->nbufs/req->bufs are
 * mutated below as batches are issued. */
1529 nbufs = req->nbufs;
1530 bufs = req->bufs;
1531 total = 0;
1532
1533 while (nbufs > 0) {
/* Issue at most iovmax buffers per underlying writev. */
1534 req->nbufs = nbufs;
1535 if (req->nbufs > iovmax)
1536 req->nbufs = iovmax;
1537
1538 do
1539 result = uv__fs_write(req);
1540 while (result < 0 && errno == EINTR);
1541
1542 if (result <= 0) {
/* Report an error only if no data was written at all; otherwise
 * return the byte count accumulated so far. */
1543 if (total == 0)
1544 total = result;
1545 break;
1546 }
1547
/* Negative offset means "use the current file position"; only
 * positioned writes track the offset explicitly. */
1548 if (req->off >= 0)
1549 req->off += result;
1550
/* Skip the buffers (and partial buffer) consumed by this write. */
1551 req->nbufs = uv__fs_buf_offset(req->bufs, result);
1552 req->bufs += req->nbufs;
1553 nbufs -= req->nbufs;
1554 total += result;
1555 }
1556
/* Heap-allocated copies (nbufs > ARRAY_SIZE(bufsml)) must be freed. */
1557 if (bufs != req->bufsml)
1558 uv__free(bufs);
1559
1560 req->bufs = NULL;
1561 req->nbufs = 0;
1562
1563 return total;
1564 }
1565
1566
/* Thread-pool work function: dispatch the request to the matching
 * syscall/wrapper and store the outcome in req->result.
 *
 * Per the note at the top of this file, workers either return negated
 * errno codes directly or return -1 with errno set; the epilogue below
 * normalizes both into req->result. Most operations are retried on
 * EINTR, except CLOSE (the fd state after an interrupted close is
 * unspecified) and READ (a partial read is a valid result).
 */
uv__fs_work(struct uv__work * w)1567 static void uv__fs_work(struct uv__work* w) {
1568 int retry_on_eintr;
1569 uv_fs_t* req;
1570 ssize_t r;
1571
1572 req = container_of(w, uv_fs_t, work_req);
1573 retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
1574 req->fs_type == UV_FS_READ);
1575
1576 do {
/* Reset errno so a stale value can't be mistaken for this
 * iteration's failure. */
1577 errno = 0;
1578
/* X(type, action) expands to one switch case per operation. */
1579 #define X(type, action) \
1580 case UV_FS_ ## type: \
1581 r = action; \
1582 break;
1583
1584 switch (req->fs_type) {
1585 X(ACCESS, access(req->path, req->flags));
1586 X(CHMOD, chmod(req->path, req->mode));
1587 X(CHOWN, chown(req->path, req->uid, req->gid));
1588 X(CLOSE, uv__fs_close(req->file));
1589 X(COPYFILE, uv__fs_copyfile(req));
1590 X(FCHMOD, fchmod(req->file, req->mode));
1591 X(FCHOWN, fchown(req->file, req->uid, req->gid));
1592 X(LCHOWN, lchown(req->path, req->uid, req->gid));
1593 X(FDATASYNC, uv__fs_fdatasync(req));
1594 X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
1595 X(FSYNC, uv__fs_fsync(req));
1596 X(FTRUNCATE, ftruncate(req->file, req->off));
1597 X(FUTIME, uv__fs_futime(req));
1598 X(LUTIME, uv__fs_lutime(req));
1599 X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
1600 X(LINK, link(req->path, req->new_path));
1601 X(MKDIR, mkdir(req->path, req->mode));
1602 X(MKDTEMP, uv__fs_mkdtemp(req));
1603 X(MKSTEMP, uv__fs_mkstemp(req));
1604 X(OPEN, uv__fs_open(req));
1605 X(READ, uv__fs_read(req));
1606 X(SCANDIR, uv__fs_scandir(req));
1607 X(OPENDIR, uv__fs_opendir(req));
1608 X(READDIR, uv__fs_readdir(req));
1609 X(CLOSEDIR, uv__fs_closedir(req));
1610 X(READLINK, uv__fs_readlink(req));
1611 X(REALPATH, uv__fs_realpath(req));
1612 X(RENAME, rename(req->path, req->new_path));
1613 X(RMDIR, rmdir(req->path));
1614 X(SENDFILE, uv__fs_sendfile(req));
1615 X(STAT, uv__fs_stat(req->path, &req->statbuf));
1616 X(STATFS, uv__fs_statfs(req));
1617 X(SYMLINK, symlink(req->path, req->new_path));
1618 X(UNLINK, unlink(req->path));
1619 X(UTIME, uv__fs_utime(req));
1620 X(WRITE, uv__fs_write_all(req));
/* Unknown fs_type is a libuv programming error. */
1621 default: abort();
1622 }
1623 #undef X
1624 } while (r == -1 && errno == EINTR && retry_on_eintr);
1625
/* Normalize: -1 means "errno holds the error"; anything else is the
 * (possibly already-negated) result itself. */
1626 if (r == -1)
1627 req->result = UV__ERR(errno);
1628 else
1629 req->result = r;
1630
/* Successful stat-family requests also expose the stat buffer via
 * req->ptr for uv_fs_get_ptr()-style access. */
1631 if (r == 0 && (req->fs_type == UV_FS_STAT ||
1632 req->fs_type == UV_FS_FSTAT ||
1633 req->fs_type == UV_FS_LSTAT)) {
1634 req->ptr = &req->statbuf;
1635 }
1636 }
1637
1638
/* Thread-pool completion callback, runs on the loop thread: unregister
 * the request and invoke the user callback. A cancelled request never
 * ran its work function, so its result is rewritten to UV_ECANCELED.
 */
uv__fs_done(struct uv__work * w,int status)1639 static void uv__fs_done(struct uv__work* w, int status) {
1640 uv_fs_t* req;
1641
1642 req = container_of(w, uv_fs_t, work_req);
1643 uv__req_unregister(req->loop, req);
1644
1645 if (status == UV_ECANCELED) {
/* The work function never ran, so result must still be 0. */
1646 assert(req->result == 0);
1647 req->result = UV_ECANCELED;
1648 }
1649
1650 req->cb(req);
1651 }
1652
1653
/* Register an asynchronous fs request with the loop and submit it to the
 * thread pool; uv__fs_work runs on a worker thread and uv__fs_done
 * delivers the result on the loop thread. */
uv__fs_post(uv_loop_t * loop,uv_fs_t * req)1654 void uv__fs_post(uv_loop_t* loop, uv_fs_t* req) {
1655 uv__req_register(loop, req);
1656 uv__work_submit(loop,
/* NOTE(review): the FFRT build adds an extra leading parameter to
 * uv__work_submit — presumably a task handle/qos slot; confirm against
 * the USE_FFRT declaration. */
1657 #ifdef USE_FFRT
1658 NULL,
1659 #endif
1660 &req->work_req,
1661 UV__WORK_FAST_IO,
1662 uv__fs_work,
1663 uv__fs_done);
1664 }
1665
1666
/* The public uv_fs_*() entry points below share one shape: INIT(TYPE)
 * initializes the request for the given operation, PATH/PATH2 record the
 * path argument(s), and POST either runs the request synchronously
 * (cb == NULL) or submits it to the thread pool. The macros are defined
 * earlier in this file, outside this excerpt — NOTE(review): PATH is
 * presumed to copy the path for async requests (see uv_fs_req_cleanup)
 * and POST to contain the function's return; confirm against the macro
 * definitions. */

/* access(2): check accessibility of |path| per |flags| (F_OK/R_OK/...). */
uv_fs_access(uv_loop_t * loop,uv_fs_t * req,const char * path,int flags,uv_fs_cb cb)1667 int uv_fs_access(uv_loop_t* loop,
1668 uv_fs_t* req,
1669 const char* path,
1670 int flags,
1671 uv_fs_cb cb) {
1672 INIT(ACCESS);
1673 PATH;
1674 req->flags = flags;
1675 POST;
1676 }
1677
1678
/* chmod(2): change the mode of |path|. */
uv_fs_chmod(uv_loop_t * loop,uv_fs_t * req,const char * path,int mode,uv_fs_cb cb)1679 int uv_fs_chmod(uv_loop_t* loop,
1680 uv_fs_t* req,
1681 const char* path,
1682 int mode,
1683 uv_fs_cb cb) {
1684 INIT(CHMOD);
1685 PATH;
1686 req->mode = mode;
1687 POST;
1688 }
1689
1690
/* chown(2): change owner/group of |path|. */
uv_fs_chown(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_uid_t uid,uv_gid_t gid,uv_fs_cb cb)1691 int uv_fs_chown(uv_loop_t* loop,
1692 uv_fs_t* req,
1693 const char* path,
1694 uv_uid_t uid,
1695 uv_gid_t gid,
1696 uv_fs_cb cb) {
1697 INIT(CHOWN);
1698 PATH;
1699 req->uid = uid;
1700 req->gid = gid;
1701 POST;
1702 }
1703
1704
/* close(2): close |file|; async requests may take the io_uring fast
 * path instead of the thread pool. */
uv_fs_close(uv_loop_t * loop,uv_fs_t * req,uv_file file,uv_fs_cb cb)1705 int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1706 INIT(CLOSE);
1707 req->file = file;
1708 if (cb != NULL)
1709 if (uv__iou_fs_close(loop, req))
1710 return 0;
1711 POST;
1712 }
1713
1714
/* fchmod(2): change the mode of open descriptor |file|. */
uv_fs_fchmod(uv_loop_t * loop,uv_fs_t * req,uv_file file,int mode,uv_fs_cb cb)1715 int uv_fs_fchmod(uv_loop_t* loop,
1716 uv_fs_t* req,
1717 uv_file file,
1718 int mode,
1719 uv_fs_cb cb) {
1720 INIT(FCHMOD);
1721 req->file = file;
1722 req->mode = mode;
1723 POST;
1724 }
1725
1726
/* fchown(2): change owner/group of open descriptor |file|. */
uv_fs_fchown(uv_loop_t * loop,uv_fs_t * req,uv_file file,uv_uid_t uid,uv_gid_t gid,uv_fs_cb cb)1727 int uv_fs_fchown(uv_loop_t* loop,
1728 uv_fs_t* req,
1729 uv_file file,
1730 uv_uid_t uid,
1731 uv_gid_t gid,
1732 uv_fs_cb cb) {
1733 INIT(FCHOWN);
1734 req->file = file;
1735 req->uid = uid;
1736 req->gid = gid;
1737 POST;
1738 }
1739
1740
/* lchown(2): change owner/group of |path| without following symlinks. */
uv_fs_lchown(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_uid_t uid,uv_gid_t gid,uv_fs_cb cb)1741 int uv_fs_lchown(uv_loop_t* loop,
1742 uv_fs_t* req,
1743 const char* path,
1744 uv_uid_t uid,
1745 uv_gid_t gid,
1746 uv_fs_cb cb) {
1747 INIT(LCHOWN);
1748 PATH;
1749 req->uid = uid;
1750 req->gid = gid;
1751 POST;
1752 }
1753
1754
/* fdatasync(2): flush data (not necessarily metadata) of |file|; async
 * requests may take the io_uring fsync fast path with the DATASYNC flag. */
uv_fs_fdatasync(uv_loop_t * loop,uv_fs_t * req,uv_file file,uv_fs_cb cb)1755 int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1756 INIT(FDATASYNC);
1757 req->file = file;
1758 if (cb != NULL)
1759 if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1))
1760 return 0;
1761 POST;
1762 }
1763
1764
/* fstat(2): stat open descriptor |file|; async requests may take the
 * io_uring statx fast path. */
uv_fs_fstat(uv_loop_t * loop,uv_fs_t * req,uv_file file,uv_fs_cb cb)1765 int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1766 INIT(FSTAT);
1767 req->file = file;
1768 if (cb != NULL)
1769 if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0))
1770 return 0;
1771 POST;
1772 }
1773
1774
/* fsync(2): flush data and metadata of |file|. */
uv_fs_fsync(uv_loop_t * loop,uv_fs_t * req,uv_file file,uv_fs_cb cb)1775 int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1776 INIT(FSYNC);
1777 req->file = file;
1778 if (cb != NULL)
1779 if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0))
1780 return 0;
1781 POST;
1782 }
1783
1784
/* ftruncate(2): truncate |file| to |off| bytes. */
uv_fs_ftruncate(uv_loop_t * loop,uv_fs_t * req,uv_file file,int64_t off,uv_fs_cb cb)1785 int uv_fs_ftruncate(uv_loop_t* loop,
1786 uv_fs_t* req,
1787 uv_file file,
1788 int64_t off,
1789 uv_fs_cb cb) {
1790 INIT(FTRUNCATE);
1791 req->file = file;
1792 req->off = off;
1793 POST;
1794 }
1795
1796
/* futimes-style: set access/modification times (seconds, as doubles) on
 * open descriptor |file|. */
uv_fs_futime(uv_loop_t * loop,uv_fs_t * req,uv_file file,double atime,double mtime,uv_fs_cb cb)1797 int uv_fs_futime(uv_loop_t* loop,
1798 uv_fs_t* req,
1799 uv_file file,
1800 double atime,
1801 double mtime,
1802 uv_fs_cb cb) {
1803 INIT(FUTIME);
1804 req->file = file;
1805 req->atime = atime;
1806 req->mtime = mtime;
1807 POST;
1808 }
1809
/* lutimes-style: set times on |path| without following symlinks. */
uv_fs_lutime(uv_loop_t * loop,uv_fs_t * req,const char * path,double atime,double mtime,uv_fs_cb cb)1810 int uv_fs_lutime(uv_loop_t* loop,
1811 uv_fs_t* req,
1812 const char* path,
1813 double atime,
1814 double mtime,
1815 uv_fs_cb cb) {
1816 INIT(LUTIME);
1817 PATH;
1818 req->atime = atime;
1819 req->mtime = mtime;
1820 POST;
1821 }
1822
1823
/* lstat(2): stat |path| without following a trailing symlink; async
 * requests may take the io_uring statx fast path. */
uv_fs_lstat(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_fs_cb cb)1824 int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
1825 INIT(LSTAT);
1826 PATH;
1827 if (cb != NULL)
1828 if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1))
1829 return 0;
1830 POST;
1831 }
1832
1833
/* link(2): create hard link |new_path| to |path|; async requests may take
 * the io_uring fast path. */
uv_fs_link(uv_loop_t * loop,uv_fs_t * req,const char * path,const char * new_path,uv_fs_cb cb)1834 int uv_fs_link(uv_loop_t* loop,
1835 uv_fs_t* req,
1836 const char* path,
1837 const char* new_path,
1838 uv_fs_cb cb) {
1839 INIT(LINK);
1840 PATH2;
1841 if (cb != NULL)
1842 if (uv__iou_fs_link(loop, req))
1843 return 0;
1844 POST;
1845 }
1846
1847
/* mkdir(2): create directory |path| with |mode|; async requests may take
 * the io_uring fast path. */
uv_fs_mkdir(uv_loop_t * loop,uv_fs_t * req,const char * path,int mode,uv_fs_cb cb)1848 int uv_fs_mkdir(uv_loop_t* loop,
1849 uv_fs_t* req,
1850 const char* path,
1851 int mode,
1852 uv_fs_cb cb) {
1853 INIT(MKDIR);
1854 PATH;
1855 req->mode = mode;
1856 if (cb != NULL)
1857 if (uv__iou_fs_mkdir(loop, req))
1858 return 0;
1859 POST;
1860 }
1861
1862
/* mkdtemp(3): create a unique temporary directory from template |tpl|.
 * Unlike most requests, the template is always copied (even for sync
 * calls) because mkdtemp mutates it in place — uv_fs_req_cleanup frees
 * it unconditionally for this request type. */
uv_fs_mkdtemp(uv_loop_t * loop,uv_fs_t * req,const char * tpl,uv_fs_cb cb)1863 int uv_fs_mkdtemp(uv_loop_t* loop,
1864 uv_fs_t* req,
1865 const char* tpl,
1866 uv_fs_cb cb) {
1867 INIT(MKDTEMP);
1868 req->path = uv__strdup(tpl);
1869 if (req->path == NULL)
1870 return UV_ENOMEM;
1871 POST;
1872 }
1873
1874
/* mkstemp(3): create and open a unique temporary file from template
 * |tpl|. Same always-copy rule as uv_fs_mkdtemp above. */
uv_fs_mkstemp(uv_loop_t * loop,uv_fs_t * req,const char * tpl,uv_fs_cb cb)1875 int uv_fs_mkstemp(uv_loop_t* loop,
1876 uv_fs_t* req,
1877 const char* tpl,
1878 uv_fs_cb cb) {
1879 INIT(MKSTEMP);
1880 req->path = uv__strdup(tpl);
1881 if (req->path == NULL)
1882 return UV_ENOMEM;
1883 POST;
1884 }
1885
1886
/* open(2): open |path| with |flags| and |mode|; async requests may take
 * the io_uring fast path. */
uv_fs_open(uv_loop_t * loop,uv_fs_t * req,const char * path,int flags,int mode,uv_fs_cb cb)1887 int uv_fs_open(uv_loop_t* loop,
1888 uv_fs_t* req,
1889 const char* path,
1890 int flags,
1891 int mode,
1892 uv_fs_cb cb) {
1893 INIT(OPEN);
1894 PATH;
1895 req->flags = flags;
1896 req->mode = mode;
1897 if (cb != NULL)
1898 if (uv__iou_fs_open(loop, req))
1899 return 0;
1900 POST;
1901 }
1902
1903
/* readv/preadv-style scatter read from |file| into |bufs| at |off|
 * (negative off = current file position).
 *
 * Synchronous calls use the caller's bufs array directly; asynchronous
 * calls copy it (into the inline req->bufsml when it fits, else a heap
 * allocation) because the caller's array may not outlive the request,
 * and may then take the io_uring fast path.
 */
uv_fs_read(uv_loop_t * loop,uv_fs_t * req,uv_file file,const uv_buf_t bufs[],unsigned int nbufs,int64_t off,uv_fs_cb cb)1904 int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
1905 uv_file file,
1906 const uv_buf_t bufs[],
1907 unsigned int nbufs,
1908 int64_t off,
1909 uv_fs_cb cb) {
1910 INIT(READ);
1911
1912 if (bufs == NULL || nbufs == 0)
1913 return UV_EINVAL;
1914
1915 req->off = off;
1916 req->file = file;
1917 req->bufs = (uv_buf_t*) bufs; /* Safe, doesn't mutate |bufs| */
1918 req->nbufs = nbufs;
1919
/* Synchronous path: no copy needed, run inline. */
1920 if (cb == NULL)
1921 goto post;
1922
1923 req->bufs = req->bufsml;
1924 if (nbufs > ARRAY_SIZE(req->bufsml))
1925 req->bufs = uv__malloc(nbufs * sizeof(*bufs));
1926
1927 if (req->bufs == NULL)
1928 return UV_ENOMEM;
1929
1930 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
1931
1932 if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
1933 return 0;
1934
1935 post:
1936 POST;
1937 }
1938
1939
/* scandir-style one-shot directory listing of |path|; results are
 * retrieved with uv_fs_scandir_next(). */
uv_fs_scandir(uv_loop_t * loop,uv_fs_t * req,const char * path,int flags,uv_fs_cb cb)1940 int uv_fs_scandir(uv_loop_t* loop,
1941 uv_fs_t* req,
1942 const char* path,
1943 int flags,
1944 uv_fs_cb cb) {
1945 INIT(SCANDIR);
1946 PATH;
1947 req->flags = flags;
1948 POST;
1949 }
1950
/* Open directory |path| for incremental iteration with uv_fs_readdir(). */
uv_fs_opendir(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_fs_cb cb)1951 int uv_fs_opendir(uv_loop_t* loop,
1952 uv_fs_t* req,
1953 const char* path,
1954 uv_fs_cb cb) {
1955 INIT(OPENDIR);
1956 PATH;
1957 POST;
1958 }
1959
/* Read the next batch of entries from |dir| (previously opened with
 * uv_fs_opendir); the caller must have supplied dir->dirents. */
uv_fs_readdir(uv_loop_t * loop,uv_fs_t * req,uv_dir_t * dir,uv_fs_cb cb)1960 int uv_fs_readdir(uv_loop_t* loop,
1961 uv_fs_t* req,
1962 uv_dir_t* dir,
1963 uv_fs_cb cb) {
1964 INIT(READDIR);
1965
1966 if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
1967 return UV_EINVAL;
1968
/* The dir handle travels through the request via req->ptr. */
1969 req->ptr = dir;
1970 POST;
1971 }
1972
/* Close a directory handle opened with uv_fs_opendir(). */
uv_fs_closedir(uv_loop_t * loop,uv_fs_t * req,uv_dir_t * dir,uv_fs_cb cb)1973 int uv_fs_closedir(uv_loop_t* loop,
1974 uv_fs_t* req,
1975 uv_dir_t* dir,
1976 uv_fs_cb cb) {
1977 INIT(CLOSEDIR);
1978
1979 if (dir == NULL)
1980 return UV_EINVAL;
1981
1982 req->ptr = dir;
1983 POST;
1984 }
1985
/* readlink(2): read the target of symlink |path|. */
uv_fs_readlink(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_fs_cb cb)1986 int uv_fs_readlink(uv_loop_t* loop,
1987 uv_fs_t* req,
1988 const char* path,
1989 uv_fs_cb cb) {
1990 INIT(READLINK);
1991 PATH;
1992 POST;
1993 }
1994
1995
/* realpath(3): canonicalize |path|. */
uv_fs_realpath(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_fs_cb cb)1996 int uv_fs_realpath(uv_loop_t* loop,
1997 uv_fs_t* req,
1998 const char * path,
1999 uv_fs_cb cb) {
2000 INIT(REALPATH);
2001 PATH;
2002 POST;
2003 }
2004
2005
/* rename(2): rename |path| to |new_path|; async requests may take the
 * io_uring fast path. */
uv_fs_rename(uv_loop_t * loop,uv_fs_t * req,const char * path,const char * new_path,uv_fs_cb cb)2006 int uv_fs_rename(uv_loop_t* loop,
2007 uv_fs_t* req,
2008 const char* path,
2009 const char* new_path,
2010 uv_fs_cb cb) {
2011 INIT(RENAME);
2012 PATH2;
2013 if (cb != NULL)
2014 if (uv__iou_fs_rename(loop, req))
2015 return 0;
2016 POST;
2017 }
2018
2019
/* rmdir(2): remove directory |path|. */
uv_fs_rmdir(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_fs_cb cb)2020 int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
2021 INIT(RMDIR);
2022 PATH;
2023 POST;
2024 }
2025
2026
/* sendfile-style copy of |len| bytes from |in_fd| at |off| to |out_fd|.
 * Note the field reuse: in_fd is smuggled in req->flags and len in
 * bufsml[0].len because uv_fs_t has no dedicated members for them. */
uv_fs_sendfile(uv_loop_t * loop,uv_fs_t * req,uv_file out_fd,uv_file in_fd,int64_t off,size_t len,uv_fs_cb cb)2027 int uv_fs_sendfile(uv_loop_t* loop,
2028 uv_fs_t* req,
2029 uv_file out_fd,
2030 uv_file in_fd,
2031 int64_t off,
2032 size_t len,
2033 uv_fs_cb cb) {
2034 INIT(SENDFILE);
2035 req->flags = in_fd; /* hack */
2036 req->file = out_fd;
2037 req->off = off;
2038 req->bufsml[0].len = len;
2039 POST;
2040 }
2041
2042
/* stat(2): stat |path|, following symlinks; async requests may take the
 * io_uring statx fast path. */
uv_fs_stat(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_fs_cb cb)2043 int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
2044 INIT(STAT);
2045 PATH;
2046 if (cb != NULL)
2047 if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0))
2048 return 0;
2049 POST;
2050 }
2051
2052
/* symlink(2): create symlink |new_path| pointing at |path|; |flags| are
 * the UV_FS_SYMLINK_* options (Windows-oriented, recorded here as-is).
 * Async requests may take the io_uring fast path. */
uv_fs_symlink(uv_loop_t * loop,uv_fs_t * req,const char * path,const char * new_path,int flags,uv_fs_cb cb)2053 int uv_fs_symlink(uv_loop_t* loop,
2054 uv_fs_t* req,
2055 const char* path,
2056 const char* new_path,
2057 int flags,
2058 uv_fs_cb cb) {
2059 INIT(SYMLINK);
2060 PATH2;
2061 req->flags = flags;
2062 if (cb != NULL)
2063 if (uv__iou_fs_symlink(loop, req))
2064 return 0;
2065 POST;
2066 }
2067
2068
/* unlink(2): remove |path|; async requests may take the io_uring fast
 * path. */
uv_fs_unlink(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_fs_cb cb)2069 int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
2070 INIT(UNLINK);
2071 PATH;
2072 if (cb != NULL)
2073 if (uv__iou_fs_unlink(loop, req))
2074 return 0;
2075 POST;
2076 }
2077
2078
/* utimes-style: set access/modification times (seconds, as doubles) on
 * |path|, following symlinks. */
uv_fs_utime(uv_loop_t * loop,uv_fs_t * req,const char * path,double atime,double mtime,uv_fs_cb cb)2079 int uv_fs_utime(uv_loop_t* loop,
2080 uv_fs_t* req,
2081 const char* path,
2082 double atime,
2083 double mtime,
2084 uv_fs_cb cb) {
2085 INIT(UTIME);
2086 PATH;
2087 req->atime = atime;
2088 req->mtime = mtime;
2089 POST;
2090 }
2091
2092
/* writev/pwritev-style gather write of |bufs| to |file| at |off|
 * (negative off = current file position).
 *
 * Unlike uv_fs_read, the bufs array is copied for both sync and async
 * calls (sync writes go through uv__fs_write_all, which mutates and then
 * frees the copy). Heap allocation is used when the array does not fit
 * in the inline req->bufsml. Async requests may take the io_uring fast
 * path.
 */
uv_fs_write(uv_loop_t * loop,uv_fs_t * req,uv_file file,const uv_buf_t bufs[],unsigned int nbufs,int64_t off,uv_fs_cb cb)2093 int uv_fs_write(uv_loop_t* loop,
2094 uv_fs_t* req,
2095 uv_file file,
2096 const uv_buf_t bufs[],
2097 unsigned int nbufs,
2098 int64_t off,
2099 uv_fs_cb cb) {
2100 INIT(WRITE);
2101
2102 if (bufs == NULL || nbufs == 0)
2103 return UV_EINVAL;
2104
2105 req->file = file;
2106
2107 req->nbufs = nbufs;
2108 req->bufs = req->bufsml;
2109 if (nbufs > ARRAY_SIZE(req->bufsml))
2110 req->bufs = uv__malloc(nbufs * sizeof(*bufs));
2111
2112 if (req->bufs == NULL)
2113 return UV_ENOMEM;
2114
2115 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
2116
2117 req->off = off;
2118
2119 if (cb != NULL)
2120 if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0))
2121 return 0;
2122
2123 POST;
2124 }
2125
2126
/* Release all memory owned by a completed (or failed-to-start) fs
 * request. Safe to call with NULL and safe to call more than once:
 * every freed pointer is reset so a second call is a no-op.
 */
uv_fs_req_cleanup(uv_fs_t * req)2127 void uv_fs_req_cleanup(uv_fs_t* req) {
2128 if (req == NULL)
2129 return;
2130
2131 /* Only necessary for asynchronous requests, i.e., requests with a callback.
2132 * Synchronous ones don't copy their arguments and have req->path and
2133 * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
2134 * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
2135 */
2136 if (req->path != NULL &&
2137 (req->cb != NULL ||
2138 req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
2139 uv__free((void*) req->path); /* Memory is shared with req->new_path. */
2140
2141 req->path = NULL;
2142 req->new_path = NULL;
2143
/* READDIR/SCANDIR park operation-specific state in req->ptr; delegate
 * to their dedicated cleanup helpers. */
2144 if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
2145 uv__fs_readdir_cleanup(req);
2146
2147 if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
2148 uv__fs_scandir_cleanup(req);
2149
/* Free a heap-allocated bufs copy (the inline bufsml needs no free). */
2150 if (req->bufs != req->bufsml)
2151 uv__free(req->bufs);
2152 req->bufs = NULL;
2153
/* req->ptr may point into the request itself (&req->statbuf for the
 * stat family) or at a caller-owned uv_dir_t (OPENDIR) — only free it
 * when it is neither. */
2154 if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
2155 uv__free(req->ptr);
2156 req->ptr = NULL;
2157 }
2158
2159
/* Copy |path| to |new_path|. |flags| may combine UV_FS_COPYFILE_EXCL
 * (fail if the destination exists), UV_FS_COPYFILE_FICLONE (try a
 * copy-on-write clone, fall back to a regular copy) and
 * UV_FS_COPYFILE_FICLONE_FORCE (clone or fail); any other bit is
 * rejected with UV_EINVAL. The actual work happens in uv__fs_copyfile
 * on the worker side. */
uv_fs_copyfile(uv_loop_t * loop,uv_fs_t * req,const char * path,const char * new_path,int flags,uv_fs_cb cb)2160 int uv_fs_copyfile(uv_loop_t* loop,
2161 uv_fs_t* req,
2162 const char* path,
2163 const char* new_path,
2164 int flags,
2165 uv_fs_cb cb) {
2166 INIT(COPYFILE);
2167
2168 if (flags & ~(UV_FS_COPYFILE_EXCL |
2169 UV_FS_COPYFILE_FICLONE |
2170 UV_FS_COPYFILE_FICLONE_FORCE)) {
2171 return UV_EINVAL;
2172 }
2173
2174 PATH2;
2175 req->flags = flags;
2176 POST;
2177 }
2178
2179
/* statfs-style: query filesystem statistics for |path|. */
uv_fs_statfs(uv_loop_t * loop,uv_fs_t * req,const char * path,uv_fs_cb cb)2180 int uv_fs_statfs(uv_loop_t* loop,
2181 uv_fs_t* req,
2182 const char* path,
2183 uv_fs_cb cb) {
2184 INIT(STATFS);
2185 PATH;
2186 POST;
2187 }
2188
/* Return the platform error number for a completed request: req->result
 * holds a negated errno on Unix, so negating it recovers plain errno. */
uv_fs_get_system_error(const uv_fs_t * req)2189 int uv_fs_get_system_error(const uv_fs_t* req) {
2190 return -req->result;
2191 }
2192