1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 /* Caveat emptor: this file deviates from the libuv convention of returning
23 * negated errno codes. Most uv_fs_*() functions map directly to the system
24 * call of the same name. For more complex wrappers, it's easier to just
25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26 * getting the errno to the right place (req->result or the return value).
27 */
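/* Illustrative sketch only (not part of the build): a hypothetical wrapper in
 * the "-1 with errno" style is just a direct forward to the system call:
 *
 *   static ssize_t uv__fs_example_unlink(uv_fs_t* req) {
 *     return unlink(req->path);   on failure: returns -1 and sets errno
 *   }
 *
 * uv__fs_work() then converts errno with UV__ERR() and stores it in
 * req->result; the synchronous path returns that value to the caller.
 */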
28
29 #include "uv.h"
30 #include "internal.h"
31
32 #include <errno.h>
33 #include <dlfcn.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <string.h>
37 #include <limits.h> /* PATH_MAX */
38
39 #include <sys/types.h>
40 #include <sys/socket.h>
41 #include <sys/stat.h>
42 #include <sys/time.h>
43 #include <sys/uio.h>
44 #include <pthread.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <poll.h>
48
49 #if defined(__DragonFly__) || \
50 defined(__FreeBSD__) || \
51 defined(__FreeBSD_kernel__) || \
52 defined(__OpenBSD__) || \
53 defined(__NetBSD__)
54 # define HAVE_PREADV 1
55 #else
56 # define HAVE_PREADV 0
57 #endif
58
59 #if defined(__linux__)
60 # include "sys/utsname.h"
61 #endif
62
63 #if defined(__linux__) || defined(__sun)
64 # include <sys/sendfile.h>
65 # include <sys/sysmacros.h>
66 #endif
67
68 #if defined(__APPLE__)
69 # include <sys/sysctl.h>
70 #elif defined(__linux__) && !defined(FICLONE)
71 # include <sys/ioctl.h>
72 # define FICLONE _IOW(0x94, 9, int)
73 #endif
74
75 #if defined(_AIX) && !defined(_AIX71)
76 # include <utime.h>
77 #endif
78
79 #if defined(__APPLE__) || \
80 defined(__DragonFly__) || \
81 defined(__FreeBSD__) || \
82 defined(__FreeBSD_kernel__) || \
83 defined(__OpenBSD__) || \
84 defined(__NetBSD__)
85 # include <sys/param.h>
86 # include <sys/mount.h>
87 #elif defined(__sun) || \
88 defined(__MVS__) || \
89 defined(__NetBSD__) || \
90 defined(__HAIKU__) || \
91 defined(__QNX__)
92 # include <sys/statvfs.h>
93 #else
94 # include <sys/statfs.h>
95 #endif
96
97 #if defined(_AIX) && _XOPEN_SOURCE <= 600
98 extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
99 #endif
100
101 #define INIT(subtype) \
102 do { \
103 if (req == NULL) \
104 return UV_EINVAL; \
105 UV_REQ_INIT(req, UV_FS); \
106 req->fs_type = UV_FS_ ## subtype; \
107 req->result = 0; \
108 req->ptr = NULL; \
109 req->loop = loop; \
110 req->path = NULL; \
111 req->new_path = NULL; \
112 req->bufs = NULL; \
113 req->cb = cb; \
114 } \
115 while (0)
116
117 #define PATH \
118 do { \
119 assert(path != NULL); \
120 if (cb == NULL) { \
121 req->path = path; \
122 } else { \
123 req->path = uv__strdup(path); \
124 if (req->path == NULL) \
125 return UV_ENOMEM; \
126 } \
127 } \
128 while (0)
129
130 #define PATH2 \
131 do { \
132 if (cb == NULL) { \
133 req->path = path; \
134 req->new_path = new_path; \
135 } else { \
136 size_t path_len; \
137 size_t new_path_len; \
138 path_len = strlen(path) + 1; \
139 new_path_len = strlen(new_path) + 1; \
140 req->path = uv__malloc(path_len + new_path_len); \
141 if (req->path == NULL) \
142 return UV_ENOMEM; \
143 req->new_path = req->path + path_len; \
144 memcpy((void*) req->path, path, path_len); \
145 memcpy((void*) req->new_path, new_path, new_path_len); \
146 } \
147 } \
148 while (0)
149
150 #define POST \
151 do { \
152 if (cb != NULL) { \
153 uv__req_register(loop, req); \
154 uv__work_submit(loop, \
155 &req->work_req, \
156 UV__WORK_FAST_IO, \
157 uv__fs_work, \
158 uv__fs_done); \
159 return 0; \
160 } \
161 else { \
162 uv__fs_work(&req->work_req); \
163 return req->result; \
164 } \
165 } \
166 while (0)
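/* How the macros above fit together (rough sketch; UV_FS_EXAMPLE is a made-up
 * request type, see uv_fs_unlink() further down for a real instance):
 *
 *   int uv_fs_example(uv_loop_t* loop, uv_fs_t* req,
 *                     const char* path, uv_fs_cb cb) {
 *     INIT(EXAMPLE);   validate req, set fs_type/loop/cb, clear the fields
 *     PATH;            async: strdup(path); sync: alias the caller's string
 *     POST;            async: queue uv__fs_work/uv__fs_done and return 0;
 *                      sync: run uv__fs_work inline and return req->result
 *   }
 */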
167
168
169 static int uv__fs_close(int fd) {
170 int rc;
171
172 rc = uv__close_nocancel(fd);
173 if (rc == -1)
174 if (errno == EINTR || errno == EINPROGRESS)
175 rc = 0; /* The close is in progress, not an error. */
176
177 return rc;
178 }
179
180
181 static ssize_t uv__fs_fsync(uv_fs_t* req) {
182 #if defined(__APPLE__)
183 /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
184 * to the drive platters. This is in contrast to Linux's fdatasync and fsync
185 * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
186 * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
187 * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
188 * This is the same approach taken by sqlite, except sqlite does not issue
189 * an F_BARRIERFSYNC call.
190 */
191 int r;
192
193 r = fcntl(req->file, F_FULLFSYNC);
194 if (r != 0)
195 r = fcntl(req->file, 85 /* F_BARRIERFSYNC */); /* fsync + barrier */
196 if (r != 0)
197 r = fsync(req->file);
198 return r;
199 #else
200 return fsync(req->file);
201 #endif
202 }
203
204
205 static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
206 #if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
207 return fdatasync(req->file);
208 #elif defined(__APPLE__)
209 /* See the comment in uv__fs_fsync. */
210 return uv__fs_fsync(req);
211 #else
212 return fsync(req->file);
213 #endif
214 }
215
216
217 UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
218 struct timespec ts;
219 ts.tv_sec = time;
220 ts.tv_nsec = (time - ts.tv_sec) * 1e9;
221
222 /* TODO(bnoordhuis) Remove this. utimensat() has nanosecond resolution but we
223 * stick to microsecond resolution for the sake of consistency with other
224 * platforms. I'm the original author of this compatibility hack but I'm
225 * less convinced it's useful nowadays.
226 */
227 ts.tv_nsec -= ts.tv_nsec % 1000;
228
229 if (ts.tv_nsec < 0) {
230 ts.tv_nsec += 1e9;
231 ts.tv_sec -= 1;
232 }
233 return ts;
234 }
235
236 UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
237 struct timeval tv;
238 tv.tv_sec = time;
239 tv.tv_usec = (time - tv.tv_sec) * 1e6;
240 if (tv.tv_usec < 0) {
241 tv.tv_usec += 1e6;
242 tv.tv_sec -= 1;
243 }
244 return tv;
245 }
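/* Worked example (illustrative): uv__fs_to_timeval(-1.25) first yields
 * tv_sec = -1, tv_usec = -250000; the sign fix-up then normalizes that to
 * tv_sec = -2, tv_usec = 750000, i.e. the same instant with a non-negative
 * sub-second part. uv__fs_to_timespec() behaves the same way in nanoseconds,
 * after truncating to microsecond granularity.
 */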
246
247 static ssize_t uv__fs_futime(uv_fs_t* req) {
248 #if defined(__linux__) \
249 || defined(_AIX71) \
250 || defined(__HAIKU__) \
251 || defined(__GNU__)
252 struct timespec ts[2];
253 ts[0] = uv__fs_to_timespec(req->atime);
254 ts[1] = uv__fs_to_timespec(req->mtime);
255 return futimens(req->file, ts);
256 #elif defined(__APPLE__) \
257 || defined(__DragonFly__) \
258 || defined(__FreeBSD__) \
259 || defined(__FreeBSD_kernel__) \
260 || defined(__NetBSD__) \
261 || defined(__OpenBSD__) \
262 || defined(__sun)
263 struct timeval tv[2];
264 tv[0] = uv__fs_to_timeval(req->atime);
265 tv[1] = uv__fs_to_timeval(req->mtime);
266 # if defined(__sun)
267 return futimesat(req->file, NULL, tv);
268 # else
269 return futimes(req->file, tv);
270 # endif
271 #elif defined(__MVS__)
272 attrib_t atr;
273 memset(&atr, 0, sizeof(atr));
274 atr.att_mtimechg = 1;
275 atr.att_atimechg = 1;
276 atr.att_mtime = req->mtime;
277 atr.att_atime = req->atime;
278 return __fchattr(req->file, &atr, sizeof(atr));
279 #else
280 errno = ENOSYS;
281 return -1;
282 #endif
283 }
284
285
286 static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
287 return mkdtemp((char*) req->path) ? 0 : -1;
288 }
289
290
291 static int (*uv__mkostemp)(char*, int);
292
293
294 static void uv__mkostemp_initonce(void) {
295 /* z/OS doesn't have RTLD_DEFAULT but that's okay
296 * because it doesn't have mkostemp(O_CLOEXEC) either.
297 */
298 #ifdef RTLD_DEFAULT
299 uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");
300
301 /* We don't care about errors, but we do want to clean them up.
302 * If there has been no error, then dlerror() will just return
303 * NULL.
304 */
305 dlerror();
306 #endif /* RTLD_DEFAULT */
307 }
308
309
310 static int uv__fs_mkstemp(uv_fs_t* req) {
311 static uv_once_t once = UV_ONCE_INIT;
312 int r;
313 #ifdef O_CLOEXEC
314 static int no_cloexec_support;
315 #endif
316 static const char pattern[] = "XXXXXX";
317 static const size_t pattern_size = sizeof(pattern) - 1;
318 char* path;
319 size_t path_length;
320
321 path = (char*) req->path;
322 path_length = strlen(path);
323
324 /* EINVAL can be returned for 2 reasons:
325 1. The template's last 6 characters were not XXXXXX
326 2. open() didn't support O_CLOEXEC
327 We want to avoid taking the fallback path in case of 1,
328 so the template is checked manually beforehand. */
329 if (path_length < pattern_size ||
330 strcmp(path + path_length - pattern_size, pattern)) {
331 errno = EINVAL;
332 r = -1;
333 goto clobber;
334 }
335
336 uv_once(&once, uv__mkostemp_initonce);
337
338 #ifdef O_CLOEXEC
339 if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
340 r = uv__mkostemp(path, O_CLOEXEC);
341
342 if (r >= 0)
343 return r;
344
345 /* If mkostemp() fails with EINVAL, it means the kernel doesn't
346 support O_CLOEXEC, so we just fall back to mkstemp() below. */
347 if (errno != EINVAL)
348 goto clobber;
349
350 /* We set the static variable so that next calls don't even
351 try to use mkostemp. */
352 uv__store_relaxed(&no_cloexec_support, 1);
353 }
354 #endif /* O_CLOEXEC */
355
356 if (req->cb != NULL)
357 uv_rwlock_rdlock(&req->loop->cloexec_lock);
358
359 r = mkstemp(path);
360
361 /* In case of failure `uv__cloexec` will leave error in `errno`,
362 * so it is enough to just set `r` to `-1`.
363 */
364 if (r >= 0 && uv__cloexec(r, 1) != 0) {
365 r = uv__close(r);
366 if (r != 0)
367 abort();
368 r = -1;
369 }
370
371 if (req->cb != NULL)
372 uv_rwlock_rdunlock(&req->loop->cloexec_lock);
373
374 clobber:
375 if (r < 0)
376 path[0] = '\0';
377 return r;
378 }
379
380
381 static ssize_t uv__fs_open(uv_fs_t* req) {
382 #ifdef O_CLOEXEC
383 return open(req->path, req->flags | O_CLOEXEC, req->mode);
384 #else /* O_CLOEXEC */
385 int r;
386
387 if (req->cb != NULL)
388 uv_rwlock_rdlock(&req->loop->cloexec_lock);
389
390 r = open(req->path, req->flags, req->mode);
391
392 /* In case of failure `uv__cloexec` will leave error in `errno`,
393 * so it is enough to just set `r` to `-1`.
394 */
395 if (r >= 0 && uv__cloexec(r, 1) != 0) {
396 r = uv__close(r);
397 if (r != 0)
398 abort();
399 r = -1;
400 }
401
402 if (req->cb != NULL)
403 uv_rwlock_rdunlock(&req->loop->cloexec_lock);
404
405 return r;
406 #endif /* O_CLOEXEC */
407 }
408
409
410 #if !HAVE_PREADV
411 static ssize_t uv__fs_preadv(uv_file fd,
412 uv_buf_t* bufs,
413 unsigned int nbufs,
414 off_t off) {
415 uv_buf_t* buf;
416 uv_buf_t* end;
417 ssize_t result;
418 ssize_t rc;
419 size_t pos;
420
421 assert(nbufs > 0);
422
423 result = 0;
424 pos = 0;
425 buf = bufs + 0;
426 end = bufs + nbufs;
427
428 for (;;) {
429 do
430 rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
431 while (rc == -1 && errno == EINTR);
432
433 if (rc == 0)
434 break;
435
436 if (rc == -1 && result == 0)
437 return UV__ERR(errno);
438
439 if (rc == -1)
440 break; /* We read some data so return that, ignore the error. */
441
442 pos += rc;
443 result += rc;
444
445 if (pos < buf->len)
446 continue;
447
448 pos = 0;
449 buf += 1;
450
451 if (buf == end)
452 break;
453 }
454
455 return result;
456 }
457 #endif
458
459
460 static ssize_t uv__fs_read(uv_fs_t* req) {
461 #if defined(__linux__)
462 static int no_preadv;
463 #endif
464 unsigned int iovmax;
465 ssize_t result;
466
467 iovmax = uv__getiovmax();
468 if (req->nbufs > iovmax)
469 req->nbufs = iovmax;
470
471 if (req->off < 0) {
472 if (req->nbufs == 1)
473 result = read(req->file, req->bufs[0].base, req->bufs[0].len);
474 else
475 result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
476 } else {
477 if (req->nbufs == 1) {
478 result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
479 goto done;
480 }
481
482 #if HAVE_PREADV
483 result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
484 #else
485 # if defined(__linux__)
486 if (uv__load_relaxed(&no_preadv)) retry:
487 # endif
488 {
489 result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
490 }
491 # if defined(__linux__)
492 else {
493 result = uv__preadv(req->file,
494 (struct iovec*)req->bufs,
495 req->nbufs,
496 req->off);
497 if (result == -1 && errno == ENOSYS) {
498 uv__store_relaxed(&no_preadv, 1);
499 goto retry;
500 }
501 }
502 # endif
503 #endif
504 }
505
506 done:
507 /* Early cleanup of bufs allocation, since we're done with it. */
508 if (req->bufs != req->bufsml)
509 uv__free(req->bufs);
510
511 req->bufs = NULL;
512 req->nbufs = 0;
513
514 #ifdef __PASE__
515 /* PASE returns EOPNOTSUPP when reading a directory; convert it to EISDIR. */
516 if (result == -1 && errno == EOPNOTSUPP) {
517 struct stat buf;
518 ssize_t rc;
519 rc = fstat(req->file, &buf);
520 if (rc == 0 && S_ISDIR(buf.st_mode)) {
521 errno = EISDIR;
522 }
523 }
524 #endif
525
526 return result;
527 }
528
529
530 #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
531 #define UV_CONST_DIRENT uv__dirent_t
532 #else
533 #define UV_CONST_DIRENT const uv__dirent_t
534 #endif
535
536
537 static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
538 return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
539 }
540
541
542 static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
543 return strcmp((*a)->d_name, (*b)->d_name);
544 }
545
546
547 static ssize_t uv__fs_scandir(uv_fs_t* req) {
548 uv__dirent_t** dents;
549 int n;
550
551 dents = NULL;
552 n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);
553
554 /* NOTE: We will use nbufs as an index field */
555 req->nbufs = 0;
556
557 if (n == 0) {
558 /* OS X still needs to deallocate some memory.
559 * Memory was allocated using the system allocator, so use free() here.
560 */
561 free(dents);
562 dents = NULL;
563 } else if (n == -1) {
564 return n;
565 }
566
567 req->ptr = dents;
568
569 return n;
570 }
571
572 static int uv__fs_opendir(uv_fs_t* req) {
573 uv_dir_t* dir;
574
575 dir = uv__malloc(sizeof(*dir));
576 if (dir == NULL)
577 goto error;
578
579 dir->dir = opendir(req->path);
580 if (dir->dir == NULL)
581 goto error;
582
583 req->ptr = dir;
584 return 0;
585
586 error:
587 uv__free(dir);
588 req->ptr = NULL;
589 return -1;
590 }
591
592 static int uv__fs_readdir(uv_fs_t* req) {
593 uv_dir_t* dir;
594 uv_dirent_t* dirent;
595 struct dirent* res;
596 unsigned int dirent_idx;
597 unsigned int i;
598
599 dir = req->ptr;
600 dirent_idx = 0;
601
602 while (dirent_idx < dir->nentries) {
603 /* readdir() returns NULL on end of directory, as well as on error. errno
604 is used to differentiate between the two conditions. */
605 errno = 0;
606 res = readdir(dir->dir);
607
608 if (res == NULL) {
609 if (errno != 0)
610 goto error;
611 break;
612 }
613
614 if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
615 continue;
616
617 dirent = &dir->dirents[dirent_idx];
618 dirent->name = uv__strdup(res->d_name);
619
620 if (dirent->name == NULL)
621 goto error;
622
623 dirent->type = uv__fs_get_dirent_type(res);
624 ++dirent_idx;
625 }
626
627 return dirent_idx;
628
629 error:
630 for (i = 0; i < dirent_idx; ++i) {
631 uv__free((char*) dir->dirents[i].name);
632 dir->dirents[i].name = NULL;
633 }
634
635 return -1;
636 }
637
638 static int uv__fs_closedir(uv_fs_t* req) {
639 uv_dir_t* dir;
640
641 dir = req->ptr;
642
643 if (dir->dir != NULL) {
644 closedir(dir->dir);
645 dir->dir = NULL;
646 }
647
648 uv__free(req->ptr);
649 req->ptr = NULL;
650 return 0;
651 }
652
653 static int uv__fs_statfs(uv_fs_t* req) {
654 uv_statfs_t* stat_fs;
655 #if defined(__sun) || \
656 defined(__MVS__) || \
657 defined(__NetBSD__) || \
658 defined(__HAIKU__) || \
659 defined(__QNX__)
660 struct statvfs buf;
661
662 if (0 != statvfs(req->path, &buf))
663 #else
664 struct statfs buf;
665
666 if (0 != statfs(req->path, &buf))
667 #endif /* defined(__sun) */
668 return -1;
669
670 stat_fs = uv__malloc(sizeof(*stat_fs));
671 if (stat_fs == NULL) {
672 errno = ENOMEM;
673 return -1;
674 }
675
676 #if defined(__sun) || \
677 defined(__MVS__) || \
678 defined(__OpenBSD__) || \
679 defined(__NetBSD__) || \
680 defined(__HAIKU__) || \
681 defined(__QNX__)
682 stat_fs->f_type = 0; /* f_type is not supported. */
683 #else
684 stat_fs->f_type = buf.f_type;
685 #endif
686 stat_fs->f_bsize = buf.f_bsize;
687 stat_fs->f_blocks = buf.f_blocks;
688 stat_fs->f_bfree = buf.f_bfree;
689 stat_fs->f_bavail = buf.f_bavail;
690 stat_fs->f_files = buf.f_files;
691 stat_fs->f_ffree = buf.f_ffree;
692 req->ptr = stat_fs;
693 return 0;
694 }
695
696 static ssize_t uv__fs_pathmax_size(const char* path) {
697 ssize_t pathmax;
698
699 pathmax = pathconf(path, _PC_PATH_MAX);
700
701 if (pathmax == -1)
702 pathmax = UV__PATH_MAX;
703
704 return pathmax;
705 }
706
707 static ssize_t uv__fs_readlink(uv_fs_t* req) {
708 ssize_t maxlen;
709 ssize_t len;
710 char* buf;
711
712 #if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
713 maxlen = uv__fs_pathmax_size(req->path);
714 #else
715 /* We may not have a real PATH_MAX. Read size of link. */
716 struct stat st;
717 int ret;
718 ret = lstat(req->path, &st);
719 if (ret != 0)
720 return -1;
721 if (!S_ISLNK(st.st_mode)) {
722 errno = EINVAL;
723 return -1;
724 }
725
726 maxlen = st.st_size;
727
728 /* According to readlink(2) lstat can report st_size == 0
729 for some symlinks, such as those in /proc or /sys. */
730 if (maxlen == 0)
731 maxlen = uv__fs_pathmax_size(req->path);
732 #endif
733
734 buf = uv__malloc(maxlen);
735
736 if (buf == NULL) {
737 errno = ENOMEM;
738 return -1;
739 }
740
741 #if defined(__MVS__)
742 len = os390_readlink(req->path, buf, maxlen);
743 #else
744 len = readlink(req->path, buf, maxlen);
745 #endif
746
747 if (len == -1) {
748 uv__free(buf);
749 return -1;
750 }
751
752 /* Uncommon case: resize to make room for the trailing nul byte. */
753 if (len == maxlen) {
754 buf = uv__reallocf(buf, len + 1);
755
756 if (buf == NULL)
757 return -1;
758 }
759
760 buf[len] = '\0';
761 req->ptr = buf;
762
763 return 0;
764 }
765
766 static ssize_t uv__fs_realpath(uv_fs_t* req) {
767 char* buf;
768
769 #if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
770 buf = realpath(req->path, NULL);
771 if (buf == NULL)
772 return -1;
773 #else
774 ssize_t len;
775
776 len = uv__fs_pathmax_size(req->path);
777 buf = uv__malloc(len + 1);
778
779 if (buf == NULL) {
780 errno = ENOMEM;
781 return -1;
782 }
783
784 if (realpath(req->path, buf) == NULL) {
785 uv__free(buf);
786 return -1;
787 }
788 #endif
789
790 req->ptr = buf;
791
792 return 0;
793 }
794
795 static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
796 struct pollfd pfd;
797 int use_pread;
798 off_t offset;
799 ssize_t nsent;
800 ssize_t nread;
801 ssize_t nwritten;
802 size_t buflen;
803 size_t len;
804 ssize_t n;
805 int in_fd;
806 int out_fd;
807 char buf[8192];
808
809 len = req->bufsml[0].len;
810 in_fd = req->flags;
811 out_fd = req->file;
812 offset = req->off;
813 use_pread = 1;
814
815 /* Here are the rules regarding errors:
816 *
817 * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
818 * The user needs to know that some data has already been sent, to stop
819 * them from sending it twice.
820 *
821 * 2. Write errors are always reported. Write errors are bad because they
822 * mean data loss: we've read data but now we can't write it out.
823 *
824 * We try to use pread() and fall back to regular read() if the source fd
825 * doesn't support positional reads, for example when it's a pipe fd.
826 *
827 * If we get EAGAIN when writing to the target fd, we poll() on it until
828 * it becomes writable again.
829 *
830 * FIXME: If we get a write error when use_pread==1, it should be safe to
831 * return the number of sent bytes instead of an error because pread()
832 * is, in theory, idempotent. However, special files in /dev or /proc
833 * may support pread() but not necessarily return the same data on
834 * successive reads.
835 *
836 * FIXME: There is no way now to signal that we managed to send *some* data
837 * before a write error.
838 */
839 for (nsent = 0; (size_t) nsent < len; ) {
840 buflen = len - nsent;
841
842 if (buflen > sizeof(buf))
843 buflen = sizeof(buf);
844
845 do
846 if (use_pread)
847 nread = pread(in_fd, buf, buflen, offset);
848 else
849 nread = read(in_fd, buf, buflen);
850 while (nread == -1 && errno == EINTR);
851
852 if (nread == 0)
853 goto out;
854
855 if (nread == -1) {
856 if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
857 use_pread = 0;
858 continue;
859 }
860
861 if (nsent == 0)
862 nsent = -1;
863
864 goto out;
865 }
866
867 for (nwritten = 0; nwritten < nread; ) {
868 do
869 n = write(out_fd, buf + nwritten, nread - nwritten);
870 while (n == -1 && errno == EINTR);
871
872 if (n != -1) {
873 nwritten += n;
874 continue;
875 }
876
877 if (errno != EAGAIN && errno != EWOULDBLOCK) {
878 nsent = -1;
879 goto out;
880 }
881
882 pfd.fd = out_fd;
883 pfd.events = POLLOUT;
884 pfd.revents = 0;
885
886 do
887 n = poll(&pfd, 1, -1);
888 while (n == -1 && errno == EINTR);
889
890 if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
891 errno = EIO;
892 nsent = -1;
893 goto out;
894 }
895 }
896
897 offset += nread;
898 nsent += nread;
899 }
900
901 out:
902 if (nsent != -1)
903 req->off = offset;
904
905 return nsent;
906 }
907
908
909 #ifdef __linux__
910 static unsigned uv__kernel_version(void) {
911 static unsigned cached_version;
912 struct utsname u;
913 unsigned version;
914 unsigned major;
915 unsigned minor;
916 unsigned patch;
917
918 version = uv__load_relaxed(&cached_version);
919 if (version != 0)
920 return version;
921
922 if (-1 == uname(&u))
923 return 0;
924
925 if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
926 return 0;
927
928 version = major * 65536 + minor * 256 + patch;
929 uv__store_relaxed(&cached_version, version);
930
931 return version;
932 }
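/* Example (illustrative): kernel release "4.20.0" is encoded as
 * 4 * 65536 + 20 * 256 + 0 == 0x041400, which is exactly the threshold
 * compared against in uv__is_buggy_cephfs() below.
 */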
933
934
935 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
936 * in copy_file_range() when it shouldn't. There is no workaround except to
937 * fall back to a regular copy.
938 */
939 static int uv__is_buggy_cephfs(int fd) {
940 struct statfs s;
941
942 if (-1 == fstatfs(fd, &s))
943 return 0;
944
945 if (s.f_type != /* CephFS */ 0xC36400)
946 return 0;
947
948 return uv__kernel_version() < /* 4.20.0 */ 0x041400;
949 }
950
951
952 static int uv__is_cifs_or_smb(int fd) {
953 struct statfs s;
954
955 if (-1 == fstatfs(fd, &s))
956 return 0;
957
958 switch ((unsigned) s.f_type) {
959 case 0x0000517Bu: /* SMB */
960 case 0xFE534D42u: /* SMB2 */
961 case 0xFF534D42u: /* CIFS */
962 return 1;
963 }
964
965 return 0;
966 }
967
968
969 static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
970 int out_fd, size_t len) {
971 static int no_copy_file_range_support;
972 ssize_t r;
973
974 if (uv__load_relaxed(&no_copy_file_range_support)) {
975 errno = ENOSYS;
976 return -1;
977 }
978
979 r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);
980
981 if (r != -1)
982 return r;
983
984 switch (errno) {
985 case EACCES:
986 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
987 * copy-from command when it shouldn't.
988 */
989 if (uv__is_buggy_cephfs(in_fd))
990 errno = ENOSYS; /* Use fallback. */
991 break;
992 case ENOSYS:
993 uv__store_relaxed(&no_copy_file_range_support, 1);
994 break;
995 case EPERM:
996 /* It's been reported that CIFS spuriously fails.
997 * Consider it a transient error.
998 */
999 if (uv__is_cifs_or_smb(out_fd))
1000 errno = ENOSYS; /* Use fallback. */
1001 break;
1002 case ENOTSUP:
1003 case EXDEV:
1004 /* ENOTSUP - it could work on another file system type.
1005 * EXDEV - it will not work when in_fd and out_fd are not on the same
1006 * mounted filesystem (pre Linux 5.3)
1007 */
1008 errno = ENOSYS; /* Use fallback. */
1009 break;
1010 }
1011
1012 return -1;
1013 }
1014
1015 #endif /* __linux__ */
1016
1017
1018 static ssize_t uv__fs_sendfile(uv_fs_t* req) {
1019 int in_fd;
1020 int out_fd;
1021
1022 in_fd = req->flags;
1023 out_fd = req->file;
1024
1025 #if defined(__linux__) || defined(__sun)
1026 {
1027 off_t off;
1028 ssize_t r;
1029 size_t len;
1030 int try_sendfile;
1031
1032 off = req->off;
1033 len = req->bufsml[0].len;
1034 try_sendfile = 1;
1035
1036 #ifdef __linux__
1037 r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
1038 try_sendfile = (r == -1 && errno == ENOSYS);
1039 #endif
1040
1041 if (try_sendfile)
1042 r = sendfile(out_fd, in_fd, &off, len);
1043
1044 /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
1045 * it still writes out data. Fortunately, we can detect it by checking if
1046 * the offset has been updated.
1047 */
1048 if (r != -1 || off > req->off) {
1049 r = off - req->off;
1050 req->off = off;
1051 return r;
1052 }
1053
1054 if (errno == EINVAL ||
1055 errno == EIO ||
1056 errno == ENOTSOCK ||
1057 errno == EXDEV) {
1058 errno = 0;
1059 return uv__fs_sendfile_emul(req);
1060 }
1061
1062 return -1;
1063 }
1064 #elif defined(__APPLE__) || \
1065 defined(__DragonFly__) || \
1066 defined(__FreeBSD__) || \
1067 defined(__FreeBSD_kernel__)
1068 {
1069 off_t len;
1070 ssize_t r;
1071
1072 /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
1073 * non-blocking mode and not all data could be written. If a non-zero
1074 * number of bytes have been sent, we don't consider it an error.
1075 */
1076
1077 #if defined(__FreeBSD__) || defined(__DragonFly__)
1078 #if defined(__FreeBSD__)
1079 off_t off;
1080
1081 off = req->off;
1082 r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
1083 if (r >= 0) {
1084 r = off - req->off;
1085 req->off = off;
1086 return r;
1087 }
1088 #endif
1089 len = 0;
1090 r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
1091 #elif defined(__FreeBSD_kernel__)
1092 len = 0;
1093 r = bsd_sendfile(in_fd,
1094 out_fd,
1095 req->off,
1096 req->bufsml[0].len,
1097 NULL,
1098 &len,
1099 0);
1100 #else
1101 /* The Darwin sendfile takes len as an input for the length to send,
1102 * so make sure to initialize it with the caller's value. */
1103 len = req->bufsml[0].len;
1104 r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
1105 #endif
1106
1107 /*
1108 * The man page for sendfile(2) on DragonFly states that `len` contains
1109 * a meaningful value ONLY in case of EAGAIN and EINTR.
1110 * Nothing is said about its value in case of other errors, so it is
1111 * better not to depend on the potentially wrong assumption that it was
1112 * not modified by the syscall.
1113 */
1114 if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
1115 req->off += len;
1116 return (ssize_t) len;
1117 }
1118
1119 if (errno == EINVAL ||
1120 errno == EIO ||
1121 errno == ENOTSOCK ||
1122 errno == EXDEV) {
1123 errno = 0;
1124 return uv__fs_sendfile_emul(req);
1125 }
1126
1127 return -1;
1128 }
1129 #else
1130 /* Squelch compiler warnings. */
1131 (void) &in_fd;
1132 (void) &out_fd;
1133
1134 return uv__fs_sendfile_emul(req);
1135 #endif
1136 }
1137
1138
1139 static ssize_t uv__fs_utime(uv_fs_t* req) {
1140 #if defined(__linux__) \
1141 || defined(_AIX71) \
1142 || defined(__sun) \
1143 || defined(__HAIKU__)
1144 struct timespec ts[2];
1145 ts[0] = uv__fs_to_timespec(req->atime);
1146 ts[1] = uv__fs_to_timespec(req->mtime);
1147 return utimensat(AT_FDCWD, req->path, ts, 0);
1148 #elif defined(__APPLE__) \
1149 || defined(__DragonFly__) \
1150 || defined(__FreeBSD__) \
1151 || defined(__FreeBSD_kernel__) \
1152 || defined(__NetBSD__) \
1153 || defined(__OpenBSD__)
1154 struct timeval tv[2];
1155 tv[0] = uv__fs_to_timeval(req->atime);
1156 tv[1] = uv__fs_to_timeval(req->mtime);
1157 return utimes(req->path, tv);
1158 #elif defined(_AIX) \
1159 && !defined(_AIX71)
1160 struct utimbuf buf;
1161 buf.actime = req->atime;
1162 buf.modtime = req->mtime;
1163 return utime(req->path, &buf);
1164 #elif defined(__MVS__)
1165 attrib_t atr;
1166 memset(&atr, 0, sizeof(atr));
1167 atr.att_mtimechg = 1;
1168 atr.att_atimechg = 1;
1169 atr.att_mtime = req->mtime;
1170 atr.att_atime = req->atime;
1171 return __lchattr((char*) req->path, &atr, sizeof(atr));
1172 #else
1173 errno = ENOSYS;
1174 return -1;
1175 #endif
1176 }
1177
1178
1179 static ssize_t uv__fs_lutime(uv_fs_t* req) {
1180 #if defined(__linux__) || \
1181 defined(_AIX71) || \
1182 defined(__sun) || \
1183 defined(__HAIKU__) || \
1184 defined(__GNU__)
1185 struct timespec ts[2];
1186 ts[0] = uv__fs_to_timespec(req->atime);
1187 ts[1] = uv__fs_to_timespec(req->mtime);
1188 return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
1189 #elif defined(__APPLE__) || \
1190 defined(__DragonFly__) || \
1191 defined(__FreeBSD__) || \
1192 defined(__FreeBSD_kernel__) || \
1193 defined(__NetBSD__)
1194 struct timeval tv[2];
1195 tv[0] = uv__fs_to_timeval(req->atime);
1196 tv[1] = uv__fs_to_timeval(req->mtime);
1197 return lutimes(req->path, tv);
1198 #else
1199 errno = ENOSYS;
1200 return -1;
1201 #endif
1202 }
1203
1204
1205 static ssize_t uv__fs_write(uv_fs_t* req) {
1206 #if defined(__linux__)
1207 static int no_pwritev;
1208 #endif
1209 ssize_t r;
1210
1211 /* Serialize writes on OS X; concurrent write() and pwrite() calls result in
1212 * data loss. We can't use a per-file-descriptor lock because the descriptor may be
1213 * a dup().
1214 */
1215 #if defined(__APPLE__)
1216 static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
1217
1218 if (pthread_mutex_lock(&lock))
1219 abort();
1220 #endif
1221
1222 if (req->off < 0) {
1223 if (req->nbufs == 1)
1224 r = write(req->file, req->bufs[0].base, req->bufs[0].len);
1225 else
1226 r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
1227 } else {
1228 if (req->nbufs == 1) {
1229 r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
1230 goto done;
1231 }
1232 #if HAVE_PREADV
1233 r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
1234 #else
1235 # if defined(__linux__)
1236 if (no_pwritev) retry:
1237 # endif
1238 {
1239 r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
1240 }
1241 # if defined(__linux__)
1242 else {
1243 r = uv__pwritev(req->file,
1244 (struct iovec*) req->bufs,
1245 req->nbufs,
1246 req->off);
1247 if (r == -1 && errno == ENOSYS) {
1248 no_pwritev = 1;
1249 goto retry;
1250 }
1251 }
1252 # endif
1253 #endif
1254 }
1255
1256 done:
1257 #if defined(__APPLE__)
1258 if (pthread_mutex_unlock(&lock))
1259 abort();
1260 #endif
1261
1262 return r;
1263 }
1264
1265 static ssize_t uv__fs_copyfile(uv_fs_t* req) {
1266 uv_fs_t fs_req;
1267 uv_file srcfd;
1268 uv_file dstfd;
1269 struct stat src_statsbuf;
1270 struct stat dst_statsbuf;
1271 int dst_flags;
1272 int result;
1273 int err;
1274 off_t bytes_to_send;
1275 off_t in_offset;
1276 off_t bytes_written;
1277 size_t bytes_chunk;
1278
1279 dstfd = -1;
1280 err = 0;
1281
1282 /* Open the source file. */
1283 srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
1284 uv_fs_req_cleanup(&fs_req);
1285
1286 if (srcfd < 0)
1287 return srcfd;
1288
1289 /* Get the source file's mode. */
1290 if (fstat(srcfd, &src_statsbuf)) {
1291 err = UV__ERR(errno);
1292 goto out;
1293 }
1294
1295 dst_flags = O_WRONLY | O_CREAT;
1296
1297 if (req->flags & UV_FS_COPYFILE_EXCL)
1298 dst_flags |= O_EXCL;
1299
1300 /* Open the destination file. */
1301 dstfd = uv_fs_open(NULL,
1302 &fs_req,
1303 req->new_path,
1304 dst_flags,
1305 src_statsbuf.st_mode,
1306 NULL);
1307 uv_fs_req_cleanup(&fs_req);
1308
1309 if (dstfd < 0) {
1310 err = dstfd;
1311 goto out;
1312 }
1313
1314 /* If the file is not being opened exclusively, verify that the source and
1315 destination are not the same file. If they are the same, bail out early. */
1316 if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
1317 /* Get the destination file's mode. */
1318 if (fstat(dstfd, &dst_statsbuf)) {
1319 err = UV__ERR(errno);
1320 goto out;
1321 }
1322
1323 /* Check if srcfd and dstfd refer to the same file */
1324 if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
1325 src_statsbuf.st_ino == dst_statsbuf.st_ino) {
1326 goto out;
1327 }
1328
1329 /* Truncate the file in case the destination already existed. */
1330 if (ftruncate(dstfd, 0) != 0) {
1331 err = UV__ERR(errno);
1332 goto out;
1333 }
1334 }
1335
1336 if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
1337 err = UV__ERR(errno);
1338 #ifdef __linux__
1339 /* fchmod() on CIFS shares always fails with EPERM unless the share is
1340 * mounted with "noperm". As fchmod() is a meaningless operation on such
1341 * shares anyway, detect that condition and squelch the error.
1342 */
1343 if (err != UV_EPERM)
1344 goto out;
1345
1346 if (!uv__is_cifs_or_smb(dstfd))
1347 goto out;
1348
1349 err = 0;
1350 #else /* !__linux__ */
1351 goto out;
1352 #endif /* !__linux__ */
1353 }
1354
1355 #ifdef FICLONE
1356 if (req->flags & UV_FS_COPYFILE_FICLONE ||
1357 req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1358 if (ioctl(dstfd, FICLONE, srcfd) == 0) {
1359 /* ioctl() with FICLONE succeeded. */
1360 goto out;
1361 }
1362 /* If an error occurred and force was set, return the error to the caller;
1363 * fall back to sendfile() when force was not set. */
1364 if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1365 err = UV__ERR(errno);
1366 goto out;
1367 }
1368 }
1369 #else
1370 if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
1371 err = UV_ENOSYS;
1372 goto out;
1373 }
1374 #endif
1375
1376 bytes_to_send = src_statsbuf.st_size;
1377 in_offset = 0;
1378 while (bytes_to_send != 0) {
1379 bytes_chunk = SSIZE_MAX;
1380 if (bytes_to_send < (off_t) bytes_chunk)
1381 bytes_chunk = bytes_to_send;
1382 uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
1383 bytes_written = fs_req.result;
1384 uv_fs_req_cleanup(&fs_req);
1385
1386 if (bytes_written < 0) {
1387 err = bytes_written;
1388 break;
1389 }
1390
1391 bytes_to_send -= bytes_written;
1392 in_offset += bytes_written;
1393 }
1394
1395 out:
1396 if (err < 0)
1397 result = err;
1398 else
1399 result = 0;
1400
1401 /* Close the source file. */
1402 err = uv__close_nocheckstdio(srcfd);
1403
1404 /* Don't overwrite any existing errors. */
1405 if (err != 0 && result == 0)
1406 result = err;
1407
1408 /* Close the destination file if it is open. */
1409 if (dstfd >= 0) {
1410 err = uv__close_nocheckstdio(dstfd);
1411
1412 /* Don't overwrite any existing errors. */
1413 if (err != 0 && result == 0)
1414 result = err;
1415
1416 /* Remove the destination file if something went wrong. */
1417 if (result != 0) {
1418 uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
1419 /* Ignore the unlink return value, as an error already happened. */
1420 uv_fs_req_cleanup(&fs_req);
1421 }
1422 }
1423
1424 if (result == 0)
1425 return 0;
1426
1427 errno = UV__ERR(result);
1428 return -1;
1429 }
1430
1431 static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
1432 dst->st_dev = src->st_dev;
1433 dst->st_mode = src->st_mode;
1434 dst->st_nlink = src->st_nlink;
1435 dst->st_uid = src->st_uid;
1436 dst->st_gid = src->st_gid;
1437 dst->st_rdev = src->st_rdev;
1438 dst->st_ino = src->st_ino;
1439 dst->st_size = src->st_size;
1440 dst->st_blksize = src->st_blksize;
1441 dst->st_blocks = src->st_blocks;
1442
1443 #if defined(__APPLE__)
1444 dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
1445 dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
1446 dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
1447 dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
1448 dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
1449 dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
1450 dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
1451 dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
1452 dst->st_flags = src->st_flags;
1453 dst->st_gen = src->st_gen;
1454 #elif defined(__ANDROID__)
1455 dst->st_atim.tv_sec = src->st_atime;
1456 dst->st_atim.tv_nsec = src->st_atimensec;
1457 dst->st_mtim.tv_sec = src->st_mtime;
1458 dst->st_mtim.tv_nsec = src->st_mtimensec;
1459 dst->st_ctim.tv_sec = src->st_ctime;
1460 dst->st_ctim.tv_nsec = src->st_ctimensec;
1461 dst->st_birthtim.tv_sec = src->st_ctime;
1462 dst->st_birthtim.tv_nsec = src->st_ctimensec;
1463 dst->st_flags = 0;
1464 dst->st_gen = 0;
1465 #elif !defined(_AIX) && \
1466 !defined(__MVS__) && ( \
1467 defined(__DragonFly__) || \
1468 defined(__FreeBSD__) || \
1469 defined(__OpenBSD__) || \
1470 defined(__NetBSD__) || \
1471 defined(_GNU_SOURCE) || \
1472 defined(_BSD_SOURCE) || \
1473 defined(_SVID_SOURCE) || \
1474 defined(_XOPEN_SOURCE) || \
1475 defined(_DEFAULT_SOURCE))
1476 dst->st_atim.tv_sec = src->st_atim.tv_sec;
1477 dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
1478 dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
1479 dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
1480 dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
1481 dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
1482 # if defined(__FreeBSD__) || \
1483 defined(__NetBSD__)
1484 dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
1485 dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
1486 dst->st_flags = src->st_flags;
1487 dst->st_gen = src->st_gen;
1488 # else
1489 dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
1490 dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
1491 dst->st_flags = 0;
1492 dst->st_gen = 0;
1493 # endif
1494 #else
1495 dst->st_atim.tv_sec = src->st_atime;
1496 dst->st_atim.tv_nsec = 0;
1497 dst->st_mtim.tv_sec = src->st_mtime;
1498 dst->st_mtim.tv_nsec = 0;
1499 dst->st_ctim.tv_sec = src->st_ctime;
1500 dst->st_ctim.tv_nsec = 0;
1501 dst->st_birthtim.tv_sec = src->st_ctime;
1502 dst->st_birthtim.tv_nsec = 0;
1503 dst->st_flags = 0;
1504 dst->st_gen = 0;
1505 #endif
1506 }
1507
1508
1509 static int uv__fs_statx(int fd,
1510 const char* path,
1511 int is_fstat,
1512 int is_lstat,
1513 uv_stat_t* buf) {
1514 STATIC_ASSERT(UV_ENOSYS != -1);
1515 #ifdef __linux__
1516 static int no_statx;
1517 struct uv__statx statxbuf;
1518 int dirfd;
1519 int flags;
1520 int mode;
1521 int rc;
1522
1523 if (uv__load_relaxed(&no_statx))
1524 return UV_ENOSYS;
1525
1526 dirfd = AT_FDCWD;
1527 flags = 0; /* AT_STATX_SYNC_AS_STAT */
1528 mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */
1529
1530 if (is_fstat) {
1531 dirfd = fd;
1532 flags |= 0x1000; /* AT_EMPTY_PATH */
1533 }
1534
1535 if (is_lstat)
1536 flags |= AT_SYMLINK_NOFOLLOW;
1537
1538 rc = uv__statx(dirfd, path, flags, mode, &statxbuf);
1539
1540 switch (rc) {
1541 case 0:
1542 break;
1543 case -1:
1544 /* EPERM happens when a seccomp filter rejects the system call.
1545 * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
1546 * EOPNOTSUPP is used on DVS exported filesystems
1547 */
1548 if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
1549 return -1;
1550 /* Fall through. */
1551 default:
1552 /* Normally zero is returned on success and -1 on error. However, on
1553 * S390 RHEL running in a docker container with statx not implemented,
1554 * rc has been observed to be 1 with the error code set to 0, in which
1555 * case we return ENOSYS.
1556 */
1557 uv__store_relaxed(&no_statx, 1);
1558 return UV_ENOSYS;
1559 }
1560
1561 buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
1562 buf->st_mode = statxbuf.stx_mode;
1563 buf->st_nlink = statxbuf.stx_nlink;
1564 buf->st_uid = statxbuf.stx_uid;
1565 buf->st_gid = statxbuf.stx_gid;
1566 buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
1567 buf->st_ino = statxbuf.stx_ino;
1568 buf->st_size = statxbuf.stx_size;
1569 buf->st_blksize = statxbuf.stx_blksize;
1570 buf->st_blocks = statxbuf.stx_blocks;
1571 buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
1572 buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
1573 buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
1574 buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
1575 buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
1576 buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
1577 buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
1578 buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
1579 buf->st_flags = 0;
1580 buf->st_gen = 0;
1581
1582 return 0;
1583 #else
1584 return UV_ENOSYS;
1585 #endif /* __linux__ */
1586 }
1587
1588
1589 static int uv__fs_stat(const char *path, uv_stat_t *buf) {
1590 struct stat pbuf;
1591 int ret;
1592
1593 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
1594 if (ret != UV_ENOSYS)
1595 return ret;
1596
1597 ret = stat(path, &pbuf);
1598 if (ret == 0)
1599 uv__to_stat(&pbuf, buf);
1600
1601 return ret;
1602 }
1603
1604
1605 static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
1606 struct stat pbuf;
1607 int ret;
1608
1609 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
1610 if (ret != UV_ENOSYS)
1611 return ret;
1612
1613 ret = lstat(path, &pbuf);
1614 if (ret == 0)
1615 uv__to_stat(&pbuf, buf);
1616
1617 return ret;
1618 }
1619
1620
1621 static int uv__fs_fstat(int fd, uv_stat_t *buf) {
1622 struct stat pbuf;
1623 int ret;
1624
1625 ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
1626 if (ret != UV_ENOSYS)
1627 return ret;
1628
1629 ret = fstat(fd, &pbuf);
1630 if (ret == 0)
1631 uv__to_stat(&pbuf, buf);
1632
1633 return ret;
1634 }
1635
1636 static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
1637 size_t offset;
1638 /* Figure out which bufs are done */
1639 for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
1640 size -= bufs[offset].len;
1641
1642 /* Fix a partial read/write */
1643 if (size > 0) {
1644 bufs[offset].base += size;
1645 bufs[offset].len -= size;
1646 }
1647 return offset;
1648 }
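/* Worked example (illustrative): with two 8-byte buffers and size == 11,
 * the first buffer is fully consumed (offset becomes 1, 3 bytes remain),
 * then the second buffer is adjusted in place to base += 3, len == 5, and
 * 1 is returned so the caller can resume from the partially written buffer.
 */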
1649
1650 static ssize_t uv__fs_write_all(uv_fs_t* req) {
1651 unsigned int iovmax;
1652 unsigned int nbufs;
1653 uv_buf_t* bufs;
1654 ssize_t total;
1655 ssize_t result;
1656
1657 iovmax = uv__getiovmax();
1658 nbufs = req->nbufs;
1659 bufs = req->bufs;
1660 total = 0;
1661
1662 while (nbufs > 0) {
1663 req->nbufs = nbufs;
1664 if (req->nbufs > iovmax)
1665 req->nbufs = iovmax;
1666
1667 do
1668 result = uv__fs_write(req);
1669 while (result < 0 && errno == EINTR);
1670
1671 if (result <= 0) {
1672 if (total == 0)
1673 total = result;
1674 break;
1675 }
1676
1677 if (req->off >= 0)
1678 req->off += result;
1679
1680 req->nbufs = uv__fs_buf_offset(req->bufs, result);
1681 req->bufs += req->nbufs;
1682 nbufs -= req->nbufs;
1683 total += result;
1684 }
1685
1686 if (bufs != req->bufsml)
1687 uv__free(bufs);
1688
1689 req->bufs = NULL;
1690 req->nbufs = 0;
1691
1692 return total;
1693 }
1694
1695
1696 static void uv__fs_work(struct uv__work* w) {
1697 int retry_on_eintr;
1698 uv_fs_t* req;
1699 ssize_t r;
1700
1701 req = container_of(w, uv_fs_t, work_req);
1702 retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
1703 req->fs_type == UV_FS_READ);
1704
1705 do {
1706 errno = 0;
1707
1708 #define X(type, action) \
1709 case UV_FS_ ## type: \
1710 r = action; \
1711 break;
1712
1713 switch (req->fs_type) {
1714 X(ACCESS, access(req->path, req->flags));
1715 X(CHMOD, chmod(req->path, req->mode));
1716 X(CHOWN, chown(req->path, req->uid, req->gid));
1717 X(CLOSE, uv__fs_close(req->file));
1718 X(COPYFILE, uv__fs_copyfile(req));
1719 X(FCHMOD, fchmod(req->file, req->mode));
1720 X(FCHOWN, fchown(req->file, req->uid, req->gid));
1721 X(LCHOWN, lchown(req->path, req->uid, req->gid));
1722 X(FDATASYNC, uv__fs_fdatasync(req));
1723 X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
1724 X(FSYNC, uv__fs_fsync(req));
1725 X(FTRUNCATE, ftruncate(req->file, req->off));
1726 X(FUTIME, uv__fs_futime(req));
1727 X(LUTIME, uv__fs_lutime(req));
1728 X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
1729 X(LINK, link(req->path, req->new_path));
1730 X(MKDIR, mkdir(req->path, req->mode));
1731 X(MKDTEMP, uv__fs_mkdtemp(req));
1732 X(MKSTEMP, uv__fs_mkstemp(req));
1733 X(OPEN, uv__fs_open(req));
1734 X(READ, uv__fs_read(req));
1735 X(SCANDIR, uv__fs_scandir(req));
1736 X(OPENDIR, uv__fs_opendir(req));
1737 X(READDIR, uv__fs_readdir(req));
1738 X(CLOSEDIR, uv__fs_closedir(req));
1739 X(READLINK, uv__fs_readlink(req));
1740 X(REALPATH, uv__fs_realpath(req));
1741 X(RENAME, rename(req->path, req->new_path));
1742 X(RMDIR, rmdir(req->path));
1743 X(SENDFILE, uv__fs_sendfile(req));
1744 X(STAT, uv__fs_stat(req->path, &req->statbuf));
1745 X(STATFS, uv__fs_statfs(req));
1746 X(SYMLINK, symlink(req->path, req->new_path));
1747 X(UNLINK, unlink(req->path));
1748 X(UTIME, uv__fs_utime(req));
1749 X(WRITE, uv__fs_write_all(req));
1750 default: abort();
1751 }
1752 #undef X
1753 } while (r == -1 && errno == EINTR && retry_on_eintr);
1754
1755 if (r == -1)
1756 req->result = UV__ERR(errno);
1757 else
1758 req->result = r;
1759
1760 if (r == 0 && (req->fs_type == UV_FS_STAT ||
1761 req->fs_type == UV_FS_FSTAT ||
1762 req->fs_type == UV_FS_LSTAT)) {
1763 req->ptr = &req->statbuf;
1764 }
1765 }
1766
1767
1768 static void uv__fs_done(struct uv__work* w, int status) {
1769 uv_fs_t* req;
1770
1771 req = container_of(w, uv_fs_t, work_req);
1772 uv__req_unregister(req->loop, req);
1773
1774 if (status == UV_ECANCELED) {
1775 assert(req->result == 0);
1776 req->result = UV_ECANCELED;
1777 }
1778
1779 req->cb(req);
1780 }
1781
1782
1783 int uv_fs_access(uv_loop_t* loop,
1784 uv_fs_t* req,
1785 const char* path,
1786 int flags,
1787 uv_fs_cb cb) {
1788 INIT(ACCESS);
1789 PATH;
1790 req->flags = flags;
1791 POST;
1792 }
1793
1794
1795 int uv_fs_chmod(uv_loop_t* loop,
1796 uv_fs_t* req,
1797 const char* path,
1798 int mode,
1799 uv_fs_cb cb) {
1800 INIT(CHMOD);
1801 PATH;
1802 req->mode = mode;
1803 POST;
1804 }
1805
1806
1807 int uv_fs_chown(uv_loop_t* loop,
1808 uv_fs_t* req,
1809 const char* path,
1810 uv_uid_t uid,
1811 uv_gid_t gid,
1812 uv_fs_cb cb) {
1813 INIT(CHOWN);
1814 PATH;
1815 req->uid = uid;
1816 req->gid = gid;
1817 POST;
1818 }
1819
1820
1821 int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1822 INIT(CLOSE);
1823 req->file = file;
1824 POST;
1825 }
1826
1827
1828 int uv_fs_fchmod(uv_loop_t* loop,
1829 uv_fs_t* req,
1830 uv_file file,
1831 int mode,
1832 uv_fs_cb cb) {
1833 INIT(FCHMOD);
1834 req->file = file;
1835 req->mode = mode;
1836 POST;
1837 }
1838
1839
1840 int uv_fs_fchown(uv_loop_t* loop,
1841 uv_fs_t* req,
1842 uv_file file,
1843 uv_uid_t uid,
1844 uv_gid_t gid,
1845 uv_fs_cb cb) {
1846 INIT(FCHOWN);
1847 req->file = file;
1848 req->uid = uid;
1849 req->gid = gid;
1850 POST;
1851 }
1852
1853
1854 int uv_fs_lchown(uv_loop_t* loop,
1855 uv_fs_t* req,
1856 const char* path,
1857 uv_uid_t uid,
1858 uv_gid_t gid,
1859 uv_fs_cb cb) {
1860 INIT(LCHOWN);
1861 PATH;
1862 req->uid = uid;
1863 req->gid = gid;
1864 POST;
1865 }
1866
1867
1868 int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1869 INIT(FDATASYNC);
1870 req->file = file;
1871 POST;
1872 }
1873
1874
1875 int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1876 INIT(FSTAT);
1877 req->file = file;
1878 POST;
1879 }
1880
1881
1882 int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
1883 INIT(FSYNC);
1884 req->file = file;
1885 POST;
1886 }
1887
1888
1889 int uv_fs_ftruncate(uv_loop_t* loop,
1890 uv_fs_t* req,
1891 uv_file file,
1892 int64_t off,
1893 uv_fs_cb cb) {
1894 INIT(FTRUNCATE);
1895 req->file = file;
1896 req->off = off;
1897 POST;
1898 }
1899
1900
1901 int uv_fs_futime(uv_loop_t* loop,
1902 uv_fs_t* req,
1903 uv_file file,
1904 double atime,
1905 double mtime,
1906 uv_fs_cb cb) {
1907 INIT(FUTIME);
1908 req->file = file;
1909 req->atime = atime;
1910 req->mtime = mtime;
1911 POST;
1912 }
1913
1914 int uv_fs_lutime(uv_loop_t* loop,
1915 uv_fs_t* req,
1916 const char* path,
1917 double atime,
1918 double mtime,
1919 uv_fs_cb cb) {
1920 INIT(LUTIME);
1921 PATH;
1922 req->atime = atime;
1923 req->mtime = mtime;
1924 POST;
1925 }
1926
1927
1928 int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
1929 INIT(LSTAT);
1930 PATH;
1931 POST;
1932 }
1933
1934
1935 int uv_fs_link(uv_loop_t* loop,
1936 uv_fs_t* req,
1937 const char* path,
1938 const char* new_path,
1939 uv_fs_cb cb) {
1940 INIT(LINK);
1941 PATH2;
1942 POST;
1943 }
1944
1945
1946 int uv_fs_mkdir(uv_loop_t* loop,
1947 uv_fs_t* req,
1948 const char* path,
1949 int mode,
1950 uv_fs_cb cb) {
1951 INIT(MKDIR);
1952 PATH;
1953 req->mode = mode;
1954 POST;
1955 }
1956
1957
1958 int uv_fs_mkdtemp(uv_loop_t* loop,
1959 uv_fs_t* req,
1960 const char* tpl,
1961 uv_fs_cb cb) {
1962 INIT(MKDTEMP);
1963 req->path = uv__strdup(tpl);
1964 if (req->path == NULL)
1965 return UV_ENOMEM;
1966 POST;
1967 }
1968
1969
1970 int uv_fs_mkstemp(uv_loop_t* loop,
1971 uv_fs_t* req,
1972 const char* tpl,
1973 uv_fs_cb cb) {
1974 INIT(MKSTEMP);
1975 req->path = uv__strdup(tpl);
1976 if (req->path == NULL)
1977 return UV_ENOMEM;
1978 POST;
1979 }
1980
1981
1982 int uv_fs_open(uv_loop_t* loop,
1983 uv_fs_t* req,
1984 const char* path,
1985 int flags,
1986 int mode,
1987 uv_fs_cb cb) {
1988 INIT(OPEN);
1989 PATH;
1990 req->flags = flags;
1991 req->mode = mode;
1992 POST;
1993 }
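/* Synchronous usage sketch (illustrative; "/tmp/example.txt" is a placeholder
 * path and error handling is omitted). With a NULL callback the request runs
 * inline and the return value is the new fd or a negative UV_* error code:
 *
 *   uv_fs_t req;
 *   int fd = uv_fs_open(NULL, &req, "/tmp/example.txt", O_RDONLY, 0, NULL);
 *   uv_fs_req_cleanup(&req);
 *
 * O_CLOEXEC is added by uv__fs_open() itself on platforms that support it.
 */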
1994
1995
1996 int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
1997 uv_file file,
1998 const uv_buf_t bufs[],
1999 unsigned int nbufs,
2000 int64_t off,
2001 uv_fs_cb cb) {
2002 INIT(READ);
2003
2004 if (bufs == NULL || nbufs == 0)
2005 return UV_EINVAL;
2006
2007 req->file = file;
2008
2009 req->nbufs = nbufs;
2010 req->bufs = req->bufsml;
2011 if (nbufs > ARRAY_SIZE(req->bufsml))
2012 req->bufs = uv__malloc(nbufs * sizeof(*bufs));
2013
2014 if (req->bufs == NULL)
2015 return UV_ENOMEM;
2016
2017 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
2018
2019 req->off = off;
2020 POST;
2021 }
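/* Synchronous read sketch (illustrative; fd is assumed to come from a prior
 * uv_fs_open() call and error handling is omitted):
 *
 *   char data[64];
 *   uv_buf_t iov = uv_buf_init(data, sizeof(data));
 *   uv_fs_t req;
 *   int n = uv_fs_read(NULL, &req, fd, &iov, 1, -1, NULL);
 *   uv_fs_req_cleanup(&req);
 *
 * A negative offset reads from the current file position; a non-negative one
 * turns the call into a positional pread()/preadv().
 */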
2022
2023
2024 int uv_fs_scandir(uv_loop_t* loop,
2025 uv_fs_t* req,
2026 const char* path,
2027 int flags,
2028 uv_fs_cb cb) {
2029 INIT(SCANDIR);
2030 PATH;
2031 req->flags = flags;
2032 POST;
2033 }
2034
2035 int uv_fs_opendir(uv_loop_t* loop,
2036 uv_fs_t* req,
2037 const char* path,
2038 uv_fs_cb cb) {
2039 INIT(OPENDIR);
2040 PATH;
2041 POST;
2042 }
2043
2044 int uv_fs_readdir(uv_loop_t* loop,
2045 uv_fs_t* req,
2046 uv_dir_t* dir,
2047 uv_fs_cb cb) {
2048 INIT(READDIR);
2049
2050 if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
2051 return UV_EINVAL;
2052
2053 req->ptr = dir;
2054 POST;
2055 }
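/* Directory iteration sketch (illustrative, synchronous, error checks
 * omitted). The caller owns the dirents array and must set dir->dirents and
 * dir->nentries before every uv_fs_readdir() call:
 *
 *   uv_fs_t req;
 *   uv_dirent_t entries[8];
 *   uv_dir_t* dir;
 *   int n;
 *
 *   uv_fs_opendir(NULL, &req, ".", NULL);
 *   dir = req.ptr;
 *   uv_fs_req_cleanup(&req);
 *
 *   dir->dirents = entries;
 *   dir->nentries = 8;
 *   while ((n = uv_fs_readdir(NULL, &req, dir, NULL)) > 0) {
 *     (consume entries[0..n-1] here; the cleanup below frees the copied names)
 *     uv_fs_req_cleanup(&req);
 *   }
 *   uv_fs_req_cleanup(&req);
 *
 *   uv_fs_closedir(NULL, &req, dir, NULL);
 *   uv_fs_req_cleanup(&req);
 */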
2056
2057 int uv_fs_closedir(uv_loop_t* loop,
2058 uv_fs_t* req,
2059 uv_dir_t* dir,
2060 uv_fs_cb cb) {
2061 INIT(CLOSEDIR);
2062
2063 if (dir == NULL)
2064 return UV_EINVAL;
2065
2066 req->ptr = dir;
2067 POST;
2068 }
2069
2070 int uv_fs_readlink(uv_loop_t* loop,
2071 uv_fs_t* req,
2072 const char* path,
2073 uv_fs_cb cb) {
2074 INIT(READLINK);
2075 PATH;
2076 POST;
2077 }
2078
2079
2080 int uv_fs_realpath(uv_loop_t* loop,
2081 uv_fs_t* req,
2082 const char * path,
2083 uv_fs_cb cb) {
2084 INIT(REALPATH);
2085 PATH;
2086 POST;
2087 }
2088
2089
2090 int uv_fs_rename(uv_loop_t* loop,
2091 uv_fs_t* req,
2092 const char* path,
2093 const char* new_path,
2094 uv_fs_cb cb) {
2095 INIT(RENAME);
2096 PATH2;
2097 POST;
2098 }
2099
2100
2101 int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
2102 INIT(RMDIR);
2103 PATH;
2104 POST;
2105 }
2106
2107
2108 int uv_fs_sendfile(uv_loop_t* loop,
2109 uv_fs_t* req,
2110 uv_file out_fd,
2111 uv_file in_fd,
2112 int64_t off,
2113 size_t len,
2114 uv_fs_cb cb) {
2115 INIT(SENDFILE);
2116 req->flags = in_fd; /* hack */
2117 req->file = out_fd;
2118 req->off = off;
2119 req->bufsml[0].len = len;
2120 POST;
2121 }
2122
2123
2124 int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
2125 INIT(STAT);
2126 PATH;
2127 POST;
2128 }
2129
2130
2131 int uv_fs_symlink(uv_loop_t* loop,
2132 uv_fs_t* req,
2133 const char* path,
2134 const char* new_path,
2135 int flags,
2136 uv_fs_cb cb) {
2137 INIT(SYMLINK);
2138 PATH2;
2139 req->flags = flags;
2140 POST;
2141 }
2142
2143
2144 int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
2145 INIT(UNLINK);
2146 PATH;
2147 POST;
2148 }
2149
2150
2151 int uv_fs_utime(uv_loop_t* loop,
2152 uv_fs_t* req,
2153 const char* path,
2154 double atime,
2155 double mtime,
2156 uv_fs_cb cb) {
2157 INIT(UTIME);
2158 PATH;
2159 req->atime = atime;
2160 req->mtime = mtime;
2161 POST;
2162 }
2163
2164
2165 int uv_fs_write(uv_loop_t* loop,
2166 uv_fs_t* req,
2167 uv_file file,
2168 const uv_buf_t bufs[],
2169 unsigned int nbufs,
2170 int64_t off,
2171 uv_fs_cb cb) {
2172 INIT(WRITE);
2173
2174 if (bufs == NULL || nbufs == 0)
2175 return UV_EINVAL;
2176
2177 req->file = file;
2178
2179 req->nbufs = nbufs;
2180 req->bufs = req->bufsml;
2181 if (nbufs > ARRAY_SIZE(req->bufsml))
2182 req->bufs = uv__malloc(nbufs * sizeof(*bufs));
2183
2184 if (req->bufs == NULL)
2185 return UV_ENOMEM;
2186
2187 memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
2188
2189 req->off = off;
2190 POST;
2191 }
2192
2193
2194 void uv_fs_req_cleanup(uv_fs_t* req) {
2195 if (req == NULL)
2196 return;
2197
2198 /* Only necessary for asynchronous requests, i.e., requests with a callback.
2199 * Synchronous ones don't copy their arguments and have req->path and
2200 * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
2201 * UV_FS_MKSTEMP are the exception to the rule; they always allocate memory.
2202 */
2203 if (req->path != NULL &&
2204 (req->cb != NULL ||
2205 req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
2206 uv__free((void*) req->path); /* Memory is shared with req->new_path. */
2207
2208 req->path = NULL;
2209 req->new_path = NULL;
2210
2211 if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
2212 uv__fs_readdir_cleanup(req);
2213
2214 if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
2215 uv__fs_scandir_cleanup(req);
2216
2217 if (req->bufs != req->bufsml)
2218 uv__free(req->bufs);
2219 req->bufs = NULL;
2220
2221 if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
2222 uv__free(req->ptr);
2223 req->ptr = NULL;
2224 }
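/* Cleanup sketch (illustrative): every request, synchronous or asynchronous,
 * should be passed to uv_fs_req_cleanup() once its result has been consumed:
 *
 *   uv_fs_t req;
 *   if (uv_fs_stat(NULL, &req, "/tmp", NULL) == 0) {
 *     uint64_t size = req.statbuf.st_size;
 *     (void) size;
 *   }
 *   uv_fs_req_cleanup(&req);
 *
 * For asynchronous calls the same cleanup typically happens at the end of the
 * completion callback.
 */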
2225
2226
2227 int uv_fs_copyfile(uv_loop_t* loop,
2228 uv_fs_t* req,
2229 const char* path,
2230 const char* new_path,
2231 int flags,
2232 uv_fs_cb cb) {
2233 INIT(COPYFILE);
2234
2235 if (flags & ~(UV_FS_COPYFILE_EXCL |
2236 UV_FS_COPYFILE_FICLONE |
2237 UV_FS_COPYFILE_FICLONE_FORCE)) {
2238 return UV_EINVAL;
2239 }
2240
2241 PATH2;
2242 req->flags = flags;
2243 POST;
2244 }
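/* Usage sketch (illustrative; "src.bin" and "dst.bin" are placeholder paths):
 * request a copy-on-write clone where the filesystem supports it, silently
 * falling back to a regular copy otherwise:
 *
 *   uv_fs_t req;
 *   int r = uv_fs_copyfile(NULL, &req, "src.bin", "dst.bin",
 *                          UV_FS_COPYFILE_FICLONE, NULL);
 *   uv_fs_req_cleanup(&req);
 *
 * Passing UV_FS_COPYFILE_FICLONE_FORCE instead makes the clone mandatory and
 * surfaces the ioctl error (or UV_ENOSYS) when cloning is unavailable.
 */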
2245
2246
2247 int uv_fs_statfs(uv_loop_t* loop,
2248 uv_fs_t* req,
2249 const char* path,
2250 uv_fs_cb cb) {
2251 INIT(STATFS);
2252 PATH;
2253 POST;
2254 }
2255
2256 int uv_fs_get_system_error(const uv_fs_t* req) {
2257 return -req->result;
2258 }
2259