/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>

/*
 * Required on
 * - Until at least FreeBSD 11.0
 * - Older versions of Mac OS X
 *
 * http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
 */
#ifndef EV_OOBAND
#define EV_OOBAND EV_FLAG1
#endif

static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);

int uv__kqueue_init(uv_loop_t* loop) {
  loop->backend_fd = kqueue();
  if (loop->backend_fd == -1)
    return UV__ERR(errno);

  uv__cloexec(loop->backend_fd, 1);

  return 0;
}


#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
static int uv__has_forked_with_cfrunloop;
#endif

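/* A kqueue() descriptor is not inherited by the child of fork(), so the loop
 * needs a fresh one after forking. The FSEvents state, by contrast, cannot be
 * restarted in the child; see the comment below. */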
int uv__io_fork(uv_loop_t* loop) {
  int err;
  loop->backend_fd = -1;
  err = uv__kqueue_init(loop);
  if (err)
    return err;

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (loop->cf_state != NULL) {
    /* We cannot start another CFRunloop and/or thread in the child
       process; CF aborts if you try or if you try to touch the thread
       at all to kill it. So the best we can do is ignore it from now
       on. This means we can't watch directories in the same way
       anymore (like other BSDs). It also means we cannot properly
       clean up the allocated resources; calling
       uv__fsevents_loop_delete from uv_loop_close will crash the
       process. So we sidestep the issue by pretending like we never
       started it in the first place.
    */
    uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
    uv__free(loop->cf_state);
    loop->cf_state = NULL;
  }
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
  return err;
}

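/* Probe whether a file descriptor can be watched with kqueue by registering
 * an EVFILT_READ filter for it and immediately deleting it again. */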
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct kevent ev;
  int rc;

  rc = 0;
  EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    rc = UV__ERR(errno);

  EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
  if (rc == 0)
    if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
      abort();

  return rc;
}

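/* Poll for I/O. Pending changes from the watcher queue are first batched into
 * the `events` array and submitted with kevent(); the same array is then
 * reused to receive up to ARRAY_SIZE(events) events per call. */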
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct kevent events[1024];
  struct kevent* ev;
  struct timespec spec;
  unsigned int nevents;
  unsigned int revents;
  QUEUE* q;
  uv__io_t* w;
  uv_process_t* process;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  int have_signals;
  int filter;
  int fflags;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  nevents = 0;

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
      filter = EVFILT_READ;
      fflags = 0;
      op = EV_ADD;

      if (w->cb == uv__fs_event) {
        filter = EVFILT_VNODE;
        fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
      }

      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
      EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    w->events = w->pevents;
  }

  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;; nevents = 0) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    nfds = kevent(loop->backend_fd,
                  events,
                  nevents,
                  events,
                  ARRAY_SIZE(events),
                  timeout == -1 ? NULL : &spec);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      ev = events + i;
      fd = ev->ident;

      /* Handle kevent NOTE_EXIT results */
      if (ev->filter == EVFILT_PROC) {
        QUEUE_FOREACH(q, &loop->process_handles) {
          process = QUEUE_DATA(q, uv_process_t, queue);
          if (process->pid == fd) {
            process->flags |= UV_HANDLE_REAP;
            loop->flags |= UV_LOOP_REAP_CHILDREN;
            break;
          }
        }
        nevents++;
        continue;
      }

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;
      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         * TODO: batch up. */
        struct kevent events[1];

        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
          if (errno != EBADF && errno != ENOENT)
            abort();

        continue;
      }

      if (ev->filter == EVFILT_VNODE) {
        assert(w->events == POLLIN);
        assert(w->pevents == POLLIN);
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
        nevents++;
        continue;
      }

      revents = 0;

      if (ev->filter == EVFILT_READ) {
        if (w->pevents & POLLIN) {
          revents |= POLLIN;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
        if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
          revents |= UV__POLLRDHUP;
      }

      if (ev->filter == EV_OOBAND) {
        if (w->pevents & UV__POLLPRI) {
          revents |= UV__POLLPRI;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->filter == EVFILT_WRITE) {
        if (w->pevents & POLLOUT) {
          revents |= POLLOUT;
          w->wcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->flags & EV_ERROR)
        revents |= POLLERR;

      if (revents == 0)
        continue;

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, revents);
      }

      nevents++;
    }

    if (loop->flags & UV_LOOP_REAP_CHILDREN) {
      loop->flags &= ~UV_LOOP_REAP_CHILDREN;
      uv__wait_children(loop);
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}

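/* Called when a watcher is stopped while uv__io_poll() is still iterating over
 * the kevent results: mark any pending events for `fd` so the loop above skips
 * them. */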
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct kevent* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct kevent*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].ident == fd)
      events[i].ident = -1;
}

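/* EVFILT_VNODE callback: map the kqueue vnode flags to UV_CHANGE/UV_RENAME,
 * invoke the user callback and, if the watcher is still active, re-arm the
 * one-shot kevent. */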
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
  uv_fs_event_t* handle;
  struct kevent ev;
  int events;
  const char* path;
#if defined(F_GETPATH)
  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
  char pathbuf[MAXPATHLEN];
#endif

  handle = container_of(w, uv_fs_event_t, event_watcher);

  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
    events = UV_CHANGE;
  else
    events = UV_RENAME;

  path = NULL;
#if defined(F_GETPATH)
  /* Also works when the file has been unlinked from the file system. Passing
   * in the path when the file has been deleted is arguably a little strange
   * but it's consistent with what the inotify backend does.
   */
  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
    path = uv__basename_r(pathbuf);
#endif
  handle->cb(handle, path, events, 0);

  if (handle->event_watcher.fd == -1)
    return;

  /* Watcher operates in one-shot mode, re-arm it. */
  fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;

  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);

  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    abort();
}


int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}

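/* Start watching `path`. On Apple platforms, directories are handed off to the
 * FSEvents backend when possible; regular files, and loops that have forked
 * while FSEvents was in use, fall back to the kqueue EVFILT_VNODE path. */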
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int fd;
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  struct stat statbuf;
#endif

  if (uv__is_active(handle))
    return UV_EINVAL;

  handle->cb = cb;
  handle->path = uv__strdup(path);
  if (handle->path == NULL)
    return UV_ENOMEM;

  /* TODO open asynchronously - but how do we report back errors? */
  fd = open(handle->path, O_RDONLY);
  if (fd == -1) {
    uv__free(handle->path);
    handle->path = NULL;
    return UV__ERR(errno);
  }

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  /* Nullify field to perform checks later */
  handle->cf_cb = NULL;
  handle->realpath = NULL;
  handle->realpath_len = 0;
  handle->cf_flags = flags;

  if (fstat(fd, &statbuf))
    goto fallback;
  /* FSEvents works only with directories */
  if (!(statbuf.st_mode & S_IFDIR))
    goto fallback;

  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
    int r;
    /* The fallback fd is no longer needed */
    uv__close_nocheckstdio(fd);
    handle->event_watcher.fd = -1;
    r = uv__fsevents_init(handle);
    if (r == 0) {
      uv__handle_start(handle);
    } else {
      uv__free(handle->path);
      handle->path = NULL;
    }
    return r;
  }
fallback:
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */

  uv__handle_start(handle);
  uv__io_init(&handle->event_watcher, uv__fs_event, fd);
  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);

  return 0;
}

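/* Stop the watcher: close the FSEvents channel if one was created, otherwise
 * close the kqueue-backed file descriptor. */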
int uv_fs_event_stop(uv_fs_event_t* handle) {
  int r;
  r = 0;

  if (!uv__is_active(handle))
    return 0;

  uv__handle_stop(handle);

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
    if (handle->cf_cb != NULL)
      r = uv__fsevents_close(handle);
#endif

  if (handle->event_watcher.fd != -1) {
    uv__io_close(handle->loop, &handle->event_watcher);
    uv__close(handle->event_watcher.fd);
    handle->event_watcher.fd = -1;
  }

  uv__free(handle->path);
  handle->path = NULL;

  return r;
}


void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}