Lines matching "+full:timeout +full:- +full:minutes"
In uv__epoll_init():

   34  if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
   37  if (fd != -1)
   41  loop->backend_fd = fd;
   42  if (fd == -1)
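The matches elide the call that produces fd, but the pattern they trace is the classic epoll_create1() probe with an epoll_create() fallback for old kernels and libcs. A minimal sketch of that pattern (the helper name set_cloexec() is hypothetical, not libuv's):

    #include <errno.h>
    #include <fcntl.h>
    #include <sys/epoll.h>

    /* Hypothetical helper: mark fd close-on-exec after the fact. */
    static int set_cloexec(int fd) {
      int flags = fcntl(fd, F_GETFD);
      if (flags == -1)
        return -1;
      return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }

    int epoll_backend_open(void) {
      /* ENOSYS: kernel has no epoll_create1(); EINVAL: flag unknown. */
      int fd = epoll_create1(EPOLL_CLOEXEC);
      if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
        fd = epoll_create(256);  /* size hint, ignored since Linux 2.6.8 */
        if (fd != -1)
          set_cloexec(fd);
      }
      return fd;  /* -1 with errno set on failure */
    }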
In uv__platform_invalidate_fd():

   55  assert(loop->watchers != NULL);
   58  events = (struct epoll_event*) loop->watchers[loop->nwatchers];
   59  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
   64  events[i].data.fd = -1;
   72  if (loop->backend_fd >= 0) {
   77  epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
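These fragments show the two halves of retiring a closed fd: scrub it out of any epoll_event batch the loop has not finished delivering (lines 58-59 read the batch stashed past the end of loop->watchers; see lines 313-314 further down), then drop the kernel registration. A standalone sketch of the same idea, with illustrative names:

    #include <string.h>
    #include <sys/epoll.h>

    void invalidate_fd(int backend_fd, struct epoll_event* pending,
                       unsigned int npending, int fd) {
      struct epoll_event dummy;
      unsigned int i;

      /* Scrub the fd from events not yet delivered; the delivery
       * loop skips entries whose data.fd is -1. */
      for (i = 0; i < npending; i++)
        if (pending[i].data.fd == fd)
          pending[i].data.fd = -1;

      /* Deregister. A non-NULL (if unused) event struct keeps this
       * working on kernels before 2.6.9, which rejected NULL here. */
      memset(&dummy, 0, sizeof(dummy));
      if (backend_fd >= 0)
        epoll_ctl(backend_fd, EPOLL_CTL_DEL, fd, &dummy);
    }

Ignoring the epoll_ctl() result is deliberate: the fd may already be gone, and there is nothing useful to do if deletion fails.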
In uv__io_check_fd():

   88  e.data.fd = -1;
   91  if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
   96  if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
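uv__io_check_fd() probes whether epoll will accept the fd at all by adding and immediately removing it; the kernel refuses fds that cannot be polled, such as regular files, with EPERM. A hedged sketch with an invented return convention (1 = pollable, 0 = not):

    #include <errno.h>
    #include <string.h>
    #include <sys/epoll.h>

    int fd_is_pollable(int backend_fd, int fd) {
      struct epoll_event e;

      memset(&e, 0, sizeof(e));
      e.events = EPOLLIN;
      e.data.fd = -1;

      if (epoll_ctl(backend_fd, EPOLL_CTL_ADD, fd, &e))
        return errno == EEXIST;  /* already registered: clearly pollable */

      epoll_ctl(backend_fd, EPOLL_CTL_DEL, fd, &e);  /* undo the probe */
      return 1;
    }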
In uv__io_poll():

  103  void uv__io_poll(uv_loop_t* loop, int timeout) {
  104  /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
  106   * indefinitely, we cap the timeout and poll again if necessary.
  108   * Note that "30 minutes" is a simplification because it depends on
  136  if (loop->nfds == 0) {
  137  assert(QUEUE_EMPTY(&loop->watcher_queue));
  143  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
  144  q = QUEUE_HEAD(&loop->watcher_queue);
  149  assert(w->pevents != 0);
  150  assert(w->fd >= 0);
  151  assert(w->fd < (int) loop->nwatchers);
  153  e.events = w->pevents;
  154  e.data.fd = w->fd;
  156  if (w->events == 0)
  164  if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
  171  if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
  175  w->events = w->pevents;
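Lines 143-175 flush the queue of watchers whose interest mask changed since the last iteration. The elided lines choose op = EPOLL_CTL_ADD when the kernel has never seen the fd (w->events == 0) and EPOLL_CTL_MOD otherwise; an EEXIST from ADD means a previously watched fd was reactivated, so the code retries with MOD. A compact sketch of that decision (epoll_apply() and its signature are illustrative):

    #include <errno.h>
    #include <stdlib.h>
    #include <sys/epoll.h>

    /* prev_events is the mask the kernel currently knows;
     * 0 means "never registered". */
    void epoll_apply(int backend_fd, int fd, unsigned int events,
                     unsigned int prev_events) {
      struct epoll_event e;
      int op;

      e.events = events;
      e.data.fd = fd;

      op = prev_events == 0 ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
      if (epoll_ctl(backend_fd, op, fd, &e) == 0)
        return;

      /* ADD on a reactivated fd the kernel still tracks: EEXIST.
       * Switch to MOD; anything else is a hard bug. */
      if (op != EPOLL_CTL_ADD || errno != EEXIST)
        abort();
      if (epoll_ctl(backend_fd, EPOLL_CTL_MOD, fd, &e))
        abort();
    }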
In uv__io_poll(), continued:

  179  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
  182  sigmask |= 1 << (SIGPROF - 1);
  185  assert(timeout >= -1);
  186  base = loop->time;
  188  real_timeout = timeout;
  190  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
  192  user_timeout = timeout;
  193  timeout = 0;
  209  /* Only need to set the provider_entry_time if timeout != 0. The function
  212  if (timeout != 0)
  218  if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
  219  timeout = max_safe_timeout;
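Lines 218-219 are the workaround for the kernel bug described at the top of the function: on a 32-bit long, the kernel's milliseconds-to-jiffies conversion multiplies by CONFIG_HZ and can overflow, turning a long finite timeout into an effectively infinite one. The fix is to clamp the wait and let the re-poll logic at the bottom of the function sleep out the remainder. A sketch of the clamp; the constant matches libuv's max_safe_timeout as far as I know (1789569 * 1200 barely stays under 2^31):

    #include <stdint.h>

    /* Largest timeout (ms) whose jiffies conversion cannot overflow
     * a 32-bit long, assuming CONFIG_HZ <= 1200. */
    #define MAX_SAFE_TIMEOUT 1789569

    static int cap_timeout(int timeout) {
      if (sizeof(int32_t) == sizeof(long) && timeout >= MAX_SAFE_TIMEOUT)
        return MAX_SAFE_TIMEOUT;
      return timeout;
    }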
In uv__io_poll(), continued:

  226  nfds = epoll_pwait(loop->backend_fd,
  229                     timeout,
  231  if (nfds == -1 && errno == ENOSYS) {
  236  nfds = epoll_wait(loop->backend_fd,
  239                    timeout);
  240  if (nfds == -1 && errno == ENOSYS) {
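Lines 226-240 prefer epoll_pwait(), which swaps the signal mask atomically for the duration of the wait (this is what makes the SIGPROF blocking at line 182 race-free), and fall back to plain epoll_wait() on kernels before 2.6.19 that return ENOSYS. A sketch of the fallback with illustrative names; the pthread_sigmask() emulation has a small race window that the real syscall avoids:

    #include <errno.h>
    #include <pthread.h>
    #include <signal.h>
    #include <sys/epoll.h>

    static int no_epoll_pwait;  /* sticky: kernel lacks epoll_pwait() */

    int wait_for_events(int backend_fd, struct epoll_event* events,
                        int nevents, int timeout, const sigset_t* sigmask) {
      sigset_t saved;
      int nfds;

      if (!no_epoll_pwait) {
        nfds = epoll_pwait(backend_fd, events, nevents, timeout, sigmask);
        if (nfds != -1 || errno != ENOSYS)
          return nfds;
        no_epoll_pwait = 1;  /* remember and fall through */
      }

      /* Non-atomic emulation: block, wait, restore. */
      pthread_sigmask(SIG_SETMASK, sigmask, &saved);
      nfds = epoll_wait(backend_fd, events, nevents, timeout);
      pthread_sigmask(SIG_SETMASK, &saved, NULL);
      return nfds;
    }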
In uv__io_poll(), continued:

  250  /* Update loop->time unconditionally. It's tempting to skip the update when
  251   * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
  257  assert(timeout != -1);
  260  timeout = user_timeout;
  264  if (timeout == -1)
  267  if (timeout == 0)
  270  /* We may have been inside the system call for longer than |timeout|
  276  if (nfds == -1) {
  287  timeout = user_timeout;
  291  if (timeout == -1)
  294  if (timeout == 0)
  297  /* Interrupted by a signal. Update timeout and poll again. */
  305  /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
  312  assert(loop->watchers != NULL);
  313  loop->watchers[loop->nwatchers] = x.watchers;
  314  loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
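Lines 313-314 park the freshly returned events array and its length in two spare slots past the end of loop->watchers; that is exactly the stash uv__platform_invalidate_fd() reads at lines 58-59 when a callback closes an fd mid-batch. A minimal illustration of the convention (function names are mine):

    #include <stdint.h>
    #include <sys/epoll.h>

    /* watchers has nwatchers + 2 usable slots; the last two carry the
     * in-flight event batch so out-of-band code can scrub it. */
    static void stash_batch(void** watchers, unsigned int nwatchers,
                            struct epoll_event* events, int nfds) {
      watchers[nwatchers] = (void*) events;
      watchers[nwatchers + 1] = (void*) (uintptr_t) nfds;
    }

    static void clear_batch(void** watchers, unsigned int nwatchers) {
      watchers[nwatchers] = NULL;      /* see lines 391-392 below */
      watchers[nwatchers + 1] = NULL;
    }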
In uv__io_poll(), continued:

  319  fd = pe->data.fd;
  322  if (fd == -1)
  326  assert((unsigned) fd < loop->nwatchers);
  328  w = loop->watchers[fd];
  336  epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
  345  pe->events &= w->pevents | POLLERR | POLLHUP;
  360   * free when we switch over to edge-triggered I/O.
  362  if (pe->events == POLLERR || pe->events == POLLHUP)
  363  pe->events |=
  364    w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
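Line 345 filters the reported events down to what the watcher still wants (stale events can arrive after a stop/start cycle) while always passing POLLERR and POLLHUP, which the kernel reports even when unrequested. Lines 362-364 then widen a bare error/hangup event with the requested read/write bits so the callback runs and discovers the EOF or error from its own read()/write() call. Expressed as a pure function (UV__POLLRDHUP and UV__POLLPRI dropped for portability):

    #include <poll.h>

    /* revents: bits the kernel reported; pevents: bits the watcher wants. */
    unsigned int filter_events(unsigned int revents, unsigned int pevents) {
      /* Discard stale bits, but error and hangup are always relevant. */
      revents &= pevents | POLLERR | POLLHUP;

      /* A bare POLLERR/POLLHUP carries no direction; add back the
       * requested read/write bits so the right callback path runs. */
      if (revents == POLLERR || revents == POLLHUP)
        revents |= pevents & (POLLIN | POLLOUT);

      return revents;
    }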
In uv__io_poll(), continued:

  366  if (pe->events != 0) {
  370  if (w == &loop->signal_io_watcher) {
  374  w->cb(loop, w, pe->events);
  382  timeout = user_timeout;
  388  loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
  391  loop->watchers[loop->nwatchers] = NULL;
  392  loop->watchers[loop->nwatchers + 1] = NULL;
  398  if (nfds == ARRAY_SIZE(events) && --count != 0) {
  400  timeout = 0;
  406  if (timeout == 0)
  409  if (timeout == -1)
  413  assert(timeout > 0);
  415  real_timeout -= (loop->time - base);
  419  timeout = real_timeout;
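Lines 398-419 handle the two "poll again" outcomes: a completely full events array (more events may be pending, so retry immediately with timeout = 0) and a wait cut short by a signal or by the max_safe_timeout cap, where the remaining budget is real_timeout minus the time already spent. A self-contained sketch of the same deadline arithmetic, using clock_gettime() in place of loop->time:

    #include <errno.h>
    #include <stdint.h>
    #include <sys/epoll.h>
    #include <time.h>

    static uint64_t now_ms(void) {
      struct timespec ts;
      clock_gettime(CLOCK_MONOTONIC, &ts);
      return (uint64_t) ts.tv_sec * 1000 + (uint64_t) ts.tv_nsec / 1000000;
    }

    /* Wait up to `timeout` ms total (-1 = forever), retrying after
     * signal interruptions without extending the overall deadline. */
    int poll_with_deadline(int backend_fd, struct epoll_event* events,
                           int nevents, int timeout) {
      uint64_t base = now_ms();
      int remaining = timeout;

      for (;;) {
        int nfds = epoll_wait(backend_fd, events, nevents, remaining);
        if (nfds != -1 || errno != EINTR)
          return nfds;              /* events, timeout, or hard error */
        if (remaining == -1)
          continue;                 /* infinite wait: just retry */
        if (remaining == 0)
          return 0;                 /* non-blocking poll: done */
        remaining = timeout - (int) (now_ms() - base);
        if (remaining <= 0)
          return 0;                 /* deadline consumed while interrupted */
      }
    }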