/*
 * poll_windows: poll compatibility wrapper for Windows
 * Copyright © 2012-2013 RealVNC Ltd.
 * Copyright © 2009-2010 Pete Batard <pete@akeo.ie>
 * With contributions from Michael Plante, Orin Eman et al.
 * Parts of poll implementation from libusb-win32, by Stephan Meyer et al.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/*
 * poll() and pipe() Windows compatibility layer for libusbx 1.0
 *
 * The way this layer works is by using OVERLAPPED with async I/O transfers, as
 * OVERLAPPED structures have an associated event that is signaled on I/O completion.
 *
 * For USB pollable async I/O, you would typically:
 * - obtain a Windows HANDLE to a file or device that has been opened in
 *   OVERLAPPED mode
 * - call usbi_create_fd with this handle to obtain a custom fd.
 *   Note that if you need simultaneous R/W access, you need to call create_fd
 *   twice, once in RW_READ and once in RW_WRITE mode to obtain 2 separate
 *   pollable fds
 * - let the core functions call the poll routine and flag POLLIN/POLLOUT
 *
 * The pipe pollable synchronous I/O works using the overlapped event associated
 * with a fake pipe. The read/write functions are only meant to be used in that
 * context.
 */
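/*
 * Illustrative sketch (not part of the original source): one way a backend
 * might follow the steps above. The device path, the buffer, and the NULL
 * itransfer/cancel_fn arguments are placeholder assumptions, not values
 * mandated by this layer.
 *
 *   unsigned char buf[64];
 *   HANDLE h = CreateFileA("\\\\.\\MyDevice", GENERIC_READ, 0, NULL,
 *                          OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL);
 *   struct winfd wfd = usbi_create_fd(h, RW_READ, NULL, NULL);
 *   if (wfd.fd >= 0) {
 *           // start the async read, then let the core event loop watch
 *           // wfd.fd for POLLIN through usbi_poll()
 *           ReadFile(h, buf, sizeof(buf), NULL, wfd.overlapped);
 *   }
 */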
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#include "libusbi.h"

// Uncomment to debug the polling layer
//#define DEBUG_POLL_WINDOWS
#if defined(DEBUG_POLL_WINDOWS)
#define poll_dbg usbi_dbg
#else
// MSVC++ < 2005 cannot use a variadic argument and non-MSVC
// compilers produce warnings if parentheses are omitted.
#if defined(_MSC_VER) && (_MSC_VER < 1400)
#define poll_dbg
#else
#define poll_dbg(...)
#endif
#endif

#if defined(_PREFAST_)
#pragma warning(disable:28719)
#endif

#define CHECK_INIT_POLLING do {if(!is_polling_set) init_polling();} while(0)

// public fd data
const struct winfd INVALID_WINFD = {-1, INVALID_HANDLE_VALUE, NULL, NULL, NULL, RW_NONE};
struct winfd poll_fd[MAX_FDS];
// internal fd data
struct {
	CRITICAL_SECTION mutex; // lock for fds
	// Additional variables for XP CancelIoEx partial emulation
	HANDLE original_handle;
	DWORD thread_id;
} _poll_fd[MAX_FDS];

// globals
BOOLEAN is_polling_set = FALSE;
LONG pipe_number = 0;
static volatile LONG compat_spinlock = 0;

#if !defined(_WIN32_WCE)
// CancelIoEx, available on Vista and later only, provides the ability to cancel
// a single transfer (OVERLAPPED) when used. As it may not be part of any of the
// platform headers, we hook into the Kernel32 system DLL directly to look it up.
static BOOL (__stdcall *pCancelIoEx)(HANDLE, LPOVERLAPPED) = NULL;
#define Use_Duplicate_Handles (pCancelIoEx == NULL)

static inline void setup_cancel_io(void)
{
	HMODULE hKernel32 = GetModuleHandleA("KERNEL32");
	if (hKernel32 != NULL) {
		pCancelIoEx = (BOOL (__stdcall *)(HANDLE,LPOVERLAPPED))
			GetProcAddress(hKernel32, "CancelIoEx");
	}
	usbi_dbg("Will use CancelIo%s for I/O cancellation",
		Use_Duplicate_Handles?"":"Ex");
}

static inline BOOL cancel_io(int _index)
{
	if ((_index < 0) || (_index >= MAX_FDS)) {
		return FALSE;
	}

	if ( (poll_fd[_index].fd < 0) || (poll_fd[_index].handle == INVALID_HANDLE_VALUE)
	  || (poll_fd[_index].handle == 0) || (poll_fd[_index].overlapped == NULL) ) {
		return TRUE;
	}
	if (poll_fd[_index].itransfer && poll_fd[_index].cancel_fn) {
		// Cancel outstanding transfer via the specific callback
		(*poll_fd[_index].cancel_fn)(poll_fd[_index].itransfer);
		return TRUE;
	}
	if (pCancelIoEx != NULL) {
		return (*pCancelIoEx)(poll_fd[_index].handle, poll_fd[_index].overlapped);
	}
	if (_poll_fd[_index].thread_id == GetCurrentThreadId()) {
		return CancelIo(poll_fd[_index].handle);
	}
	usbi_warn(NULL, "Unable to cancel I/O that was started from another thread");
	return FALSE;
}
#else
#define Use_Duplicate_Handles FALSE

static __inline void setup_cancel_io()
{
	// No setup needed on WinCE
}

static __inline BOOL cancel_io(int _index)
{
	if ((_index < 0) || (_index >= MAX_FDS)) {
		return FALSE;
	}
	if ( (poll_fd[_index].fd < 0) || (poll_fd[_index].handle == INVALID_HANDLE_VALUE)
	  || (poll_fd[_index].handle == 0) || (poll_fd[_index].overlapped == NULL) ) {
		return TRUE;
	}
	if (poll_fd[_index].itransfer && poll_fd[_index].cancel_fn) {
		// Cancel outstanding transfer via the specific callback
		(*poll_fd[_index].cancel_fn)(poll_fd[_index].itransfer);
	}
	return TRUE;
}
#endif

// Init
void init_polling(void)
{
	int i;

	while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) {
		SleepEx(0, TRUE);
	}
	if (!is_polling_set) {
		setup_cancel_io();
		for (i=0; i<MAX_FDS; i++) {
			poll_fd[i] = INVALID_WINFD;
			_poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
			_poll_fd[i].thread_id = 0;
			InitializeCriticalSection(&_poll_fd[i].mutex);
		}
		is_polling_set = TRUE;
	}
	InterlockedExchange((LONG *)&compat_spinlock, 0);
}

// Internal function to retrieve the table index (and lock the fd mutex)
static int _fd_to_index_and_lock(int fd)
{
	int i;

	if (fd < 0)
		return -1;

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd == fd) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have changed before we got to critical
			if (poll_fd[i].fd != fd) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			return i;
		}
	}
	return -1;
}

static OVERLAPPED *create_overlapped(void)
{
	OVERLAPPED *overlapped = (OVERLAPPED*) calloc(1, sizeof(OVERLAPPED));
	if (overlapped == NULL) {
		return NULL;
	}
	overlapped->hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
	if (overlapped->hEvent == NULL) {
		free(overlapped);
		return NULL;
	}
	return overlapped;
}

static void free_overlapped(OVERLAPPED *overlapped)
{
	if (overlapped == NULL)
		return;

	if ( (overlapped->hEvent != 0)
	  && (overlapped->hEvent != INVALID_HANDLE_VALUE) ) {
		CloseHandle(overlapped->hEvent);
	}
	free(overlapped);
}

void exit_polling(void)
{
	int i;

	while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) {
		SleepEx(0, TRUE);
	}
	if (is_polling_set) {
		is_polling_set = FALSE;

		for (i=0; i<MAX_FDS; i++) {
			// Cancel any async I/O (handle can be invalid)
			cancel_io(i);
			// If anything was pending on that I/O, it should be
			// terminating, and we should be able to access the fd
			// mutex lock before too long
			EnterCriticalSection(&_poll_fd[i].mutex);
			free_overlapped(poll_fd[i].overlapped);
			if (Use_Duplicate_Handles) {
				// Close duplicate handle
				if (_poll_fd[i].original_handle != INVALID_HANDLE_VALUE) {
					CloseHandle(poll_fd[i].handle);
				}
			}
			poll_fd[i] = INVALID_WINFD;
			LeaveCriticalSection(&_poll_fd[i].mutex);
			DeleteCriticalSection(&_poll_fd[i].mutex);
		}
	}
	InterlockedExchange((LONG *)&compat_spinlock, 0);
}

/*
 * Create a fake pipe.
 * As libusbx only uses pipes for signaling, all we need from a pipe is an
 * event. To that end, we create a single wfd and overlapped as a means
 * to access that event.
 */
int usbi_pipe(int filedes[2])
{
	int i;
	OVERLAPPED* overlapped;

	CHECK_INIT_POLLING;

	overlapped = create_overlapped();

	if (overlapped == NULL) {
		return -1;
	}
	// The overlapped must have status pending for signaling to work in poll
	overlapped->Internal = STATUS_PENDING;
	overlapped->InternalHigh = 0;

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd < 0) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been allocated before we got to critical
			if (poll_fd[i].fd >= 0) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}

			// Use index as the unique fd number
			poll_fd[i].fd = i;
			// Read end of the "pipe"
			filedes[0] = poll_fd[i].fd;
			// We can use the same handle for both ends
			filedes[1] = filedes[0];

			poll_fd[i].handle = DUMMY_HANDLE;
			poll_fd[i].overlapped = overlapped;
			// There's no polling on the write end, so we just use READ for our needs
			poll_fd[i].rw = RW_READ;
			_poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return 0;
		}
	}
	free_overlapped(overlapped);
	return -1;
}
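
/*
 * Illustrative sketch (assumption, not part of the original source): the
 * signaling round-trip the fake pipe is designed for. Error handling is
 * omitted for brevity.
 *
 *   int fds[2];
 *   unsigned char dummy = 1;
 *   struct pollfd pfd;
 *
 *   usbi_pipe(fds);                      // both ends share the same event
 *   usbi_write(fds[1], &dummy, 1);       // signal: sets the event
 *
 *   pfd.fd = fds[0];
 *   pfd.events = POLLIN;
 *   if (usbi_poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *           usbi_read(fds[0], &dummy, 1); // consume: may reset the event
 */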

/*
 * Create both an fd and an OVERLAPPED from an open Windows handle, so that
 * it can be used with our polling function.
 * The handle MUST support overlapped transfers (usually requires CreateFile
 * with FILE_FLAG_OVERLAPPED).
 * Returns a pollable file descriptor struct, or INVALID_WINFD on error.
 *
 * Note that the fd returned by this function is a per-transfer fd, rather
 * than a per-session fd and cannot be used for anything else but our
 * custom functions (the fd itself points to the NUL: device).
 * If you plan to do R/W on the same handle, you MUST create 2 fds: one for
 * read and one for write. Using a single R/W fd is unsupported and will
 * produce unexpected results.
 */
struct winfd usbi_create_fd(HANDLE handle, int access_mode, struct usbi_transfer *itransfer, cancel_transfer *cancel_fn)
{
	int i;
	struct winfd wfd = INVALID_WINFD;
	OVERLAPPED* overlapped = NULL;

	CHECK_INIT_POLLING;

	if ((handle == 0) || (handle == INVALID_HANDLE_VALUE)) {
		return INVALID_WINFD;
	}

	wfd.itransfer = itransfer;
	wfd.cancel_fn = cancel_fn;

	if ((access_mode != RW_READ) && (access_mode != RW_WRITE)) {
		usbi_warn(NULL, "only one of RW_READ or RW_WRITE is supported.\n"
			"If you want to poll for R/W simultaneously, create multiple fds from the same handle.");
		return INVALID_WINFD;
	}
	if (access_mode == RW_READ) {
		wfd.rw = RW_READ;
	} else {
		wfd.rw = RW_WRITE;
	}

	overlapped = create_overlapped();
	if (overlapped == NULL) {
		return INVALID_WINFD;
	}

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd < 0) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been removed before we got to critical
			if (poll_fd[i].fd >= 0) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			// Use index as the unique fd number
			wfd.fd = i;
			// Attempt to emulate some of the CancelIoEx behaviour on platforms
			// that don't have it
			if (Use_Duplicate_Handles) {
				_poll_fd[i].thread_id = GetCurrentThreadId();
				if (!DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(),
					&wfd.handle, 0, TRUE, DUPLICATE_SAME_ACCESS)) {
					usbi_dbg("could not duplicate handle for CancelIo - using original one");
					wfd.handle = handle;
					// Make sure we won't close the original handle on fd deletion then
					_poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
				} else {
					_poll_fd[i].original_handle = handle;
				}
			} else {
				wfd.handle = handle;
			}
			wfd.overlapped = overlapped;
			memcpy(&poll_fd[i], &wfd, sizeof(struct winfd));
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return wfd;
		}
	}
	free_overlapped(overlapped);
	return INVALID_WINFD;
}
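
/*
 * Illustrative sketch (assumption, not part of the original source): obtaining
 * separate read and write fds from a single overlapped handle, as the comment
 * above requires. The NULL itransfer/cancel_fn arguments are placeholders for
 * callers that do not need transfer-specific cancellation.
 *
 *   struct winfd rfd = usbi_create_fd(handle, RW_READ, NULL, NULL);
 *   struct winfd wfd = usbi_create_fd(handle, RW_WRITE, NULL, NULL);
 *   // ... issue overlapped reads with rfd.overlapped and writes with
 *   // wfd.overlapped, polling rfd.fd for POLLIN and wfd.fd for POLLOUT ...
 *   usbi_free_fd(&rfd);                  // note: does not close 'handle'
 *   usbi_free_fd(&wfd);
 */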

static void _free_index(int _index)
{
	// Cancel any async IO (Don't care about the validity of our handles for this)
	cancel_io(_index);
	// close the duplicate handle (if we have an actual duplicate)
	if (Use_Duplicate_Handles) {
		if (_poll_fd[_index].original_handle != INVALID_HANDLE_VALUE) {
			CloseHandle(poll_fd[_index].handle);
		}
		_poll_fd[_index].original_handle = INVALID_HANDLE_VALUE;
		_poll_fd[_index].thread_id = 0;
	}
	free_overlapped(poll_fd[_index].overlapped);
	poll_fd[_index] = INVALID_WINFD;
}

/*
 * Release a pollable file descriptor.
 *
 * Note that the associated Windows handle is not closed by this call
 */
void usbi_free_fd(struct winfd *wfd)
{
	int _index;

	CHECK_INIT_POLLING;

	_index = _fd_to_index_and_lock(wfd->fd);
	if (_index < 0) {
		return;
	}
	_free_index(_index);
	*wfd = INVALID_WINFD;
	LeaveCriticalSection(&_poll_fd[_index].mutex);
}

/*
 * The functions below perform various conversions between fd, handle and OVERLAPPED
 */
struct winfd fd_to_winfd(int fd)
{
	int i;
	struct winfd wfd;

	CHECK_INIT_POLLING;

	if (fd < 0)
		return INVALID_WINFD;

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd == fd) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been deleted before we got to critical
			if (poll_fd[i].fd != fd) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			memcpy(&wfd, &poll_fd[i], sizeof(struct winfd));
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return wfd;
		}
	}
	return INVALID_WINFD;
}

struct winfd handle_to_winfd(HANDLE handle)
{
	int i;
	struct winfd wfd;

	CHECK_INIT_POLLING;

	if ((handle == 0) || (handle == INVALID_HANDLE_VALUE))
		return INVALID_WINFD;

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].handle == handle) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been deleted before we got to critical
			if (poll_fd[i].handle != handle) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			memcpy(&wfd, &poll_fd[i], sizeof(struct winfd));
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return wfd;
		}
	}
	return INVALID_WINFD;
}

struct winfd overlapped_to_winfd(OVERLAPPED *overlapped)
{
	int i;
	struct winfd wfd;

	CHECK_INIT_POLLING;

	if (overlapped == NULL)
		return INVALID_WINFD;

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].overlapped == overlapped) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been deleted before we got to critical
			if (poll_fd[i].overlapped != overlapped) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			memcpy(&wfd, &poll_fd[i], sizeof(struct winfd));
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return wfd;
		}
	}
	return INVALID_WINFD;
}

/*
 * POSIX poll equivalent, using Windows OVERLAPPED
 * Currently, this function only accepts one of POLLIN or POLLOUT per fd
 * (but you can create multiple fds from the same handle for read and write)
 */
int usbi_poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
	unsigned i;
	int _index, object_index, triggered;
	HANDLE *handles_to_wait_on;
	int *handle_to_index;
	DWORD nb_handles_to_wait_on = 0;
	DWORD ret;

	CHECK_INIT_POLLING;

	triggered = 0;
	handles_to_wait_on = (HANDLE*) calloc(nfds+1, sizeof(HANDLE));	// +1 for fd_update
	handle_to_index = (int*) calloc(nfds, sizeof(int));
	if ((handles_to_wait_on == NULL) || (handle_to_index == NULL)) {
		errno = ENOMEM;
		triggered = -1;
		goto poll_exit;
	}

	for (i = 0; i < nfds; ++i) {
		fds[i].revents = 0;

		// Only one of POLLIN or POLLOUT can be selected with this version of poll (not both)
		if ((fds[i].events & ~POLLIN) && (!(fds[i].events & POLLOUT))) {
			fds[i].revents |= POLLERR;
			errno = EACCES;
			usbi_warn(NULL, "unsupported set of events");
			triggered = -1;
			goto poll_exit;
		}

		_index = _fd_to_index_and_lock(fds[i].fd);

		if ( (_index < 0) || (poll_fd[_index].handle == INVALID_HANDLE_VALUE)
		  || (poll_fd[_index].handle == 0) || (poll_fd[_index].overlapped == NULL)) {
			fds[i].revents |= POLLNVAL | POLLERR;
			errno = EBADF;
			if (_index >= 0) {
				LeaveCriticalSection(&_poll_fd[_index].mutex);
			}
			usbi_warn(NULL, "invalid fd");
			triggered = -1;
			goto poll_exit;
		}

		// Trace only after the fd has been validated, so that poll_fd is
		// never indexed with a negative _index
		poll_dbg("fd[%d]=%d: (overlapped=%p) got events %04X", i, poll_fd[_index].fd, poll_fd[_index].overlapped, fds[i].events);

		// IN or OUT must match our fd direction
		if ((fds[i].events & POLLIN) && (poll_fd[_index].rw != RW_READ)) {
			fds[i].revents |= POLLNVAL | POLLERR;
			errno = EBADF;
			usbi_warn(NULL, "attempted POLLIN on fd without READ access");
			LeaveCriticalSection(&_poll_fd[_index].mutex);
			triggered = -1;
			goto poll_exit;
		}

		if ((fds[i].events & POLLOUT) && (poll_fd[_index].rw != RW_WRITE)) {
			fds[i].revents |= POLLNVAL | POLLERR;
			errno = EBADF;
			usbi_warn(NULL, "attempted POLLOUT on fd without WRITE access");
			LeaveCriticalSection(&_poll_fd[_index].mutex);
			triggered = -1;
			goto poll_exit;
		}

		// The following macro only works if overlapped I/O was reported pending
		if ( (HasOverlappedIoCompleted(poll_fd[_index].overlapped))
		  || (HasOverlappedIoCompletedSync(poll_fd[_index].overlapped)) ) {
			poll_dbg("  completed");
			// checks above should ensure this works:
			fds[i].revents = fds[i].events;
			triggered++;
		} else {
			handles_to_wait_on[nb_handles_to_wait_on] = poll_fd[_index].overlapped->hEvent;
			handle_to_index[nb_handles_to_wait_on] = i;
			nb_handles_to_wait_on++;
		}
		LeaveCriticalSection(&_poll_fd[_index].mutex);
	}

	// If nothing was triggered, wait on all fds that require it
	if ((timeout != 0) && (triggered == 0) && (nb_handles_to_wait_on != 0)) {
		if (timeout < 0) {
			poll_dbg("starting infinite wait for %d handles...", (int)nb_handles_to_wait_on);
		} else {
			poll_dbg("starting %d ms wait for %d handles...", timeout, (int)nb_handles_to_wait_on);
		}
		ret = WaitForMultipleObjects(nb_handles_to_wait_on, handles_to_wait_on,
			FALSE, (timeout<0)?INFINITE:(DWORD)timeout);
		object_index = ret-WAIT_OBJECT_0;
		if ((object_index >= 0) && ((DWORD)object_index < nb_handles_to_wait_on)) {
			poll_dbg("  completed after wait");
			i = handle_to_index[object_index];
			_index = _fd_to_index_and_lock(fds[i].fd);
			fds[i].revents = fds[i].events;
			triggered++;
			if (_index >= 0) {
				LeaveCriticalSection(&_poll_fd[_index].mutex);
			}
		} else if (ret == WAIT_TIMEOUT) {
			poll_dbg("  timed out");
			triggered = 0;	// 0 = timeout
		} else {
			errno = EIO;
			triggered = -1;	// error
		}
	}

poll_exit:
	if (handles_to_wait_on != NULL) {
		free(handles_to_wait_on);
	}
	if (handle_to_index != NULL) {
		free(handle_to_index);
	}
	return triggered;
}
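
/*
 * Illustrative sketch (assumption, not part of the original source): how a
 * caller would typically interpret the result, mirroring POSIX poll(): a
 * positive return is the number of fds with revents set, 0 means the wait
 * timed out, and -1 means an error with errno set.
 *
 *   int r = usbi_poll(fds, nfds, 100);   // wait up to 100 ms
 *   if (r > 0) {
 *           for (i = 0; i < nfds; i++) {
 *                   if (fds[i].revents & (POLLIN | POLLOUT)) {
 *                           // the overlapped I/O behind fds[i].fd completed
 *                   }
 *           }
 *   } else if (r == 0) {
 *           // timeout
 *   } else {
 *           // error, check errno
 *   }
 */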

/*
 * close a fake pipe fd
 */
int usbi_close(int fd)
{
	int _index;
	int r = -1;

	CHECK_INIT_POLLING;

	_index = _fd_to_index_and_lock(fd);

	if (_index < 0) {
		errno = EBADF;
	} else {
		free_overlapped(poll_fd[_index].overlapped);
		poll_fd[_index] = INVALID_WINFD;
		LeaveCriticalSection(&_poll_fd[_index].mutex);
		r = 0;	// report success; otherwise a valid close would still return -1
	}
	return r;
}

/*
 * synchronous write for fake "pipe" signaling
 */
ssize_t usbi_write(int fd, const void *buf, size_t count)
{
	int _index;
	UNUSED(buf);

	CHECK_INIT_POLLING;

	if (count != sizeof(unsigned char)) {
		usbi_err(NULL, "this function should only be used for signaling");
		return -1;
	}

	_index = _fd_to_index_and_lock(fd);

	if ( (_index < 0) || (poll_fd[_index].overlapped == NULL) ) {
		errno = EBADF;
		if (_index >= 0) {
			LeaveCriticalSection(&_poll_fd[_index].mutex);
		}
		return -1;
	}

	poll_dbg("set pipe event (fd = %d, thread = %08X)", _index, GetCurrentThreadId());
	SetEvent(poll_fd[_index].overlapped->hEvent);
	poll_fd[_index].overlapped->Internal = STATUS_WAIT_0;
	// If two threads write on the pipe at the same time, we need to
	// process two separate reads => use the overlapped as a counter
	poll_fd[_index].overlapped->InternalHigh++;

	LeaveCriticalSection(&_poll_fd[_index].mutex);
	return sizeof(unsigned char);
}

/*
 * synchronous read for fake "pipe" signaling
 */
ssize_t usbi_read(int fd, void *buf, size_t count)
{
	int _index;
	ssize_t r = -1;
	UNUSED(buf);

	CHECK_INIT_POLLING;

	if (count != sizeof(unsigned char)) {
		usbi_err(NULL, "this function should only be used for signaling");
		return -1;
	}

	_index = _fd_to_index_and_lock(fd);

	if (_index < 0) {
		errno = EBADF;
		return -1;
	}

	if (WaitForSingleObject(poll_fd[_index].overlapped->hEvent, INFINITE) != WAIT_OBJECT_0) {
		usbi_warn(NULL, "waiting for event failed: %d", (int)GetLastError());
		errno = EIO;
		goto out;
	}

	poll_dbg("clr pipe event (fd = %d, thread = %08X)", _index, GetCurrentThreadId());
	poll_fd[_index].overlapped->InternalHigh--;
	// Only reset the event once there are no more pending writes to process
	if (poll_fd[_index].overlapped->InternalHigh <= 0) {
		ResetEvent(poll_fd[_index].overlapped->hEvent);
		poll_fd[_index].overlapped->Internal = STATUS_PENDING;
	}

	r = sizeof(unsigned char);

out:
	LeaveCriticalSection(&_poll_fd[_index].mutex);
	return r;
}