/**************************************************************************
 *
 * Copyright 2008-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * OS independent time-manipulation functions.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "os_time.h"
#include "detect_os.h"

#include "util/u_atomic.h"

#if DETECT_OS_UNIX
#  include <unistd.h> /* usleep */
#  include <time.h> /* timespec, clock_gettime, clock_nanosleep */
#  include <sys/time.h> /* timeval */
#  include <sched.h> /* sched_yield */
#  include <errno.h>
#elif DETECT_OS_WINDOWS
#  include <windows.h>
#else
#  error Unsupported OS
#endif


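/**
 * Return the current time, in nanoseconds, from a monotonic clock where the
 * OS provides one (Linux/BSD, Windows), falling back to wall-clock time on
 * other Unix systems.
 */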
int64_t
os_time_get_nano(void)
{
#if DETECT_OS_LINUX || DETECT_OS_BSD

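   /* CLOCK_MONOTONIC is not affected by adjustments to the system clock,
    * which makes it suitable for measuring elapsed time.
    */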
   struct timespec tv;
   clock_gettime(CLOCK_MONOTONIC, &tv);
   return tv.tv_nsec + tv.tv_sec*INT64_C(1000000000);

#elif DETECT_OS_UNIX

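   /* Fallback for other Unix systems: gettimeofday() reports wall-clock
    * time, which can jump when the system time is adjusted.
    */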
   struct timeval tv;
   gettimeofday(&tv, NULL);
   return tv.tv_usec*INT64_C(1000) + tv.tv_sec*INT64_C(1000000000);

#elif DETECT_OS_WINDOWS

   LARGE_INTEGER frequency;
   LARGE_INTEGER counter;
   int64_t secs, nanosecs;
   QueryPerformanceFrequency(&frequency);
   QueryPerformanceCounter(&counter);
   /* Compute seconds and nanoseconds parts separately to
    * reduce severity of precision loss.
    */
   secs = counter.QuadPart / frequency.QuadPart;
   nanosecs = (counter.QuadPart % frequency.QuadPart) * INT64_C(1000000000)
              / frequency.QuadPart;
   return secs*INT64_C(1000000000) + nanosecs;

#else

#error Unsupported OS

#endif
}


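/**
 * Put the calling thread to sleep for approximately the given number of
 * microseconds.
 */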
void
os_time_sleep(int64_t usecs)
{
#if DETECT_OS_LINUX
   struct timespec time;
   time.tv_sec = usecs / 1000000;
   time.tv_nsec = (usecs % 1000000) * 1000;
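   /* Restart the sleep if it is interrupted by a signal; for a relative
    * sleep, clock_nanosleep() writes the remaining time back into 'time'.
    */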
   while (clock_nanosleep(CLOCK_MONOTONIC, 0, &time, &time) == EINTR);

#elif DETECT_OS_UNIX
   usleep(usecs);

#elif DETECT_OS_WINDOWS
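   /* Sleep() only has millisecond granularity, so round up to the next
    * millisecond.
    */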
   DWORD dwMilliseconds = (DWORD) ((usecs + 999) / 1000);
   /* Avoid Sleep(0): it merely yields the remainder of the caller's time
    * slice instead of sleeping for a well-defined duration.
    */
   if (dwMilliseconds) {
      Sleep(dwMilliseconds);
   }
#else
#  error Unsupported OS
#endif
}


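/**
 * Convert a relative timeout in nanoseconds into an absolute timeout based
 * on os_time_get_nano(). OS_TIMEOUT_INFINITE, values exceeding INT64_MAX,
 * and results that would overflow are all mapped to OS_TIMEOUT_INFINITE.
 */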
int64_t
os_time_get_absolute_timeout(uint64_t timeout)
{
   int64_t time, abs_timeout;

   /* Also check for the type upper bound. */
   if (timeout == OS_TIMEOUT_INFINITE || timeout > INT64_MAX)
      return OS_TIMEOUT_INFINITE;

   time = os_time_get_nano();
   abs_timeout = time + (int64_t)timeout;

   /* Check for overflow. */
   if (abs_timeout < time)
      return OS_TIMEOUT_INFINITE;

   return abs_timeout;
}


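/**
 * Poll the variable pointed to by 'var' until it reads zero, yielding the
 * CPU between reads on Unix. 'timeout' is a relative timeout in nanoseconds,
 * or OS_TIMEOUT_INFINITE to wait forever. Returns true if the variable
 * reached zero, false if the timeout expired first.
 */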
bool
os_wait_until_zero(volatile int *var, uint64_t timeout)
{
   if (!p_atomic_read(var))
      return true;

   if (!timeout)
      return false;

   if (timeout == OS_TIMEOUT_INFINITE) {
      while (p_atomic_read(var)) {
#if DETECT_OS_UNIX
         sched_yield();
#endif
      }
      return true;
   }
   else {
      int64_t start_time = os_time_get_nano();
      int64_t end_time = start_time + timeout;

      while (p_atomic_read(var)) {
         if (os_time_timeout(start_time, end_time, os_time_get_nano()))
            return false;

#if DETECT_OS_UNIX
         sched_yield();
#endif
      }
      return true;
   }
}


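/**
 * Same as os_wait_until_zero(), but with an absolute timeout in nanoseconds,
 * as returned by os_time_get_absolute_timeout().
 */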
bool
os_wait_until_zero_abs_timeout(volatile int *var, int64_t timeout)
{
   if (!p_atomic_read(var))
      return true;

   if (timeout == OS_TIMEOUT_INFINITE)
      return os_wait_until_zero(var, OS_TIMEOUT_INFINITE);

   while (p_atomic_read(var)) {
      if (os_time_get_nano() >= timeout)
         return false;

#if DETECT_OS_UNIX
      sched_yield();
#endif
   }
   return true;
}