/*
 * Copyright 1999-2006 Brian Paul
 * Copyright 2008 VMware, Inc.
 * Copyright 2022 Yonggang Luo
 * SPDX-License-Identifier: MIT
 */

#include "util/u_thread.h"

#include "macros.h"

/* For assert(), ERANGE, memcpy(), memset() and strlen() used below. */
#include <assert.h>
#include <errno.h>
#include <string.h>

#ifdef HAVE_PTHREAD
#include <signal.h>
#ifdef HAVE_PTHREAD_NP_H
#include <pthread_np.h>
#endif
#endif

#ifdef __HAIKU__
#include <OS.h>
#endif

#if DETECT_OS_LINUX && !defined(ANDROID)
#include <sched.h>
#elif defined(_WIN32) && !defined(HAVE_PTHREAD)
#include <windows.h>
#endif

#ifdef __FreeBSD__
/* pthread_np.h -> sys/param.h -> machine/param.h
 * - defines ALIGN which clashes with our ALIGN
 */
#undef ALIGN
#define cpu_set_t cpuset_t
#endif

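/**
 * Return the index of the CPU the calling thread is currently running on,
 * or -1 when the platform provides no way to query it.
 */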
int
util_get_current_cpu(void)
{
#if DETECT_OS_LINUX && !defined(ANDROID)
   return sched_getcpu();

#elif defined(_WIN32) && !defined(HAVE_PTHREAD)
   return GetCurrentProcessorNumber();

#else
   return -1;
#endif
}

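/**
 * Wrapper around thrd_create(). On pthread platforms it blocks (almost) all
 * signals for the duration of the call so the new thread inherits a blocked
 * signal mask and asynchronous signals are delivered to other threads
 * instead; the caller's mask is restored afterwards. SIGSYS and SIGSEGV are
 * deliberately left unblocked (see the comments below).
 */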
int u_thread_create(thrd_t *thrd, int (*routine)(void *), void *param)
{
   int ret = thrd_error;
#ifdef HAVE_PTHREAD
   sigset_t saved_set, new_set;

   sigfillset(&new_set);
   sigdelset(&new_set, SIGSYS);

   /* SIGSEGV is commonly used by Vulkan API tracing layers in order to track
    * accesses in device memory mapped to user space. Blocking the signal
    * hinders that tracking mechanism.
    */
   sigdelset(&new_set, SIGSEGV);
   pthread_sigmask(SIG_BLOCK, &new_set, &saved_set);
   ret = thrd_create(thrd, routine, param);
   pthread_sigmask(SIG_SETMASK, &saved_set, NULL);
#else
   ret = thrd_create(thrd, routine, param);
#endif

   return ret;
}
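
/* Hypothetical usage sketch (comment only; "worker_main" and "job" are
 * illustrative names, not part of this file):
 *
 *    static int worker_main(void *job)
 *    {
 *       u_thread_setname("mesa-worker");
 *       ...do work...
 *       return 0;
 *    }
 *
 *    thrd_t t;
 *    if (u_thread_create(&t, worker_main, job) == thrd_success)
 *       thrd_join(t, NULL);
 */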
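/**
 * Set the name of the calling thread, using whichever variant of
 * pthread_setname_np()/pthread_set_name_np() the platform provides.
 * If the platform rejects a long name with ERANGE, the call is retried with
 * the name truncated to 15 characters plus the NUL terminator. A no-op on
 * platforms with no known way to name threads.
 */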
void u_thread_setname( const char *name )
{
#if defined(HAVE_PTHREAD)
#if DETECT_OS_LINUX || DETECT_OS_CYGWIN || DETECT_OS_SOLARIS || defined(__GLIBC__)
   int ret = pthread_setname_np(pthread_self(), name);
   if (ret == ERANGE) {
      char buf[16];
      const size_t len = MIN2(strlen(name), ARRAY_SIZE(buf) - 1);
      memcpy(buf, name, len);
      buf[len] = '\0';
      pthread_setname_np(pthread_self(), buf);
   }
#elif DETECT_OS_FREEBSD || DETECT_OS_OPENBSD
   pthread_set_name_np(pthread_self(), name);
#elif DETECT_OS_NETBSD
   pthread_setname_np(pthread_self(), "%s", (void *)name);
#elif DETECT_OS_APPLE
   pthread_setname_np(name);
#elif DETECT_OS_HAIKU
   rename_thread(find_thread(NULL), name);
#else
#warning Not sure how to call pthread_setname_np
#endif
#endif
   (void)name;
}

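/**
 * Set the CPU affinity of "thread" from "mask", an array of 32-bit words in
 * which bit (i % 32) of word (i / 32) selects CPU i; num_mask_bits is the
 * number of valid bits in the array. If "old_mask" is non-NULL, the previous
 * affinity is written there in the same format. Returns true on success and
 * false if affinity is unsupported on this platform or the underlying call
 * fails. On Windows only the first machine word (32 or 64 bits) of the mask
 * is honored.
 */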
bool
util_set_thread_affinity(thrd_t thread,
                         const uint32_t *mask,
                         uint32_t *old_mask,
                         unsigned num_mask_bits)
{
#if defined(HAVE_PTHREAD_SETAFFINITY)
   cpu_set_t cpuset;

   if (old_mask) {
      if (pthread_getaffinity_np(thread, sizeof(cpuset), &cpuset) != 0)
         return false;

      memset(old_mask, 0, num_mask_bits / 8);
      for (unsigned i = 0; i < num_mask_bits && i < CPU_SETSIZE; i++) {
         if (CPU_ISSET(i, &cpuset))
            old_mask[i / 32] |= 1u << (i % 32);
      }
   }

   CPU_ZERO(&cpuset);
   for (unsigned i = 0; i < num_mask_bits && i < CPU_SETSIZE; i++) {
      if (mask[i / 32] & (1u << (i % 32)))
         CPU_SET(i, &cpuset);
   }
   return pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset) == 0;

#elif defined(_WIN32) && !defined(HAVE_PTHREAD)
   DWORD_PTR m = mask[0];

   if (sizeof(m) > 4 && num_mask_bits > 32)
      m |= (uint64_t)mask[1] << 32;

   m = SetThreadAffinityMask(thread.handle, m);
   if (!m)
      return false;

   if (old_mask) {
      memset(old_mask, 0, num_mask_bits / 8);

      old_mask[0] = m;
#ifdef _WIN64
      old_mask[1] = m >> 32;
#endif
   }

   return true;
#else
   return false;
#endif
}

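/**
 * Return the CPU time consumed by "thread" so far, in nanoseconds (kernel
 * plus user time on Windows), or 0 if the platform cannot report it.
 */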
int64_t
util_thread_get_time_nano(thrd_t thread)
{
#if defined(HAVE_PTHREAD) && !defined(__APPLE__) && !defined(__HAIKU__)
   struct timespec ts;
   clockid_t cid;

   pthread_getcpuclockid(thread, &cid);
   clock_gettime(cid, &ts);
   return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
#elif defined(_WIN32)
   union {
      FILETIME time;
      ULONGLONG value;
   } kernel_time, user_time;

   GetThreadTimes((HANDLE)thread.handle, NULL, NULL, &kernel_time.time, &user_time.time);
   return (kernel_time.value + user_time.value) * 100;
#else
   (void)thread;
   return 0;
#endif
}

#if defined(HAVE_PTHREAD) && !defined(__APPLE__) && !defined(__HAIKU__)

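/* Thin wrappers around the native pthread barrier. util_barrier_wait()
 * returns true in exactly one of the waiting threads per cycle (the one
 * pthread reports as PTHREAD_BARRIER_SERIAL_THREAD), which callers can use
 * to run once-per-cycle work.
 */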
void util_barrier_init(util_barrier *barrier, unsigned count)
{
   pthread_barrier_init(barrier, NULL, count);
}

void util_barrier_destroy(util_barrier *barrier)
{
   pthread_barrier_destroy(barrier);
}

bool util_barrier_wait(util_barrier *barrier)
{
   return pthread_barrier_wait(barrier) == PTHREAD_BARRIER_SERIAL_THREAD;
}

#else /* If the OS doesn't have its own, implement barriers using a mutex and a condvar */

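/* Portable fallback built from a mutex, a condition variable and a
 * generation counter ("sequence"): "count" is the number of threads that
 * must arrive before the barrier releases, and "waiters" counts arrivals in
 * the current cycle.
 */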
void util_barrier_init(util_barrier *barrier, unsigned count)
{
   barrier->count = count;
   barrier->waiters = 0;
   barrier->sequence = 0;
   (void) mtx_init(&barrier->mutex, mtx_plain);
   cnd_init(&barrier->condvar);
}

void util_barrier_destroy(util_barrier *barrier)
{
   assert(barrier->waiters == 0);
   mtx_destroy(&barrier->mutex);
   cnd_destroy(&barrier->condvar);
}

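/* Each arriving thread increments "waiters"; the last one resets the counter,
 * bumps "sequence" and wakes everyone. Earlier arrivals wait until they see
 * "sequence" change, which guards against spurious wakeups and against the
 * barrier starting its next cycle before they have left cnd_wait(). Unlike
 * the pthread version above, this fallback returns true in every thread.
 */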
bool util_barrier_wait(util_barrier *barrier)
{
   mtx_lock(&barrier->mutex);

   assert(barrier->waiters < barrier->count);
   barrier->waiters++;

   if (barrier->waiters < barrier->count) {
      uint64_t sequence = barrier->sequence;

      do {
         cnd_wait(&barrier->condvar, &barrier->mutex);
      } while (sequence == barrier->sequence);
   } else {
      barrier->waiters = 0;
      barrier->sequence++;
      cnd_broadcast(&barrier->condvar);
   }

   mtx_unlock(&barrier->mutex);

   return true;
}

#endif