1 /**************************************************************************
2 *
3 * Copyright 1999-2006 Brian Paul
4 * Copyright 2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26
27 #ifndef U_THREAD_H_
28 #define U_THREAD_H_
29
30 #include <errno.h>
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <string.h>
34
35 #include "c11/threads.h"
36 #include "detect_os.h"
37
38 /* Some highly performance-sensitive thread-local variables like the current GL
39 * context are declared with the initial-exec model on Linux. glibc allocates a
 * fixed number of extra slots for initial-exec TLS variables at startup, and
 * Mesa relies on being able to fit into those slots (even when it is
 * dlopen()ed after program startup). This model saves the call to look up the
 * address of the TLS variable.
43 *
44 * However, if we don't have this TLS model available on the platform, then we
45 * still want to use normal TLS (which involves a function call, but not the
46 * expensive pthread_getspecific() or its equivalent).
47 */
#if DETECT_OS_APPLE
/* Apple Clang emits wrappers when using thread_local that break module linkage,
 * but not with __thread
 */
#define __THREAD_INITIAL_EXEC __thread
#elif defined(__GLIBC__)
/* glibc: request the initial-exec TLS model described in the comment above. */
#define __THREAD_INITIAL_EXEC thread_local __attribute__((tls_model("initial-exec")))
/* Marker so other code can detect that initial-exec really is in effect. */
#define REALLY_INITIAL_EXEC
#else
/* Fallback: plain C11 thread_local with the platform's default TLS model. */
#define __THREAD_INITIAL_EXEC thread_local
#endif
59
60 #ifdef __cplusplus
61 extern "C" {
62 #endif
63
/* Return the index of the CPU the calling thread is currently running on.
 * NOTE(review): failure semantics are defined in u_thread.c — presumably a
 * negative value when unavailable; confirm against the implementation. */
int
util_get_current_cpu(void);

/* Create a thread running routine(param); returns an int status code.
 * NOTE(review): presumably a thin thrd_create() wrapper returning thrd_*
 * codes — confirm in u_thread.c. */
int u_thread_create(thrd_t *thrd, int (*routine)(void *), void *param);

/* Set the name of the calling thread, as shown by debuggers and profilers. */
void u_thread_setname( const char *name );

/**
 * Set thread affinity.
 *
 * \param thread Thread
 * \param mask Set this affinity mask
 * \param old_mask Previous affinity mask returned if not NULL
 * \param num_mask_bits Number of bits in both masks
 * \return true on success
 */
bool
util_set_thread_affinity(thrd_t thread,
                         const uint32_t *mask,
                         uint32_t *old_mask,
                         unsigned num_mask_bits);
85
/**
 * Convenience wrapper: apply util_set_thread_affinity() to the calling thread.
 *
 * \param mask          affinity mask to set
 * \param old_mask      previous mask is stored here when non-NULL
 * \param num_mask_bits number of valid bits in both masks
 * \return true on success
 */
static inline bool
util_set_current_thread_affinity(const uint32_t *mask,
                                 uint32_t *old_mask,
                                 unsigned num_mask_bits)
{
   thrd_t self = thrd_current();
   return util_set_thread_affinity(self, mask, old_mask, num_mask_bits);
}
94
95 /*
96 * Thread statistics.
97 */
98
99 /* Return the time of a thread's CPU time clock. */
100 int64_t
101 util_thread_get_time_nano(thrd_t thread);
102
103 /* Return the time of the current thread's CPU time clock. */
/* Sample the CPU-time clock of the calling thread, in nanoseconds. */
static inline int64_t
util_current_thread_get_time_nano(void)
{
   thrd_t self = thrd_current();
   return util_thread_get_time_nano(self);
}
109
/* Return true if \p thread refers to the calling thread. */
static inline bool
u_thread_is_self(thrd_t thread)
{
   thrd_t self = thrd_current();
   return thrd_equal(self, thread) != 0;
}
114
115 /*
116 * util_barrier
117 */
118
119 #if defined(HAVE_PTHREAD) && !defined(__APPLE__) && !defined(__HAIKU__)
120
121 typedef pthread_barrier_t util_barrier;
122
123 #else /* If the OS doesn't have its own, implement barriers using a mutex and a condvar */
124
typedef struct {
   unsigned count;    /* barrier threshold (thread count) from util_barrier_init */
   unsigned waiters;  /* NOTE(review): presumably threads currently blocked in wait */
   uint64_t sequence; /* NOTE(review): presumably a generation counter so the
                       * barrier can be reused across cycles — confirm in u_thread.c */
   mtx_t mutex;
   cnd_t condvar;
} util_barrier;
132
133 #endif
134
/* Initialize \p barrier so that \p count threads must reach util_barrier_wait
 * before any of them proceeds. */
void util_barrier_init(util_barrier *barrier, unsigned count);

/* Release the barrier's resources; no thread may be waiting on it. */
void util_barrier_destroy(util_barrier *barrier);

/* Block until \p count threads have arrived.
 * NOTE(review): the bool return presumably singles out one thread per cycle
 * (pthread_barrier-style "serial thread") — confirm in u_thread.c. */
bool util_barrier_wait(util_barrier *barrier);
140
141 /*
142 * Semaphores
143 */
144
/*
 * Counting semaphore built from a mutex and a condition variable.
 */
typedef struct
{
   mtx_t mutex;
   cnd_t cond;
   int counter;
} util_semaphore;


/** Prepare \p sema with an initial count of \p init_val. */
static inline void
util_semaphore_init(util_semaphore *sema, int init_val)
{
   sema->counter = init_val;
   (void) mtx_init(&sema->mutex, mtx_plain);
   cnd_init(&sema->cond);
}

/** Release the semaphore's mutex and condition variable. */
static inline void
util_semaphore_destroy(util_semaphore *sema)
{
   cnd_destroy(&sema->cond);
   mtx_destroy(&sema->mutex);
}

/** Signal/increment semaphore counter and wake one waiter, if any. */
static inline void
util_semaphore_signal(util_semaphore *sema)
{
   mtx_lock(&sema->mutex);
   ++sema->counter;
   cnd_signal(&sema->cond);
   mtx_unlock(&sema->mutex);
}

/** Block until the counter is greater than zero, then decrement it. */
static inline void
util_semaphore_wait(util_semaphore *sema)
{
   mtx_lock(&sema->mutex);
   for (;;) {
      if (sema->counter > 0)
         break;
      /* Re-check after every wakeup: guards against spurious wakeups. */
      cnd_wait(&sema->cond, &sema->mutex);
   }
   --sema->counter;
   mtx_unlock(&sema->mutex);
}
189
190 #ifdef __cplusplus
191 }
192 #endif
193
194 #endif /* U_THREAD_H_ */
195