/*
   Copyright (c) 2011, 2014 mingw-w64 project
   Copyright (c) 2015 Intel Corporation

   Permission is hereby granted, free of charge, to any person obtaining a
   copy of this software and associated documentation files (the "Software"),
   to deal in the Software without restriction, including without limitation
   the rights to use, copy, modify, merge, publish, distribute, sublicense,
   and/or sell copies of the Software, and to permit persons to whom the
   Software is furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THE SOFTWARE.
*/

#include <windows.h>
#include <stdio.h>
#include <malloc.h>
#include <stdbool.h>
#include "pthread.h"
#include "misc.h"

typedef enum {
  Unlocked,        /* Not locked. */
  Locked,          /* Locked but without waiters. */
  Waiting,         /* Locked, may have waiters. */
} mutex_state_t;

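/* A sketch of the state machine implemented below (drawn from the code,
   not normative documentation):

     Unlocked --lock (test-and-set)---> Locked
     Locked   --contended lock--------> Waiting, waiter sleeps on the event
     Waiting  --unlock (SetEvent)-----> Unlocked, one waiter wakes and
                                        re-runs the test-and-set

   Distinguishing Locked from Waiting lets pthread_mutex_unlock skip the
   SetEvent kernel call in the common uncontended case. */
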
typedef enum {
  Normal,
  Errorcheck,
  Recursive,
} mutex_type_t;

/* The heap-allocated part of a mutex. */
typedef struct {
  mutex_state_t state;
  mutex_type_t type;
  HANDLE event;       /* Auto-reset event, or NULL if not yet allocated. */
  unsigned rec_lock;  /* For recursive mutexes, the number of times the
                         mutex has been locked in excess by the same thread. */
  volatile DWORD owner;  /* For recursive and error-checking mutexes, the
                            ID of the owning thread if the mutex is locked. */
} mutex_impl_t;

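/* The static initializers in pthread.h are small negative integers cast to
   pthread_mutex_t (mutex_impl_init below compares against
   PTHREAD_RECURSIVE_MUTEX_INITIALIZER and PTHREAD_ERRORCHECK_MUTEX_INITIALIZER),
   so any value in -3..0 denotes a mutex with no mutex_impl_t allocated yet. */
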
/* Whether a mutex is still a static initializer (not a pointer to
   a mutex_impl_t). */
static bool
is_static_initializer(pthread_mutex_t m)
{
  /* Treat 0 as a static initializer as well (for normal mutexes),
     to tolerate sloppy code in libgomp. (We should rather fix that code!) */
  intptr_t v = (intptr_t)m;
  return v >= -3 && v <= 0;
/* Should be simple:
  return (uintptr_t)m >= (uintptr_t)-3; */
}

/* Create and return the implementation part of a mutex from a static
   initialiser. Return NULL on out-of-memory error. */
static WINPTHREADS_ATTRIBUTE((noinline)) mutex_impl_t *
mutex_impl_init(pthread_mutex_t *m, mutex_impl_t *mi)
{
  mutex_impl_t *new_mi = malloc(sizeof(mutex_impl_t));
  if (new_mi == NULL)
    return NULL;
  new_mi->state = Unlocked;
  new_mi->type = (mi == (void *)PTHREAD_RECURSIVE_MUTEX_INITIALIZER ? Recursive
                  : mi == (void *)PTHREAD_ERRORCHECK_MUTEX_INITIALIZER ? Errorcheck
                  : Normal);
  new_mi->event = NULL;
  new_mi->rec_lock = 0;
  new_mi->owner = (DWORD)-1;
  if (__sync_bool_compare_and_swap(m, (pthread_mutex_t)mi, (pthread_mutex_t)new_mi)) {
    return new_mi;
  } else {
    /* Someone created the struct before us. */
    free(new_mi);
    return (mutex_impl_t *)*m;
  }
}

#define likely(cond) __builtin_expect((cond) != 0, 1)
#define unlikely(cond) __builtin_expect((cond) != 0, 0)

/* Return the implementation part of a mutex, creating it if necessary.
   Return NULL on out-of-memory error. */
static inline mutex_impl_t *
mutex_impl(pthread_mutex_t *m)
{
  mutex_impl_t *mi = (mutex_impl_t *)*m;
  if (is_static_initializer((pthread_mutex_t)mi)) {
    return mutex_impl_init(m, mi);
  } else {
    /* mi cannot be null here; avoid a test in the fast path. */
    if (mi == NULL)
      __builtin_unreachable();
    return mi;
  }
}

/* Lock a mutex. Give up after 'timeout' ms (with ETIMEDOUT),
   or never if timeout=INFINITE. */
static inline int
pthread_mutex_lock_intern (pthread_mutex_t *m, DWORD timeout)
{
  mutex_impl_t *mi = mutex_impl(m);
  if (mi == NULL)
    return ENOMEM;

  mutex_state_t old_state = __sync_lock_test_and_set(&mi->state, Locked);
  if (unlikely(old_state != Unlocked)) {
    /* The mutex is already locked. */

    if (mi->type != Normal) {
      /* Recursive or Errorcheck */
      if (mi->owner == GetCurrentThreadId()) {
        /* FIXME: A recursive mutex should not need two atomic ops when locking
           recursively.  We could rewrite by doing compare-and-swap instead of
           test-and-set the first time, but it would lead to more code
           duplication and add a conditional branch to the critical path. */
        __sync_bool_compare_and_swap(&mi->state, Locked, old_state);
        if (mi->type == Recursive) {
          mi->rec_lock++;
          return 0;
        } else {
          /* type == Errorcheck */
          return EDEADLK;
        }
      }
    }

    /* Make sure there is an event object on which to wait. */
    if (mi->event == NULL) {
      /* Make an auto-reset event object. */
      HANDLE ev = CreateEvent(NULL, false, false, NULL);
      if (ev == NULL) {
        switch (GetLastError()) {
        case ERROR_ACCESS_DENIED:
          return EPERM;
        default:
          return ENOMEM;    /* Probably accurate enough. */
        }
      }
      if (!__sync_bool_compare_and_swap(&mi->event, NULL, ev)) {
        /* Someone created the event before us. */
        CloseHandle(ev);
      }
    }

    /* At this point, mi->event is non-NULL. */

    while (__sync_lock_test_and_set(&mi->state, Waiting) != Unlocked) {
      /* For timed locking attempts, it is possible (although unlikely)
         that we are woken up but someone else grabs the lock before us,
         and we have to go back to sleep again. In that case, the total
         wait may be longer than expected. */

      unsigned r = _pthread_wait_for_single_object(mi->event, timeout);
      switch (r) {
      case WAIT_TIMEOUT:
        return ETIMEDOUT;
      case WAIT_OBJECT_0:
        break;
      default:
        return EINVAL;
      }
    }
  }

  if (mi->type != Normal)
    mi->owner = GetCurrentThreadId();

  return 0;
}

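/* Note on the wait loop above (an observation, not normative documentation):
   a woken thread sets the state to Waiting rather than Locked because it
   cannot know whether other waiters remain; the worst case is one spurious
   SetEvent at unlock.  Since the event is auto-reset, each SetEvent releases
   at most one waiter. */
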
int
pthread_mutex_lock (pthread_mutex_t *m)
{
  return pthread_mutex_lock_intern (m, INFINITE);
}

int pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *ts)
{
  unsigned long long patience;
  if (ts != NULL) {
    unsigned long long end = _pthread_time_in_ms_from_timespec(ts);
    unsigned long long now = _pthread_time_in_ms();
    patience = end > now ? end - now : 0;
    if (patience > 0xffffffff)
      patience = INFINITE;
  } else {
    patience = INFINITE;
  }
  return pthread_mutex_lock_intern(m, patience);
}

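/* Usage sketch (illustrative only, not part of this file): giving up after
   roughly two seconds.  The timespec is an absolute wall-clock deadline;
   assuming a C11 environment, timespec_get can supply the current time.
   The mutex 'm' and handle_contention() are hypothetical names.

     struct timespec deadline;
     timespec_get(&deadline, TIME_UTC);
     deadline.tv_sec += 2;
     if (pthread_mutex_timedlock(&m, &deadline) == ETIMEDOUT)
       handle_contention();
*/
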
int pthread_mutex_unlock(pthread_mutex_t *m)
{
  /* Here m might be an initialiser of an error-checking or recursive mutex,
     in which case the behaviour is well-defined, so we can't skip this
     check. */
  mutex_impl_t *mi = mutex_impl(m);
  if (mi == NULL)
    return ENOMEM;

  if (unlikely(mi->type != Normal)) {
    if (mi->state == Unlocked)
      return EINVAL;
    if (mi->owner != GetCurrentThreadId())
      return EPERM;
    if (mi->rec_lock > 0) {
      mi->rec_lock--;
      return 0;
    }
    mi->owner = (DWORD)-1;
  }
  if (unlikely(__sync_lock_test_and_set(&mi->state, Unlocked) == Waiting)) {
    if (!SetEvent(mi->event))
      return EPERM;
  }
  return 0;
}

int pthread_mutex_trylock(pthread_mutex_t *m)
{
  mutex_impl_t *mi = mutex_impl(m);
  if (mi == NULL)
    return ENOMEM;

  if (__sync_bool_compare_and_swap(&mi->state, Unlocked, Locked)) {
    if (mi->type != Normal)
      mi->owner = GetCurrentThreadId();
    return 0;
  } else {
    if (mi->type == Recursive && mi->owner == GetCurrentThreadId()) {
      mi->rec_lock++;
      return 0;
    }
    return EBUSY;
  }
}

int
pthread_mutex_init (pthread_mutex_t *m, const pthread_mutexattr_t *a)
{
  pthread_mutex_t init = PTHREAD_MUTEX_INITIALIZER;
  if (a != NULL) {
    int pshared;
    if (pthread_mutexattr_getpshared(a, &pshared) == 0
        && pshared == PTHREAD_PROCESS_SHARED)
      return ENOSYS;

    int type;
    if (pthread_mutexattr_gettype(a, &type) == 0) {
      switch (type) {
      case PTHREAD_MUTEX_ERRORCHECK:
        init = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER;
        break;
      case PTHREAD_MUTEX_RECURSIVE:
        init = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
        break;
      default:
        init = PTHREAD_MUTEX_INITIALIZER;
        break;
      }
    }
  }
  *m = init;
  return 0;
}

int pthread_mutex_destroy (pthread_mutex_t *m)
{
  mutex_impl_t *mi = (mutex_impl_t *)*m;
  if (!is_static_initializer((pthread_mutex_t)mi)) {
    if (mi->event != NULL)
      CloseHandle(mi->event);
    free(mi);
    /* Sabotage attempts to re-use the mutex before initialising it again. */
    *m = (pthread_mutex_t)NULL;
  }

  return 0;
}

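/* Usage sketch (illustrative, not part of this file): creating a recursive
   mutex through the attribute API below.

     pthread_mutexattr_t attr;
     pthread_mutex_t m;
     pthread_mutexattr_init(&attr);
     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
     pthread_mutex_init(&m, &attr);
     pthread_mutexattr_destroy(&attr);

   Note that pthread_mutex_init merely stores the matching static
   initializer in *m; the mutex_impl_t is allocated lazily on first lock. */
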
int pthread_mutexattr_init(pthread_mutexattr_t *a)
{
  *a = PTHREAD_MUTEX_NORMAL | (PTHREAD_PROCESS_PRIVATE << 3);
  return 0;
}

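/* Layout of pthread_mutexattr_t as used by the accessors below (inferred
   from this file, not documented elsewhere): bits 0-1 hold the mutex type,
   bit 2 the process-shared flag, bits 3-4 the protocol, and the priority
   ceiling is stored as a multiple of PTHREAD_PRIO_MULT. */
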
int pthread_mutexattr_destroy(pthread_mutexattr_t *a)
{
  if (!a)
    return EINVAL;

  return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *a, int *type)
{
  if (!a || !type)
    return EINVAL;

  *type = *a & 3;

  return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *a, int type)
{
  if (!a || (type != PTHREAD_MUTEX_NORMAL && type != PTHREAD_MUTEX_RECURSIVE
             && type != PTHREAD_MUTEX_ERRORCHECK))
    return EINVAL;
  *a &= ~3;
  *a |= type;

  return 0;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t *a, int *type)
{
  if (!a || !type)
    return EINVAL;
  *type = (*a & 4 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE);

  return 0;
}

int pthread_mutexattr_setpshared(pthread_mutexattr_t *a, int type)
{
  int r = 0;
  if (!a || (type != PTHREAD_PROCESS_SHARED
             && type != PTHREAD_PROCESS_PRIVATE))
    return EINVAL;
  if (type == PTHREAD_PROCESS_SHARED)
  {
    /* Process-shared mutexes are not supported; downgrade the attribute
       to process-private and report ENOSYS. */
    type = PTHREAD_PROCESS_PRIVATE;
    r = ENOSYS;
  }
  type = (type == PTHREAD_PROCESS_SHARED ? 4 : 0);

  *a &= ~4;
  *a |= type;

  return r;
}

int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *a, int *type)
{
  *type = *a & (8 + 16);

  return 0;
}

int pthread_mutexattr_setprotocol(pthread_mutexattr_t *a, int type)
{
  if ((type & (8 + 16)) != 8 + 16)
    return EINVAL;

  *a &= ~(8 + 16);
  *a |= type;

  return 0;
}

int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *a, int *prio)
{
  *prio = *a / PTHREAD_PRIO_MULT;
  return 0;
}

int pthread_mutexattr_setprioceiling(pthread_mutexattr_t *a, int prio)
{
  *a &= (PTHREAD_PRIO_MULT - 1);
  *a += prio * PTHREAD_PRIO_MULT;

  return 0;
}