1 /*
2 * Copyright © 2015 Intel
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef _SIMPLE_MTX_H
25 #define _SIMPLE_MTX_H
26
27 #include "util/futex.h"
28 #include "util/macros.h"
29 #include "util/u_call_once.h"
30 #include "u_atomic.h"
31
32 #if UTIL_FUTEX_SUPPORTED
33 #if defined(HAVE_VALGRIND) && !defined(NDEBUG)
34 # include <valgrind.h>
35 # include <helgrind.h>
36 # define HG(x) x
37 #else
38 # define HG(x)
39 #endif
40 #else /* !UTIL_FUTEX_SUPPORTED */
41 # include "c11/threads.h"
42 #endif /* UTIL_FUTEX_SUPPORTED */
43
44 #ifdef __cplusplus
45 extern "C" {
46 #endif
47
48 #if UTIL_FUTEX_SUPPORTED
49
/* simple_mtx_t - Fast, simple mutex
 *
 * While modern pthread mutexes are very fast (implemented using futex), they
 * still incur a call to an external DSO and overhead of the generality and
 * features of pthread mutexes.  Most mutexes in mesa only need lock/unlock,
 * and the idea here is that we can inline the atomic operation and make the
 * fast case just two instructions.  Mutexes are subtle and finicky to
 * implement, so we carefully copy the implementation from Ulrich Drepper's
 * well-written and well-reviewed paper:
 *
 *   "Futexes Are Tricky"
 *   http://www.akkadia.org/drepper/futex.pdf
 *
 * We implement "mutex3", which gives us a mutex that has no syscalls on
 * uncontended lock or unlock.  Further, the uncontended case boils down to a
 * locked cmpxchg and an untaken branch, and the uncontended unlock is just a
 * locked decr and an untaken branch.  We use __builtin_expect() to indicate
 * that contention is unlikely so that gcc will put the contention code out of
 * the main code flow.
 *
 * A fast mutex only supports lock/unlock; it can't be recursive or used with
 * condition variables.
 */
73
/* Futex-based mutex state.  The encoding follows Drepper's "mutex3":
 *   0 = unlocked
 *   1 = locked, no waiters
 *   2 = locked, with (possible) waiters
 */
typedef struct {
   uint32_t val;
} simple_mtx_t;
77
78 #define SIMPLE_MTX_INITIALIZER { 0 }
79
80 #define _SIMPLE_MTX_INVALID_VALUE 0xd0d0d0d0
81
/* Initialize the mutex to the unlocked state (val == 0).
 *
 * Only mtx_plain is supported — no recursive or timed mutexes.  The type
 * parameter exists to mirror the C11 mtx_init() signature and is only
 * checked in debug builds (hence ASSERTED).
 */
static inline void
simple_mtx_init(simple_mtx_t *mtx, ASSERTED int type)
{
   assert(type == mtx_plain);

   mtx->val = 0;

   HG(ANNOTATE_RWLOCK_CREATE(mtx));
}
91
/* Destroy the mutex.  The mutex must not be locked or waited on. */
static inline void
simple_mtx_destroy(ASSERTED simple_mtx_t *mtx)
{
   HG(ANNOTATE_RWLOCK_DESTROY(mtx));
#ifndef NDEBUG
   /* Poison the value so any later lock/unlock of a destroyed mutex trips
    * the asserts in simple_mtx_lock()/simple_mtx_unlock().
    */
   mtx->val = _SIMPLE_MTX_INVALID_VALUE;
#endif
}
100
/* Acquire the mutex, sleeping on the futex if it is already held.
 *
 * Fast path: a single cmpxchg 0 -> 1 and an untaken branch.  Slow path
 * (Drepper's "mutex3"): advertise contention by forcing the value to 2,
 * then futex_wait until the xchg observes 0, meaning we took the lock.
 */
static inline void
simple_mtx_lock(simple_mtx_t *mtx)
{
   uint32_t c;

   c = p_atomic_cmpxchg(&mtx->val, 0, 1);

   assert(c != _SIMPLE_MTX_INVALID_VALUE);

   if (__builtin_expect(c != 0, 0)) {
      /* Lock was held.  Mark it contended (2) so the eventual unlocker
       * knows it must futex_wake a waiter.  If the xchg returned 0 the
       * lock was released between our operations and we now own it
       * (already marked contended, which is safe but may cause a spurious
       * wake later).
       */
      if (c != 2)
         c = p_atomic_xchg(&mtx->val, 2);
      while (c != 0) {
         /* Sleep only while the value is still 2; re-check via xchg on
          * each wakeup since futex_wait can return spuriously.
          */
         futex_wait(&mtx->val, 2, NULL);
         c = p_atomic_xchg(&mtx->val, 2);
      }
   }

   HG(ANNOTATE_RWLOCK_ACQUIRED(mtx, 1));
}
121
/* Release the mutex, waking one waiter if the lock was contended.
 *
 * Fast path: an atomic decrement (1 -> 0) and an untaken branch.  If the
 * pre-decrement value was not 1, a waiter advertised itself (value 2), so
 * reset the value to 0 and futex_wake one waiter.
 */
static inline void
simple_mtx_unlock(simple_mtx_t *mtx)
{
   uint32_t c;

   HG(ANNOTATE_RWLOCK_RELEASED(mtx, 1));

   c = p_atomic_fetch_add(&mtx->val, -1);

   assert(c != _SIMPLE_MTX_INVALID_VALUE);

   if (__builtin_expect(c != 1, 0)) {
      mtx->val = 0;
      futex_wake(&mtx->val, 1);
   }
}
138
139 static inline void
simple_mtx_assert_locked(simple_mtx_t * mtx)140 simple_mtx_assert_locked(simple_mtx_t *mtx)
141 {
142 assert(mtx->val);
143 }
144
145 #else /* !UTIL_FUTEX_SUPPORTED */
146
/* Fallback when futexes are unavailable: wrap a C11 mtx_t.  Because mtx_t
 * has no portable static initializer, the wrapped mutex is initialized
 * lazily on first use via the once-flag, which keeps
 * SIMPLE_MTX_INITIALIZER usable for statically-initialized mutexes.
 */
typedef struct simple_mtx_t {
   util_once_flag flag;
   mtx_t mtx;
} simple_mtx_t;
151
152 #define SIMPLE_MTX_INITIALIZER { UTIL_ONCE_FLAG_INIT }
153
154 void _simple_mtx_plain_init_once(simple_mtx_t *mtx);
155
156 static inline void
157 _simple_mtx_init_with_once(simple_mtx_t *mtx)
158 {
159 util_call_once_data(&mtx->flag,
160 (util_call_once_data_func)_simple_mtx_plain_init_once, mtx);
161 }
162
/* Out-of-line counterparts of the futex-based inline implementations
 * above; defined elsewhere (non-futex build only).
 */
void
simple_mtx_init(simple_mtx_t *mtx, int type);

void
simple_mtx_destroy(simple_mtx_t *mtx);
168
/* Acquire the mutex, lazily initializing the wrapped mtx_t first. */
static inline void
simple_mtx_lock(simple_mtx_t *mtx)
{
   _simple_mtx_init_with_once(mtx);
   mtx_lock(&mtx->mtx);
}
175
/* Release the mutex.  The once-init is a no-op here in practice, since a
 * mutex being unlocked was necessarily locked (and thus initialized)
 * before.
 */
static inline void
simple_mtx_unlock(simple_mtx_t *mtx)
{
   _simple_mtx_init_with_once(mtx);
   mtx_unlock(&mtx->mtx);
}
182
/* Debug helper: assert that the mutex is currently held (by some thread;
 * ownership is not tracked).  Implemented by probing with mtx_trylock():
 * a held lock must report thrd_busy.  No-op when NDEBUG is defined.
 */
static inline void
simple_mtx_assert_locked(simple_mtx_t *mtx)
{
#ifndef NDEBUG
   _simple_mtx_init_with_once(mtx);
   /* NOTE: this would not work for recursive mutexes, but
    * mtx_t doesn't support those
    */
   int ret = mtx_trylock(&mtx->mtx);
   assert(ret == thrd_busy);
   /* Release the probe lock if we unexpectedly acquired it, so the mutex
    * stays consistent should the assert above not terminate the process.
    */
   if (ret == thrd_success)
      mtx_unlock(&mtx->mtx);
#else
   (void)mtx;
#endif
}
199
200 #endif /* UTIL_FUTEX_SUPPORTED */
201
202 #ifdef __cplusplus
203 }
204 #endif
205
206 #endif /* _SIMPLE_MTX_H */
207