/*
 * Copyright © 2015 Intel
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _SIMPLE_MTX_H
#define _SIMPLE_MTX_H

#include "util/futex.h"
#include "util/macros.h"
#include "u_atomic.h"

#include "c11/threads.h"

#if UTIL_FUTEX_SUPPORTED

/* In debug builds with Valgrind support, HG() passes Helgrind lock
 * annotations through; otherwise it compiles to nothing.
 */
#if defined(HAVE_VALGRIND) && !defined(NDEBUG)
#  include <valgrind.h>
#  include <helgrind.h>
#  define HG(x) x
#else
#  define HG(x)
#endif

/* simple_mtx_t - Fast, simple mutex
 *
 * While modern pthread mutexes are very fast (implemented using futex), they
 * still incur a call into an external DSO and the overhead of the generality
 * and features of pthread mutexes.  Most mutexes in Mesa only need
 * lock/unlock, and the idea here is that we can inline the atomic operation
 * and make the fast case just two instructions.  Mutexes are subtle and
 * finicky to implement, so we carefully copy the implementation from Ulrich
 * Drepper's well-written and well-reviewed paper:
 *
 *   "Futexes Are Tricky"
 *   http://www.akkadia.org/drepper/futex.pdf
 *
 * We implement "mutex3", which gives us a mutex that has no syscalls on
 * uncontended lock or unlock.  Further, the uncontended lock boils down to a
 * locked cmpxchg and an untaken branch, and the uncontended unlock is just a
 * locked decrement and an untaken branch.  We use __builtin_expect() to
 * indicate that contention is unlikely so that gcc will put the contention
 * code out of the main code flow.
 *
 * A fast mutex only supports lock/unlock; it can't be recursive or used with
 * condition variables.
 */
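
/* A minimal usage sketch.  The counter_mtx/next_id() example below is
 * hypothetical and not part of this header; it just shows the intended
 * lock/unlock pattern:
 *
 *    static simple_mtx_t counter_mtx = _SIMPLE_MTX_INITIALIZER_NP;
 *    static uint64_t counter;
 *
 *    static uint64_t
 *    next_id(void)
 *    {
 *       uint64_t id;
 *
 *       simple_mtx_lock(&counter_mtx);
 *       id = counter++;
 *       simple_mtx_unlock(&counter_mtx);
 *
 *       return id;
 *    }
 */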

typedef struct {
   /* 0 = unlocked, 1 = locked with no waiters, 2 = locked and possibly
    * contended ("mutex3" in Drepper's paper above).
    */
   uint32_t val;
} simple_mtx_t;

#define _SIMPLE_MTX_INITIALIZER_NP { 0 }

/* Poison value stored by simple_mtx_destroy() in debug builds so that
 * locking or unlocking a destroyed mutex trips the asserts below.
 */
#define _SIMPLE_MTX_INVALID_VALUE 0xd0d0d0d0

static inline void
simple_mtx_init(simple_mtx_t *mtx, ASSERTED int type)
{
   assert(type == mtx_plain);

   mtx->val = 0;

   HG(ANNOTATE_RWLOCK_CREATE(mtx));
}

static inline void
simple_mtx_destroy(ASSERTED simple_mtx_t *mtx)
{
   HG(ANNOTATE_RWLOCK_DESTROY(mtx));
#ifndef NDEBUG
   mtx->val = _SIMPLE_MTX_INVALID_VALUE;
#endif
}

static inline void
simple_mtx_lock(simple_mtx_t *mtx)
{
   uint32_t c;

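   /* Fast path: if the lock word is 0 (unlocked), atomically set it to 1
    * (locked, no waiters).  cmpxchg returns the previous value.
    */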
   c = p_atomic_cmpxchg(&mtx->val, 0, 1);

   assert(c != _SIMPLE_MTX_INVALID_VALUE);

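   /* Contended path: advertise waiters by setting the lock word to 2, and
    * sleep on the futex until the exchange observes 0 (unlocked).
    */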
   if (__builtin_expect(c != 0, 0)) {
      if (c != 2)
         c = p_atomic_xchg(&mtx->val, 2);
      while (c != 0) {
         futex_wait(&mtx->val, 2, NULL);
         c = p_atomic_xchg(&mtx->val, 2);
      }
   }

   HG(ANNOTATE_RWLOCK_ACQUIRED(mtx, 1));
}

static inline void
simple_mtx_unlock(simple_mtx_t *mtx)
{
   uint32_t c;

   HG(ANNOTATE_RWLOCK_RELEASED(mtx, 1));

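   /* Atomically decrement the lock word.  If the previous value was 1 there
    * were no waiters and we are done; if it was 2, reset to 0 and wake one
    * waiter.
    */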
   c = p_atomic_fetch_add(&mtx->val, -1);

   assert(c != _SIMPLE_MTX_INVALID_VALUE);

   if (__builtin_expect(c != 1, 0)) {
      mtx->val = 0;
      futex_wake(&mtx->val, 1);
   }
}

static inline void
simple_mtx_assert_locked(simple_mtx_t *mtx)
{
   assert(mtx->val);
}

#else

/* Fallback: without futex support, wrap the C11 mtx_t API. */

typedef mtx_t simple_mtx_t;

#define _SIMPLE_MTX_INITIALIZER_NP _MTX_INITIALIZER_NP

static inline void
simple_mtx_init(simple_mtx_t *mtx, int type)
{
   mtx_init(mtx, type);
}

static inline void
simple_mtx_destroy(simple_mtx_t *mtx)
{
   mtx_destroy(mtx);
}

static inline void
simple_mtx_lock(simple_mtx_t *mtx)
{
   mtx_lock(mtx);
}

static inline void
simple_mtx_unlock(simple_mtx_t *mtx)
{
   mtx_unlock(mtx);
}

static inline void
simple_mtx_assert_locked(simple_mtx_t *mtx)
{
#ifndef NDEBUG
   /* NOTE: this would not work for recursive mutexes, but simple_mtx_t
    * doesn't support those.
    */
   int ret = mtx_trylock(mtx);
   assert(ret == thrd_busy);
   if (ret == thrd_success)
      mtx_unlock(mtx);
#else
   (void)mtx;
#endif
}

#endif /* UTIL_FUTEX_SUPPORTED */

#endif /* _SIMPLE_MTX_H */