//===------------------------- mutex.cpp ----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "mutex"
#include "limits"
#include "system_error"
#include "include/atomic_support.h"
#include "__undef_macros"

_LIBCPP_BEGIN_NAMESPACE_STD
#ifndef _LIBCPP_HAS_NO_THREADS

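// Out-of-line definitions for the lock tag objects declared in <mutex>.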
const defer_lock_t defer_lock = {};
const try_to_lock_t try_to_lock = {};
const adopt_lock_t adopt_lock = {};

mutex::~mutex()
{
    __libcpp_mutex_destroy(&__m_);
}

void
mutex::lock()
{
    int ec = __libcpp_mutex_lock(&__m_);
    if (ec)
        __throw_system_error(ec, "mutex lock failed");
}

bool
mutex::try_lock() _NOEXCEPT
{
    return __libcpp_mutex_trylock(&__m_);
}

void
mutex::unlock() _NOEXCEPT
{
    int ec = __libcpp_mutex_unlock(&__m_);
    (void)ec;
    _LIBCPP_ASSERT(ec == 0, "call to mutex::unlock failed");
}

// recursive_mutex

recursive_mutex::recursive_mutex()
{
    int ec = __libcpp_recursive_mutex_init(&__m_);
    if (ec)
        __throw_system_error(ec, "recursive_mutex constructor failed");
}

recursive_mutex::~recursive_mutex()
{
    int e = __libcpp_recursive_mutex_destroy(&__m_);
    (void)e;
    _LIBCPP_ASSERT(e == 0, "call to ~recursive_mutex() failed");
}

void
recursive_mutex::lock()
{
    int ec = __libcpp_recursive_mutex_lock(&__m_);
    if (ec)
        __throw_system_error(ec, "recursive_mutex lock failed");
}

void
recursive_mutex::unlock() _NOEXCEPT
{
    int e = __libcpp_recursive_mutex_unlock(&__m_);
    (void)e;
    _LIBCPP_ASSERT(e == 0, "call to recursive_mutex::unlock() failed");
}

bool
recursive_mutex::try_lock() _NOEXCEPT
{
    return __libcpp_recursive_mutex_trylock(&__m_);
}

// timed_mutex

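// timed_mutex is built from a plain mutex, a condition variable, and a
// "locked" flag: lock() waits on the condition variable until the flag is
// clear, and unlock() clears it and notifies one waiter. The timed waiting
// operations (try_lock_for / try_lock_until) are function templates and live
// in the <mutex> header rather than in this translation unit.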
timed_mutex::timed_mutex()
    : __locked_(false)
{
}

timed_mutex::~timed_mutex()
{
    lock_guard<mutex> _(__m_);
}

void
timed_mutex::lock()
{
    unique_lock<mutex> lk(__m_);
    while (__locked_)
        __cv_.wait(lk);
    __locked_ = true;
}

bool
timed_mutex::try_lock() _NOEXCEPT
{
    unique_lock<mutex> lk(__m_, try_to_lock);
    if (lk.owns_lock() && !__locked_)
    {
        __locked_ = true;
        return true;
    }
    return false;
}

void
timed_mutex::unlock() _NOEXCEPT
{
    lock_guard<mutex> _(__m_);
    __locked_ = false;
    __cv_.notify_one();
}

// recursive_timed_mutex

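// recursive_timed_mutex records ownership as a (__id_, __count_) pair guarded
// by the internal mutex: __count_ == 0 means the mutex is unowned; otherwise
// __id_ is the owning thread and __count_ is the recursion depth.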
recursive_timed_mutex::recursive_timed_mutex()
    : __count_(0),
      __id_(0)
{
}

recursive_timed_mutex::~recursive_timed_mutex()
{
    lock_guard<mutex> _(__m_);
}

void
recursive_timed_mutex::lock()
{
    __libcpp_thread_id id = __libcpp_thread_get_current_id();
    unique_lock<mutex> lk(__m_);
    if (__libcpp_thread_id_equal(id, __id_))
    {
        if (__count_ == numeric_limits<size_t>::max())
            __throw_system_error(EAGAIN, "recursive_timed_mutex lock limit reached");
        ++__count_;
        return;
    }
    while (__count_ != 0)
        __cv_.wait(lk);
    __count_ = 1;
    __id_ = id;
}

bool
recursive_timed_mutex::try_lock() _NOEXCEPT
{
    __libcpp_thread_id id = __libcpp_thread_get_current_id();
    unique_lock<mutex> lk(__m_, try_to_lock);
    if (lk.owns_lock() && (__count_ == 0 || __libcpp_thread_id_equal(id, __id_)))
    {
        if (__count_ == numeric_limits<size_t>::max())
            return false;
        ++__count_;
        __id_ = id;
        return true;
    }
    return false;
}

void
recursive_timed_mutex::unlock() _NOEXCEPT
{
    unique_lock<mutex> lk(__m_);
    if (--__count_ == 0)
    {
        __id_ = 0;
        lk.unlock();
        __cv_.notify_one();
    }
}

#endif // !_LIBCPP_HAS_NO_THREADS

// If dispatch_once_f ever handles C++ exceptions, and if one can get to it
// without illegal macros (unexpected macros not beginning with _UpperCase or
// __lowercase), and if it stops spinning waiting threads, then call_once should
// call into dispatch_once_f instead of here. Relevant radar this code needs to
// keep in sync with: 7741191.
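//
// A rough sketch of how a call is expected to arrive here (the exact glue is
// in the <mutex> header, and the member/helper names below are illustrative,
// not authoritative):
//
//   std::once_flag flag;
//   std::call_once(flag, []{ /* runs at most once */ });
//
// call_once type-erases the callable and its arguments into a local object,
// then calls roughly
//
//   __call_once(flag.__state_, &that_object, &proxy_that_invokes_it);
//
// so this translation unit never needs to see the callable's type.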

#ifndef _LIBCPP_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC static __libcpp_mutex_t mut = _LIBCPP_MUTEX_INITIALIZER;
_LIBCPP_SAFE_STATIC static __libcpp_condvar_t cv = _LIBCPP_CONDVAR_INITIALIZER;
#endif

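// __call_once drives the once-flag state machine: 0 means the function has
// not run, 1 means some thread is currently running it, and ~0ul means it has
// completed. In the threaded build, threads that observe the value 1 block on
// the condition variable above until the running thread either finishes (flag
// becomes ~0ul) or throws (flag is reset to 0 so another caller can retry).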
void
__call_once(volatile unsigned long& flag, void* arg, void(*func)(void*))
{
#if defined(_LIBCPP_HAS_NO_THREADS)
    if (flag == 0)
    {
#ifndef _LIBCPP_NO_EXCEPTIONS
        try
        {
#endif // _LIBCPP_NO_EXCEPTIONS
            flag = 1;
            func(arg);
            flag = ~0ul;
#ifndef _LIBCPP_NO_EXCEPTIONS
        }
        catch (...)
        {
            flag = 0ul;
            throw;
        }
#endif // _LIBCPP_NO_EXCEPTIONS
    }
#else // !_LIBCPP_HAS_NO_THREADS
    __libcpp_mutex_lock(&mut);
    while (flag == 1)
        __libcpp_condvar_wait(&cv, &mut);
    if (flag == 0)
    {
#ifndef _LIBCPP_NO_EXCEPTIONS
        try
        {
#endif // _LIBCPP_NO_EXCEPTIONS
            __libcpp_relaxed_store(&flag, 1ul);
            __libcpp_mutex_unlock(&mut);
            func(arg);
            __libcpp_mutex_lock(&mut);
            __libcpp_atomic_store(&flag, ~0ul, _AO_Release);
            __libcpp_mutex_unlock(&mut);
            __libcpp_condvar_broadcast(&cv);
#ifndef _LIBCPP_NO_EXCEPTIONS
        }
        catch (...)
        {
            __libcpp_mutex_lock(&mut);
            __libcpp_relaxed_store(&flag, 0ul);
            __libcpp_mutex_unlock(&mut);
            __libcpp_condvar_broadcast(&cv);
            throw;
        }
#endif // _LIBCPP_NO_EXCEPTIONS
    }
    else
        __libcpp_mutex_unlock(&mut);
#endif // !_LIBCPP_HAS_NO_THREADS

}

_LIBCPP_END_NAMESPACE_STD