/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* _GNU_SOURCE must be defined before any system header is included,
 * otherwise it has no effect on headers processed earlier. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include <unistd.h>
#include "cpp/mutex.h"
#include <map>
#include <functional>
#include "sync/sync.h"
#include "eu/co_routine.h"
#include "internal_inc/osal.h"
#include "internal_inc/types.h"
#include "sync/mutex_private.h"
#include "dfx/log/ffrt_log_api.h"
#include "ffrt_trace.h"
#include "tm/cpu_task.h"

namespace ffrt {
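// mutexPrivate is a futex-style lock. `l` holds one of three sync_detail
// states: UNLOCK (free), LOCK (held, no waiters) and WAIT (held, waiters
// may be queued). try_lock() only attempts the uncontended UNLOCK -> LOCK
// transition and never blocks.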
bool mutexPrivate::try_lock()
{
    int v = sync_detail::UNLOCK;
    bool ret = l.compare_exchange_strong(v, sync_detail::LOCK, std::memory_order_acquire, std::memory_order_relaxed);
#ifdef FFRT_MUTEX_DEADLOCK_CHECK
    if (ret) {
        uint64_t task = ExecuteCtx::Cur()->task ? reinterpret_cast<uint64_t>(ExecuteCtx::Cur()->task) : GetTid();
        MutexGraph::Instance().AddNode(task, 0, false);
        owner.store(task, std::memory_order_relaxed);
    }
#endif
    return ret;
}

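// lock() tries the uncontended fast path first; on contention the state is
// forced to WAIT and the caller is parked in wait() until unlock()/wake()
// lets it retry the exchange loop.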
void mutexPrivate::lock()
{
#ifdef FFRT_MUTEX_DEADLOCK_CHECK
    uint64_t task = ExecuteCtx::Cur()->task ? reinterpret_cast<uint64_t>(ExecuteCtx::Cur()->task) : GetTid();
    uint64_t ownerTask = owner.load(std::memory_order_relaxed);
    if (ownerTask) {
        MutexGraph::Instance().AddNode(task, ownerTask, true);
    } else {
        MutexGraph::Instance().AddNode(task, 0, false);
    }
#endif
    int v = sync_detail::UNLOCK;
    if (l.compare_exchange_strong(v, sync_detail::LOCK, std::memory_order_acquire, std::memory_order_relaxed)) {
        goto lock_out;
    }
    if (l.load(std::memory_order_relaxed) == sync_detail::WAIT) {
        wait();
    }
    while (l.exchange(sync_detail::WAIT, std::memory_order_acquire) != sync_detail::UNLOCK) {
        wait();
    }

lock_out:
#ifdef FFRT_MUTEX_DEADLOCK_CHECK
    owner.store(task, std::memory_order_relaxed);
#endif
    return;
}

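// RecursiveMutexPrivate keeps its bookkeeping in taskLockNums: the first
// element is the owner id (UINT64_MAX when unowned), the second the
// recursion depth. Thread owners are keyed by TID; coroutine tasks by
// task->gid with the top bit (0x8000000000000000) set, presumably so gids
// cannot collide with TIDs. fMutex only protects this bookkeeping; actual
// blocking happens on the inner mutex `mt`. Note that try_lock() drops
// fMutex before taking `mt`, so it can briefly block there if another
// contender slips in between the two locks.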
bool RecursiveMutexPrivate::try_lock()
{
    auto ctx = ExecuteCtx::Cur();
    auto task = ctx->task;
    if ((!USE_COROUTINE) || (task == nullptr)) {
        fMutex.lock();
        if (taskLockNums.first == UINT64_MAX) {
            fMutex.unlock();
            mt.lock();
            fMutex.lock();
            taskLockNums = std::make_pair(GetTid(), 1);
            fMutex.unlock();
            return true;
        }

        if (taskLockNums.first == GetTid()) {
            taskLockNums.second += 1;
            fMutex.unlock();
            return true;
        }

        fMutex.unlock();
        return false;
    }

    fMutex.lock();
    if (taskLockNums.first == UINT64_MAX) {
        fMutex.unlock();
        mt.lock();
        fMutex.lock();
        taskLockNums = std::make_pair(task->gid | 0x8000000000000000, 1);
        fMutex.unlock();
        return true;
    }

    if (taskLockNums.first == (task->gid | 0x8000000000000000)) {
        taskLockNums.second += 1;
        fMutex.unlock();
        return true;
    }

    fMutex.unlock();
    return false;
}

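// lock() for the recursive mutex: a re-entrant call by the current owner
// just bumps the depth; any other caller releases the bookkeeping lock,
// blocks on `mt`, then records itself as the new owner with depth 1.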
void RecursiveMutexPrivate::lock()
{
    auto ctx = ExecuteCtx::Cur();
    auto task = ctx->task;
    if ((!USE_COROUTINE) || (task == nullptr)) {
        fMutex.lock();
        if (taskLockNums.first != GetTid()) {
            fMutex.unlock();
            mt.lock();
            fMutex.lock();
            taskLockNums = std::make_pair(GetTid(), 1);
            fMutex.unlock();
            return;
        }

        taskLockNums.second += 1;
        fMutex.unlock();
        return;
    }

    fMutex.lock();
    if (taskLockNums.first != (task->gid | 0x8000000000000000)) {
        fMutex.unlock();
        mt.lock();
        fMutex.lock();
        taskLockNums = std::make_pair(task->gid | 0x8000000000000000, 1);
        fMutex.unlock();
        return;
    }

    taskLockNums.second += 1;
    fMutex.unlock();
}

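// unlock() for the recursive mutex: only the owner may unlock, and `mt` is
// released only when the outermost recursion level is left; an unlock by a
// non-owner is silently ignored.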
void RecursiveMutexPrivate::unlock()
{
    auto ctx = ExecuteCtx::Cur();
    auto task = ctx->task;
    if ((!USE_COROUTINE) || (task == nullptr)) {
        fMutex.lock();
        if (taskLockNums.first != GetTid()) {
            fMutex.unlock();
            return;
        }

        if (taskLockNums.second == 1) {
            taskLockNums = std::make_pair(UINT64_MAX, 0);
            fMutex.unlock();
            mt.unlock();
            return;
        }

        taskLockNums.second -= 1;
        fMutex.unlock();
        return;
    }

    fMutex.lock();
    if (taskLockNums.first != (task->gid | 0x8000000000000000)) {
        fMutex.unlock();
        return;
    }

    if (taskLockNums.second == 1) {
        taskLockNums = std::make_pair(UINT64_MAX, 0);
        fMutex.unlock();
        mt.unlock();
        return;
    }

    taskLockNums.second -= 1;
    fMutex.unlock();
}

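// unlock() for the plain mutex: swap the state back to UNLOCK; if the old
// state was WAIT, a waiter may be parked, so wake one.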
void mutexPrivate::unlock()
{
#ifdef FFRT_MUTEX_DEADLOCK_CHECK
    uint64_t ownerTask = owner.load(std::memory_order_relaxed);
    owner.store(0, std::memory_order_relaxed);
    MutexGraph::Instance().RemoveNode(ownerTask);
#endif
    if (l.exchange(sync_detail::UNLOCK, std::memory_order_release) == sync_detail::WAIT) {
        wake();
    }
}

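// wait() parks the caller. Plain threads (and coroutines running in legacy
// mode) block on the per-context condition variable; FFRT coroutines
// suspend through CoWait() instead. Both paths re-check the WAIT state
// under wlock so a racing unlock() cannot be missed.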
void mutexPrivate::wait()
{
    auto ctx = ExecuteCtx::Cur();
    auto task = ctx->task;
    bool legacyMode = (task != nullptr) && (task->coRoutine != nullptr) && task->coRoutine->legacyMode;
    if (!USE_COROUTINE || task == nullptr || legacyMode) {
        wlock.lock();
        if (l.load(std::memory_order_relaxed) != sync_detail::WAIT) {
            wlock.unlock();
            return;
        }
        list.PushBack(ctx->wn.node);
        std::unique_lock<std::mutex> lk(ctx->wn.wl);
        if (legacyMode) {
            task->coRoutine->blockType = BlockType::BLOCK_THREAD;
            ctx->wn.task = task;
        }
        wlock.unlock();
        ctx->wn.cv.wait(lk);
        return;
    } else {
        FFRT_BLOCK_TRACER(task->gid, mtx);
        CoWait([this](CPUEUTask* inTask) -> bool {
            wlock.lock();
            if (l.load(std::memory_order_relaxed) != sync_detail::WAIT) {
                wlock.unlock();
                return false;
            }
            list.PushBack(inTask->fq_we.node);
            wlock.unlock();
            return true;
        });
    }
}

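// wake() pops one waiter from the queue and resumes it: via its condition
// variable if it blocked as a thread (or in legacy mode), via CoWake() if
// it is a suspended coroutine.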
void mutexPrivate::wake()
{
    wlock.lock();
    if (list.Empty()) {
        wlock.unlock();
        return;
    }
    WaitEntry* we = list.PopFront(&WaitEntry::node);
    if (we == nullptr) {
        wlock.unlock();
        return;
    }
    CPUEUTask* task = we->task;
    bool blockThread = (task != nullptr) && (task->coRoutine != nullptr) &&
        (task->coRoutine->blockType == BlockType::BLOCK_THREAD);
    if (!USE_COROUTINE || we->weType == 2 || blockThread) {
        WaitUntilEntry* wue = static_cast<WaitUntilEntry*>(we);
        std::unique_lock lk(wue->wl);
        if (blockThread) {
            task->coRoutine->blockType = BlockType::BLOCK_COROUTINE;
            we->task = nullptr;
        }
        wlock.unlock();
        wue->cv.notify_one();
    } else {
        wlock.unlock();
        CoWake(task, false);
    }
}
} // namespace ffrt

#ifdef __cplusplus
extern "C" {
#endif
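/* A minimal usage sketch of the C API below (illustrative only; error
 * handling trimmed):
 *
 *   ffrt_mutex_t m;
 *   ffrt_mutex_init(&m, NULL);   // attr must be NULL
 *   ffrt_mutex_lock(&m);
 *   // ... critical section ...
 *   ffrt_mutex_unlock(&m);
 *   ffrt_mutex_destroy(&m);
 */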
API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_init(ffrt_mutex_t* mutex, const ffrt_mutexattr_t* attr)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }
    if (attr != nullptr) {
        FFRT_LOGE("only normal mutex is supported");
        return ffrt_error;
    }
    static_assert(sizeof(ffrt::mutexPrivate) <= ffrt_mutex_storage_size,
        "ffrt::mutexPrivate must fit into ffrt_mutex_storage_size");

    new (mutex) ffrt::mutexPrivate();
    return ffrt_success;
}

API_ATTRIBUTE((visibility("default")))
int ffrt_recursive_mutex_init(ffrt_mutex_t* mutex, const ffrt_mutexattr_t* attr)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }

    if (attr != nullptr) {
        FFRT_LOGE("mutex attributes are not supported");
        return ffrt_error;
    }

    static_assert(sizeof(ffrt::RecursiveMutexPrivate) <= ffrt_mutex_storage_size,
        "ffrt::RecursiveMutexPrivate must fit into ffrt_mutex_storage_size");

    new (mutex) ffrt::RecursiveMutexPrivate();
    return ffrt_success;
}

API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_lock(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }
    auto p = reinterpret_cast<ffrt::mutexPrivate*>(mutex);
    p->lock();
    return ffrt_success;
}

API_ATTRIBUTE((visibility("default")))
int ffrt_recursive_mutex_lock(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }

    auto p = reinterpret_cast<ffrt::RecursiveMutexPrivate*>(mutex);
    p->lock();
    return ffrt_success;
}

API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_unlock(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }
    auto p = reinterpret_cast<ffrt::mutexPrivate*>(mutex);
    p->unlock();
    return ffrt_success;
}

API_ATTRIBUTE((visibility("default")))
int ffrt_recursive_mutex_unlock(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }

    auto p = reinterpret_cast<ffrt::RecursiveMutexPrivate*>(mutex);
    p->unlock();
    return ffrt_success;
}

API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_trylock(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }
    auto p = reinterpret_cast<ffrt::mutexPrivate*>(mutex);
    return p->try_lock() ? ffrt_success : ffrt_error_busy;
}

API_ATTRIBUTE((visibility("default")))
int ffrt_recursive_mutex_trylock(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }

    auto p = reinterpret_cast<ffrt::RecursiveMutexPrivate*>(mutex);
    return p->try_lock() ? ffrt_success : ffrt_error_busy;
}

API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_destroy(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }
    auto p = reinterpret_cast<ffrt::mutexPrivate*>(mutex);
    p->~mutexPrivate();
    return ffrt_success;
}

API_ATTRIBUTE((visibility("default")))
int ffrt_recursive_mutex_destroy(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be null");
        return ffrt_error_inval;
    }

    auto p = reinterpret_cast<ffrt::RecursiveMutexPrivate*>(mutex);
    p->~RecursiveMutexPrivate();
    return ffrt_success;
}

#ifdef __cplusplus
}
#endif