/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include <unistd.h>
#include <map>
#include <functional>
#include "cpp/mutex.h"
#include "sync/sync.h"
#include "core/task_ctx.h"
#include "eu/co_routine.h"
#include "internal_inc/osal.h"
#include "sync/mutex_private.h"
#include "dfx/log/ffrt_log_api.h"
#include "dfx/trace/ffrt_trace.h"

namespace ffrt {
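// The lock word l has three states (see sync_detail): UNLOCK, LOCK (held with
// no waiters recorded), and WAIT (held with possibly parked waiters).

// Fast path: a single CAS from UNLOCK to LOCK, never blocking. When
// FFRT_MUTEX_DEADLOCK_CHECK is enabled, a successful acquisition also records
// the new holder in the mutex dependency graph.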
bool mutexPrivate::try_lock()
{
    int v = sync_detail::UNLOCK;
    bool ret = l.compare_exchange_strong(v, sync_detail::LOCK, std::memory_order_acquire, std::memory_order_relaxed);
#ifdef FFRT_MUTEX_DEADLOCK_CHECK
    if (ret) {
        uint64_t task = ExecuteCtx::Cur()->task ? reinterpret_cast<uint64_t>(ExecuteCtx::Cur()->task) : GetTid();
        MutexGraph::Instance().AddNode(task, 0, false);
        owner.store(task, std::memory_order_relaxed);
    }
#endif
    return ret;
}

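// Slow path: try the UNLOCK -> LOCK CAS first; otherwise mark the lock word
// as WAIT and park in wait() until an exchange observes UNLOCK. With
// FFRT_MUTEX_DEADLOCK_CHECK enabled, the acquiring task and the current owner
// are linked in the dependency graph before blocking, and ownership is
// recorded once the lock is taken.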
void mutexPrivate::lock()
{
#ifdef FFRT_MUTEX_DEADLOCK_CHECK
    uint64_t task;
    uint64_t ownerTask;
    task = ExecuteCtx::Cur()->task ? reinterpret_cast<uint64_t>(ExecuteCtx::Cur()->task) : GetTid();
    ownerTask = owner.load(std::memory_order_relaxed);
    if (ownerTask) {
        MutexGraph::Instance().AddNode(task, ownerTask, true);
    } else {
        MutexGraph::Instance().AddNode(task, 0, false);
    }
#endif
    int v = sync_detail::UNLOCK;
    if (l.compare_exchange_strong(v, sync_detail::LOCK, std::memory_order_acquire, std::memory_order_relaxed)) {
        goto lock_out;
    }
    if (l.load(std::memory_order_relaxed) == sync_detail::WAIT) {
        wait();
    }
    while (l.exchange(sync_detail::WAIT, std::memory_order_acquire) != sync_detail::UNLOCK) {
        wait();
    }

lock_out:
#ifdef FFRT_MUTEX_DEADLOCK_CHECK
    owner.store(task, std::memory_order_relaxed);
#endif
    return;
}

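// Release: swap the lock word back to UNLOCK; if the previous state was WAIT,
// wake one parked waiter. Deadlock checking removes the owner's node first.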
void mutexPrivate::unlock()
{
#ifdef FFRT_MUTEX_DEADLOCK_CHECK
    uint64_t ownerTask;
    ownerTask = owner.load(std::memory_order_relaxed);
    owner.store(0, std::memory_order_relaxed);
    MutexGraph::Instance().RemoveNode(ownerTask);
#endif
    if (l.exchange(sync_detail::UNLOCK, std::memory_order_release) == sync_detail::WAIT) {
        wake();
    }
}

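// Park the current caller. Plain threads (or contexts without an FFRT task)
// block on the per-context condition variable; coroutine tasks enqueue their
// wait entry and yield through CoWait(). Both paths re-check the lock word
// under wlock and skip parking if it is no longer WAIT, avoiding a lost wakeup.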
void mutexPrivate::wait()
{
    auto ctx = ExecuteCtx::Cur();
    auto task = ctx->task;
    if (!USE_COROUTINE || task == nullptr) {
        wlock.lock();
        if (l.load(std::memory_order_relaxed) != sync_detail::WAIT) {
            wlock.unlock();
            return;
        }
        list.PushBack(ctx->wn.node);
        std::unique_lock<std::mutex> lk(ctx->wn.wl);
        wlock.unlock();
        ctx->wn.cv.wait(lk);
        return;
    } else {
        FFRT_BLOCK_TRACER(task->gid, mtx);
        CoWait([this](TaskCtx* inTask) -> bool {
            wlock.lock();
            if (l.load(std::memory_order_relaxed) != sync_detail::WAIT) {
                wlock.unlock();
                return false;
            }
            list.PushBack(inTask->fq_we.node);
            wlock.unlock();
            return true;
        });
    }
}

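// Pop one entry from the wait list and resume it: notify the condition
// variable for thread-style waiters (weType == 2), or CoWake() the parked
// coroutine task.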
void mutexPrivate::wake()
{
    wlock.lock();
    if (list.Empty()) {
        wlock.unlock();
        return;
    }
    WaitEntry* we = list.PopFront(&WaitEntry::node);
    if (we == nullptr) {
        wlock.unlock();
        return;
    }
    TaskCtx* task = we->task;
    if (!USE_COROUTINE || we->weType == 2) {
        WaitUntilEntry* wue = static_cast<WaitUntilEntry*>(we);
        std::unique_lock lk(wue->wl);
        wlock.unlock();
        wue->cv.notify_one();
    } else {
        wlock.unlock();
        CoWake(task, false);
    }
}
} // namespace ffrt

#ifdef __cplusplus
extern "C" {
#endif
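// Initializes the mutex by placement-constructing an ffrt::mutexPrivate in the
// caller-provided ffrt_mutex_t storage. Only the default (normal) mutex is
// supported, so any non-null attr is rejected.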
API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_init(ffrt_mutex_t* mutex, const ffrt_mutexattr_t* attr)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be empty");
        return ffrt_error_inval;
    }
    if (attr != nullptr) {
        FFRT_LOGE("only support normal mutex");
        return ffrt_error;
    }
    static_assert(sizeof(ffrt::mutexPrivate) <= ffrt_mutex_storage_size,
        "size must not exceed ffrt_mutex_storage_size");

    new (mutex) ffrt::mutexPrivate();
    return ffrt_success;
}

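// Blocking lock on a mutex previously initialized with ffrt_mutex_init().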
API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_lock(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be empty");
        return ffrt_error_inval;
    }
    auto p = (ffrt::mutexPrivate*)mutex;
    p->lock();
    return ffrt_success;
}

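// Releases the mutex, waking one parked waiter if necessary.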
API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_unlock(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be empty");
        return ffrt_error_inval;
    }
    auto p = (ffrt::mutexPrivate*)mutex;
    p->unlock();
    return ffrt_success;
}

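// Non-blocking acquire: returns ffrt_error_busy when the mutex is already held.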
API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_trylock(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be empty");
        return ffrt_error_inval;
    }
    auto p = (ffrt::mutexPrivate*)mutex;
    return p->try_lock() ? ffrt_success : ffrt_error_busy;
}

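// Runs the mutexPrivate destructor in place; the ffrt_mutex_t storage itself
// remains owned by the caller.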
API_ATTRIBUTE((visibility("default")))
int ffrt_mutex_destroy(ffrt_mutex_t* mutex)
{
    if (!mutex) {
        FFRT_LOGE("mutex should not be empty");
        return ffrt_error_inval;
    }
    auto p = (ffrt::mutexPrivate*)mutex;
    p->~mutexPrivate();
    return ffrt_success;
}
#ifdef __cplusplus
}
#endif
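// Minimal usage sketch of the C interface above (illustrative only; real code
// would typically call these from FFRT tasks and check every return value):
//
//     ffrt_mutex_t mtx;
//     if (ffrt_mutex_init(&mtx, nullptr) != ffrt_success) {
//         return;
//     }
//     ffrt_mutex_lock(&mtx);
//     /* ... critical section ... */
//     ffrt_mutex_unlock(&mtx);
//     ffrt_mutex_destroy(&mtx);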