/**
 * Copyright (c) 2024-2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "plugins/ets/runtime/ets_vm.h"
#include "plugins/ets/runtime/ets_platform_types.h"
#include "plugins/ets/runtime/types/ets_sync_primitives.h"
#include "runtime/include/thread_scopes.h"

#include <atomic>

namespace ark::ets {

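// Allocates the managed Mutex object together with its EtsWaitersList.
// The handle keeps the half-initialized mutex alive across the second
// allocation, which may trigger GC.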
/* static */
EtsMutex *EtsMutex::Create(EtsCoroutine *coro)
{
    EtsHandleScope scope(coro);
    auto *klass = PlatformTypes(coro)->coreMutex;
    auto hMutex = EtsHandle<EtsMutex>(coro, EtsMutex::FromEtsObject(EtsObject::Create(coro, klass)));
    auto *waitersList = EtsWaitersList::Create(coro);
    ASSERT(hMutex.GetPtr() != nullptr);
    hMutex->SetWaitersList(coro, waitersList);
    return hMutex.GetPtr();
}

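// Busy-waits for a duration that grows with the iteration number i.
// The volatile accumulator keeps the compiler from optimizing the loop away.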
ALWAYS_INLINE inline static void BackOff(uint32_t i)
{
    volatile uint32_t x = 0;  // Volatile to make sure the loop is not optimized out.
    const uint32_t spinCount = 10 * i;
    for (uint32_t spin = 0; spin < spinCount; spin++) {
        x = x + 1;
    }
}

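// Bounded spin: retry the CAS a few times with increasing back-off before
// the caller falls back to suspending the coroutine.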
static bool TrySpinLockFor(std::atomic<uint32_t> &waiters, uint32_t expected, uint32_t desired)
{
    static constexpr uint32_t maxBackOff = 3;  // NOLINT(readability-identifier-naming)
    static constexpr uint32_t maxIter = 3;     // NOLINT(readability-identifier-naming)
    uint32_t exp;
    for (uint32_t i = 1; i <= maxIter; ++i) {
        exp = expected;
        if (waiters.compare_exchange_weak(exp, desired, std::memory_order_acq_rel, std::memory_order_relaxed)) {
            return true;
        }
        BackOff(std::min<uint32_t>(i, maxBackOff));
    }
    return false;
}

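// waiters_ counts the owner plus pending waiters. Fast path: CAS 0 -> 1
// takes a free mutex. Slow path: fetch_add registers this coroutine; if the
// previous value was non-zero, somebody holds the lock, so suspend until
// Unlock() resumes us.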
void EtsMutex::Lock()
{
    if (TrySpinLockFor(waiters_, 0, 1)) {
        return;
    }
    // Atomic with acq_rel order reason: sync Lock/Unlock in other threads
    if (waiters_.fetch_add(1, std::memory_order_acq_rel) == 0) {
        return;
    }
    auto *coro = EtsCoroutine::GetCurrent();
    ASSERT(coro != nullptr);
    auto *coroManager = coro->GetCoroutineManager();
    auto awaitee = EtsWaitersList::Node(coroManager);
    SuspendCoroutine(&awaitee);
}

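// A previous value of 1 means nobody is waiting; otherwise resume one
// suspended waiter, which thereby becomes the new owner.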
void EtsMutex::Unlock()
{
    // Atomic with acq_rel order reason: sync Lock/Unlock in other threads
    if (waiters_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
        return;
    }
    ResumeCoroutine();
}

bool EtsMutex::IsHeld()
{
    // Atomic with relaxed order reason: sync is not needed here
    // because it is expected that method is not called concurrently with Lock/Unlock
    return waiters_.load(std::memory_order_relaxed) != 0;
}

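// Event state_ packs a fire flag together with the waiter count; ONE_WAITER,
// FIRE_STATE and the IsFireState/GetNumberOfWaiters helpers are defined in
// the header.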
/* static */
EtsEvent *EtsEvent::Create(EtsCoroutine *coro)
{
    EtsHandleScope scope(coro);
    auto *klass = PlatformTypes(coro)->coreEvent;
    auto hEvent = EtsHandle<EtsEvent>(coro, EtsEvent::FromEtsObject(EtsObject::Create(coro, klass)));
    auto *waitersList = EtsWaitersList::Create(coro);
    ASSERT(hEvent.GetPtr() != nullptr);
    hEvent->SetWaitersList(coro, waitersList);
    return hEvent.GetPtr();
}

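// Register as a waiter first; if the observed state already has the fire
// flag set, return without suspending.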
void EtsEvent::Wait()
{
    // Atomic with acq_rel order reason: sync Wait/Fire in other threads
    auto state = state_.fetch_add(ONE_WAITER, std::memory_order_acq_rel);
    if (IsFireState(state)) {
        return;
    }
    auto *coro = EtsCoroutine::GetCurrent();
    ASSERT(coro != nullptr);
    auto *coroManager = coro->GetCoroutineManager();
    auto awaitee = EtsWaitersList::Node(coroManager);
    SuspendCoroutine(&awaitee);
}

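// The exchange publishes the fired state exactly once; the first caller
// resumes every coroutine that registered before the exchange.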
void EtsEvent::Fire()
{
    // Atomic with acq_rel order reason: sync Wait/Fire in other threads
    auto state = state_.exchange(FIRE_STATE, std::memory_order_acq_rel);
    if (IsFireState(state)) {
        return;
    }
    for (auto waiters = GetNumberOfWaiters(state); waiters > 0; --waiters) {
        ResumeCoroutine();
    }
}

/* static */
EtsCondVar *EtsCondVar::Create(EtsCoroutine *coro)
{
    EtsHandleScope scope(coro);
    auto *klass = PlatformTypes(coro)->coreCondVar;
    auto hCondVar = EtsHandle<EtsCondVar>(coro, EtsCondVar::FromEtsObject(EtsObject::Create(coro, klass)));
    auto *waitersList = EtsWaitersList::Create(coro);
    ASSERT(hCondVar.GetPtr() != nullptr);
    hCondVar->SetWaitersList(coro, waitersList);
    return hCondVar.GetPtr();
}

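// Must be called with the mutex held: waiters_ is protected by it. The
// mutex is released for the duration of the wait and reacquired afterwards.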
void EtsCondVar::Wait(EtsHandle<EtsMutex> &mutex)
{
    ASSERT(mutex->IsHeld());
    waiters_++;
    mutex->Unlock();
    auto *coro = EtsCoroutine::GetCurrent();
    ASSERT(coro != nullptr);
    auto *coroManager = coro->GetCoroutineManager();
    auto awaitee = EtsWaitersList::Node(coroManager);
    SuspendCoroutine(&awaitee);
    mutex->Lock();
}

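// The mutex is only inspected by the debug assertion, hence [[maybe_unused]].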
void EtsCondVar::NotifyOne([[maybe_unused]] EtsMutex *mutex)
{
    ASSERT(mutex->IsHeld());
    if (waiters_ != 0) {
        ResumeCoroutine();
        waiters_--;
    }
}

void EtsCondVar::NotifyAll([[maybe_unused]] EtsMutex *mutex)
{
    ASSERT(mutex->IsHeld());
    while (waiters_ != 0) {
        ResumeCoroutine();
        waiters_--;
    }
}

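// MCS-style queue spinlock: tail_ points at the last queued Guard, each
// waiter spins on its own isOwner_ flag, and ownership is passed from a
// Guard to its successor.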
/* static */
EtsQueueSpinlock *EtsQueueSpinlock::Create(EtsCoroutine *coro)
{
    auto *klass = PlatformTypes(coro)->coreQueueSpinlock;
    return EtsQueueSpinlock::FromEtsObject(EtsObject::Create(coro, klass));
}

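// Join the queue by swapping ourselves into tail_. No predecessor means the
// lock was free; otherwise link behind the old tail and spin (as native
// code, outside managed execution) until ownership is handed over.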
void EtsQueueSpinlock::Acquire(Guard *waiter)
{
    // Atomic with acq_rel order reason: to guarantee happens-before for critical sections
    auto *oldTail = tail_.exchange(waiter, std::memory_order_acq_rel);
    if (oldTail == nullptr) {
        return;
    }
    // Atomic with release order reason: to guarantee happens-before with waiter constructor
    oldTail->next_.store(waiter, std::memory_order_release);
    auto spinWait = SpinWait();
    ScopedNativeCodeThread nativeCode(EtsCoroutine::GetCurrent());
    // Atomic with acquire order reason: to guarantee happens-before for critical sections
    while (!waiter->isOwner_.load(std::memory_order_acquire)) {
        spinWait();
    }
}

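// If no successor queued behind us, reset tail_ and leave. Otherwise wait
// until the successor publishes its node, then pass ownership to it.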
void EtsQueueSpinlock::Release(Guard *owner)
{
    auto *head = owner;
    // Atomic with release order reason: to guarantee happens-before for critical sections
    if (tail_.compare_exchange_strong(head, nullptr, std::memory_order_release, std::memory_order_relaxed)) {
        return;
    }
    // Atomic with acquire order reason: to guarantee happens-before with next constructor
    Guard *next = owner->next_.load(std::memory_order_acquire);
    auto spinWait = SpinWait();
    while (next == nullptr) {
        spinWait();
        // Atomic with acquire order reason: to guarantee happens-before with next constructor
        next = owner->next_.load(std::memory_order_acquire);
    }
    // Atomic with release order reason: to guarantee happens-before for critical sections
    next->isOwner_.store(true, std::memory_order_release);
}

bool EtsQueueSpinlock::IsHeld() const
{
    // Atomic with relaxed order reason: sync is not needed here
    // because it is expected that method is not called concurrently with Acquire/Release
    return tail_.load(std::memory_order_relaxed) != nullptr;
}

EtsQueueSpinlock::Guard::Guard(EtsHandle<EtsQueueSpinlock> &spinlock) : spinlock_(spinlock)
{
    ASSERT(spinlock_.GetPtr() != nullptr);
    spinlock_->Acquire(this);
}

EtsQueueSpinlock::Guard::~Guard()
{
    ASSERT(spinlock_.GetPtr() != nullptr);
    spinlock_->Release(this);
}

}  // namespace ark::ets