/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef API_BASE_CONTAINERS_ATOMICS_H
#define API_BASE_CONTAINERS_ATOMICS_H

#include <stdint.h>

#include <base/namespace.h>
#if defined(_MSC_VER) && defined(WIN32)
#include <intrin.h>
#elif defined(__arm__) || defined(__aarch64__)
#include <arm_acle.h>
#endif

BASE_BEGIN_NAMESPACE()
/*
 * Implementation of InterlockedIncrement/InterlockedDecrement (int32_t atomics).
 * This is the bare minimum needed to implement thread-safe reference counters.
 */

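/*
 * Illustrative sketch only (not part of this header's API): a minimal intrusive
 * reference counter built on the helpers below. The RefCounted class and its
 * member names are hypothetical and exist purely to show the intended
 * relaxed-increment / release-decrement usage pattern.
 *
 *   class RefCounted {
 *   public:
 *       void Ref() noexcept
 *       {
 *           AtomicIncrementRelaxed(&count_); // taking a reference needs no ordering
 *       }
 *       void Unref() noexcept
 *       {
 *           // Release on decrement, acquire before destruction, so writes made
 *           // while holding a reference are visible to the destroying thread.
 *           if (AtomicDecrementRelease(&count_) == 0) {
 *               AtomicFenceAcquire();
 *               delete this;
 *           }
 *       }
 *   protected:
 *       virtual ~RefCounted() = default;
 *   private:
 *       volatile int32_t count_ { 1 };
 *   };
 */
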
#if defined(_MSC_VER) && defined(WIN32)
// On Windows with Visual Studio we simply forward to the matching compiler intrinsics.
inline int32_t AtomicIncrement(volatile int32_t* a) noexcept
{
    return ::_InterlockedIncrement(reinterpret_cast<volatile long*>(a));
}

inline int32_t AtomicIncrementRelaxed(volatile int32_t* a) noexcept
{
    return ::_InterlockedIncrement(reinterpret_cast<volatile long*>(a));
}
inline int32_t AtomicIncrementAcquire(volatile int32_t* a) noexcept
{
    return ::_InterlockedIncrement(reinterpret_cast<volatile long*>(a));
}
inline int32_t AtomicIncrementRelease(volatile int32_t* a) noexcept
{
    return ::_InterlockedIncrement(reinterpret_cast<volatile long*>(a));
}

inline int32_t AtomicDecrement(volatile int32_t* a) noexcept
{
    return ::_InterlockedDecrement(reinterpret_cast<volatile long*>(a));
}

inline int32_t AtomicDecrementRelaxed(volatile int32_t* a) noexcept
{
    return ::_InterlockedDecrement(reinterpret_cast<volatile long*>(a));
}
inline int32_t AtomicDecrementAcquire(volatile int32_t* a) noexcept
{
    return ::_InterlockedDecrement(reinterpret_cast<volatile long*>(a));
}
inline int32_t AtomicDecrementRelease(volatile int32_t* a) noexcept
{
    return ::_InterlockedDecrement(reinterpret_cast<volatile long*>(a));
}

inline int32_t AtomicRead(const volatile int32_t* a) noexcept
{
    return ::_InterlockedExchangeAdd(reinterpret_cast<volatile long*>(const_cast<volatile int32_t*>(a)), 0);
}
inline int32_t AtomicReadRelaxed(const volatile int32_t* a) noexcept
{
    return *a;
}
inline int32_t AtomicReadAcquire(const volatile int32_t* a) noexcept
{
    auto ret = *a;
    _ReadWriteBarrier();
    return ret;
}

// Atomically increments *a only if its current value is non-zero.
// Returns the previous value: non-zero if the increment happened, 0 otherwise.
inline int32_t AtomicIncrementIfNotZero(volatile int32_t* a) noexcept
{
    int32_t v = AtomicReadRelaxed(a);
    while (v) {
        int32_t temp = v;
        v = ::_InterlockedCompareExchange(reinterpret_cast<volatile long*>(a), v + 1, v);
        if (v == temp) {
            return temp;
        }
    }
    return v;
}

inline void AtomicFenceRelaxed() noexcept
{
    // no op
}
inline void AtomicFenceAcquire() noexcept
{
    _ReadWriteBarrier();
}
inline void AtomicFenceRelease() noexcept
{
    _ReadWriteBarrier();
}

/**
 * @brief Simple spin lock with pause instruction
 */
class SpinLock {
public:
    SpinLock() noexcept = default;
    ~SpinLock() noexcept = default;

    SpinLock(const SpinLock&) = delete;
    SpinLock& operator=(const SpinLock&) = delete;
    SpinLock(SpinLock&&) = delete;
    SpinLock& operator=(SpinLock&&) = delete;

    void Lock() noexcept
    {
        while (_InterlockedCompareExchange(&lock_, 1, 0) == 1) {
            _mm_pause();
        }
    }
    void Unlock() noexcept
    {
        _InterlockedExchange(&lock_, 0);
    }

private:
    long lock_ = 0;
};
#elif defined(__has_builtin) && __has_builtin(__atomic_add_fetch) && __has_builtin(__atomic_load_n) && \
    __has_builtin(__atomic_compare_exchange_n)
/* GCC built-in atomics, also supported by Clang */
inline int32_t AtomicIncrement(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, 1, __ATOMIC_ACQ_REL);
}

inline int32_t AtomicIncrementRelaxed(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, 1, __ATOMIC_RELAXED);
}
inline int32_t AtomicIncrementAcquire(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, 1, __ATOMIC_ACQUIRE);
}
inline int32_t AtomicIncrementRelease(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, 1, __ATOMIC_RELEASE);
}

inline int32_t AtomicDecrement(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, -1, __ATOMIC_ACQ_REL);
}

inline int32_t AtomicDecrementRelaxed(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, -1, __ATOMIC_RELAXED);
}
inline int32_t AtomicDecrementAcquire(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, -1, __ATOMIC_ACQUIRE);
}
inline int32_t AtomicDecrementRelease(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, -1, __ATOMIC_RELEASE);
}

inline int32_t AtomicRead(const volatile int32_t* a) noexcept
{
    return __atomic_load_n(a, __ATOMIC_ACQUIRE);
}
inline int32_t AtomicReadRelaxed(const volatile int32_t* a) noexcept
{
    return __atomic_load_n(a, __ATOMIC_RELAXED);
}
inline int32_t AtomicReadAcquire(const volatile int32_t* a) noexcept
{
    return __atomic_load_n(a, __ATOMIC_ACQUIRE);
}

// Atomically increments *a only if its current value is non-zero.
// Returns the previous value: non-zero if the increment happened, 0 otherwise.
inline int32_t AtomicIncrementIfNotZero(volatile int32_t* a) noexcept
{
    int32_t v = AtomicReadRelaxed(a);
    while (v) {
        int32_t temp = v;
        if (__atomic_compare_exchange_n(a, &v, temp + 1, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
            return temp;
        }
    }
    return v;
}

inline void AtomicFenceRelaxed() noexcept
{
    __atomic_thread_fence(__ATOMIC_RELAXED);
}
inline void AtomicFenceAcquire() noexcept
{
    __atomic_thread_fence(__ATOMIC_ACQUIRE);
}
inline void AtomicFenceRelease() noexcept
{
    __atomic_thread_fence(__ATOMIC_RELEASE);
}
/**
 * @brief Implements simple TTAS spin lock with pause instruction
 */
class SpinLock {
public:
    SpinLock() noexcept = default;
    ~SpinLock() noexcept = default;

    SpinLock(const SpinLock&) = delete;
    SpinLock& operator=(const SpinLock&) = delete;
    SpinLock(SpinLock&&) = delete;
    SpinLock& operator=(SpinLock&&) = delete;

    void Lock() noexcept
    {
        long expected = 0;

#if defined(__aarch64__)
        __sevl();
#endif

        while (!__atomic_compare_exchange_n(&lock_, &expected, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
            expected = 0;
#if __has_builtin(__builtin_ia32_pause)
            __builtin_ia32_pause();
#elif defined(__arm__) || defined(__aarch64__)
            __wfe();
#endif
        }
    }
    void Unlock() noexcept
    {
        __atomic_store_n(&lock_, 0, __ATOMIC_RELEASE);
#if defined(__arm__)
        __sev();
#endif
    }

private:
    long lock_ = 0;
};

#else
#error Compiler/platform specific atomic methods not implemented!
#endif

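/*
 * Illustrative sketch only: AtomicIncrementIfNotZero is the building block for
 * "promote only if still alive" semantics, e.g. turning a weak reference into a
 * strong one. The TryRef function below is hypothetical and only demonstrates
 * how the return value is intended to be used.
 *
 *   inline bool TryRef(volatile int32_t* strongCount) noexcept
 *   {
 *       // Succeeds only if at least one strong reference still exists;
 *       // a return value of 0 means the object is already being destroyed.
 *       return AtomicIncrementIfNotZero(strongCount) != 0;
 *   }
 */
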
/**
 * @brief Scoped helper to lock and unlock spin locks.
 */
class ScopedSpinLock {
public:
    ScopedSpinLock(const ScopedSpinLock&) = delete;
    ScopedSpinLock& operator=(const ScopedSpinLock&) = delete;
    ScopedSpinLock(ScopedSpinLock&&) = delete;
    ScopedSpinLock& operator=(ScopedSpinLock&&) = delete;

    explicit ScopedSpinLock(SpinLock& l) noexcept : lock_(l)
    {
        lock_.Lock();
    }
    ~ScopedSpinLock() noexcept
    {
        lock_.Unlock();
    }

private:
    SpinLock& lock_;
};
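
/*
 * Illustrative sketch only: typical RAII-style use of SpinLock through
 * ScopedSpinLock. The Counters struct and its members are hypothetical.
 *
 *   struct Counters {
 *       SpinLock lock;
 *       int32_t hits = 0;
 *
 *       void AddHit()
 *       {
 *           // The lock is taken here and released automatically at scope exit,
 *           // including on early returns.
 *           ScopedSpinLock guard(lock);
 *           ++hits;
 *       }
 *   };
 */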

BASE_END_NAMESPACE()
#endif // API_BASE_CONTAINERS_ATOMICS_H