/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef META_BASE_ATOMICS_H
#define META_BASE_ATOMICS_H

#include <stdint.h>

#include <core/namespace.h>
#if defined(_MSC_VER) && defined(WIN32)
#include <intrin.h>
#elif defined(__arm__) || defined(__aarch64__)
#include <arm_acle.h>
#endif

CORE_BEGIN_NAMESPACE();
/*
 * Implementation of InterlockedIncrement/InterlockedDecrement style int32_t atomics.
 * The bare minimum needed to implement thread-safe reference counters.
 * (An illustrative usage sketch follows the platform-specific implementations below.)
 */

#if defined(_MSC_VER) && defined(WIN32)
// On Windows with Visual Studio, we just forward to the matching compiler intrinsics.
inline int32_t AtomicIncrement(volatile int32_t* a) noexcept
{
    return ::_InterlockedIncrement((long*)a);
}
inline int32_t AtomicDecrement(volatile int32_t* a) noexcept
{
    return ::_InterlockedDecrement((long*)a);
}
inline int32_t AtomicRead(const volatile int32_t* a) noexcept
{
    return ::_InterlockedExchangeAdd((long*)a, 0);
}
// Increments *a only if it is non-zero. Returns the value *a had before the call,
// i.e. zero when no increment was performed.
inline int32_t AtomicIncrementIfNotZero(volatile int32_t* a) noexcept
{
    int32_t v = AtomicRead(a);
    while (v) {
        int32_t temp = v;
        v = ::_InterlockedCompareExchange((long*)a, v + 1, v);
        if (v == temp) {
            return temp;
        }
    }
    return v;
}

// Trivial spinlock implemented with atomics.
// NOTE: this does NOT yield while waiting, so use it ONLY where lock contention is expected to be
// negligible. It also does not ensure fairness, but that is most likely enough for our reference
// counting purposes. It is non-recursive, so a thread can only lock it once.
class SpinLock {
public:
    SpinLock() noexcept = default;
    ~SpinLock() noexcept = default;
    SpinLock(const SpinLock&) = delete;
    SpinLock& operator=(const SpinLock&) = delete;
    SpinLock(SpinLock&&) = delete;
    SpinLock& operator=(SpinLock&&) = delete;
    void Lock() noexcept
    {
        while (_InterlockedCompareExchange(&lock_, 1, 0) == 1) {
            _mm_pause();
        }
    }
    void Unlock() noexcept
    {
        _InterlockedExchange(&lock_, 0);
    }

private:
    long lock_ = 0;
};
#elif defined(__has_builtin) && __has_builtin(__atomic_add_fetch) && __has_builtin(__atomic_load_n) && \
    __has_builtin(__atomic_compare_exchange_n)
/* GCC built-in atomics, also supported by Clang. */
inline int32_t AtomicIncrement(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, 1, __ATOMIC_ACQ_REL);
}
inline int32_t AtomicDecrement(volatile int32_t* a) noexcept
{
    return __atomic_add_fetch(a, -1, __ATOMIC_ACQ_REL);
}
inline int32_t AtomicRead(const volatile int32_t* a) noexcept
{
    return __atomic_load_n(a, __ATOMIC_ACQUIRE);
}
// Increments *a only if it is non-zero. Returns the value *a had before the call,
// i.e. zero when no increment was performed.
inline int32_t AtomicIncrementIfNotZero(volatile int32_t* a) noexcept
{
    int32_t v = AtomicRead(a);
    while (v) {
        int32_t temp = v;
        if (__atomic_compare_exchange_n(a, &v, temp + 1, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
            return temp;
        }
    }
    return v;
}
// Trivial spinlock implemented with atomics.
// NOTE: this does NOT yield while waiting, so use it ONLY where lock contention is expected to be
// negligible. It also does not ensure fairness, but that is most likely enough for our reference
// counting purposes. It is non-recursive, so a thread can only lock it once.
class SpinLock {
public:
    SpinLock() noexcept = default;
    ~SpinLock() noexcept = default;
    SpinLock(const SpinLock&) = delete;
    SpinLock& operator=(const SpinLock&) = delete;
    SpinLock(SpinLock&&) = delete;
    SpinLock& operator=(SpinLock&&) = delete;
    void Lock() noexcept
    {
        long expected = 0;
#if defined(__aarch64__)
        __sevl();
#endif
        while (!__atomic_compare_exchange_n(&lock_, &expected, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
            expected = 0;
#if __has_builtin(__builtin_ia32_pause)
            __builtin_ia32_pause();
#elif defined(__arm__) || defined(__aarch64__)
            __wfe();
#endif
        }
    }
    void Unlock() noexcept
    {
        __atomic_store_n(&lock_, 0, __ATOMIC_RELEASE);
#if defined(__arm__)
        __sev();
#endif
    }

private:
    long lock_ = 0;
};

#else
#error Compiler / Platform specific atomic methods not implemented!
#endif

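/*
 * Usage sketch (illustrative only, not part of this header): a minimal intrusive
 * reference count built on the atomics above. The class and member names are
 * hypothetical.
 *
 *   class RefCountedExample {
 *   public:
 *       void Ref() noexcept
 *       {
 *           AtomicIncrement(&count_);
 *       }
 *       void Unref() noexcept
 *       {
 *           // AtomicDecrement returns the new value; zero means this was the last reference.
 *           if (AtomicDecrement(&count_) == 0) {
 *               delete this;
 *           }
 *       }
 *       // Weak-to-strong promotion: AtomicIncrementIfNotZero returns the previous value,
 *       // so a zero return means the count had already reached zero and must not be revived.
 *       bool TryRef() noexcept
 *       {
 *           return AtomicIncrementIfNotZero(&count_) != 0;
 *       }
 *
 *   private:
 *       volatile int32_t count_ { 1 };
 *   };
 */
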
/**
 * @brief Scoped helper to lock and unlock spin locks (see the usage sketch at the end of this header).
 */
class ScopedSpinLock {
public:
    ScopedSpinLock(const ScopedSpinLock&) = delete;
    ScopedSpinLock& operator=(const ScopedSpinLock&) = delete;
    ScopedSpinLock(ScopedSpinLock&&) = delete;
    ScopedSpinLock& operator=(ScopedSpinLock&&) = delete;

    explicit ScopedSpinLock(SpinLock& l) noexcept : lock_(l)
    {
        lock_.Lock();
    }
    ~ScopedSpinLock() noexcept
    {
        lock_.Unlock();
    }

private:
    SpinLock& lock_;
};
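
/*
 * Usage sketch (illustrative only, not part of this header): guarding a short critical
 * section with SpinLock and ScopedSpinLock. The Counter class is hypothetical.
 *
 *   class Counter {
 *   public:
 *       int64_t AddSample(int64_t value)
 *       {
 *           ScopedSpinLock guard(lock_); // Lock() runs in the constructor.
 *           sum_ += value;
 *           return sum_;                 // Unlock() runs in the destructor when guard leaves scope.
 *       }
 *
 *   private:
 *       SpinLock lock_;
 *       int64_t sum_ { 0 };
 *   };
 */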

CORE_END_NAMESPACE();
#endif