//===-- A simple equivalent of std::atomic ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H
#define LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H

#include "src/__support/macros/attributes.h"
#include "src/__support/macros/properties/architectures.h"

#include "type_traits.h"

namespace LIBC_NAMESPACE {
namespace cpp {

enum class MemoryOrder : int {
  RELAXED = __ATOMIC_RELAXED,
  CONSUME = __ATOMIC_CONSUME,
  ACQUIRE = __ATOMIC_ACQUIRE,
  RELEASE = __ATOMIC_RELEASE,
  ACQ_REL = __ATOMIC_ACQ_REL,
  SEQ_CST = __ATOMIC_SEQ_CST
};

// These are a clang extension; see the clang documentation for more
// information:
// https://clang.llvm.org/docs/LanguageExtensions.html#scoped-atomic-builtins.
enum class MemoryScope : int {
#if defined(__MEMORY_SCOPE_SYSTEM) && defined(__MEMORY_SCOPE_DEVICE)
  SYSTEM = __MEMORY_SCOPE_SYSTEM,
  DEVICE = __MEMORY_SCOPE_DEVICE,
#else
  SYSTEM = 0,
  DEVICE = 0,
#endif
};
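
// Illustrative sketch (hypothetical usage, not part of this header): the
// member functions of Atomic<T> below take a MemoryScope alongside a
// MemoryOrder and forward both to the scoped clang builtins when those are
// available, e.g.
//
//   cpp::Atomic<int> flag(0);
//   flag.store(1, cpp::MemoryOrder::RELEASE, cpp::MemoryScope::DEVICE);
//
// On toolchains without the scoped builtins, the scope argument is ignored
// and the plain __atomic_* builtins are used instead.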

template <typename T> struct Atomic {
  // For now, we will restrict to only arithmetic types.
  static_assert(is_arithmetic_v<T>, "Only arithmetic types can be atomic.");

private:
  // The value stored should be appropriately aligned so that
  // hardware instructions used to perform atomic operations work
  // correctly.
  static constexpr int ALIGNMENT = sizeof(T) > alignof(T) ? sizeof(T)
                                                          : alignof(T);

public:
  using value_type = T;

  // We keep the internal value public so that it is addressable.
  // This is useful in places like the Linux futex operations, where
  // we need pointers to the memory of the atomic values. However, load
  // and store operations should still be performed using the atomic
  // methods of this class.
  alignas(ALIGNMENT) value_type val;
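
  // Illustrative sketch (hypothetical, Linux-specific and simplified; not part
  // of this header): because 'val' is public, its address can be handed to an
  // interface that expects a raw word in memory, such as the futex syscall:
  //
  //   cpp::Atomic<uint32_t> futex_word(0);
  //   syscall(SYS_futex, &futex_word.val, FUTEX_WAIT, 0, nullptr);
  //
  // Ordinary reads and writes should still go through load()/store() below.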

  constexpr Atomic() = default;

  // Initializes the value without using atomic operations.
  constexpr Atomic(value_type v) : val(v) {}

  Atomic(const Atomic &) = delete;
  Atomic &operator=(const Atomic &) = delete;

  // Atomic load.
  operator T() { return __atomic_load_n(&val, int(MemoryOrder::SEQ_CST)); }

  T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
         [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_load_n)
    return __scoped_atomic_load_n(&val, int(mem_ord), (int)(mem_scope));
#else
    return __atomic_load_n(&val, int(mem_ord));
#endif
  }

  // Atomic store.
  T operator=(T rhs) {
    __atomic_store_n(&val, rhs, int(MemoryOrder::SEQ_CST));
    return rhs;
  }

  void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_store_n)
    __scoped_atomic_store_n(&val, rhs, int(mem_ord), (int)(mem_scope));
#else
    __atomic_store_n(&val, rhs, int(mem_ord));
#endif
  }

  // Atomic compare exchange
  bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange_n(&val, &expected, desired, false,
                                       int(mem_ord), int(mem_ord));
  }

  // Atomic compare exchange (separate success and failure memory orders)
  bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange_n(&val, &expected, desired, false,
                                       static_cast<int>(success_order),
                                       static_cast<int>(failure_order));
  }

  // Atomic compare exchange (weak version)
  bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange_n(&val, &expected, desired, true,
                                       static_cast<int>(mem_ord),
                                       static_cast<int>(mem_ord));
  }

  // Atomic compare exchange (weak version with separate success and failure
  // memory orders)
  bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange_n(&val, &expected, desired, true,
                                       static_cast<int>(success_order),
                                       static_cast<int>(failure_order));
  }
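
  // Illustrative sketch (hypothetical usage, not part of this header): the
  // weak form may fail spuriously, so it is typically used in a retry loop,
  // for example to atomically double a value:
  //
  //   cpp::Atomic<int> a(1);
  //   int old = a.load(cpp::MemoryOrder::RELAXED);
  //   while (!a.compare_exchange_weak(old, old * 2, cpp::MemoryOrder::ACQ_REL,
  //                                   cpp::MemoryOrder::RELAXED)) {
  //     // On failure, 'old' is updated to the freshly observed value.
  //   }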

  T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_exchange_n)
    return __scoped_atomic_exchange_n(&val, desired, int(mem_ord),
                                      (int)(mem_scope));
#else
    return __atomic_exchange_n(&val, desired, int(mem_ord));
#endif
  }

  T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(&val, increment, int(mem_ord),
                                     (int)(mem_scope));
#else
    return __atomic_fetch_add(&val, increment, int(mem_ord));
#endif
  }

  T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(&val, mask, int(mem_ord), (int)(mem_scope));
#else
    return __atomic_fetch_or(&val, mask, int(mem_ord));
#endif
  }

  T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(&val, mask, int(mem_ord),
                                     (int)(mem_scope));
#else
    return __atomic_fetch_and(&val, mask, int(mem_ord));
#endif
  }

  T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(&val, decrement, int(mem_ord),
                                     (int)(mem_scope));
#else
    return __atomic_fetch_sub(&val, decrement, int(mem_ord));
#endif
  }

  // Set the value without using an atomic operation. This is useful
  // in initializing atomic values without a constructor.
  void set(T rhs) { val = rhs; }
};
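
// Illustrative sketch (hypothetical usage, not part of this header): a shared
// counter built on Atomic<T>, combining set() for non-atomic initialization
// with the atomic operations above.
//
//   cpp::Atomic<int> counter;
//   counter.set(0);                                  // non-atomic init
//   counter.fetch_add(1, cpp::MemoryOrder::RELAXED); // atomic increment
//   int snapshot = counter.load(cpp::MemoryOrder::ACQUIRE);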

// Issue a thread fence with the given memory ordering.
LIBC_INLINE void atomic_thread_fence([[maybe_unused]] MemoryOrder mem_ord) {
  // The NVPTX backend currently does not support atomic thread fences, so we
  // use a full system fence instead.
#ifdef LIBC_TARGET_ARCH_IS_NVPTX
  __nvvm_membar_sys();
#else
  __atomic_thread_fence(static_cast<int>(mem_ord));
#endif
}

// Establishes memory synchronization ordering of non-atomic and relaxed atomic
// accesses, as instructed by order, between a thread and a signal handler
// executed on the same thread. This is equivalent to atomic_thread_fence,
// except no instructions for memory ordering are issued. Only reordering of
// the instructions by the compiler is suppressed as order instructs.
LIBC_INLINE void atomic_signal_fence([[maybe_unused]] MemoryOrder mem_ord) {
#if __has_builtin(__atomic_signal_fence)
  __atomic_signal_fence(static_cast<int>(mem_ord));
#else
  // If the builtin is not available, use an asm statement as a full compiler
  // barrier.
  asm volatile("" ::: "memory");
#endif
}
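
// Illustrative sketch (hypothetical usage, not part of this header): a release
// signal fence before a relaxed store keeps the compiler from reordering the
// preceding writes past the store, which is sufficient when the only other
// observer is a signal handler running on the same thread.
//
//   cpp::Atomic<int> ready(0);
//   // ... write data that the signal handler will read ...
//   cpp::atomic_signal_fence(cpp::MemoryOrder::RELEASE);
//   ready.store(1, cpp::MemoryOrder::RELAXED);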

} // namespace cpp
} // namespace LIBC_NAMESPACE

#endif // LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H