// Copyright 2024 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "pw_preprocessor/compiler.h"

#ifndef __unused
#define __unused __attribute__((unused))
#endif

PW_MODIFY_DIAGNOSTICS_PUSH();
PW_MODIFY_DIAGNOSTIC(ignored, "-Wcast-qual");

// Use the __builtin_mem* intrinsics to avoid a dependency on libc.
#define memcpy __builtin_memcpy
#define memcmp __builtin_memcmp

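// The next two helpers implement the standard single-core Cortex-M critical
// section: save the current PRIMASK state, mask interrupts with "cpsid i",
// and later restore the saved state so that nested sections compose.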
static inline uint32_t pw_SaveAndDisableInterrupts(void) {
  uint32_t status;
  asm volatile(
      "mrs %0, PRIMASK\n"
      "cpsid i"
      : "=r"(status)::"memory");
  return status;
}

static inline void pw_RestoreInterrupts(uint32_t status) {
  asm volatile("msr PRIMASK,%0" ::"r"(status) : "memory");
}

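// "Locking" is implemented by masking interrupts: on a single-core Cortex-M
// device this is equivalent to taking one global lock, so the address being
// guarded is unused. This assumes no other bus master (e.g. DMA) contends on
// the atomic objects.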
static inline uint32_t atomic_lock(__unused const volatile void* ptr) {
  uint32_t save = pw_SaveAndDisableInterrupts();
  return save;
}

static inline void atomic_unlock(__unused const volatile void* ptr,
                                 uint32_t save) {
  pw_RestoreInterrupts(save);
}

#ifdef __clang__

// Clang objects if you redefine a builtin, so each helper is defined under a
// "_c" name and #pragma redefine_extname emits it under the builtin's symbol
// name instead.
#pragma redefine_extname __atomic_load_c __atomic_load
#pragma redefine_extname __atomic_store_c __atomic_store
#pragma redefine_extname __atomic_exchange_c __atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange
#pragma redefine_extname __atomic_is_lock_free_c __atomic_is_lock_free

#else  // __clang__

// Atomically sets *mem to true and returns the previous value, implementing
// GCC's __atomic_test_and_set.
_Bool __atomic_test_and_set_c(volatile void* mem, __unused int model) {
  uint32_t save = atomic_lock(mem);
  bool result = *(volatile bool*)mem;
  *(volatile bool*)mem = true;
  atomic_unlock(mem, save);
  return result;
}
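
// Illustrative only: this is the out-of-line fallback behind atomic_flag, as
// in
//   static volatile atomic_flag lock = ATOMIC_FLAG_INIT;
//   bool was_held = atomic_flag_test_and_set(&lock);
// when the compiler lowers the operation to an __atomic_test_and_set call.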

#define __atomic_load_c __atomic_load
#define __atomic_store_c __atomic_store
#define __atomic_exchange_c __atomic_exchange
#define __atomic_compare_exchange_c __atomic_compare_exchange
#define __atomic_is_lock_free_c __atomic_is_lock_free

#endif  // __clang__

// Whether atomic operations for the given size (and alignment) are lock-free.
bool __atomic_is_lock_free_c(__unused size_t size,
                             __unused const volatile void* ptr) {
#if !__ARM_ARCH_6M__
  // Naturally aligned 1-, 2-, and 4-byte accesses use native atomics.
  if (size == 1 || size == 2 || size == 4) {
    size_t align = size - 1;
    return (((uintptr_t)ptr) & align) == 0;
  }
#endif
  return false;
}
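
// Illustrative only: for size == 4 the mask is 3, so a pointer such as
// (void*)0x20000004 is 4-byte aligned (0x20000004 & 3 == 0) and reports
// lock-free, while (void*)0x20000006 does not.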

// An atomic load operation. This is atomic with respect to the source pointer
// only.
void __atomic_load_c(unsigned int size,
                     const volatile void* src,
                     void* dest,
                     __unused int model) {
  uint32_t save = atomic_lock(src);
  memcpy(dest, (const void*)src, size);
  atomic_unlock(src, save);
}
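
// Illustrative only: loads of types with no sized helper are lowered to the
// generic __atomic_load. For a hypothetical 12-byte struct:
//   typedef struct { int32_t x, y, z; } Point3;
//   _Atomic Point3 shared;
//   Point3 copy = atomic_load(&shared);  // calls __atomic_load(12, ...)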

// An atomic store operation. This is atomic with respect to the destination
// pointer only.
void __atomic_store_c(unsigned int size,
                      volatile void* dest,
                      void* src,
                      __unused int model) {
  uint32_t save = atomic_lock(dest);
  memcpy((void*)dest, src, size);
  atomic_unlock(dest, save);
}

// Atomic compare-and-exchange operation. If the value at *ptr is identical
// to the value at *expected, this copies the value at *desired to *ptr. If
// they are not identical, this stores the current value at *ptr to *expected.
//
// Returns 1 if the exchange takes place and 0 if it fails.
_Bool __atomic_compare_exchange_c(unsigned int size,
                                  volatile void* ptr,
                                  void* expected,
                                  void* desired,
                                  __unused int success,
                                  __unused int failure) {
  uint32_t save = atomic_lock(ptr);
  if (memcmp((void*)ptr, expected, size) == 0) {
    memcpy((void*)ptr, desired, size);
    atomic_unlock(ptr, save);
    return 1;
  }
  memcpy(expected, (void*)ptr, size);
  atomic_unlock(ptr, save);
  return 0;
}
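
// Illustrative only: with a hypothetical struct type like the Point3 example
// above,
//   Point3 want = {0, 0, 0}, next = {1, 2, 3};
//   bool swapped = atomic_compare_exchange_strong(&shared, &want, next);
// lowers to a call to the generic __atomic_compare_exchange.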

// Performs an atomic exchange operation between two pointers. This is atomic
// with respect to the target address.
void __atomic_exchange_c(unsigned int size,
                         volatile void* ptr,
                         void* val,
                         void* old,
                         __unused int model) {
  uint32_t save = atomic_lock(ptr);
  memcpy(old, (void*)ptr, size);
  memcpy((void*)ptr, val, size);
  atomic_unlock(ptr, save);
}

#if __ARM_ARCH_6M__
#define ATOMIC_OPTIMIZED_CASES           \
  ATOMIC_OPTIMIZED_CASE(1, uint8_t)      \
  ATOMIC_OPTIMIZED_CASE(2, uint16_t)     \
  ATOMIC_OPTIMIZED_CASE(4, unsigned int) \
  ATOMIC_OPTIMIZED_CASE(8, uint64_t)
#else
#define ATOMIC_OPTIMIZED_CASES ATOMIC_OPTIMIZED_CASE(8, uint64_t)
#endif
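
// X-macro: each ATOMIC_OPTIMIZED_CASE(n, type) expansion below stamps out the
// sized helper for an n-byte type (for example, __atomic_load_8 for uint64_t).
// On ARMv6-M every size needs an out-of-line helper; elsewhere only the 8-byte
// operations do, since 1-, 2-, and 4-byte atomics are native.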

#define ATOMIC_OPTIMIZED_CASE(n, type)                                       \
  type __atomic_load_##n(const volatile void* src, __unused int memorder) {  \
    uint32_t save = atomic_lock(src);                                        \
    type val = *(const volatile type*)src;                                   \
    atomic_unlock(src, save);                                                \
    return val;                                                              \
  }

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#define ATOMIC_OPTIMIZED_CASE(n, type)                                         \
  void __atomic_store_##n(volatile void* dest, type val, __unused int model) { \
    uint32_t save = atomic_lock(dest);                                         \
    *(volatile type*)dest = val;                                               \
    atomic_unlock(dest, save);                                                 \
  }

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#define ATOMIC_OPTIMIZED_CASE(n, type)                       \
  bool __atomic_compare_exchange_##n(volatile void* ptr,     \
                                     void* expected,         \
                                     type desired,           \
                                     __unused bool weak,     \
                                     __unused int success,   \
                                     __unused int failure) { \
    uint32_t save = atomic_lock(ptr);                        \
    if (*(volatile type*)ptr == *(type*)expected) {          \
      *(volatile type*)ptr = desired;                        \
      atomic_unlock(ptr, save);                              \
      return true;                                           \
    }                                                        \
    *(type*)expected = *(volatile type*)ptr;                 \
    atomic_unlock(ptr, save);                                \
    return false;                                            \
  }
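
// The `weak` flag is ignored: under an interrupt lock a compare-exchange
// cannot fail spuriously, so the weak and strong variants behave identically.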

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

#define ATOMIC_OPTIMIZED_CASE(n, type)                      \
  type __atomic_exchange_##n(                               \
      volatile void* dest, type val, __unused int model) {  \
    uint32_t save = atomic_lock(dest);                      \
    type tmp = *(volatile type*)dest;                       \
    *(volatile type*)dest = val;                            \
    atomic_unlock(dest, save);                              \
    return tmp;                                             \
  }

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

// Atomic read-modify-write operations for integers of various sizes.

#define ATOMIC_RMW(n, type, opname, op)                    \
  type __atomic_fetch_##opname##_##n(                      \
      volatile void* ptr, type val, __unused int model) {  \
    uint32_t save = atomic_lock(ptr);                      \
    type tmp = *(volatile type*)ptr;                       \
    *(volatile type*)ptr = tmp op val;                     \
    atomic_unlock(ptr, save);                              \
    return tmp;                                            \
  }

#define ATOMIC_RMW_NAND(n, type)                                           \
  type __atomic_fetch_nand_##n(type* ptr, type val, __unused int model) {  \
    uint32_t save = atomic_lock(ptr);                                      \
    type tmp = *ptr;                                                       \
    *ptr = ~(tmp & val);                                                   \
    atomic_unlock(ptr, save);                                              \
    return tmp;                                                            \
  }
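
// Note: fetch-nand follows the GCC/C11 definition, storing ~(old & val) and
// returning the old value. Illustrative only:
//   uint64_t flags = 0xFF;
//   uint64_t old = __atomic_fetch_nand(&flags, 0x0F, __ATOMIC_SEQ_CST);
//   // old == 0xFF, flags == ~(0xFFull & 0x0F) == 0xFFFFFFFFFFFFFFF0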

#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, add, +)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, sub, -)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, and, &)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, or, |)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW(n, type, xor, ^)

ATOMIC_OPTIMIZED_CASES

#undef ATOMIC_OPTIMIZED_CASE

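// The fetch-nand helpers are only compiled when the compiler reports the
// corresponding builtin (__c11_atomic_fetch_nand is a Clang builtin),
// presumably because only such compilers emit calls to them.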
#if __has_builtin(__c11_atomic_fetch_nand)
#define ATOMIC_OPTIMIZED_CASE(n, type) ATOMIC_RMW_NAND(n, type)
ATOMIC_OPTIMIZED_CASES
#undef ATOMIC_OPTIMIZED_CASE
#endif

PW_MODIFY_DIAGNOSTICS_POP();