• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* ----------------------------------------------------------------------------
2 Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
3 This is free software; you can redistribute it and/or modify it under the
4 terms of the MIT license. A copy of the license can be found in the file
5 "LICENSE" at the root of this distribution.
6 -----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_ATOMIC_H
#define MIMALLOC_ATOMIC_H

// --------------------------------------------------------------------------------------------
// Atomics
// We need to be portable between C, C++, and MSVC.
// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode.
// This is why we try to use only `uintptr_t` and `<type>*` as atomic types.
// To gain better insight in the range of used atomics, we use explicitly named memory order operations
// instead of passing the memory order as a parameter.
// -----------------------------------------------------------------------------------------------

#if defined(__cplusplus)
// Use C++ atomics
#include <atomic>
#define  _Atomic(tp)            std::atomic<tp>
#define  mi_atomic(name)        std::atomic_##name
#define  mi_memory_order(name)  std::memory_order_##name
// ATOMIC_VAR_INIT is deprecated in C++20 where plain initialization suffices.
#if (__cplusplus >= 202002L)    // c++20, see issue #571
 #define MI_ATOMIC_VAR_INIT(x)  x
#elif !defined(ATOMIC_VAR_INIT)
 #define MI_ATOMIC_VAR_INIT(x)  x
#else
 #define MI_ATOMIC_VAR_INIT(x)  ATOMIC_VAR_INIT(x)
#endif
#elif defined(_MSC_VER)
// Use MSVC C wrapper for C11 atomics (implemented further below via Interlocked intrinsics)
#define  _Atomic(tp)            tp
#define  MI_ATOMIC_VAR_INIT(x)  x
#define  mi_atomic(name)        mi_atomic_##name
#define  mi_memory_order(name)  mi_memory_order_##name
#else
// Use C11 atomics
#include <stdatomic.h>
#define  mi_atomic(name)        atomic_##name
#define  mi_memory_order(name)  memory_order_##name
// ATOMIC_VAR_INIT is obsolete from C17 onwards.
#if (__STDC_VERSION__ >= 201710L) // c17, see issue #735
 #define MI_ATOMIC_VAR_INIT(x) x
#elif !defined(ATOMIC_VAR_INIT)
 #define MI_ATOMIC_VAR_INIT(x) x
#else
 #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
#endif
#endif
52 
// Various defines for all used memory orders in mimalloc.
// The CAS macros take separate success/failure memory orders, mirroring the
// C11/C++ `compare_exchange_*_explicit` signature.
#define mi_atomic_cas_weak(p,expected,desired,mem_success,mem_fail)  \
  mi_atomic(compare_exchange_weak_explicit)(p,expected,desired,mem_success,mem_fail)

#define mi_atomic_cas_strong(p,expected,desired,mem_success,mem_fail)  \
  mi_atomic(compare_exchange_strong_explicit)(p,expected,desired,mem_success,mem_fail)

// Loads/stores/exchanges with an explicitly named memory order.
#define mi_atomic_load_acquire(p)                mi_atomic(load_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_load_relaxed(p)                mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_store_release(p,x)             mi_atomic(store_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_store_relaxed(p,x)             mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_exchange_release(p,x)          mi_atomic(exchange_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_exchange_acq_rel(p,x)          mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel))
// CAS convenience forms; the failure order is relaxed (release forms) or acquire (acq_rel forms).
#define mi_atomic_cas_weak_release(p,exp,des)    mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
#define mi_atomic_cas_weak_acq_rel(p,exp,des)    mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))
#define mi_atomic_cas_strong_release(p,exp,des)  mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
#define mi_atomic_cas_strong_acq_rel(p,exp,des)  mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))

// Fetch-and-modify operations; these return the previous value.
#define mi_atomic_add_relaxed(p,x)               mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_sub_relaxed(p,x)               mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_add_acq_rel(p,x)               mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_sub_acq_rel(p,x)               mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_and_acq_rel(p,x)               mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_or_acq_rel(p,x)                mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel))

// Increment/decrement convenience forms for `uintptr_t` counters.
#define mi_atomic_increment_relaxed(p)           mi_atomic_add_relaxed(p,(uintptr_t)1)
#define mi_atomic_decrement_relaxed(p)           mi_atomic_sub_relaxed(p,(uintptr_t)1)
#define mi_atomic_increment_acq_rel(p)           mi_atomic_add_acq_rel(p,(uintptr_t)1)
#define mi_atomic_decrement_acq_rel(p)           mi_atomic_sub_acq_rel(p,(uintptr_t)1)

// Forward declarations; mi_atomic_yield is defined per-platform near the end of this header.
static inline void mi_atomic_yield(void);
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add);
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
86 
87 
#if defined(__cplusplus) || !defined(_MSC_VER)

// In C++/C11 atomics we have polymorphic atomics so can use the typed `ptr` variants (where `tp` is the type of atomic value)
// We use these macros so we can provide a typed wrapper in MSVC in C compilation mode as well
#define mi_atomic_load_ptr_acquire(tp,p)                mi_atomic_load_acquire(p)
#define mi_atomic_load_ptr_relaxed(tp,p)                mi_atomic_load_relaxed(p)

// In C++ we need to add casts to help resolve templates if NULL is passed
#if defined(__cplusplus)
#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release(p,(tp*)x)
#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed(p,(tp*)x)
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,(tp*)des)
#define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,(tp*)x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,(tp*)x)
#else
// In C the `tp` argument is unused: C11 generic atomics are already typed.
#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release(p,x)
#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed(p,x)
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel(p,exp,des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,des)
#define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,x)
#endif
// These are used by the statistics
// Atomically add `add` to `*p` with relaxed ordering; returns the previous value.
static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) {
  return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed));
}
mi_atomic_maxi64_relaxed(volatile int64_t * p,int64_t x)118 static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
119   int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p);
120   while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, &current, x)) { /* nothing */ };
121 }
122 
// Used by timers
// 64-bit load/store/CAS/add; in the C++/C11 backends these map directly onto
// the generic atomic operations above.
#define mi_atomic_loadi64_acquire(p)            mi_atomic(load_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_loadi64_relaxed(p)            mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_storei64_release(p,x)         mi_atomic(store_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_storei64_relaxed(p,x)         mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))

#define mi_atomic_casi64_strong_acq_rel(p,e,d)  mi_atomic_cas_strong_acq_rel(p,e,d)
#define mi_atomic_addi64_acq_rel(p,i)           mi_atomic_add_acq_rel(p,i)
131 
132 
#elif defined(_MSC_VER)

// MSVC C compilation wrapper that uses Interlocked operations to model C11 atomics.
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <intrin.h>
// The Interlocked intrinsics operate on LONG/LONG64; `MI_64(f)` selects the
// pointer-sized variant of intrinsic `f` (e.g. _InterlockedExchange vs _InterlockedExchange64).
#ifdef _WIN64
typedef LONG64   msc_intptr_t;
#define MI_64(f) f##64
#else
typedef LONG     msc_intptr_t;
#define MI_64(f) f
#endif

// Mirror of the C11 memory-order enumeration. The wrappers below accept an
// order but ignore it (each does `(void)(mo)`) and always use Interlocked operations.
typedef enum mi_memory_order_e {
  mi_memory_order_relaxed,
  mi_memory_order_consume,
  mi_memory_order_acquire,
  mi_memory_order_release,
  mi_memory_order_acq_rel,
  mi_memory_order_seq_cst
} mi_memory_order;
155 
// Fetch-and-add; returns the previous value (memory order ignored, see enum above).
static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add);
}
// Fetch-and-subtract; implemented as an add of the negated operand.
static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub));
}
// Fetch-and-AND; returns the previous value.
static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
// Fetch-and-OR; returns the previous value.
static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
mi_atomic_compare_exchange_strong_explicit(_Atomic (uintptr_t)* p,uintptr_t * expected,uintptr_t desired,mi_memory_order mo1,mi_memory_order mo2)172 static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
173   (void)(mo1); (void)(mo2);
174   uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected));
175   if (read == *expected) {
176     return true;
177   }
178   else {
179     *expected = read;
180     return false;
181   }
182 }
// Weak CAS: the Interlocked intrinsics have no weak variant, so forward to the strong one.
static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
  return mi_atomic_compare_exchange_strong_explicit(p, expected, desired, mo1, mo2);
}
// Atomic exchange; returns the previous value (memory order ignored).
static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange);
}
// Thread fence: emulated with an atomic exchange on a local dummy variable.
static inline void mi_atomic_thread_fence(mi_memory_order mo) {
  (void)(mo);
  _Atomic(uintptr_t) x = 0;
  mi_atomic_exchange_explicit(&x, 1, mo);
}
// Atomic load. On x86/x64 an aligned pointer-sized load is used directly;
// on other targets a same-value CAS is used for non-relaxed orders.
static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) {
  (void)(mo);
#if defined(_M_IX86) || defined(_M_X64)
  return *p;
#else
  uintptr_t x = *p;
  if (mo > mi_memory_order_relaxed) {
    // CAS with identical old/new refreshes `x` until a consistent value is read.
    while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ };
  }
  return x;
#endif
}
// Atomic store. On x86/x64 an aligned pointer-sized store is used directly;
// elsewhere fall back to an atomic exchange (result discarded).
static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
#if defined(_M_IX86) || defined(_M_X64)
  *p = x;
#else
  mi_atomic_exchange_explicit(p, x, mo);
#endif
}
// Atomic 64-bit load. Only x64 uses a plain load (a 64-bit load is not atomic
// on 32-bit targets); otherwise a CAS loop obtains a consistent value.
static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) {
  (void)(mo);
#if defined(_M_X64)
  return *p;
#else
  int64_t old = *p;
  int64_t x = old;
  // CAS with identical old/new values; loop until the read is stable.
  while ((old = InterlockedCompareExchange64(p, x, old)) != x) {
    x = old;
  }
  return x;
#endif
}
mi_atomic_storei64_explicit(_Atomic (int64_t)* p,int64_t x,mi_memory_order mo)228 static inline void mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) {
229   (void)(mo);
230 #if defined(x_M_IX86) || defined(_M_X64)
231   *p = x;
232 #else
233   InterlockedExchange64(p, x);
234 #endif
235 }
236 
// These are used by the statistics
// Atomically add `add` to `*p`; returns the previous value.
static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) {
#ifdef _WIN64
  // On 64-bit Windows intptr_t and int64_t have the same width, so reuse mi_atomic_addi.
  return (int64_t)mi_atomic_addi((int64_t*)p, add);
#else
  // 32-bit: emulate the 64-bit fetch-add with a CAS loop.
  int64_t current;
  int64_t sum;
  do {
    current = *p;
    sum = current + add;
  } while (_InterlockedCompareExchange64(p, sum, current) != current);
  return current;
#endif
}
// Atomically raise `*p` to at least `x`; retries the CAS until `*p >= x` holds.
static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) {
  int64_t current;
  do {
    current = *p;
  } while (current < x && _InterlockedCompareExchange64(p, x, current) != current);
}
257 
// Acq-rel 64-bit add: forwards to the relaxed variant, which is itself built
// on Interlocked operations (the ordering distinction is not modeled in this wrapper).
static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t i) {
  mi_atomic_addi64_relaxed(p, i);
}
261 
mi_atomic_casi64_strong_acq_rel(volatile _Atomic (int64_t *)p,int64_t * exp,int64_t des)262 static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) {
263   int64_t read = _InterlockedCompareExchange64(p, des, *exp);
264   if (read == *exp) {
265     return true;
266   }
267   else {
268     *exp = read;
269     return false;
270   }
271 }
272 
// The pointer macros cast to `uintptr_t`.
// (In the MSVC/C wrapper all atomics are modeled on `uintptr_t`, so the typed
//  pointer operations cast in and out of it.)
#define mi_atomic_load_ptr_acquire(tp,p)                (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p))
#define mi_atomic_load_ptr_relaxed(tp,p)                (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p))
#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_exchange_ptr_release(tp,p,x)          (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x)

// 64-bit load/store map to the loadi64/storei64 wrappers defined above.
#define mi_atomic_loadi64_acquire(p)    mi_atomic(loadi64_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_loadi64_relaxed(p)    mi_atomic(loadi64_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_storei64_release(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_storei64_relaxed(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(relaxed))
289 
290 #endif
291 
292 
// Atomically add a signed value; returns the previous value.
// (The detour through `uintptr_t` keeps the operation on the common unsigned atomic type.)
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add) {
  return (intptr_t)mi_atomic_add_acq_rel((_Atomic(uintptr_t)*)p, (uintptr_t)add);
}
297 
// Atomically subtract a signed value; returns the previous value.
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
  return (intptr_t)mi_atomic_addi(p, -sub);
}
302 
303 typedef _Atomic(uintptr_t) mi_atomic_once_t;
304 
305 // Returns true only on the first invocation
mi_atomic_once(mi_atomic_once_t * once)306 static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
307   if (mi_atomic_load_relaxed(once) != 0) return false;     // quick test
308   uintptr_t expected = 0;
309   return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
310 }
311 
typedef _Atomic(uintptr_t) mi_atomic_guard_t;

// Allows only one thread to execute at a time:
// the `for` header CASes the guard from 0 to 1; a winning thread runs the
// attached statement exactly once and releases the guard (store of 0) in the
// increment clause. Threads that lose the CAS skip the statement (no blocking).
#define mi_atomic_guard(guard) \
  uintptr_t _mi_guard_expected = 0; \
  for(bool _mi_guard_once = true; \
      _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \
      (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) )
321 
322 
// Yield
// Per-platform implementation of mi_atomic_yield (declared near the top of this header):
// a brief CPU pause/yield used in spin loops.
#if defined(__cplusplus)
// C++: use the standard thread yield.
#include <thread>
static inline void mi_atomic_yield(void) {
  std::this_thread::yield();
}
#elif defined(_WIN32)
// Windows (C mode): YieldProcessor expands to a cpu pause/yield hint.
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline void mi_atomic_yield(void) {
  YieldProcessor();
}
#elif defined(__SSE2__)
// x86 with SSE2: the `pause` intrinsic.
#include <emmintrin.h>
static inline void mi_atomic_yield(void) {
  _mm_pause();
}
#elif (defined(__GNUC__) || defined(__clang__)) && \
      (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \
       defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) || defined(__POWERPC__)
// GCC/Clang inline assembly per architecture.
#if defined(__x86_64__) || defined(__i386__)
static inline void mi_atomic_yield(void) {
  __asm__ volatile ("pause" ::: "memory");
}
#elif defined(__aarch64__)
static inline void mi_atomic_yield(void) {
  __asm__ volatile("wfe");
}
#elif (defined(__arm__) && __ARM_ARCH__ >= 7)
static inline void mi_atomic_yield(void) {
  __asm__ volatile("yield" ::: "memory");
}
#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
// PowerPC: `or rN,rN,rN` is the conventional low-priority no-op hint;
// Apple's assembler uses named registers (r27) while ELF uses bare numbers.
#ifdef __APPLE__
static inline void mi_atomic_yield(void) {
  __asm__ volatile ("or r27,r27,r27" ::: "memory");
}
#else
static inline void mi_atomic_yield(void) {
  __asm__ __volatile__ ("or 27,27,27" ::: "memory");
}
#endif
#elif defined(__armel__) || defined(__ARMEL__)
static inline void mi_atomic_yield(void) {
  __asm__ volatile ("nop" ::: "memory");
}
#endif
#elif defined(__sun)
// Fallback for other archs
#include <synch.h>
static inline void mi_atomic_yield(void) {
  smt_pause();
}
#elif defined(__wasi__)
// WASI: yield via the scheduler.
#include <sched.h>
static inline void mi_atomic_yield(void) {
  sched_yield();
}
#else
// Generic POSIX fallback.
#include <unistd.h>
static inline void mi_atomic_yield(void) {
  sleep(0);
}
#endif


#endif // MIMALLOC_ATOMIC_H
390