/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#ifndef __G_ATOMIC_H__
#define __G_ATOMIC_H__

#if !defined (__GLIB_H_INSIDE__) && !defined (GLIB_COMPILATION)
#error "Only <glib.h> can be included directly."
#endif

#include <glib/gtypes.h>

#if defined(glib_typeof_2_68) && GLIB_VERSION_MIN_REQUIRED >= GLIB_VERSION_2_68
/* for glib_typeof */
#include <type_traits>
#endif

G_BEGIN_DECLS

GLIB_AVAILABLE_IN_ALL
gint                    g_atomic_int_get                      (const volatile gint *atomic);
GLIB_AVAILABLE_IN_ALL
void                    g_atomic_int_set                      (volatile gint  *atomic,
                                                               gint            newval);
GLIB_AVAILABLE_IN_ALL
void                    g_atomic_int_inc                      (volatile gint  *atomic);
GLIB_AVAILABLE_IN_ALL
gboolean                g_atomic_int_dec_and_test             (volatile gint  *atomic);
GLIB_AVAILABLE_IN_ALL
gboolean                g_atomic_int_compare_and_exchange     (volatile gint  *atomic,
                                                               gint            oldval,
                                                               gint            newval);
GLIB_AVAILABLE_IN_ALL
gint                    g_atomic_int_add                      (volatile gint  *atomic,
                                                               gint            val);
GLIB_AVAILABLE_IN_2_30
guint                   g_atomic_int_and                      (volatile guint *atomic,
                                                               guint           val);
GLIB_AVAILABLE_IN_2_30
guint                   g_atomic_int_or                       (volatile guint *atomic,
                                                               guint           val);
GLIB_AVAILABLE_IN_2_30
guint                   g_atomic_int_xor                      (volatile guint *atomic,
                                                               guint           val);

GLIB_AVAILABLE_IN_ALL
gpointer                g_atomic_pointer_get                  (const volatile void *atomic);
GLIB_AVAILABLE_IN_ALL
void                    g_atomic_pointer_set                  (volatile void  *atomic,
                                                               gpointer        newval);
GLIB_AVAILABLE_IN_ALL
gboolean                g_atomic_pointer_compare_and_exchange (volatile void  *atomic,
                                                               gpointer        oldval,
                                                               gpointer        newval);
GLIB_AVAILABLE_IN_ALL
gssize                  g_atomic_pointer_add                  (volatile void  *atomic,
                                                               gssize          val);
GLIB_AVAILABLE_IN_2_30
gsize                   g_atomic_pointer_and                  (volatile void  *atomic,
                                                               gsize           val);
GLIB_AVAILABLE_IN_2_30
gsize                   g_atomic_pointer_or                   (volatile void  *atomic,
                                                               gsize           val);
GLIB_AVAILABLE_IN_2_30
gsize                   g_atomic_pointer_xor                  (volatile void  *atomic,
                                                               gsize           val);

GLIB_DEPRECATED_IN_2_30_FOR(g_atomic_int_add)
gint                    g_atomic_int_exchange_and_add         (volatile gint  *atomic,
                                                               gint            val);

G_END_DECLS
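/* A minimal usage sketch (not part of the original header): the integer
 * operations declared above are most often used for reference counting.
 * `MyObject` and `my_object_free()` are hypothetical names, purely for
 * illustration.
 *
 *   typedef struct { gint ref_count; } MyObject;
 *
 *   static void
 *   my_object_ref (MyObject *obj)
 *   {
 *     g_atomic_int_inc (&obj->ref_count);
 *   }
 *
 *   static void
 *   my_object_unref (MyObject *obj)
 *   {
 *     if (g_atomic_int_dec_and_test (&obj->ref_count))
 *       my_object_free (obj);   // hypothetical destructor
 *   }
 */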
#if defined(G_ATOMIC_LOCK_FREE) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)

/* We prefer the new C11-style atomic extension of GCC if available */
/* OHOS_GLIB_COMPATIBLE
 * ohos.glib.compatible.001: updating glib from 2.62.5 to 2.68.1 is
 * incompatible with gstreamer 1.16.2, which uses patterns like
 *   static volatile gsize _init_once = 0; // conflicts with volatile
 *   if (g_once_init_enter (&_init_once))
 * so add "&& !defined(__clang__)" to keep clang on the fallback path.
 */
#if defined(__ATOMIC_SEQ_CST) && !defined(__clang__)

#define g_atomic_int_get(atomic) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    gint gaig_temp; \
    (void) (0 ? *(atomic) ^ *(atomic) : 1); \
    __atomic_load ((gint *)(atomic), &gaig_temp, __ATOMIC_SEQ_CST); \
    (gint) gaig_temp; \
  }))
#define g_atomic_int_set(atomic, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    gint gais_temp = (gint) (newval); \
    (void) (0 ? *(atomic) ^ (newval) : 1); \
    __atomic_store ((gint *)(atomic), &gais_temp, __ATOMIC_SEQ_CST); \
  }))

#if defined(glib_typeof) && (!defined(glib_typeof_2_68) || GLIB_VERSION_MIN_REQUIRED >= GLIB_VERSION_2_68)
#define g_atomic_pointer_get(atomic) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    glib_typeof (*(atomic)) gapg_temp_newval; \
    glib_typeof ((atomic)) gapg_temp_atomic = (atomic); \
    __atomic_load (gapg_temp_atomic, &gapg_temp_newval, __ATOMIC_SEQ_CST); \
    gapg_temp_newval; \
  }))
#define g_atomic_pointer_set(atomic, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    glib_typeof ((atomic)) gaps_temp_atomic = (atomic); \
    glib_typeof (*(atomic)) gaps_temp_newval = (newval); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    __atomic_store (gaps_temp_atomic, &gaps_temp_newval, __ATOMIC_SEQ_CST); \
  }))
#else /* if !(defined(glib_typeof) && (!defined(glib_typeof_2_68) || GLIB_VERSION_MIN_REQUIRED >= GLIB_VERSION_2_68)) */
#define g_atomic_pointer_get(atomic) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    gpointer gapg_temp_newval; \
    gpointer *gapg_temp_atomic = (gpointer *)(atomic); \
    __atomic_load (gapg_temp_atomic, &gapg_temp_newval, __ATOMIC_SEQ_CST); \
    gapg_temp_newval; \
  }))
#define g_atomic_pointer_set(atomic, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    gpointer *gaps_temp_atomic = (gpointer *)(atomic); \
    gpointer gaps_temp_newval = (gpointer)(newval); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    __atomic_store (gaps_temp_atomic, &gaps_temp_newval, __ATOMIC_SEQ_CST); \
  }))
#endif /* if defined(glib_typeof) && (!defined(glib_typeof_2_68) || GLIB_VERSION_MIN_REQUIRED >= GLIB_VERSION_2_68) */

#define g_atomic_int_inc(atomic) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ *(atomic) : 1); \
    (void) __atomic_fetch_add ((atomic), 1, __ATOMIC_SEQ_CST); \
  }))
#define g_atomic_int_dec_and_test(atomic) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ *(atomic) : 1); \
    __atomic_fetch_sub ((atomic), 1, __ATOMIC_SEQ_CST) == 1; \
  }))
#define g_atomic_int_compare_and_exchange(atomic, oldval, newval) \
  (G_GNUC_EXTENSION ({ \
    gint gaicae_oldval = (oldval); \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (newval) ^ (oldval) : 1); \
    __atomic_compare_exchange_n ((atomic), &gaicae_oldval, (newval), FALSE, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ? TRUE : FALSE; \
  }))
#define g_atomic_int_add(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (val) : 1); \
    (gint) __atomic_fetch_add ((atomic), (val), __ATOMIC_SEQ_CST); \
  }))
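/* A minimal sketch (illustrative, not part of the original header): the
 * compare-and-exchange above is typically used in a retry loop to build an
 * arbitrary atomic read-modify-write. Here, a hypothetical saturating
 * increment that never exceeds `cap`:
 *
 *   static void
 *   saturating_inc (gint *counter, gint cap)
 *   {
 *     gint old;
 *     do
 *       old = g_atomic_int_get (counter);
 *     while (old < cap &&
 *            !g_atomic_int_compare_and_exchange (counter, old, old + 1));
 *   }
 */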
#define g_atomic_int_and(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (val) : 1); \
    (guint) __atomic_fetch_and ((atomic), (val), __ATOMIC_SEQ_CST); \
  }))
#define g_atomic_int_or(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (val) : 1); \
    (guint) __atomic_fetch_or ((atomic), (val), __ATOMIC_SEQ_CST); \
  }))
#define g_atomic_int_xor(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (val) : 1); \
    (guint) __atomic_fetch_xor ((atomic), (val), __ATOMIC_SEQ_CST); \
  }))

#if defined(glib_typeof) && defined(__cplusplus) && __cplusplus >= 201103L
/* This is typesafe because we check we can assign oldval to the type of
 * (*atomic). Unfortunately it can only be done in C++ because gcc/clang warn
 * when atomic is volatile and not oldval, or when atomic is gsize* and oldval
 * is NULL. Note that clang++ forces us to be typesafe because it is an error
 * if the 2nd argument of __atomic_compare_exchange_n() has a different type
 * than the first.
 * https://gitlab.gnome.org/GNOME/glib/-/merge_requests/1919
 * https://gitlab.gnome.org/GNOME/glib/-/merge_requests/1715#note_1024120. */
#define g_atomic_pointer_compare_and_exchange(atomic, oldval, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof (oldval) == sizeof (gpointer)); \
    glib_typeof (*(atomic)) gapcae_oldval = (oldval); \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    __atomic_compare_exchange_n ((atomic), &gapcae_oldval, (newval), FALSE, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ? TRUE : FALSE; \
  }))
#else /* if !(defined(glib_typeof) && defined(__cplusplus) && __cplusplus >= 201103L) */
#define g_atomic_pointer_compare_and_exchange(atomic, oldval, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof (oldval) == sizeof (gpointer)); \
    gpointer gapcae_oldval = (gpointer)(oldval); \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    __atomic_compare_exchange_n ((atomic), &gapcae_oldval, (newval), FALSE, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ? TRUE : FALSE; \
  }))
#endif /* defined(glib_typeof) */
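/* A minimal sketch (illustrative only): pointer compare-and-exchange is a
 * natural fit for one-shot lazy initialisation. `create_table()` is a
 * hypothetical constructor; the losing thread discards its copy.
 *
 *   static GHashTable *table = NULL;
 *
 *   GHashTable *candidate = create_table ();
 *   if (!g_atomic_pointer_compare_and_exchange (&table, NULL, candidate))
 *     g_hash_table_unref (candidate);  // another thread won the race
 *   // `table` may now be read with g_atomic_pointer_get (&table)
 */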
#define g_atomic_pointer_add(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    (void) (0 ? (val) ^ (val) : 1); \
    (gssize) __atomic_fetch_add ((atomic), (val), __ATOMIC_SEQ_CST); \
  }))
#define g_atomic_pointer_and(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    gsize *gapa_atomic = (gsize *) (atomic); \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gsize)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    (void) (0 ? (val) ^ (val) : 1); \
    (gsize) __atomic_fetch_and (gapa_atomic, (val), __ATOMIC_SEQ_CST); \
  }))
#define g_atomic_pointer_or(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    gsize *gapo_atomic = (gsize *) (atomic); \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gsize)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    (void) (0 ? (val) ^ (val) : 1); \
    (gsize) __atomic_fetch_or (gapo_atomic, (val), __ATOMIC_SEQ_CST); \
  }))
#define g_atomic_pointer_xor(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    gsize *gapx_atomic = (gsize *) (atomic); \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gsize)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    (void) (0 ? (val) ^ (val) : 1); \
    (gsize) __atomic_fetch_xor (gapx_atomic, (val), __ATOMIC_SEQ_CST); \
  }))

#else /* defined(__ATOMIC_SEQ_CST) */

/* We want to achieve __ATOMIC_SEQ_CST semantics here. See
 * https://en.cppreference.com/w/c/atomic/memory_order#Constants. For load
 * operations, that means performing an *acquire*:
 * > A load operation with this memory order performs the acquire operation on
 * > the affected memory location: no reads or writes in the current thread can
 * > be reordered before this load. All writes in other threads that release
 * > the same atomic variable are visible in the current thread.
 *
 * “no reads or writes in the current thread can be reordered before this load”
 * is implemented using a compiler barrier (a no-op `__asm__` section) to
 * prevent instruction reordering. Writes in other threads are synchronised
 * using `__sync_synchronize()`. It’s unclear from the GCC documentation whether
 * `__sync_synchronize()` acts as a compiler barrier, hence our explicit use of
 * one.
 *
 * For store operations, `__ATOMIC_SEQ_CST` means performing a *release*:
 * > A store operation with this memory order performs the release operation:
 * > no reads or writes in the current thread can be reordered after this store.
 * > All writes in the current thread are visible in other threads that acquire
 * > the same atomic variable (see Release-Acquire ordering below) and writes
 * > that carry a dependency into the atomic variable become visible in other
 * > threads that consume the same atomic (see Release-Consume ordering below).
 *
 * “no reads or writes in the current thread can be reordered after this store”
 * is implemented using a compiler barrier to prevent instruction reordering.
 * “All writes in the current thread are visible in other threads” is
 * implemented using `__sync_synchronize()`; similarly for “writes that carry a
 * dependency”.
 */
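/* Illustrative expansion (an editor's sketch of the pattern used by the
 * macros below, not part of the original header): on this legacy path a
 * sequentially-consistent load is emulated as
 *
 *   value = *atomic;                            // plain load
 *   __sync_synchronize ();                      // full hardware barrier
 *   __asm__ __volatile__ ("" : : : "memory");   // compiler-only barrier
 *
 * and a store as the mirror image: both barriers first, then the plain
 * store, matching the release semantics quoted above.
 */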
#define g_atomic_int_get(atomic) \
  (G_GNUC_EXTENSION ({ \
    gint gaig_result; \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ *(atomic) : 1); \
    gaig_result = (gint) *(atomic); \
    __sync_synchronize (); \
    __asm__ __volatile__ ("" : : : "memory"); \
    gaig_result; \
  }))
#define g_atomic_int_set(atomic, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (newval) : 1); \
    __sync_synchronize (); \
    __asm__ __volatile__ ("" : : : "memory"); \
    *(atomic) = (newval); \
  }))
#define g_atomic_pointer_get(atomic) \
  (G_GNUC_EXTENSION ({ \
    gpointer gapg_result; \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    gapg_result = (gpointer) *(atomic); \
    __sync_synchronize (); \
    __asm__ __volatile__ ("" : : : "memory"); \
    gapg_result; \
  }))
#if defined(glib_typeof) && (!defined(glib_typeof_2_68) || GLIB_VERSION_MIN_REQUIRED >= GLIB_VERSION_2_68)
#define g_atomic_pointer_set(atomic, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    __sync_synchronize (); \
    __asm__ __volatile__ ("" : : : "memory"); \
    *(atomic) = (glib_typeof (*(atomic))) (gsize) (newval); \
  }))
#else /* if !(defined(glib_typeof) && (!defined(glib_typeof_2_68) || GLIB_VERSION_MIN_REQUIRED >= GLIB_VERSION_2_68)) */
#define g_atomic_pointer_set(atomic, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    __sync_synchronize (); \
    __asm__ __volatile__ ("" : : : "memory"); \
    *(atomic) = (gpointer) (gsize) (newval); \
  }))
#endif /* if defined(glib_typeof) && (!defined(glib_typeof_2_68) || GLIB_VERSION_MIN_REQUIRED >= GLIB_VERSION_2_68) */

#define g_atomic_int_inc(atomic) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ *(atomic) : 1); \
    (void) __sync_fetch_and_add ((atomic), 1); \
  }))
#define g_atomic_int_dec_and_test(atomic) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ *(atomic) : 1); \
    __sync_fetch_and_sub ((atomic), 1) == 1; \
  }))
#define g_atomic_int_compare_and_exchange(atomic, oldval, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (newval) ^ (oldval) : 1); \
    __sync_bool_compare_and_swap ((atomic), (oldval), (newval)) ? TRUE : FALSE; \
  }))
#define g_atomic_int_add(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (val) : 1); \
    (gint) __sync_fetch_and_add ((atomic), (val)); \
  }))
#define g_atomic_int_and(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (val) : 1); \
    (guint) __sync_fetch_and_and ((atomic), (val)); \
  }))
#define g_atomic_int_or(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (val) : 1); \
    (guint) __sync_fetch_and_or ((atomic), (val)); \
  }))
#define g_atomic_int_xor(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gint)); \
    (void) (0 ? *(atomic) ^ (val) : 1); \
    (guint) __sync_fetch_and_xor ((atomic), (val)); \
  }))
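/* A minimal sketch (illustrative only): the bitwise operations above make a
 * plain guint usable as an atomic flag word. `FLAG_READY` is a hypothetical
 * constant:
 *
 *   #define FLAG_READY (1u << 0)
 *   static guint flags = 0;
 *
 *   g_atomic_int_or (&flags, FLAG_READY);         // set; returns old value
 *   if (g_atomic_int_get (&flags) & FLAG_READY)   // test
 *     ...;
 *   g_atomic_int_and (&flags, ~FLAG_READY);       // clear
 */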
#define g_atomic_pointer_compare_and_exchange(atomic, oldval, newval) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    __sync_bool_compare_and_swap ((atomic), (oldval), (newval)) ? TRUE : FALSE; \
  }))
#define g_atomic_pointer_add(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    (void) (0 ? (val) ^ (val) : 1); \
    (gssize) __sync_fetch_and_add ((atomic), (val)); \
  }))
#define g_atomic_pointer_and(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    (void) (0 ? (val) ^ (val) : 1); \
    (gsize) __sync_fetch_and_and ((atomic), (val)); \
  }))
#define g_atomic_pointer_or(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    (void) (0 ? (val) ^ (val) : 1); \
    (gsize) __sync_fetch_and_or ((atomic), (val)); \
  }))
#define g_atomic_pointer_xor(atomic, val) \
  (G_GNUC_EXTENSION ({ \
    G_STATIC_ASSERT (sizeof *(atomic) == sizeof (gpointer)); \
    (void) (0 ? (gpointer) *(atomic) : NULL); \
    (void) (0 ? (val) ^ (val) : 1); \
    (gsize) __sync_fetch_and_xor ((atomic), (val)); \
  }))

#endif /* !defined(__ATOMIC_SEQ_CST) */

#else /* defined(G_ATOMIC_LOCK_FREE) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */

#define g_atomic_int_get(atomic) \
  (g_atomic_int_get ((gint *) (atomic)))
#define g_atomic_int_set(atomic, newval) \
  (g_atomic_int_set ((gint *) (atomic), (gint) (newval)))
#define g_atomic_int_compare_and_exchange(atomic, oldval, newval) \
  (g_atomic_int_compare_and_exchange ((gint *) (atomic), (oldval), (newval)))
#define g_atomic_int_add(atomic, val) \
  (g_atomic_int_add ((gint *) (atomic), (val)))
#define g_atomic_int_and(atomic, val) \
  (g_atomic_int_and ((guint *) (atomic), (val)))
#define g_atomic_int_or(atomic, val) \
  (g_atomic_int_or ((guint *) (atomic), (val)))
#define g_atomic_int_xor(atomic, val) \
  (g_atomic_int_xor ((guint *) (atomic), (val)))
#define g_atomic_int_inc(atomic) \
  (g_atomic_int_inc ((gint *) (atomic)))
#define g_atomic_int_dec_and_test(atomic) \
  (g_atomic_int_dec_and_test ((gint *) (atomic)))

#if defined(glib_typeof) && (!defined(glib_typeof_2_68) || GLIB_VERSION_MIN_REQUIRED >= GLIB_VERSION_2_68)
/* The (void *) cast in the middle *looks* redundant, because
 * g_atomic_pointer_get returns void * already, but it's to silence
 * -Werror=bad-function-cast when we're doing something like:
 *   guintptr a, b; ...; a = g_atomic_pointer_get (&b);
 * which would otherwise be assigning the void * result of
 * g_atomic_pointer_get directly to the pointer-sized but
 * non-pointer-typed result. */
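/* Illustrative expansion of the guintptr case above, applied to the
 * definition just below (an editor's sketch, assuming the glib_typeof
 * branch):
 *
 *   guintptr a, b = 0;
 *   a = g_atomic_pointer_get (&b);
 *   // expands (roughly) to:
 *   // a = (guintptr) (void *) ((g_atomic_pointer_get) ((void *) &b));
 *
 * The inner (void *) keeps -Werror=bad-function-cast quiet when the outer
 * cast target is an integer type rather than a pointer type. */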
#define g_atomic_pointer_get(atomic) \
  (glib_typeof (*(atomic))) (void *) ((g_atomic_pointer_get) ((void *) atomic))
#else /* !(defined(glib_typeof) && (!defined(glib_typeof_2_68) || GLIB_VERSION_MIN_REQUIRED >= GLIB_VERSION_2_68)) */
#define g_atomic_pointer_get(atomic) \
  (g_atomic_pointer_get (atomic))
#endif

#define g_atomic_pointer_set(atomic, newval) \
  (g_atomic_pointer_set ((atomic), (gpointer) (newval)))

#define g_atomic_pointer_compare_and_exchange(atomic, oldval, newval) \
  (g_atomic_pointer_compare_and_exchange ((atomic), (gpointer) (oldval), (gpointer) (newval)))
#define g_atomic_pointer_add(atomic, val) \
  (g_atomic_pointer_add ((atomic), (gssize) (val)))
#define g_atomic_pointer_and(atomic, val) \
  (g_atomic_pointer_and ((atomic), (gsize) (val)))
#define g_atomic_pointer_or(atomic, val) \
  (g_atomic_pointer_or ((atomic), (gsize) (val)))
#define g_atomic_pointer_xor(atomic, val) \
  (g_atomic_pointer_xor ((atomic), (gsize) (val)))

#endif /* defined(G_ATOMIC_LOCK_FREE) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */

#endif /* __G_ATOMIC_H__ */