/*
 * kmp_os.h -- KPTS runtime header file.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_OS_H
#define KMP_OS_H

#include "kmp_config.h"
#include <atomic>
#include <stdarg.h>
#include <stdlib.h>

#define KMP_FTN_PLAIN 1
#define KMP_FTN_APPEND 2
#define KMP_FTN_UPPER 3
/*
#define KMP_FTN_PREPEND 4
#define KMP_FTN_UAPPEND 5
*/

#define KMP_PTR_SKIP (sizeof(void *))

/* -------------------------- Compiler variations ------------------------ */

#define KMP_OFF 0
#define KMP_ON 1

#define KMP_MEM_CONS_VOLATILE 0
#define KMP_MEM_CONS_FENCE 1

#ifndef KMP_MEM_CONS_MODEL
#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif

#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

/* ------------------------- Compiler recognition ---------------------- */
#define KMP_COMPILER_ICC 0
#define KMP_COMPILER_GCC 0
#define KMP_COMPILER_CLANG 0
#define KMP_COMPILER_MSVC 0

#if defined(__INTEL_COMPILER)
#undef KMP_COMPILER_ICC
#define KMP_COMPILER_ICC 1
#elif defined(__clang__)
#undef KMP_COMPILER_CLANG
#define KMP_COMPILER_CLANG 1
#elif defined(__GNUC__)
#undef KMP_COMPILER_GCC
#define KMP_COMPILER_GCC 1
#elif defined(_MSC_VER)
#undef KMP_COMPILER_MSVC
#define KMP_COMPILER_MSVC 1
#else
#error Unknown compiler
#endif

#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD)
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
#define KMP_GROUP_AFFINITY 1
#else
#define KMP_GROUP_AFFINITY 0
#endif
#else
#define KMP_AFFINITY_SUPPORTED 0
#define KMP_GROUP_AFFINITY 0
#endif
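
// Usage sketch (illustrative only, not code from this runtime): affinity
// support elsewhere in the library is expected to be guarded by these flags,
// e.g.
//
//   #if KMP_AFFINITY_SUPPORTED
//   /* OS-specific binding of a thread to a hardware context */
//   #if KMP_GROUP_AFFINITY
//   /* extra handling for Windows processor groups (>64 logical CPUs) */
//   #endif
//   #endif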

/* Check for quad-precision extension. */
#define KMP_HAVE_QUAD 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC
/* _Quad is already defined for icc */
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#elif KMP_COMPILER_CLANG
/* Clang doesn't support a software-implemented
   128-bit extended precision type yet */
typedef long double _Quad;
#elif KMP_COMPILER_GCC
/* GCC on NetBSD lacks __multc3/__divtc3 builtins needed for quad */
#if !KMP_OS_NETBSD
typedef __float128 _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#elif KMP_COMPILER_MSVC
typedef long double _Quad;
#endif
#else
#if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
typedef long double _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#define KMP_USE_X87CONTROL 0
#if KMP_OS_WINDOWS
#define KMP_END_OF_LINE "\r\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#ifndef KMP_STRUCT64
typedef __int64 kmp_int64;
typedef unsigned __int64 kmp_uint64;
#define KMP_INT64_SPEC "I64d"
#define KMP_UINT64_SPEC "I64u"
#else
struct kmp_struct64 {
  kmp_int32 a, b;
};
typedef struct kmp_struct64 kmp_int64;
typedef struct kmp_struct64 kmp_uint64;
/* Not sure what to use for KMP_[U]INT64_SPEC here */
#endif
#if KMP_ARCH_X86 && KMP_MSVC_COMPAT
#undef KMP_USE_X87CONTROL
#define KMP_USE_X87CONTROL 1
#endif
#if KMP_ARCH_X86_64
#define KMP_INTPTR 1
typedef __int64 kmp_intptr_t;
typedef unsigned __int64 kmp_uintptr_t;
#define KMP_INTPTR_SPEC "I64d"
#define KMP_UINTPTR_SPEC "I64u"
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_OS_UNIX
#define KMP_END_OF_LINE "\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
typedef long long kmp_int64;
typedef unsigned long long kmp_uint64;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#define KMP_INT64_SPEC "lld"
#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 || \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64
#define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
#else
#error "Can't determine size_t printf format specifier."
#endif
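
// Usage sketch (illustrative only): the *_SPEC strings are printf conversion
// suffixes meant to be spliced into format literals, so the same call
// compiles on ILP32, LP64 and LLP64 targets:
//
//   kmp_int64 n = 42;
//   printf("n = %" KMP_INT64_SPEC "\n", n);
//   printf("page = %" KMP_SIZE_T_SPEC "\n", (size_t)4096);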

#if KMP_ARCH_X86
#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
#endif

typedef size_t kmp_size_t;
typedef float kmp_real32;
typedef double kmp_real64;

#ifndef KMP_INTPTR
#define KMP_INTPTR 1
typedef long kmp_intptr_t;
typedef unsigned long kmp_uintptr_t;
#define KMP_INTPTR_SPEC "ld"
#define KMP_UINTPTR_SPEC "lu"
#endif

#ifdef BUILD_I8
typedef kmp_int64 kmp_int;
typedef kmp_uint64 kmp_uint;
#else
typedef kmp_int32 kmp_int;
typedef kmp_uint32 kmp_uint;
#endif /* BUILD_I8 */
#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
#define KMP_INT_MIN ((kmp_int32)0x80000000)

// stdarg handling
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) && \
    (KMP_OS_FREEBSD || KMP_OS_LINUX)
typedef va_list *kmp_va_list;
#define kmp_va_deref(ap) (*(ap))
#define kmp_va_addr_of(ap) (&(ap))
#else
typedef va_list kmp_va_list;
#define kmp_va_deref(ap) (ap)
#define kmp_va_addr_of(ap) (ap)
#endif

#ifdef __cplusplus
// macros to cast out qualifiers and to re-interpret types
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu ), and to obtain
// signed/unsigned flavors of a type
template <typename T> struct traits_t {};
// int
template <> struct traits_t<signed int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffff;
  static const signed_t min_value = 0x80000000;
  static const int type_size = sizeof(signed_t);
};
// unsigned int
template <> struct traits_t<unsigned int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffff;
  static const unsigned_t min_value = 0x00000000;
  static const int type_size = sizeof(unsigned_t);
};
// long
template <> struct traits_t<signed long> {
  typedef signed long signed_t;
  typedef unsigned long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const int type_size = sizeof(signed_t);
};
// long long
template <> struct traits_t<signed long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffffffffffffLL;
  static const signed_t min_value = 0x8000000000000000LL;
  static const int type_size = sizeof(signed_t);
};
// unsigned long long
template <> struct traits_t<unsigned long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffffffffffffLL;
  static const unsigned_t min_value = 0x0000000000000000LL;
  static const int type_size = sizeof(unsigned_t);
};
//-------------------------------------------------------------------------
#else
#define CCAST(type, var) (type)(var)
#define RCAST(type, var) (type)(var)
#endif // __cplusplus
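
// Usage sketch (illustrative only; the helper below is hypothetical, and the
// `spec` strings themselves are defined in a runtime source file, not here):
// traits_t lets templated debug code pick the right printf specifier and
// value range for a type T without naming T concretely:
//
//   template <typename T> void dump_max(const char *name) {
//     char fmt[32];
//     snprintf(fmt, sizeof(fmt), "%s=%%%s\n", name, traits_t<T>::spec);
//     printf(fmt, traits_t<T>::max_value); // e.g. "n=%d\n" for signed int
//   }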

#define KMP_EXPORT extern /* export declaration in guide libraries */

#if __GNUC__ >= 4 && !defined(__MINGW32__)
#define __forceinline __inline
#endif

/* Check if the OS/arch can support user-level mwait */
// All mwait code tests for UMWAIT first, so it should only fall back to ring3
// MWAIT for KNL.
#define KMP_HAVE_MWAIT \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) && \
   !KMP_MIC2)
#define KMP_HAVE_UMWAIT \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) && \
   !KMP_MIC)

#if KMP_OS_WINDOWS
#include <windows.h>

static inline int KMP_GET_PAGE_SIZE(void) {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}
#else
#define KMP_GET_PAGE_SIZE() getpagesize()
#endif

/* Note: the macro argument is parenthesized below so expressions like
   PAGE_ALIGNED(p + 1) bind as intended. */
#define PAGE_ALIGNED(_addr) \
  (!((size_t)(_addr) & (size_t)(KMP_GET_PAGE_SIZE() - 1)))
#define ALIGN_TO_PAGE(x) \
  (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))

/* ---------- Support for cache alignment, padding, etc. ----------------*/

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define INTERNODE_CACHE_LINE 4096 /* for multi-node systems */

/* Define the default size of the cache line */
#ifndef CACHE_LINE
#define CACHE_LINE 128 /* cache line size in bytes */
#else
#if (CACHE_LINE < 64) && !defined(KMP_OS_DARWIN)
// 2006-02-13: This produces too many warnings on OS X*. Disable for now
#warning CACHE_LINE is too small.
#endif
#endif /* CACHE_LINE */

#define KMP_CACHE_PREFETCH(ADDR) /* nothing */

// Define attribute that indicates that the fall through from the previous
// case label is intentional and should not be diagnosed by a compiler
// Code from libcxx/include/__config
// Use a function like macro to imply that it must be followed by a semicolon
#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define KMP_FALLTHROUGH() [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define KMP_FALLTHROUGH() [[clang::fallthrough]]
#elif __has_attribute(fallthrough) || __GNUC__ >= 7
#define KMP_FALLTHROUGH() __attribute__((__fallthrough__))
#else
#define KMP_FALLTHROUGH() ((void)0)
#endif
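
// Usage sketch (illustrative only): KMP_FALLTHROUGH() marks a deliberate
// case fall-through and, being function-like, must be followed by a
// semicolon:
//
//   switch (state) {
//   case 0:
//     do_first_step();
//     KMP_FALLTHROUGH();
//   case 1:
//     do_second_step();
//     break;
//   }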

#if KMP_HAVE_ATTRIBUTE_WAITPKG
#define KMP_ATTRIBUTE_TARGET_WAITPKG __attribute__((target("waitpkg")))
#else
#define KMP_ATTRIBUTE_TARGET_WAITPKG /* Nothing */
#endif

#if KMP_HAVE_ATTRIBUTE_RTM
#define KMP_ATTRIBUTE_TARGET_RTM __attribute__((target("rtm")))
#else
#define KMP_ATTRIBUTE_TARGET_RTM /* Nothing */
#endif

// Define attribute that indicates a function does not return
#if __cplusplus >= 201103L
#define KMP_NORETURN [[noreturn]]
#elif KMP_OS_WINDOWS
#define KMP_NORETURN __declspec(noreturn)
#else
#define KMP_NORETURN __attribute__((noreturn))
#endif

#if KMP_OS_WINDOWS && KMP_MSVC_COMPAT
#define KMP_ALIGN(bytes) __declspec(align(bytes))
#define KMP_THREAD_LOCAL __declspec(thread)
#define KMP_ALIAS /* Nothing */
#else
#define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
#define KMP_THREAD_LOCAL __thread
#define KMP_ALIAS(alias_of) __attribute__((alias(alias_of)))
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE && !KMP_DYNAMIC_LIB
#define KMP_WEAK_ATTRIBUTE_EXTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_EXTERNAL /* Nothing */
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE
#define KMP_WEAK_ATTRIBUTE_INTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_INTERNAL /* Nothing */
#endif

// Define KMP_VERSION_SYMBOL and KMP_EXPAND_NAME
#ifndef KMP_STR
#define KMP_STR(x) _KMP_STR(x)
#define _KMP_STR(x) #x
#endif

#ifdef KMP_USE_VERSION_SYMBOLS
// If using versioned symbols, KMP_EXPAND_NAME prepends
// __kmp_api_ to the real API name
#define KMP_EXPAND_NAME(api_name) _KMP_EXPAND_NAME(api_name)
#define _KMP_EXPAND_NAME(api_name) __kmp_api_##api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) \
  _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, default_ver) \
  __typeof__(__kmp_api_##api_name) __kmp_api_##api_name##_##ver_num##_alias \
      __attribute__((alias(KMP_STR(__kmp_api_##api_name)))); \
  __asm__( \
      ".symver " KMP_STR(__kmp_api_##api_name##_##ver_num##_alias) "," KMP_STR( \
          api_name) "@" ver_str "\n\t"); \
  __asm__(".symver " KMP_STR(__kmp_api_##api_name) "," KMP_STR( \
      api_name) "@@" default_ver "\n\t")
#else // KMP_USE_VERSION_SYMBOLS
#define KMP_EXPAND_NAME(api_name) api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) /* Nothing */
#endif // KMP_USE_VERSION_SYMBOLS
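
// Expansion sketch (illustrative only; `foo` is a hypothetical API name):
//
//   KMP_VERSION_SYMBOL(foo, 10, "VERSION_1.0");
//
// declares __kmp_api_foo_10_alias as an alias of __kmp_api_foo, then emits
// .symver directives binding foo@VERSION_1.0 to that alias and foo@@VERSION
// (the default version) to __kmp_api_foo itself.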

/* Temporary note: if performance testing of this passes, we can remove
   all references to KMP_DO_ALIGN and replace with KMP_ALIGN. */
#define KMP_DO_ALIGN(bytes) KMP_ALIGN(bytes)
#define KMP_ALIGN_CACHE KMP_ALIGN(CACHE_LINE)
#define KMP_ALIGN_CACHE_INTERNODE KMP_ALIGN(INTERNODE_CACHE_LINE)

/* General purpose fence types for memory operations */
enum kmp_mem_fence_type {
  kmp_no_fence, /* No memory fence */
  kmp_acquire_fence, /* Acquire (read) memory fence */
  kmp_release_fence, /* Release (write) memory fence */
  kmp_full_fence /* Full (read+write) memory fence */
};

// Synchronization primitives

#if KMP_ASM_INTRINS && KMP_OS_WINDOWS

#if KMP_MSVC_COMPAT && !KMP_COMPILER_CLANG
#pragma intrinsic(InterlockedExchangeAdd)
#pragma intrinsic(InterlockedCompareExchange)
#pragma intrinsic(InterlockedExchange)
#pragma intrinsic(InterlockedExchange64)
#endif

// Using InterlockedIncrement / InterlockedDecrement causes a library loading
// ordering problem, so we use InterlockedExchangeAdd instead.
#define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p) \
  InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p) \
  InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p) \
  InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_ADD32(p, v) \
  InterlockedExchangeAdd((volatile long *)(p), (v))

#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
  InterlockedCompareExchange((volatile long *)(p), (long)(sv), (long)(cv))

#define KMP_XCHG_FIXED32(p, v) \
  InterlockedExchange((volatile long *)(p), (long)(v))
#define KMP_XCHG_FIXED64(p, v) \
  InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp = InterlockedExchange((volatile long *)p, *(long *)&v);
  return *(kmp_real32 *)&tmp;
}

// Routines that we still need to implement in assembly.
extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

//#define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32((p), 1)
//#define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32((p), 1)
#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
//#define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32((p), 4)
//#define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32((p), 4)
#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
//#define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32((p), -1)
//#define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32((p), -1)
#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
//#define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32((p), (v))
#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))

#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
#define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32((p), (v))
#define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32((p), (v))
#define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64((p), (v))
#define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v) \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
//#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
//#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
//#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));
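
// Usage sketch (illustrative only): the COMPARE_AND_STORE macros return
// nonzero iff the swap happened, while the *_RET forms return the value the
// location held before the attempt. A hypothetical spin acquire:
//
//   volatile kmp_int32 lk = 0;
//   while (!KMP_COMPARE_AND_STORE_ACQ32(&lk, 0, 1)) {
//     /* back off and retry */
//   }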

#elif (KMP_ASM_INTRINS && KMP_OS_UNIX) || !(KMP_ARCH_X86 || KMP_ARCH_X86_64)

/* cast p to correct type so that proper intrinsic will be used */
#define KMP_TEST_THEN_INC32(p) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_INC64(p) \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_INC_ACQ64(p) \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_INC64(p) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_ADD4_32(p) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD4_64(p) \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_ADD4_ACQ64(p) \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC64(p) \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC_ACQ64(p) \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD4_64(p) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC64(p) \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_DEC32(p) \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_DEC_ACQ32(p) \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_ADD8(p, v) \
  __sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD64(p, v) \
  __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD64(p, v) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), (kmp_int64)(v))
#endif

#define KMP_TEST_THEN_OR8(p, v) \
  __sync_fetch_and_or((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v) \
  __sync_fetch_and_and((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v) \
  __sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v) \
  __sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_OR64(p, v) \
  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
                    __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_AND64(p, v) \
  __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_OR64(p, v) \
  __sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v) \
  __sync_fetch_and_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#endif

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv), \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv), \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __sync_bool_compare_and_swap((void *volatile *)(p), (void *)(cv), \
                               (void *)(sv))

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
  __sync_val_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
                              (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
  __sync_val_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv), \
                              (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
  __sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
                              (kmp_uint32)(sv))
#if KMP_ARCH_MIPS
static inline bool mips_sync_bool_compare_and_swap(volatile kmp_uint64 *p,
                                                   kmp_uint64 cv,
                                                   kmp_uint64 sv) {
  return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                                   __ATOMIC_SEQ_CST);
}
// Returns the value *p held before the attempt (not a bool), matching the
// val-compare-and-swap semantics KMP_COMPARE_AND_STORE_RET64 expects.
static inline kmp_uint64 mips_sync_val_compare_and_swap(volatile kmp_uint64 *p,
                                                        kmp_uint64 cv,
                                                        kmp_uint64 sv) {
  __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return cv;
}
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
  mips_sync_val_compare_and_swap((volatile kmp_uint64 *)(p), \
                                 (kmp_uint64)(cv), (kmp_uint64)(sv))
#else
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
  __sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                              (kmp_uint64)(sv))
#endif

#define KMP_XCHG_FIXED8(p, v) \
  __sync_lock_test_and_set((volatile kmp_uint8 *)(p), (kmp_uint8)(v))
#define KMP_XCHG_FIXED16(p, v) \
  __sync_lock_test_and_set((volatile kmp_uint16 *)(p), (kmp_uint16)(v))
#define KMP_XCHG_FIXED32(p, v) \
  __sync_lock_test_and_set((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_XCHG_FIXED64(p, v) \
  __sync_lock_test_and_set((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp =
      __sync_lock_test_and_set((volatile kmp_uint32 *)(p), *(kmp_uint32 *)&v);
  return *(kmp_real32 *)&tmp;
}

inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
  kmp_int64 tmp =
      __sync_lock_test_and_set((volatile kmp_uint64 *)(p), *(kmp_uint64 *)&v);
  return *(kmp_real64 *)&tmp;
}
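
// Note: the float/double exchanges above reinterpret the value's bits through
// pointer casts (compiler-tolerated type punning). A strictly conforming
// sketch of the same idea (assuming <string.h>) would copy the bits instead:
//
//   kmp_uint32 bits;
//   memcpy(&bits, &v, sizeof(bits));
//   kmp_uint32 old_bits =
//       __sync_lock_test_and_set((volatile kmp_uint32 *)p, bits);
//   kmp_real32 r;
//   memcpy(&r, &old_bits, sizeof(r));
//   return r;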

#else

extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

#define KMP_TEST_THEN_INC32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_ADD4_32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_ADD8(p, v) \
  __kmp_test_then_add8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), (kmp_int32)(v))
#define KMP_TEST_THEN_ADD64(p, v) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), (kmp_int64)(v))

#define KMP_TEST_THEN_OR8(p, v) \
  __kmp_test_then_or8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v) \
  __kmp_test_then_and8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v) \
  __kmp_test_then_or32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v) \
  __kmp_test_then_and32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_OR64(p, v) \
  __kmp_test_then_or64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v) \
  __kmp_test_then_and64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv), \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv), \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv), \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv), \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
  __kmp_compare_and_store_ret32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v) \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

#endif /* KMP_ASM_INTRINS */
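
// Usage sketch (illustrative only): whichever branch above supplied the
// implementation, the TEST_THEN macros return the value the location held
// *before* the update:
//
//   volatile kmp_int32 counter = 0;
//   kmp_int32 old = KMP_TEST_THEN_INC32(&counter); // old == 0, counter == 1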

/* ------------- relaxed consistency memory model stuff ------------------ */

#if KMP_OS_WINDOWS
#ifdef __ABSOFT_WIN
#define KMP_MB() asm("nop")
#define KMP_IMB() asm("nop")
#else
#define KMP_MB() /* _asm{ nop } */
#define KMP_IMB() /* _asm{ nop } */
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS || \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64
#define KMP_MB() __sync_synchronize()
#endif

#ifndef KMP_MB
#define KMP_MB() /* nothing to do */
#endif

#ifndef KMP_IMB
#define KMP_IMB() /* nothing to do */
#endif

#ifndef KMP_ST_REL32
#define KMP_ST_REL32(A, D) (*(A) = (D))
#endif

#ifndef KMP_ST_REL64
#define KMP_ST_REL64(A, D) (*(A) = (D))
#endif

#ifndef KMP_LD_ACQ32
#define KMP_LD_ACQ32(A) (*(A))
#endif

#ifndef KMP_LD_ACQ64
#define KMP_LD_ACQ64(A) (*(A))
#endif

/* ------------------------------------------------------------------------ */
// FIXME - maybe this should be
//
// #define TCR_4(a) (*(volatile kmp_int32 *)(&a))
// #define TCW_4(a,b) (a) = (*(volatile kmp_int32 *)&(b))
//
// #define TCR_8(a) (*(volatile kmp_int64 *)(a))
// #define TCW_8(a,b) (a) = (*(volatile kmp_int64 *)(&b))
//
// I'm fairly certain this is the correct thing to do, but I'm afraid
// of performance regressions.

#define TCR_1(a) (a)
#define TCW_1(a, b) (a) = (b)
#define TCR_4(a) (a)
#define TCW_4(a, b) (a) = (b)
#define TCI_4(a) (++(a))
#define TCD_4(a) (--(a))
#define TCR_8(a) (a)
#define TCW_8(a, b) (a) = (b)
#define TCI_8(a) (++(a))
#define TCD_8(a) (--(a))
#define TCR_SYNC_4(a) (a)
#define TCW_SYNC_4(a, b) (a) = (b)
#define TCX_SYNC_4(a, b, c) \
  KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a), \
                              (kmp_int32)(b), (kmp_int32)(c))
#define TCR_SYNC_8(a) (a)
#define TCW_SYNC_8(a, b) (a) = (b)
#define TCX_SYNC_8(a, b, c) \
  KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a), \
                              (kmp_int64)(b), (kmp_int64)(c))

#if KMP_ARCH_X86 || KMP_ARCH_MIPS
// What about ARM?
#define TCR_PTR(a) ((void *)TCR_4(a))
#define TCW_PTR(a, b) TCW_4((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_4((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_4((a), (b), (c)))

#else /* 64 bit pointers */

#define TCR_PTR(a) ((void *)TCR_8(a))
#define TCW_PTR(a, b) TCW_8((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_8((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_8((a), (b), (c)))

#endif /* KMP_ARCH_X86 */
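
// Usage sketch (illustrative only; the field name is hypothetical): TCR_* and
// TCW_* mark "thread-safe" reads and writes of shared scalars. Today they
// compile to plain accesses, so they mostly document intent and give one
// place to add volatile casts or fences later (see the FIXME above). The
// _PTR forms pick the 4- or 8-byte flavor to match the pointer width:
//
//   void *task = TCR_PTR(thr->th_current_task);
//   TCW_PTR(thr->th_current_task, NULL);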

/* If these FTN_{TRUE,FALSE} values change, may need to change several places
   where they are used to check that language is Fortran, not C. */

#ifndef FTN_TRUE
#define FTN_TRUE TRUE
#endif

#ifndef FTN_FALSE
#define FTN_FALSE FALSE
#endif

typedef void (*microtask_t)(int *gtid, int *npr, ...);

#ifdef USE_VOLATILE_CAST
#define VOLATILE_CAST(x) (volatile x)
#else
#define VOLATILE_CAST(x) (x)
#endif

#define KMP_WAIT __kmp_wait_4
#define KMP_WAIT_PTR __kmp_wait_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4
#define KMP_GE __kmp_ge_4
#define KMP_LE __kmp_le_4

/* Workaround for Intel(R) 64 code gen bug when taking address of static array
 * (Intel(R) 64 Tracker #138) */
#if (KMP_ARCH_X86_64 || KMP_ARCH_PPC64) && KMP_OS_LINUX
#define STATIC_EFI2_WORKAROUND
#else
#define STATIC_EFI2_WORKAROUND static
#endif

// Support of BGET usage
#ifndef KMP_USE_BGET
#define KMP_USE_BGET 1
#endif

// Switches for OSS builds
#ifndef USE_CMPXCHG_FIX
#define USE_CMPXCHG_FIX 1
#endif

// Enable dynamic user lock
#define KMP_USE_DYNAMIC_LOCK 1

// Enable Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) if
// dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
// Visual studio can't handle the asm sections in this code
// (body parenthesized so the macro is safe inside larger expressions)
#define KMP_USE_TSX ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC)
#ifdef KMP_USE_ADAPTIVE_LOCKS
#undef KMP_USE_ADAPTIVE_LOCKS
#endif
#define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
#endif

// Enable tick time conversion of ticks to seconds
#if KMP_STATS_ENABLED
#define KMP_HAVE_TICK_TIME \
  (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
#endif

// Warning levels
enum kmp_warnings_level {
  kmp_warnings_off = 0, /* No warnings */
  kmp_warnings_low, /* Minimal warnings (default) */
  kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
  kmp_warnings_verbose /* reserved */
};

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

// Macros for C++11 atomic functions
#define KMP_ATOMIC_LD(p, order) (p)->load(std::memory_order_##order)
#define KMP_ATOMIC_OP(op, p, v, order) (p)->op(v, std::memory_order_##order)

// For non-default load/store
#define KMP_ATOMIC_LD_ACQ(p) KMP_ATOMIC_LD(p, acquire)
#define KMP_ATOMIC_LD_RLX(p) KMP_ATOMIC_LD(p, relaxed)
#define KMP_ATOMIC_ST_REL(p, v) KMP_ATOMIC_OP(store, p, v, release)
#define KMP_ATOMIC_ST_RLX(p, v) KMP_ATOMIC_OP(store, p, v, relaxed)

// For non-default fetch_<op>
#define KMP_ATOMIC_ADD(p, v) KMP_ATOMIC_OP(fetch_add, p, v, acq_rel)
#define KMP_ATOMIC_SUB(p, v) KMP_ATOMIC_OP(fetch_sub, p, v, acq_rel)
#define KMP_ATOMIC_AND(p, v) KMP_ATOMIC_OP(fetch_and, p, v, acq_rel)
#define KMP_ATOMIC_OR(p, v) KMP_ATOMIC_OP(fetch_or, p, v, acq_rel)
#define KMP_ATOMIC_INC(p) KMP_ATOMIC_OP(fetch_add, p, 1, acq_rel)
#define KMP_ATOMIC_DEC(p) KMP_ATOMIC_OP(fetch_sub, p, 1, acq_rel)
#define KMP_ATOMIC_ADD_RLX(p, v) KMP_ATOMIC_OP(fetch_add, p, v, relaxed)
#define KMP_ATOMIC_INC_RLX(p) KMP_ATOMIC_OP(fetch_add, p, 1, relaxed)
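
// Usage sketch (illustrative only): these wrappers just pick a non-default
// std::memory_order for std::atomic operations:
//
//   std::atomic<kmp_int32> nwaiting{0};
//   KMP_ATOMIC_INC(&nwaiting); // fetch_add(1, acq_rel)
//   kmp_int32 n = KMP_ATOMIC_LD_RLX(&nwaiting); // load(relaxed)
//   KMP_ATOMIC_ST_REL(&nwaiting, 0); // store(0, release)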

// Callers of the following functions cannot see the side effect on "expected".
template <typename T>
bool __kmp_atomic_compare_store(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acq_rel, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_acq(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acquire, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_rel(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_release, std::memory_order_relaxed);
}

#endif /* KMP_OS_H */
// Safe C API
#include "kmp_safe_c_api.h"