Lines matching refs:__order (fragments of libc++'s atomic implementation; only lines that contain __order are listed, so most entries are truncated)
710 _LIBCPP_INLINE_VISIBILITY inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
712 return __order == memory_order_relaxed ? __ATOMIC_RELAXED:
713 (__order == memory_order_acquire ? __ATOMIC_ACQUIRE:
714 (__order == memory_order_release ? __ATOMIC_RELEASE:
715 (__order == memory_order_seq_cst ? __ATOMIC_SEQ_CST:
716 (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL:
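The fragment above is the GCC-builtin backend's constexpr mapping from std::memory_order to the __ATOMIC_* constants expected by the __atomic_* builtins. Since only lines containing __order are listed, the tail of the function is missing; a sketch of the complete mapping, assuming the unlisted fallback resolves to __ATOMIC_CONSUME, looks like this:

_LIBCPP_INLINE_VISIBILITY inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
  // Nested conditionals instead of a switch keep this a valid C++11 constexpr.
  return __order == memory_order_relaxed ? __ATOMIC_RELAXED:
         (__order == memory_order_acquire ? __ATOMIC_ACQUIRE:
         (__order == memory_order_release ? __ATOMIC_RELEASE:
         (__order == memory_order_seq_cst ? __ATOMIC_SEQ_CST:
         (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL:
           __ATOMIC_CONSUME))));  // assumed fallback: memory_order_consume
}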
720 _LIBCPP_INLINE_VISIBILITY inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order)…
722 return __order == memory_order_relaxed ? __ATOMIC_RELAXED:
723 (__order == memory_order_acquire ? __ATOMIC_ACQUIRE:
724 (__order == memory_order_release ? __ATOMIC_RELAXED:
725 (__order == memory_order_seq_cst ? __ATOMIC_SEQ_CST:
726 (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE:
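__to_gcc_failure_order performs the same mapping for the failure order of a compare-exchange: release demotes to relaxed and acq_rel demotes to acquire, because a failed compare-exchange performs no store and its order may not carry a release component. A hypothetical caller (not the exact libc++ signature, which takes a __cxx_atomic_base_impl pointer) shows how the two helpers pair up:

// Hypothetical sketch only; illustrates which helper handles which order.
template <class _Tp>
bool __compare_exchange_strong_sketch(_Tp* __ptr, _Tp* __expected, _Tp __desired,
                                      memory_order __success, memory_order __failure) {
  return __atomic_compare_exchange(__ptr, __expected, &__desired,
                                   /*weak=*/false,
                                   __to_gcc_order(__success),
                                   __to_gcc_failure_order(__failure));
}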
743 void __cxx_atomic_thread_fence(memory_order __order) {
744 __atomic_thread_fence(__to_gcc_order(__order));
748 void __cxx_atomic_signal_fence(memory_order __order) {
749 __atomic_signal_fence(__to_gcc_order(__order));
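The two fence wrappers forward directly to the corresponding compiler builtins once the order has been translated. A caller-level illustration (ordinary user code, not libc++ source):

#include <atomic>

void publish_flag(std::atomic<int>& flag) {
  // With this backend, the release fence below ultimately reaches the
  // compiler as __atomic_thread_fence(__ATOMIC_RELEASE).
  std::atomic_thread_fence(std::memory_order_release);
  flag.store(1, std::memory_order_relaxed);
}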
755 memory_order __order) {
757 __to_gcc_order(__order));
763 memory_order __order) {
765 __to_gcc_order(__order));
771 memory_order __order) {
774 __to_gcc_order(__order));
780 _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
783 __to_gcc_order(__order));
790 _Tp __value, memory_order __order) {
793 __to_gcc_order(__order));
800 memory_order __order) {
803 __to_gcc_order(__order));
867 _Td __delta, memory_order __order) {
869 __to_gcc_order(__order));
875 memory_order __order) {
877 __to_gcc_order(__order));
883 _Td __delta, memory_order __order) {
885 __to_gcc_order(__order));
891 memory_order __order) {
893 __to_gcc_order(__order));
899 _Tp __pattern, memory_order __order) {
901 __to_gcc_order(__order));
907 _Tp __pattern, memory_order __order) {
909 __to_gcc_order(__order));
915 _Tp __pattern, memory_order __order) {
917 __to_gcc_order(__order));
923 memory_order __order) {
925 __to_gcc_order(__order));
931 _Tp __pattern, memory_order __order) {
933 __to_gcc_order(__order));
939 memory_order __order) {
941 __to_gcc_order(__order));
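Lines 755 through 941 are the bodies of the __atomic_*-based wrappers: store, load, exchange, and the fetch_add/sub/and/or/xor family, each in volatile and non-volatile overloads (plus ptrdiff_t overloads for pointer types). Every one of them translates __order with __to_gcc_order and forwards to the matching builtin. A representative sketch of three of the non-volatile wrappers, with the signatures and the __a_value member assumed from the visible fragments:

template <typename _Tp>
void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val,
                        memory_order __order) {
  // The generic __atomic_store builtin takes the value by address.
  __atomic_store(&__a->__a_value, &__val, __to_gcc_order(__order));
}

template <typename _Tp>
_Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a,
                      memory_order __order) {
  _Tp __ret;
  __atomic_load(&__a->__a_value, &__ret, __to_gcc_order(__order));
  return __ret;
}

template <typename _Tp>
_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Tp __delta,
                           memory_order __order) {
  return __atomic_fetch_add(&__a->__a_value, __delta, __to_gcc_order(__order));
}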
965 void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
966 __c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
970 void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
971 __c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
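From line 965 onward the fragments come from the alternative backend built on Clang's __c11_atomic_* builtins. Those builtins take the order as an integer, and with C++20's scoped memory_order the enumerators have to be converted explicitly, hence the static_cast to __memory_order_underlying_t. A plausible definition of that alias, consistent with how it is used here (the alias itself never mentions __order, so it cannot appear in this listing):

typedef underlying_type<memory_order>::type __memory_order_underlying_t;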
987 void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order)…
988 __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
992 void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val, memory_order __order) _NOEXCE…
993 __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
998 _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXC…
1000 …c_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
1004 _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
1006 …c_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
1011 …_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
1012 …__c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
1016 _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> * __a, _Tp __value, memory_order __order) _NO…
1017 …__c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
1044 …fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
1045 …_c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
1049 _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _N…
1050 …_c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
1055 …dd(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
1056 …_c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
1060 …_fetch_add(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
1061 …_c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
1066 …fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
1067 …_c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
1071 _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _N…
1072 …_c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
1076 …ub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
1077 …_c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
1081 …_fetch_sub(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
1082 …_c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
1087 …tch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
1088 …11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
1092 _Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) …
1093 …11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
1098 …etch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
1099 …c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
1103 _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _…
1104 …c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
1109 …tch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
1110 …11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
1114 _Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) …
1115 …11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
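Lines 987 through 1115 mirror the earlier wrappers for the C11 backend, again as volatile/non-volatile overload pairs that pass the converted order straight to the builtin. A representative sketch of one such pair, with the visibility attributes and the _Atomic-qualified __a_value member treated as assumptions:

template <class _Tp>
_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value,
                          memory_order __order) _NOEXCEPT {
  return __c11_atomic_exchange(&__a->__a_value, __value,
                               static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value,
                          memory_order __order) _NOEXCEPT {
  return __c11_atomic_exchange(&__a->__a_value, __value,
                               static_cast<__memory_order_underlying_t>(__order));
}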
1570 memory_order __order;
1573 return !__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val);
1579 _LIBCPP_INLINE_VISIBILITY bool __cxx_atomic_wait(_Atp* __a, _Tp const __val, memory_order __order)
1581 __cxx_atomic_wait_test_fn_impl<_Atp, _Tp> __test_fn = {__a, __val, __order};
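The final fragments belong to the polling path of the atomic wait machinery: the test functor captures the atomic, the expected value, and the caller's order, then reloads with that order and reports "keep waiting" while the observed value still compares equal to __val. A sketch of the functor, with the member list taken from the aggregate initializer on line 1581 and everything else (attributes, cv-qualification of the call operator) treated as assumptions:

template <class _Atp, class _Tp>
struct __cxx_atomic_wait_test_fn_impl {
  _Atp* __a;
  _Tp __val;
  memory_order __order;
  bool operator()() const {
    // True while the freshly loaded value still equals __val, i.e. while
    // the waiter should keep blocking or polling.
    return !__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val);
  }
};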