/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#ifndef GRPC_CORE_LIB_GPRPP_ATOMIC_H
#define GRPC_CORE_LIB_GPRPP_ATOMIC_H

#include <grpc/support/port_platform.h>

#include <atomic>

#include <grpc/support/atm.h>

namespace grpc_core {

// Strongly typed wrapper around std::memory_order; each enumerator maps
// directly to its std counterpart.
enum class MemoryOrder {
  RELAXED = static_cast<int>(std::memory_order_relaxed),
  CONSUME = static_cast<int>(std::memory_order_consume),
  ACQUIRE = static_cast<int>(std::memory_order_acquire),
  RELEASE = static_cast<int>(std::memory_order_release),
  ACQ_REL = static_cast<int>(std::memory_order_acq_rel),
  SEQ_CST = static_cast<int>(std::memory_order_seq_cst)
};

// Thin wrapper around std::atomic<T> that takes MemoryOrder rather than
// std::memory_order. The GPR_ATM_INC_CAS_THEN / GPR_ATM_INC_ADD_THEN macros
// (from grpc/support/atm.h) expand to their argument by default and exist so
// that builds can hook atomic operations for instrumentation.
template <typename T>
class Atomic {
 public:
  explicit Atomic(T val = T()) : storage_(val) {}

  T Load(MemoryOrder order) const {
    return storage_.load(static_cast<std::memory_order>(order));
  }

  void Store(T val, MemoryOrder order) {
    storage_.store(val, static_cast<std::memory_order>(order));
  }

  T Exchange(T desired, MemoryOrder order) {
    return storage_.exchange(desired, static_cast<std::memory_order>(order));
  }

  // On failure, *expected is updated with the value actually observed.
  bool CompareExchangeWeak(T* expected, T desired, MemoryOrder success,
                           MemoryOrder failure) {
    return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_weak(
        *expected, desired, static_cast<std::memory_order>(success),
        static_cast<std::memory_order>(failure)));
  }

  bool CompareExchangeStrong(T* expected, T desired, MemoryOrder success,
                             MemoryOrder failure) {
    return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_strong(
        *expected, desired, static_cast<std::memory_order>(success),
        static_cast<std::memory_order>(failure)));
  }

  template <typename Arg>
  T FetchAdd(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
    return GPR_ATM_INC_ADD_THEN(storage_.fetch_add(
        static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
  }

  template <typename Arg>
  T FetchSub(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
    return GPR_ATM_INC_ADD_THEN(storage_.fetch_sub(
        static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
  }

  // Atomically increments the counter only if its current value is nonzero.
  // Returns true if the increment took place; false if the counter was zero.
  bool IncrementIfNonzero(MemoryOrder load_order = MemoryOrder::ACQUIRE) {
    T count = storage_.load(static_cast<std::memory_order>(load_order));
    do {
      // If zero, we are done (without an increment). If not, we must do a CAS
      // to maintain the contract: never increment a counter that is already
      // zero.
      if (count == 0) {
        return false;
      }
    } while (!CompareExchangeWeak(&count, count + 1, MemoryOrder::ACQ_REL,
                                  load_order));
    return true;
  }

 private:
  std::atomic<T> storage_;
};

}  // namespace grpc_core

#endif /* GRPC_CORE_LIB_GPRPP_ATOMIC_H */
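
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this header's API): one way
// IncrementIfNonzero() can back a "take a reference only if still alive"
// reference count. `HypotheticalRefCounted`, `TryRef`, and `Unref` are names
// invented for this example and do not exist in gRPC.
//
//   class HypotheticalRefCounted {
//    public:
//     // Take a reference only if the count has not already reached zero.
//     bool TryRef() { return refs_.IncrementIfNonzero(); }
//
//     // Drop a reference. FetchSub returns the value *before* subtraction,
//     // so a return of 1 means this call released the last reference.
//     bool Unref() {
//       return refs_.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1;
//     }
//
//    private:
//     grpc_core::Atomic<int> refs_{1};  // starts with one owner
//   };
//
// The CAS loop inside IncrementIfNonzero() is what makes TryRef() safe
// against a concurrent Unref() racing the count down to zero: a plain
// FetchAdd could resurrect an object whose count had already hit zero.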