// These classes are part of the Census project. Please cc
// census-team@google.com for any changes to this file.

#ifndef BASE_CENSUSHANDLE_H_
#define BASE_CENSUSHANDLE_H_

#include <atomic>
#include <cstdint>
#include <memory>

#include "third_party/abseil-cpp/absl/base/attributes.h"

class CensusHandle;

namespace stats_census {
class CensusHandleManager;
bool IsDefaultHandle(const CensusHandle&);
}  // namespace stats_census

// This class defines an opaque handle, CensusHandle, that is created inside
// the Census library. This handle is embedded inside TraceContext and gets
// copied as part of the NewCallback mechanism. It is only interpreted by the
// Census library. See stats/census/public/census-interface.h for details.
// CensusHandle is thread-compatible.
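//
// Example usage (an illustrative sketch; populated handles are only minted by
// the Census library, so this shows just the value semantics of the handle):
//
//   CensusHandle a;                 // default handle, holds no entry
//   CensusHandle b = a;             // copy shares (and refs) the entry, if any
//   CensusHandle c = std::move(b);  // move transfers the entry; b becomes empty
//   swap(a, c);                     // constant-time pointer swap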
class CensusHandle {
  // This class carefully synchronizes with the SIGPROF signal handler. Any
  // operation which mutates a `CensusHandle` by reference or pointer might be
  // modifying the thread-local CensusHandle. Any subsequent `Unref()` can race
  // with the signal handler. As such, any call to `Unref()` should first use a
  // `std::atomic_signal_fence` to ensure that the signal handler sees all
  // earlier mutations before the `Unref()`.
 public:
  constexpr CensusHandle() = default;

  CensusHandle(const CensusHandle& other)
      : entry_([&] {
          // Cache `other.get_entry()` to avoid redundant loads due
          // to compilers not caching atomic loads.
          auto* entry = other.get_entry();
          if (entry != nullptr) entry->Ref();
          return entry;
        }()) {}

  CensusHandle& operator=(const CensusHandle& other) {
    EntryBase* old_entry = get_entry();
    EntryBase* other_entry = other.get_entry();
    if (old_entry == other_entry) return *this;
    set_entry(other_entry);

    // As CensusHandle is thread-compatible and not thread-safe, we don't need
    // to handle users concurrently doing reads and writes of a CensusHandle
    // from different threads; users will need to do their own synchronization
    // in that case. We need to make sure that when the sigprof handler
    // interrupts this function and reads the old value of entry_, the code
    // below to unref old_entry cannot have started. atomic_signal_fence (as
    // opposed to an atomic_thread_fence) expresses this constraint directly
    // and allows the compiler to enforce it as efficiently as it can.
    std::atomic_signal_fence(std::memory_order_seq_cst);

    if (other_entry != nullptr) other_entry->Ref();
    if (old_entry != nullptr) old_entry->Unref();
    return *this;
  }

#ifndef SWIG
  CensusHandle(CensusHandle&& other) noexcept : entry_(other.get_entry()) {
    other.set_entry(nullptr);
  }

  CensusHandle& operator=(CensusHandle&& other) noexcept {
    if (this == &other) return *this;
    EntryBase* old_entry = get_entry();
    EntryBase* other_entry = other.get_entry();
    set_entry(other_entry);
    other.set_entry(nullptr);

    // As CensusHandle is thread-compatible and not thread-safe, we don't need
    // to handle users concurrently doing reads and writes of a CensusHandle
    // from different threads; users will need to do their own synchronization
    // in that case. We need to make sure that when the sigprof handler
    // interrupts this function and reads the old value of entry_, the code
    // below to unref old_entry cannot have started. atomic_signal_fence (as
    // opposed to an atomic_thread_fence) expresses this constraint directly
    // and allows the compiler to enforce it as efficiently as it can.
    std::atomic_signal_fence(std::memory_order_seq_cst);

    if (old_entry != nullptr) old_entry->Unref();
    return *this;
  }
#endif

  ~CensusHandle() {
    if (EntryBase* e = get_entry(); e != nullptr) {
      // As CensusHandle is thread-compatible and not thread-safe, we don't
      // need to handle users concurrently doing reads and writes of a
      // CensusHandle from different threads; users will need to do their own
      // synchronization in that case. We need to make sure that when the
      // sigprof handler interrupts this function and reads the old value of
      // entry_, the code below to unref `e` cannot have started.
      // atomic_signal_fence (as opposed to an atomic_thread_fence) expresses
      // this constraint directly and allows the compiler to enforce it as
      // efficiently as it can.
      std::atomic_signal_fence(std::memory_order_seq_cst);
      e->Unref();
    }
  }

  void Swap(CensusHandle* other) {
    EntryBase* tmp = get_entry();
    set_entry(other->get_entry());
    other->set_entry(tmp);
  }

  // A swap implementation for CensusHandle that is more efficient than
  // std::swap.
  friend void swap(CensusHandle& h1, CensusHandle& h2) noexcept {
    h1.Swap(&h2);
  }

 private:
  friend class stats_census::CensusHandleManager;
  friend bool ::stats_census::IsDefaultHandle(const CensusHandle&);
  friend class TestEntry;  // Defined/used in censushandle_test.cc.

  // EntryBase is a base class used in the new Census implementation. It
  // implements reference counting logic similar to
  // util/refcount/reference_counted.cc, but uses 8 bytes instead of 16.
  //
  // EntryBase should only be used by Census; other code should use
  // CensusHandle or call functions in the Census library.
  class EntryBase {
   public:
    EntryBase() : rc_(1) {}
    virtual ~EntryBase() {}

   protected:
    // Increments the reference count.
    void Ref() const { rc_.fetch_add(1, std::memory_order_relaxed); }

    // Decrements the reference count and returns true if it is the last
    // reference.
    ABSL_MUST_USE_RESULT bool UnrefNoDelete() const {
      // If we read rc_ and it is 1, this is the only reference. We can avoid
      // doing the decrement in that case, and just delete.
      // Note: load is much faster than fetch_sub, so doing the read makes the
      // last Unref much faster, but the others slightly slower. This is a net
      // cycle win when there are only a few Unref calls per EntryBase object.
      if (rc_.load(std::memory_order_acquire) == 1) {
        return true;
      }
      intptr_t rc_old = rc_.fetch_sub(1, std::memory_order_acq_rel);
      return rc_old == 1;
    }

    // Decrements the reference count and deletes `this` if it is the last
    // reference.
    void Unref() const {
      if (UnrefNoDelete()) delete this;
    }

   private:
    // The reference count.
    mutable std::atomic<intptr_t> rc_;
    friend class ::CensusHandle;
    friend class TestEntry;
  };

#ifndef SWIG
  explicit CensusHandle(std::unique_ptr<EntryBase> e) : entry_(e.release()) {}
#endif
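
  // For illustration only (a sketch, not part of the supported API): as a
  // friend, stats_census::CensusHandleManager is the code expected to mint
  // populated handles through the constructor above, roughly along the lines
  // of
  //
  //   CensusHandle handle(std::make_unique<SomeEntrySubclass>(...));
  //
  // where `SomeEntrySubclass` stands in for a hypothetical EntryBase-derived
  // type owned by the Census library.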
  // Returns the current value of `entry_` using a relaxed atomic load.
  EntryBase* get_entry() const {
    return entry_.load(std::memory_order_relaxed);
  }

  // Sets the current value of `entry_` using a relaxed atomic store.
  void set_entry(EntryBase* e) { entry_.store(e, std::memory_order_relaxed); }

  // Atomic to be read from signal handlers (SIGPROF).
  std::atomic<EntryBase*> entry_ = {nullptr};
  static_assert(std::atomic<EntryBase*>::is_always_lock_free);
};

#endif  // BASE_CENSUSHANDLE_H_