// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/strings/internal/cordz_handle.h"

#include <atomic>
#include <cassert>
#include <vector>

#include "absl/base/internal/raw_logging.h"  // For ABSL_RAW_CHECK
#include "absl/synchronization/mutex.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {

namespace {

struct Queue {
  Queue() = default;

  // Guards the doubly linked delete queue below.
  absl::Mutex mutex;

  // Tail of the delete queue. New handles are appended here; the pointer is
  // also read with an acquire load outside the lock (see IsEmpty()).
  std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};

  // Returns true if this delete queue is empty. This method does not acquire
  // the lock; it performs a 'load acquire' observation on the delete queue
  // tail. It is used inside Delete() to check for the presence of a delete
  // queue without holding the lock. The assumption is that the caller is in
  // the state of 'being deleted', and cannot be newly discovered by a
  // concurrent 'being constructed' snapshot instance. Practically, this means
  // that any such discovery ('find', 'first', 'next', etc.) must have proper
  // 'happens before / after' semantics and atomic fences.
  bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
    return dq_tail.load(std::memory_order_acquire) == nullptr;
  }
};

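// Returns the global delete queue. The Queue is heap allocated on first use
// and intentionally never destroyed, so it remains usable even for handles
// deleted late during program shutdown.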
static Queue* GlobalQueue() {
  static Queue* global_queue = new Queue;
  return global_queue;
}

}  // namespace

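// A handle constructed as a snapshot links itself onto the tail of the global
// delete queue under the queue mutex; non-snapshot handles are not tracked at
// construction time.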
CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
  Queue* global_queue = GlobalQueue();
  if (is_snapshot) {
    MutexLock lock(&global_queue->mutex);
    CordzHandle* dq_tail =
        global_queue->dq_tail.load(std::memory_order_acquire);
    if (dq_tail != nullptr) {
      dq_prev_ = dq_tail;
      dq_tail->dq_next_ = this;
    }
    global_queue->dq_tail.store(this, std::memory_order_release);
  }
}

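// Destroying a snapshot handle unlinks it from the delete queue. If it was at
// the head of the queue, it also deletes every non-snapshot handle queued
// behind it, up to the next snapshot or the end of the queue.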
CordzHandle::~CordzHandle() {
  Queue* global_queue = GlobalQueue();
  if (is_snapshot_) {
    std::vector<CordzHandle*> to_delete;
    {
      MutexLock lock(&global_queue->mutex);
      CordzHandle* next = dq_next_;
      if (dq_prev_ == nullptr) {
        // We were the head of the queue: delete every CordzHandle until we
        // reach either the end of the list or a snapshot handle.
        while (next && !next->is_snapshot_) {
          to_delete.push_back(next);
          next = next->dq_next_;
        }
      } else {
        // Another CordzHandle existed before this one; don't delete anything.
        dq_prev_->dq_next_ = next;
      }
      if (next) {
        next->dq_prev_ = dq_prev_;
      } else {
        global_queue->dq_tail.store(dq_prev_, std::memory_order_release);
      }
    }
    for (CordzHandle* handle : to_delete) {
      delete handle;
    }
  }
}

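// A handle may be deleted directly if it is itself a snapshot, or if the
// global delete queue is observed to be empty (an acquire load without taking
// the queue lock; see Queue::IsEmpty above).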
bool CordzHandle::SafeToDelete() const {
  return is_snapshot_ || GlobalQueue()->IsEmpty();
}

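// Deletes `handle` immediately when that is safe; otherwise appends it to the
// tail of the global delete queue so that its destruction is deferred until
// the snapshot(s) preceding it in the queue have been destroyed.
//
// Illustrative usage, assuming the CordzSnapshot subclass declared in
// cordz_handle.h:
//
//   CordzSnapshot snapshot;       // enters the delete queue as a snapshot
//   CordzHandle::Delete(handle);  // deferred: queued behind `snapshot`
//   // ... `handle` is actually deleted when `snapshot` is destroyed.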
void CordzHandle::Delete(CordzHandle* handle) {
  assert(handle);
  if (handle) {
    Queue* const queue = GlobalQueue();
    if (!handle->SafeToDelete()) {
      MutexLock lock(&queue->mutex);
      CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
      if (dq_tail != nullptr) {
        handle->dq_prev_ = dq_tail;
        dq_tail->dq_next_ = handle;
        queue->dq_tail.store(handle, std::memory_order_release);
        return;
      }
    }
    delete handle;
  }
}

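// Diagnostics only: returns the current contents of the delete queue, ordered
// from tail (most recently queued) to head, collected under the queue mutex.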
std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
  std::vector<const CordzHandle*> handles;
  Queue* global_queue = GlobalQueue();
  MutexLock lock(&global_queue->mutex);
  CordzHandle* dq_tail = global_queue->dq_tail.load(std::memory_order_acquire);
  for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
    handles.push_back(p);
  }
  return handles;
}

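// Returns true if `this` snapshot may safely inspect `handle`. Only snapshot
// handles may inspect, a null handle is trivially safe, and another snapshot
// is never inspected. A queued handle is safe to inspect only if it entered
// the delete queue after this snapshot did, i.e. it is found between the
// queue tail and `this` when walking from the tail towards the head.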
bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
    const CordzHandle* handle) const {
  if (!is_snapshot_) return false;
  if (handle == nullptr) return true;
  if (handle->is_snapshot_) return false;
  bool snapshot_found = false;
  Queue* global_queue = GlobalQueue();
  MutexLock lock(&global_queue->mutex);
  for (const CordzHandle* p = global_queue->dq_tail; p; p = p->dq_prev_) {
    if (p == handle) return !snapshot_found;
    if (p == this) snapshot_found = true;
  }
  ABSL_ASSERT(snapshot_found);  // Assert that 'this' is in delete queue.
  return true;
}

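// Returns the non-snapshot handles that were queued behind this snapshot,
// i.e. handles whose deletion was deferred while this snapshot was alive.
// Returns an empty vector if `this` is not a snapshot.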
std::vector<const CordzHandle*>
CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
  std::vector<const CordzHandle*> handles;
  if (!is_snapshot()) {
    return handles;
  }

  Queue* global_queue = GlobalQueue();
  MutexLock lock(&global_queue->mutex);
  for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
    if (!p->is_snapshot()) {
      handles.push_back(p);
    }
  }
  return handles;
}

}  // namespace cord_internal
ABSL_NAMESPACE_END
}  // namespace absl