1 // Copyright 2018 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef BASE_TASK_THREAD_POOL_TRACKED_REF_H_
6 #define BASE_TASK_THREAD_POOL_TRACKED_REF_H_
7 
8 #include "base/atomic_ref_count.h"
9 #include "base/check.h"
10 #include "base/gtest_prod_util.h"
11 #include "base/memory/ptr_util.h"
12 #include "base/memory/raw_ptr.h"
13 #include "base/memory/raw_ptr_exclusion.h"
14 #include "base/synchronization/waitable_event.h"
15 #include "base/template_util.h"
16 #include "third_party/abseil-cpp/absl/types/optional.h"
17 
18 namespace base {
19 namespace internal {
20 
21 // TrackedRefs are effectively a ref-counting scheme for objects that have a
22 // single owner.
23 //
24 // Deletion is still controlled by the single owner but ~T() itself will block
25 // until all the TrackedRefs handed by its TrackedRefFactory have been released
26 // (by ~TrackedRef<T>()).
27 //
28 // Just like WeakPtrFactory: TrackedRefFactory<T> should be the last member of T
29 // to ensure ~TrackedRefFactory<T>() runs first in ~T().
30 //
31 // The owner of a T should hence be certain that the last TrackedRefs to T are
32 // already gone or on their way out before destroying it or ~T() will hang
33 // (indicating a bug in the tear down logic -- proper refcounting on the other
34 // hand would result in a leak).
35 //
36 // TrackedRefFactory only makes sense to use on types that are always leaked in
37 // production but need to be torn down in tests (blocking destruction is
38 // impractical in production).
39 //
40 // Why would we ever need such a thing? In thread_pool there is a clear
41 // ownership hierarchy with mostly single owners and little refcounting. In
42 // production nothing is ever torn down so this isn't a problem. In tests
43 // however we must JoinForTesting(). At that point, all the raw back T* refs
44 // used by the worker threads are problematic because they can result in use-
45 // after-frees if a worker outlives the deletion of its corresponding
46 // ThreadPool/TaskTracker/ThreadGroup/etc.
47 //
48 // JoinForTesting() isn't so hard when all workers are managed. But with cleanup
49 // semantics (reclaiming a worker who's been idle for too long) it becomes
50 // tricky because workers can go unaccounted for before they exit their main
51 // (https://crbug.com/827615).
52 //
53 // For that reason and to clearly document the ownership model, thread_pool
54 // uses TrackedRefs.
55 //
56 // On top of being a clearer ownership model than proper refcounting, a hang in
57 // tear down in a test with out-of-order tear down logic is much preferred to
58 // letting its worker thread and associated constructs outlive the test
59 // (potentially resulting in flakes in unrelated tests running later in the same
60 // process).
61 //
62 // Note: While there's nothing thread_pool specific about TrackedRefs it
63 // requires an ownership model where all the TrackedRefs are released on other
64 // threads in sync with ~T(). This isn't a typical use case beyond shutting down
65 // ThreadPool in tests and as such this is kept internal here for now.
66 
67 template <class T>
68 class TrackedRefFactory;
69 
70 // TrackedRef<T> can be used like a T*.
71 template <class T>
72 class TrackedRef {
73  public:
74   // Moveable and copyable.
TrackedRef(TrackedRef<T> && other)75   TrackedRef(TrackedRef<T>&& other)
76       : ptr_(other.ptr_), factory_(other.factory_) {
77     // Null out |other_|'s factory so its destructor doesn't decrement
78     // |live_tracked_refs_|.
79     other.factory_ = nullptr;
80   }
TrackedRef(const TrackedRef<T> & other)81   TrackedRef(const TrackedRef<T>& other)
82       : ptr_(other.ptr_), factory_(other.factory_) {
83     factory_->live_tracked_refs_.Increment();
84   }
85 
86   // Intentionally not assignable for now because it makes the logic slightly
87   // convoluted and it's not a use case that makes sense for the types using
88   // this at the moment.
89   TrackedRef& operator=(TrackedRef<T>&& other) = delete;
90   TrackedRef& operator=(const TrackedRef<T>& other) = delete;
91 
~TrackedRef()92   ~TrackedRef() {
93     if (factory_ && !factory_->live_tracked_refs_.Decrement()) {
94       DCHECK(factory_->ready_to_destroy_);
95       DCHECK(!factory_->ready_to_destroy_->IsSignaled());
96       factory_->ready_to_destroy_->Signal();
97     }
98   }
99 
100   T& operator*() const { return *ptr_; }
101 
102   T* operator->() const { return ptr_; }
103 
104   explicit operator bool() const { return ptr_ != nullptr; }
105 
106   bool operator==(const void* compared_ptr) const {
107     return ptr_ == compared_ptr;
108   }
109 
110   // Returns the raw pointer stored in this TrackedRef. This is occasionally
111   // useful for operations in scope but, as with other smart pointers, it
112   // shouldn't be used beyond the scope of this TrackedRef.
get()113   T* get() const { return ptr_; }
114 
115  private:
116   friend class TrackedRefFactory<T>;
117 
TrackedRef(T * ptr,TrackedRefFactory<T> * factory)118   TrackedRef(T* ptr, TrackedRefFactory<T>* factory)
119       : ptr_(ptr), factory_(factory) {
120     factory_->live_tracked_refs_.Increment();
121   }
122 
123   // This field is not a raw_ptr<> because it was filtered by the rewriter for:
124   // #union
125   RAW_PTR_EXCLUSION T* ptr_;
126   // This field is not a raw_ptr<> because it was filtered by the rewriter for:
127   // #union
128   RAW_PTR_EXCLUSION TrackedRefFactory<T>* factory_;
129 };
130 
131 // TrackedRefFactory<T> should be the last member of T.
132 template <class T>
133 class TrackedRefFactory {
134  public:
TrackedRefFactory(T * ptr)135   explicit TrackedRefFactory(T* ptr)
136       : ptr_(ptr), self_ref_(TrackedRef<T>(ptr_.get(), this)) {
137     DCHECK(ptr_);
138   }
139 
140   TrackedRefFactory(const TrackedRefFactory&) = delete;
141   TrackedRefFactory& operator=(const TrackedRefFactory&) = delete;
142 
~TrackedRefFactory()143   ~TrackedRefFactory() {
144     // Enter the destruction phase.
145     ready_to_destroy_.emplace();
146 
147     // Release self-ref. If this was the last one it will signal the event right
148     // away. Otherwise it establishes an happens-after relationship between
149     // |ready_to_destroy.emplace()| and the eventual
150     // |ready_to_destroy_->Signal()|.
151     self_ref_.reset();
152 
153     ready_to_destroy_->Wait();
154   }
155 
GetTrackedRef()156   TrackedRef<T> GetTrackedRef() {
157     // TrackedRefs cannot be obtained after |live_tracked_refs_| has already
158     // reached zero. In other words, the owner of a TrackedRefFactory shouldn't
159     // vend new TrackedRefs while it's being destroyed (owners of TrackedRefs
160     // may still copy/move their refs around during the destruction phase).
161     DCHECK(!live_tracked_refs_.IsZero());
162     return TrackedRef<T>(ptr_.get(), this);
163   }
164 
165  private:
166   friend class TrackedRef<T>;
167   FRIEND_TEST_ALL_PREFIXES(TrackedRefTest, CopyAndMoveSemantics);
168 
169   const raw_ptr<T> ptr_;
170 
171   // The number of live TrackedRefs vended by this factory.
172   AtomicRefCount live_tracked_refs_{0};
173 
174   // Non-null during the destruction phase. Signaled once |live_tracked_refs_|
175   // reaches 0. Note: making this optional and only initializing it in the
176   // destruction phase avoids keeping a handle open for the entire session.
177   absl::optional<WaitableEvent> ready_to_destroy_;
178 
179   // TrackedRefFactory holds a TrackedRef as well to prevent
180   // |live_tracked_refs_| from ever reaching zero before ~TrackedRefFactory().
181   absl::optional<TrackedRef<T>> self_ref_;
182 };
183 
184 }  // namespace internal
185 }  // namespace base
186 
187 #endif  // BASE_TASK_THREAD_POOL_TRACKED_REF_H_
188