// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_TRACKED_REF_H_
#define BASE_TASK_THREAD_POOL_TRACKED_REF_H_

#include <optional>

#include "base/atomic_ref_count.h"
#include "base/check.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ptr.h"
#include "base/synchronization/waitable_event.h"

namespace base {
namespace internal {

// TrackedRefs are effectively a ref-counting scheme for objects that have a
// single owner.
//
// Deletion is still controlled by the single owner but ~T() itself will block
// until all the TrackedRefs handed out by its TrackedRefFactory have been
// released (by ~TrackedRef<T>()).
//
// Just like WeakPtrFactory: TrackedRefFactory<T> should be the last member of T
// to ensure ~TrackedRefFactory<T>() runs first in ~T().
//
// The owner of a T should hence be certain that the last TrackedRefs to T are
// already gone or on their way out before destroying it or ~T() will hang
// (indicating a bug in the tear down logic -- proper refcounting on the other
// hand would result in a leak).
//
// TrackedRefFactory only makes sense to use on types that are always leaked in
// production but need to be torn down in tests (blocking destruction is
// impractical in production).
//
// Why would we ever need such a thing? In thread_pool there is a clear
// ownership hierarchy with mostly single owners and little refcounting. In
// production nothing is ever torn down so this isn't a problem. In tests
// however we must JoinForTesting(). At that point, all the raw back T* refs
// used by the worker threads are problematic because they can result in use-
// after-frees if a worker outlives the deletion of its corresponding
// ThreadPool/TaskTracker/ThreadGroup/etc.
//
// JoinForTesting() isn't so hard when all workers are managed. But with cleanup
// semantics (reclaiming a worker who's been idle for too long) it becomes
// tricky because workers can go unaccounted for before they exit their main
// (https://crbug.com/827615).
//
// For that reason and to clearly document the ownership model, thread_pool
// uses TrackedRefs.
//
// On top of being a clearer ownership model than proper refcounting, a hang in
// tear down in a test with out-of-order tear down logic is much preferred to
// letting its worker thread and associated constructs outlive the test
// (potentially resulting in flakes in unrelated tests running later in the same
// process).
//
// Note: While there's nothing thread_pool specific about TrackedRefs it
// requires an ownership model where all the TrackedRefs are released on other
// threads in sync with ~T(). This isn't a typical use case beyond shutting down
// ThreadPool in tests and as such this is kept internal here for now.
66 template <class T>
67 class TrackedRefFactory;
68 
69 // TrackedRef<T> can be used like a T*.
70 template <class T>
71 class TrackedRef {
72  public:
73   // Moveable and copyable.
TrackedRef(TrackedRef<T> && other)74   TrackedRef(TrackedRef<T>&& other)
75       : ptr_(other.ptr_), factory_(other.factory_) {
76     // Null out |other_|'s factory so its destructor doesn't decrement
77     // |live_tracked_refs_|.
78     other.factory_ = nullptr;
79   }
TrackedRef(const TrackedRef<T> & other)80   TrackedRef(const TrackedRef<T>& other)
81       : ptr_(other.ptr_), factory_(other.factory_) {
82     factory_->live_tracked_refs_.Increment();
83   }
84 
85   // Intentionally not assignable for now because it makes the logic slightly
86   // convoluted and it's not a use case that makes sense for the types using
87   // this at the moment.
88   TrackedRef& operator=(TrackedRef<T>&& other) = delete;
89   TrackedRef& operator=(const TrackedRef<T>& other) = delete;
90 
~TrackedRef()91   ~TrackedRef() {
92     if (factory_ && !factory_->live_tracked_refs_.Decrement()) {
93       DCHECK(factory_->ready_to_destroy_);
94       DCHECK(!factory_->ready_to_destroy_->IsSignaled());
95       factory_->ready_to_destroy_->Signal();
96     }
97   }
98 
99   T& operator*() const { return *ptr_; }
100 
101   T* operator->() const { return ptr_; }
102 
103   explicit operator bool() const { return ptr_ != nullptr; }
104 
105   bool operator==(const void* compared_ptr) const {
106     return ptr_ == compared_ptr;
107   }
108 
109   // Returns the raw pointer stored in this TrackedRef. This is occasionally
110   // useful for operations in scope but, as with other smart pointers, it
111   // shouldn't be used beyond the scope of this TrackedRef.
get()112   T* get() const { return ptr_; }
113 
114  private:
115   friend class TrackedRefFactory<T>;
116 
TrackedRef(T * ptr,TrackedRefFactory<T> * factory)117   TrackedRef(T* ptr, TrackedRefFactory<T>* factory)
118       : ptr_(ptr), factory_(factory) {
119     factory_->live_tracked_refs_.Increment();
120   }
121 
122   raw_ptr<T, LeakedDanglingUntriaged> ptr_;
123   raw_ptr<TrackedRefFactory<T>, LeakedDanglingUntriaged> factory_;
124 };
125 
126 // TrackedRefFactory<T> should be the last member of T.
127 template <class T>
128 class TrackedRefFactory {
129  public:
TrackedRefFactory(T * ptr)130   explicit TrackedRefFactory(T* ptr)
131       : ptr_(ptr), self_ref_(TrackedRef<T>(ptr_.get(), this)) {
132     DCHECK(ptr_);
133   }
134 
135   TrackedRefFactory(const TrackedRefFactory&) = delete;
136   TrackedRefFactory& operator=(const TrackedRefFactory&) = delete;
137 
~TrackedRefFactory()138   ~TrackedRefFactory() {
139     // Enter the destruction phase.
140     ready_to_destroy_.emplace();
141 
142     // Release self-ref. If this was the last one it will signal the event right
143     // away. Otherwise it establishes an happens-after relationship between
144     // |ready_to_destroy.emplace()| and the eventual
145     // |ready_to_destroy_->Signal()|.
146     self_ref_.reset();
147 
148     ready_to_destroy_->Wait();
149   }
150 
GetTrackedRef()151   TrackedRef<T> GetTrackedRef() {
152     // TrackedRefs cannot be obtained after |live_tracked_refs_| has already
153     // reached zero. In other words, the owner of a TrackedRefFactory shouldn't
154     // vend new TrackedRefs while it's being destroyed (owners of TrackedRefs
155     // may still copy/move their refs around during the destruction phase).
156     DCHECK(!live_tracked_refs_.IsZero());
157     return TrackedRef<T>(ptr_.get(), this);
158   }
159 
160  private:
161   friend class TrackedRef<T>;
162   FRIEND_TEST_ALL_PREFIXES(TrackedRefTest, CopyAndMoveSemantics);
163 
164   const raw_ptr<T> ptr_;
165 
166   // The number of live TrackedRefs vended by this factory.
167   AtomicRefCount live_tracked_refs_{0};
168 
169   // Non-null during the destruction phase. Signaled once |live_tracked_refs_|
170   // reaches 0. Note: making this optional and only initializing it in the
171   // destruction phase avoids keeping a handle open for the entire session.
172   std::optional<WaitableEvent> ready_to_destroy_;
173 
174   // TrackedRefFactory holds a TrackedRef as well to prevent
175   // |live_tracked_refs_| from ever reaching zero before ~TrackedRefFactory().
176   std::optional<TrackedRef<T>> self_ref_;
177 };

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_TRACKED_REF_H_