// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_LOCAL_HEAP_H_
#define V8_HEAP_LOCAL_HEAP_H_

#include <atomic>
#include <memory>

#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator.h"

namespace v8 {
namespace internal {

class Heap;
class Safepoint;
class LocalHandles;

// LocalHeap is used by the GC to track all threads with heap access in order
// to stop them before performing a collection. LocalHeaps can be either Parked
// or Running and are in Parked mode when initialized.
//   Running: Thread is allowed to access the heap but needs to give the GC the
//            chance to run regularly by manually invoking Safepoint(). The
//            thread can be parked using ParkedScope.
//   Parked:  Heap access is not allowed, so the GC will not stop this thread
//            for a collection. Useful when threads do not need heap access for
//            some time or for blocking operations like locking a mutex.
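//
// A minimal, hypothetical usage sketch for a background thread. The helpers
// RunBackgroundWork, DoHeapWork and WaitForMoreWork are illustrative only and
// are not part of this API; Safepoint() and ParkedScope are.
//
//   void RunBackgroundWork(LocalHeap* local_heap) {
//     while (DoHeapWork(local_heap)) {  // Running: heap access is allowed.
//       local_heap->Safepoint();        // Regularly yield to a requested GC.
//     }
//     ParkedScope parked(local_heap);   // Parked: no heap access from here on.
//     WaitForMoreWork();                // Safe to block without stalling GC.
//   }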
class V8_EXPORT_PRIVATE LocalHeap {
 public:
  using GCEpilogueCallback = void(void* data);

  explicit LocalHeap(
      Heap* heap, ThreadKind kind,
      std::unique_ptr<PersistentHandles> persistent_handles = nullptr);
  ~LocalHeap();

  // Frequently invoked by the local thread to check whether a safepoint was
  // requested from the main thread.
  void Safepoint() {
    DCHECK(AllowSafepoints::IsAllowed());
    ThreadState current = state_.load_relaxed();

    if (V8_UNLIKELY(current.IsRunningWithSlowPathFlag())) {
      SafepointSlowPath();
    }
  }

  LocalHandles* handles() { return handles_.get(); }

  template <typename T>
  Handle<T> NewPersistentHandle(T object) {
    if (!persistent_handles_) {
      EnsurePersistentHandles();
    }
    return persistent_handles_->NewHandle(object);
  }

  template <typename T>
  Handle<T> NewPersistentHandle(Handle<T> object) {
    return NewPersistentHandle(*object);
  }

  template <typename T>
  MaybeHandle<T> NewPersistentMaybeHandle(MaybeHandle<T> maybe_handle) {
    Handle<T> handle;
    if (maybe_handle.ToHandle(&handle)) {
      return NewPersistentHandle(handle);
    }
    return kNullMaybeHandle;
  }

  void AttachPersistentHandles(
      std::unique_ptr<PersistentHandles> persistent_handles);
  std::unique_ptr<PersistentHandles> DetachPersistentHandles();
#ifdef DEBUG
  bool HasPersistentHandles() { return !!persistent_handles_; }
  bool ContainsPersistentHandle(Address* location);
  bool ContainsLocalHandle(Address* location);
  bool IsHandleDereferenceAllowed();
#endif

  bool IsParked();
  bool IsRunning();

  Heap* heap() { return heap_; }
  Heap* AsHeap() { return heap(); }

  MarkingBarrier* marking_barrier() { return marking_barrier_.get(); }
  ConcurrentAllocator* old_space_allocator() {
    return old_space_allocator_.get();
  }
  ConcurrentAllocator* code_space_allocator() {
    return code_space_allocator_.get();
  }
  ConcurrentAllocator* shared_old_space_allocator() {
    return shared_old_space_allocator_.get();
  }

  void RegisterCodeObject(Handle<Code> code) {
    heap()->RegisterCodeObject(code);
  }

  // Mark/Unmark linear allocation areas black. Used for black allocation.
  void MarkLinearAllocationAreaBlack();
  void UnmarkLinearAllocationArea();

  // Give up linear allocation areas. Used for mark-compact GC.
  void FreeLinearAllocationArea();

  // Free all shared LABs. Used by the shared mark-compact GC.
  void FreeSharedLinearAllocationArea();

  // Creates filler objects in the linear allocation areas. Verification
  // requires an iterable heap.
  void MakeLinearAllocationAreaIterable();

  // Fetches a pointer to the local heap from thread-local storage.
  // It is intended to be used in handle and write barrier code where it is
  // difficult to get a pointer to the current LocalHeap otherwise.
  // The result may be a nullptr if there is no local heap instance associated
  // with the current thread.
  static LocalHeap* Current();
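  //
  // Hypothetical call-site sketch: code that may also run on threads without
  // a LocalHeap should check the result for nullptr before using it:
  //   if (LocalHeap* local_heap = LocalHeap::Current()) {
  //     local_heap->Safepoint();
  //   }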

#ifdef DEBUG
  void VerifyCurrent();
#endif

  // Allocate an uninitialized object.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationType allocation,
      AllocationOrigin origin = AllocationOrigin::kRuntime,
      AllocationAlignment alignment = kTaggedAligned);

  // Allocates an uninitialized object and crashes if the object cannot be
  // allocated.
  V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
      int size_in_bytes, AllocationType allocation,
      AllocationOrigin origin = AllocationOrigin::kRuntime,
      AllocationAlignment alignment = kTaggedAligned);

  inline void CreateFillerObjectAt(Address addr, int size,
                                   ClearRecordedSlots clear_slots_mode);

  bool is_main_thread() const { return is_main_thread_; }
  bool deserialization_complete() const {
    return heap_->deserialization_complete();
  }
  ReadOnlySpace* read_only_space() { return heap_->read_only_space(); }

  // Requests a GC and blocks until the collection finishes.
  bool TryPerformCollection();

  // Adds a callback that is invoked with the given |data| after each GC.
  // The callback is invoked on the main thread before any background thread
  // resumes. The callback must not allocate or make any other calls that
  // can trigger GC.
  void AddGCEpilogueCallback(GCEpilogueCallback* callback, void* data);
  void RemoveGCEpilogueCallback(GCEpilogueCallback* callback, void* data);
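  //
  // Hedged registration sketch: OnGCEpilogue, Counters, counters and
  // local_heap are illustrative names, not part of this API.
  //   void OnGCEpilogue(void* data) {
  //     // Runs on the main thread in a safepoint; must not allocate or
  //     // otherwise trigger GC.
  //     static_cast<Counters*>(data)->gc_count++;
  //   }
  //   local_heap->AddGCEpilogueCallback(&OnGCEpilogue, &counters);
  //   ...
  //   local_heap->RemoveGCEpilogueCallback(&OnGCEpilogue, &counters);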

  // Used to make SetUpMainThread() available to unit tests.
  void SetUpMainThreadForTesting();

 private:
  using ParkedBit = base::BitField8<bool, 0, 1>;
  using SafepointRequestedBit = ParkedBit::Next<bool, 1>;
  using CollectionRequestedBit = SafepointRequestedBit::Next<bool, 1>;

  class ThreadState final {
   public:
    static constexpr ThreadState Parked() {
      return ThreadState(ParkedBit::kMask);
    }
    static constexpr ThreadState Running() { return ThreadState(0); }

    constexpr bool IsRunning() const { return !ParkedBit::decode(raw_state_); }

    constexpr ThreadState SetRunning() const V8_WARN_UNUSED_RESULT {
      return ThreadState(raw_state_ & ~ParkedBit::kMask);
    }

    constexpr bool IsParked() const { return ParkedBit::decode(raw_state_); }

    constexpr ThreadState SetParked() const V8_WARN_UNUSED_RESULT {
      return ThreadState(ParkedBit::kMask | raw_state_);
    }

    constexpr bool IsSafepointRequested() const {
      return SafepointRequestedBit::decode(raw_state_);
    }

    constexpr bool IsCollectionRequested() const {
      return CollectionRequestedBit::decode(raw_state_);
    }

    constexpr bool IsRunningWithSlowPathFlag() const {
      return IsRunning() && (raw_state_ & (SafepointRequestedBit::kMask |
                                           CollectionRequestedBit::kMask));
    }

   private:
    constexpr explicit ThreadState(uint8_t value) : raw_state_(value) {}

    constexpr uint8_t raw() const { return raw_state_; }

    uint8_t raw_state_;

    friend class LocalHeap;
  };

  class AtomicThreadState final {
   public:
    constexpr explicit AtomicThreadState(ThreadState state)
        : raw_state_(state.raw()) {}

    bool CompareExchangeStrong(ThreadState& expected, ThreadState updated) {
      return raw_state_.compare_exchange_strong(expected.raw_state_,
                                                updated.raw());
    }

    bool CompareExchangeWeak(ThreadState& expected, ThreadState updated) {
      return raw_state_.compare_exchange_weak(expected.raw_state_,
                                              updated.raw());
    }

    ThreadState SetParked() {
      return ThreadState(raw_state_.fetch_or(ParkedBit::kMask));
    }

    ThreadState SetSafepointRequested() {
      return ThreadState(raw_state_.fetch_or(SafepointRequestedBit::kMask));
    }

    ThreadState ClearSafepointRequested() {
      return ThreadState(raw_state_.fetch_and(~SafepointRequestedBit::kMask));
    }

    ThreadState SetCollectionRequested() {
      return ThreadState(raw_state_.fetch_or(CollectionRequestedBit::kMask));
    }

    ThreadState ClearCollectionRequested() {
      return ThreadState(raw_state_.fetch_and(~CollectionRequestedBit::kMask));
    }

    ThreadState load_relaxed() const {
      return ThreadState(raw_state_.load(std::memory_order_relaxed));
    }

   private:
    std::atomic<uint8_t> raw_state_;
  };

  // Slow path of allocation that performs a GC and then retries the
  // allocation in a loop.
  Address PerformCollectionAndAllocateAgain(int object_size,
                                            AllocationType type,
                                            AllocationOrigin origin,
                                            AllocationAlignment alignment);

  void Park() {
    DCHECK(AllowSafepoints::IsAllowed());
    ThreadState expected = ThreadState::Running();
    if (!state_.CompareExchangeWeak(expected, ThreadState::Parked())) {
      ParkSlowPath();
    }
  }

  void Unpark() {
    DCHECK(AllowSafepoints::IsAllowed());
    ThreadState expected = ThreadState::Parked();
    if (!state_.CompareExchangeWeak(expected, ThreadState::Running())) {
      UnparkSlowPath();
    }
  }

  void ParkSlowPath();
  void UnparkSlowPath();
  void EnsureParkedBeforeDestruction();
  void SafepointSlowPath();
  void SleepInSafepoint();
  void SleepInUnpark();

  void EnsurePersistentHandles();

  void InvokeGCEpilogueCallbacksInSafepoint();

  void SetUpMainThread();
  void SetUp();

  Heap* heap_;
  bool is_main_thread_;

  AtomicThreadState state_;

  bool allocation_failed_;
  bool main_thread_parked_;

  LocalHeap* prev_;
  LocalHeap* next_;

  std::unique_ptr<LocalHandles> handles_;
  std::unique_ptr<PersistentHandles> persistent_handles_;
  std::unique_ptr<MarkingBarrier> marking_barrier_;

  std::vector<std::pair<GCEpilogueCallback*, void*>> gc_epilogue_callbacks_;

  std::unique_ptr<ConcurrentAllocator> old_space_allocator_;
  std::unique_ptr<ConcurrentAllocator> code_space_allocator_;
  std::unique_ptr<ConcurrentAllocator> shared_old_space_allocator_;

  friend class CollectionBarrier;
  friend class ConcurrentAllocator;
  friend class GlobalSafepoint;
  friend class IsolateSafepoint;
  friend class Heap;
  friend class Isolate;
  friend class ParkedScope;
  friend class SafepointScope;
  friend class UnparkedScope;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_LOCAL_HEAP_H_