// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MEMORY_CHUNK_H_
#define V8_HEAP_MEMORY_CHUNK_H_

#include <atomic>

#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/slot-set.h"

namespace v8 {
namespace internal {

class CodeObjectRegistry;
class FreeListCategory;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk : public BasicMemoryChunk {
 public:
  // |kDone|: The page state when sweeping is complete or when sweeping must
  //   not be performed on that page. Sweeper threads that are done with their
  //   work will set this value and not touch the page anymore.
  // |kPending|: This page is ready for parallel sweeping.
  // |kInProgress|: This page is currently being swept by a sweeper thread.
  enum class ConcurrentSweepingState : intptr_t {
    kDone,
    kPending,
    kInProgress,
  };
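
  // Illustrative sketch (not the actual Sweeper implementation) of how a
  // sweeper thread is expected to drive these states; SweepPage() is a
  // hypothetical helper used only for this example:
  //
  //   chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kPending);
  //   // ... later, on a sweeper thread:
  //   chunk->set_concurrent_sweeping_state(
  //       ConcurrentSweepingState::kInProgress);
  //   SweepPage(chunk);  // hypothetical
  //   chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
  //   DCHECK(chunk->SweepingDone());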

  static const size_t kHeaderSize = MemoryChunkLayout::kMemoryChunkHeaderSize;

  static const intptr_t kOldToNewSlotSetOffset =
      MemoryChunkLayout::kSlotSetOffset;

  // Page size in bytes.  This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum number of nested code memory modification scopes.
  static const int kMaxWriteUnprotectCounter = 3;

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return cast(BasicMemoryChunk::FromAddress(a));
  }

  // Only works if the object is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromHeapObject(HeapObject o) {
    return cast(BasicMemoryChunk::FromHeapObject(o));
  }

  static MemoryChunk* cast(BasicMemoryChunk* chunk) {
    SLOW_DCHECK(!chunk->InReadOnlySpace());
    return static_cast<MemoryChunk*>(chunk);
  }

  static const MemoryChunk* cast(const BasicMemoryChunk* chunk) {
    SLOW_DCHECK(!chunk->InReadOnlySpace());
    return static_cast<const MemoryChunk*>(chunk);
  }

  size_t buckets() const { return SlotSet::BucketsForSize(size()); }

  void SetOldGenerationPageFlags(bool is_marking);
  void SetYoungGenerationPageFlags(bool is_marking);

  static inline void MoveExternalBackingStoreBytes(
      ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
      size_t amount);

  void DiscardUnusedMemory(Address addr, size_t size);

  base::Mutex* mutex() { return mutex_; }

  void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
    concurrent_sweeping_ = state;
  }

  ConcurrentSweepingState concurrent_sweeping_state() {
    return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
  }

  bool SweepingDone() {
    return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
  }

  template <RememberedSetType type>
  bool ContainsSlots() {
    return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
           invalidated_slots<type>() != nullptr;
  }

  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  SlotSet* slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
    return slot_set_[type];
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  SlotSet* sweeping_slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&sweeping_slot_set_);
    return sweeping_slot_set_;
  }

  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  TypedSlotSet* typed_slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
    return typed_slot_set_[type];
  }
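
  // Illustrative usage sketch for the remembered-set accessors above; the
  // template parameters select the set (e.g. OLD_TO_NEW) and the memory
  // ordering of the load:
  //
  //   SlotSet* slots = chunk->slot_set<OLD_TO_NEW>();  // acquire load
  //   SlotSet* relaxed =
  //       chunk->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>();
  //   if (chunk->ContainsSlots<OLD_TO_NEW>()) { /* ... */ }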

  template <RememberedSetType type>
  V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
  SlotSet* AllocateSweepingSlotSet();
  SlotSet* AllocateSlotSet(SlotSet** slot_set);

  // Not safe to be called concurrently.
  template <RememberedSetType type>
  void ReleaseSlotSet();
  void ReleaseSlotSet(SlotSet** slot_set);
  void ReleaseSweepingSlotSet();
  template <RememberedSetType type>
  TypedSlotSet* AllocateTypedSlotSet();
  // Not safe to be called concurrently.
  template <RememberedSetType type>
  void ReleaseTypedSlotSet();

  template <RememberedSetType type>
  InvalidatedSlots* AllocateInvalidatedSlots();
  template <RememberedSetType type>
  void ReleaseInvalidatedSlots();
  template <RememberedSetType type>
  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
  void InvalidateRecordedSlots(HeapObject object);
  template <RememberedSetType type>
  bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
  template <RememberedSetType type>
  InvalidatedSlots* invalidated_slots() {
    return invalidated_slots_[type];
  }

  void AllocateYoungGenerationBitmap();
  void ReleaseYoungGenerationBitmap();

  int FreeListsLength();

  // Approximate amount of physical memory committed for this chunk.
  V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();

  size_t ProgressBar() {
    DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
    return progress_bar_.load(std::memory_order_acquire);
  }

  bool TrySetProgressBar(size_t old_value, size_t new_value) {
    DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
    return progress_bar_.compare_exchange_strong(old_value, new_value,
                                                 std::memory_order_acq_rel);
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      progress_bar_.store(0, std::memory_order_release);
    }
  }
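
  // Illustrative sketch (not the marker's actual code) of advancing the
  // progress bar with the CAS interface above, assuming the chunk has
  // HAS_PROGRESS_BAR set and |new_offset| is the scan position reached:
  //
  //   size_t current = chunk->ProgressBar();
  //   while (current < new_offset &&
  //          !chunk->TrySetProgressBar(current, new_offset)) {
  //     current = chunk->ProgressBar();
  //   }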

  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
    return external_backing_store_bytes_[type];
  }

  Space* owner() const {
    return reinterpret_cast<Space*>(BasicMemoryChunk::owner());
  }

  // Gets the chunk's allocation space, potentially dealing with a null owner_
  // (like read-only chunks have).
  inline AllocationSpace owner_identity() const;

  // Emits a memory barrier. For TSAN builds the other thread needs to perform
  // MemoryChunk::synchronized_heap() to simulate the barrier.
  void InitializationMemoryFence();

  V8_EXPORT_PRIVATE void SetReadable();
  V8_EXPORT_PRIVATE void SetReadAndExecutable();
  V8_EXPORT_PRIVATE void SetReadAndWritable();

  void SetDefaultCodePermissions() {
    if (FLAG_jitless) {
      SetReadable();
    } else {
      SetReadAndExecutable();
    }
  }

  heap::ListNode<MemoryChunk>& list_node() { return list_node_; }
  const heap::ListNode<MemoryChunk>& list_node() const { return list_node_; }

  CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }

  PossiblyEmptyBuckets* possibly_empty_buckets() {
    return &possibly_empty_buckets_;
  }

  // Release memory allocated by the chunk, except that which is needed by
  // read-only space chunks.
  void ReleaseAllocatedMemoryNeededForWritableChunk();

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  ObjectStartBitmap* object_start_bitmap() { return &object_start_bitmap_; }
#endif

 protected:
  static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
                                 Executability executable);

  // Release all memory allocated by the chunk. Should be called when the
  // memory chunk is about to be freed.
  void ReleaseAllAllocatedMemory();

  // Sets the requested page permissions only if the write unprotect counter
  // has reached 0.
  void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
      PageAllocator::Permission permission);

  template <AccessMode mode>
  ConcurrentBitmap<mode>* young_generation_bitmap() const {
    return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
  }
#ifdef DEBUG
  static void ValidateOffsets(MemoryChunk* chunk);
#endif

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];

  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  std::atomic<size_t> progress_bar_;

  // Count of bytes marked black on page.
  std::atomic<intptr_t> live_byte_count_;

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* sweeping_slot_set_;
  TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
  InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];

  base::Mutex* mutex_;

  std::atomic<ConcurrentSweepingState> concurrent_sweeping_;

  base::Mutex* page_protection_change_mutex_;

  // This field is only relevant for code pages. It records the number of
  // times a component requested this page to be read+writable. The
  // counter is decremented when a component resets to read+executable.
  // If Value() == 0 => The memory is read and executable.
  // If Value() >= 1 => The memory is read and writable (and maybe executable).
  // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
  // excessive nesting of scopes.
  // All executable MemoryChunks are allocated rw based on the assumption that
  // they will be used immediately for an allocation. They are initialized
  // with the number of open CodeSpaceMemoryModificationScopes. The caller
  // that triggers the page allocation is responsible for decrementing the
  // counter.
  uintptr_t write_unprotect_counter_;
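
  // Illustrative sketch of the intended pairing for the counter above (the
  // real bookkeeping lives in the Set* implementations); WriteCode() is a
  // hypothetical writer used only for this example:
  //
  //   chunk->SetReadAndWritable();    // counter 0 -> 1, page becomes RW
  //   WriteCode(chunk);               // hypothetical
  //   chunk->SetReadAndExecutable();  // counter 1 -> 0, page back to RX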

  // Tracks off-heap memory used by this memory chunk.
  std::atomic<size_t> external_backing_store_bytes_[kNumTypes];

  heap::ListNode<MemoryChunk> list_node_;

  FreeListCategory** categories_;

  std::atomic<intptr_t> young_generation_live_byte_count_;
  Bitmap* young_generation_bitmap_;

  CodeObjectRegistry* code_object_registry_;

  PossiblyEmptyBuckets possibly_empty_buckets_;

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  ObjectStartBitmap object_start_bitmap_;
#endif

 private:
  friend class ConcurrentMarkingState;
  friend class MajorMarkingState;
  friend class MajorAtomicMarkingState;
  friend class MajorNonAtomicMarkingState;
  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
  friend class MinorMarkingState;
  friend class MinorNonAtomicMarkingState;
  friend class PagedSpace;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MEMORY_CHUNK_H_