// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MEMORY_CHUNK_H_
#define V8_HEAP_MEMORY_CHUNK_H_

#include <atomic>

#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/slot-set.h"

namespace v8 {
namespace internal {

class CodeObjectRegistry;
class FreeListCategory;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk : public BasicMemoryChunk {
 public:
  // |kDone|: The page state when sweeping is complete or sweeping must not be
  //   performed on that page. Sweeper threads that are done with their work
  //   will set this value and not touch the page anymore.
  // |kPending|: This page is ready for parallel sweeping.
  // |kInProgress|: This page is currently swept by a sweeper thread.
  enum class ConcurrentSweepingState : intptr_t {
    kDone,
    kPending,
    kInProgress,
  };
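
  // Illustrative lifecycle (not part of this header): a sweeper thread moves
  // a page from kPending to kInProgress when it picks the page up, and to
  // kDone once it has finished, e.g.
  //   chunk->set_concurrent_sweeping_state(
  //       MemoryChunk::ConcurrentSweepingState::kInProgress);
  //   // ... sweep the page ...
  //   chunk->set_concurrent_sweeping_state(
  //       MemoryChunk::ConcurrentSweepingState::kDone);
  //   DCHECK(chunk->SweepingDone());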

  static const size_t kHeaderSize = MemoryChunkLayout::kMemoryChunkHeaderSize;

  static const intptr_t kOldToNewSlotSetOffset =
      MemoryChunkLayout::kSlotSetOffset;

  // Page size in bytes.  This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum number of nested code memory modification scopes.
  static const int kMaxWriteUnprotectCounter = 3;

  MemoryChunk(Heap* heap, BaseSpace* space, size_t size, Address area_start,
              Address area_end, VirtualMemory reservation,
              Executability executable, PageSize page_size);

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return cast(BasicMemoryChunk::FromAddress(a));
  }

  // Only works if the object is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromHeapObject(HeapObject o) {
    return cast(BasicMemoryChunk::FromHeapObject(o));
  }
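
  // Illustrative (not part of this header): both lookups rely on the chunk
  // alignment described in the class comment; masking the low bits off an
  // address yields the owning chunk. A typical call site:
  //   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
  //   if (!chunk->SweepingDone()) { /* wait for, or help, the sweeper */ }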

  static MemoryChunk* cast(BasicMemoryChunk* chunk) {
    SLOW_DCHECK(!chunk->InReadOnlySpace());
    return static_cast<MemoryChunk*>(chunk);
  }

  static const MemoryChunk* cast(const BasicMemoryChunk* chunk) {
    SLOW_DCHECK(!chunk->InReadOnlySpace());
    return static_cast<const MemoryChunk*>(chunk);
  }

  size_t buckets() const { return SlotSet::BucketsForSize(size()); }

  void SetOldGenerationPageFlags(bool is_marking);
  void SetYoungGenerationPageFlags(bool is_marking);

  static inline void MoveExternalBackingStoreBytes(
      ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
      size_t amount);

  void DiscardUnusedMemory(Address addr, size_t size);

  base::Mutex* mutex() { return mutex_; }

  void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
    concurrent_sweeping_ = state;
  }

  ConcurrentSweepingState concurrent_sweeping_state() {
    return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
  }

  bool SweepingDone() const {
    return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
  }

  template <RememberedSetType type>
  bool ContainsSlots() {
    return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
           invalidated_slots<type>() != nullptr;
  }

  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  SlotSet* slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
    return slot_set_[type];
  }

  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  TypedSlotSet* typed_slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
    return typed_slot_set_[type];
  }
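
  // Illustrative (not part of this header): |access_mode| selects between an
  // acquire-load, needed while another thread may be installing the set
  // concurrently, and a plain read for single-threaded phases, e.g.
  //   SlotSet* slots = chunk->slot_set<OLD_TO_NEW>();  // atomic by default
  //   SlotSet* fast =
  //       chunk->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>();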

  template <RememberedSetType type>
  V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
  SlotSet* AllocateSweepingSlotSet();
  SlotSet* AllocateSlotSet(SlotSet** slot_set);

  // Not safe to be called concurrently.
  template <RememberedSetType type>
  void ReleaseSlotSet();
  void ReleaseSlotSet(SlotSet** slot_set);

  template <RememberedSetType type>
  TypedSlotSet* AllocateTypedSlotSet();
  // Not safe to be called concurrently.
  template <RememberedSetType type>
  void ReleaseTypedSlotSet();

  template <RememberedSetType type>
  InvalidatedSlots* AllocateInvalidatedSlots();
  template <RememberedSetType type>
  void ReleaseInvalidatedSlots();
  template <RememberedSetType type>
  V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object);
  void InvalidateRecordedSlots(HeapObject object);
  template <RememberedSetType type>
  bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
  template <RememberedSetType type>
  InvalidatedSlots* invalidated_slots() {
    return invalidated_slots_[type];
  }

  void AllocateYoungGenerationBitmap();
  void ReleaseYoungGenerationBitmap();

  int FreeListsLength();

  // Approximate amount of physical memory committed for this chunk.
  V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() const;

  class ProgressBar& ProgressBar() {
    return progress_bar_;
  }
  const class ProgressBar& ProgressBar() const { return progress_bar_; }

  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const {
    return external_backing_store_bytes_[type];
  }

  Space* owner() const {
    return reinterpret_cast<Space*>(BasicMemoryChunk::owner());
  }

  // Gets the chunk's allocation space, potentially dealing with a null owner_
  // (like read-only chunks have).
  inline AllocationSpace owner_identity() const;

  // Emits a memory barrier. For TSAN builds the other thread needs to perform
  // MemoryChunk::synchronized_heap() to simulate the barrier.
  void InitializationMemoryFence();

  static PageAllocator::Permission GetCodeModificationPermission() {
    return FLAG_write_code_using_rwx ? PageAllocator::kReadWriteExecute
                                     : PageAllocator::kReadWrite;
  }
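
  // Illustrative (not part of this header): with FLAG_write_code_using_rwx the
  // page stays executable while being written; otherwise it is flipped to RW
  // and back. A sketch of a caller:
  //   chunk->SetCodeModificationPermissions();  // uses the permission above
  //   // ... patch code on the page ...
  //   chunk->SetDefaultCodePermissions();       // restore read+execute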

  V8_EXPORT_PRIVATE void SetReadable();
  V8_EXPORT_PRIVATE void SetReadAndExecutable();

  V8_EXPORT_PRIVATE void SetCodeModificationPermissions();
  V8_EXPORT_PRIVATE void SetDefaultCodePermissions();

  heap::ListNode<MemoryChunk>& list_node() { return list_node_; }
  const heap::ListNode<MemoryChunk>& list_node() const { return list_node_; }

  CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }

  PossiblyEmptyBuckets* possibly_empty_buckets() {
    return &possibly_empty_buckets_;
  }

  // Release memory allocated by the chunk, except that which is needed by
  // read-only space chunks.
  void ReleaseAllocatedMemoryNeededForWritableChunk();

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  ObjectStartBitmap* object_start_bitmap() { return &object_start_bitmap_; }
#endif

 protected:
  // Release all memory allocated by the chunk. Should be called when the
  // memory chunk is about to be freed.
  void ReleaseAllAllocatedMemory();

  // Sets the requested page permissions only if the write unprotect counter
  // has reached 0.
  void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
      PageAllocator::Permission permission);

  template <AccessMode mode>
  ConcurrentBitmap<mode>* young_generation_bitmap() const {
    return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
  }
#ifdef DEBUG
  static void ValidateOffsets(MemoryChunk* chunk);
#endif

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
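
  // Illustrative (not part of this header): for a large page spanning
  // 3 * kPageSize bytes, ceil(size() / kPageSize) == 3, so slot_set_[type]
  // points at an array of three slot sets instead of a single one.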

  // Used by the marker to keep track of the scanning progress in large objects
  // that have a progress bar and are scanned in increments.
  class ProgressBar progress_bar_;

  // Count of bytes marked black on page.
  std::atomic<intptr_t> live_byte_count_;

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
  InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];

  base::Mutex* mutex_;

  std::atomic<ConcurrentSweepingState> concurrent_sweeping_;

  base::Mutex* page_protection_change_mutex_;

  // This field is only relevant for code pages. It tracks the number of
  // times a component requested this page to be read+writeable. The
  // counter is decremented when a component resets to read+executable.
  // If Value() == 0 => The memory is readable and executable.
  // If Value() >= 1 => The memory is readable and writable (and maybe
  // executable).
  // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
  // excessive nesting of scopes.
  // All executable MemoryChunks are allocated rw based on the assumption that
  // they will be used immediately for an allocation. They are initialized
  // with the number of open CodeSpaceMemoryModificationScopes. The caller
  // that triggers the page allocation is responsible for decrementing the
  // counter.
  uintptr_t write_unprotect_counter_;
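
  // Illustrative nesting (not part of this header), bounded by
  // kMaxWriteUnprotectCounter:
  //   counter 0 -> 1: page becomes writable (SetCodeModificationPermissions)
  //   counter 1 -> 2: nested scope opens, permissions unchanged
  //   counter 2 -> 1: inner scope closes, page still writable
  //   counter 1 -> 0: page back to read+executable (SetReadAndExecutable)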

  // Tracks off-heap memory used by this memory chunk.
  std::atomic<size_t> external_backing_store_bytes_[kNumTypes];

  heap::ListNode<MemoryChunk> list_node_;

  FreeListCategory** categories_;

  std::atomic<intptr_t> young_generation_live_byte_count_;
  Bitmap* young_generation_bitmap_;

  CodeObjectRegistry* code_object_registry_;

  PossiblyEmptyBuckets possibly_empty_buckets_;

  ActiveSystemPages active_system_pages_;

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  ObjectStartBitmap object_start_bitmap_;
#endif

 private:
  friend class ConcurrentMarkingState;
  friend class MajorMarkingState;
  friend class MajorAtomicMarkingState;
  friend class MajorNonAtomicMarkingState;
  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
  friend class MinorMarkingState;
  friend class MinorNonAtomicMarkingState;
  friend class PagedSpace;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MEMORY_CHUNK_H_