// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_BASIC_MEMORY_CHUNK_H_
#define V8_HEAP_BASIC_MEMORY_CHUNK_H_

#include <type_traits>
#include <unordered_map>

#include "src/base/atomic-utils.h"
#include "src/base/flags.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/objects/heap-object.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

class BaseSpace;

class BasicMemoryChunk {
 public:
  // Use with std data structures.
  struct Hasher {
    size_t operator()(const BasicMemoryChunk* const chunk) const {
      return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
    }
  };
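  // Because chunks are kAlignment-aligned, the low kPageSizeBits bits of a
  // chunk pointer are always zero; shifting them out yields a compact,
  // per-chunk hash value. Example use with a std container:
  //   std::unordered_set<const BasicMemoryChunk*, BasicMemoryChunk::Hasher>
  //       visited_chunks;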

  enum Flag : uintptr_t {
    NO_FLAGS = 0u,
    IS_EXECUTABLE = 1u << 0,
    POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
    POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
    // A page in the from-space or a young large page that was not scavenged
    // yet.
    FROM_PAGE = 1u << 3,
    // A page in the to-space or a young large page that was scavenged.
    TO_PAGE = 1u << 4,
    LARGE_PAGE = 1u << 5,
    EVACUATION_CANDIDATE = 1u << 6,
    NEVER_EVACUATE = 1u << 7,

    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
    // from new to old space during evacuation.
    PAGE_NEW_OLD_PROMOTION = 1u << 9,

    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
    // within the new space during evacuation.
    PAGE_NEW_NEW_PROMOTION = 1u << 10,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidate selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE = 1u << 12,

    // The memory chunk is already logically freed; however, the actual
    // freeing still has to be performed.
    PRE_FREED = 1u << 13,

    // |POOLED|: When actually freeing this chunk, only uncommit and do not
    // give up the reservation as we still reuse the chunk at some point.
    POOLED = 1u << 14,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    //   has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED = 1u << 15,

    // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
    // on pages is sometimes aborted. The flag is used to avoid repeatedly
    // triggering on the same page.
    COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,

    // |INCREMENTAL_MARKING|: Indicates whether incremental marking is
    // currently enabled.
    INCREMENTAL_MARKING = 1u << 17,
    NEW_SPACE_BELOW_AGE_MARK = 1u << 18,

    // The memory chunk freeing bookkeeping has been performed but the chunk
    // has not yet been freed.
    UNREGISTERED = 1u << 19,

    // The memory chunk belongs to the read-only heap and does not participate
    // in garbage collection. This is used instead of owner for identity
    // checking since read-only chunks have no owner once they are detached.
    READ_ONLY_HEAP = 1u << 20,

    // The memory chunk is pinned in memory and can't be moved. This is likely
    // because there exists a potential pointer to somewhere in the chunk which
    // can't be updated.
    PINNED = 1u << 21,

    // This page belongs to a shared heap.
    IN_SHARED_HEAP = 1u << 22,
  };

  using MainThreadFlags = base::Flags<Flag, uintptr_t>;

  static constexpr MainThreadFlags kAllFlagsMask = ~MainThreadFlags(NO_FLAGS);

  static constexpr MainThreadFlags kPointersToHereAreInterestingMask =
      POINTERS_TO_HERE_ARE_INTERESTING;

  static constexpr MainThreadFlags kPointersFromHereAreInterestingMask =
      POINTERS_FROM_HERE_ARE_INTERESTING;

  static constexpr MainThreadFlags kEvacuationCandidateMask =
      EVACUATION_CANDIDATE;

  static constexpr MainThreadFlags kIsInYoungGenerationMask =
      MainThreadFlags(FROM_PAGE) | MainThreadFlags(TO_PAGE);

  static constexpr MainThreadFlags kIsLargePageMask = LARGE_PAGE;

  static constexpr MainThreadFlags kSkipEvacuationSlotsRecordingMask =
      MainThreadFlags(kEvacuationCandidateMask) |
      MainThreadFlags(kIsInYoungGenerationMask);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  BasicMemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
                   Address area_start, Address area_end,
                   VirtualMemory reservation);

  static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
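  // BaseAddress() rounds an arbitrary address down to the start of its chunk
  // by masking off the low kPageSizeBits bits. FromAddress() and
  // FromHeapObject() below rely on this to recover the owning chunk from an
  // interior pointer.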

  Address address() const { return reinterpret_cast<Address>(this); }

  // Returns the offset of a given address to this page.
  inline size_t Offset(Address a) const {
    return static_cast<size_t>(a - address());
  }

  // Some callers rely on the fact that this can operate on both
  // tagged and aligned object addresses.
  inline uint32_t AddressToMarkbitIndex(Address addr) const {
    return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) const {
    return this->address() + (index << kTaggedSizeLog2);
  }
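  // The two conversions above are inverses for tagged-size-aligned addresses:
  // each mark bit covers one tagged word, so index i maps back to
  // address() + (i << kTaggedSizeLog2).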

  size_t size() const { return size_; }
  void set_size(size_t size) { size_ = size; }

  Address area_start() const { return area_start_; }

  Address area_end() const { return area_end_; }
  void set_area_end(Address area_end) { area_end_ = area_end; }

  size_t area_size() const {
    return static_cast<size_t>(area_end() - area_start());
  }

  Heap* heap() const {
    DCHECK_NOT_NULL(heap_);
    return heap_;
  }

  // Gets the chunk's owner or null if the space has been detached.
  BaseSpace* owner() const { return owner_; }

  void set_owner(BaseSpace* space) { owner_ = space; }

  void SetFlag(Flag flag) { main_thread_flags_ |= flag; }
  bool IsFlagSet(Flag flag) const { return main_thread_flags_ & flag; }
  void ClearFlag(Flag flag) {
    main_thread_flags_ = main_thread_flags_.without(flag);
  }
  void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
  // Set or clear multiple flags at a time. `mask` indicates which flags
  // should be replaced with new `flags`.
  void SetFlags(MainThreadFlags flags, MainThreadFlags mask = kAllFlagsMask) {
    main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
  }
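  // Masking semantics: bits selected by `mask` are overwritten with the
  // corresponding bits of `flags`; bits outside `mask` are preserved. For
  // example,
  //   SetFlags(NO_FLAGS, kIsInYoungGenerationMask);
  // clears FROM_PAGE and TO_PAGE while leaving all other flags untouched.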

  // Return all current flags.
  MainThreadFlags GetFlags() const { return main_thread_flags_; }

 private:
  bool InReadOnlySpaceRaw() const { return IsFlagSet(READ_ONLY_HEAP); }

 public:
  bool InReadOnlySpace() const {
#ifdef THREAD_SANITIZER
    // This is needed because TSAN does not process the memory fence
    // emitted after page initialization.
    SynchronizedHeapLoad();
#endif
    return IsFlagSet(READ_ONLY_HEAP);
  }

  bool NeverEvacuate() const { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool CanAllocate() const {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  bool IsEvacuationCandidate() const {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool ShouldSkipEvacuationSlotRecording() const {
    MainThreadFlags flags = GetFlags();
    return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
           ((flags & COMPACTION_WAS_ABORTED) == 0);
  }

  Executability executable() const {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
  bool IsToPage() const { return IsFlagSet(TO_PAGE); }
  bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
  bool InYoungGeneration() const {
    return (GetFlags() & kIsInYoungGenerationMask) != 0;
  }
  bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
  bool InNewLargeObjectSpace() const {
    return InYoungGeneration() && IsLargePage();
  }
  bool InOldSpace() const;
  V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;

  bool InSharedHeap() const { return IsFlagSet(IN_SHARED_HEAP); }

  bool IsWritable() const {
    // If this is a read-only space chunk but heap_ is non-null, it has not yet
    // been sealed and can be written to.
    return !InReadOnlySpace() || heap_ != nullptr;
  }

  bool IsPinned() const { return IsFlagSet(PINNED); }

  bool Contains(Address addr) const {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the page.
  bool ContainsLimit(Address addr) const {
    return addr >= area_start() && addr <= area_end();
  }

  size_t wasted_memory() const { return wasted_memory_; }
  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
  size_t allocated_bytes() const { return allocated_bytes_; }

  static const intptr_t kSizeOffset = MemoryChunkLayout::kSizeOffset;
  static const intptr_t kFlagsOffset = MemoryChunkLayout::kFlagsOffset;
  static const intptr_t kHeapOffset = MemoryChunkLayout::kHeapOffset;
  static const intptr_t kAreaStartOffset = MemoryChunkLayout::kAreaStartOffset;
  static const intptr_t kAreaEndOffset = MemoryChunkLayout::kAreaEndOffset;
  static const intptr_t kMarkingBitmapOffset =
      MemoryChunkLayout::kMarkingBitmapOffset;
  static const size_t kHeaderSize =
      MemoryChunkLayout::kBasicMemoryChunkHeaderSize;

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static BasicMemoryChunk* FromAddress(Address a) {
    DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
    return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
  }

  // Only works if the object is in the first kPageSize of the MemoryChunk.
  static BasicMemoryChunk* FromHeapObject(HeapObject o) {
    DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
    return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
  }
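  // Illustrative use: given an on-heap object, recover its chunk to query
  // per-page state, e.g.
  //   BasicMemoryChunk::FromHeapObject(obj)->InYoungGeneration();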

  template <AccessMode mode>
  ConcurrentBitmap<mode>* marking_bitmap() const {
    return static_cast<ConcurrentBitmap<mode>*>(
        Bitmap::FromAddress(address() + kMarkingBitmapOffset));
  }

  Address HighWaterMark() const { return address() + high_water_mark_; }

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == kNullAddress) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively belongs
    // to another chunk. See the comment to Page::FromAllocationAreaAddress.
    BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
    while ((new_mark > old_mark) &&
           !chunk->high_water_mark_.compare_exchange_weak(
               old_mark, new_mark, std::memory_order_acq_rel)) {
    }
  }
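  // The compare-exchange loop above implements a monotonic maximum:
  // high_water_mark_ only ever grows. A failed CAS reloads the current value
  // into old_mark before retrying, and the loop exits once old_mark is
  // already >= new_mark.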

  VirtualMemory* reserved_memory() { return &reservation_; }

  void ResetAllocationStatistics() {
    allocated_bytes_ = area_size();
    wasted_memory_ = 0;
  }

  void IncreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    allocated_bytes_ += bytes;
  }

  void DecreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    DCHECK_GE(allocated_bytes(), bytes);
    allocated_bytes_ -= bytes;
  }

#ifdef THREAD_SANITIZER
  // Perform a dummy acquire load to tell TSAN that there is no data race in
  // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
  // release store.
  void SynchronizedHeapLoad() const;
#endif

 protected:
  // Overall size of the chunk, including the header and guards.
  size_t size_;

  // Flags that are only mutable from the main thread when no concurrent
  // component (e.g. marker, sweeper) is running.
  MainThreadFlags main_thread_flags_{NO_FLAGS};

  // TODO(v8:7464): Find a way to remove this.
  // This goes against the spirit of BasicMemoryChunk, but until C++14/17 is
  // the default it needs to live here because MemoryChunk is not standard
  // layout under C++11.
  Heap* heap_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // Bytes allocated on the page, including all objects on the page and the
  // linear allocation area.
  size_t allocated_bytes_;
  // Freed memory that was not added to the free list.
  size_t wasted_memory_;

  // Assuming the initial allocation on a page is sequential, this tracks the
  // highest number of bytes ever allocated on the page.
  std::atomic<intptr_t> high_water_mark_;

  // The space owning this memory chunk.
  std::atomic<BaseSpace*> owner_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;

  friend class BasicMemoryChunkValidator;
  friend class ConcurrentMarkingState;
  friend class MajorMarkingState;
  friend class MajorAtomicMarkingState;
  friend class MajorNonAtomicMarkingState;
  friend class MemoryAllocator;
  friend class MinorMarkingState;
  friend class MinorNonAtomicMarkingState;
  friend class PagedSpace;
};

DEFINE_OPERATORS_FOR_FLAGS(BasicMemoryChunk::MainThreadFlags)

STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_BASIC_MEMORY_CHUNK_H_