// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_BASIC_MEMORY_CHUNK_H_
#define V8_HEAP_BASIC_MEMORY_CHUNK_H_

#include <type_traits>
#include <unordered_map>

#include "src/base/atomic-utils.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/objects/heap-object.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

class BaseSpace;

class BasicMemoryChunk {
 public:
  // Use with std data structures.
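  // Illustrative usage (a sketch, not taken from this file): the hasher lets
  // chunk pointers key hashed containers, e.g.
  //   std::unordered_map<BasicMemoryChunk*, size_t, BasicMemoryChunk::Hasher>
  // Shifting by kPageSizeBits discards the low bits, which are zero for every
  // chunk-aligned pointer, so the remaining bits spread better.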
  struct Hasher {
    size_t operator()(const BasicMemoryChunk* const chunk) const {
      return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
    }
  };

  enum Flag {
    NO_FLAGS = 0u,
    IS_EXECUTABLE = 1u << 0,
    POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
    POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
    // A page in the from-space or a young large page that was not scavenged
    // yet.
    FROM_PAGE = 1u << 3,
    // A page in the to-space or a young large page that was scavenged.
    TO_PAGE = 1u << 4,
    LARGE_PAGE = 1u << 5,
    EVACUATION_CANDIDATE = 1u << 6,
    NEVER_EVACUATE = 1u << 7,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black,
    // and a white-to-grey transition is performed on the written value.
    HAS_PROGRESS_BAR = 1u << 8,

    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
    // from new to old space during evacuation.
    PAGE_NEW_OLD_PROMOTION = 1u << 9,

    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
    // within the new space during evacuation.
    PAGE_NEW_NEW_PROMOTION = 1u << 10,

    // This flag is intended to be used for testing. It works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidate selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE = 1u << 12,

    // The memory chunk is already logically freed, however the actual freeing
    // still has to be performed.
    PRE_FREED = 1u << 13,

    // |POOLED|: When actually freeing this chunk, only uncommit and do not
    // give up the reservation as we still reuse the chunk at some point.
    POOLED = 1u << 14,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    //   has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED = 1u << 15,

    // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
    // on pages is sometimes aborted. The flag is used to avoid repeatedly
    // triggering on the same page.
    COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,

    // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
    // to iterate the page.
    SWEEP_TO_ITERATE = 1u << 17,

    // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
    // enabled.
    INCREMENTAL_MARKING = 1u << 18,
    NEW_SPACE_BELOW_AGE_MARK = 1u << 19,

    // The memory chunk freeing bookkeeping has been performed but the chunk has
    // not yet been freed.
    UNREGISTERED = 1u << 20,

    // The memory chunk belongs to the read-only heap and does not participate
    // in garbage collection. This is used instead of owner for identity
    // checking since read-only chunks have no owner once they are detached.
    READ_ONLY_HEAP = 1u << 21,

    // The memory chunk is pinned in memory and can't be moved. This is likely
    // because there exists a potential pointer to somewhere in the chunk which
    // can't be updated.
    PINNED = 1u << 22,
  };

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  BasicMemoryChunk(size_t size, Address area_start, Address area_end);

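  // BaseAddress() rounds an arbitrary address down to the start of its chunk
  // by masking off the low kPageSizeBits bits. A worked example (assuming a
  // hypothetical kPageSizeBits of 18, i.e. 256 KiB alignment):
  //   BaseAddress(0x12345678) == 0x12345678 & ~0x3FFFF == 0x12340000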
  static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }

  Address address() const { return reinterpret_cast<Address>(this); }

  // Returns the offset of a given address to this page.
  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }

  // Some callers rely on the fact that this can operate on both
  // tagged and aligned object addresses.
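  // Sketch of the mapping: an object at byte offset N from the chunk start
  // gets markbit index N >> kTaggedSizeLog2 (one bit per tagged-size slot);
  // MarkbitIndexToAddress() below inverts this for slot-aligned addresses.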
  inline uint32_t AddressToMarkbitIndex(Address addr) const {
    return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) const {
    return this->address() + (index << kTaggedSizeLog2);
  }

  size_t size() const { return size_; }
  void set_size(size_t size) { size_ = size; }

  Address area_start() const { return area_start_; }

  Address area_end() const { return area_end_; }
  void set_area_end(Address area_end) { area_end_ = area_end; }

  size_t area_size() const {
    return static_cast<size_t>(area_end() - area_start());
  }

  Heap* heap() const {
    DCHECK_NOT_NULL(heap_);
    return heap_;
  }

  // Gets the chunk's owner or null if the space has been detached.
  BaseSpace* owner() const { return owner_; }

  void set_owner(BaseSpace* space) { owner_ = space; }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  void SetFlag(Flag flag) {
    if (access_mode == AccessMode::NON_ATOMIC) {
      flags_ |= flag;
    } else {
      base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
    }
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool IsFlagSet(Flag flag) const {
    return (GetFlags<access_mode>() & flag) != 0;
  }

  void ClearFlag(Flag flag) { flags_ &= ~flag; }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags", the rest retain the current value in |flags_|.
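  // Illustrative call (not taken from this file): SetFlags(TO_PAGE,
  // kIsInYoungGenerationMask) sets TO_PAGE and clears FROM_PAGE in one step
  // while leaving every other flag unchanged.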
  void SetFlags(uintptr_t flags, uintptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  uintptr_t GetFlags() const {
    if (access_mode == AccessMode::NON_ATOMIC) {
      return flags_;
    } else {
      return base::AsAtomicWord::Relaxed_Load(&flags_);
    }
  }

  using Flags = uintptr_t;

  static const Flags kPointersToHereAreInterestingMask =
      POINTERS_TO_HERE_ARE_INTERESTING;

  static const Flags kPointersFromHereAreInterestingMask =
      POINTERS_FROM_HERE_ARE_INTERESTING;

  static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;

  static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;

  static const Flags kIsLargePageMask = LARGE_PAGE;

  static const Flags kSkipEvacuationSlotsRecordingMask =
      kEvacuationCandidateMask | kIsInYoungGenerationMask;

  bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
             IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
    return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool ShouldSkipEvacuationSlotRecording() {
    uintptr_t flags = GetFlags<access_mode>();
    return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
           ((flags & COMPACTION_WAS_ABORTED) == 0);
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
  bool IsToPage() const { return IsFlagSet(TO_PAGE); }
  bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
  bool InYoungGeneration() const {
    return (GetFlags() & kIsInYoungGenerationMask) != 0;
  }
  bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
  bool InNewLargeObjectSpace() const {
    return InYoungGeneration() && IsLargePage();
  }
  bool InOldSpace() const;
  V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;

  bool IsWritable() const {
    // If this is a read-only space chunk but heap_ is non-null, it has not yet
    // been sealed and can be written to.
    return !InReadOnlySpace() || heap_ != nullptr;
  }

  bool IsPinned() const { return IsFlagSet(PINNED); }

  bool Contains(Address addr) const {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the page.
  bool ContainsLimit(Address addr) const {
    return addr >= area_start() && addr <= area_end();
  }

  static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
                                      BaseSpace* owner,
                                      VirtualMemory reservation);

  size_t wasted_memory() const { return wasted_memory_; }
  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
  size_t allocated_bytes() const { return allocated_bytes_; }

  static const intptr_t kSizeOffset = MemoryChunkLayout::kSizeOffset;
  static const intptr_t kFlagsOffset = MemoryChunkLayout::kFlagsOffset;
  static const intptr_t kHeapOffset = MemoryChunkLayout::kHeapOffset;
  static const intptr_t kAreaStartOffset = MemoryChunkLayout::kAreaStartOffset;
  static const intptr_t kAreaEndOffset = MemoryChunkLayout::kAreaEndOffset;
  static const intptr_t kMarkingBitmapOffset =
      MemoryChunkLayout::kMarkingBitmapOffset;
  static const size_t kHeaderSize =
      MemoryChunkLayout::kBasicMemoryChunkHeaderSize;

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static BasicMemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
  }

  // Only works if the object is in the first kPageSize of the MemoryChunk.
  static BasicMemoryChunk* FromHeapObject(HeapObject o) {
    return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
  }

  template <AccessMode mode>
  ConcurrentBitmap<mode>* marking_bitmap() const {
    return static_cast<ConcurrentBitmap<mode>*>(
        Bitmap::FromAddress(address() + kMarkingBitmapOffset));
  }

  Address HighWaterMark() { return address() + high_water_mark_; }

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == kNullAddress) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively belongs
    // to another chunk. See the comment to Page::FromAllocationAreaAddress.
    BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
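    // Retry loop: on failure, compare_exchange_weak reloads the current value
    // into |old_mark| (it may also fail spuriously), so the loop exits once
    // the CAS succeeds or another thread has already published a mark that is
    // at least as high.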
    while ((new_mark > old_mark) &&
           !chunk->high_water_mark_.compare_exchange_weak(
               old_mark, new_mark, std::memory_order_acq_rel)) {
    }
  }

  VirtualMemory* reserved_memory() { return &reservation_; }

  void ResetAllocationStatistics() {
    allocated_bytes_ = area_size();
    wasted_memory_ = 0;
  }

  void IncreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    allocated_bytes_ += bytes;
  }

  void DecreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    DCHECK_GE(allocated_bytes(), bytes);
    allocated_bytes_ -= bytes;
  }

#ifdef THREAD_SANITIZER
  // Perform a dummy acquire load to tell TSAN that there is no data race in
  // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
  // release store.
  void SynchronizedHeapLoad();
#endif

 protected:
  // Overall size of the chunk, including the header and guards.
  size_t size_;

  uintptr_t flags_ = NO_FLAGS;

  // TODO(v8:7464): Find a way to remove this.
  // This goes against the spirit of BasicMemoryChunk, but until C++14/17 is
  // the default it needs to live here because MemoryChunk is not standard
  // layout under C++11.
  Heap* heap_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // Bytes allocated on the page, which includes all objects on the page and
  // the linear allocation area.
  size_t allocated_bytes_;
  // Freed memory that was not added to the free list.
  size_t wasted_memory_;

  // Assuming the initial allocation on a page is sequential, this tracks the
  // highest number of bytes ever allocated on the page.
  std::atomic<intptr_t> high_water_mark_;

  // The space owning this memory chunk.
  std::atomic<BaseSpace*> owner_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;

  friend class BasicMemoryChunkValidator;
  friend class ConcurrentMarkingState;
  friend class MajorMarkingState;
  friend class MajorAtomicMarkingState;
  friend class MajorNonAtomicMarkingState;
  friend class MemoryAllocator;
  friend class MinorMarkingState;
  friend class MinorNonAtomicMarkingState;
  friend class PagedSpace;
};

STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_BASIC_MEMORY_CHUNK_H_