// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include <algorithm>
#include <cinttypes>
#include <utility>

#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
#include "src/sanitizer/msan.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/ostreams.h"

namespace v8 {
namespace internal {

// These checks are here to ensure that the lower 32 bits of any real heap
// object can't overlap with the lower 32 bits of the cleared weak reference
// value; it is therefore enough to compare only the lower 32 bits of a
// MaybeObject to figure out whether it is a cleared weak reference.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);

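// Allocates one FreeListCategory per category of the owning space's free
// list. Counterpart to ReleaseFreeListCategories() below.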
void Page::AllocateFreeListCategories() {
  DCHECK_NULL(categories_);
  categories_ =
      new FreeListCategory*[owner()->free_list()->number_of_categories()]();
  for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
       i++) {
    DCHECK_NULL(categories_[i]);
    categories_[i] = new FreeListCategory();
  }
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
       i++) {
    categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
  }
}

void Page::ReleaseFreeListCategories() {
  if (categories_ != nullptr) {
    for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
         i++) {
      if (categories_[i] != nullptr) {
        delete categories_[i];
        categories_[i] = nullptr;
      }
    }
    delete[] categories_;
    categories_ = nullptr;
  }
}

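// Converts a new-space page in place into an old-space page: all flags are
// cleared, the owner is switched to the old space, and the page is
// re-initialized and registered there (page promotion, e.g. when an entire
// page of surviving objects is moved to old space).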
Page* Page::ConvertNewToOld(Page* old_page) {
  DCHECK(old_page);
  DCHECK(old_page->InNewSpace());
  OldSpace* old_space = old_page->heap()->old_space();
  old_page->set_owner(old_space);
  old_page->SetFlags(0, static_cast<uintptr_t>(~0));
  Page* new_page = old_space->InitializePage(old_page);
  old_space->AddPage(new_page);
  return new_page;
}

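// Detaches the OLD_TO_NEW slot set into sweeping_slot_set_ so it can be
// consumed during sweeping; slots recorded afterwards go into a fresh
// OLD_TO_NEW set and are merged back in MergeOldToNewRememberedSets().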
void Page::MoveOldToNewRememberedSetForSweeping() {
  CHECK_NULL(sweeping_slot_set_);
  sweeping_slot_set_ = slot_set_[OLD_TO_NEW];
  slot_set_[OLD_TO_NEW] = nullptr;
}

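// Merges the slots recorded in the regular OLD_TO_NEW set into the sweeping
// slot set, then reinstalls the combined set as OLD_TO_NEW.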
void Page::MergeOldToNewRememberedSets() {
  if (sweeping_slot_set_ == nullptr) return;

  if (slot_set_[OLD_TO_NEW]) {
    RememberedSet<OLD_TO_NEW>::Iterate(
        this,
        [this](MaybeObjectSlot slot) {
          Address address = slot.address();
          RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
          return KEEP_SLOT;
        },
        SlotSet::KEEP_EMPTY_BUCKETS);

    ReleaseSlotSet<OLD_TO_NEW>();
  }

  CHECK_NULL(slot_set_[OLD_TO_NEW]);
  slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
  sweeping_slot_set_ = nullptr;
}

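// Sums the available bytes over all free-list categories of this page.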
size_t Page::AvailableInFreeList() {
  size_t sum = 0;
  ForAllFreeListCategories([&sum](FreeListCategory* category) {
    sum += category->available();
  });
  return sum;
}

#ifdef DEBUG
namespace {
// Skips fillers, starting from the given filler, until the end address.
// Returns the first address after the skipped fillers.
Address SkipFillers(HeapObject filler, Address end) {
  Address addr = filler.address();
  while (addr < end) {
    filler = HeapObject::FromAddress(addr);
    CHECK(filler.IsFreeSpaceOrFiller());
    addr = filler.address() + filler.Size();
  }
  return addr;
}
}  // anonymous namespace
#endif  // DEBUG

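// Releases the unused committed tail of the page behind the high water mark
// back to the memory allocator and returns the number of bytes freed.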
size_t Page::ShrinkToHighWaterMark() {
  // Shrinking only makes sense outside of the CodeRange, where we don't care
  // about address space fragmentation.
  VirtualMemory* reservation = reserved_memory();
  if (!reservation->IsReserved()) return 0;

  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject filler = HeapObject::FromAddress(HighWaterMark());
  if (filler.address() == area_end()) return 0;
  CHECK(filler.IsFreeSpaceOrFiller());
  // Ensure that no objects were allocated in the [filler, area_end) region.
  DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
  // Ensure that no objects will be allocated on this page.
  DCHECK_EQ(0u, AvailableInFreeList());

  // Ensure that slot sets are empty. Otherwise the buckets for the shrunk
  // area would not be freed when deallocating this page.
  DCHECK_NULL(slot_set<OLD_TO_NEW>());
  DCHECK_NULL(slot_set<OLD_TO_OLD>());
  DCHECK_NULL(sweeping_slot_set());

  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
                            MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    heap()->CreateFillerObjectAt(
        filler.address(),
        static_cast<int>(area_end() - filler.address() - unused),
        ClearRecordedSlots::kNo);
    heap()->memory_allocator()->PartialFreeMemory(
        this, address() + size() - unused, unused, area_end() - unused);
    if (filler.address() != area_end()) {
      CHECK(filler.IsFreeSpaceOrFiller());
      CHECK_EQ(filler.address() + filler.Size(), area_end());
    }
  }
  return unused;
}

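// Helpers for "black areas": address ranges whose mark bits are set eagerly
// so that objects allocated there during incremental marking are treated as
// live. Create* sets the bits and adds to live bytes, Destroy* undoes this;
// the *Background variants use the atomic marking state, presumably so they
// are safe to call from background threads.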
void Page::CreateBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
}

void Page::CreateBlackAreaBackground(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::AtomicMarkingState* marking_state =
      heap()->incremental_marking()->atomic_marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  heap()->incremental_marking()->IncrementLiveBytesBackground(
      this, static_cast<intptr_t>(end - start));
}

void Page::DestroyBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}

void Page::DestroyBlackAreaBackground(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::AtomicMarkingState* marking_state =
      heap()->incremental_marking()->atomic_marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  heap()->incremental_marking()->IncrementLiveBytesBackground(
      this, -static_cast<intptr_t>(end - start));
}

// -----------------------------------------------------------------------------
// Space and SpaceWithLinearArea implementation

void Space::AddAllocationObserver(AllocationObserver* observer) {
  allocation_counter_.AddAllocationObserver(observer);
}

void Space::RemoveAllocationObserver(AllocationObserver* observer) {
  allocation_counter_.RemoveAllocationObserver(observer);
}

void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }

void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }

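// Computes the limit of the linear allocation area, given its [start, end)
// bounds and the minimum size that must fit. When allocation observers are
// active, the limit is lowered so that the next observer step forces a
// slow-path allocation and can therefore be observed.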
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
                                          size_t min_size) {
  DCHECK_GE(end - start, min_size);

  if (heap()->inline_allocation_disabled()) {
    // Fit the requested area exactly.
    return start + min_size;
  } else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
    // Ensure there are no unaccounted allocations.
    DCHECK_EQ(allocation_info_.start(), allocation_info_.top());

    // Generated code may allocate inline from the linear allocation area.
    // To make sure we can observe these allocations, we use a lower limit.
    size_t step = allocation_counter_.NextBytes();
    DCHECK_NE(step, 0);
    size_t rounded_step =
        RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
    // Use uint64_t to avoid overflow on 32-bit platforms.
    uint64_t step_end =
        static_cast<uint64_t>(start) + Max(min_size, rounded_step);
    uint64_t new_end = Min(step_end, static_cast<uint64_t>(end));
    return static_cast<Address>(new_end);
  } else {
    // The entire node can be used as the linear allocation area.
    return end;
  }
}

void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
  DCHECK(!((origin != AllocationOrigin::kGC) &&
           (heap()->isolate()->current_vm_state() == GC)));
  allocations_origins_[static_cast<int>(origin)]++;
}

void SpaceWithLinearArea::PrintAllocationsOrigins() {
  PrintIsolate(
      heap()->isolate(),
      "Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
      name(), allocations_origins_[0], allocations_origins_[1],
      allocations_origins_[2]);
}

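// Closes the buffer: the unused part is filled with a filler object so the
// heap stays iterable, the buffer is reset to an invalid state, and the old
// allocation info is handed back to the caller.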
LinearAllocationArea LocalAllocationBuffer::CloseAndMakeIterable() {
  if (IsValid()) {
    MakeIterable();
    const LinearAllocationArea old_info = allocation_info_;
    allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
    return old_info;
  }
  return LinearAllocationArea(kNullAddress, kNullAddress);
}

void LocalAllocationBuffer::MakeIterable() {
  if (IsValid()) {
    heap_->CreateFillerObjectAtBackground(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearFreedMemoryMode::kDontClearFreedMemory);
  }
}

LocalAllocationBuffer::LocalAllocationBuffer(
    Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
    : heap_(heap),
      allocation_info_(allocation_info) {
  if (IsValid()) {
    heap_->CreateFillerObjectAtBackground(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearFreedMemoryMode::kDontClearFreedMemory);
  }
}

LocalAllocationBuffer::LocalAllocationBuffer(LocalAllocationBuffer&& other)
    V8_NOEXCEPT {
  *this = std::move(other);
}

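// Move-assignment transfers the allocation area to this buffer and resets the
// moved-from buffer to an invalid state so it no longer refers to the area.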
LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    LocalAllocationBuffer&& other) V8_NOEXCEPT {
  heap_ = other.heap_;
  allocation_info_ = other.allocation_info_;

  other.allocation_info_.Reset(kNullAddress, kNullAddress);
  return *this;
}

void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
  if (!allocation_counter_.IsStepInProgress()) {
    AdvanceAllocationObservers();
    Space::AddAllocationObserver(observer);
    UpdateInlineAllocationLimit(0);
  } else {
    Space::AddAllocationObserver(observer);
  }
}

void SpaceWithLinearArea::RemoveAllocationObserver(
    AllocationObserver* observer) {
  if (!allocation_counter_.IsStepInProgress()) {
    AdvanceAllocationObservers();
    Space::RemoveAllocationObserver(observer);
    UpdateInlineAllocationLimit(0);
  } else {
    Space::RemoveAllocationObserver(observer);
  }
}

void SpaceWithLinearArea::PauseAllocationObservers() {
  AdvanceAllocationObservers();
  Space::PauseAllocationObservers();
}

void SpaceWithLinearArea::ResumeAllocationObservers() {
  Space::ResumeAllocationObservers();
  MarkLabStartInitialized();
  UpdateInlineAllocationLimit(0);
}

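// Accounts for all bytes allocated in the LAB since the last observer step
// and moves the LAB start up to the current top.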
void SpaceWithLinearArea::AdvanceAllocationObservers() {
  if (allocation_info_.top() &&
      allocation_info_.start() != allocation_info_.top()) {
    allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
                                                   allocation_info_.start());
    MarkLabStartInitialized();
  }
}

void SpaceWithLinearArea::MarkLabStartInitialized() {
  allocation_info_.MoveStartToTop();
  if (identity() == NEW_SPACE) {
    heap()->new_space()->MoveOriginalTopForward();

#if DEBUG
    heap()->VerifyNewSpaceTop();
#endif
  }
}

// Perform an allocation step when the step is reached. size_in_bytes is the
// actual size needed for the object (required for InvokeAllocationObservers).
// aligned_size_in_bytes is the size of the object including the filler right
// before it to reach the right alignment (required to DCHECK the start of the
// object). allocation_size is the size of the actual allocation which needs to
// be used for the accounting. It can be different from aligned_size_in_bytes in
// PagedSpace::AllocateRawAligned, where we have to overallocate in order to be
// able to align the allocation afterwards.
void SpaceWithLinearArea::InvokeAllocationObservers(
    Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes,
    size_t allocation_size) {
  DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
  DCHECK_LE(aligned_size_in_bytes, allocation_size);
  DCHECK(size_in_bytes == aligned_size_in_bytes ||
         aligned_size_in_bytes == allocation_size);

  if (!SupportsAllocationObserver() || !allocation_counter_.IsActive()) return;

  if (allocation_size >= allocation_counter_.NextBytes()) {
    // Only the first object in a LAB should reach the next step.
    DCHECK_EQ(soon_object,
              allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);

    // Right now the LAB only contains that one object.
    DCHECK_EQ(allocation_info_.top() + allocation_size - aligned_size_in_bytes,
              allocation_info_.limit());

    // Ensure that there is a valid object at soon_object: write a filler
    // there so the heap stays iterable while the observers run.
    if (identity() == CODE_SPACE) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(soon_object);
      heap()->UnprotectAndRegisterMemoryChunk(chunk);
    }
    heap_->CreateFillerObjectAt(soon_object, static_cast<int>(size_in_bytes),
                                ClearRecordedSlots::kNo);

#if DEBUG
    // Ensure that allocation_info_ isn't modified during one of the
    // AllocationObserver::Step methods.
    LinearAllocationArea saved_allocation_info = allocation_info_;
#endif

    // Run AllocationObserver::Step through the AllocationCounter.
    allocation_counter_.InvokeAllocationObservers(soon_object, size_in_bytes,
                                                  allocation_size);

    // Ensure that start/top/limit didn't change.
    DCHECK_EQ(saved_allocation_info.start(), allocation_info_.start());
    DCHECK_EQ(saved_allocation_info.top(), allocation_info_.top());
    DCHECK_EQ(saved_allocation_info.limit(), allocation_info_.limit());
  }

  DCHECK_IMPLIES(allocation_counter_.IsActive(),
                 (allocation_info_.limit() - allocation_info_.start()) <
                     allocation_counter_.NextBytes());
}

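// Returns the combined length of all free-list categories on this chunk
// (a diagnostic helper).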
int MemoryChunk::FreeListsLength() {
  int length = 0;
  for (int cat = kFirstCategory; cat <= owner()->free_list()->last_category();
       cat++) {
    if (categories_[cat] != nullptr) {
      length += categories_[cat]->FreeListLength();
    }
  }
  return length;
}

}  // namespace internal
}  // namespace v8