// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/new-spaces.h"

#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

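// Initializes a freshly allocated chunk as a semispace page: tags it as a
// to-page or from-page, sets the young-generation page flags, and, when the
// minor mark-compactor is enabled, allocates and clears its marking bitmap.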
Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
  bool in_to_space = (id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
  Page* page = static_cast<Page*>(chunk);
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->list_node().Initialize();
#ifdef ENABLE_MINOR_MC
  if (FLAG_minor_mc) {
    page->AllocateYoungGenerationBitmap();
    heap()
        ->minor_mark_compact_collector()
        ->non_atomic_marking_state()
        ->ClearLiveness(page);
  }
#endif  // ENABLE_MINOR_MC
  page->InitializationMemoryFence();
  return page;
}

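// Makes the committed page list match current_capacity_ exactly: surplus
// pages are returned to the allocator's pool and missing pages are
// allocated, cleared, and filled with a filler object.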
bool SemiSpace::EnsureCurrentCapacity() {
  if (is_committed()) {
    const int expected_pages =
        static_cast<int>(current_capacity_ / Page::kPageSize);
    MemoryChunk* current_page = first_page();
    int actual_pages = 0;

    // First walk the page list, counting up to expected_pages pages if that
    // many exist.
    while (current_page != nullptr && actual_pages < expected_pages) {
      actual_pages++;
      current_page = current_page->list_node().next();
    }

    // Free all overallocated pages which are behind current_page.
    while (current_page) {
      MemoryChunk* next_current = current_page->list_node().next();
      memory_chunk_list_.Remove(current_page);
      // Clear new space flags to avoid this page being treated as a new
      // space page that is potentially being swept.
      current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
          current_page);
      current_page = next_current;
    }

    // Add more pages if we have fewer than expected_pages.
    IncrementalMarking::NonAtomicMarkingState* marking_state =
        heap()->incremental_marking()->non_atomic_marking_state();
    while (actual_pages < expected_pages) {
      actual_pages++;
      current_page =
          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
              MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
              NOT_EXECUTABLE);
      if (current_page == nullptr) return false;
      DCHECK_NOT_NULL(current_page);
      memory_chunk_list_.PushBack(current_page);
      marking_state->ClearLiveness(current_page);
      current_page->SetFlags(first_page()->GetFlags(),
                             static_cast<uintptr_t>(Page::kCopyAllFlags));
      heap()->CreateFillerObjectAt(current_page->area_start(),
                                   static_cast<int>(current_page->area_size()),
                                   ClearRecordedSlots::kNo);
    }
  }
  return true;
}

// -----------------------------------------------------------------------------
// SemiSpace implementation

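// Records the page-aligned capacity bounds; no memory is committed here.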
void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
  DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
  minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  current_capacity_ = minimum_capacity_;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  committed_ = false;
}

void SemiSpace::TearDown() {
  // Properly uncommit memory to keep the allocator counters in sync.
  if (is_committed()) {
    Uncommit();
  }
  current_capacity_ = maximum_capacity_ = 0;
}

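// Commits pooled pages for the current capacity. On allocation failure the
// pages added so far are rewound and false is returned.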
bool SemiSpace::Commit() {
  DCHECK(!is_committed());
  const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
  for (int pages_added = 0; pages_added < num_pages; pages_added++) {
    // Pages in the new spaces can be moved to the old space by the full
    // collector. Therefore, they must be initialized with the same FreeList as
    // old pages.
    Page* new_page =
        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
            NOT_EXECUTABLE);
    if (new_page == nullptr) {
      if (pages_added) RewindPages(pages_added);
      return false;
    }
    memory_chunk_list_.PushBack(new_page);
  }
  Reset();
  AccountCommitted(current_capacity_);
  if (age_mark_ == kNullAddress) {
    age_mark_ = first_page()->area_start();
  }
  committed_ = true;
  return true;
}

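// Returns all pages to the allocator's pool and queues them for unmapping.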
bool SemiSpace::Uncommit() {
  DCHECK(is_committed());
  while (!memory_chunk_list_.Empty()) {
    MemoryChunk* chunk = memory_chunk_list_.front();
    memory_chunk_list_.Remove(chunk);
    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
  }
  current_page_ = nullptr;
  AccountUncommitted(current_capacity_);
  committed_ = false;
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  return true;
}

size_t SemiSpace::CommittedPhysicalMemory() {
  if (!is_committed()) return 0;
  size_t size = 0;
  for (Page* p : *this) {
    size += p->CommittedPhysicalMemory();
  }
  return size;
}

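// Grows the semispace to new_capacity by appending pooled pages, committing
// the space first if needed. Returns false (after rewinding the pages added
// so far) if a page allocation fails.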
bool SemiSpace::GrowTo(size_t new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
  DCHECK_LE(new_capacity, maximum_capacity_);
  DCHECK_GT(new_capacity, current_capacity_);
  const size_t delta = new_capacity - current_capacity_;
  DCHECK(IsAligned(delta, AllocatePageSize()));
  const int delta_pages = static_cast<int>(delta / Page::kPageSize);
  DCHECK(last_page());
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    Page* new_page =
        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
            NOT_EXECUTABLE);
    if (new_page == nullptr) {
      if (pages_added) RewindPages(pages_added);
      return false;
    }
    memory_chunk_list_.PushBack(new_page);
    marking_state->ClearLiveness(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
  }
  AccountCommitted(delta);
  current_capacity_ = new_capacity;
  return true;
}

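// Removes and frees the last num_pages pages of the semispace.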
void SemiSpace::RewindPages(int num_pages) {
  DCHECK_GT(num_pages, 0);
  DCHECK(last_page());
  while (num_pages > 0) {
    MemoryChunk* last = last_page();
    memory_chunk_list_.Remove(last);
    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
    num_pages--;
  }
}

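// Shrinks the semispace to new_capacity by freeing pages from the end of
// the page list; new_capacity must lie in [minimum_capacity_,
// current_capacity_).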
bool SemiSpace::ShrinkTo(size_t new_capacity) {
  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
  DCHECK_GE(new_capacity, minimum_capacity_);
  DCHECK_LT(new_capacity, current_capacity_);
  if (is_committed()) {
    const size_t delta = current_capacity_ - new_capacity;
    DCHECK(IsAligned(delta, Page::kPageSize));
    int delta_pages = static_cast<int>(delta / Page::kPageSize);
    RewindPages(delta_pages);
    AccountUncommitted(delta);
    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  }
  current_capacity_ = new_capacity;
  return true;
}

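// Re-tags every page after a flip: ownership is updated and the
// TO_PAGE/FROM_PAGE flags are set according to this semispace's id.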
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
  for (Page* page : *this) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (id_ == kToSpace) {
      page->ClearFlag(MemoryChunk::FROM_PAGE);
      page->SetFlag(MemoryChunk::TO_PAGE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
          page, 0);
    } else {
      page->SetFlag(MemoryChunk::FROM_PAGE);
      page->ClearFlag(MemoryChunk::TO_PAGE);
    }
    DCHECK(page->InYoungGeneration());
  }
}

void SemiSpace::Reset() {
  DCHECK(first_page());
  DCHECK(last_page());
  current_page_ = first_page();
  pages_used_ = 0;
}

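// Unlinks a page from the semispace and removes its external backing store
// bytes from this space's accounting.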
void SemiSpace::RemovePage(Page* page) {
  if (current_page_ == page) {
    if (page->prev_page()) {
      current_page_ = page->prev_page();
    }
  }
  memory_chunk_list_.Remove(page);
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
}

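// Inserts an existing page at the front of the semispace, copying the flags
// of the current page and taking over its external backing store accounting.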
void SemiSpace::PrependPage(Page* page) {
  page->SetFlags(current_page()->GetFlags(),
                 static_cast<uintptr_t>(Page::kCopyAllFlags));
  page->set_owner(this);
  memory_chunk_list_.PushFront(page);
  pages_used_++;
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
}

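// Exchanges the contents of the two semispaces (everything except id_) and
// then fixes up the page flags on both sides.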
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  DCHECK(from->first_page());
  DCHECK(to->first_page());

  intptr_t saved_to_space_flags = to->current_page()->GetFlags();

  // We swap all properties but id_.
  std::swap(from->current_capacity_, to->current_capacity_);
  std::swap(from->maximum_capacity_, to->maximum_capacity_);
  std::swap(from->minimum_capacity_, to->minimum_capacity_);
  std::swap(from->age_mark_, to->age_mark_);
  std::swap(from->committed_, to->committed_);
  std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
  std::swap(from->current_page_, to->current_page_);
  std::swap(from->external_backing_store_bytes_,
            to->external_backing_store_bytes_);

  to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
  from->FixPagesFlags(0, 0);
}

void SemiSpace::set_age_mark(Address mark) {
  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
  age_mark_ = mark;
  // Mark all pages up to the one containing mark.
  for (Page* p : PageRange(space_start(), mark)) {
    p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  }
}

std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
  // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
  UNREACHABLE();
}

#ifdef DEBUG
void SemiSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  size_t external_backing_store_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  for (Page* page : *this) {
    CHECK_EQ(page->owner(), this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
                                        : MemoryChunk::TO_PAGE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
                                         : MemoryChunk::FROM_PAGE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(
            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
    }

    CHECK_IMPLIES(page->list_node().prev(),
                  page->list_node().prev()->list_node().next() == page);
  }
  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
  }
}
#endif

#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to the same semi-space.
  Page* page = Page::FromAllocationAreaAddress(start);
  Page* end_page = Page::FromAllocationAreaAddress(end);
  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
  DCHECK_EQ(space, end_page->owner());
  // Start address is before end address, either on the same page,
  // or the end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    DCHECK_LE(start, end);
  } else {
    while (page != end_page) {
      page = page->next_page();
    }
    DCHECK(page);
  }
}
#endif

// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator implementation.

SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
  Initialize(space->first_allocatable_address(), space->top());
}

void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
}

size_t NewSpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}

// -----------------------------------------------------------------------------
// NewSpace implementation

NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
                   size_t initial_semispace_capacity,
                   size_t max_semispace_capacity)
    : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
      to_space_(heap, kToSpace),
      from_space_(heap, kFromSpace) {
  DCHECK(initial_semispace_capacity <= max_semispace_capacity);

  to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
  from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
  if (!to_space_.Commit()) {
    V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
  ResetLinearAllocationArea();
}

void NewSpace::TearDown() {
  allocation_info_.Reset(kNullAddress, kNullAddress);

  to_space_.TearDown();
  from_space_.TearDown();
}

void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }

void NewSpace::Grow() {
  DCHECK_IMPLIES(FLAG_local_heaps, heap()->safepoint()->IsActive());
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  size_t new_capacity =
      Min(MaximumCapacity(),
          static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from-space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        FATAL("inconsistent state");
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void NewSpace::Shrink() {
  size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
  size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < TotalCapacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from-space,
      // attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        FATAL("inconsistent state");
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

bool NewSpace::Rebalance() {
  // Order here is important to make use of the page pool.
  return to_space_.EnsureCurrentCapacity() &&
         from_space_.EnsureCurrentCapacity();
}

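// Points the linear allocation area at the current to-space page and
// publishes the new top and limit for concurrent markers.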
void NewSpace::UpdateLinearAllocationArea() {
  AdvanceAllocationObservers();

  Address new_top = to_space_.page_low();
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.Reset(new_top, to_space_.page_high());
  // The order of the following two stores is important.
  // See the corresponding loads in ConcurrentMarking::Run.
  original_limit_.store(limit(), std::memory_order_relaxed);
  original_top_.store(top(), std::memory_order_release);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  UpdateInlineAllocationLimit(0);
}

void NewSpace::ResetLinearAllocationArea() {
  to_space_.Reset();
  UpdateLinearAllocationArea();
  // Clear all mark-bits in the to-space.
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (Page* p : to_space_) {
    marking_state->ClearLiveness(p);
    // Concurrent marking may have local live bytes for this page.
    heap()->concurrent_marking()->ClearMemoryChunkData(p);
  }
}

void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
  Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
  DCHECK_LE(top(), new_limit);
  DCHECK_LE(new_limit, to_space_.page_high());
  allocation_info_.set_limit(new_limit);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

#if DEBUG
  VerifyTop();
#endif
}

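// Advances the linear allocation area to the next to-space page, filling
// the unused tail of the current page with a filler object.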
bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top();
  DCHECK(!OldSpace::IsAtPageStart(top));

  if (!to_space_.AdvancePage()) {
    // No more pages left to advance.
    return false;
  }

  // Clear remainder of current page.
  Address limit = Page::FromAllocationAreaAddress(top)->area_end();
  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
  UpdateLinearAllocationArea();

  return true;
}

bool NewSpace::AddFreshPageSynchronized() {
  base::MutexGuard guard(&mutex_);
  return AddFreshPage();
}

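// Ensures the linear allocation area has room for size_in_bytes plus any
// alignment filler, advancing to a fresh page if the current one is full.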
bool NewSpace::EnsureAllocation(int size_in_bytes,
                                AllocationAlignment alignment) {
  AdvanceAllocationObservers();

  Address old_top = allocation_info_.top();
  Address high = to_space_.page_high();
  int filler_size = Heap::GetFillToAlign(old_top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (old_top + aligned_size_in_bytes <= high) {
    UpdateInlineAllocationLimit(aligned_size_in_bytes);
    return true;
  }

  // Not enough room in the page, try to allocate a new one.
  if (!AddFreshPage()) {
    return false;
  }

  old_top = allocation_info_.top();
  high = to_space_.page_high();
  filler_size = Heap::GetFillToAlign(old_top, alignment);
  aligned_size_in_bytes = size_in_bytes + filler_size;

  DCHECK(old_top + aligned_size_in_bytes <= high);
  UpdateInlineAllocationLimit(aligned_size_in_bytes);
  return true;
}

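// If the given LAB still ends exactly at the space's top, rewinds top to the
// LAB's start, returning the LAB's unused memory to the space.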
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
  if (info.limit() != kNullAddress && info.limit() == top()) {
    DCHECK_NE(info.top(), kNullAddress);
    allocation_info_.set_top(info.top());
    allocation_info_.MoveStartToTop();
    original_top_.store(info.top(), std::memory_order_release);
  }

#if DEBUG
  VerifyTop();
#endif
}

std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
  return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}

AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
                                           AllocationAlignment alignment,
                                           AllocationOrigin origin) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment != kWordAligned
             ? AllocateRawAligned(size_in_bytes, alignment, origin)
             : AllocateRawUnaligned(size_in_bytes, origin);
#else
#ifdef V8_COMPRESS_POINTERS
  // TODO(ishell, v8:8875): Consider using aligned allocations once the
  // allocation alignment inconsistency is fixed. For now we keep using
  // unaligned access since both x64 and arm64 architectures (where pointer
  // compression is supported) allow unaligned access to doubles and full
  // words.
#endif  // V8_COMPRESS_POINTERS
  return AllocateRawUnaligned(size_in_bytes, origin);
#endif
}

AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
                                                AllocationOrigin origin) {
  if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
    return AllocationResult::Retry();
  }

  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());

  AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
  DCHECK(!result.IsRetry());

  InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
                            size_in_bytes);

  return result;
}

AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment,
                                              AllocationOrigin origin) {
  if (!EnsureAllocation(size_in_bytes, alignment)) {
    return AllocationResult::Retry();
  }

  DCHECK_EQ(allocation_info_.start(), allocation_info_.top());

  int aligned_size_in_bytes;

  AllocationResult result = AllocateFastAligned(
      size_in_bytes, &aligned_size_in_bytes, alignment, origin);
  DCHECK(!result.IsRetry());

  InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
                            aligned_size_in_bytes, aligned_size_in_bytes);

  return result;
}

void NewSpace::VerifyTop() {
  // Ensure validity of LAB: start <= top <= limit.
  DCHECK_LE(allocation_info_.start(), allocation_info_.top());
  DCHECK_LE(allocation_info_.top(), allocation_info_.limit());

  // Ensure that original_top_ always equals LAB start.
  DCHECK_EQ(original_top_, allocation_info_.start());

  // Ensure that limit() is <= original_limit_; original_limit_ always needs
  // to be the end of the current to-space page.
  DCHECK_LE(allocation_info_.limit(), original_limit_);
  DCHECK_EQ(original_limit_, to_space_.page_high());
}

#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't
// assume that it works (it depends on the invariants we are checking).
void NewSpace::Verify(Isolate* isolate) {
  // The allocation pointer should be in the space or at the very end.
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  size_t external_space_bytes[kNumTypes];
  for (int i = 0; i < kNumTypes; i++) {
    external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  while (current != top()) {
    if (!Page::IsAlignedToPageSize(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space or read-only space.
      Map map = object.map();
      CHECK(map.IsMap());
      CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object.IsMap());
      CHECK(!object.IsAbstractCode());

      // The object itself should look OK.
      object.ObjectVerify(isolate);

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor(heap());
      int size = object.Size();
      object.IterateBody(map, size, &visitor);

      if (object.IsExternalString()) {
        ExternalString external_string = ExternalString::cast(object);
        size_t size = external_string.ExternalPayloadSize();
        external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
      }

      current += size;
    } else {
      // At end of page, switch to next page.
      Page* page = Page::FromAllocationAreaAddress(current)->next_page();
      current = page->area_start();
    }
  }

  for (int i = 0; i < kNumTypes; i++) {
    if (i == ExternalBackingStoreType::kArrayBuffer) continue;
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
  }

  size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
  CHECK_EQ(bytes,
           ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif

}  // namespace internal
}  // namespace v8