// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/paged-spaces.h"

#include <atomic>

#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/safepoint.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/string.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// PagedSpaceObjectIterator

PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
                                                   PagedSpace* space)
    : cur_addr_(kNullAddress),
      cur_end_(kNullAddress),
      space_(space),
      page_range_(space->first_page(), nullptr),
      current_page_(page_range_.begin())
#if V8_COMPRESS_POINTERS
      ,
      cage_base_(heap->isolate())
#endif  // V8_COMPRESS_POINTERS
{
  heap->MakeHeapIterable();
  USE(space_);
}

PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
                                                   PagedSpace* space,
                                                   Page* page)
    : cur_addr_(kNullAddress),
      cur_end_(kNullAddress),
      space_(space),
      page_range_(page),
      current_page_(page_range_.begin())
#if V8_COMPRESS_POINTERS
      ,
      cage_base_(heap->isolate())
#endif  // V8_COMPRESS_POINTERS
{
  heap->MakeHeapIterable();
#ifdef DEBUG
  AllocationSpace owner = page->owner_identity();
  DCHECK(owner == OLD_SPACE || owner == MAP_SPACE || owner == CODE_SPACE);
#endif  // DEBUG
}

// We have hit the end of the current page's object area and should advance
// to the next page.
bool PagedSpaceObjectIterator::AdvanceToNextPage() {
  DCHECK_EQ(cur_addr_, cur_end_);
  if (current_page_ == page_range_.end()) return false;
  Page* cur_page = *(current_page_++);

  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}
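
// Initializes a chunk freshly obtained from the memory allocator as a Page of
// this space: allocation statistics are reset, old-generation page flags are
// applied, and the free-list categories are set up before the page is used.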
Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
  Page* page = static_cast<Page*>(chunk);
  DCHECK_EQ(
      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
      page->area_size());
  // Make sure that categories are initialized before freeing the area.
  page->ResetAllocationStatistics();
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->AllocateFreeListCategories();
  page->InitializeFreeListCategories();
  page->list_node().Initialize();
  page->InitializationMemoryFence();
  return page;
}

PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable, FreeList* free_list,
                       LinearAllocationArea* allocation_info_,
                       CompactionSpaceKind compaction_space_kind)
    : SpaceWithLinearArea(heap, space, free_list, allocation_info_),
      executable_(executable),
      compaction_space_kind_(compaction_space_kind) {
  area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
  accounting_stats_.Clear();
}

void PagedSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    MemoryChunk* chunk = memory_chunk_list_.front();
    memory_chunk_list_.Remove(chunk);
    heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
                                     chunk);
  }
  accounting_stats_.Clear();
}

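// Moves pages that the concurrent sweeper has finished with back into this
// space's free list. For compaction spaces, swept pages are additionally
// moved over from their original owner.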
void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE) {
    return;
  }
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  size_t added = 0;

  {
    Page* p = nullptr;
    while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
      // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
      // entries here to make them unavailable for allocations.
      if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
        p->ForAllFreeListCategories([this](FreeListCategory* category) {
          category->Reset(free_list());
        });
      }

      // Pages can actually change ownership only during compaction. This is
      // safe because there exists no other competing action on the page links
      // during compaction.
      if (is_compaction_space()) {
        DCHECK_NE(this, p->owner());
        PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
        base::MutexGuard guard(owner->mutex());
        owner->RefineAllocatedBytesAfterSweeping(p);
        owner->RemovePage(p);
        added += AddPage(p);
        added += p->wasted_memory();
      } else {
        base::MutexGuard guard(mutex());
        DCHECK_EQ(this, p->owner());
        RefineAllocatedBytesAfterSweeping(p);
        added += RelinkFreeListCategories(p);
        added += p->wasted_memory();
      }
      if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
    }
  }
}

void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  base::MutexGuard guard(mutex());

  DCHECK(identity() == other->identity());

  // Unmerged fields:
  //   area_size_
  other->FreeLinearAllocationArea();

  for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
       i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
    allocations_origins_[i] += other->allocations_origins_[i];
  }

  // The linear allocation area of {other} should be destroyed now.
  DCHECK_EQ(kNullAddress, other->top());
  DCHECK_EQ(kNullAddress, other->limit());

  // Move over pages.
  for (auto it = other->begin(); it != other->end();) {
    Page* p = *(it++);

    // Ensure that pages are initialized before objects on them are discovered
    // by concurrent markers.
    p->InitializationMemoryFence();

    // Relinking requires the category to be unlinked.
    other->RemovePage(p);
    AddPage(p);
    DCHECK_IMPLIES(
        !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
        p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());

    // TODO(leszeks): Here we should do an allocation step, but:
    //   1. Allocation groups are currently not handled properly by the
    //      sampling allocation profiler, and
    //   2. Observers might try to take the space lock, which isn't reentrant.
    // We'll have to come up with a better solution for allocation stepping
    // before shipping, which will likely be using LocalHeap.
  }
  for (auto p : other->GetNewPages()) {
    heap()->NotifyOldGenerationExpansion(identity(), p);
  }

  DCHECK_EQ(0u, other->Size());
  DCHECK_EQ(0u, other->Capacity());
}

size_t PagedSpace::CommittedPhysicalMemory() const {
  if (!base::OS::HasLazyCommits()) {
    DCHECK_EQ(0, committed_physical_memory());
    return CommittedMemory();
  }
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
  return committed_physical_memory();
}

void PagedSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
  if (!base::OS::HasLazyCommits() || increment_value == 0) return;
  size_t old_value = committed_physical_memory_.fetch_add(
      increment_value, std::memory_order_relaxed);
  USE(old_value);
  DCHECK_LT(old_value, old_value + increment_value);
}

void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
  if (!base::OS::HasLazyCommits() || decrement_value == 0) return;
  size_t old_value = committed_physical_memory_.fetch_sub(
      decrement_value, std::memory_order_relaxed);
  USE(old_value);
  DCHECK_GT(old_value, old_value - decrement_value);
}

#if DEBUG
void PagedSpace::VerifyCommittedPhysicalMemory() const {
  heap()->safepoint()->AssertActive();
  size_t size = 0;
  for (const Page* page : *this) {
    DCHECK(page->SweepingDone());
    size += page->CommittedPhysicalMemory();
  }
  // Ensure that the space's counter matches the sum of all page counters.
  DCHECK_EQ(size, CommittedPhysicalMemory());
}
#endif  // DEBUG

bool PagedSpace::ContainsSlow(Address addr) const {
  Page* p = Page::FromAddress(addr);
  for (const Page* page : *this) {
    if (page == p) return true;
  }
  return false;
}

void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
  CHECK(page->SweepingDone());
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // The live_bytes counter on the page was accounted for in the space's
  // allocated bytes counter. After sweeping, allocated_bytes() contains the
  // accurate live byte count on the page.
  size_t old_counter = marking_state->live_bytes(page);
  size_t new_counter = page->allocated_bytes();
  DCHECK_GE(old_counter, new_counter);
  if (old_counter > new_counter) {
    DecreaseAllocatedBytes(old_counter - new_counter, page);
  }
  marking_state->SetLiveBytes(page, 0);
}

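// Removes a page whose free list holds at least |size_in_bytes| so that, for
// example, a compaction space can take it over; guarded by the space mutex.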
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
  base::MutexGuard guard(mutex());
  Page* page = free_list()->GetPageForSize(size_in_bytes);
  if (!page) return nullptr;
  RemovePage(page);
  return page;
}

size_t PagedSpace::AddPage(Page* page) {
  CHECK(page->SweepingDone());
  page->set_owner(this);
  memory_chunk_list_.PushBack(page);
  AccountCommitted(page->size());
  IncreaseCapacity(page->area_size());
  IncreaseAllocatedBytes(page->allocated_bytes(), page);
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
  IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
  return RelinkFreeListCategories(page);
}

void PagedSpace::RemovePage(Page* page) {
  CHECK(page->SweepingDone());
  memory_chunk_list_.Remove(page);
  UnlinkFreeListCategories(page);
  DecreaseAllocatedBytes(page->allocated_bytes(), page);
  DecreaseCapacity(page->area_size());
  AccountUncommitted(page->size());
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
  DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
}

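// Installs a new linear allocation area [top, limit). For non-compaction
// spaces, the published original_top_/original_limit_ pair is updated under
// pending_allocation_mutex_ so that concurrent readers observe it consistently.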
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
  DCHECK(top == limit ||
         Page::FromAddress(top) == Page::FromAddress(limit - 1));
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
  allocation_info_->Reset(top, limit);

  base::Optional<base::SharedMutexGuard<base::kExclusive>> optional_guard;
  if (!is_compaction_space())
    optional_guard.emplace(&pending_allocation_mutex_);
  original_limit_ = limit;
  original_top_ = top;
}

size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
  size_t unused = page->ShrinkToHighWaterMark();
  accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
  AccountUncommitted(unused);
  return unused;
}

void PagedSpace::ResetFreeList() {
  for (Page* page : *this) {
    free_list_->EvictFreeListItems(page);
  }
  DCHECK(free_list_->IsEmpty());
}

void PagedSpace::ShrinkImmortalImmovablePages() {
  DCHECK(!heap()->deserialization_complete());
  BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
  FreeLinearAllocationArea();
  ResetFreeList();
  for (Page* page : *this) {
    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    ShrinkPageToHighWaterMark(page);
  }
}

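// Allocates a fresh page from the memory allocator, adds it to this space and
// hands its whole object area to the free list. Returns nullptr on failure.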
Page* PagedSpace::Expand() {
  Page* page = heap()->memory_allocator()->AllocatePage(
      MemoryAllocator::AllocationMode::kRegular, this, executable());
  if (page == nullptr) return nullptr;
  ConcurrentAllocationMutex guard(this);
  AddPage(page);
  Free(page->area_start(), page->area_size(),
       SpaceAccountingMode::kSpaceAccounted);
  return page;
}

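// Background-thread variant of Expand(): the new page is added under
// space_mutex_, the first |size_in_bytes| of the page are handed to the caller
// directly, and only the remainder goes onto the free list.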
base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
    size_t size_in_bytes) {
  Page* page = heap()->memory_allocator()->AllocatePage(
      MemoryAllocator::AllocationMode::kRegular, this, executable());
  if (page == nullptr) return {};
  base::MutexGuard lock(&space_mutex_);
  AddPage(page);
  if (identity() == CODE_SPACE || identity() == CODE_LO_SPACE) {
    heap()->isolate()->AddCodeMemoryChunk(page);
  }
  Address object_start = page->area_start();
  CHECK_LE(size_in_bytes, page->area_size());
  Free(page->area_start() + size_in_bytes, page->area_size() - size_in_bytes,
       SpaceAccountingMode::kSpaceAccounted);
  AddRangeToActiveSystemPages(page, object_start, object_start + size_in_bytes);
  return std::make_pair(object_start, size_in_bytes);
}

int PagedSpace::CountTotalPages() const {
  int count = 0;
  for (const Page* page : *this) {
    count++;
    USE(page);
  }
  return count;
}

void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
  SetTopAndLimit(top, limit);
  if (top != kNullAddress && top != limit &&
      heap()->incremental_marking()->black_allocation()) {
    Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
  }
}

void PagedSpace::DecreaseLimit(Address new_limit) {
  Address old_limit = limit();
  DCHECK_LE(top(), new_limit);
  DCHECK_GE(old_limit, new_limit);
  if (new_limit != old_limit) {
    base::Optional<CodePageMemoryModificationScope> optional_scope;

    if (identity() == CODE_SPACE) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(new_limit);
      optional_scope.emplace(chunk);
    }

    ConcurrentAllocationMutex guard(this);
    SetTopAndLimit(top(), new_limit);
    Free(new_limit, old_limit - new_limit,
         SpaceAccountingMode::kSpaceAccounted);
    if (heap()->incremental_marking()->black_allocation()) {
      Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
                                                                   old_limit);
    }
  }
}

void PagedSpace::MarkLinearAllocationAreaBlack() {
  DCHECK(heap()->incremental_marking()->black_allocation());
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->CreateBlackArea(current_top, current_limit);
  }
}

void PagedSpace::UnmarkLinearAllocationArea() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->DestroyBlackArea(current_top, current_limit);
  }
}

void PagedSpace::MakeLinearAllocationAreaIterable() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    base::Optional<CodePageMemoryModificationScope> optional_scope;

    if (identity() == CODE_SPACE) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(current_top);
      optional_scope.emplace(chunk);
    }

    heap_->CreateFillerObjectAt(current_top,
                                static_cast<int>(current_limit - current_top),
                                ClearRecordedSlots::kNo);
  }
}

size_t PagedSpace::Available() const {
  ConcurrentAllocationMutex guard(this);
  return free_list_->Available();
}

namespace {

UnprotectMemoryOrigin GetUnprotectMemoryOrigin(bool is_compaction_space) {
  return is_compaction_space ? UnprotectMemoryOrigin::kMaybeOffMainThread
                             : UnprotectMemoryOrigin::kMainThread;
}

}  // namespace

void PagedSpace::FreeLinearAllocationArea() {
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  Address current_top = top();
  Address current_limit = limit();
  if (current_top == kNullAddress) {
    DCHECK_EQ(kNullAddress, current_limit);
    return;
  }

  AdvanceAllocationObservers();

  if (current_top != current_limit &&
      heap()->incremental_marking()->black_allocation()) {
    Page::FromAddress(current_top)
        ->DestroyBlackArea(current_top, current_limit);
  }

  SetTopAndLimit(kNullAddress, kNullAddress);
  DCHECK_GE(current_limit, current_top);

  // The code page of the linear allocation area needs to be unprotected
  // because we are going to write a filler into that memory area below.
  if (identity() == CODE_SPACE) {
    heap()->UnprotectAndRegisterMemoryChunk(
        MemoryChunk::FromAddress(current_top),
        GetUnprotectMemoryOrigin(is_compaction_space()));
  }

  DCHECK_IMPLIES(current_limit - current_top >= 2 * kTaggedSize,
                 heap()->incremental_marking()->marking_state()->IsWhite(
                     HeapObject::FromAddress(current_top)));
  Free(current_top, current_limit - current_top,
       SpaceAccountingMode::kSpaceAccounted);
}

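// Returns a page with no remaining live bytes to the memory allocator. The
// page is evicted from the free list and, if it backs the current linear
// allocation area, that area is reset first.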
void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(
      0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
             page));
  DCHECK_EQ(page->owner(), this);

  free_list_->EvictFreeListItems(page);

  if (Page::FromAllocationAreaAddress(allocation_info_->top()) == page) {
    SetTopAndLimit(kNullAddress, kNullAddress);
  }

  if (identity() == CODE_SPACE) {
    heap()->isolate()->RemoveCodeMemoryChunk(page);
  }

  AccountUncommitted(page->size());
  DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
  accounting_stats_.DecreaseCapacity(page->area_size());
  heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
                                   page);
}

void PagedSpace::SetReadable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadable();
  }
}

void PagedSpace::SetReadAndExecutable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadAndExecutable();
  }
}

void PagedSpace::SetCodeModificationPermissions() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetCodeModificationPermissions();
  }
}

std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
  return std::unique_ptr<ObjectIterator>(
      new PagedSpaceObjectIterator(heap, this));
}

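// Main-thread slow path: retires the current linear allocation area and tries
// to set up a new one of at least |size_in_bytes| from the free list.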
bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
                                               AllocationOrigin origin) {
  ConcurrentAllocationMutex guard(this);
  DCHECK(IsAligned(size_in_bytes, kTaggedSize));
  DCHECK_LE(top(), limit());
#ifdef DEBUG
  if (top() != limit()) {
    DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
  }
#endif
  // Don't free list allocate if there is linear space available.
  DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);

  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap. This also puts it back in the free list
  // if it is big enough.
  FreeLinearAllocationArea();

  size_t new_node_size = 0;
  FreeSpace new_node =
      free_list_->Allocate(size_in_bytes, &new_node_size, origin);
  if (new_node.is_null()) return false;
  DCHECK_GE(new_node_size, size_in_bytes);

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  // Memory in the linear allocation area is counted as allocated. We may free
  // a little of this again immediately - see below.
  Page* page = Page::FromHeapObject(new_node);
  IncreaseAllocatedBytes(new_node_size, page);

  DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
  Address start = new_node.address();
  Address end = new_node.address() + new_node_size;
  Address limit = ComputeLimit(start, end, size_in_bytes);
  DCHECK_LE(limit, end);
  DCHECK_LE(size_in_bytes, limit - start);
  if (limit != end) {
    if (identity() == CODE_SPACE) {
      heap()->UnprotectAndRegisterMemoryChunk(
          page, GetUnprotectMemoryOrigin(is_compaction_space()));
    }
    Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
  }
  SetLinearAllocationArea(start, limit);
  AddRangeToActiveSystemPages(page, start, limit);

  return true;
}

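// Background allocation slow path: first tries the free list, then contributes
// to sweeping and retries, then expands the space, and finally drains sweeping
// for this space before giving up.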
base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
    LocalHeap* local_heap, size_t min_size_in_bytes, size_t max_size_in_bytes,
    AllocationAlignment alignment, AllocationOrigin origin) {
  DCHECK(!is_compaction_space());
  DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
         identity() == MAP_SPACE);
  DCHECK(origin == AllocationOrigin::kRuntime ||
         origin == AllocationOrigin::kGC);
  DCHECK_IMPLIES(!local_heap, origin == AllocationOrigin::kGC);

  base::Optional<std::pair<Address, size_t>> result =
      TryAllocationFromFreeListBackground(min_size_in_bytes, max_size_in_bytes,
                                          alignment, origin);
  if (result) return result;

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // First try to refill the free-list, concurrent sweeper threads
    // may have freed some objects in the meantime.
    RefillFreeList();

    // Retry the free list allocation.
    result = TryAllocationFromFreeListBackground(
        min_size_in_bytes, max_size_in_bytes, alignment, origin);
    if (result) return result;

    if (IsSweepingAllowedOnThread(local_heap)) {
      // Now contribute to sweeping from background thread and then try to
      // reallocate.
      const int kMaxPagesToSweep = 1;
      int max_freed = collector->sweeper()->ParallelSweepSpace(
          identity(), Sweeper::SweepingMode::kLazyOrConcurrent,
          static_cast<int>(min_size_in_bytes), kMaxPagesToSweep);

      RefillFreeList();

      if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
        result = TryAllocationFromFreeListBackground(
            min_size_in_bytes, max_size_in_bytes, alignment, origin);
        if (result) return result;
      }
    }
  }

  if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
      heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
    result = ExpandBackground(max_size_in_bytes);
    if (result) {
      DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
      return result;
    }
  }

  if (collector->sweeping_in_progress()) {
    // Complete sweeping for this space.
    if (IsSweepingAllowedOnThread(local_heap)) {
      collector->DrainSweepingWorklistForSpace(identity());
    }

    RefillFreeList();

    // Last try to acquire memory from free list.
    return TryAllocationFromFreeListBackground(
        min_size_in_bytes, max_size_in_bytes, alignment, origin);
  }

  return {};
}

base::Optional<std::pair<Address, size_t>>
PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
                                                size_t max_size_in_bytes,
                                                AllocationAlignment alignment,
                                                AllocationOrigin origin) {
  base::MutexGuard lock(&space_mutex_);
  DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
  DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
         identity() == MAP_SPACE);

  size_t new_node_size = 0;
  FreeSpace new_node =
      free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
  if (new_node.is_null()) return {};
  DCHECK_GE(new_node_size, min_size_in_bytes);

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  // Memory in the linear allocation area is counted as allocated. We may free
  // a little of this again immediately - see below.
  Page* page = Page::FromHeapObject(new_node);
  IncreaseAllocatedBytes(new_node_size, page);

  heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();

  size_t used_size_in_bytes = std::min(new_node_size, max_size_in_bytes);

  Address start = new_node.address();
  Address end = new_node.address() + new_node_size;
  Address limit = new_node.address() + used_size_in_bytes;
  DCHECK_LE(limit, end);
  DCHECK_LE(min_size_in_bytes, limit - start);
  if (limit != end) {
    if (identity() == CODE_SPACE) {
      heap()->UnprotectAndRegisterMemoryChunk(
          page, UnprotectMemoryOrigin::kMaybeOffMainThread);
    }
    Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
  }
  AddRangeToActiveSystemPages(page, start, limit);

  return std::make_pair(start, used_size_in_bytes);
}

bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) const {
  // Code space sweeping is only allowed on main thread.
  return (local_heap && local_heap->is_main_thread()) ||
         identity() != CODE_SPACE;
}

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_->top() == allocation_info_->limit());
  size_t external_space_bytes[kNumTypes];
  size_t external_page_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  PtrComprCageBase cage_base(isolate);
  for (Page* page : *this) {
    CHECK_EQ(page->owner(), this);

    for (int i = 0; i < kNumTypes; i++) {
      external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    }

    if (page == Page::FromAllocationAreaAddress(allocation_info_->top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    PagedSpaceObjectIterator it(isolate->heap(), this, page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();

    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      CHECK(end_of_previous_object <= object.address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map map = object.map(cage_base);
      CHECK(map.IsMap(cage_base));
      CHECK(ReadOnlyHeap::Contains(map) ||
            isolate->heap()->space_for_maps()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object.ObjectVerify(isolate);

      if (identity() != RO_SPACE && !FLAG_verify_heap_skip_remembered_set) {
        isolate->heap()->VerifyRememberedSetFor(object);
      }

      // All the interior pointers should be contained in the heap.
      int size = object.Size(cage_base);
      object.IterateBody(map, size, visitor);
      CHECK(object.address() + size <= top);
      end_of_previous_object = object.address() + size;

      if (object.IsExternalString(cage_base)) {
        ExternalString external_string = ExternalString::cast(object);
        size_t payload_size = external_string.ExternalPayloadSize();
        external_page_bytes[ExternalBackingStoreType::kExternalString] +=
            payload_size;
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
      external_space_bytes[t] += external_page_bytes[t];
    }

    CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
    CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
  }
  for (int i = 0; i < kNumTypes; i++) {
    if (i == ExternalBackingStoreType::kArrayBuffer) continue;
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
  }
  CHECK(allocation_pointer_found_in_space);

  if (identity() == OLD_SPACE && !FLAG_concurrent_array_buffer_sweeping) {
    size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
    CHECK_EQ(bytes,
             ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
  }

#ifdef DEBUG
  VerifyCountersAfterSweeping(isolate->heap());
#endif
}

void PagedSpace::VerifyLiveBytes() {
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  PtrComprCageBase cage_base(heap()->isolate());
  for (Page* page : *this) {
    CHECK(page->SweepingDone());
    PagedSpaceObjectIterator it(heap(), this, page);
    int black_size = 0;
    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      // All the interior pointers should be contained in the heap.
      if (marking_state->IsBlack(object)) {
        black_size += object.Size(cage_base);
      }
    }
    CHECK_LE(black_size, marking_state->live_bytes(page));
  }
}
#endif  // VERIFY_HEAP

#ifdef DEBUG
void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  PtrComprCageBase cage_base(heap->isolate());
  for (Page* page : *this) {
    DCHECK(page->SweepingDone());
    total_capacity += page->area_size();
    PagedSpaceObjectIterator it(heap, this, page);
    size_t real_allocated = 0;
    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      if (!object.IsFreeSpaceOrFiller()) {
        real_allocated += object.Size(cage_base);
      }
    }
    total_allocated += page->allocated_bytes();
    // The real size can be smaller than the accounted size if array trimming
    // or object slack tracking happened after sweeping.
    DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}

void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
  // We need to refine the counters on pages that are already swept and have
  // not been moved over to the actual space. Otherwise, the AccountingStats
  // are just an overapproximation.
  RefillFreeList();

  size_t total_capacity = 0;
  size_t total_allocated = 0;
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (Page* page : *this) {
    size_t page_allocated =
        page->SweepingDone()
            ? page->allocated_bytes()
            : static_cast<size_t>(marking_state->live_bytes(page));
    total_capacity += page->area_size();
    total_allocated += page_allocated;
    DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif

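// Shrinks the current linear allocation area to the limit ComputeLimit()
// yields for |min_size|; memory beyond the new limit is returned to the free
// list via DecreaseLimit().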
void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
  // Ensure there are no unaccounted allocations.
  DCHECK_EQ(allocation_info_->start(), allocation_info_->top());

  Address new_limit = ComputeLimit(top(), limit(), min_size);
  DCHECK_LE(top(), new_limit);
  DCHECK_LE(new_limit, limit());
  DecreaseLimit(new_limit);
}

// -----------------------------------------------------------------------------
// OldSpace implementation

void PagedSpace::PrepareForMarkCompact() {
  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_->Reset();
}

bool PagedSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
  VMState<GC> state(heap()->isolate());
  RCS_SCOPE(heap()->isolate(),
            RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
  return RawRefillLabMain(size_in_bytes, origin);
}

Page* CompactionSpace::Expand() {
  Page* page = PagedSpace::Expand();
  new_pages_.push_back(page);
  return page;
}

bool CompactionSpace::RefillLabMain(int size_in_bytes,
                                    AllocationOrigin origin) {
  return RawRefillLabMain(size_in_bytes, origin);
}

bool PagedSpace::TryExpand(int size_in_bytes, AllocationOrigin origin) {
  Page* page = Expand();
  if (!page) return false;
  if (!is_compaction_space()) {
    heap()->NotifyOldGenerationExpansion(identity(), page);
  }
  DCHECK((CountTotalPages() > 1) ||
         (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
  return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
                                       origin);
}

bool PagedSpace::RawRefillLabMain(int size_in_bytes, AllocationOrigin origin) {
  // Allocation in this space has failed.
  DCHECK_GE(size_in_bytes, 0);
  const int kMaxPagesToSweep = 1;

  if (TryAllocationFromFreeListMain(size_in_bytes, origin)) return true;

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // First try to refill the free-list, concurrent sweeper threads
    // may have freed some objects in the meantime.
    RefillFreeList();

    // Retry the free list allocation.
    if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
                                      origin))
      return true;

    if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
                                 origin))
      return true;
  }

  if (is_compaction_space()) {
    // The main thread may have acquired all swept pages. Try to steal from
    // it. This can only happen during young generation evacuation.
    PagedSpace* main_space = heap()->paged_space(identity());
    Page* page = main_space->RemovePageSafe(size_in_bytes);
    if (page != nullptr) {
      AddPage(page);
      if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
                                        origin))
        return true;
    }
  }

  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
      heap()->CanExpandOldGeneration(AreaSize())) {
    if (TryExpand(size_in_bytes, origin)) {
      return true;
    }
  }

  // Try sweeping all pages.
  if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) {
    return true;
  }

  if (heap()->gc_state() != Heap::NOT_IN_GC && !heap()->force_oom()) {
    // Avoid OOM crash in the GC in order to invoke NearHeapLimitCallback after
    // GC and give it a chance to increase the heap limit.
    return TryExpand(size_in_bytes, origin);
  }
  return false;
}

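// Contributes to sweeping on the main thread (eagerly during the GC pause for
// compaction spaces, lazily otherwise), refills the free list and then retries
// a free-list allocation of |size_in_bytes|.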
bool PagedSpace::ContributeToSweepingMain(int required_freed_bytes,
                                          int max_pages, int size_in_bytes,
                                          AllocationOrigin origin) {
  // Clean up invalidated old-to-new refs for compaction spaces in the
  // final atomic pause.
  Sweeper::SweepingMode sweeping_mode =
      is_compaction_space() ? Sweeper::SweepingMode::kEagerDuringGC
                            : Sweeper::SweepingMode::kLazyOrConcurrent;

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
                                             required_freed_bytes, max_pages);
    RefillFreeList();
    return TryAllocationFromFreeListMain(size_in_bytes, origin);
  }
  return false;
}

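// Records the system pages overlapping [start, end) on |page| as active and
// accounts the newly committed physical memory for this space.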
void PagedSpace::AddRangeToActiveSystemPages(Page* page, Address start,
                                             Address end) {
  DCHECK_LE(page->address(), start);
  DCHECK_LT(start, end);
  DCHECK_LE(end, page->address() + Page::kPageSize);

  const size_t added_pages = page->active_system_pages()->Add(
      start - page->address(), end - page->address(),
      MemoryAllocator::GetCommitPageSizeBits());

  IncrementCommittedPhysicalMemory(added_pages *
                                   MemoryAllocator::GetCommitPageSize());
}

void PagedSpace::ReduceActiveSystemPages(
    Page* page, ActiveSystemPages active_system_pages) {
  const size_t reduced_pages =
      page->active_system_pages()->Reduce(active_system_pages);
  DecrementCommittedPhysicalMemory(reduced_pages *
                                   MemoryAllocator::GetCommitPageSize());
}

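// Makes sure the linear allocation area can accommodate |size_in_bytes| plus
// the worst-case alignment filler, refilling it via the slow path if the
// current area is too small.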
bool PagedSpace::EnsureAllocation(int size_in_bytes,
                                  AllocationAlignment alignment,
                                  AllocationOrigin origin,
                                  int* out_max_aligned_size) {
  if (!is_compaction_space()) {
    // Start incremental marking before the actual allocation; this allows the
    // allocation function to mark the object black when incremental marking
    // is running.
    heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
        heap()->GCFlagsForIncrementalMarking(),
        kGCCallbackScheduleIdleGarbageCollection);
  }

  // We don't know exactly how much filler we need to align until space is
  // allocated, so assume the worst case.
  size_in_bytes += Heap::GetMaximumFillToAlign(alignment);
  if (out_max_aligned_size) {
    *out_max_aligned_size = size_in_bytes;
  }
  if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
    return true;
  }
  return RefillLabMain(size_in_bytes, origin);
}

// -----------------------------------------------------------------------------
// MapSpace implementation

// TODO(dmercadier): use a heap instead of sorting like that.
// Using a heap will have multiple benefits:
// - for now, SortFreeList is only called after sweeping, which is somewhat
// late. Using a heap, sorting could be done online: FreeListCategories would
// be inserted in a heap (i.e., in a sorted manner).
// - SortFreeList is a bit fragile: any change to FreeListMap (or to
// MapSpace::free_list_) could break it.
void MapSpace::SortFreeList() {
  using LiveBytesPagePair = std::pair<size_t, Page*>;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(CountTotalPages());

  for (Page* p : *this) {
    free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
    pages.push_back(std::make_pair(p->allocated_bytes(), p));
  }

  // Sorting by least-allocated-bytes first.
  std::sort(pages.begin(), pages.end(),
            [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
              return a.first < b.first;
            });

  for (LiveBytesPagePair const& p : pages) {
    // Since AddCategory inserts in head position, it reverses the order
    // produced by the sort above: least-allocated-bytes will be Added first,
    // and will therefore be the last element (and the first one will be
    // most-allocated-bytes).
    free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
  }
}

#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject object) const { CHECK(object.IsMap()); }
#endif

}  // namespace internal
}  // namespace v8