// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/large-spaces.h"

#include "src/base/platform/mutex.h"
#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"

namespace v8 {
namespace internal {

// This check is here to ensure that the lower 32 bits of any real heap object
// can't overlap with the lower 32 bits of cleared weak reference value and
// therefore it's enough to compare only the lower 32 bits of a MaybeObject in
// order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);

LargePage::LargePage(Heap* heap, BaseSpace* space, size_t chunk_size,
                     Address area_start, Address area_end,
                     VirtualMemory reservation, Executability executable)
    : MemoryChunk(heap, space, chunk_size, area_start, area_end,
                  std::move(reservation), executable, PageSize::kLarge) {
  STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);

  if (executable && chunk_size > LargePage::kMaxCodePageSize) {
    FATAL("Code page is too large.");
  }

  SetFlag(MemoryChunk::LARGE_PAGE);
  list_node().Initialize();
}

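// Turns an already allocated MemoryChunk into a LargePage: the payload is
// marked as uninitialized for MSAN, the chunk is tagged as a large page and
// its list node is initialized.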
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());

  LargePage* page = static_cast<LargePage*>(chunk);
  page->SetFlag(MemoryChunk::LARGE_PAGE);
  page->list_node().Initialize();
  return page;
}

size_t LargeObjectSpace::Available() const {
  // We return zero here since we cannot take advantage of already allocated
  // large object memory.
  return 0;
}

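// Returns the address starting at which the unused tail of the page can be
// released after right-trimming the object, or 0 if nothing can be released.
// Executable pages are never shrunk.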
Address LargePage::GetAddressToShrink(Address object_address,
                                      size_t object_size) {
  if (executable() == EXECUTABLE) {
    return 0;
  }
  size_t used_size = ::RoundUp((object_address - address()) + object_size,
                               MemoryAllocator::GetCommitPageSize());
  if (used_size < CommittedPhysicalMemory()) {
    return address() + used_size;
  }
  return 0;
}

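// Drops all remembered-set entries (regular and typed, OLD_TO_NEW and
// OLD_TO_OLD) that point into [free_start, area_end()), i.e. into the part of
// the page that is about to be released by right-trimming.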
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
  RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}

// -----------------------------------------------------------------------------
// LargeObjectSpaceObjectIterator

LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
    LargeObjectSpace* space) {
  current_ = space->first_page();
}

HeapObject LargeObjectSpaceObjectIterator::Next() {
  if (current_ == nullptr) return HeapObject();

  HeapObject object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}

// -----------------------------------------------------------------------------
// OldLargeObjectSpace

LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, new NoFreeList()),
      size_(0),
      page_count_(0),
      objects_size_(0),
      pending_object_(0) {}

void LargeObjectSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    LargePage* page = first_page();
    LOG(heap()->isolate(),
        DeleteEvent("LargeObjectChunk",
                    reinterpret_cast<void*>(page->address())));
    memory_chunk_list_.Remove(page);
    heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
                                     page);
  }
}

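// Notifies allocation observers about an imminent large object allocation of
// object_size bytes.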
void LargeObjectSpace::AdvanceAndInvokeAllocationObservers(Address soon_object,
                                                           size_t object_size) {
  if (!allocation_counter_.IsActive()) return;

  if (object_size >= allocation_counter_.NextBytes()) {
    allocation_counter_.InvokeAllocationObservers(soon_object, object_size,
                                                  object_size);
  }

  // Large objects can be accounted immediately since no LAB is involved.
  allocation_counter_.AdvanceAllocationObservers(object_size);
}

AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
  return AllocateRaw(object_size, NOT_EXECUTABLE);
}

AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
                                                  Executability executable) {
  DCHECK(!FLAG_enable_third_party_heap);
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    return AllocationResult::Failure();
  }

  LargePage* page = AllocateLargePage(object_size, executable);
  if (page == nullptr) return AllocationResult::Failure();
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  HeapObject object = page->GetObject();
  UpdatePendingObject(object);
  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
      heap()->GCFlagsForIncrementalMarking(),
      kGCCallbackScheduleIdleGarbageCollection);
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }
  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
  page->InitializationMemoryFence();
  heap()->NotifyOldGenerationExpansion(identity(), page);
  AdvanceAndInvokeAllocationObservers(object.address(),
                                      static_cast<size_t>(object_size));
  return AllocationResult::FromObject(object);
}

AllocationResult OldLargeObjectSpace::AllocateRawBackground(
    LocalHeap* local_heap, int object_size) {
  return AllocateRawBackground(local_heap, object_size, NOT_EXECUTABLE);
}

AllocationResult OldLargeObjectSpace::AllocateRawBackground(
    LocalHeap* local_heap, int object_size, Executability executable) {
  DCHECK(!FLAG_enable_third_party_heap);
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
    return AllocationResult::Failure();
  }

  LargePage* page = AllocateLargePage(object_size, executable);
  if (page == nullptr) return AllocationResult::Failure();
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  HeapObject object = page->GetObject();
  heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }
  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
  page->InitializationMemoryFence();
  if (identity() == CODE_LO_SPACE) {
    heap()->isolate()->AddCodeMemoryChunk(page);
  }
  return AllocationResult::FromObject(object);
}

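// Requests a fresh large page from the memory allocator, registers it with
// this space under the allocation mutex and installs a filler object of
// object_size bytes so the heap stays iterable until the caller initializes
// the actual object.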
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
                                               Executability executable) {
  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
      this, object_size, executable);
  if (page == nullptr) return nullptr;
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));

  {
    base::MutexGuard guard(&allocation_mutex_);
    AddPage(page, object_size);
  }

  HeapObject object = page->GetObject();

  heap()->CreateFillerObjectAt(object.address(), object_size,
                               ClearRecordedSlots::kNo);
  return page;
}

size_t LargeObjectSpace::CommittedPhysicalMemory() const {
  // On a platform that provides lazy committing of memory, we over-account
  // the actually committed memory. There is no easy way right now to support
  // precise accounting of committed memory in large object space.
  return CommittedMemory();
}

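// Returns the LargePage that contains the given address, or nullptr if the
// address does not belong to this space. The lookup goes through the
// page-aligned chunk map.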
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
  base::MutexGuard guard(&allocation_mutex_);
  const Address key = BasicMemoryChunk::FromAddress(a)->address();
  auto it = chunk_map_.find(key);
  if (it != chunk_map_.end()) {
    LargePage* page = it->second;
    CHECK(page->Contains(a));
    return page;
  }
  return nullptr;
}

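// Clears the mark bits and live byte counts of all marked objects in this
// space; afterwards every object is white again.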
void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  LargeObjectSpaceObjectIterator it(this);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    if (marking_state->IsBlackOrGrey(obj)) {
      Marking::MarkWhite(marking_state->MarkBitFrom(obj));
      MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
      chunk->ProgressBar().ResetIfEnabled();
      marking_state->SetLiveBytes(chunk, 0);
    }
    DCHECK(marking_state->IsWhite(obj));
  }
}

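// Registers every kPageSize-aligned address covered by the page in the chunk
// map, so that FindPage() can map arbitrary inner addresses back to their
// page. RemoveChunkMapEntries undoes this.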
void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
  for (Address current = reinterpret_cast<Address>(page);
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_[current] = page;
  }
}

void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
  for (Address current = page->address();
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_.erase(current);
  }
}

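// Moves a surviving large page from the young generation large object space
// into this old space and clears its young generation flags.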
void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
  DCHECK(page->IsLargePage());
  DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
  DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
  PtrComprCageBase cage_base(heap()->isolate());
  size_t object_size = static_cast<size_t>(page->GetObject().Size(cage_base));
  static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
  page->ClearFlag(MemoryChunk::FROM_PAGE);
  AddPage(page, object_size);
}

void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
  size_ += static_cast<int>(page->size());
  AccountCommitted(page->size());
  objects_size_ += object_size;
  page_count_++;
  memory_chunk_list_.PushBack(page);
  page->set_owner(this);
  page->SetOldGenerationPageFlags(!is_off_thread() &&
                                  heap()->incremental_marking()->IsMarking());
}

void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
  size_ -= static_cast<int>(page->size());
  AccountUncommitted(page->size());
  objects_size_ -= object_size;
  page_count_--;
  memory_chunk_list_.Remove(page);
  page->set_owner(nullptr);
}

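// Sweeps the space: pages whose object is unmarked are released, pages whose
// object was right-trimmed are shrunk, and objects_size_ is recomputed from
// the surviving objects.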
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* current = first_page();
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  size_t surviving_object_size = 0;
  PtrComprCageBase cage_base(heap()->isolate());
  while (current) {
    LargePage* next_current = current->next_page();
    HeapObject object = current->GetObject();
    DCHECK(!marking_state->IsGrey(object));
    size_t size = static_cast<size_t>(object.Size(cage_base));
    if (marking_state->IsBlack(object)) {
      Address free_start;
      surviving_object_size += size;
      if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
          0) {
        DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
        current->ClearOutOfLiveRangeSlots(free_start);
        const size_t bytes_to_free =
            current->size() - (free_start - current->address());
        heap()->memory_allocator()->PartialFreeMemory(
            current, free_start, bytes_to_free,
            current->area_start() + object.Size(cage_base));
        size_ -= bytes_to_free;
        AccountUncommitted(bytes_to_free);
      }
    } else {
      RemovePage(current, size);
      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
                                       current);
    }
    current = next_current;
  }
  objects_size_ = surviving_object_size;
}

bool LargeObjectSpace::Contains(HeapObject object) const {
  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || ContainsSlow(object.address()));

  return owned;
}

bool LargeObjectSpace::ContainsSlow(Address addr) const {
  for (const LargePage* page : *this) {
    if (page->Contains(addr)) return true;
  }
  return false;
}

std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator(
    Heap* heap) {
  return std::unique_ptr<ObjectIterator>(
      new LargeObjectSpaceObjectIterator(this));
}

#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify(Isolate* isolate) {
  size_t external_backing_store_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  PtrComprCageBase cage_base(isolate);
  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject object = chunk->GetObject();
    Page* page = Page::FromHeapObject(object);
    CHECK(object.address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space or read-only space.
    Map map = object.map(cage_base);
    CHECK(map.IsMap(cage_base));
    CHECK(ReadOnlyHeap::Contains(map) ||
          isolate->heap()->space_for_maps()->Contains(map));

    // We have only the following types in the large object space:
    const bool is_valid_lo_space_object =                         //
        object.IsAbstractCode(cage_base) ||                       //
        object.IsBigInt(cage_base) ||                             //
        object.IsByteArray(cage_base) ||                          //
        object.IsContext(cage_base) ||                            //
        object.IsExternalString(cage_base) ||                     //
        object.IsFeedbackMetadata(cage_base) ||                   //
        object.IsFeedbackVector(cage_base) ||                     //
        object.IsFixedArray(cage_base) ||                         //
        object.IsFixedDoubleArray(cage_base) ||                   //
        object.IsFreeSpace(cage_base) ||                          //
        object.IsPreparseData(cage_base) ||                       //
        object.IsPropertyArray(cage_base) ||                      //
        object.IsScopeInfo() ||                                   //
        object.IsSeqString(cage_base) ||                          //
        object.IsSloppyArgumentsElements(cage_base) ||            //
        object.IsSwissNameDictionary() ||                         //
        object.IsThinString(cage_base) ||                         //
        object.IsUncompiledDataWithoutPreparseData(cage_base) ||  //
#if V8_ENABLE_WEBASSEMBLY                                         //
        object.IsWasmArray() ||                                   //
#endif                                                            //
        object.IsWeakArrayList(cage_base) ||                      //
        object.IsWeakFixedArray(cage_base);
    if (!is_valid_lo_space_object) {
      object.Print();
      FATAL("Found invalid Object (instance_type=%i) in large object space.",
            object.map(cage_base).instance_type());
    }

    // The object itself should look OK.
    object.ObjectVerify(isolate);

    if (!FLAG_verify_heap_skip_remembered_set) {
      heap()->VerifyRememberedSetFor(object);
    }

    // Byte arrays and strings don't have interior pointers.
    if (object.IsAbstractCode(cage_base)) {
      VerifyPointersVisitor code_visitor(heap());
      object.IterateBody(map, object.Size(cage_base), &code_visitor);
    } else if (object.IsFixedArray(cage_base)) {
      FixedArray array = FixedArray::cast(object);
      for (int j = 0; j < array.length(); j++) {
        Object element = array.get(j);
        if (element.IsHeapObject()) {
          HeapObject element_object = HeapObject::cast(element);
          CHECK(IsValidHeapObject(heap(), element_object));
          CHECK(element_object.map(cage_base).IsMap(cage_base));
        }
      }
    } else if (object.IsPropertyArray(cage_base)) {
      PropertyArray array = PropertyArray::cast(object);
      for (int j = 0; j < array.length(); j++) {
        Object property = array.get(j);
        if (property.IsHeapObject()) {
          HeapObject property_object = HeapObject::cast(property);
          CHECK(heap()->Contains(property_object));
          CHECK(property_object.map(cage_base).IsMap(cage_base));
        }
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
    }

    CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
    CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
  }
  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
  }
}
#endif  // VERIFY_HEAP

#ifdef DEBUG
void LargeObjectSpace::Print() {
  StdoutStream os;
  LargeObjectSpaceObjectIterator it(this);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    obj.Print(os);
  }
}
#endif  // DEBUG

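// Records the address of the most recently allocated, not yet initialized
// object under the pending allocation mutex, so that concurrent readers can
// recognize it as still being set up.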
void LargeObjectSpace::UpdatePendingObject(HeapObject object) {
  base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
  pending_object_.store(object.address(), std::memory_order_release);
}

OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap)
    : LargeObjectSpace(heap, LO_SPACE) {}

OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
    : LargeObjectSpace(heap, id) {}

NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
    : LargeObjectSpace(heap, NEW_LO_SPACE),
      capacity_(capacity) {}

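// Allocates a young generation large object. The allocation fails if the old
// generation could not absorb the object on promotion or, except for the very
// first object, if the space's capacity is already exhausted.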
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
  DCHECK(!FLAG_enable_third_party_heap);
  // Do not allocate more objects if promoting the existing object would exceed
  // the old generation capacity.
  if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
    return AllocationResult::Failure();
  }

  // Allocation for the first object must succeed independent of the capacity.
  if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
    return AllocationResult::Failure();
  }

  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
  if (page == nullptr) return AllocationResult::Failure();

  // The size of the first object may exceed the capacity.
  capacity_ = std::max(capacity_, SizeOfObjects());

  HeapObject result = page->GetObject();
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->SetFlag(MemoryChunk::TO_PAGE);
  UpdatePendingObject(result);
  if (FLAG_minor_mc) {
    page->AllocateYoungGenerationBitmap();
    heap()
        ->minor_mark_compact_collector()
        ->non_atomic_marking_state()
        ->ClearLiveness(page);
  }
  page->InitializationMemoryFence();
  DCHECK(page->IsLargePage());
  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
  AdvanceAndInvokeAllocationObservers(result.address(),
                                      static_cast<size_t>(object_size));
  return AllocationResult::FromObject(result);
}

size_t NewLargeObjectSpace::Available() const {
  return capacity_ - SizeOfObjects();
}

void NewLargeObjectSpace::Flip() {
  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    chunk->SetFlag(MemoryChunk::FROM_PAGE);
    chunk->ClearFlag(MemoryChunk::TO_PAGE);
  }
}

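// Frees all pages whose object is reported dead by the is_dead predicate and
// recomputes objects_size_ from the surviving objects.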
void NewLargeObjectSpace::FreeDeadObjects(
    const std::function<bool(HeapObject)>& is_dead) {
  bool is_marking = heap()->incremental_marking()->IsMarking();
  size_t surviving_object_size = 0;
  bool freed_pages = false;
  PtrComprCageBase cage_base(heap()->isolate());
  for (auto it = begin(); it != end();) {
    LargePage* page = *it;
    it++;
    HeapObject object = page->GetObject();
    size_t size = static_cast<size_t>(object.Size(cage_base));
    if (is_dead(object)) {
      freed_pages = true;
      RemovePage(page, size);
      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
                                       page);
      if (FLAG_concurrent_marking && is_marking) {
        heap()->concurrent_marking()->ClearMemoryChunkData(page);
      }
    } else {
      surviving_object_size += size;
    }
  }
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  objects_size_ = surviving_object_size;
  if (freed_pages) {
    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  }
}

void NewLargeObjectSpace::SetCapacity(size_t capacity) {
  capacity_ = std::max(capacity, SizeOfObjects());
}

CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    : OldLargeObjectSpace(heap, CODE_LO_SPACE),
      chunk_map_(kInitialChunkMapCapacity) {}

AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
  DCHECK(!FLAG_enable_third_party_heap);
  return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}

AllocationResult CodeLargeObjectSpace::AllocateRawBackground(
    LocalHeap* local_heap, int object_size) {
  DCHECK(!FLAG_enable_third_party_heap);
  return OldLargeObjectSpace::AllocateRawBackground(local_heap, object_size,
                                                    EXECUTABLE);
}

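// Code large pages additionally maintain the address-to-page chunk map; on
// removal the page is also unregistered from the isolate's code memory chunk
// tracking.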
void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
  OldLargeObjectSpace::AddPage(page, object_size);
  InsertChunkMapEntries(page);
}

void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
  RemoveChunkMapEntries(page);
  heap()->isolate()->RemoveCodeMemoryChunk(page);
  OldLargeObjectSpace::RemovePage(page, object_size);
}

}  // namespace internal
}  // namespace v8