// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/large-spaces.h"

#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/sanitizer/msan.h"
#include "src/utils/ostreams.h"

namespace v8 {
namespace internal {

// This check is here to ensure that the lower 32 bits of any real heap object
// can't overlap with the lower 32 bits of a cleared weak reference value and
// therefore it's enough to compare only the lower 32 bits of a MaybeObject in
// order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);

LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());

  LargePage* page = static_cast<LargePage*>(chunk);
  page->SetFlag(MemoryChunk::LARGE_PAGE);
  page->list_node().Initialize();
  return page;
}

size_t LargeObjectSpace::Available() {
  // We return zero here since we cannot take advantage of already allocated
  // large object memory.
  return 0;
}

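// Returns the commit-page-aligned address to which this page could be shrunk
// while still holding the given object, or 0 if the page is executable or
// cannot be shrunk any further.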
Address LargePage::GetAddressToShrink(Address object_address,
                                      size_t object_size) {
  if (executable() == EXECUTABLE) {
    return 0;
  }
  size_t used_size = ::RoundUp((object_address - address()) + object_size,
                               MemoryAllocator::GetCommitPageSize());
  if (used_size < CommittedPhysicalMemory()) {
    return address() + used_size;
  }
  return 0;
}

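// Removes all recorded slots, untyped and typed, in the range
// [free_start, area_end()) from the OLD_TO_NEW and OLD_TO_OLD remembered sets;
// called before that tail of the page is released.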
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
  DCHECK_NULL(this->sweeping_slot_set());
  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
  RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}

// -----------------------------------------------------------------------------
// LargeObjectSpaceObjectIterator

LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
    LargeObjectSpace* space) {
  current_ = space->first_page();
}

HeapObject LargeObjectSpaceObjectIterator::Next() {
  if (current_ == nullptr) return HeapObject();

  HeapObject object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}

// -----------------------------------------------------------------------------
// OldLargeObjectSpace

LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, new NoFreeList()),
      size_(0),
      page_count_(0),
      objects_size_(0) {}

void LargeObjectSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    LargePage* page = first_page();
    LOG(heap()->isolate(),
        DeleteEvent("LargeObjectChunk",
                    reinterpret_cast<void*>(page->address())));
    memory_chunk_list_.Remove(page);
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
  }
}

void LargeObjectSpace::AdvanceAndInvokeAllocationObservers(Address soon_object,
                                                           size_t object_size) {
  if (!allocation_counter_.IsActive()) return;

  if (object_size >= allocation_counter_.NextBytes()) {
    allocation_counter_.InvokeAllocationObservers(soon_object, object_size,
                                                  object_size);
  }

  // Large objects can be accounted immediately since no LAB is involved.
  allocation_counter_.AdvanceAllocationObservers(object_size);
}

AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
  return AllocateRaw(object_size, NOT_EXECUTABLE);
}

AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
                                                  Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, executable);
  if (page == nullptr) return AllocationResult::Retry(identity());
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  HeapObject object = page->GetObject();
  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
      heap()->GCFlagsForIncrementalMarking(),
      kGCCallbackScheduleIdleGarbageCollection);
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }
  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
  page->InitializationMemoryFence();
  heap()->NotifyOldGenerationExpansion(identity(), page);
  AdvanceAndInvokeAllocationObservers(object.address(),
                                      static_cast<size_t>(object_size));
  return object;
}

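// Variant of AllocateRaw used from background threads via a LocalHeap. Only
// non-executable objects are allocated here, and allocation observers are not
// notified.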
AllocationResult OldLargeObjectSpace::AllocateRawBackground(
    LocalHeap* local_heap, int object_size) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGenerationBackground(object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
  if (page == nullptr) return AllocationResult::Retry(identity());
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  HeapObject object = page->GetObject();
  heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }
  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
  page->InitializationMemoryFence();
  return object;
}

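// Reserves a new page large enough for object_size bytes, registers it with
// this space under the allocation mutex, and writes a filler object of
// object_size at the area start so the page holds a valid object until the
// caller initializes the real one.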
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
                                               Executability executable) {
  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == nullptr) return nullptr;
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));

  {
    base::MutexGuard guard(&allocation_mutex_);
    AddPage(page, object_size);
  }

  HeapObject object = page->GetObject();

  heap()->CreateFillerObjectAt(object.address(), object_size,
                               ClearRecordedSlots::kNo);
  return page;
}

size_t LargeObjectSpace::CommittedPhysicalMemory() {
  // On a platform that provides lazy committing of memory, we over-account
  // the actually committed memory. There is no easy way right now to support
  // precise accounting of committed memory in large object space.
  return CommittedMemory();
}

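// Looks up the LargePage containing the given code address. Relies on the
// chunk map maintained by InsertChunkMapEntries(), which has an entry for
// every kPageSize-aligned address covered by a page.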
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
  const Address key = BasicMemoryChunk::FromAddress(a)->address();
  auto it = chunk_map_.find(key);
  if (it != chunk_map_.end()) {
    LargePage* page = it->second;
    CHECK(page->Contains(a));
    return page;
  }
  return nullptr;
}

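// Resets the marking state of all marked (black or grey) objects in the space:
// mark bits are cleared back to white, empty OLD_TO_NEW buckets are freed, the
// progress bar is reset, and live byte counts are set to zero.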
void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  LargeObjectSpaceObjectIterator it(this);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    if (marking_state->IsBlackOrGrey(obj)) {
      Marking::MarkWhite(marking_state->MarkBitFrom(obj));
      MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
      chunk->ResetProgressBar();
      marking_state->SetLiveBytes(chunk, 0);
    }
    DCHECK(marking_state->IsWhite(obj));
  }
}

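// Registers every kPageSize-aligned address covered by the page in the chunk
// map, so FindPage() can resolve addresses that point into the middle of a
// large code object. RemoveChunkMapEntries() below undoes this.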
void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
  for (Address current = reinterpret_cast<Address>(page);
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_[current] = page;
  }
}

void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
  for (Address current = page->address();
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_.erase(current);
  }
}

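// Moves a surviving page from the new large object space into this (old)
// space: the page is removed from its current owner, its FROM_PAGE flag is
// cleared, and it is re-added here with old generation page flags.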
void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
  DCHECK(page->IsLargePage());
  DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
  DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
  size_t object_size = static_cast<size_t>(page->GetObject().Size());
  static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
  page->ClearFlag(MemoryChunk::FROM_PAGE);
  AddPage(page, object_size);
}

void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
  size_ += static_cast<int>(page->size());
  AccountCommitted(page->size());
  objects_size_ += object_size;
  page_count_++;
  memory_chunk_list_.PushBack(page);
  page->set_owner(this);
  page->SetOldGenerationPageFlags(!is_off_thread() &&
                                  heap()->incremental_marking()->IsMarking());
}

void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
  size_ -= static_cast<int>(page->size());
  AccountUncommitted(page->size());
  objects_size_ -= object_size;
  page_count_--;
  memory_chunk_list_.Remove(page);
  page->set_owner(nullptr);
}

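// Sweeps the space: pages whose object is not marked black are released, and
// surviving non-executable pages are shrunk to the committed pages actually
// used by their (possibly right-trimmed) object.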
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* current = first_page();
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  size_t surviving_object_size = 0;
  while (current) {
    LargePage* next_current = current->next_page();
    HeapObject object = current->GetObject();
    DCHECK(!marking_state->IsGrey(object));
    size_t size = static_cast<size_t>(object.Size());
    if (marking_state->IsBlack(object)) {
      Address free_start;
      surviving_object_size += size;
      if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
          0) {
        DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
        current->ClearOutOfLiveRangeSlots(free_start);
        const size_t bytes_to_free =
            current->size() - (free_start - current->address());
        heap()->memory_allocator()->PartialFreeMemory(
            current, free_start, bytes_to_free,
            current->area_start() + object.Size());
        size_ -= bytes_to_free;
        AccountUncommitted(bytes_to_free);
      }
    } else {
      RemovePage(current, size);
      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
          current);
    }
    current = next_current;
  }
  objects_size_ = surviving_object_size;
}

bool LargeObjectSpace::Contains(HeapObject object) {
  BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || ContainsSlow(object.address()));

  return owned;
}

bool LargeObjectSpace::ContainsSlow(Address addr) {
  for (LargePage* page : *this) {
    if (page->Contains(addr)) return true;
  }
  return false;
}

std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator(
    Heap* heap) {
  return std::unique_ptr<ObjectIterator>(
      new LargeObjectSpaceObjectIterator(this));
}

#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify(Isolate* isolate) {
  size_t external_backing_store_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject object = chunk->GetObject();
    Page* page = Page::FromHeapObject(object);
    CHECK(object.address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space or read-only space.
    Map map = object.map();
    CHECK(map.IsMap());
    CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));

    // We have only the following types in the large object space:
    if (!(object.IsAbstractCode() || object.IsSeqString() ||
          object.IsExternalString() || object.IsThinString() ||
          object.IsFixedArray() || object.IsFixedDoubleArray() ||
          object.IsWeakFixedArray() || object.IsWeakArrayList() ||
          object.IsPropertyArray() || object.IsByteArray() ||
          object.IsFeedbackVector() || object.IsBigInt() ||
          object.IsFreeSpace() || object.IsFeedbackMetadata() ||
          object.IsContext() || object.IsUncompiledDataWithoutPreparseData() ||
          object.IsPreparseData()) &&
        !FLAG_young_generation_large_objects) {
      FATAL("Found invalid Object (instance_type=%i) in large object space.",
            object.map().instance_type());
    }

    // The object itself should look OK.
    object.ObjectVerify(isolate);

    if (!FLAG_verify_heap_skip_remembered_set) {
      heap()->VerifyRememberedSetFor(object);
    }

    // Byte arrays and strings don't have interior pointers.
    if (object.IsAbstractCode()) {
      VerifyPointersVisitor code_visitor(heap());
      object.IterateBody(map, object.Size(), &code_visitor);
    } else if (object.IsFixedArray()) {
      FixedArray array = FixedArray::cast(object);
      for (int j = 0; j < array.length(); j++) {
        Object element = array.get(j);
        if (element.IsHeapObject()) {
          HeapObject element_object = HeapObject::cast(element);
          CHECK(IsValidHeapObject(heap(), element_object));
          CHECK(element_object.map().IsMap());
        }
      }
    } else if (object.IsPropertyArray()) {
      PropertyArray array = PropertyArray::cast(object);
      for (int j = 0; j < array.length(); j++) {
        Object property = array.get(j);
        if (property.IsHeapObject()) {
          HeapObject property_object = HeapObject::cast(property);
          CHECK(heap()->Contains(property_object));
          CHECK(property_object.map().IsMap());
        }
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
    }
  }
  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
  }
}
#endif

#ifdef DEBUG
void LargeObjectSpace::Print() {
  StdoutStream os;
  LargeObjectSpaceObjectIterator it(this);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    obj.Print(os);
  }
}
#endif  // DEBUG

OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap)
    : LargeObjectSpace(heap, LO_SPACE) {}

OldLargeObjectSpace::OldLargeObjectSpace(Heap* heap, AllocationSpace id)
    : LargeObjectSpace(heap, id) {}

NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
    : LargeObjectSpace(heap, NEW_LO_SPACE),
      pending_object_(0),
      capacity_(capacity) {}

AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
  // Do not allocate more objects if promoting the existing object would exceed
  // the old generation capacity.
  if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
    return AllocationResult::Retry(identity());
  }

  // Allocation of the first object must succeed independently of the
  // capacity.
  if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
  if (page == nullptr) return AllocationResult::Retry(identity());

  // The size of the first object may exceed the capacity.
  capacity_ = Max(capacity_, SizeOfObjects());

  HeapObject result = page->GetObject();
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->SetFlag(MemoryChunk::TO_PAGE);
  pending_object_.store(result.address(), std::memory_order_relaxed);
#ifdef ENABLE_MINOR_MC
  if (FLAG_minor_mc) {
    page->AllocateYoungGenerationBitmap();
    heap()
        ->minor_mark_compact_collector()
        ->non_atomic_marking_state()
        ->ClearLiveness(page);
  }
#endif  // ENABLE_MINOR_MC
  page->InitializationMemoryFence();
  DCHECK(page->IsLargePage());
  DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
  AdvanceAndInvokeAllocationObservers(result.address(),
                                      static_cast<size_t>(object_size));
  return result;
}

size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }

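// Turns every to-space page into a from-space page by flipping its page flags;
// afterwards the space holds only FROM_PAGE pages.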
void NewLargeObjectSpace::Flip() {
  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    chunk->SetFlag(MemoryChunk::FROM_PAGE);
    chunk->ClearFlag(MemoryChunk::TO_PAGE);
  }
}

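// Releases every page whose object the is_dead predicate reports as dead and
// recomputes objects_size_ from the survivors. Freed pages are queued and
// unmapped in one batch at the end.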
void NewLargeObjectSpace::FreeDeadObjects(
    const std::function<bool(HeapObject)>& is_dead) {
  bool is_marking = heap()->incremental_marking()->IsMarking();
  size_t surviving_object_size = 0;
  bool freed_pages = false;
  for (auto it = begin(); it != end();) {
    LargePage* page = *it;
    it++;
    HeapObject object = page->GetObject();
    size_t size = static_cast<size_t>(object.Size());
    if (is_dead(object)) {
      freed_pages = true;
      RemovePage(page, size);
      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
      if (FLAG_concurrent_marking && is_marking) {
        heap()->concurrent_marking()->ClearMemoryChunkData(page);
      }
    } else {
      surviving_object_size += size;
    }
  }
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  objects_size_ = surviving_object_size;
  if (freed_pages) {
    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  }
}

void NewLargeObjectSpace::SetCapacity(size_t capacity) {
  capacity_ = Max(capacity, SizeOfObjects());
}

CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    : OldLargeObjectSpace(heap, CODE_LO_SPACE),
      chunk_map_(kInitialChunkMapCapacity) {}

AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
  return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}

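// Code pages additionally maintain the chunk map used by FindPage(), and on
// removal notify the isolate via RemoveCodeMemoryChunk().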
void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
  OldLargeObjectSpace::AddPage(page, object_size);
  InsertChunkMapEntries(page);
}

void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
  RemoveChunkMapEntries(page);
  heap()->isolate()->RemoveCodeMemoryChunk(page);
  OldLargeObjectSpace::RemovePage(page, object_size);
}

}  // namespace internal
}  // namespace v8