// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/heap.h"

#include <atomic>
#include <cinttypes>
#include <iomanip>
#include <memory>
#include <unordered_map>
#include <unordered_set>

#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/base/bits.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
#include "src/base/once.h"
#include "src/base/platform/mutex.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/accessors.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/embedder-state.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/execution/microtask-queue.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/base/stack.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/code-range.h"
#include "src/heap/code-stats.h"
#include "src/heap/collection-barrier.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/finalization-registry-cleanup-task.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap-layout-tracer.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/large-spaces.h"
#include "src/heap/local-heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-barrier-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/parked-scope.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
#include "src/init/bootstrapper.h"
#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/numbers/conversions.h"
#include "src/objects/data-handler.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots-inl.h"
#include "src/regexp/regexp.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot.h"
#include "src/strings/string-stream.h"
#include "src/strings/unicode-decoder.h"
#include "src/strings/unicode-inl.h"
#include "src/tracing/trace-event.h"
#include "src/utils/utils-inl.h"
#include "src/utils/utils.h"

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
#include "src/heap/conservative-stack-visitor.h"
#endif

#include "src/base/platform/wrappers.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8 {
namespace internal {

#ifdef V8_ENABLE_THIRD_PARTY_HEAP
Isolate* Heap::GetIsolateFromWritableObject(HeapObject object) {
  return reinterpret_cast<Isolate*>(
      third_party_heap::Heap::GetIsolate(object.address()));
}
#endif

// These are outside the Heap class so they can be forward-declared
// in heap-write-barrier-inl.h.
bool Heap_PageFlagsAreConsistent(HeapObject object) {
  return Heap::PageFlagsAreConsistent(object);
}

bool Heap_ValueMightRequireGenerationalWriteBarrier(HeapObject value) {
  if (!value.IsCode()) return true;
  // Code objects are never in new space and thus don't require generational
  // write barrier.
  DCHECK(!ObjectInYoungGeneration(value));
  return false;
}

void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
                                  HeapObject value) {
  Heap::GenerationalBarrierSlow(object, slot, value);
}

void Heap_WriteBarrierForCodeSlow(Code host) {
  Heap::WriteBarrierForCodeSlow(host);
}

void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
                                         HeapObject object) {
  Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
}

void Heap_GenerationalEphemeronKeyBarrierSlow(Heap* heap,
                                              EphemeronHashTable table,
                                              Address slot) {
  heap->RecordEphemeronKeyWrite(table, slot);
}

void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
  DCHECK_EQ(Smi::zero(), construct_stub_create_deopt_pc_offset());
  set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
  DCHECK_EQ(Smi::zero(), construct_stub_invoke_deopt_pc_offset());
  set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
  DCHECK_EQ(Smi::zero(), interpreter_entry_return_pc_offset());
  set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetSerializedObjects(FixedArray objects) {
  DCHECK(isolate()->serializer_enabled());
  set_serialized_objects(objects);
}

void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
  DCHECK(isolate()->serializer_enabled());
  set_serialized_global_proxy_sizes(sizes);
}

void Heap::SetBasicBlockProfilingData(Handle<ArrayList> list) {
  set_basic_block_profiling_data(*list);
}

bool Heap::GCCallbackTuple::operator==(
    const Heap::GCCallbackTuple& other) const {
  return other.callback == callback && other.data == data;
}

class ScavengeTaskObserver : public AllocationObserver {
 public:
  ScavengeTaskObserver(Heap* heap, intptr_t step_size)
      : AllocationObserver(step_size), heap_(heap) {}

  void Step(int bytes_allocated, Address, size_t) override {
    heap_->ScheduleScavengeTaskIfNeeded();
  }

 private:
  Heap* heap_;
};

Heap::Heap()
    : isolate_(isolate()),
      heap_allocator_(this),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      global_pretenuring_feedback_(kInitialFeedbackCapacity),
      safepoint_(std::make_unique<IsolateSafepoint>(this)),
      external_string_table_(this),
      allocation_type_for_in_place_internalizable_strings_(
          isolate()->OwnsStringTable() ? AllocationType::kOld
                                       : AllocationType::kSharedOld),
      collection_barrier_(new CollectionBarrier(this)) {
  // Ensure old_generation_size_ is a multiple of kPageSize.
  DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));

  max_regular_code_object_size_ = MemoryChunkLayout::MaxRegularCodeObjectSize();

  set_native_contexts_list(Smi::zero());
  set_allocation_sites_list(Smi::zero());
  set_dirty_js_finalization_registries_list(Smi::zero());
  set_dirty_js_finalization_registries_list_tail(Smi::zero());

  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(kNullAddress, false);
}

Heap::~Heap() = default;

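// The maximum amount of memory the heap may reserve: two semispaces, a new
// large object space capped at the semispace size, and the old generation
// limit.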
size_t Heap::MaxReserved() {
  const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
  return static_cast<size_t>(2 * max_semi_space_size_ +
                             kMaxNewLargeObjectSpaceSize +
                             max_old_generation_size());
}

size_t Heap::YoungGenerationSizeFromOldGenerationSize(size_t old_generation) {
  // Compute the semi space size and cap it.
  size_t ratio = old_generation <= kOldGenerationLowMemory
                     ? kOldGenerationToSemiSpaceRatioLowMemory
                     : kOldGenerationToSemiSpaceRatio;
  size_t semi_space = old_generation / ratio;
  semi_space = std::min({semi_space, kMaxSemiSpaceSize});
  semi_space = std::max({semi_space, kMinSemiSpaceSize});
  semi_space = RoundUp(semi_space, Page::kPageSize);
  return YoungGenerationSizeFromSemiSpaceSize(semi_space);
}

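// Derives a heap size from the amount of physical memory: the old generation
// gets physical_memory / kPhysicalMemoryToOldGenerationRatio (scaled by
// kHeapLimitMultiplier), clamped between V8HeapTrait::kMinSize and
// MaxOldGenerationSize(), and the young generation is sized from that result.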
size_t Heap::HeapSizeFromPhysicalMemory(uint64_t physical_memory) {
  // Compute the old generation size and cap it.
  uint64_t old_generation = physical_memory /
                            kPhysicalMemoryToOldGenerationRatio *
                            kHeapLimitMultiplier;
  old_generation =
      std::min(old_generation,
               static_cast<uint64_t>(MaxOldGenerationSize(physical_memory)));
  old_generation =
      std::max({old_generation, static_cast<uint64_t>(V8HeapTrait::kMinSize)});
  old_generation = RoundUp(old_generation, Page::kPageSize);

  size_t young_generation = YoungGenerationSizeFromOldGenerationSize(
      static_cast<size_t>(old_generation));
  return static_cast<size_t>(old_generation) + young_generation;
}

void Heap::GenerationSizesFromHeapSize(size_t heap_size,
                                       size_t* young_generation_size,
                                       size_t* old_generation_size) {
  // Initialize values for the case when the given heap size is too small.
  *young_generation_size = 0;
  *old_generation_size = 0;
  // Binary search for the largest old generation size that fits into the given
  // heap limit, considering the correspondingly sized young generation.
  size_t lower = 0, upper = heap_size;
  while (lower + 1 < upper) {
    size_t old_generation = lower + (upper - lower) / 2;
    size_t young_generation =
        YoungGenerationSizeFromOldGenerationSize(old_generation);
    if (old_generation + young_generation <= heap_size) {
      // This size configuration fits into the given heap limit.
      *young_generation_size = young_generation;
      *old_generation_size = old_generation;
      lower = old_generation;
    } else {
      upper = old_generation;
    }
  }
}

size_t Heap::MinYoungGenerationSize() {
  return YoungGenerationSizeFromSemiSpaceSize(kMinSemiSpaceSize);
}

size_t Heap::MinOldGenerationSize() {
  size_t paged_space_count =
      LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
  return paged_space_count * Page::kPageSize;
}

size_t Heap::AllocatorLimitOnMaxOldGenerationSize() {
#ifdef V8_COMPRESS_POINTERS
  // Isolate and the young generation are also allocated on the heap.
  return kPtrComprCageReservationSize -
         YoungGenerationSizeFromSemiSpaceSize(kMaxSemiSpaceSize) -
         RoundUp(sizeof(Isolate), size_t{1} << kPageSizeBits);
#else
  return std::numeric_limits<size_t>::max();
#endif
}

size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
  size_t max_size = V8HeapTrait::kMaxSize;
  // Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
  // systems with physical memory bigger than 16GB. The physical memory
  // is rounded up to GB.
  constexpr bool x64_bit = Heap::kHeapLimitMultiplier >= 2;
  if (FLAG_huge_max_old_generation_size && x64_bit &&
      (physical_memory + 512 * MB) / GB >= 16) {
    DCHECK_EQ(max_size / GB, 2);
    max_size *= 2;
  }
  return std::min(max_size, AllocatorLimitOnMaxOldGenerationSize());
}

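// The young generation consists of two semispaces plus the new large object
// space, which is sized at kNewLargeObjectSpaceToSemiSpaceRatio times the
// semispace size; the two helpers below convert between the two quantities.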
size_t Heap::YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size) {
  return semi_space_size * (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
}

size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
    size_t young_generation_size) {
  return young_generation_size / (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
}

size_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  if (FLAG_enable_third_party_heap) return tp_heap_->Capacity();

  return NewSpaceCapacity() + OldGenerationCapacity();
}

size_t Heap::OldGenerationCapacity() {
  if (!HasBeenSetUp()) return 0;
  PagedSpaceIterator spaces(this);
  size_t total = 0;
  for (PagedSpace* space = spaces.Next(); space != nullptr;
       space = spaces.Next()) {
    total += space->Capacity();
  }
  return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
}

size_t Heap::CommittedOldGenerationMemory() {
  if (!HasBeenSetUp()) return 0;

  PagedSpaceIterator spaces(this);
  size_t total = 0;
  for (PagedSpace* space = spaces.Next(); space != nullptr;
       space = spaces.Next()) {
    total += space->CommittedMemory();
  }
  return total + lo_space_->Size() + code_lo_space_->Size();
}

size_t Heap::CommittedMemoryOfUnmapper() {
  if (!HasBeenSetUp()) return 0;

  return memory_allocator()->unmapper()->CommittedBufferedMemory();
}

size_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  size_t new_space_committed = new_space_ ? new_space_->CommittedMemory() : 0;
  size_t new_lo_space_committed = new_lo_space_ ? new_lo_space_->Size() : 0;

  return new_space_committed + new_lo_space_committed +
         CommittedOldGenerationMemory();
}

size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  size_t total = 0;
  for (SpaceIterator it(this); it.HasNext();) {
    total += it.Next()->CommittedPhysicalMemory();
  }

  return total;
}

size_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return static_cast<size_t>(memory_allocator()->SizeExecutable());
}

void Heap::UpdateMaximumCommitted() {
  if (!HasBeenSetUp()) return;

  const size_t current_committed_memory = CommittedMemory();
  if (current_committed_memory > maximum_committed_) {
    maximum_committed_ = current_committed_memory;
  }
}

size_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  size_t total = 0;

  for (SpaceIterator it(this); it.HasNext();) {
    total += it.Next()->Available();
  }

  total += memory_allocator()->Available();
  return total;
}

bool Heap::CanExpandOldGeneration(size_t size) {
  if (force_oom_ || force_gc_on_next_allocation_) return false;
  if (OldGenerationCapacity() + size > max_old_generation_size()) return false;
  // The OldGenerationCapacity does not account for compaction spaces used
  // during evacuation. Ensure that expanding the old generation does not push
  // the total allocated memory size over the maximum heap size.
  return memory_allocator()->Size() + size <= MaxReserved();
}

bool Heap::CanExpandOldGenerationBackground(LocalHeap* local_heap,
                                            size_t size) {
  if (force_oom_) return false;

  // When the heap is tearing down, GC requests from background threads are
  // not served and the threads are allowed to expand the heap to avoid OOM.
  return gc_state() == TEAR_DOWN || IsMainThreadParked(local_heap) ||
         memory_allocator()->Size() + size <= MaxReserved();
}

bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) {
  size_t new_space_capacity = NewSpaceCapacity();
  size_t new_lo_space_capacity = new_lo_space_ ? new_lo_space_->Size() : 0;

  // Over-estimate the new space size using capacity to allow some slack.
  return CanExpandOldGeneration(size + new_space_capacity +
                                new_lo_space_capacity);
}

bool Heap::HasBeenSetUp() const {
  // We will always have an old space when the heap is set up.
  return old_space_ != nullptr;
}

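// Chooses the collector for the upcoming GC: requests outside the young
// generation, flag/stress overrides, incremental marking that needs
// finalization, or the risk that a scavenge could not promote all objects
// force a full mark-compact; otherwise a young generation collection is used.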
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE && space != NEW_LO_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return GarbageCollector::MARK_COMPACTOR;
  }

  if (FLAG_gc_global || ShouldStressCompaction() || !new_space()) {
    *reason = "GC in old space forced by flags";
    return GarbageCollector::MARK_COMPACTOR;
  }

  if (incremental_marking()->NeedsFinalization() &&
      AllocationLimitOvershotByLargeMargin()) {
    *reason = "Incremental marking needs finalization";
    return GarbageCollector::MARK_COMPACTOR;
  }

  if (FLAG_separate_gc_phases && incremental_marking()->IsMarking()) {
    // TODO(v8:12503): Remove previous condition when flag gets removed.
    *reason = "Incremental marking forced finalization";
    return GarbageCollector::MARK_COMPACTOR;
  }

  if (!CanPromoteYoungAndExpandOldGeneration(0)) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "scavenge might not succeed";
    return GarbageCollector::MARK_COMPACTOR;
  }

  DCHECK(!FLAG_single_generation);
  DCHECK(!FLAG_gc_global);
  // Default
  *reason = nullptr;
  return YoungGenerationCollector();
}

void Heap::SetGCState(HeapState state) {
  gc_state_.store(state, std::memory_order_relaxed);
}

bool Heap::IsGCWithoutStack() const {
  return local_embedder_heap_tracer()->embedder_stack_state() ==
         cppgc::EmbedderStackState::kNoHeapPointers;
}

void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintIsolate(isolate_,
               "Memory allocator,       used: %6zu KB,"
               " available: %6zu KB\n",
               memory_allocator()->Size() / KB,
               memory_allocator()->Available() / KB);
  PrintIsolate(isolate_,
               "Read-only space,        used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               read_only_space_->Size() / KB, size_t{0},
               read_only_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_,
               "New space,              used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               NewSpaceSize() / KB, new_space_->Available() / KB,
               new_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_,
               "New large object space, used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               new_lo_space_->SizeOfObjects() / KB,
               new_lo_space_->Available() / KB,
               new_lo_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_,
               "Old space,              used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_,
               "Code space,             used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
  if (map_space()) {
    PrintIsolate(isolate_,
                 "Map space,              used: %6zu KB"
                 ", available: %6zu KB"
                 ", committed: %6zu KB\n",
                 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
                 map_space_->CommittedMemory() / KB);
  }
  PrintIsolate(isolate_,
               "Large object space,     used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_,
               "Code large object space,     used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               code_lo_space_->SizeOfObjects() / KB,
               code_lo_space_->Available() / KB,
               code_lo_space_->CommittedMemory() / KB);
  ReadOnlySpace* const ro_space = read_only_space_;
  PrintIsolate(isolate_,
               "All spaces,             used: %6zu KB"
               ", available: %6zu KB"
               ", committed: %6zu KB\n",
               (this->SizeOfObjects() + ro_space->Size()) / KB,
               (this->Available()) / KB,
               (this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
  PrintIsolate(isolate_,
               "Unmapper buffering %zu chunks of committed: %6zu KB\n",
               memory_allocator()->unmapper()->NumberOfCommittedChunks(),
               CommittedMemoryOfUnmapper() / KB);
  PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
               external_memory_.total() / KB);
  PrintIsolate(isolate_, "Backing store memory: %6" PRIu64 " KB\n",
               backing_store_bytes() / KB);
  PrintIsolate(isolate_, "External memory global %zu KB\n",
               external_memory_callback_() / KB);
  PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
               total_gc_time_ms_);
}

void Heap::PrintFreeListsStats() {
  DCHECK(FLAG_trace_gc_freelists);

  if (FLAG_trace_gc_freelists_verbose) {
    PrintIsolate(isolate_,
                 "Freelists statistics per Page: "
                 "[category: length || total free bytes]\n");
  }

  std::vector<int> categories_lengths(
      old_space()->free_list()->number_of_categories(), 0);
  std::vector<size_t> categories_sums(
      old_space()->free_list()->number_of_categories(), 0);
  unsigned int pageCnt = 0;

  // This loop computes freelist lengths and sums.
  // If FLAG_trace_gc_freelists_verbose is enabled, it also prints
  // the stats of each FreeListCategory of each Page.
  for (Page* page : *old_space()) {
    std::ostringstream out_str;

    if (FLAG_trace_gc_freelists_verbose) {
      out_str << "Page " << std::setw(4) << pageCnt;
    }

    for (int cat = kFirstCategory;
         cat <= old_space()->free_list()->last_category(); cat++) {
      FreeListCategory* free_list =
          page->free_list_category(static_cast<FreeListCategoryType>(cat));
      int length = free_list->FreeListLength();
      size_t sum = free_list->SumFreeList();

      if (FLAG_trace_gc_freelists_verbose) {
        out_str << "[" << cat << ": " << std::setw(4) << length << " || "
                << std::setw(6) << sum << " ]"
                << (cat == old_space()->free_list()->last_category() ? "\n"
                                                                     : ", ");
      }
      categories_lengths[cat] += length;
      categories_sums[cat] += sum;
    }

    if (FLAG_trace_gc_freelists_verbose) {
      PrintIsolate(isolate_, "%s", out_str.str().c_str());
    }

    pageCnt++;
  }

  // Print statistics about old_space (pages, free/wasted/used memory...).
  PrintIsolate(
      isolate_,
      "%d pages. Free space: %.1f MB (waste: %.2f). "
      "Usage: %.1f/%.1f (MB) -> %.2f%%.\n",
      pageCnt, static_cast<double>(old_space_->Available()) / MB,
      static_cast<double>(old_space_->Waste()) / MB,
      static_cast<double>(old_space_->Size()) / MB,
      static_cast<double>(old_space_->Capacity()) / MB,
      static_cast<double>(old_space_->Size()) / old_space_->Capacity() * 100);

  // Print global statistics of each FreeListCategory (length & sum).
  PrintIsolate(isolate_,
               "FreeLists global statistics: "
               "[category: length || total free KB]\n");
  std::ostringstream out_str;
  for (int cat = kFirstCategory;
       cat <= old_space()->free_list()->last_category(); cat++) {
    out_str << "[" << cat << ": " << categories_lengths[cat] << " || "
            << std::fixed << std::setprecision(2)
            << static_cast<double>(categories_sums[cat]) / KB << " KB]"
            << (cat == old_space()->free_list()->last_category() ? "\n" : ", ");
  }
  PrintIsolate(isolate_, "%s", out_str.str().c_str());
}

void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
  HeapStatistics stats;
  reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats);

// clang-format off
#define DICT(s) "{" << s << "}"
#define LIST(s) "[" << s << "]"
#define QUOTE(s) "\"" << s << "\""
#define MEMBER(s) QUOTE(s) << ":"

  auto SpaceStatistics = [this](int space_index) {
    HeapSpaceStatistics space_stats;
    reinterpret_cast<v8::Isolate*>(isolate())->GetHeapSpaceStatistics(
        &space_stats, space_index);
    std::stringstream stream;
    stream << DICT(
      MEMBER("name")
        << QUOTE(BaseSpace::GetSpaceName(
              static_cast<AllocationSpace>(space_index)))
        << ","
      MEMBER("size") << space_stats.space_size() << ","
      MEMBER("used_size") << space_stats.space_used_size() << ","
      MEMBER("available_size") << space_stats.space_available_size() << ","
      MEMBER("physical_size") << space_stats.physical_space_size());
    return stream.str();
  };

  stream << DICT(
    MEMBER("isolate") << QUOTE(reinterpret_cast<void*>(isolate())) << ","
    MEMBER("id") << gc_count() << ","
    MEMBER("time_ms") << isolate()->time_millis_since_init() << ","
    MEMBER("total_heap_size") << stats.total_heap_size() << ","
    MEMBER("total_heap_size_executable")
      << stats.total_heap_size_executable() << ","
    MEMBER("total_physical_size") << stats.total_physical_size() << ","
    MEMBER("total_available_size") << stats.total_available_size() << ","
    MEMBER("used_heap_size") << stats.used_heap_size() << ","
    MEMBER("heap_size_limit") << stats.heap_size_limit() << ","
    MEMBER("malloced_memory") << stats.malloced_memory() << ","
    MEMBER("external_memory") << stats.external_memory() << ","
    MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << ","
    MEMBER("spaces") << LIST(
      SpaceStatistics(RO_SPACE)      << "," <<
      SpaceStatistics(NEW_SPACE)     << "," <<
      SpaceStatistics(OLD_SPACE)     << "," <<
      SpaceStatistics(CODE_SPACE)    << "," <<
      SpaceStatistics(MAP_SPACE)     << "," <<
      SpaceStatistics(LO_SPACE)      << "," <<
      SpaceStatistics(CODE_LO_SPACE) << "," <<
      SpaceStatistics(NEW_LO_SPACE)));

#undef DICT
#undef LIST
#undef QUOTE
#undef MEMBER
  // clang-format on
}

void Heap::ReportStatisticsAfterGC() {
  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       ++i) {
    isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i),
                          deferred_counters_[i]);
    deferred_counters_[i] = 0;
  }
}

class Heap::AllocationTrackerForDebugging final
    : public HeapObjectAllocationTracker {
 public:
  static bool IsNeeded() {
    return FLAG_verify_predictable || FLAG_fuzzer_gc_analysis ||
           (FLAG_trace_allocation_stack_interval > 0);
  }

  explicit AllocationTrackerForDebugging(Heap* heap) : heap_(heap) {
    CHECK(IsNeeded());
    heap_->AddHeapObjectAllocationTracker(this);
  }

  ~AllocationTrackerForDebugging() final {
    heap_->RemoveHeapObjectAllocationTracker(this);
    if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
      PrintAllocationsHash();
    }
  }

  void AllocationEvent(Address addr, int size) final {
    if (FLAG_verify_predictable) {
      allocations_count_.fetch_add(1, std::memory_order_relaxed);
      // Advance synthetic time by making a time request.
      heap_->MonotonicallyIncreasingTimeInMs();

      UpdateAllocationsHash(HeapObject::FromAddress(addr));
      UpdateAllocationsHash(size);

      if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
        PrintAllocationsHash();
      }
    } else if (FLAG_fuzzer_gc_analysis) {
      allocations_count_.fetch_add(1, std::memory_order_relaxed);
    } else if (FLAG_trace_allocation_stack_interval > 0) {
      allocations_count_.fetch_add(1, std::memory_order_relaxed);
      if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
        heap_->isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
      }
    }
  }

  void MoveEvent(Address source, Address target, int size) final {
    if (FLAG_verify_predictable) {
      allocations_count_.fetch_add(1, std::memory_order_relaxed);
      // Advance synthetic time by making a time request.
      heap_->MonotonicallyIncreasingTimeInMs();

      UpdateAllocationsHash(HeapObject::FromAddress(source));
      UpdateAllocationsHash(HeapObject::FromAddress(target));
      UpdateAllocationsHash(size);

      if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
        PrintAllocationsHash();
      }
    } else if (FLAG_fuzzer_gc_analysis) {
      allocations_count_.fetch_add(1, std::memory_order_relaxed);
    }
  }

  void UpdateObjectSizeEvent(Address, int) final {}

 private:
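  // Folds the object's offset within its memory chunk together with the
  // owning space's tag into the running allocation hash, so the hash stays
  // independent of where chunks happen to be mapped.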
  void UpdateAllocationsHash(HeapObject object) {
    Address object_address = object.address();
    MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
    AllocationSpace allocation_space = memory_chunk->owner_identity();

    STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
    uint32_t value =
        static_cast<uint32_t>(object_address - memory_chunk->address()) |
        (static_cast<uint32_t>(allocation_space) << kPageSizeBits);

    UpdateAllocationsHash(value);
  }

  void UpdateAllocationsHash(uint32_t value) {
    const uint16_t c1 = static_cast<uint16_t>(value);
    const uint16_t c2 = static_cast<uint16_t>(value >> 16);
    raw_allocations_hash_ =
        StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
    raw_allocations_hash_ =
        StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
  }

  void PrintAllocationsHash() {
    uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
    PrintF("\n### Allocations = %zu, hash = 0x%08x\n",
           allocations_count_.load(std::memory_order_relaxed), hash);
  }

  Heap* const heap_;
  // Count of all allocations performed through C++ bottlenecks. This needs to
  // be atomic as objects are moved in parallel in the GC which counts as
  // allocations.
  std::atomic<size_t> allocations_count_{0};
  // Running hash over allocations performed.
  uint32_t raw_allocations_hash_ = 0;
};

void Heap::AddHeapObjectAllocationTracker(
    HeapObjectAllocationTracker* tracker) {
  if (allocation_trackers_.empty() && FLAG_inline_new) {
    DisableInlineAllocation();
  }
  allocation_trackers_.push_back(tracker);
}

void Heap::RemoveHeapObjectAllocationTracker(
    HeapObjectAllocationTracker* tracker) {
  allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
                                         allocation_trackers_.end(), tracker),
                             allocation_trackers_.end());
  if (allocation_trackers_.empty() && FLAG_inline_new) {
    EnableInlineAllocation();
  }
}

void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
                                  RetainingPathOption option) {
  if (!FLAG_track_retaining_path) {
    PrintF("Retaining path tracking requires --track-retaining-path\n");
  } else {
    Handle<WeakArrayList> array(retaining_path_targets(), isolate());
    int index = array->length();
    array = WeakArrayList::AddToEnd(isolate(), array,
                                    MaybeObjectHandle::Weak(object));
    set_retaining_path_targets(*array);
    DCHECK_EQ(array->length(), index + 1);
    retaining_path_target_option_[index] = option;
  }
}

bool Heap::IsRetainingPathTarget(HeapObject object,
                                 RetainingPathOption* option) {
  WeakArrayList targets = retaining_path_targets();
  int length = targets.length();
  MaybeObject object_to_check = HeapObjectReference::Weak(object);
  for (int i = 0; i < length; i++) {
    MaybeObject target = targets.Get(i);
    DCHECK(target->IsWeakOrCleared());
    if (target == object_to_check) {
      DCHECK(retaining_path_target_option_.count(i));
      *option = retaining_path_target_option_[i];
      return true;
    }
  }
  return false;
}

void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
  PrintF("\n\n\n");
  PrintF("#################################################\n");
  PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target.ptr()));
  HeapObject object = target;
  std::vector<std::pair<HeapObject, bool>> retaining_path;
  Root root = Root::kUnknown;
  bool ephemeron = false;
  while (true) {
    retaining_path.push_back(std::make_pair(object, ephemeron));
    if (option == RetainingPathOption::kTrackEphemeronPath &&
        ephemeron_retainer_.count(object)) {
      object = ephemeron_retainer_[object];
      ephemeron = true;
    } else if (retainer_.count(object)) {
      object = retainer_[object];
      ephemeron = false;
    } else {
      if (retaining_root_.count(object)) {
        root = retaining_root_[object];
      }
      break;
    }
  }
  int distance = static_cast<int>(retaining_path.size());
  for (auto node : retaining_path) {
    HeapObject node_object = node.first;
    bool node_ephemeron = node.second;
    PrintF("\n");
    PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
    PrintF("Distance from root %d%s: ", distance,
           node_ephemeron ? " (ephemeron)" : "");
    node_object.ShortPrint();
    PrintF("\n");
#ifdef OBJECT_PRINT
    node_object.Print();
    PrintF("\n");
#endif
    --distance;
  }
  PrintF("\n");
  PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
  PrintF("Root: %s\n", RootVisitor::RootName(root));
  PrintF("-------------------------------------------------\n");
}

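// After a scavenge, keys and values in the retainer maps may still point into
// from-space pages; rebuild the map by following forwarding addresses so
// retaining-path tracking keeps referring to the live copies.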
void UpdateRetainersMapAfterScavenge(
    std::unordered_map<HeapObject, HeapObject, Object::Hasher>* map) {
  std::unordered_map<HeapObject, HeapObject, Object::Hasher> updated_map;

  for (auto pair : *map) {
    HeapObject object = pair.first;
    HeapObject retainer = pair.second;

    if (Heap::InFromPage(object)) {
      MapWord map_word = object.map_word(kRelaxedLoad);
      if (!map_word.IsForwardingAddress()) continue;
      object = map_word.ToForwardingAddress();
    }

    if (Heap::InFromPage(retainer)) {
      MapWord map_word = retainer.map_word(kRelaxedLoad);
      if (!map_word.IsForwardingAddress()) continue;
      retainer = map_word.ToForwardingAddress();
    }

    updated_map[object] = retainer;
  }

  *map = std::move(updated_map);
}

void Heap::UpdateRetainersAfterScavenge() {
  if (!incremental_marking()->IsMarking()) return;

  // This isn't supported for Minor MC.
  DCHECK(!FLAG_minor_mc);

  UpdateRetainersMapAfterScavenge(&retainer_);
  UpdateRetainersMapAfterScavenge(&ephemeron_retainer_);

  std::unordered_map<HeapObject, Root, Object::Hasher> updated_retaining_root;

  for (auto pair : retaining_root_) {
    HeapObject object = pair.first;

    if (Heap::InFromPage(object)) {
      MapWord map_word = object.map_word(kRelaxedLoad);
      if (!map_word.IsForwardingAddress()) continue;
      object = map_word.ToForwardingAddress();
    }

    updated_retaining_root[object] = pair.second;
  }

  retaining_root_ = std::move(updated_retaining_root);
}

void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
  if (retainer_.count(object)) return;
  retainer_[object] = retainer;
  RetainingPathOption option = RetainingPathOption::kDefault;
  if (IsRetainingPathTarget(object, &option)) {
    // Check if the retaining path was already printed in
    // AddEphemeronRetainer().
    if (ephemeron_retainer_.count(object) == 0 ||
        option == RetainingPathOption::kDefault) {
      PrintRetainingPath(object, option);
    }
  }
}

void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) {
  if (ephemeron_retainer_.count(object)) return;
  ephemeron_retainer_[object] = retainer;
  RetainingPathOption option = RetainingPathOption::kDefault;
  if (IsRetainingPathTarget(object, &option) &&
      option == RetainingPathOption::kTrackEphemeronPath) {
    // Check if the retaining path was already printed in AddRetainer().
    if (retainer_.count(object) == 0) {
      PrintRetainingPath(object, option);
    }
  }
}

void Heap::AddRetainingRoot(Root root, HeapObject object) {
  if (retaining_root_.count(object)) return;
  retaining_root_[object] = root;
  RetainingPathOption option = RetainingPathOption::kDefault;
  if (IsRetainingPathTarget(object, &option)) {
    PrintRetainingPath(object, option);
  }
}

void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
  deferred_counters_[feature]++;
}

bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }

void Heap::GarbageCollectionPrologue(
    GarbageCollectionReason gc_reason,
    const v8::GCCallbackFlags gc_callback_flags) {
  TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);

  is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
                          current_gc_flags_ & kForcedGC ||
                          force_gc_on_next_allocation_;
  is_current_gc_for_heap_profiler_ =
      gc_reason == GarbageCollectionReason::kHeapProfiler;
  if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  heap_allocator_.UpdateAllocationTimeout();
#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

  // There may be an allocation memento behind objects in new space. Upon
  // evacuation of a non-full new space (or if we are on the last page) there
  // may be uninitialized memory behind top. We fill the remainder of the page
  // with a filler.
  if (new_space()) new_space()->MakeLinearAllocationAreaIterable();

  // Reset GC statistics.
  promoted_objects_size_ = 0;
  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
  semi_space_copied_object_size_ = 0;
  nodes_died_in_new_space_ = 0;
  nodes_copied_in_new_space_ = 0;
  nodes_promoted_ = 0;

  UpdateMaximumCommitted();

#ifdef DEBUG
  DCHECK(!AllowGarbageCollection::IsAllowed());
  DCHECK_EQ(gc_state(), NOT_IN_GC);

  if (FLAG_gc_verbose) Print();
#endif  // DEBUG

  if (new_space_ && new_space_->IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
  memory_allocator()->unmapper()->PrepareForGC();
}

void Heap::GarbageCollectionPrologueInSafepoint() {
  TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE_SAFEPOINT);
  gc_count_++;

  if (new_space_) {
    UpdateNewSpaceAllocationCounter();
    CheckNewSpaceExpansionCriteria();
    new_space_->ResetParkedAllocationBuffers();
  }
}

void Heap::UpdateNewSpaceAllocationCounter() {
  new_space_allocation_counter_ = NewSpaceAllocationCounter();
}

size_t Heap::NewSpaceAllocationCounter() {
  return new_space_allocation_counter_ +
         (new_space_ ? new_space()->AllocatedSinceLastGC() : 0);
}

size_t Heap::SizeOfObjects() {
  size_t total = 0;

  for (SpaceIterator it(this); it.HasNext();) {
    total += it.Next()->SizeOfObjects();
  }
  return total;
}

size_t Heap::TotalGlobalHandlesSize() {
  return isolate_->global_handles()->TotalSize();
}

size_t Heap::UsedGlobalHandlesSize() {
  return isolate_->global_handles()->UsedSize();
}

void Heap::MergeAllocationSitePretenuringFeedback(
    const PretenuringFeedbackMap& local_pretenuring_feedback) {
  PtrComprCageBase cage_base(isolate());
  AllocationSite site;
  for (auto& site_and_count : local_pretenuring_feedback) {
    site = site_and_count.first;
    MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
    if (map_word.IsForwardingAddress()) {
      site = AllocationSite::cast(map_word.ToForwardingAddress());
    }

1115     // dereferenced the site during collecting information.
1116     // This is an inlined check of AllocationMemento::IsValid.
1117     if (!site.IsAllocationSite() || site.IsZombie()) continue;
1118 
1119     const int value = static_cast<int>(site_and_count.second);
1120     DCHECK_LT(0, value);
1121     if (site.IncrementMementoFoundCount(value)) {
1122       // For sites in the global map the count is accessed through the site.
1123       global_pretenuring_feedback_.insert(std::make_pair(site, 0));
1124     }
1125   }
1126 }
1127 
AddAllocationObserversToAllSpaces(AllocationObserver * observer,AllocationObserver * new_space_observer)1128 void Heap::AddAllocationObserversToAllSpaces(
1129     AllocationObserver* observer, AllocationObserver* new_space_observer) {
1130   DCHECK(observer && new_space_observer);
1131 
1132   for (SpaceIterator it(this); it.HasNext();) {
1133     Space* space = it.Next();
1134     if (space == new_space()) {
1135       space->AddAllocationObserver(new_space_observer);
1136     } else {
1137       space->AddAllocationObserver(observer);
1138     }
1139   }
1140 }
1141 
RemoveAllocationObserversFromAllSpaces(AllocationObserver * observer,AllocationObserver * new_space_observer)1142 void Heap::RemoveAllocationObserversFromAllSpaces(
1143     AllocationObserver* observer, AllocationObserver* new_space_observer) {
1144   DCHECK(observer && new_space_observer);
1145 
1146   for (SpaceIterator it(this); it.HasNext();) {
1147     Space* space = it.Next();
1148     if (space == new_space()) {
1149       space->RemoveAllocationObserver(new_space_observer);
1150     } else {
1151       space->RemoveAllocationObserver(observer);
1152     }
1153   }
1154 }
1155 
PublishPendingAllocations()1156 void Heap::PublishPendingAllocations() {
1157   if (FLAG_enable_third_party_heap) return;
1158   if (new_space_) new_space_->MarkLabStartInitialized();
1159   PagedSpaceIterator spaces(this);
1160   for (PagedSpace* space = spaces.Next(); space != nullptr;
1161        space = spaces.Next()) {
1162     space->MoveOriginalTopForward();
1163   }
1164   lo_space_->ResetPendingObject();
1165   if (new_lo_space_) new_lo_space_->ResetPendingObject();
1166   code_lo_space_->ResetPendingObject();
1167 }
1168 
1169 namespace {
MakePretenureDecision(AllocationSite site,AllocationSite::PretenureDecision current_decision,double ratio,bool maximum_size_scavenge)1170 inline bool MakePretenureDecision(
1171     AllocationSite site, AllocationSite::PretenureDecision current_decision,
1172     double ratio, bool maximum_size_scavenge) {
1173   // Here we just allow state transitions from undecided or maybe tenure
1174   // to don't tenure, maybe tenure, or tenure.
1175   if ((current_decision == AllocationSite::kUndecided ||
1176        current_decision == AllocationSite::kMaybeTenure)) {
1177     if (ratio >= AllocationSite::kPretenureRatio) {
1178       // We just transition into tenure state when the semi-space was at
1179       // maximum capacity.
1180       if (maximum_size_scavenge) {
1181         site.set_deopt_dependent_code(true);
1182         site.set_pretenure_decision(AllocationSite::kTenure);
1183         // Currently we just need to deopt when we make a state transition to
1184         // tenure.
1185         return true;
1186       }
1187       site.set_pretenure_decision(AllocationSite::kMaybeTenure);
1188     } else {
1189       site.set_pretenure_decision(AllocationSite::kDontTenure);
1190     }
1191   }
1192   return false;
1193 }
1194 
1195 // Clear feedback calculation fields until the next gc.
ResetPretenuringFeedback(AllocationSite site)1196 inline void ResetPretenuringFeedback(AllocationSite site) {
1197   site.set_memento_found_count(0);
1198   site.set_memento_create_count(0);
1199 }
1200 
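// The pretenuring ratio is the number of mementos found during GC divided by
// the number of mementos created since the last GC; once enough mementos have
// been created, the ratio feeds MakePretenureDecision() above.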
inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
                                      bool maximum_size_scavenge) {
  bool deopt = false;
  int create_count = site.memento_create_count();
  int found_count = site.memento_found_count();
  bool minimum_mementos_created =
      create_count >= AllocationSite::kPretenureMinimumCreated;
  double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
                     ? static_cast<double>(found_count) / create_count
                     : 0.0;
  AllocationSite::PretenureDecision current_decision =
      site.pretenure_decision();

  if (minimum_mementos_created) {
    deopt = MakePretenureDecision(site, current_decision, ratio,
                                  maximum_size_scavenge);
  }

  if (FLAG_trace_pretenuring_statistics) {
    PrintIsolate(isolate,
                 "pretenuring: AllocationSite(%p): (created, found, ratio) "
                 "(%d, %d, %f) %s => %s\n",
                 reinterpret_cast<void*>(site.ptr()), create_count, found_count,
                 ratio, site.PretenureDecisionName(current_decision),
                 site.PretenureDecisionName(site.pretenure_decision()));
  }

  ResetPretenuringFeedback(site);
  return deopt;
}

bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
  AllocationSite::PretenureDecision current_decision =
      site.pretenure_decision();
  bool deopt = true;
  if (current_decision == AllocationSite::kUndecided ||
      current_decision == AllocationSite::kMaybeTenure) {
    site.set_deopt_dependent_code(true);
    site.set_pretenure_decision(AllocationSite::kTenure);
  } else {
    deopt = false;
  }
  if (FLAG_trace_pretenuring_statistics) {
    PrintIsolate(isolate,
                 "pretenuring manually requested: AllocationSite(%p): "
                 "%s => %s\n",
                 reinterpret_cast<void*>(site.ptr()),
                 site.PretenureDecisionName(current_decision),
                 site.PretenureDecisionName(site.pretenure_decision()));
  }

  ResetPretenuringFeedback(site);
  return deopt;
}

}  // namespace

void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
  global_pretenuring_feedback_.erase(site);
}

bool Heap::DeoptMaybeTenuredAllocationSites() {
  return new_space_ && new_space_->IsAtMaximumCapacity() &&
         maximum_size_scavenges_ == 0;
}

void Heap::ProcessPretenuringFeedback() {
  bool trigger_deoptimization = false;
  if (FLAG_allocation_site_pretenuring) {
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    AllocationSite site;

    // Step 1: Digest feedback for recorded allocation sites.
    bool maximum_size_scavenge = MaximumSizeScavenge();
    for (auto& site_and_count : global_pretenuring_feedback_) {
      allocation_sites++;
      site = site_and_count.first;
      // The count is always accessed through the site.
1284       DCHECK_EQ(0, site_and_count.second);
1285       int found_count = site.memento_found_count();
1286       // An entry in the storage does not imply that the count is > 0 because
1287       // allocation sites might have been reset due to too many objects dying
1288       // in old space.
1289       if (found_count > 0) {
1290         DCHECK(site.IsAllocationSite());
1291         active_allocation_sites++;
1292         allocation_mementos_found += found_count;
1293         if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
1294           trigger_deoptimization = true;
1295         }
1296         if (site.GetAllocationType() == AllocationType::kOld) {
1297           tenure_decisions++;
1298         } else {
1299           dont_tenure_decisions++;
1300         }
1301       }
1302     }
1303 
1304     // Step 2: Pretenure allocation sites for manual requests.
1305     if (allocation_sites_to_pretenure_) {
1306       while (!allocation_sites_to_pretenure_->empty()) {
1307         auto pretenure_site = allocation_sites_to_pretenure_->Pop();
1308         if (PretenureAllocationSiteManually(isolate_, pretenure_site)) {
1309           trigger_deoptimization = true;
1310         }
1311       }
1312       allocation_sites_to_pretenure_.reset();
1313     }
1314 
1315     // Step 3: Deopt maybe tenured allocation sites if necessary.
1316     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
1317     if (deopt_maybe_tenured) {
1318       ForeachAllocationSite(
1319           allocation_sites_list(),
1320           [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
1321             DCHECK(site.IsAllocationSite());
1322             allocation_sites++;
1323             if (site.IsMaybeTenure()) {
1324               site.set_deopt_dependent_code(true);
1325               trigger_deoptimization = true;
1326             }
1327           });
1328     }
1329 
1330     if (trigger_deoptimization) {
1331       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1332     }
1333 
1334     if (FLAG_trace_pretenuring_statistics &&
1335         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
1336          dont_tenure_decisions > 0)) {
1337       PrintIsolate(isolate(),
1338                    "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
1339                    "active_sites=%d "
1340                    "mementos=%d tenured=%d not_tenured=%d\n",
1341                    deopt_maybe_tenured ? 1 : 0, allocation_sites,
1342                    active_allocation_sites, allocation_mementos_found,
1343                    tenure_decisions, dont_tenure_decisions);
1344     }
1345 
1346     global_pretenuring_feedback_.clear();
1347     global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
1348   }
1349 }
1350 
1351 void Heap::PretenureAllocationSiteOnNextCollection(AllocationSite site) {
1352   if (!allocation_sites_to_pretenure_) {
1353     allocation_sites_to_pretenure_.reset(
1354         new GlobalHandleVector<AllocationSite>(this));
1355   }
1356   allocation_sites_to_pretenure_->Push(site);
1357 }
1358 
1359 void Heap::InvalidateCodeDeoptimizationData(Code code) {
1360   CodePageMemoryModificationScope modification_scope(code);
1361   code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
1362 }
1363 
1364 void Heap::DeoptMarkedAllocationSites() {
1365   // TODO(hpayer): If iterating over the allocation sites list becomes a
1366   // performance issue, use a cache data structure in heap instead.
1367 
1368   ForeachAllocationSite(allocation_sites_list(), [](AllocationSite site) {
1369     if (site.deopt_dependent_code()) {
1370       site.dependent_code().MarkCodeForDeoptimization(
1371           DependentCode::kAllocationSiteTenuringChangedGroup);
1372       site.set_deopt_dependent_code(false);
1373     }
1374   });
1375 
1376   Deoptimizer::DeoptimizeMarkedCode(isolate_);
1377 }
1378 
1379 void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
1380   if (collector == GarbageCollector::MARK_COMPACTOR) {
1381     memory_pressure_level_.store(MemoryPressureLevel::kNone,
1382                                  std::memory_order_relaxed);
1383   }
1384 
1385   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
1386 
1387   safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
1388     local_heap->InvokeGCEpilogueCallbacksInSafepoint();
1389   });
1390 
1391 #define UPDATE_COUNTERS_FOR_SPACE(space)                \
1392   isolate_->counters()->space##_bytes_available()->Set( \
1393       static_cast<int>(space()->Available()));          \
1394   isolate_->counters()->space##_bytes_committed()->Set( \
1395       static_cast<int>(space()->CommittedMemory()));    \
1396   isolate_->counters()->space##_bytes_used()->Set(      \
1397       static_cast<int>(space()->SizeOfObjects()));
1398 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
1399   if (space()->CommittedMemory() > 0) {                                \
1400     isolate_->counters()->external_fragmentation_##space()->AddSample( \
1401         static_cast<int>(100 - (space()->SizeOfObjects() * 100.0) /    \
1402                                    space()->CommittedMemory()));       \
1403   }
1404 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
1405   UPDATE_COUNTERS_FOR_SPACE(space)                         \
1406   UPDATE_FRAGMENTATION_FOR_SPACE(space)
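// External fragmentation is sampled as the percentage of committed memory not
// occupied by live objects; e.g. 60 MB of objects on 80 MB committed yields
// 100 - (60 * 100.0) / 80 = 25.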
1407 
1408   if (new_space()) {
1409     UPDATE_COUNTERS_FOR_SPACE(new_space)
1410   }
1411 
1412   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
1413   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
1414 
1415   if (map_space()) {
1416     UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
1417   }
1418 
1419   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
1420 #undef UPDATE_COUNTERS_FOR_SPACE
1421 #undef UPDATE_FRAGMENTATION_FOR_SPACE
1422 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
1423 
1424 #ifdef DEBUG
1425   // Old-to-new slot sets must be empty after each collection.
1426   for (SpaceIterator it(this); it.HasNext();) {
1427     Space* space = it.Next();
1428 
1429     for (MemoryChunk* chunk = space->first_page(); chunk != space->last_page();
1430          chunk = chunk->list_node().next())
1431       DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
1432   }
1433 
1434   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
1435   if (FLAG_print_handles) PrintHandles();
1436   if (FLAG_code_stats) ReportCodeStatistics("After GC");
1437   if (FLAG_check_handle_count) CheckHandleCount();
1438 #endif
1439 
1440   if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
1441     ZapFromSpace();
1442   }
1443 
1444   if (new_space()) {
1445     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
1446     ReduceNewSpaceSize();
1447   }
1448 
1449   // Remove CollectionRequested flag from main thread state, as the collection
1450   // was just performed.
1451   safepoint()->AssertActive();
1452   LocalHeap::ThreadState old_state =
1453       main_thread_local_heap()->state_.ClearCollectionRequested();
1454 
1455   CHECK(old_state.IsRunning());
1456 
1457   // Resume all threads waiting for the GC.
1458   collection_barrier_->ResumeThreadsAwaitingCollection();
1459 }
1460 
1461 void Heap::GarbageCollectionEpilogue(GarbageCollector collector) {
1462   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
1463   AllowGarbageCollection for_the_rest_of_the_epilogue;
1464 
1465   UpdateMaximumCommitted();
1466 
1467   if (FLAG_track_retaining_path &&
1468       collector == GarbageCollector::MARK_COMPACTOR) {
1469     retainer_.clear();
1470     ephemeron_retainer_.clear();
1471     retaining_root_.clear();
1472   }
1473 
1474   isolate_->counters()->alive_after_last_gc()->Set(
1475       static_cast<int>(SizeOfObjects()));
1476 
1477   isolate_->string_table()->UpdateCountersIfOwnedBy(isolate_);
1478 
1479   if (CommittedMemory() > 0) {
1480     isolate_->counters()->external_fragmentation_total()->AddSample(
1481         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
1482 
1483     isolate_->counters()->heap_sample_total_committed()->AddSample(
1484         static_cast<int>(CommittedMemory() / KB));
1485     isolate_->counters()->heap_sample_total_used()->AddSample(
1486         static_cast<int>(SizeOfObjects() / KB));
1487     if (map_space()) {
1488       isolate_->counters()->heap_sample_map_space_committed()->AddSample(
1489           static_cast<int>(map_space()->CommittedMemory() / KB));
1490     }
1491     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
1492         static_cast<int>(code_space()->CommittedMemory() / KB));
1493 
1494     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
1495         static_cast<int>(MaximumCommittedMemory() / KB));
1496   }
1497 
1498 #ifdef DEBUG
1499   ReportStatisticsAfterGC();
1500 #endif  // DEBUG
1501 
1502   last_gc_time_ = MonotonicallyIncreasingTimeInMs();
1503 }
1504 
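// Reentrancy guard for the embedder GC callbacks: prologue/epilogue callbacks
// are only invoked when CheckReenter() reports the outermost scope
// (gc_callbacks_depth_ == 1), so callbacks that themselves trigger GCs do not
// recurse into the callback machinery.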
1505 class V8_NODISCARD GCCallbacksScope {
1506  public:
1507   explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
1508     heap_->gc_callbacks_depth_++;
1509   }
1510   ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
1511 
1512   bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
1513 
1514  private:
1515   Heap* heap_;
1516 };
1517 
1518 void Heap::HandleGCRequest() {
1519   if (IsStressingScavenge() && stress_scavenge_observer_->HasRequestedGC()) {
1520     CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
1521     stress_scavenge_observer_->RequestedGCDone();
1522   } else if (HighMemoryPressure()) {
1523     incremental_marking()->reset_request_type();
1524     CheckMemoryPressure();
1525   } else if (CollectionRequested()) {
1526     CheckCollectionRequested();
1527   } else if (incremental_marking()->request_type() ==
1528              IncrementalMarking::GCRequestType::COMPLETE_MARKING) {
1529     incremental_marking()->reset_request_type();
1530     CollectAllGarbage(current_gc_flags_,
1531                       GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
1532                       current_gc_callback_flags_);
1533   } else if (incremental_marking()->request_type() ==
1534                  IncrementalMarking::GCRequestType::FINALIZATION &&
1535              incremental_marking()->IsMarking() &&
1536              !incremental_marking()->finalize_marking_completed()) {
1537     incremental_marking()->reset_request_type();
1538     FinalizeIncrementalMarkingIncrementally(
1539         GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
1540   }
1541 }
1542 
1543 void Heap::ScheduleScavengeTaskIfNeeded() {
1544   DCHECK_NOT_NULL(scavenge_job_);
1545   scavenge_job_->ScheduleTaskIfNeeded(this);
1546 }
1547 
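// Illustrative embedder-side sketch (not part of this file), assuming a
// v8::Isolate* named `isolate`; both public APIs eventually reach the full-GC
// entry points below:
//
//   isolate->LowMemoryNotification();              // all-available garbage
//   isolate->MemoryPressureNotification(
//       v8::MemoryPressureLevel::kCritical);       // may trigger a full GC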
1548 void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
1549                              const v8::GCCallbackFlags gc_callback_flags) {
1550   // Since we are ignoring the return value, the exact choice of space does
1551   // not matter, so long as we do not specify NEW_SPACE, which would not
1552   // cause a full GC.
1553   set_current_gc_flags(flags);
1554   CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
1555   set_current_gc_flags(kNoGCFlags);
1556 }
1557 
1558 namespace {
1559 
1560 intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
1561   int slots = size / kTaggedSize;
1562   DCHECK_EQ(a.Size(), size);
1563   DCHECK_EQ(b.Size(), size);
1564   Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a.address());
1565   Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b.address());
1566   for (int i = 0; i < slots; i++) {
1567     if (*slot_a != *slot_b) {
1568       return *slot_a - *slot_b;
1569     }
1570     slot_a++;
1571     slot_b++;
1572   }
1573   return 0;
1574 }
1575 
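// Reports groups of byte-identical objects of a given size: objects are
// sorted by their raw tagged-word contents (CompareWords above) so equal
// objects become adjacent, runs are counted, and runs whose duplicated bytes
// exceed --trace-duplicate-threshold-kb are printed, largest first.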
1576 void ReportDuplicates(int size, std::vector<HeapObject>* objects) {
1577   if (objects->size() == 0) return;
1578 
1579   sort(objects->begin(), objects->end(), [size](HeapObject a, HeapObject b) {
1580     intptr_t c = CompareWords(size, a, b);
1581     if (c != 0) return c < 0;
1582     return a < b;
1583   });
1584 
1585   std::vector<std::pair<int, HeapObject>> duplicates;
1586   HeapObject current = (*objects)[0];
1587   int count = 1;
1588   for (size_t i = 1; i < objects->size(); i++) {
1589     if (CompareWords(size, current, (*objects)[i]) == 0) {
1590       count++;
1591     } else {
1592       if (count > 1) {
1593         duplicates.push_back(std::make_pair(count - 1, current));
1594       }
1595       count = 1;
1596       current = (*objects)[i];
1597     }
1598   }
1599   if (count > 1) {
1600     duplicates.push_back(std::make_pair(count - 1, current));
1601   }
1602 
1603   int threshold = FLAG_trace_duplicate_threshold_kb * KB;
1604 
1605   sort(duplicates.begin(), duplicates.end());
1606   for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
1607     int duplicate_bytes = it->first * size;
1608     if (duplicate_bytes < threshold) break;
1609     PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
1610            duplicate_bytes / KB);
1611     PrintF("Sample object: ");
1612     it->second.Print();
1613     PrintF("============================\n");
1614   }
1615 }
1616 }  // anonymous namespace
1617 
1618 void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
1619   // Since we are ignoring the return value, the exact choice of space does
1620   // not matter, so long as we do not specify NEW_SPACE, which would not
1621   // cause a full GC.
1622   // Major GC would invoke weak handle callbacks on weakly reachable
1623   // handles, but won't collect weakly reachable objects until next
1624   // major GC.  Therefore if we collect aggressively and weak handle callback
1625   // has been invoked, we rerun major GC to release objects which become
1626   // garbage.
1627   // Note: as weak callbacks can execute arbitrary code, we cannot
1628   // hope that eventually there will be no weak callbacks invocations.
1629   // Therefore stop recollecting after several attempts.
1630   if (gc_reason == GarbageCollectionReason::kLastResort) {
1631     InvokeNearHeapLimitCallback();
1632   }
1633   RCS_SCOPE(isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
1634 
1635   // The optimizing compiler may be unnecessarily holding on to memory.
1636   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1637   isolate()->ClearSerializerData();
1638   set_current_gc_flags(
1639       kReduceMemoryFootprintMask |
1640       (gc_reason == GarbageCollectionReason::kLowMemoryNotification ? kForcedGC
1641                                                                     : 0));
1642   isolate_->compilation_cache()->Clear();
1643   const int kMaxNumberOfAttempts = 7;
1644   const int kMinNumberOfAttempts = 2;
1645   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
1646     if (!CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags) &&
1647         attempt + 1 >= kMinNumberOfAttempts) {
1648       break;
1649     }
1650   }
1651 
1652   set_current_gc_flags(kNoGCFlags);
1653   EagerlyFreeExternalMemory();
1654 
1655   if (FLAG_trace_duplicate_threshold_kb) {
1656     std::map<int, std::vector<HeapObject>> objects_by_size;
1657     PagedSpaceIterator spaces(this);
1658     for (PagedSpace* space = spaces.Next(); space != nullptr;
1659          space = spaces.Next()) {
1660       PagedSpaceObjectIterator it(this, space);
1661       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
1662         objects_by_size[obj.Size()].push_back(obj);
1663       }
1664     }
1665     {
1666       LargeObjectSpaceObjectIterator it(lo_space());
1667       for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
1668         objects_by_size[obj.Size()].push_back(obj);
1669       }
1670     }
1671     for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
1672          ++it) {
1673       ReportDuplicates(it->first, &it->second);
1674     }
1675   }
1676 }
1677 
1678 void Heap::PreciseCollectAllGarbage(int flags,
1679                                     GarbageCollectionReason gc_reason,
1680                                     const GCCallbackFlags gc_callback_flags) {
1681   if (!incremental_marking()->IsStopped()) {
1682     FinalizeIncrementalMarkingAtomically(gc_reason);
1683   }
1684   CollectAllGarbage(flags, gc_reason, gc_callback_flags);
1685 }
1686 
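// Reacts to growth of embedder-reported external memory: above the hard limit
// a full GC is forced immediately; otherwise incremental marking is started,
// or advanced with a deadline that scales with how close the current external
// memory is to its limit.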
1687 void Heap::ReportExternalMemoryPressure() {
1688   const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
1689       static_cast<GCCallbackFlags>(
1690           kGCCallbackFlagSynchronousPhantomCallbackProcessing |
1691           kGCCallbackFlagCollectAllExternalMemory);
1692   int64_t current = external_memory_.total();
1693   int64_t baseline = external_memory_.low_since_mark_compact();
1694   int64_t limit = external_memory_.limit();
1695   TRACE_EVENT2(
1696       "devtools.timeline,v8", "V8.ExternalMemoryPressure", "external_memory_mb",
1697       static_cast<int>((current - baseline) / MB), "external_memory_limit_mb",
1698       static_cast<int>((limit - baseline) / MB));
1699   if (current > baseline + external_memory_hard_limit()) {
1700     CollectAllGarbage(
1701         kReduceMemoryFootprintMask,
1702         GarbageCollectionReason::kExternalMemoryPressure,
1703         static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
1704                                      kGCCallbackFlagsForExternalMemory));
1705     return;
1706   }
1707   if (incremental_marking()->IsStopped()) {
1708     if (incremental_marking()->CanBeActivated()) {
1709       StartIncrementalMarking(GCFlagsForIncrementalMarking(),
1710                               GarbageCollectionReason::kExternalMemoryPressure,
1711                               kGCCallbackFlagsForExternalMemory);
1712     } else {
1713       CollectAllGarbage(i::Heap::kNoGCFlags,
1714                         GarbageCollectionReason::kExternalMemoryPressure,
1715                         kGCCallbackFlagsForExternalMemory);
1716     }
1717   } else {
1718     // Incremental marking is turned on and has already been started.
1719     const double kMinStepSize = 5;
1720     const double kMaxStepSize = 10;
1721     const double ms_step = std::min(
1722         kMaxStepSize, std::max(kMinStepSize, static_cast<double>(current) /
1723                                                  limit * kMinStepSize));
1724     const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
1725     // Extend the gc callback flags with external memory flags.
1726     current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
1727         current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
1728     incremental_marking()->AdvanceWithDeadline(
1729         deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
1730   }
1731 }
1732 
1733 int64_t Heap::external_memory_limit() { return external_memory_.limit(); }
1734 
1735 Heap::DevToolsTraceEventScope::DevToolsTraceEventScope(Heap* heap,
1736                                                        const char* event_name,
1737                                                        const char* event_type)
1738     : heap_(heap), event_name_(event_name) {
1739   TRACE_EVENT_BEGIN2("devtools.timeline,v8", event_name_, "usedHeapSizeBefore",
1740                      heap_->SizeOfObjects(), "type", event_type);
1741 }
1742 
1743 Heap::DevToolsTraceEventScope::~DevToolsTraceEventScope() {
1744   TRACE_EVENT_END1("devtools.timeline,v8", event_name_, "usedHeapSizeAfter",
1745                    heap_->SizeOfObjects());
1746 }
1747 
1748 static GCType GetGCTypeFromGarbageCollector(GarbageCollector collector) {
1749   switch (collector) {
1750     case GarbageCollector::MARK_COMPACTOR:
1751       return kGCTypeMarkSweepCompact;
1752     case GarbageCollector::SCAVENGER:
1753       return kGCTypeScavenge;
1754     case GarbageCollector::MINOR_MARK_COMPACTOR:
1755       return kGCTypeMinorMarkCompact;
1756     default:
1757       UNREACHABLE();
1758   }
1759 }
1760 
1761 bool Heap::CollectGarbage(AllocationSpace space,
1762                           GarbageCollectionReason gc_reason,
1763                           const v8::GCCallbackFlags gc_callback_flags) {
1764   if (V8_UNLIKELY(!deserialization_complete_)) {
1765     // During isolate initialization the heap always grows. GC is only requested
1766     // if a new page allocation fails. In such a case we should crash with
1767     // an out-of-memory instead of performing GC because the prologue/epilogue
1768     // callbacks may see objects that are not yet deserialized.
1769     CHECK(always_allocate());
1770     FatalProcessOutOfMemory("GC during deserialization");
1771   }
1772 
1773   // CollectGarbage consists of three parts:
1774   // 1. The prologue part which may execute callbacks. These callbacks may
1775   // allocate and trigger another garbage collection.
1776   // 2. The main garbage collection phase.
1777   // 3. The epilogue part which may execute callbacks. These callbacks may
1778   // allocate and trigger another garbage collection.
1779 
1780   // Part 1: Invoke all callbacks which should happen before the actual garbage
1781   // collection is triggered. Note that these callbacks may trigger another
1782   // garbage collection since they may allocate.
1783 
1784   DCHECK(AllowGarbageCollection::IsAllowed());
1785 
1786   // Ensure that all pending phantom callbacks are invoked.
1787   isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
1788 
1789   const char* collector_reason = nullptr;
1790   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
1791   GCType gc_type = GetGCTypeFromGarbageCollector(collector);
1792 
1793   {
1794     GCCallbacksScope scope(this);
1795     // Temporarily override any embedder stack state as callbacks may create
1796     // their own state on the stack and recursively trigger GC.
1797     EmbedderStackStateScope embedder_scope(
1798         this, EmbedderStackStateScope::kExplicitInvocation,
1799         EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
1800     if (scope.CheckReenter()) {
1801       AllowGarbageCollection allow_gc;
1802       AllowJavascriptExecution allow_js(isolate());
1803       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
1804       VMState<EXTERNAL> callback_state(isolate_);
1805       HandleScope handle_scope(isolate_);
1806       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1807     }
1808   }
1809 
1810   // Part 2: The main garbage collection phase.
1811   DisallowGarbageCollection no_gc_during_gc;
1812 
1813   size_t freed_global_handles = 0;
1814   size_t committed_memory_before = collector == GarbageCollector::MARK_COMPACTOR
1815                                        ? CommittedOldGenerationMemory()
1816                                        : 0;
1817   {
1818     tracer()->StartObservablePause();
1819     VMState<GC> state(isolate());
1820     DevToolsTraceEventScope devtools_trace_event_scope(
1821         this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
1822         GarbageCollectionReasonToString(gc_reason));
1823 
1824     // Filter on-stack reference below this method.
1825     isolate()
1826         ->global_handles()
1827         ->CleanupOnStackReferencesBelowCurrentStackPosition();
1828 
1829     if (collector == GarbageCollector::MARK_COMPACTOR && cpp_heap()) {
1830       // CppHeap needs a stack marker at the top of all entry points to allow
1831       // deterministic passes over the stack. E.g., a verifier that should only
1832       // find a subset of references of the marker.
1833       //
1834       // TODO(chromium:1056170): Consider adding a component that keeps track
1835       // of relevant GC stack regions where interesting pointers can be found.
1836       static_cast<v8::internal::CppHeap*>(cpp_heap())
1837           ->SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
1838     }
1839 
1840     GarbageCollectionPrologue(gc_reason, gc_callback_flags);
1841     {
1842       GCTracer::RecordGCPhasesInfo record_gc_phases_info(this, collector);
1843       base::Optional<TimedHistogramScope> histogram_timer_scope;
1844       base::Optional<OptionalTimedHistogramScope>
1845           histogram_timer_priority_scope;
1846       if (record_gc_phases_info.type_timer) {
1847         histogram_timer_scope.emplace(record_gc_phases_info.type_timer,
1848                                       isolate_);
1849         TRACE_EVENT0("v8", record_gc_phases_info.type_timer->name());
1850       }
1851       if (record_gc_phases_info.type_priority_timer) {
1852         OptionalTimedHistogramScopeMode mode =
1853             isolate_->IsMemorySavingsModeActive()
1854                 ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
1855                 : OptionalTimedHistogramScopeMode::TAKE_TIME;
1856         histogram_timer_priority_scope.emplace(
1857             record_gc_phases_info.type_priority_timer, isolate_, mode);
1858       }
1859 
1860       if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
1861         tp_heap_->CollectGarbage();
1862       } else {
1863         freed_global_handles += PerformGarbageCollection(
1864             collector, gc_reason, collector_reason, gc_callback_flags);
1865       }
1866       // Clear flags describing the current GC now that the current GC is
1867       // complete. Do this before GarbageCollectionEpilogue() since that could
1868       // trigger another unforced GC.
1869       is_current_gc_forced_ = false;
1870       is_current_gc_for_heap_profiler_ = false;
1871 
1872       if (collector == GarbageCollector::MARK_COMPACTOR ||
1873           collector == GarbageCollector::SCAVENGER) {
1874         tracer()->RecordGCPhasesHistograms(record_gc_phases_info.mode);
1875       }
1876     }
1877 
1878     GarbageCollectionEpilogue(collector);
1879     if (collector == GarbageCollector::MARK_COMPACTOR &&
1880         FLAG_track_detached_contexts) {
1881       isolate()->CheckDetachedContextsAfterGC();
1882     }
1883 
1884     if (collector == GarbageCollector::MARK_COMPACTOR) {
1885       // Calculate used memory first, then committed memory. The following
1886       // code assumes that committed >= used, which might not hold when this is
1887       // calculated in the wrong order and background threads allocate
1888       // in-between.
1889       size_t used_memory_after = OldGenerationSizeOfObjects();
1890       size_t committed_memory_after = CommittedOldGenerationMemory();
1891       MemoryReducer::Event event;
1892       event.type = MemoryReducer::kMarkCompact;
1893       event.time_ms = MonotonicallyIncreasingTimeInMs();
1894       // Trigger one more GC if
1895       // - this GC decreased committed memory,
1896       // - there is high fragmentation,
1897       event.next_gc_likely_to_collect_more =
1898           (committed_memory_before > committed_memory_after + MB) ||
1899           HasHighFragmentation(used_memory_after, committed_memory_after);
1900       event.committed_memory = committed_memory_after;
1901       if (deserialization_complete_) {
1902         memory_reducer_->NotifyMarkCompact(event);
1903       }
1904       if (initial_max_old_generation_size_ < max_old_generation_size() &&
1905           used_memory_after < initial_max_old_generation_size_threshold_) {
1906         set_max_old_generation_size(initial_max_old_generation_size_);
1907       }
1908     }
1909 
1910     tracer()->StopAtomicPause();
1911     tracer()->StopObservablePause();
1912     tracer()->UpdateStatistics(collector);
1913     // Young generation cycles finish atomically. It is important that
1914     // StopObservablePause, UpdateStatistics and StopCycle are called in this
1915     // order; the latter may replace the current event with that of an
1916     // interrupted full cycle.
1917     if (IsYoungGenerationCollector(collector)) {
1918       tracer()->StopYoungCycleIfNeeded();
1919     } else {
1920       tracer()->StopFullCycleIfNeeded();
1921     }
1922   }
1923 
1924   // Part 3: Invoke all callbacks which should happen after the actual garbage
1925   // collection is triggered. Note that these callbacks may trigger another
1926   // garbage collection since they may allocate.
1927 
1928   {
1929     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
1930     gc_post_processing_depth_++;
1931     {
1932       AllowGarbageCollection allow_gc;
1933       AllowJavascriptExecution allow_js(isolate());
1934       freed_global_handles +=
1935           isolate_->global_handles()->PostGarbageCollectionProcessing(
1936               collector, gc_callback_flags);
1937     }
1938     gc_post_processing_depth_--;
1939   }
1940 
1941   {
1942     GCCallbacksScope scope(this);
1943     if (scope.CheckReenter()) {
1944       AllowGarbageCollection allow_gc;
1945       AllowJavascriptExecution allow_js(isolate());
1946       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
1947       VMState<EXTERNAL> callback_state(isolate_);
1948       HandleScope handle_scope(isolate_);
1949       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1950     }
1951   }
1952 
1953   if (collector == GarbageCollector::MARK_COMPACTOR &&
1954       (gc_callback_flags & (kGCCallbackFlagForced |
1955                             kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
1956     isolate()->CountUsage(v8::Isolate::kForcedGC);
1957   }
1958 
1959   // Start incremental marking for the next cycle. We do this only for scavenger
1960   // to avoid a loop where mark-compact causes another mark-compact.
1961   if (IsYoungGenerationCollector(collector)) {
1962     StartIncrementalMarkingIfAllocationLimitIsReached(
1963         GCFlagsForIncrementalMarking(),
1964         kGCCallbackScheduleIdleGarbageCollection);
1965   }
1966 
1967   if (!CanExpandOldGeneration(0)) {
1968     InvokeNearHeapLimitCallback();
1969     if (!CanExpandOldGeneration(0)) {
1970       FatalProcessOutOfMemory("Reached heap limit");
1971     }
1972   }
1973 
1974   return freed_global_handles > 0;
1975 }
1976 
1977 int Heap::NotifyContextDisposed(bool dependant_context) {
1978   if (!dependant_context) {
1979     tracer()->ResetSurvivalEvents();
1980     old_generation_size_configured_ = false;
1981     set_old_generation_allocation_limit(initial_old_generation_size_);
1982     MemoryReducer::Event event;
1983     event.type = MemoryReducer::kPossibleGarbage;
1984     event.time_ms = MonotonicallyIncreasingTimeInMs();
1985     memory_reducer_->NotifyPossibleGarbage(event);
1986   }
1987   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1988   if (!isolate()->context().is_null()) {
1989     RemoveDirtyFinalizationRegistriesOnContext(isolate()->raw_native_context());
1990     isolate()->raw_native_context().set_retained_maps(
1991         ReadOnlyRoots(this).empty_weak_array_list());
1992   }
1993   return ++contexts_disposed_;
1994 }
1995 
1996 void Heap::StartIncrementalMarking(int gc_flags,
1997                                    GarbageCollectionReason gc_reason,
1998                                    GCCallbackFlags gc_callback_flags) {
1999   DCHECK(incremental_marking()->IsStopped());
2000 
2001   // Sweeping needs to be completed such that markbits are all cleared before
2002   // starting marking again.
2003   CompleteSweepingFull();
2004 
2005   base::Optional<SafepointScope> safepoint_scope;
2006 
2007   {
2008     AllowGarbageCollection allow_shared_gc;
2009     IgnoreLocalGCRequests ignore_gc_requests(this);
2010     safepoint_scope.emplace(this);
2011   }
2012 
2013 #ifdef DEBUG
2014   VerifyCountersAfterSweeping();
2015 #endif
2016 
2017   // Now that sweeping is completed, we can start the next full GC cycle.
2018   tracer()->StartCycle(GarbageCollector::MARK_COMPACTOR, gc_reason, nullptr,
2019                        GCTracer::MarkingType::kIncremental);
2020 
2021   set_current_gc_flags(gc_flags);
2022   current_gc_callback_flags_ = gc_callback_flags;
2023   incremental_marking()->Start(gc_reason);
2024 }
2025 
2026 void Heap::CompleteSweepingFull() {
2027   array_buffer_sweeper()->EnsureFinished();
2028   mark_compact_collector()->EnsureSweepingCompleted(
2029       MarkCompactCollector::SweepingForcedFinalizationMode::kUnifiedHeap);
2030 
2031   DCHECK(!mark_compact_collector()->sweeping_in_progress());
2032   DCHECK_IMPLIES(cpp_heap(),
2033                  !CppHeap::From(cpp_heap())->sweeper().IsSweepingInProgress());
2034   DCHECK(!tracer()->IsSweepingInProgress());
2035 }
2036 
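// Depending on which allocation limit has been reached, this either starts
// incremental marking immediately (hard limit), schedules an incremental
// marking task (soft limit), or merely notifies the memory reducer of
// possible garbage (embedder fallback); otherwise it does nothing.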
2037 void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
2038     int gc_flags, const GCCallbackFlags gc_callback_flags) {
2039   if (incremental_marking()->IsStopped()) {
2040     switch (IncrementalMarkingLimitReached()) {
2041       case IncrementalMarkingLimit::kHardLimit:
2042         StartIncrementalMarking(
2043             gc_flags,
2044             OldGenerationSpaceAvailable() <= NewSpaceCapacity()
2045                 ? GarbageCollectionReason::kAllocationLimit
2046                 : GarbageCollectionReason::kGlobalAllocationLimit,
2047             gc_callback_flags);
2048         break;
2049       case IncrementalMarkingLimit::kSoftLimit:
2050         incremental_marking()->incremental_marking_job()->ScheduleTask(this);
2051         break;
2052       case IncrementalMarkingLimit::kFallbackForEmbedderLimit:
2053         // This is a fallback case where no appropriate limits have been
2054         // configured yet.
2055         MemoryReducer::Event event;
2056         event.type = MemoryReducer::kPossibleGarbage;
2057         event.time_ms = MonotonicallyIncreasingTimeInMs();
2058         memory_reducer()->NotifyPossibleGarbage(event);
2059         break;
2060       case IncrementalMarkingLimit::kNoLimit:
2061         break;
2062     }
2063   }
2064 }
2065 
2066 void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
2067   if (!incremental_marking()->IsStopped() ||
2068       !incremental_marking()->CanBeActivated()) {
2069     return;
2070   }
2071 
2072   const size_t old_generation_space_available = OldGenerationSpaceAvailable();
2073 
2074   if (old_generation_space_available < NewSpaceCapacity()) {
2075     incremental_marking()->incremental_marking_job()->ScheduleTask(this);
2076   }
2077 }
2078 
2079 void Heap::StartIdleIncrementalMarking(
2080     GarbageCollectionReason gc_reason,
2081     const GCCallbackFlags gc_callback_flags) {
2082   StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
2083                           gc_callback_flags);
2084 }
2085 
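// memmove-style copy of tagged slots for possibly overlapping ranges: the
// copy direction is chosen from the relative order of source and destination,
// and relaxed atomic slot accesses are used while concurrent marking is
// active so markers racing on the same slots do not read torn values.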
2086 void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot,
2087                      const ObjectSlot src_slot, int len,
2088                      WriteBarrierMode mode) {
2089   DCHECK_NE(len, 0);
2090   DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
2091   const ObjectSlot dst_end(dst_slot + len);
2092   // Ensure no range overflow.
2093   DCHECK(dst_slot < dst_end);
2094   DCHECK(src_slot < src_slot + len);
2095 
2096   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
2097     if (dst_slot < src_slot) {
2098       // Copy tagged values forward using relaxed load/stores that do not
2099       // involve value decompression.
2100       const AtomicSlot atomic_dst_end(dst_end);
2101       AtomicSlot dst(dst_slot);
2102       AtomicSlot src(src_slot);
2103       while (dst < atomic_dst_end) {
2104         *dst = *src;
2105         ++dst;
2106         ++src;
2107       }
2108     } else {
2109       // Copy tagged values backwards using relaxed load/stores that do not
2110       // involve value decompression.
2111       const AtomicSlot atomic_dst_begin(dst_slot);
2112       AtomicSlot dst(dst_slot + len - 1);
2113       AtomicSlot src(src_slot + len - 1);
2114       while (dst >= atomic_dst_begin) {
2115         *dst = *src;
2116         --dst;
2117         --src;
2118       }
2119     }
2120   } else {
2121     MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
2122   }
2123   if (mode == SKIP_WRITE_BARRIER) return;
2124   WriteBarrierForRange(dst_object, dst_slot, dst_end);
2125 }
2126 
2127 // Instantiate Heap::CopyRange() for ObjectSlot and MaybeObjectSlot.
2128 template void Heap::CopyRange<ObjectSlot>(HeapObject dst_object,
2129                                           ObjectSlot dst_slot,
2130                                           ObjectSlot src_slot, int len,
2131                                           WriteBarrierMode mode);
2132 template void Heap::CopyRange<MaybeObjectSlot>(HeapObject dst_object,
2133                                                MaybeObjectSlot dst_slot,
2134                                                MaybeObjectSlot src_slot,
2135                                                int len, WriteBarrierMode mode);
2136 
2137 template <typename TSlot>
2138 void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot,
2139                      const TSlot src_slot, int len, WriteBarrierMode mode) {
2140   DCHECK_NE(len, 0);
2141 
2142   DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
2143   const TSlot dst_end(dst_slot + len);
2144   // Ensure ranges do not overlap.
2145   DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot);
2146 
2147   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
2148     // Copy tagged values using relaxed load/stores that do not involve value
2149     // decompression.
2150     const AtomicSlot atomic_dst_end(dst_end);
2151     AtomicSlot dst(dst_slot);
2152     AtomicSlot src(src_slot);
2153     while (dst < atomic_dst_end) {
2154       *dst = *src;
2155       ++dst;
2156       ++src;
2157     }
2158   } else {
2159     MemCopy(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
2160   }
2161   if (mode == SKIP_WRITE_BARRIER) return;
2162   WriteBarrierForRange(dst_object, dst_slot, dst_end);
2163 }
2164 
2165 void Heap::EnsureFromSpaceIsCommitted() {
2166   if (!new_space_) return;
2167   if (new_space_->CommitFromSpaceIfNeeded()) return;
2168 
2169   // Committing memory to from space failed.
2170   // Memory is exhausted and we will die.
2171   FatalProcessOutOfMemory("Committing semi space failed.");
2172 }
2173 
2174 bool Heap::CollectionRequested() {
2175   return collection_barrier_->WasGCRequested();
2176 }
2177 
2178 void Heap::CollectGarbageForBackground(LocalHeap* local_heap) {
2179   CHECK(local_heap->is_main_thread());
2180   CollectAllGarbage(current_gc_flags_,
2181                     GarbageCollectionReason::kBackgroundAllocationFailure,
2182                     current_gc_callback_flags_);
2183 }
2184 
2185 void Heap::CheckCollectionRequested() {
2186   if (!CollectionRequested()) return;
2187 
2188   CollectAllGarbage(current_gc_flags_,
2189                     GarbageCollectionReason::kBackgroundAllocationFailure,
2190                     current_gc_callback_flags_);
2191 }
2192 
2193 #if V8_ENABLE_WEBASSEMBLY
2194 void Heap::EnsureWasmCanonicalRttsSize(int length) {
2195   Handle<WeakArrayList> current_rtts = handle(wasm_canonical_rtts(), isolate_);
2196   if (length <= current_rtts->length()) return;
2197   Handle<WeakArrayList> result = WeakArrayList::EnsureSpace(
2198       isolate(), current_rtts, length, AllocationType::kOld);
2199   result->set_length(length);
2200   set_wasm_canonical_rtts(*result);
2201 }
2202 #endif
2203 
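// Young generation survival statistics, all expressed in percent:
//   promotion_ratio_        = promoted_objects_size_ / start_new_space_size * 100
//   promotion_rate_         = promoted_objects_size_ /
//                             previous_semi_space_copied_object_size_ * 100
//   semi_space_copied_rate_ = semi_space_copied_object_size_ /
//                             start_new_space_size * 100
//   survival_rate           = promotion_ratio_ + semi_space_copied_rate_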
2204 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
2205   if (start_new_space_size == 0) return;
2206 
2207   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
2208                       static_cast<double>(start_new_space_size) * 100);
2209 
2210   if (previous_semi_space_copied_object_size_ > 0) {
2211     promotion_rate_ =
2212         (static_cast<double>(promoted_objects_size_) /
2213          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
2214   } else {
2215     promotion_rate_ = 0;
2216   }
2217 
2218   semi_space_copied_rate_ =
2219       (static_cast<double>(semi_space_copied_object_size_) /
2220        static_cast<double>(start_new_space_size) * 100);
2221 
2222   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
2223   tracer()->AddSurvivalRatio(survival_rate);
2224 }
2225 
2226 namespace {
2227 GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
2228   switch (collector) {
2229     case GarbageCollector::MARK_COMPACTOR:
2230       return GCTracer::Scope::ScopeId::MARK_COMPACTOR;
2231     case GarbageCollector::MINOR_MARK_COMPACTOR:
2232       return GCTracer::Scope::ScopeId::MINOR_MARK_COMPACTOR;
2233     case GarbageCollector::SCAVENGER:
2234       return GCTracer::Scope::ScopeId::SCAVENGER;
2235   }
2236   UNREACHABLE();
2237 }
2238 }  // namespace
2239 
2240 size_t Heap::PerformGarbageCollection(
2241     GarbageCollector collector, GarbageCollectionReason gc_reason,
2242     const char* collector_reason, const v8::GCCallbackFlags gc_callback_flags) {
2243   DisallowJavascriptExecution no_js(isolate());
2244 
2245   if (IsYoungGenerationCollector(collector)) {
2246     CompleteSweepingYoung(collector);
2247 #ifdef VERIFY_HEAP
2248     if (FLAG_verify_heap) {
2249       // If heap verification is enabled, we want to ensure that sweeping is
2250       // completed here, as it will be triggered from Heap::Verify anyway.
2251       // In this way, sweeping finalization is accounted to the corresponding
2252       // full GC cycle.
2253       CompleteSweepingFull();
2254     }
2255 #endif  // VERIFY_HEAP
2256     tracer()->StartCycle(collector, gc_reason, collector_reason,
2257                          GCTracer::MarkingType::kAtomic);
2258   } else {
2259     DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
2260     CompleteSweepingFull();
2261     // If incremental marking has been activated, the full GC cycle has already
2262     // started, so don't start a new one.
2263     if (!incremental_marking_->WasActivated()) {
2264       tracer()->StartCycle(collector, gc_reason, collector_reason,
2265                            GCTracer::MarkingType::kAtomic);
2266     }
2267   }
2268 
2269   tracer()->StartAtomicPause();
2270   if (!Heap::IsYoungGenerationCollector(collector) &&
2271       incremental_marking_->WasActivated()) {
2272     tracer()->UpdateCurrentEvent(gc_reason, collector_reason);
2273   }
2274 
2275   DCHECK(tracer()->IsConsistentWithCollector(collector));
2276   TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
2277 
2278   base::Optional<SafepointScope> safepoint_scope;
2279 
2280   {
2281     AllowGarbageCollection allow_shared_gc;
2282     IgnoreLocalGCRequests ignore_gc_requests(this);
2283     safepoint_scope.emplace(this);
2284   }
2285 
2286   collection_barrier_->StopTimeToCollectionTimer();
2287 
2288 #ifdef VERIFY_HEAP
2289   if (FLAG_verify_heap) {
2290     // We don't really perform a GC here but need this scope for the nested
2291     // SafepointScope inside Verify().
2292     AllowGarbageCollection allow_gc;
2293     Verify();
2294   }
2295 #endif  // VERIFY_HEAP
2296 
2297   tracer()->StartInSafepoint();
2298 
2299   GarbageCollectionPrologueInSafepoint();
2300 
2301   EnsureFromSpaceIsCommitted();
2302 
2303   size_t start_young_generation_size =
2304       NewSpaceSize() + (new_lo_space() ? new_lo_space()->SizeOfObjects() : 0);
2305 
2306   switch (collector) {
2307     case GarbageCollector::MARK_COMPACTOR:
2308       MarkCompact();
2309       break;
2310     case GarbageCollector::MINOR_MARK_COMPACTOR:
2311       MinorMarkCompact();
2312       break;
2313     case GarbageCollector::SCAVENGER:
2314       Scavenge();
2315       break;
2316   }
2317 
2318   ProcessPretenuringFeedback();
2319 
2320   UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
2321   ConfigureInitialOldGenerationSize();
2322 
2323   if (collector != GarbageCollector::MARK_COMPACTOR) {
2324     // Objects that died in the new space might have been accounted
2325     // as bytes marked ahead of schedule by the incremental marker.
2326     incremental_marking()->UpdateMarkedBytesAfterScavenge(
2327         start_young_generation_size - SurvivedYoungObjectSize());
2328   }
2329 
2330   if (!fast_promotion_mode_ || collector == GarbageCollector::MARK_COMPACTOR) {
2331     ComputeFastPromotionMode();
2332   }
2333 
2334   isolate_->counters()->objs_since_last_young()->Set(0);
2335 
2336   isolate_->eternal_handles()->PostGarbageCollectionProcessing();
2337 
2338   // Update relocatables.
2339   Relocatable::PostGarbageCollectionProcessing(isolate_);
2340 
2341   size_t freed_global_handles;
2342 
2343   {
2344     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
2345     // First round weak callbacks are not supposed to allocate and trigger
2346     // nested GCs.
2347     freed_global_handles =
2348         isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
2349   }
2350 
2351   if (collector == GarbageCollector::MARK_COMPACTOR) {
2352     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
2353     // TraceEpilogue may trigger operations that invalidate global handles. It
2354     // has to be called *after* all other operations that potentially touch and
2355     // reset global handles. It is also still part of the main garbage
2356     // collection pause and thus needs to be called *before* any operation that
2357     // can potentially trigger recursive garbage collections.
2358     local_embedder_heap_tracer()->TraceEpilogue();
2359   }
2360 
2361 #if defined(CPPGC_YOUNG_GENERATION)
2362   // Schedule Oilpan's Minor GC. Since the minor GC doesn't support conservative
2363   // stack scanning, do it only when the Scavenger runs from a task, which is
2364   // non-nestable.
2365   if (cpp_heap() && IsYoungGenerationCollector(collector)) {
2366     const bool with_stack = (gc_reason != GarbageCollectionReason::kTask);
2367     CppHeap::From(cpp_heap())
2368         ->RunMinorGC(with_stack ? CppHeap::StackState::kMayContainHeapPointers
2369                                 : CppHeap::StackState::kNoHeapPointers);
2370   }
2371 #endif  // defined(CPPGC_YOUNG_GENERATION)
2372 
2373 #ifdef VERIFY_HEAP
2374   if (FLAG_verify_heap) {
2375     // We don't really perform a GC here but need this scope for the nested
2376     // SafepointScope inside Verify().
2377     AllowGarbageCollection allow_gc;
2378     Verify();
2379   }
2380 #endif  // VERIFY_HEAP
2381 
2382   RecomputeLimits(collector);
2383 
2384   GarbageCollectionEpilogueInSafepoint(collector);
2385 
2386   tracer()->StopInSafepoint();
2387 
2388   return freed_global_handles;
2389 }
2390 
2391 void Heap::CollectSharedGarbage(GarbageCollectionReason gc_reason) {
2392   CHECK(deserialization_complete());
2393   DCHECK(!IsShared());
2394   DCHECK_NOT_NULL(isolate()->shared_isolate());
2395 
2396   isolate()->shared_isolate()->heap()->PerformSharedGarbageCollection(
2397       isolate(), gc_reason);
2398 }
2399 
2400 void Heap::PerformSharedGarbageCollection(Isolate* initiator,
2401                                           GarbageCollectionReason gc_reason) {
2402   DCHECK(IsShared());
2403 
2404   // Stop all client isolates attached to this isolate
2405   GlobalSafepointScope global_safepoint(initiator);
2406 
2407   // Migrate shared isolate to the main thread of the initiator isolate.
2408   v8::Locker locker(reinterpret_cast<v8::Isolate*>(isolate()));
2409   v8::Isolate::Scope isolate_scope(reinterpret_cast<v8::Isolate*>(isolate()));
2410 
2411   tracer()->StartObservablePause();
2412   DCHECK(!incremental_marking_->WasActivated());
2413   DCHECK_NOT_NULL(isolate()->global_safepoint());
2414 
2415   isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
2416     client->heap()->FreeSharedLinearAllocationAreas();
2417 
2418     // As long as we need to iterate the client heap to find references into the
2419     // shared heap, all client heaps need to be iterable.
2420     client->heap()->MakeHeapIterable();
2421 
2422     if (FLAG_concurrent_marking) {
2423       client->heap()->concurrent_marking()->Pause();
2424     }
2425   });
2426 
2427   const GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
2428   PerformGarbageCollection(collector, gc_reason, nullptr);
2429 
2430   isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
2431     if (FLAG_concurrent_marking &&
2432         client->heap()->incremental_marking()->IsMarking()) {
2433       client->heap()->concurrent_marking()->RescheduleJobIfNeeded();
2434     }
2435   });
2436 
2437   tracer()->StopAtomicPause();
2438   tracer()->StopObservablePause();
2439   tracer()->UpdateStatistics(collector);
2440   tracer()->StopFullCycleIfNeeded();
2441 }
2442 
2443 void Heap::CompleteSweepingYoung(GarbageCollector collector) {
2444   GCTracer::Scope::ScopeId scope_id;
2445 
2446   switch (collector) {
2447     case GarbageCollector::MINOR_MARK_COMPACTOR:
2448       scope_id = GCTracer::Scope::MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS;
2449       break;
2450     case GarbageCollector::SCAVENGER:
2451       scope_id = GCTracer::Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS;
2452       break;
2453     default:
2454       UNREACHABLE();
2455   }
2456 
2457   {
2458     TRACE_GC_EPOCH(tracer(), scope_id, ThreadKind::kMain);
2459     array_buffer_sweeper()->EnsureFinished();
2460   }
2461 
2462   // If sweeping is in progress and there are no sweeper tasks running, finish
2463   // the sweeping here, to avoid having to pause and resume during the young
2464   // generation GC.
2465   mark_compact_collector()->FinishSweepingIfOutOfWork();
2466 
2467 #if defined(CPPGC_YOUNG_GENERATION)
2468   // Always complete sweeping if young generation is enabled.
2469   if (cpp_heap()) CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
2470 #endif  // defined(CPPGC_YOUNG_GENERATION)
2471 }
2472 
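// If sweeping from the last full GC is still in progress, eagerly sweeps the
// page containing `object` before it is used; read-only space needs no
// sweeping and large pages always count as already swept.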
2473 void Heap::EnsureSweepingCompleted(HeapObject object) {
2474   if (!mark_compact_collector()->sweeping_in_progress()) return;
2475 
2476   BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(object);
2477   if (basic_chunk->InReadOnlySpace()) return;
2478 
2479   MemoryChunk* chunk = MemoryChunk::cast(basic_chunk);
2480   if (chunk->SweepingDone()) return;
2481 
2482   // SweepingDone() is always true for large pages.
2483   DCHECK(!chunk->IsLargePage());
2484 
2485   Page* page = Page::cast(chunk);
2486   mark_compact_collector()->EnsurePageIsSwept(page);
2487 }
2488 
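// Recomputes the old-generation (and, with global memory scheduling, the
// global) allocation limits after a GC. A growing factor is derived from the
// measured GC speed versus mutator allocation throughput; a full GC resets
// the limits from the current live size, while a young GC with a low
// allocation rate may only lower them.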
2489 void Heap::RecomputeLimits(GarbageCollector collector) {
2490   if (!((collector == GarbageCollector::MARK_COMPACTOR) ||
2491         (HasLowYoungGenerationAllocationRate() &&
2492          old_generation_size_configured_))) {
2493     return;
2494   }
2495 
2496   double v8_gc_speed =
2497       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
2498   double v8_mutator_speed =
2499       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
2500   double v8_growing_factor = MemoryController<V8HeapTrait>::GrowingFactor(
2501       this, max_old_generation_size(), v8_gc_speed, v8_mutator_speed);
2502   double global_growing_factor = 0;
2503   if (UseGlobalMemoryScheduling()) {
2504     DCHECK_NOT_NULL(local_embedder_heap_tracer());
2505     double embedder_gc_speed = tracer()->EmbedderSpeedInBytesPerMillisecond();
2506     double embedder_speed =
2507         tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond();
2508     double embedder_growing_factor =
2509         (embedder_gc_speed > 0 && embedder_speed > 0)
2510             ? MemoryController<GlobalMemoryTrait>::GrowingFactor(
2511                   this, max_global_memory_size_, embedder_gc_speed,
2512                   embedder_speed)
2513             : 0;
2514     global_growing_factor =
2515         std::max(v8_growing_factor, embedder_growing_factor);
2516   }
2517 
2518   size_t old_gen_size = OldGenerationSizeOfObjects();
2519   size_t new_space_capacity = NewSpaceCapacity();
2520   HeapGrowingMode mode = CurrentHeapGrowingMode();
2521 
2522   if (collector == GarbageCollector::MARK_COMPACTOR) {
2523     external_memory_.ResetAfterGC();
2524 
2525     set_old_generation_allocation_limit(
2526         MemoryController<V8HeapTrait>::CalculateAllocationLimit(
2527             this, old_gen_size, min_old_generation_size_,
2528             max_old_generation_size(), new_space_capacity, v8_growing_factor,
2529             mode));
2530     if (UseGlobalMemoryScheduling()) {
2531       DCHECK_GT(global_growing_factor, 0);
2532       global_allocation_limit_ =
2533           MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
2534               this, GlobalSizeOfObjects(), min_global_memory_size_,
2535               max_global_memory_size_, new_space_capacity,
2536               global_growing_factor, mode);
2537     }
2538     CheckIneffectiveMarkCompact(
2539         old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
2540   } else if (HasLowYoungGenerationAllocationRate() &&
2541              old_generation_size_configured_) {
2542     size_t new_old_generation_limit =
2543         MemoryController<V8HeapTrait>::CalculateAllocationLimit(
2544             this, old_gen_size, min_old_generation_size_,
2545             max_old_generation_size(), new_space_capacity, v8_growing_factor,
2546             mode);
2547     if (new_old_generation_limit < old_generation_allocation_limit()) {
2548       set_old_generation_allocation_limit(new_old_generation_limit);
2549     }
2550     if (UseGlobalMemoryScheduling()) {
2551       DCHECK_GT(global_growing_factor, 0);
2552       size_t new_global_limit =
2553           MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
2554               this, GlobalSizeOfObjects(), min_global_memory_size_,
2555               max_global_memory_size_, new_space_capacity,
2556               global_growing_factor, mode);
2557       if (new_global_limit < global_allocation_limit_) {
2558         global_allocation_limit_ = new_global_limit;
2559       }
2560     }
2561   }
2562 }
2563 
2564 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
2565   RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCPrologueCallback);
2566   for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
2567     if (gc_type & info.gc_type) {
2568       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
2569       info.callback(isolate, gc_type, flags, info.data);
2570     }
2571   }
2572 }
2573 
2574 void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
2575   RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
2576   for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
2577     if (gc_type & info.gc_type) {
2578       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
2579       info.callback(isolate, gc_type, flags, info.data);
2580     }
2581   }
2582 }
2583 
2584 void Heap::MarkCompact() {
2585   PauseAllocationObserversScope pause_observers(this);
2586 
2587   SetGCState(MARK_COMPACT);
2588 
2589   PROFILE(isolate_, CodeMovingGCEvent());
2590   CodeSpaceMemoryModificationScope code_modification(this);
2591 
2592   // Disable soft allocation limits in the shared heap, if one exists, as
2593   // promotions into the shared heap should always succeed.
2594   OptionalAlwaysAllocateScope always_allocate_shared_heap(
2595       isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
2596                                   : nullptr);
2597 
2598   UpdateOldGenerationAllocationCounter();
2599   uint64_t size_of_objects_before_gc = SizeOfObjects();
2600 
2601   mark_compact_collector()->Prepare();
2602 
2603   ms_count_++;
2604   contexts_disposed_ = 0;
2605 
2606   MarkCompactPrologue();
2607 
2608   mark_compact_collector()->CollectGarbage();
2609 
2610   MarkCompactEpilogue();
2611 
2612   if (FLAG_allocation_site_pretenuring) {
2613     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
2614   }
2615   old_generation_size_configured_ = true;
2616   // This should be updated before PostGarbageCollectionProcessing, which
2617   // can cause another GC. Take into account the objects promoted during
2618   // GC.
2619   old_generation_allocation_counter_at_last_gc_ +=
2620       static_cast<size_t>(promoted_objects_size_);
2621   old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
2622   global_memory_at_last_gc_ = GlobalSizeOfObjects();
2623 }
2624 
2625 void Heap::MinorMarkCompact() {
2626   DCHECK(FLAG_minor_mc);
2627   DCHECK(new_space());
2628 
2629   if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
2630     isolate()->PrintWithTimestamp(
2631         "[IncrementalMarking] MinorMarkCompact during marking.\n");
2632   }
2633 
2634   PauseAllocationObserversScope pause_observers(this);
2635   SetGCState(MINOR_MARK_COMPACT);
2636 
2637   TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
2638   AlwaysAllocateScope always_allocate(this);
2639   // Disable soft allocation limits in the shared heap, if one exists, as
2640   // promotions into the shared heap should always succeed.
2641   OptionalAlwaysAllocateScope always_allocate_shared_heap(
2642       isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
2643                                   : nullptr);
2644   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
2645       incremental_marking());
2646   // Young generation garbage collection is orthogonal to full GC marking. It
2647   // is possible that objects that are currently being processed for marking are
2648   // reclaimed in the young generation GC that interleaves concurrent marking.
2649   // Pause concurrent markers to allow processing them using
2650   // `UpdateMarkingWorklistAfterYoungGenGC()`.
2651   ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
2652   CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
2653       CppHeap::From(cpp_heap_));
2654 
2655   minor_mark_compact_collector_->CollectGarbage();
2656 
2657   SetGCState(NOT_IN_GC);
2658 }
2659 
2660 void Heap::MarkCompactEpilogue() {
2661   TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
2662   SetGCState(NOT_IN_GC);
2663 
2664   isolate_->counters()->objs_since_last_full()->Set(0);
2665 
2666   incremental_marking()->Epilogue();
2667 
2668   DCHECK(incremental_marking()->IsStopped());
2669 }
2670 
2671 void Heap::MarkCompactPrologue() {
2672   TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
2673   isolate_->descriptor_lookup_cache()->Clear();
2674   RegExpResultsCache::Clear(string_split_cache());
2675   RegExpResultsCache::Clear(regexp_multiple_cache());
2676 
2677   isolate_->compilation_cache()->MarkCompactPrologue();
2678 
2679   FlushNumberStringCache();
2680 }
2681 
2682 void Heap::CheckNewSpaceExpansionCriteria() {
2683   if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
2684       survived_since_last_expansion_ > new_space_->TotalCapacity()) {
2685     // Grow the size of new space if there is room to grow, and enough data
2686     // has survived scavenge since the last expansion.
2687     new_space_->Grow();
2688     survived_since_last_expansion_ = 0;
2689   }
2690   new_lo_space()->SetCapacity(new_space()->Capacity());
2691 }
2692 
2693 void Heap::EvacuateYoungGeneration() {
2694   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
2695   base::MutexGuard guard(relocation_mutex());
2696   // Young generation garbage collection is orthogonal to full GC marking. It
2697   // is possible that objects that are currently being processed for marking are
2698   // reclaimed in the young generation GC that interleaves concurrent marking.
2699   // Pause concurrent markers to allow processing them using
2700   // `UpdateMarkingWorklistAfterYoungGenGC()`.
2701   ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
2702   CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
2703       CppHeap::From(cpp_heap_));
2704   if (!FLAG_concurrent_marking) {
2705     DCHECK(fast_promotion_mode_);
2706     DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
2707   }
2708 
2709   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
2710 
2711   // Move pages from new->old generation.
2712   PageRange range(new_space()->first_allocatable_address(), new_space()->top());
2713   for (auto it = range.begin(); it != range.end();) {
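    // Advance the iterator past the page before removing it below; removing
    // the page from from-space while `it` still points at it would invalidate
    // the iterator.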
2714     Page* p = (*++it)->prev_page();
2715     new_space()->from_space().RemovePage(p);
2716     Page::ConvertNewToOld(p);
2717     if (incremental_marking()->IsMarking())
2718       mark_compact_collector()->RecordLiveSlotsOnPage(p);
2719   }
2720 
2721   // Reset new space.
2722   if (!new_space()->Rebalance()) {
2723     FatalProcessOutOfMemory("NewSpace::Rebalance");
2724   }
2725   new_space()->ResetLinearAllocationArea();
2726   new_space()->set_age_mark(new_space()->top());
2727 
2728   for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
2729     LargePage* page = *it;
2730     // Increment has to happen after we save the page, because it is going to
2731     // be removed below.
2732     it++;
2733     lo_space()->PromoteNewLargeObject(page);
2734   }
2735 
2736   // Fix up special trackers.
2737   external_string_table_.PromoteYoung();
2738   // GlobalHandles are updated in PostGarbageCollectionProcessing.
2739 
2740   size_t promoted = new_space()->Size() + new_lo_space()->Size();
2741   IncrementYoungSurvivorsCounter(promoted);
2742   IncrementPromotedObjectsSize(promoted);
2743   IncrementSemiSpaceCopiedObjectSize(0);
2744 }
2745 
2746 void Heap::Scavenge() {
2747   DCHECK_NOT_NULL(new_space());
2748   DCHECK_IMPLIES(FLAG_separate_gc_phases, !incremental_marking()->IsMarking());
2749 
2750   if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
2751     isolate()->PrintWithTimestamp(
2752         "[IncrementalMarking] Scavenge during marking.\n");
2753   }
2754 
2755   if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
2756     tracer()->NotifyYoungGenerationHandling(
2757         YoungGenerationHandling::kFastPromotionDuringScavenge);
2758     EvacuateYoungGeneration();
2759     return;
2760   }
2761   tracer()->NotifyYoungGenerationHandling(
2762       YoungGenerationHandling::kRegularScavenge);
2763 
2764   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
2765   base::MutexGuard guard(relocation_mutex());
2766   // Young generation garbage collection is orthogonal to full GC marking. It
2767   // is possible that objects that are currently being processed for marking are
2768   // reclaimed in the young generation GC that interleaves concurrent marking.
2769   // Pause concurrent markers to allow processing them using
2770   // `UpdateMarkingWorklistAfterYoungGenGC()`.
2771   ConcurrentMarking::PauseScope pause_js_marking(concurrent_marking());
2772   CppHeap::PauseConcurrentMarkingScope pause_cpp_marking(
2773       CppHeap::From(cpp_heap_));
2774   // There are soft limits in the allocation code, designed to trigger a mark
2775   // sweep collection by failing allocations. There is no sense in trying to
2776   // trigger one during scavenge: scavenge allocations should always succeed.
2777   AlwaysAllocateScope scope(this);
2778 
2779   // Disable soft allocation limits in the shared heap, if one exists, as
2780   // promotions into the shared heap should always succeed.
2781   OptionalAlwaysAllocateScope always_allocate_shared_heap(
2782       isolate()->shared_isolate() ? isolate()->shared_isolate()->heap()
2783                                   : nullptr);
2784 
2785   // Bump-pointer allocations done during scavenge are not real allocations.
2786   // Pause the inline allocation steps.
2787   PauseAllocationObserversScope pause_observers(this);
2788   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
2789       incremental_marking());
2790 
2791   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
2792 
2793   SetGCState(SCAVENGE);
2794 
2795   // Flip the semispaces.  After flipping, to space is empty, from space has
2796   // live objects.
2797   new_space()->Flip();
2798   new_space()->ResetLinearAllocationArea();
2799 
2800   // We also flip the young generation large object space. All large objects
2801   // will be in the from space.
2802   new_lo_space()->Flip();
2803   new_lo_space()->ResetPendingObject();
2804 
2805   // Implements Cheney's copying algorithm.
2806   scavenger_collector_->CollectGarbage();
2807 
2808   SetGCState(NOT_IN_GC);
2809 }
2810 
2811 void Heap::ComputeFastPromotionMode() {
2812   if (!new_space_) return;
2813 
2814   const size_t survived_in_new_space =
2815       survived_last_scavenge_ * 100 / NewSpaceCapacity();
2816   fast_promotion_mode_ =
2817       !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
2818       !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
2819       survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
2820 
2821   if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
2822     PrintIsolate(isolate(), "Fast promotion mode: %s survival rate: %zu%%\n",
2823                  fast_promotion_mode_ ? "true" : "false",
2824                  survived_in_new_space);
2825   }
2826 }
2827 
2828 void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
2829                                            UnprotectMemoryOrigin origin) {
2830   if (!write_protect_code_memory()) return;
2831   if (code_page_collection_memory_modification_scope_depth_ > 0) {
2832     base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
2833     if (unprotected_memory_chunks_.insert(chunk).second) {
2834       chunk->SetCodeModificationPermissions();
2835     }
2836   } else {
2837     DCHECK_GT(code_space_memory_modification_scope_depth_, 0);
2838   }
2839 }
2840 
2841 void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object,
2842                                            UnprotectMemoryOrigin origin) {
2843   UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object), origin);
2844 }
2845 
2846 void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
2847   unprotected_memory_chunks_.erase(chunk);
2848 }
2849 
2850 void Heap::ProtectUnprotectedMemoryChunks() {
2851   base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
2852   for (auto chunk = unprotected_memory_chunks_.begin();
2853        chunk != unprotected_memory_chunks_.end(); chunk++) {
2854     DCHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
2855     (*chunk)->SetDefaultCodePermissions();
2856   }
2857   unprotected_memory_chunks_.clear();
2858 }
2859 
2860 bool Heap::ExternalStringTable::Contains(String string) {
2861   for (size_t i = 0; i < young_strings_.size(); ++i) {
2862     if (young_strings_[i] == string) return true;
2863   }
2864   for (size_t i = 0; i < old_strings_.size(); ++i) {
2865     if (old_strings_[i] == string) return true;
2866   }
2867   return false;
2868 }
2869 
2870 void Heap::UpdateExternalString(String string, size_t old_payload,
2871                                 size_t new_payload) {
2872   DCHECK(string.IsExternalString());
2873   if (FLAG_enable_third_party_heap) return;
2874 
2875   Page* page = Page::FromHeapObject(string);
2876 
2877   if (old_payload > new_payload) {
2878     page->DecrementExternalBackingStoreBytes(
2879         ExternalBackingStoreType::kExternalString, old_payload - new_payload);
2880   } else {
2881     page->IncrementExternalBackingStoreBytes(
2882         ExternalBackingStoreType::kExternalString, new_payload - old_payload);
2883   }
2884 }
2885 
2886 String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
2887                                                             FullObjectSlot p) {
2888   PtrComprCageBase cage_base(heap->isolate());
2889   HeapObject obj = HeapObject::cast(*p);
2890   MapWord first_word = obj.map_word(cage_base, kRelaxedLoad);
2891 
2892   String new_string;
2893 
2894   if (InFromPage(obj)) {
2895     if (!first_word.IsForwardingAddress()) {
2896       // Unreachable external string can be finalized.
2897       String string = String::cast(obj);
2898       if (!string.IsExternalString(cage_base)) {
2899         // Original external string has been internalized.
2900         DCHECK(string.IsThinString(cage_base));
2901         return String();
2902       }
2903       heap->FinalizeExternalString(string);
2904       return String();
2905     }
2906     new_string = String::cast(first_word.ToForwardingAddress());
2907   } else {
2908     new_string = String::cast(obj);
2909   }
2910 
2911   // String is still reachable.
2912   if (new_string.IsThinString(cage_base)) {
2913     // Filtering Thin strings out of the external string table.
2914     return String();
2915   } else if (new_string.IsExternalString(cage_base)) {
2916     MemoryChunk::MoveExternalBackingStoreBytes(
2917         ExternalBackingStoreType::kExternalString,
2918         Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
2919         ExternalString::cast(new_string).ExternalPayloadSize());
2920     return new_string;
2921   }
2922 
2923   // Internalization can replace external strings with non-external strings.
2924   return new_string.IsExternalString(cage_base) ? new_string : String();
2925 }
2926 
2927 void Heap::ExternalStringTable::VerifyYoung() {
2928 #ifdef DEBUG
2929   std::set<String> visited_map;
2930   std::map<MemoryChunk*, size_t> size_map;
2931   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2932   for (size_t i = 0; i < young_strings_.size(); ++i) {
2933     String obj = String::cast(young_strings_[i]);
2934     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2935     DCHECK(mc->InYoungGeneration());
2936     DCHECK(heap_->InYoungGeneration(obj));
2937     DCHECK(!obj.IsTheHole(heap_->isolate()));
2938     DCHECK(obj.IsExternalString());
2939     // Note: we can have repeated elements in the table.
2940     DCHECK_EQ(0, visited_map.count(obj));
2941     visited_map.insert(obj);
2942     size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize();
2943   }
2944   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2945        it != size_map.end(); it++)
2946     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2947 #endif
2948 }
2949 
2950 void Heap::ExternalStringTable::Verify() {
2951 #ifdef DEBUG
2952   std::set<String> visited_map;
2953   std::map<MemoryChunk*, size_t> size_map;
2954   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2955   VerifyYoung();
2956   for (size_t i = 0; i < old_strings_.size(); ++i) {
2957     String obj = String::cast(old_strings_[i]);
2958     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2959     DCHECK(!mc->InYoungGeneration());
2960     DCHECK(!heap_->InYoungGeneration(obj));
2961     DCHECK(!obj.IsTheHole(heap_->isolate()));
2962     DCHECK(obj.IsExternalString());
2963     // Note: we can have repeated elements in the table.
2964     DCHECK_EQ(0, visited_map.count(obj));
2965     visited_map.insert(obj);
2966     size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize();
2967   }
2968   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2969        it != size_map.end(); it++)
2970     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2971 #endif
2972 }
2973 
2974 void Heap::ExternalStringTable::UpdateYoungReferences(
2975     Heap::ExternalStringTableUpdaterCallback updater_func) {
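  // Compacts young_strings_ in place: entries whose string is still alive and
  // still young are written back through `last`, promoted strings are moved
  // to old_strings_, and entries for dead strings are dropped.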
2976   if (young_strings_.empty()) return;
2977 
2978   FullObjectSlot start(young_strings_.data());
2979   FullObjectSlot end(young_strings_.data() + young_strings_.size());
2980   FullObjectSlot last = start;
2981 
2982   for (FullObjectSlot p = start; p < end; ++p) {
2983     String target = updater_func(heap_, p);
2984 
2985     if (target.is_null()) continue;
2986 
2987     DCHECK(target.IsExternalString());
2988 
2989     if (InYoungGeneration(target)) {
2990       // String is still in new space. Update the table entry.
2991       last.store(target);
2992       ++last;
2993     } else {
2994       // String got promoted. Move it to the old string list.
2995       old_strings_.push_back(target);
2996     }
2997   }
2998 
2999   DCHECK(last <= end);
3000   young_strings_.resize(last - start);
3001 #ifdef VERIFY_HEAP
3002   if (FLAG_verify_heap) {
3003     VerifyYoung();
3004   }
3005 #endif
3006 }
3007 
3008 void Heap::ExternalStringTable::PromoteYoung() {
3009   old_strings_.reserve(old_strings_.size() + young_strings_.size());
3010   std::move(std::begin(young_strings_), std::end(young_strings_),
3011             std::back_inserter(old_strings_));
3012   young_strings_.clear();
3013 }
3014 
3015 void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
3016   if (!young_strings_.empty()) {
3017     v->VisitRootPointers(
3018         Root::kExternalStringsTable, nullptr,
3019         FullObjectSlot(young_strings_.data()),
3020         FullObjectSlot(young_strings_.data() + young_strings_.size()));
3021   }
3022 }
3023 
3024 void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
3025   IterateYoung(v);
3026   if (!old_strings_.empty()) {
3027     v->VisitRootPointers(
3028         Root::kExternalStringsTable, nullptr,
3029         FullObjectSlot(old_strings_.data()),
3030         FullObjectSlot(old_strings_.data() + old_strings_.size()));
3031   }
3032 }
3033 
3034 void Heap::UpdateYoungReferencesInExternalStringTable(
3035     ExternalStringTableUpdaterCallback updater_func) {
3036   external_string_table_.UpdateYoungReferences(updater_func);
3037 }
3038 
3039 void Heap::ExternalStringTable::UpdateReferences(
3040     Heap::ExternalStringTableUpdaterCallback updater_func) {
3041   if (old_strings_.size() > 0) {
3042     FullObjectSlot start(old_strings_.data());
3043     FullObjectSlot end(old_strings_.data() + old_strings_.size());
3044     for (FullObjectSlot p = start; p < end; ++p)
3045       p.store(updater_func(heap_, p));
3046   }
3047 
3048   UpdateYoungReferences(updater_func);
3049 }
3050 
3051 void Heap::UpdateReferencesInExternalStringTable(
3052     ExternalStringTableUpdaterCallback updater_func) {
3053   external_string_table_.UpdateReferences(updater_func);
3054 }
3055 
3056 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
3057   ProcessNativeContexts(retainer);
3058   ProcessAllocationSites(retainer);
3059   ProcessDirtyJSFinalizationRegistries(retainer);
3060 }
3061 
3062 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
3063   ProcessNativeContexts(retainer);
3064 }
3065 
3066 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
3067   Object head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
3068   // Update the head of the list of contexts.
3069   set_native_contexts_list(head);
3070 }
3071 
3072 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
3073   Object allocation_site_obj =
3074       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
3075   set_allocation_sites_list(allocation_site_obj);
3076 }
3077 
3078 void Heap::ProcessDirtyJSFinalizationRegistries(WeakObjectRetainer* retainer) {
3079   Object head = VisitWeakList<JSFinalizationRegistry>(
3080       this, dirty_js_finalization_registries_list(), retainer);
3081   set_dirty_js_finalization_registries_list(head);
3082   // If the list is empty, set the tail to undefined. Otherwise the tail is set
3083   // by WeakListVisitor<JSFinalizationRegistry>::VisitLiveObject.
3084   if (head.IsUndefined(isolate())) {
3085     set_dirty_js_finalization_registries_list_tail(head);
3086   }
3087 }
3088 
3089 void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
3090   set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
3091   set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
3092   set_dirty_js_finalization_registries_list(
3093       retainer->RetainAs(dirty_js_finalization_registries_list()));
3094   set_dirty_js_finalization_registries_list_tail(
3095       retainer->RetainAs(dirty_js_finalization_registries_list_tail()));
3096 }
3097 
3098 void Heap::ForeachAllocationSite(
3099     Object list, const std::function<void(AllocationSite)>& visitor) {
3100   DisallowGarbageCollection no_gc;
3101   Object current = list;
3102   while (current.IsAllocationSite()) {
3103     AllocationSite site = AllocationSite::cast(current);
3104     visitor(site);
3105     Object current_nested = site.nested_site();
3106     while (current_nested.IsAllocationSite()) {
3107       AllocationSite nested_site = AllocationSite::cast(current_nested);
3108       visitor(nested_site);
3109       current_nested = nested_site.nested_site();
3110     }
3111     current = site.weak_next();
3112   }
3113 }
3114 
3115 void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
3116   DisallowGarbageCollection no_gc_scope;
3117   bool marked = false;
3118 
3119   ForeachAllocationSite(allocation_sites_list(),
3120                         [&marked, allocation, this](AllocationSite site) {
3121                           if (site.GetAllocationType() == allocation) {
3122                             site.ResetPretenureDecision();
3123                             site.set_deopt_dependent_code(true);
3124                             marked = true;
3125                             RemoveAllocationSitePretenuringFeedback(site);
3126                             return;
3127                           }
3128                         });
3129   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
3130 }
3131 
3132 void Heap::EvaluateOldSpaceLocalPretenuring(
3133     uint64_t size_of_objects_before_gc) {
3134   uint64_t size_of_objects_after_gc = SizeOfObjects();
3135   double old_generation_survival_rate =
3136       (static_cast<double>(size_of_objects_after_gc) * 100) /
3137       static_cast<double>(size_of_objects_before_gc);
3138 
3139   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
3140     // Too many objects died in the old generation, pretenuring of wrong
3141     // allocation sites may be the cause for that. We have to deopt all
3142     // dependent code registered in the allocation sites to re-evaluate
3143     // our pretenuring decisions.
3144     ResetAllAllocationSitesDependentCode(AllocationType::kOld);
3145     if (FLAG_trace_pretenuring) {
3146       PrintF(
3147           "Deopt all allocation sites dependent code due to low survival "
3148           "rate in the old generation %f\n",
3149           old_generation_survival_rate);
3150     }
3151   }
3152 }
3153 
3154 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
3155   DisallowGarbageCollection no_gc;
3156   // All external strings are listed in the external string table.
3157 
3158   class ExternalStringTableVisitorAdapter : public RootVisitor {
3159    public:
3160     explicit ExternalStringTableVisitorAdapter(
3161         Isolate* isolate, v8::ExternalResourceVisitor* visitor)
3162         : isolate_(isolate), visitor_(visitor) {}
3163     void VisitRootPointers(Root root, const char* description,
3164                            FullObjectSlot start, FullObjectSlot end) override {
3165       for (FullObjectSlot p = start; p < end; ++p) {
3166         DCHECK((*p).IsExternalString());
3167         visitor_->VisitExternalString(
3168             Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
3169       }
3170     }
3171 
3172    private:
3173     Isolate* isolate_;
3174     v8::ExternalResourceVisitor* visitor_;
3175   } external_string_table_visitor(isolate(), visitor);
3176 
3177   external_string_table_.IterateAll(&external_string_table_visitor);
3178 }
3179 
3180 STATIC_ASSERT(IsAligned(FixedDoubleArray::kHeaderSize, kDoubleAlignment));
3181 
3182 #ifdef V8_COMPRESS_POINTERS
3183 // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
3184 // is only kTaggedSize aligned but we can keep using unaligned access since
3185 // both x64 and arm64 architectures (where pointer compression is supported)
3186 // allow unaligned access to doubles.
3187 STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kTaggedSize));
3188 #else
3189 STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
3190 #endif
3191 
3192 STATIC_ASSERT(!USE_ALLOCATION_ALIGNMENT_BOOL ||
3193               (HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
3194 
3195 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
3196   switch (alignment) {
3197     case kTaggedAligned:
3198       return 0;
3199     case kDoubleAligned:
3200     case kDoubleUnaligned:
3201       return kDoubleSize - kTaggedSize;
3202     default:
3203       UNREACHABLE();
3204   }
3205 }
3206 
3207 // static
3208 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
3209   if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
3210     return kTaggedSize;
3211   if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
3212     return kDoubleSize - kTaggedSize;  // No fill if double is always aligned.
3213   return 0;
3214 }
3215 
3216 size_t Heap::GetCodeRangeReservedAreaSize() {
3217   return CodeRange::GetWritableReservedAreaSize();
3218 }
3219 
3220 HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
3221   CreateFillerObjectAt(object.address(), filler_size,
3222                        ClearFreedMemoryMode::kDontClearFreedMemory);
3223   return HeapObject::FromAddress(object.address() + filler_size);
3224 }
3225 
3226 HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
3227                                  int allocation_size,
3228                                  AllocationAlignment alignment) {
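  // Illustrative example, assuming kTaggedSize == 4 and kDoubleSize == 8
  // (pointer compression): for a kDoubleAligned request, allocation_size is
  // typically object_size plus one tagged word. If `object` starts at an
  // address that is not 8-byte aligned, that word becomes a one-word filler
  // in front of the object; otherwise it becomes a filler placed right after
  // object_size bytes.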
3229   int filler_size = allocation_size - object_size;
3230   DCHECK_LT(0, filler_size);
3231   int pre_filler = GetFillToAlign(object.address(), alignment);
3232   if (pre_filler) {
3233     object = PrecedeWithFiller(object, pre_filler);
3234     filler_size -= pre_filler;
3235   }
3236   if (filler_size) {
3237     CreateFillerObjectAt(object.address() + object_size, filler_size,
3238                          ClearFreedMemoryMode::kDontClearFreedMemory);
3239   }
3240   return object;
3241 }
3242 
3243 void* Heap::AllocateExternalBackingStore(
3244     const std::function<void*(size_t)>& allocate, size_t byte_length) {
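  // Allocation strategy: optionally trigger a young GC up front to free
  // external backing-store bytes, then try `allocate`. On failure, retry
  // after up to two old-space GCs and, as a last resort, after collecting
  // all available garbage.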
3245   if (!always_allocate() && new_space()) {
3246     size_t new_space_backing_store_bytes =
3247         new_space()->ExternalBackingStoreBytes();
3248     if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
3249         new_space_backing_store_bytes >= byte_length) {
3250       // Performing a young generation GC amortizes over the allocated backing
3251       // store bytes and may free enough external bytes for this allocation.
3252       CollectGarbage(NEW_SPACE,
3253                      GarbageCollectionReason::kExternalMemoryPressure);
3254     }
3255   }
3256   void* result = allocate(byte_length);
3257   if (result) return result;
3258   if (!always_allocate()) {
3259     for (int i = 0; i < 2; i++) {
3260       CollectGarbage(OLD_SPACE,
3261                      GarbageCollectionReason::kExternalMemoryPressure);
3262       result = allocate(byte_length);
3263       if (result) return result;
3264     }
3265     isolate()->counters()->gc_last_resort_from_handles()->Increment();
3266     CollectAllAvailableGarbage(
3267         GarbageCollectionReason::kExternalMemoryPressure);
3268   }
3269   return allocate(byte_length);
3270 }
3271 
3272 void Heap::ConfigureInitialOldGenerationSize() {
3273   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
3274     const size_t minimum_growing_step =
3275         MemoryController<V8HeapTrait>::MinimumAllocationLimitGrowingStep(
3276             CurrentHeapGrowingMode());
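    // Illustrative example: with an average survival ratio of 60%, the
    // proposed limit is 0.6 times the current allocation limit, but never
    // less than the current old generation size plus one minimum growing
    // step.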
3277     const size_t new_old_generation_allocation_limit =
3278         std::max(OldGenerationSizeOfObjects() + minimum_growing_step,
3279                  static_cast<size_t>(
3280                      static_cast<double>(old_generation_allocation_limit()) *
3281                      (tracer()->AverageSurvivalRatio() / 100)));
3282     if (new_old_generation_allocation_limit <
3283         old_generation_allocation_limit()) {
3284       set_old_generation_allocation_limit(new_old_generation_allocation_limit);
3285     } else {
3286       old_generation_size_configured_ = true;
3287     }
3288     if (UseGlobalMemoryScheduling()) {
3289       const size_t new_global_memory_limit = std::max(
3290           GlobalSizeOfObjects() + minimum_growing_step,
3291           static_cast<size_t>(static_cast<double>(global_allocation_limit_) *
3292                               (tracer()->AverageSurvivalRatio() / 100)));
3293       if (new_global_memory_limit < global_allocation_limit_) {
3294         global_allocation_limit_ = new_global_memory_limit;
3295       }
3296     }
3297   }
3298 }
3299 
3300 void Heap::FlushNumberStringCache() {
3301   // Flush the number to string cache.
3302   int len = number_string_cache().length();
3303   for (int i = 0; i < len; i++) {
3304     number_string_cache().set_undefined(i);
3305   }
3306 }
3307 
3308 namespace {
3309 
3310 HeapObject CreateFillerObjectAtImpl(Heap* heap, Address addr, int size,
3311                                     ClearFreedMemoryMode clear_memory_mode) {
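  // The filler's shape depends on its size: a one-word filler map for
  // kTaggedSize, a two-word filler map for 2 * kTaggedSize, and a FreeSpace
  // object (which additionally records its size) for anything larger.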
3312   if (size == 0) return HeapObject();
3313   HeapObject filler = HeapObject::FromAddress(addr);
3314   ReadOnlyRoots roots(heap);
3315   if (size == kTaggedSize) {
3316     filler.set_map_after_allocation(roots.unchecked_one_pointer_filler_map(),
3317                                     SKIP_WRITE_BARRIER);
3318   } else if (size == 2 * kTaggedSize) {
3319     filler.set_map_after_allocation(roots.unchecked_two_pointer_filler_map(),
3320                                     SKIP_WRITE_BARRIER);
3321     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
3322       AtomicSlot slot(ObjectSlot(addr) + 1);
3323       *slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
3324     }
3325   } else {
3326     DCHECK_GT(size, 2 * kTaggedSize);
3327     filler.set_map_after_allocation(roots.unchecked_free_space_map(),
3328                                     SKIP_WRITE_BARRIER);
3329     FreeSpace::cast(filler).set_size(size, kRelaxedStore);
3330     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
3331       MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
3332                    (size / kTaggedSize) - 2);
3333     }
3334   }
3335 
3336   // At this point, we may be deserializing the heap from a snapshot, in which
3337   // case none of the maps have been created yet and they are still nullptr.
3338   DCHECK((filler.map_slot().contains_map_value(kNullAddress) &&
3339           !heap->deserialization_complete()) ||
3340          filler.map(heap->isolate()).IsMap());
3341 
3342   return filler;
3343 }
3344 
3345 #ifdef DEBUG
3346 void VerifyNoNeedToClearSlots(Address start, Address end) {
3347   BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
3348   if (basic_chunk->InReadOnlySpace()) return;
3349   MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
3350   if (chunk->InYoungGeneration()) return;
3351   BaseSpace* space = chunk->owner();
3352   space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
3353 }
3354 #else
3355 void VerifyNoNeedToClearSlots(Address start, Address end) {}
3356 #endif  // DEBUG
3357 
3358 }  // namespace
3359 
3360 HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
3361                                       ClearFreedMemoryMode clear_memory_mode) {
3362   // TODO(leszeks): Verify that no slots need to be recorded.
3363   HeapObject filler =
3364       CreateFillerObjectAtImpl(this, addr, size, clear_memory_mode);
3365   VerifyNoNeedToClearSlots(addr, addr + size);
3366   return filler;
3367 }
3368 
3369 void Heap::CreateFillerObjectAtBackground(
3370     Address addr, int size, ClearFreedMemoryMode clear_memory_mode) {
3371   CreateFillerObjectAtImpl(this, addr, size, clear_memory_mode);
3372   // Do not verify whether slots are cleared here: the concurrent sweeper is not
3373   // allowed to access the main thread's remembered set.
3374 }
3375 
3376 HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
3377                                       ClearRecordedSlots clear_slots_mode) {
3378   // TODO(mlippautz): It would be nice to DCHECK that we never call this
3379   // with {addr} pointing into large object space; however we currently
3380   // initialize LO allocations with a filler, see
3381   // LargeObjectSpace::AllocateLargePage.
3382   if (size == 0) return HeapObject();
3383   HeapObject filler = CreateFillerObjectAtImpl(
3384       this, addr, size,
3385       clear_slots_mode == ClearRecordedSlots::kYes
3386           ? ClearFreedMemoryMode::kClearFreedMemory
3387           : ClearFreedMemoryMode::kDontClearFreedMemory);
3388   if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
3389     if (clear_slots_mode == ClearRecordedSlots::kYes) {
3390       ClearRecordedSlotRange(addr, addr + size);
3391     } else {
3392       VerifyNoNeedToClearSlots(addr, addr + size);
3393     }
3394   }
3395   return filler;
3396 }
3397 
3398 bool Heap::CanMoveObjectStart(HeapObject object) {
3399   if (!FLAG_move_object_start) return false;
3400 
3401   // Sampling heap profiler may have a reference to the object.
3402   if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
3403 
3404   if (IsLargeObject(object)) return false;
3405 
3406   // Compilation jobs may have references to the object.
3407   if (isolate()->concurrent_recompilation_enabled() &&
3408       isolate()->optimizing_compile_dispatcher()->HasJobs()) {
3409     return false;
3410   }
3411 
3412   // We can move the object start if the page was already swept.
3413   return Page::FromHeapObject(object)->SweepingDone();
3414 }
3415 
3416 bool Heap::IsImmovable(HeapObject object) {
3417   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
3418     return third_party_heap::Heap::IsImmovable(object);
3419 
3420   BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
3421   return chunk->NeverEvacuate() || IsLargeObject(object);
3422 }
3423 
3424 bool Heap::IsLargeObject(HeapObject object) {
3425   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
3426     return third_party_heap::Heap::InLargeObjectSpace(object.address()) ||
3427            third_party_heap::Heap::InSpace(object.address(), CODE_LO_SPACE);
3428   return BasicMemoryChunk::FromHeapObject(object)->IsLargePage();
3429 }
3430 
3431 #ifdef ENABLE_SLOW_DCHECKS
3432 namespace {
3433 
3434 class LeftTrimmerVerifierRootVisitor : public RootVisitor {
3435  public:
3436   explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
3437       : to_check_(to_check) {}
3438 
3439   LeftTrimmerVerifierRootVisitor(const LeftTrimmerVerifierRootVisitor&) =
3440       delete;
3441   LeftTrimmerVerifierRootVisitor& operator=(
3442       const LeftTrimmerVerifierRootVisitor&) = delete;
3443 
3444   void VisitRootPointers(Root root, const char* description,
3445                          FullObjectSlot start, FullObjectSlot end) override {
3446     for (FullObjectSlot p = start; p < end; ++p) {
3447       DCHECK_NE(*p, to_check_);
3448     }
3449   }
3450 
3451   void VisitRootPointers(Root root, const char* description,
3452                          OffHeapObjectSlot start,
3453                          OffHeapObjectSlot end) override {
3454     DCHECK_EQ(root, Root::kStringTable);
3455     // We can skip iterating the string table, it doesn't point to any fixed
3456     // arrays.
3457   }
3458 
3459  private:
3460   FixedArrayBase to_check_;
3461 };
3462 }  // namespace
3463 #endif  // ENABLE_SLOW_DCHECKS
3464 
3465 namespace {
3466 bool MayContainRecordedSlots(HeapObject object) {
3467   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
3468   // New space objects do not have recorded slots.
3469   if (BasicMemoryChunk::FromHeapObject(object)->InYoungGeneration())
3470     return false;
3471   // Allowlist objects that definitely do not have pointers.
3472   if (object.IsByteArray() || object.IsFixedDoubleArray()) return false;
3473   // Conservatively return true for other objects.
3474   return true;
3475 }
3476 }  // namespace
3477 
3478 void Heap::OnMoveEvent(HeapObject target, HeapObject source,
3479                        int size_in_bytes) {
3480   HeapProfiler* heap_profiler = isolate_->heap_profiler();
3481   if (heap_profiler->is_tracking_object_moves()) {
3482     heap_profiler->ObjectMoveEvent(source.address(), target.address(),
3483                                    size_in_bytes);
3484   }
3485   for (auto& tracker : allocation_trackers_) {
3486     tracker->MoveEvent(source.address(), target.address(), size_in_bytes);
3487   }
3488   if (target.IsSharedFunctionInfo()) {
3489     LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
3490                                                          target.address()));
3491   } else if (target.IsNativeContext()) {
3492     if (isolate_->current_embedder_state() != nullptr) {
3493       isolate_->current_embedder_state()->OnMoveEvent(source.address(),
3494                                                       target.address());
3495     }
3496     PROFILE(isolate_,
3497             NativeContextMoveEvent(source.address(), target.address()));
3498   }
3499 }
3500 
3501 FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
3502                                         int elements_to_trim) {
3503   if (elements_to_trim == 0) {
3504     // This simplifies reasoning in the rest of the function.
3505     return object;
3506   }
3507   CHECK(!object.is_null());
3508   DCHECK(CanMoveObjectStart(object));
3509   // Add custom visitor to concurrent marker if new left-trimmable type
3510   // is added.
3511   DCHECK(object.IsFixedArray() || object.IsFixedDoubleArray());
3512   const int element_size = object.IsFixedArray() ? kTaggedSize : kDoubleSize;
3513   const int bytes_to_trim = elements_to_trim * element_size;
3514   Map map = object.map();
3515 
3516   // For now this trick is only applied to fixed arrays which may be in new
3517   // space or old space. In a large object space the object's start must
3518   // coincide with the chunk start and thus the trick is just not applicable.
3519   DCHECK(!IsLargeObject(object));
3520   DCHECK(object.map() != ReadOnlyRoots(this).fixed_cow_array_map());
3521 
3522   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3523   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kTaggedSize);
3524   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
3525 
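  // Sketch of the transformation (illustrative): left-trimming by N elements
  // turns
  //   old_start: [map][length][e0][e1]...
  // into
  //   old_start: [filler covering bytes_to_trim bytes]
  //   new_start: [map][length - N][eN]...
  // where new_start == old_start + bytes_to_trim.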
3526   const int len = object.length();
3527   DCHECK(elements_to_trim <= len);
3528 
3529   // Calculate location of new array start.
3530   Address old_start = object.address();
3531   Address new_start = old_start + bytes_to_trim;
3532 
3533   if (incremental_marking()->IsMarking()) {
3534     incremental_marking()->NotifyLeftTrimming(
3535         object, HeapObject::FromAddress(new_start));
3536   }
3537 
3538 #ifdef DEBUG
3539   if (MayContainRecordedSlots(object)) {
3540     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
3541     DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
3542     DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
3543   }
3544 #endif
3545 
3546   // Technically in new space this write might be omitted (except for
3547   // debug mode which iterates through the heap), but to play it safe
3548   // we still do it.
3549   CreateFillerObjectAt(old_start, bytes_to_trim,
3550                        MayContainRecordedSlots(object)
3551                            ? ClearRecordedSlots::kYes
3552                            : ClearRecordedSlots::kNo);
3553 
3554   // Initialize header of the trimmed array. Since left trimming is only
3555   // performed on pages which are not concurrently swept, creating a filler
3556   // object does not require synchronization.
3557   RELAXED_WRITE_FIELD(object, bytes_to_trim,
3558                       Object(MapWord::FromMap(map).ptr()));
3559   RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
3560                       Smi::FromInt(len - elements_to_trim));
3561 
3562   FixedArrayBase new_object =
3563       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3564 
3565   // Notify the heap profiler of change in object layout.
3566   OnMoveEvent(new_object, object, new_object.Size());
3567 
3568 #ifdef ENABLE_SLOW_DCHECKS
3569   if (FLAG_enable_slow_asserts) {
3570     // Make sure the stack or other roots (e.g., Handles) don't contain pointers
3571     // to the original FixedArray (which is now the filler object).
3572     base::Optional<SafepointScope> safepoint_scope;
3573 
3574     {
3575       AllowGarbageCollection allow_gc;
3576       IgnoreLocalGCRequests ignore_gc_requests(this);
3577       safepoint_scope.emplace(this);
3578     }
3579 
3580     LeftTrimmerVerifierRootVisitor root_visitor(object);
3581     ReadOnlyRoots(this).Iterate(&root_visitor);
3582     IterateRoots(&root_visitor, {});
3583   }
3584 #endif  // ENABLE_SLOW_DCHECKS
3585 
3586   return new_object;
3587 }
3588 
3589 void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
3590   const int len = object.length();
3591   DCHECK_LE(elements_to_trim, len);
3592   DCHECK_GE(elements_to_trim, 0);
3593 
3594   int bytes_to_trim;
3595   if (object.IsByteArray()) {
3596     int new_size = ByteArray::SizeFor(len - elements_to_trim);
3597     bytes_to_trim = ByteArray::SizeFor(len) - new_size;
3598     DCHECK_GE(bytes_to_trim, 0);
3599   } else if (object.IsFixedArray()) {
3600     CHECK_NE(elements_to_trim, len);
3601     bytes_to_trim = elements_to_trim * kTaggedSize;
3602   } else {
3603     DCHECK(object.IsFixedDoubleArray());
3604     CHECK_NE(elements_to_trim, len);
3605     bytes_to_trim = elements_to_trim * kDoubleSize;
3606   }
3607 
3608   CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
3609 }
3610 
3611 void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
3612                                    int elements_to_trim) {
3613   // This function is safe to use only at the end of the mark compact
3614   // collection: When marking, we record the weak slots, and shrinking
3615   // invalidates them.
3616   DCHECK_EQ(gc_state(), MARK_COMPACT);
3617   CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
3618                                        elements_to_trim * kTaggedSize);
3619 }
3620 
3621 template <typename T>
3622 void Heap::CreateFillerForArray(T object, int elements_to_trim,
3623                                 int bytes_to_trim) {
3624   DCHECK(object.IsFixedArrayBase() || object.IsByteArray() ||
3625          object.IsWeakFixedArray());
3626 
3627   // For now this trick is only applied to objects in new and paged space.
3628   DCHECK(object.map() != ReadOnlyRoots(this).fixed_cow_array_map());
3629 
3630   if (bytes_to_trim == 0) {
3631     DCHECK_EQ(elements_to_trim, 0);
3632     // No need to create filler and update live bytes counters.
3633     return;
3634   }
3635 
3636   // Calculate location of new array end.
3637   int old_size = object.Size();
3638   Address old_end = object.address() + old_size;
3639   Address new_end = old_end - bytes_to_trim;
3640 
3641 #ifdef DEBUG
3642   if (MayContainRecordedSlots(object)) {
3643     MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
3644     DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
3645     DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
3646   }
3647 #endif
3648 
3649   bool clear_slots = MayContainRecordedSlots(object);
3650 
3651   // Technically in new space this write might be omitted (except for
3652   // debug mode which iterates through the heap), but to play it safe
3653   // we still do it.
3654   // We do not create a filler for objects in a large object space.
3655   if (!IsLargeObject(object)) {
3656     HeapObject filler = CreateFillerObjectAt(
3657         new_end, bytes_to_trim,
3658         clear_slots ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
3659     DCHECK(!filler.is_null());
3660     // Clear the mark bits of the black area that belongs now to the filler.
3661     // This is an optimization. The sweeper will release black fillers anyway.
3662     if (incremental_marking()->black_allocation() &&
3663         incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
3664       Page* page = Page::FromAddress(new_end);
3665       incremental_marking()->marking_state()->bitmap(page)->ClearRange(
3666           page->AddressToMarkbitIndex(new_end),
3667           page->AddressToMarkbitIndex(new_end + bytes_to_trim));
3668     }
3669   } else if (clear_slots) {
3670     // Large objects are not swept, so it is not necessary to clear the
3671     // recorded slot.
3672     MemsetTagged(ObjectSlot(new_end), Object(kClearedFreeMemoryValue),
3673                  (old_end - new_end) / kTaggedSize);
3674   }
3675 
3676   // Initialize header of the trimmed array. We are storing the new length
3677   // using release store after creating a filler for the left-over space to
3678   // avoid races with the sweeper thread.
3679   object.set_length(object.length() - elements_to_trim, kReleaseStore);
3680 
3681   // Notify the heap object allocation tracker of change in object layout. The
3682   // array may not be moved during GC, but its size still has to be adjusted.
3683   for (auto& tracker : allocation_trackers_) {
3684     tracker->UpdateObjectSizeEvent(object.address(), object.Size());
3685   }
3686 }
3687 
3688 void Heap::MakeHeapIterable() {
3689   mark_compact_collector()->EnsureSweepingCompleted(
3690       MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
3691 
3692   safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
3693     local_heap->MakeLinearAllocationAreaIterable();
3694   });
3695 
3696   PagedSpaceIterator spaces(this);
3697   for (PagedSpace* space = spaces.Next(); space != nullptr;
3698        space = spaces.Next()) {
3699     space->MakeLinearAllocationAreaIterable();
3700   }
3701 
3702   if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
3703 }
3704 
3705 void Heap::FreeLinearAllocationAreas() {
3706   safepoint()->IterateLocalHeaps(
3707       [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
3708 
3709   PagedSpaceIterator spaces(this);
3710   for (PagedSpace* space = spaces.Next(); space != nullptr;
3711        space = spaces.Next()) {
3712     space->FreeLinearAllocationArea();
3713   }
3714 
3715   if (new_space()) new_space()->FreeLinearAllocationArea();
3716 }
3717 
3718 void Heap::FreeSharedLinearAllocationAreas() {
3719   if (!isolate()->shared_isolate()) return;
3720   safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
3721     local_heap->FreeSharedLinearAllocationArea();
3722   });
3723   FreeMainThreadSharedLinearAllocationAreas();
3724 }
3725 
3726 void Heap::FreeMainThreadSharedLinearAllocationAreas() {
3727   if (!isolate()->shared_isolate()) return;
3728   shared_old_allocator_->FreeLinearAllocationArea();
3729   if (shared_map_allocator_) shared_map_allocator_->FreeLinearAllocationArea();
3730   main_thread_local_heap()->FreeSharedLinearAllocationArea();
3731 }
3732 
3733 namespace {
3734 
3735 double ComputeMutatorUtilizationImpl(double mutator_speed, double gc_speed) {
3736   constexpr double kMinMutatorUtilization = 0.0;
3737   constexpr double kConservativeGcSpeedInBytesPerMillisecond = 200000;
3738   if (mutator_speed == 0) return kMinMutatorUtilization;
3739   if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
3740   // Derivation:
3741   // mutator_utilization = mutator_time / (mutator_time + gc_time)
3742   // mutator_time = 1 / mutator_speed
3743   // gc_time = 1 / gc_speed
3744   // mutator_utilization = (1 / mutator_speed) /
3745   //                       (1 / mutator_speed + 1 / gc_speed)
3746   // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
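  //
  // Example: with mutator_speed == gc_speed the utilization is 0.5; with
  // gc_speed == 9 * mutator_speed it is 0.9.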
3747   return gc_speed / (mutator_speed + gc_speed);
3748 }
3749 
3750 }  // namespace
3751 
3752 double Heap::ComputeMutatorUtilization(const char* tag, double mutator_speed,
3753                                        double gc_speed) {
3754   double result = ComputeMutatorUtilizationImpl(mutator_speed, gc_speed);
3755   if (FLAG_trace_mutator_utilization) {
3756     isolate()->PrintWithTimestamp(
3757         "%s mutator utilization = %.3f ("
3758         "mutator_speed=%.f, gc_speed=%.f)\n",
3759         tag, result, mutator_speed, gc_speed);
3760   }
3761   return result;
3762 }
3763 
3764 bool Heap::HasLowYoungGenerationAllocationRate() {
3765   double mu = ComputeMutatorUtilization(
3766       "Young generation",
3767       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond(),
3768       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
3769   constexpr double kHighMutatorUtilization = 0.993;
3770   return mu > kHighMutatorUtilization;
3771 }
3772 
3773 bool Heap::HasLowOldGenerationAllocationRate() {
3774   double mu = ComputeMutatorUtilization(
3775       "Old generation",
3776       tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond(),
3777       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
3778   const double kHighMutatorUtilization = 0.993;
3779   return mu > kHighMutatorUtilization;
3780 }
3781 
3782 bool Heap::HasLowEmbedderAllocationRate() {
3783   if (!UseGlobalMemoryScheduling()) return true;
3784 
3785   DCHECK_NOT_NULL(local_embedder_heap_tracer());
3786   double mu = ComputeMutatorUtilization(
3787       "Embedder",
3788       tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond(),
3789       tracer()->EmbedderSpeedInBytesPerMillisecond());
3790   const double kHighMutatorUtilization = 0.993;
3791   return mu > kHighMutatorUtilization;
3792 }
3793 
3794 bool Heap::HasLowAllocationRate() {
3795   return HasLowYoungGenerationAllocationRate() &&
3796          HasLowOldGenerationAllocationRate() && HasLowEmbedderAllocationRate();
3797 }
3798 
3799 bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
3800                                     double mutator_utilization) {
3801   const double kHighHeapPercentage = 0.8;
3802   const double kLowMutatorUtilization = 0.4;
3803   return old_generation_size >=
3804              kHighHeapPercentage * max_old_generation_size() &&
3805          mutator_utilization < kLowMutatorUtilization;
3806 }
3807 
3808 void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
3809                                        double mutator_utilization) {
3810   const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
3811   if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
3812   if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
3813     consecutive_ineffective_mark_compacts_ = 0;
3814     return;
3815   }
3816   ++consecutive_ineffective_mark_compacts_;
3817   if (consecutive_ineffective_mark_compacts_ ==
3818       kMaxConsecutiveIneffectiveMarkCompacts) {
3819     if (InvokeNearHeapLimitCallback()) {
3820       // The callback increased the heap limit.
3821       consecutive_ineffective_mark_compacts_ = 0;
3822       return;
3823     }
3824     FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
3825   }
3826 }
3827 
3828 bool Heap::HasHighFragmentation() {
3829   size_t used = OldGenerationSizeOfObjects();
3830   size_t committed = CommittedOldGenerationMemory();
3831   return HasHighFragmentation(used, committed);
3832 }
3833 
3834 bool Heap::HasHighFragmentation(size_t used, size_t committed) {
3835   const size_t kSlack = 16 * MB;
3836   // Fragmentation is high if committed > 2 * used + kSlack.
3837   // Rewrite the expression to avoid overflow.
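  // The two forms are equivalent given committed >= used:
  //   committed > 2 * used + kSlack  <=>  committed - used > used + kSlack
  // Example: used = 100 MB, committed = 220 MB, kSlack = 16 MB gives
  //   120 MB > 116 MB, so fragmentation is considered high.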
3838   DCHECK_GE(committed, used);
3839   return committed - used > used + kSlack;
3840 }
3841 
3842 bool Heap::ShouldOptimizeForMemoryUsage() {
3843   const size_t kOldGenerationSlack = max_old_generation_size() / 8;
3844   return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
3845          isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
3846          !CanExpandOldGeneration(kOldGenerationSlack);
3847 }
3848 
3849 void Heap::ActivateMemoryReducerIfNeeded() {
3850   // Activate memory reducer when switching to background if
3851   // - there was no mark compact since the start.
3852   // - the committed memory can be potentially reduced.
3853   // 2 pages for the old, code, and map space + 1 page for new space.
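  // (That is 3 paged spaces * 2 pages + 1 page of new space = 7 pages.)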
3854   const int kMinCommittedMemory = 7 * Page::kPageSize;
3855   if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
3856       isolate()->IsIsolateInBackground()) {
3857     MemoryReducer::Event event;
3858     event.type = MemoryReducer::kPossibleGarbage;
3859     event.time_ms = MonotonicallyIncreasingTimeInMs();
3860     memory_reducer_->NotifyPossibleGarbage(event);
3861   }
3862 }
3863 
3864 void Heap::ReduceNewSpaceSize() {
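  // kLowAllocationThroughput is in bytes per millisecond (as reported by the
  // GC tracer), so the new space is shrunk when the mutator allocates at less
  // than roughly 1 MB/s.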
3865   static const size_t kLowAllocationThroughput = 1000;
3866   const double allocation_throughput =
3867       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
3868 
3869   if (FLAG_predictable) return;
3870 
3871   if (ShouldReduceMemory() ||
3872       ((allocation_throughput != 0) &&
3873        (allocation_throughput < kLowAllocationThroughput))) {
3874     new_space_->Shrink();
3875     new_lo_space_->SetCapacity(new_space_->Capacity());
3876     UncommitFromSpace();
3877   }
3878 }
3879 
3880 size_t Heap::NewSpaceSize() { return new_space() ? new_space()->Size() : 0; }
3881 
3882 size_t Heap::NewSpaceCapacity() {
3883   return new_space() ? new_space()->Capacity() : 0;
3884 }
3885 
3886 void Heap::FinalizeIncrementalMarkingIfComplete(
3887     GarbageCollectionReason gc_reason) {
3888   if (incremental_marking()->IsMarking() &&
3889       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
3890        (!incremental_marking()->finalize_marking_completed() &&
3891         mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
3892         local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
3893     FinalizeIncrementalMarkingIncrementally(gc_reason);
3894   } else if (incremental_marking()->IsComplete() ||
3895              (incremental_marking()->IsMarking() &&
3896               mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
3897               local_embedder_heap_tracer()
3898                   ->ShouldFinalizeIncrementalMarking())) {
3899     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
3900   }
3901 }
3902 
3903 void Heap::FinalizeIncrementalMarkingAtomically(
3904     GarbageCollectionReason gc_reason) {
3905   DCHECK(!incremental_marking()->IsStopped());
3906   CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
3907 }
3908 
3909 void Heap::InvokeIncrementalMarkingPrologueCallbacks() {
3910   GCCallbacksScope scope(this);
3911   if (scope.CheckReenter()) {
3912     AllowGarbageCollection allow_allocation;
3913     TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
3914     VMState<EXTERNAL> state(isolate_);
3915     HandleScope handle_scope(isolate_);
3916     CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
3917   }
3918 }
3919 
3920 void Heap::InvokeIncrementalMarkingEpilogueCallbacks() {
3921   GCCallbacksScope scope(this);
3922   if (scope.CheckReenter()) {
3923     AllowGarbageCollection allow_allocation;
3924     TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
3925     VMState<EXTERNAL> state(isolate_);
3926     HandleScope handle_scope(isolate_);
3927     CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
3928   }
3929 }
3930 
3931 void Heap::FinalizeIncrementalMarkingIncrementally(
3932     GarbageCollectionReason gc_reason) {
3933   if (FLAG_trace_incremental_marking) {
3934     isolate()->PrintWithTimestamp(
3935         "[IncrementalMarking] (%s).\n",
3936         Heap::GarbageCollectionReasonToString(gc_reason));
3937   }
3938 
3939   DevToolsTraceEventScope devtools_trace_event_scope(
3940       this, "MajorGC", "incremental finalization step");
3941 
3942   NestedTimedHistogramScope incremental_marking_scope(
3943       isolate()->counters()->gc_incremental_marking_finalize());
3944   TRACE_EVENT1(
3945       "v8", "V8.GCIncrementalMarkingFinalize", "epoch",
3946       tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL_FINALIZE));
3947   TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE,
3948                  ThreadKind::kMain);
3949 
3950   IgnoreLocalGCRequests ignore_gc_requests(this);
3951   InvokeIncrementalMarkingPrologueCallbacks();
3952   incremental_marking()->FinalizeIncrementally();
3953   InvokeIncrementalMarkingEpilogueCallbacks();
3954 }
3955 
3956 void Heap::NotifyObjectLayoutChange(
3957     HeapObject object, const DisallowGarbageCollection&,
3958     InvalidateRecordedSlots invalidate_recorded_slots) {
3959   if (incremental_marking()->IsMarking()) {
3960     incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
3961     if (incremental_marking()->IsCompacting() &&
3962         invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
3963         MayContainRecordedSlots(object)) {
3964       MemoryChunk::FromHeapObject(object)
3965           ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
3966     }
3967   }
3968   if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
3969       MayContainRecordedSlots(object)) {
3970     MemoryChunk::FromHeapObject(object)
3971         ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
3972   }
3973 #ifdef VERIFY_HEAP
3974   if (FLAG_verify_heap) {
3975     DCHECK(pending_layout_change_object_.is_null());
3976     pending_layout_change_object_ = object;
3977   }
3978 #endif
3979 }
3980 
3981 #ifdef VERIFY_HEAP
3982 // Helper class for collecting slot addresses.
3983 class SlotCollectingVisitor final : public ObjectVisitor {
3984  public:
3985   void VisitPointers(HeapObject host, ObjectSlot start,
3986                      ObjectSlot end) override {
3987     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
3988   }
3989   void VisitPointers(HeapObject host, MaybeObjectSlot start,
3990                      MaybeObjectSlot end) final {
3991     for (MaybeObjectSlot p = start; p < end; ++p) {
3992       slots_.push_back(p);
3993     }
3994   }
3995 
3996   void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
3997     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
3998 #ifdef V8_EXTERNAL_CODE_SPACE
3999     code_slots_.push_back(slot);
4000 #endif
4001   }
4002 
4003   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
4004 
4005   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4006     UNREACHABLE();
4007   }
4008 
4009   void VisitMapPointer(HeapObject object) override {}  // do nothing by default
4010 
4011   int number_of_slots() { return static_cast<int>(slots_.size()); }
4012 
4013   MaybeObjectSlot slot(int i) { return slots_[i]; }
4014 #ifdef V8_EXTERNAL_CODE_SPACE
4015   CodeObjectSlot code_slot(int i) { return code_slots_[i]; }
4016   int number_of_code_slots() { return static_cast<int>(code_slots_.size()); }
4017 #endif
4018 
4019  private:
4020   std::vector<MaybeObjectSlot> slots_;
4021 #ifdef V8_EXTERNAL_CODE_SPACE
4022   std::vector<CodeObjectSlot> code_slots_;
4023 #endif
4024 };
4025 
4026 void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
4027   // Object layout changes are currently not supported on background threads.
4028   DCHECK_NULL(LocalHeap::Current());
4029 
4030   if (!FLAG_verify_heap) return;
4031 
4032   PtrComprCageBase cage_base(isolate());
4033 
4034   // Check that Heap::NotifyObjectLayoutChange was called for object transitions
4035   // that are not safe for concurrent marking.
4036   // If you see this check triggering for a freshly allocated object,
4037   // use object->set_map_after_allocation() to initialize its map.
4038   if (pending_layout_change_object_.is_null()) {
4039     VerifySafeMapTransition(object, new_map);
4040   } else {
4041     DCHECK_EQ(pending_layout_change_object_, object);
4042     pending_layout_change_object_ = HeapObject();
4043   }
4044 }
4045 
4046 void Heap::VerifySafeMapTransition(HeapObject object, Map new_map) {
4047   PtrComprCageBase cage_base(isolate());
4048 
4049   if (object.IsJSObject(cage_base)) {
4050     // Without double unboxing all in-object fields of a JSObject are tagged.
4051     return;
4052   }
4053   if (object.IsString(cage_base) &&
4054       (new_map == ReadOnlyRoots(this).thin_string_map() ||
4055        new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
4056     // When transitioning a string to ThinString,
4057     // Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
4058     // tagged fields are introduced.
4059     return;
4060   }
4061   if (FLAG_shared_string_table && object.IsString(cage_base) &&
4062       InstanceTypeChecker::IsInternalizedString(new_map.instance_type())) {
4063     // In-place internalization does not change a string's fields.
4064     //
4065     // When sharing the string table, the setting and re-setting of maps below
4066     // can race when there are parallel internalization operations, causing
4067     // DCHECKs to fail.
4068     return;
4069   }
4070   // Check that the set of slots before and after the transition match.
4071   SlotCollectingVisitor old_visitor;
4072   object.IterateFast(cage_base, &old_visitor);
4073   MapWord old_map_word = object.map_word(cage_base, kRelaxedLoad);
4074   // Temporarily set the new map to iterate new slots.
4075   object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
4076   SlotCollectingVisitor new_visitor;
4077   object.IterateFast(cage_base, &new_visitor);
4078   // Restore the old map.
4079   object.set_map_word(old_map_word, kRelaxedStore);
4080   DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
4081   for (int i = 0; i < new_visitor.number_of_slots(); i++) {
4082     DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
4083   }
4084 #ifdef V8_EXTERNAL_CODE_SPACE
4085   DCHECK_EQ(new_visitor.number_of_code_slots(),
4086             old_visitor.number_of_code_slots());
4087   for (int i = 0; i < new_visitor.number_of_code_slots(); i++) {
4088     DCHECK_EQ(new_visitor.code_slot(i), old_visitor.code_slot(i));
4089   }
4090 #endif  // V8_EXTERNAL_CODE_SPACE
4091 }
4092 #endif  // VERIFY_HEAP
4093 
4094 GCIdleTimeHeapState Heap::ComputeHeapState() {
4095   GCIdleTimeHeapState heap_state;
4096   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4097   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4098   return heap_state;
4099 }
4100 
4101 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
4102                                  GCIdleTimeHeapState heap_state,
4103                                  double deadline_in_ms) {
4104   bool result = false;
4105   switch (action) {
4106     case GCIdleTimeAction::kDone:
4107       result = true;
4108       break;
4109     case GCIdleTimeAction::kIncrementalStep: {
4110       incremental_marking()->AdvanceWithDeadline(
4111           deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
4112           StepOrigin::kTask);
4113       FinalizeIncrementalMarkingIfComplete(
4114           GarbageCollectionReason::kFinalizeMarkingViaTask);
4115       result = incremental_marking()->IsStopped();
4116       break;
4117     }
4118   }
4119 
4120   return result;
4121 }
4122 
4123 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
4124                                     GCIdleTimeHeapState heap_state,
4125                                     double start_ms, double deadline_in_ms) {
4126   double idle_time_in_ms = deadline_in_ms - start_ms;
4127   double current_time = MonotonicallyIncreasingTimeInMs();
4128   last_idle_notification_time_ = current_time;
4129   double deadline_difference = deadline_in_ms - current_time;
4130 
4131   if (FLAG_trace_idle_notification) {
4132     isolate_->PrintWithTimestamp(
4133         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
4134         "ms, deadline usage %.2f ms [",
4135         idle_time_in_ms, idle_time_in_ms - deadline_difference,
4136         deadline_difference);
4137     switch (action) {
4138       case GCIdleTimeAction::kDone:
4139         PrintF("done");
4140         break;
4141       case GCIdleTimeAction::kIncrementalStep:
4142         PrintF("incremental step");
4143         break;
4144     }
4145     PrintF("]");
4146     if (FLAG_trace_idle_notification_verbose) {
4147       PrintF("[");
4148       heap_state.Print();
4149       PrintF("]");
4150     }
4151     PrintF("\n");
4152   }
4153 }
4154 
4155 double Heap::MonotonicallyIncreasingTimeInMs() const {
4156   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4157          static_cast<double>(base::Time::kMillisecondsPerSecond);
4158 }
4159 
4160 #if DEBUG
4161 void Heap::VerifyNewSpaceTop() {
4162   if (!new_space()) return;
4163   new_space()->VerifyTop();
4164 }
4165 #endif  // DEBUG
4166 
4167 bool Heap::IdleNotification(int idle_time_in_ms) {
4168   return IdleNotification(
4169       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4170       (static_cast<double>(idle_time_in_ms) /
4171        static_cast<double>(base::Time::kMillisecondsPerSecond)));
4172 }
4173 
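// Example: an embedder granting a 16 ms idle budget at monotonic time t
// seconds ends up calling IdleNotification(t + 0.016) below; this path is
// typically reached through v8::Isolate::IdleNotificationDeadline().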
4174 bool Heap::IdleNotification(double deadline_in_seconds) {
4175   CHECK(HasBeenSetUp());
4176   double deadline_in_ms =
4177       deadline_in_seconds *
4178       static_cast<double>(base::Time::kMillisecondsPerSecond);
4179   NestedTimedHistogramScope idle_notification_scope(
4180       isolate_->counters()->gc_idle_notification());
4181   TRACE_EVENT0("v8", "V8.GCIdleNotification");
4182   double start_ms = MonotonicallyIncreasingTimeInMs();
4183   double idle_time_in_ms = deadline_in_ms - start_ms;
4184 
4185   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
4186                              OldGenerationAllocationCounter(),
4187                              EmbedderAllocationCounter());
4188 
4189   GCIdleTimeHeapState heap_state = ComputeHeapState();
4190   GCIdleTimeAction action =
4191       gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
4192   bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
4193   IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
4194   return result;
4195 }
4196 
4197 bool Heap::RecentIdleNotificationHappened() {
4198   return (last_idle_notification_time_ +
4199           GCIdleTimeHandler::kMaxScheduledIdleTime) >
4200          MonotonicallyIncreasingTimeInMs();
4201 }
4202 
4203 class MemoryPressureInterruptTask : public CancelableTask {
4204  public:
4205   explicit MemoryPressureInterruptTask(Heap* heap)
4206       : CancelableTask(heap->isolate()), heap_(heap) {}
4207 
4208   ~MemoryPressureInterruptTask() override = default;
4209   MemoryPressureInterruptTask(const MemoryPressureInterruptTask&) = delete;
4210   MemoryPressureInterruptTask& operator=(const MemoryPressureInterruptTask&) =
4211       delete;
4212 
4213  private:
4214   // v8::internal::CancelableTask overrides.
4215   void RunInternal() override { heap_->CheckMemoryPressure(); }
4216 
4217   Heap* heap_;
4218 };
4219 
4220 void Heap::CheckMemoryPressure() {
4221   if (HighMemoryPressure()) {
4222     // The optimizing compiler may be unnecessarily holding on to memory.
4223     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
4224   }
4225   // Reset the memory pressure level to avoid recursive GCs triggered by
4226   // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
4227   // the finalizers.
4228   MemoryPressureLevel memory_pressure_level = memory_pressure_level_.exchange(
4229       MemoryPressureLevel::kNone, std::memory_order_relaxed);
4230   if (memory_pressure_level == MemoryPressureLevel::kCritical) {
4231     TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
4232     CollectGarbageOnMemoryPressure();
4233   } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
4234     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4235       TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
4236       StartIncrementalMarking(kReduceMemoryFootprintMask,
4237                               GarbageCollectionReason::kMemoryPressure);
4238     }
4239   }
4240 }
4241 
4242 void Heap::CollectGarbageOnMemoryPressure() {
4243   const int kGarbageThresholdInBytes = 8 * MB;
4244   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
4245   // This constant is the maximum response time in the RAIL performance model.
4246   const double kMaxMemoryPressurePauseMs = 100;
4247 
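  // Example: with 300 MB committed, 200 MB of live objects and no external
  // memory, the potential garbage computed below is 100 MB. That exceeds both
  // 8 MB and 10% of committed memory, so a second full GC is performed if the
  // first one stayed under 50 ms; otherwise incremental marking is started.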
4248   double start = MonotonicallyIncreasingTimeInMs();
4249   CollectAllGarbage(kReduceMemoryFootprintMask,
4250                     GarbageCollectionReason::kMemoryPressure,
4251                     kGCCallbackFlagCollectAllAvailableGarbage);
4252   EagerlyFreeExternalMemory();
4253   double end = MonotonicallyIncreasingTimeInMs();
4254 
4255   // Estimate how much memory we can free.
4256   int64_t potential_garbage =
4257       (CommittedMemory() - SizeOfObjects()) + external_memory_.total();
4258   // If we can potentially free large amount of memory, then start GC right
4259   // away instead of waiting for memory reducer.
4260   if (potential_garbage >= kGarbageThresholdInBytes &&
4261       potential_garbage >=
4262           CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
4263     // If we spent less than half of the time budget, then perform full GC
4264     // Otherwise, start incremental marking.
4265     if (end - start < kMaxMemoryPressurePauseMs / 2) {
4266       CollectAllGarbage(kReduceMemoryFootprintMask,
4267                         GarbageCollectionReason::kMemoryPressure,
4268                         kGCCallbackFlagCollectAllAvailableGarbage);
4269     } else {
4270       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4271         StartIncrementalMarking(kReduceMemoryFootprintMask,
4272                                 GarbageCollectionReason::kMemoryPressure);
4273       }
4274     }
4275   }
4276 }
4277 
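// A typical caller is the embedder forwarding a system memory warning,
// roughly:
//
//   isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
//
// Depending on is_isolate_locked this either handles the pressure directly or
// requests a GC interrupt and posts a MemoryPressureInterruptTask.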
4278 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
4279                                       bool is_isolate_locked) {
4280   TRACE_EVENT1("devtools.timeline,v8", "V8.MemoryPressureNotification", "level",
4281                static_cast<int>(level));
4282   MemoryPressureLevel previous =
4283       memory_pressure_level_.exchange(level, std::memory_order_relaxed);
4284   if ((previous != MemoryPressureLevel::kCritical &&
4285        level == MemoryPressureLevel::kCritical) ||
4286       (previous == MemoryPressureLevel::kNone &&
4287        level == MemoryPressureLevel::kModerate)) {
4288     if (is_isolate_locked) {
4289       CheckMemoryPressure();
4290     } else {
4291       ExecutionAccess access(isolate());
4292       isolate()->stack_guard()->RequestGC();
4293       auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
4294           reinterpret_cast<v8::Isolate*>(isolate()));
4295       taskrunner->PostTask(std::make_unique<MemoryPressureInterruptTask>(this));
4296     }
4297   }
4298 }
4299 
4300 void Heap::EagerlyFreeExternalMemory() {
4301   array_buffer_sweeper()->EnsureFinished();
4302   memory_allocator()->unmapper()->EnsureUnmappingCompleted();
4303 }
4304 
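// Minimal sketch of an embedder callback (hypothetical names) registered via
// v8::Isolate::AddNearHeapLimitCallback:
//
//   size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
//                          size_t initial_heap_limit) {
//     // Returning a value larger than current_heap_limit raises the limit.
//     return current_heap_limit + 32 * 1024 * 1024;
//   }
//   isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);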
4305 void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
4306                                     void* data) {
4307   const size_t kMaxCallbacks = 100;
4308   CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
4309   for (auto callback_data : near_heap_limit_callbacks_) {
4310     CHECK_NE(callback_data.first, callback);
4311   }
4312   near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
4313 }
4314 
4315 void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
4316                                        size_t heap_limit) {
4317   for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
4318     if (near_heap_limit_callbacks_[i].first == callback) {
4319       near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
4320       if (heap_limit) {
4321         RestoreHeapLimit(heap_limit);
4322       }
4323       return;
4324     }
4325   }
4326   UNREACHABLE();
4327 }
4328 
4329 void Heap::AppendArrayBufferExtension(JSArrayBuffer object,
4330                                       ArrayBufferExtension* extension) {
4331   // ArrayBufferSweeper is managing all counters and updating Heap counters.
4332   array_buffer_sweeper_->Append(object, extension);
4333 }
4334 
4335 void Heap::DetachArrayBufferExtension(JSArrayBuffer object,
4336                                       ArrayBufferExtension* extension) {
4337   // ArrayBufferSweeper is managing all counters and updating Heap counters.
4338   return array_buffer_sweeper_->Detach(object, extension);
4339 }
4340 
4341 void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
4342   initial_max_old_generation_size_threshold_ =
4343       initial_max_old_generation_size_ * threshold_percent;
4344 }
4345 
4346 bool Heap::InvokeNearHeapLimitCallback() {
4347   if (near_heap_limit_callbacks_.size() > 0) {
4348     AllowGarbageCollection allow_gc;
4349     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_NEAR_HEAP_LIMIT);
4350     VMState<EXTERNAL> callback_state(isolate());
4351     HandleScope scope(isolate());
4352     v8::NearHeapLimitCallback callback =
4353         near_heap_limit_callbacks_.back().first;
4354     void* data = near_heap_limit_callbacks_.back().second;
4355     size_t heap_limit = callback(data, max_old_generation_size(),
4356                                  initial_max_old_generation_size_);
4357     if (heap_limit > max_old_generation_size()) {
4358       set_max_old_generation_size(
4359           std::min(heap_limit, AllocatorLimitOnMaxOldGenerationSize()));
4360       return true;
4361     }
4362   }
4363   return false;
4364 }
4365 
4366 bool Heap::MeasureMemory(std::unique_ptr<v8::MeasureMemoryDelegate> delegate,
4367                          v8::MeasureMemoryExecution execution) {
4368   HandleScope handle_scope(isolate());
4369   std::vector<Handle<NativeContext>> contexts = FindAllNativeContexts();
4370   std::vector<Handle<NativeContext>> to_measure;
4371   for (auto& current : contexts) {
4372     if (delegate->ShouldMeasure(
4373             v8::Utils::ToLocal(Handle<Context>::cast(current)))) {
4374       to_measure.push_back(current);
4375     }
4376   }
4377   return memory_measurement_->EnqueueRequest(std::move(delegate), execution,
4378                                              to_measure);
4379 }
4380 
4381 std::unique_ptr<v8::MeasureMemoryDelegate> Heap::MeasureMemoryDelegate(
4382     Handle<NativeContext> context, Handle<JSPromise> promise,
4383     v8::MeasureMemoryMode mode) {
4384   return i::MemoryMeasurement::DefaultDelegate(isolate_, context, promise,
4385                                                mode);
4386 }
4387 
4388 void Heap::CollectCodeStatistics() {
4389   TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
4390   IgnoreLocalGCRequests ignore_gc_requests(this);
4391   SafepointScope safepoint_scope(this);
4392   MakeHeapIterable();
4393   CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
4394   // We do not look for code in new space, or map space.  If code
4395   // somehow ends up in those spaces, we would miss it here.
4396   CodeStatistics::CollectCodeStatistics(code_space_, isolate());
4397   CodeStatistics::CollectCodeStatistics(old_space_, isolate());
4398   CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
4399 }
4400 
4401 #ifdef DEBUG
4402 
4403 void Heap::Print() {
4404   if (!HasBeenSetUp()) return;
4405   isolate()->PrintStack(stdout);
4406 
4407   for (SpaceIterator it(this); it.HasNext();) {
4408     it.Next()->Print();
4409   }
4410 }
4411 
4412 void Heap::ReportCodeStatistics(const char* title) {
4413   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4414   CollectCodeStatistics();
4415   CodeStatistics::ReportCodeStatistics(isolate());
4416 }
4417 
4418 #endif  // DEBUG
4419 
4420 const char* Heap::GarbageCollectionReasonToString(
4421     GarbageCollectionReason gc_reason) {
4422   switch (gc_reason) {
4423     case GarbageCollectionReason::kAllocationFailure:
4424       return "allocation failure";
4425     case GarbageCollectionReason::kAllocationLimit:
4426       return "allocation limit";
4427     case GarbageCollectionReason::kContextDisposal:
4428       return "context disposal";
4429     case GarbageCollectionReason::kCountersExtension:
4430       return "counters extension";
4431     case GarbageCollectionReason::kDebugger:
4432       return "debugger";
4433     case GarbageCollectionReason::kDeserializer:
4434       return "deserialize";
4435     case GarbageCollectionReason::kExternalMemoryPressure:
4436       return "external memory pressure";
4437     case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
4438       return "finalize incremental marking via stack guard";
4439     case GarbageCollectionReason::kFinalizeMarkingViaTask:
4440       return "finalize incremental marking via task";
4441     case GarbageCollectionReason::kFullHashtable:
4442       return "full hash-table";
4443     case GarbageCollectionReason::kHeapProfiler:
4444       return "heap profiler";
4445     case GarbageCollectionReason::kTask:
4446       return "task";
4447     case GarbageCollectionReason::kLastResort:
4448       return "last resort";
4449     case GarbageCollectionReason::kLowMemoryNotification:
4450       return "low memory notification";
4451     case GarbageCollectionReason::kMakeHeapIterable:
4452       return "make heap iterable";
4453     case GarbageCollectionReason::kMemoryPressure:
4454       return "memory pressure";
4455     case GarbageCollectionReason::kMemoryReducer:
4456       return "memory reducer";
4457     case GarbageCollectionReason::kRuntime:
4458       return "runtime";
4459     case GarbageCollectionReason::kSamplingProfiler:
4460       return "sampling profiler";
4461     case GarbageCollectionReason::kSnapshotCreator:
4462       return "snapshot creator";
4463     case GarbageCollectionReason::kTesting:
4464       return "testing";
4465     case GarbageCollectionReason::kExternalFinalize:
4466       return "external finalize";
4467     case GarbageCollectionReason::kGlobalAllocationLimit:
4468       return "global allocation limit";
4469     case GarbageCollectionReason::kMeasureMemory:
4470       return "measure memory";
4471     case GarbageCollectionReason::kUnknown:
4472       return "unknown";
4473     case GarbageCollectionReason::kBackgroundAllocationFailure:
4474       return "background allocation failure";
4475   }
4476   UNREACHABLE();
4477 }
4478 
4479 bool Heap::Contains(HeapObject value) const {
4480   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
4481     return true;
4482   }
4483   if (ReadOnlyHeap::Contains(value)) {
4484     return false;
4485   }
4486   if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
4487     return false;
4488   }
4489   return HasBeenSetUp() &&
4490          ((new_space_ && new_space_->ToSpaceContains(value)) ||
4491           old_space_->Contains(value) || code_space_->Contains(value) ||
4492           (map_space_ && map_space_->Contains(value)) ||
4493           lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
4494           (new_lo_space_ && new_lo_space_->Contains(value)));
4495 }
4496 
4497 bool Heap::ContainsCode(HeapObject value) const {
4498   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
4499     return true;
4500   }
4501   // TODO(v8:11880): support external code space.
4502   if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
4503     return false;
4504   }
4505   return HasBeenSetUp() &&
4506          (code_space_->Contains(value) || code_lo_space_->Contains(value));
4507 }
4508 
4509 bool Heap::SharedHeapContains(HeapObject value) const {
4510   if (shared_old_space_)
4511     return shared_old_space_->Contains(value) ||
4512            (shared_map_space_ && shared_map_space_->Contains(value));
4513   return false;
4514 }
4515 
4516 bool Heap::ShouldBeInSharedOldSpace(HeapObject value) {
4517   if (isolate()->OwnsStringTable()) return false;
4518   if (ReadOnlyHeap::Contains(value)) return false;
4519   if (Heap::InYoungGeneration(value)) return false;
4520   if (value.IsExternalString()) return false;
4521   if (value.IsString()) {
4522     return value.IsInternalizedString() ||
4523            String::IsInPlaceInternalizable(String::cast(value));
4524   }
4525   return false;
4526 }
4527 
4528 bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
4529   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
4530     return third_party_heap::Heap::InSpace(value.address(), space);
4531   if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
4532     return false;
4533   }
4534   if (!HasBeenSetUp()) return false;
4535 
4536   switch (space) {
4537     case NEW_SPACE:
4538       return new_space_->ToSpaceContains(value);
4539     case OLD_SPACE:
4540       return old_space_->Contains(value);
4541     case CODE_SPACE:
4542       return code_space_->Contains(value);
4543     case MAP_SPACE:
4544       DCHECK(map_space_);
4545       return map_space_->Contains(value);
4546     case LO_SPACE:
4547       return lo_space_->Contains(value);
4548     case CODE_LO_SPACE:
4549       return code_lo_space_->Contains(value);
4550     case NEW_LO_SPACE:
4551       return new_lo_space_->Contains(value);
4552     case RO_SPACE:
4553       return ReadOnlyHeap::Contains(value);
4554   }
4555   UNREACHABLE();
4556 }
4557 
4558 bool Heap::IsShared() { return isolate()->is_shared(); }
4559 
4560 bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
4561   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
4562     return false;
4563   }
4564   if (!HasBeenSetUp()) return false;
4565 
4566   switch (space) {
4567     case NEW_SPACE:
4568       return new_space_->ToSpaceContainsSlow(addr);
4569     case OLD_SPACE:
4570       return old_space_->ContainsSlow(addr);
4571     case CODE_SPACE:
4572       return code_space_->ContainsSlow(addr);
4573     case MAP_SPACE:
4574       DCHECK(map_space_);
4575       return map_space_->ContainsSlow(addr);
4576     case LO_SPACE:
4577       return lo_space_->ContainsSlow(addr);
4578     case CODE_LO_SPACE:
4579       return code_lo_space_->ContainsSlow(addr);
4580     case NEW_LO_SPACE:
4581       return new_lo_space_->ContainsSlow(addr);
4582     case RO_SPACE:
4583       return read_only_space_->ContainsSlow(addr);
4584   }
4585   UNREACHABLE();
4586 }
4587 
4588 bool Heap::IsValidAllocationSpace(AllocationSpace space) {
4589   switch (space) {
4590     case NEW_SPACE:
4591     case OLD_SPACE:
4592     case CODE_SPACE:
4593     case MAP_SPACE:
4594     case LO_SPACE:
4595     case NEW_LO_SPACE:
4596     case CODE_LO_SPACE:
4597     case RO_SPACE:
4598       return true;
4599     default:
4600       return false;
4601   }
4602 }
4603 
4604 #ifdef VERIFY_HEAP
4605 void Heap::Verify() {
4606   CHECK(HasBeenSetUp());
4607   IgnoreLocalGCRequests ignore_gc_requests(this);
4608   SafepointScope safepoint_scope(this);
4609   HandleScope scope(isolate());
4610 
4611   MakeHeapIterable();
4612 
4613   array_buffer_sweeper()->EnsureFinished();
4614 
4615   VerifyPointersVisitor visitor(this);
4616   IterateRoots(&visitor, {});
4617 
4618   if (!isolate()->context().is_null() &&
4619       !isolate()->normalized_map_cache()->IsUndefined(isolate())) {
4620     NormalizedMapCache::cast(*isolate()->normalized_map_cache())
4621         .NormalizedMapCacheVerify(isolate());
4622   }
4623 
4624   // The heap verifier can't deal with partially deserialized objects, so
4625   // disable it if a deserializer is active.
4626   // TODO(leszeks): Enable verification during deserialization, e.g. by only
4627   // blocklisting objects that are in a partially deserialized state.
4628   if (isolate()->has_active_deserializer()) return;
4629 
4630   VerifySmisVisitor smis_visitor;
4631   IterateSmiRoots(&smis_visitor);
4632 
4633   if (new_space_) new_space_->Verify(isolate());
4634 
4635   old_space_->Verify(isolate(), &visitor);
4636   if (map_space_) {
4637     map_space_->Verify(isolate(), &visitor);
4638   }
4639 
4640   VerifyPointersVisitor no_dirty_regions_visitor(this);
4641   code_space_->Verify(isolate(), &no_dirty_regions_visitor);
4642 
4643   lo_space_->Verify(isolate());
4644   code_lo_space_->Verify(isolate());
4645   if (new_lo_space_) new_lo_space_->Verify(isolate());
4646   isolate()->string_table()->VerifyIfOwnedBy(isolate());
4647 
4648 #if DEBUG
4649   VerifyCommittedPhysicalMemory();
4650 #endif  // DEBUG
4651 }
4652 
4653 void Heap::VerifyReadOnlyHeap() {
4654   CHECK(!read_only_space_->writable());
4655   read_only_space_->Verify(isolate());
4656 }
4657 
4658 class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
4659  public:
4660   SlotVerifyingVisitor(Isolate* isolate, std::set<Address>* untyped,
4661                        std::set<std::pair<SlotType, Address>>* typed)
4662       : ObjectVisitorWithCageBases(isolate), untyped_(untyped), typed_(typed) {}
4663 
4664   virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
4665 
4666   void VisitPointers(HeapObject host, ObjectSlot start,
4667                      ObjectSlot end) override {
4668 #ifdef DEBUG
4669     for (ObjectSlot slot = start; slot < end; ++slot) {
4670       Object obj = slot.load(cage_base());
4671       CHECK(!MapWord::IsPacked(obj.ptr()) || !HasWeakHeapObjectTag(obj));
4672     }
4673 #endif  // DEBUG
4674     VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
4675   }
4676 
4677   void VisitPointers(HeapObject host, MaybeObjectSlot start,
4678                      MaybeObjectSlot end) final {
4679     for (MaybeObjectSlot slot = start; slot < end; ++slot) {
4680       if (ShouldHaveBeenRecorded(host, slot.load(cage_base()))) {
4681         CHECK_GT(untyped_->count(slot.address()), 0);
4682       }
4683     }
4684   }
4685 
4686   void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
4687     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
4688     if (ShouldHaveBeenRecorded(
4689             host, MaybeObject::FromObject(slot.load(code_cage_base())))) {
4690       CHECK_GT(untyped_->count(slot.address()), 0);
4691     }
4692   }
4693 
4694   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
4695     Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
4696     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
4697       CHECK(InTypedSet(SlotType::kCodeEntry, rinfo->pc()) ||
4698             (rinfo->IsInConstantPool() &&
4699              InTypedSet(SlotType::kConstPoolCodeEntry,
4700                         rinfo->constant_pool_entry_address())));
4701     }
4702   }
4703 
4704   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4705     Object target = rinfo->target_object(cage_base());
4706     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
4707       CHECK(InTypedSet(SlotType::kEmbeddedObjectFull, rinfo->pc()) ||
4708             InTypedSet(SlotType::kEmbeddedObjectCompressed, rinfo->pc()) ||
4709             InTypedSet(SlotType::kEmbeddedObjectData, rinfo->pc()) ||
4710             (rinfo->IsInConstantPool() &&
4711              InTypedSet(SlotType::kConstPoolEmbeddedObjectCompressed,
4712                         rinfo->constant_pool_entry_address())) ||
4713             (rinfo->IsInConstantPool() &&
4714              InTypedSet(SlotType::kConstPoolEmbeddedObjectFull,
4715                         rinfo->constant_pool_entry_address())));
4716     }
4717   }
4718 
4719  protected:
4720   bool InUntypedSet(ObjectSlot slot) {
4721     return untyped_->count(slot.address()) > 0;
4722   }
4723 
4724  private:
4725   bool InTypedSet(SlotType type, Address slot) {
4726     return typed_->count(std::make_pair(type, slot)) > 0;
4727   }
4728   std::set<Address>* untyped_;
4729   std::set<std::pair<SlotType, Address>>* typed_;
4730 };
4731 
4732 class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
4733  public:
4734   OldToNewSlotVerifyingVisitor(Isolate* isolate, std::set<Address>* untyped,
4735                                std::set<std::pair<SlotType, Address>>* typed,
4736                                EphemeronRememberedSet* ephemeron_remembered_set)
4737       : SlotVerifyingVisitor(isolate, untyped, typed),
4738         ephemeron_remembered_set_(ephemeron_remembered_set) {}
4739 
4740   bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
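    // Example: a slot in an old-space FixedArray that points at a freshly
    // allocated young-generation object must have been recorded in the
    // OLD_TO_NEW remembered set.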
4741     DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InYoungGeneration(target),
4742                    Heap::InToPage(target));
4743     return target->IsStrongOrWeak() && Heap::InYoungGeneration(target) &&
4744            !Heap::InYoungGeneration(host);
4745   }
4746 
4747   void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
4748                       ObjectSlot target) override {
4749     VisitPointer(host, target);
4750     if (FLAG_minor_mc) return;
4751     // Keys are handled separately and should never appear in this set.
4752     CHECK(!InUntypedSet(key));
4753     Object k = *key;
4754     if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
4755       EphemeronHashTable table = EphemeronHashTable::cast(host);
4756       auto it = ephemeron_remembered_set_->find(table);
4757       CHECK(it != ephemeron_remembered_set_->end());
4758       int slot_index =
4759           EphemeronHashTable::SlotToIndex(table.address(), key.address());
4760       InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
4761       CHECK(it->second.find(entry.as_int()) != it->second.end());
4762     }
4763   }
4764 
4765  private:
4766   EphemeronRememberedSet* ephemeron_remembered_set_;
4767 };
4768 
4769 template <RememberedSetType direction>
4770 void CollectSlots(MemoryChunk* chunk, Address start, Address end,
4771                   std::set<Address>* untyped,
4772                   std::set<std::pair<SlotType, Address>>* typed) {
4773   RememberedSet<direction>::Iterate(
4774       chunk,
4775       [start, end, untyped](MaybeObjectSlot slot) {
4776         if (start <= slot.address() && slot.address() < end) {
4777           untyped->insert(slot.address());
4778         }
4779         return KEEP_SLOT;
4780       },
4781       SlotSet::FREE_EMPTY_BUCKETS);
4782   RememberedSet<direction>::IterateTyped(
4783       chunk, [=](SlotType type, Address slot) {
4784         if (start <= slot && slot < end) {
4785           typed->insert(std::make_pair(type, slot));
4786         }
4787         return KEEP_SLOT;
4788       });
4789 }
4790 
4791 void Heap::VerifyRememberedSetFor(HeapObject object) {
4792   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
4793   DCHECK_IMPLIES(chunk->mutex() == nullptr, ReadOnlyHeap::Contains(object));
4794   // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
4795   base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
4796       chunk->mutex());
4797   PtrComprCageBase cage_base(isolate());
4798   Address start = object.address();
4799   Address end = start + object.Size(cage_base);
4800   std::set<Address> old_to_new;
4801   std::set<std::pair<SlotType, Address>> typed_old_to_new;
4802   if (!InYoungGeneration(object)) {
4803     CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
4804     OldToNewSlotVerifyingVisitor visitor(isolate(), &old_to_new,
4805                                          &typed_old_to_new,
4806                                          &this->ephemeron_remembered_set_);
4807     object.IterateBody(cage_base, &visitor);
4808   }
4809   // TODO(v8:11797): Add old to old slot set verification once all weak objects
4810   // have their own instance types and slots are recorded for all weak fields.
4811 }
4812 #endif
4813 
4814 #ifdef DEBUG
4815 void Heap::VerifyCountersAfterSweeping() {
4816   PagedSpaceIterator spaces(this);
4817   for (PagedSpace* space = spaces.Next(); space != nullptr;
4818        space = spaces.Next()) {
4819     space->VerifyCountersAfterSweeping(this);
4820   }
4821 }
4822 
4823 void Heap::VerifyCountersBeforeConcurrentSweeping() {
4824   PagedSpaceIterator spaces(this);
4825   for (PagedSpace* space = spaces.Next(); space != nullptr;
4826        space = spaces.Next()) {
4827     space->VerifyCountersBeforeConcurrentSweeping();
4828   }
4829 }
4830 
4831 void Heap::VerifyCommittedPhysicalMemory() {
4832   PagedSpaceIterator spaces(this);
4833   for (PagedSpace* space = spaces.Next(); space != nullptr;
4834        space = spaces.Next()) {
4835     space->VerifyCommittedPhysicalMemory();
4836   }
4837 }
4838 #endif  // DEBUG
4839 
4840 void Heap::ZapFromSpace() {
4841   if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
4842   for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
4843     memory_allocator()->ZapBlock(page->area_start(),
4844                                  page->HighWaterMark() - page->area_start(),
4845                                  ZapValue());
4846   }
4847 }
4848 
4849 void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
4850 #ifdef DEBUG
4851   DCHECK(IsAligned(start_address, kIntSize));
4852   for (int i = 0; i < size_in_bytes / kIntSize; i++) {
4853     Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
4854   }
4855 #endif
4856 }
4857 
4858 void Heap::RegisterCodeObject(Handle<Code> code) {
4859   Address addr = code->address();
4860   if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL && code_space()->Contains(addr)) {
4861     MemoryChunk::FromHeapObject(*code)
4862         ->GetCodeObjectRegistry()
4863         ->RegisterNewlyAllocatedCodeObject(addr);
4864   }
4865 }
4866 
4867 void Heap::IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
4868   DCHECK(!options.contains(SkipRoot::kWeak));
4869 
4870   if (!options.contains(SkipRoot::kOldGeneration) &&
4871       !options.contains(SkipRoot::kUnserializable) &&
4872       isolate()->OwnsStringTable()) {
4873     // Do not visit for the following reasons.
4874     // - Serialization, since the string table is custom serialized.
4875     // - If we are skipping old generation, since all internalized strings
4876     //   are in old space.
4877     // - If the string table is shared and this is not the shared heap,
4878     //   since all internalized strings are in the shared heap.
4879     isolate()->string_table()->IterateElements(v);
4880   }
4881   v->Synchronize(VisitorSynchronization::kStringTable);
4882   if (!options.contains(SkipRoot::kExternalStringTable) &&
4883       !options.contains(SkipRoot::kUnserializable)) {
4884     // Scavenge collections have special processing for this.
4885     // Do not visit for serialization, since the external string table will
4886     // be populated from scratch upon deserialization.
4887     external_string_table_.IterateAll(v);
4888   }
4889   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4890 }
4891 
4892 void Heap::IterateSmiRoots(RootVisitor* v) {
4893   // Acquire execution access since we are going to read stack limit values.
4894   ExecutionAccess access(isolate());
4895   v->VisitRootPointers(Root::kSmiRootList, nullptr,
4896                        roots_table().smi_roots_begin(),
4897                        roots_table().smi_roots_end());
4898   v->Synchronize(VisitorSynchronization::kSmiRootList);
4899 }
4900 
4901 // We cannot avoid stale handles to left-trimmed objects, but can only make
4902 // sure all handles still needed are updated. Filter out a stale pointer
4903 // and clear the slot to allow post processing of handles (needed because
4904 // the sweeper might actually free the underlying page).
4905 class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
4906  public:
4907   explicit ClearStaleLeftTrimmedHandlesVisitor(Heap* heap)
4908       : heap_(heap)
4909 #if V8_COMPRESS_POINTERS
4910         ,
4911         cage_base_(heap->isolate())
4912 #endif  // V8_COMPRESS_POINTERS
4913   {
4914     USE(heap_);
4915   }
4916 
4917   void VisitRootPointer(Root root, const char* description,
4918                         FullObjectSlot p) override {
4919     FixHandle(p);
4920   }
4921 
4922   void VisitRootPointers(Root root, const char* description,
4923                          FullObjectSlot start, FullObjectSlot end) override {
4924     for (FullObjectSlot p = start; p < end; ++p) {
4925       FixHandle(p);
4926     }
4927   }
4928 
4929   // The pointer compression cage base value used for decompression of all
4930   // tagged values except references to Code objects.
4931   PtrComprCageBase cage_base() const {
4932 #if V8_COMPRESS_POINTERS
4933     return cage_base_;
4934 #else
4935     return PtrComprCageBase{};
4936 #endif  // V8_COMPRESS_POINTERS
4937   }
4938 
4939  private:
4940   inline void FixHandle(FullObjectSlot p) {
4941     if (!(*p).IsHeapObject()) return;
4942     HeapObject current = HeapObject::cast(*p);
4943     if (!current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
4944         current.IsFreeSpaceOrFiller(cage_base())) {
4945 #ifdef DEBUG
4946       // We need to find a FixedArrayBase map after walking the fillers.
4947       while (
4948           !current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
4949           current.IsFreeSpaceOrFiller(cage_base())) {
4950         Address next = current.ptr();
4951         if (current.map(cage_base()) ==
4952             ReadOnlyRoots(heap_).one_pointer_filler_map()) {
4953           next += kTaggedSize;
4954         } else if (current.map(cage_base()) ==
4955                    ReadOnlyRoots(heap_).two_pointer_filler_map()) {
4956           next += 2 * kTaggedSize;
4957         } else {
4958           next += current.Size();
4959         }
4960         current = HeapObject::cast(Object(next));
4961       }
4962       DCHECK(
4963           current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() ||
4964           current.IsFixedArrayBase(cage_base()));
4965 #endif  // DEBUG
4966       p.store(Smi::zero());
4967     }
4968   }
4969 
4970   Heap* heap_;
4971 
4972 #if V8_COMPRESS_POINTERS
4973   const PtrComprCageBase cage_base_;
4974 #endif  // V8_COMPRESS_POINTERS
4975 };
4976 
4977 void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
4978   v->VisitRootPointers(Root::kStrongRootList, nullptr,
4979                        roots_table().strong_roots_begin(),
4980                        roots_table().strong_roots_end());
4981   v->Synchronize(VisitorSynchronization::kStrongRootList);
4982 
4983   isolate_->bootstrapper()->Iterate(v);
4984   v->Synchronize(VisitorSynchronization::kBootstrapper);
4985   Relocatable::Iterate(isolate_, v);
4986   v->Synchronize(VisitorSynchronization::kRelocatable);
4987   isolate_->debug()->Iterate(v);
4988   v->Synchronize(VisitorSynchronization::kDebug);
4989 
4990   isolate_->compilation_cache()->Iterate(v);
4991   v->Synchronize(VisitorSynchronization::kCompilationCache);
4992 
4993   if (!options.contains(SkipRoot::kOldGeneration)) {
4994     IterateBuiltins(v);
4995     v->Synchronize(VisitorSynchronization::kBuiltins);
4996   }
4997 
4998   // Iterate over pointers being held by inactive threads.
4999   isolate_->thread_manager()->Iterate(v);
5000   v->Synchronize(VisitorSynchronization::kThreadManager);
5001 
5002   // Visitors in this block only run when not serializing. These include:
5003   //
5004   // - Thread-local and stack.
5005   // - Handles.
5006   // - Microtasks.
5007   // - The startup object cache.
5008   //
5009   // When creating a real startup snapshot, these areas are expected to be empty.
5010   // It is also possible to create a snapshot of a *running* isolate for testing
5011   // purposes. In this case, these areas are likely not empty and will simply be
5012   // skipped.
5013   //
5014   // The general guideline for adding visitors to this section vs. adding them
5015   // above is that non-transient heap state is always visited, transient heap
5016   // state is visited only when not serializing.
5017   if (!options.contains(SkipRoot::kUnserializable)) {
5018     if (!options.contains(SkipRoot::kGlobalHandles)) {
5019       if (options.contains(SkipRoot::kWeak)) {
5020         if (options.contains(SkipRoot::kOldGeneration)) {
5021           // Skip handles that are either weak or old.
5022           isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
5023         } else {
5024           // Skip handles that are weak.
5025           isolate_->global_handles()->IterateStrongRoots(v);
5026         }
5027       } else {
5028         // Do not skip weak handles.
5029         if (options.contains(SkipRoot::kOldGeneration)) {
5030           // Skip handles that are old.
5031           isolate_->global_handles()->IterateAllYoungRoots(v);
5032         } else {
5033           // Do not skip any handles.
5034           isolate_->global_handles()->IterateAllRoots(v);
5035         }
5036       }
5037     }
5038     v->Synchronize(VisitorSynchronization::kGlobalHandles);
5039 
5040     if (!options.contains(SkipRoot::kStack)) {
5041       IterateStackRoots(v);
5042       v->Synchronize(VisitorSynchronization::kStackRoots);
5043     }
5044 
5045 #ifndef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
5046     // Iterate over main thread handles in handle scopes.
5047     if (!options.contains(SkipRoot::kMainThreadHandles)) {
5048       // Clear main thread handles with stale references to left-trimmed
5049       // objects. The GC would crash on such stale references.
5050       ClearStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
5051       isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
5052 
5053       isolate_->handle_scope_implementer()->Iterate(v);
5054     }
5055 #endif
5056 
5057     // Iterate local handles for all local heaps.
5058     safepoint_->Iterate(v);
5059 
5060     // Iterates all persistent handles.
5061     isolate_->persistent_handles_list()->Iterate(v, isolate_);
5062 
5063     v->Synchronize(VisitorSynchronization::kHandleScope);
5064 
5065     if (options.contains(SkipRoot::kOldGeneration)) {
5066       isolate_->eternal_handles()->IterateYoungRoots(v);
5067     } else {
5068       isolate_->eternal_handles()->IterateAllRoots(v);
5069     }
5070     v->Synchronize(VisitorSynchronization::kEternalHandles);
5071 
5072     // Iterate over pending Microtasks stored in MicrotaskQueues.
5073     MicrotaskQueue* default_microtask_queue =
5074         isolate_->default_microtask_queue();
5075     if (default_microtask_queue) {
5076       MicrotaskQueue* microtask_queue = default_microtask_queue;
5077       do {
5078         microtask_queue->IterateMicrotasks(v);
5079         microtask_queue = microtask_queue->next();
5080       } while (microtask_queue != default_microtask_queue);
5081     }
5082 
5083     // Iterate over other strong roots (currently only identity maps and
5084     // deoptimization entries).
5085     for (StrongRootsEntry* current = strong_roots_head_; current;
5086          current = current->next) {
5087       v->VisitRootPointers(Root::kStrongRoots, current->label, current->start,
5088                            current->end);
5089     }
5090     v->Synchronize(VisitorSynchronization::kStrongRoots);
5091 
5092     // Iterate over the startup and shared heap object caches unless
5093     // serializing or deserializing.
5094     SerializerDeserializer::IterateStartupObjectCache(isolate_, v);
5095     v->Synchronize(VisitorSynchronization::kStartupObjectCache);
5096 
5097     // When shared_isolate() is null, isolate_ is either an unshared (instead of
5098     // a client) Isolate or the shared Isolate. In both cases isolate_ owns its
5099     // shared heap object cache and should iterate it.
5100     //
5101     // When shared_isolate() is not null, isolate_ is a client Isolate, does not
5102     // own its shared heap object cache, and should not iterate it.
5103     if (isolate_->shared_isolate() == nullptr) {
5104       SerializerDeserializer::IterateSharedHeapObjectCache(isolate_, v);
5105       v->Synchronize(VisitorSynchronization::kSharedHeapObjectCache);
5106     }
5107   }
5108 
5109   if (!options.contains(SkipRoot::kWeak)) {
5110     IterateWeakRoots(v, options);
5111   }
5112 }
5113 
5114 void Heap::IterateRootsIncludingClients(RootVisitor* v,
5115                                         base::EnumSet<SkipRoot> options) {
5116   IterateRoots(v, options);
5117 
5118   if (isolate()->is_shared()) {
5119     isolate()->global_safepoint()->IterateClientIsolates(
5120         [v, options](Isolate* client) {
5121           client->heap()->IterateRoots(v, options);
5122         });
5123   }
5124 }
5125 
5126 void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
5127   isolate_->global_handles()->IterateWeakRoots(v);
5128 }
5129 
5130 void Heap::IterateBuiltins(RootVisitor* v) {
5131   Builtins* builtins = isolate()->builtins();
5132   for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
5133        ++builtin) {
5134     const char* name = Builtins::name(builtin);
5135     v->VisitRootPointer(Root::kBuiltins, name, builtins->builtin_slot(builtin));
5136   }
5137 
5138   for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLastTier0;
5139        ++builtin) {
5140     v->VisitRootPointer(Root::kBuiltins, Builtins::name(builtin),
5141                         builtins->builtin_tier0_slot(builtin));
5142   }
5143 
5144   // The entry table doesn't need to be updated since all builtins are embedded.
5145   STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
5146 }
5147 
5148 void Heap::IterateStackRoots(RootVisitor* v) {
5149   isolate_->Iterate(v);
5150   isolate_->global_handles()->IterateStrongStackRoots(v);
5151 }
5152 
5153 namespace {
5154 size_t GlobalMemorySizeFromV8Size(size_t v8_size) {
5155   const size_t kGlobalMemoryToV8Ratio = 2;
5156   return std::min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
5157                   static_cast<uint64_t>(v8_size) * kGlobalMemoryToV8Ratio);
5158 }
5159 }  // anonymous namespace
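// GlobalMemorySizeFromV8Size above derives the combined V8-plus-embedder
// budget as twice the V8 budget, doing the multiplication in 64 bits and
// clamping to the largest representable size_t so the result cannot overflow
// on 32-bit builds. A standalone sketch of the same saturating computation
// follows; the name SaturatingDouble is illustrative only.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>

inline std::size_t SaturatingDouble(std::size_t v8_size) {
  const uint64_t doubled = static_cast<uint64_t>(v8_size) * 2u;
  const uint64_t max_size_t =
      static_cast<uint64_t>(std::numeric_limits<std::size_t>::max());
  return static_cast<std::size_t>(std::min(max_size_t, doubled));
}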
5160 
5161 void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
5162   // Initialize max_semi_space_size_.
5163   {
5164     max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
5165     if (constraints.max_young_generation_size_in_bytes() > 0) {
5166       max_semi_space_size_ = SemiSpaceSizeFromYoungGenerationSize(
5167           constraints.max_young_generation_size_in_bytes());
5168     }
5169     if (FLAG_max_semi_space_size > 0) {
5170       max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
5171     } else if (FLAG_max_heap_size > 0) {
5172       size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
5173       size_t young_generation_size, old_generation_size;
5174       if (FLAG_max_old_space_size > 0) {
5175         old_generation_size = static_cast<size_t>(FLAG_max_old_space_size) * MB;
5176         young_generation_size = max_heap_size > old_generation_size
5177                                     ? max_heap_size - old_generation_size
5178                                     : 0;
5179       } else {
5180         GenerationSizesFromHeapSize(max_heap_size, &young_generation_size,
5181                                     &old_generation_size);
5182       }
5183       max_semi_space_size_ =
5184           SemiSpaceSizeFromYoungGenerationSize(young_generation_size);
5185     }
5186     if (FLAG_stress_compaction) {
5187       // This will cause more frequent GCs when stressing.
5188       max_semi_space_size_ = MB;
5189     }
5190     // TODO(dinfuehr): Rounding to a power of 2 is no longer needed. Remove it.
5191     max_semi_space_size_ =
5192         static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
5193             static_cast<uint64_t>(max_semi_space_size_)));
5194     max_semi_space_size_ = std::max({max_semi_space_size_, kMinSemiSpaceSize});
5195     max_semi_space_size_ = RoundDown<Page::kPageSize>(max_semi_space_size_);
5196   }
5197 
5198   // Initialize max_old_generation_size_ and max_global_memory_.
5199   {
5200     size_t max_old_generation_size = 700ul * (kSystemPointerSize / 4) * MB;
5201     if (constraints.max_old_generation_size_in_bytes() > 0) {
5202       max_old_generation_size = constraints.max_old_generation_size_in_bytes();
5203     }
5204     if (FLAG_max_old_space_size > 0) {
5205       max_old_generation_size =
5206           static_cast<size_t>(FLAG_max_old_space_size) * MB;
5207     } else if (FLAG_max_heap_size > 0) {
5208       size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
5209       size_t young_generation_size =
5210           YoungGenerationSizeFromSemiSpaceSize(max_semi_space_size_);
5211       max_old_generation_size = max_heap_size > young_generation_size
5212                                     ? max_heap_size - young_generation_size
5213                                     : 0;
5214     }
5215     max_old_generation_size =
5216         std::max(max_old_generation_size, MinOldGenerationSize());
5217     max_old_generation_size = std::min(max_old_generation_size,
5218                                        AllocatorLimitOnMaxOldGenerationSize());
5219     max_old_generation_size =
5220         RoundDown<Page::kPageSize>(max_old_generation_size);
5221 
5222     max_global_memory_size_ =
5223         GlobalMemorySizeFromV8Size(max_old_generation_size);
5224     set_max_old_generation_size(max_old_generation_size);
5225   }
5226 
5227   CHECK_IMPLIES(FLAG_max_heap_size > 0,
5228                 FLAG_max_semi_space_size == 0 || FLAG_max_old_space_size == 0);
5229 
5230   // Initialize initial_semispace_size_.
5231   {
5232     initial_semispace_size_ = kMinSemiSpaceSize;
5233     if (max_semi_space_size_ == kMaxSemiSpaceSize) {
5234       // Start with at least 1*MB semi-space on machines with a lot of memory.
5235       initial_semispace_size_ =
5236           std::max(initial_semispace_size_, static_cast<size_t>(1 * MB));
5237     }
5238     if (constraints.initial_young_generation_size_in_bytes() > 0) {
5239       initial_semispace_size_ = SemiSpaceSizeFromYoungGenerationSize(
5240           constraints.initial_young_generation_size_in_bytes());
5241     }
5242     if (FLAG_initial_heap_size > 0) {
5243       size_t young_generation, old_generation;
5244       Heap::GenerationSizesFromHeapSize(
5245           static_cast<size_t>(FLAG_initial_heap_size) * MB, &young_generation,
5246           &old_generation);
5247       initial_semispace_size_ =
5248           SemiSpaceSizeFromYoungGenerationSize(young_generation);
5249     }
5250     if (FLAG_min_semi_space_size > 0) {
5251       initial_semispace_size_ =
5252           static_cast<size_t>(FLAG_min_semi_space_size) * MB;
5253     }
5254     initial_semispace_size_ =
5255         std::min(initial_semispace_size_, max_semi_space_size_);
5256     initial_semispace_size_ =
5257         RoundDown<Page::kPageSize>(initial_semispace_size_);
5258   }
5259 
5260   if (FLAG_lazy_new_space_shrinking) {
5261     initial_semispace_size_ = max_semi_space_size_;
5262   }
5263 
5264   // Initialize initial_old_space_size_.
5265   {
5266     initial_old_generation_size_ = kMaxInitialOldGenerationSize;
5267     if (constraints.initial_old_generation_size_in_bytes() > 0) {
5268       initial_old_generation_size_ =
5269           constraints.initial_old_generation_size_in_bytes();
5270       old_generation_size_configured_ = true;
5271     }
5272     if (FLAG_initial_heap_size > 0) {
5273       size_t initial_heap_size =
5274           static_cast<size_t>(FLAG_initial_heap_size) * MB;
5275       size_t young_generation_size =
5276           YoungGenerationSizeFromSemiSpaceSize(initial_semispace_size_);
5277       initial_old_generation_size_ =
5278           initial_heap_size > young_generation_size
5279               ? initial_heap_size - young_generation_size
5280               : 0;
5281       old_generation_size_configured_ = true;
5282     }
5283     if (FLAG_initial_old_space_size > 0) {
5284       initial_old_generation_size_ =
5285           static_cast<size_t>(FLAG_initial_old_space_size) * MB;
5286       old_generation_size_configured_ = true;
5287     }
5288     initial_old_generation_size_ =
5289         std::min(initial_old_generation_size_, max_old_generation_size() / 2);
5290     initial_old_generation_size_ =
5291         RoundDown<Page::kPageSize>(initial_old_generation_size_);
5292   }
5293 
5294   if (old_generation_size_configured_) {
5295     // If the embedder pre-configures the initial old generation size,
5296     // then allow V8 to skip full GCs below that threshold.
5297     min_old_generation_size_ = initial_old_generation_size_;
5298     min_global_memory_size_ =
5299         GlobalMemorySizeFromV8Size(min_old_generation_size_);
5300   }
5301 
5302   if (FLAG_semi_space_growth_factor < 2) {
5303     FLAG_semi_space_growth_factor = 2;
5304   }
5305 
5306   set_old_generation_allocation_limit(initial_old_generation_size_);
5307   global_allocation_limit_ =
5308       GlobalMemorySizeFromV8Size(old_generation_allocation_limit());
5309   initial_max_old_generation_size_ = max_old_generation_size();
5310 
5311   // We rely on being able to allocate new arrays in paged spaces.
5312   DCHECK(kMaxRegularHeapObjectSize >=
5313          (JSArray::kHeaderSize +
5314           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
5315           AllocationMemento::kSize));
5316 
5317   code_range_size_ = constraints.code_range_size_in_bytes();
5318 
5319   configured_ = true;
5320 }
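// ConfigureHeap above resolves --max-heap-size into separate young- and
// old-generation budgets: an explicit --max-old-space-size wins, and the young
// generation receives whatever remains, clamped at zero. The derivation of the
// semispace size from the young-generation budget is not reproduced here; the
// sketch below only models the "remainder, never negative" split, using
// hypothetical names.
#include <cstddef>

struct GenerationBudgets {
  std::size_t young = 0;
  std::size_t old_gen = 0;
};

inline GenerationBudgets SplitHeapBudget(std::size_t max_heap_size,
                                         std::size_t requested_old_size) {
  GenerationBudgets budgets;
  budgets.old_gen = requested_old_size;
  budgets.young = max_heap_size > requested_old_size
                      ? max_heap_size - requested_old_size
                      : 0;
  return budgets;
}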
5321 
5322 void Heap::AddToRingBuffer(const char* string) {
5323   size_t first_part =
5324       std::min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
5325   memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
5326   ring_buffer_end_ += first_part;
5327   if (first_part < strlen(string)) {
5328     ring_buffer_full_ = true;
5329     size_t second_part = strlen(string) - first_part;
5330     memcpy(trace_ring_buffer_, string + first_part, second_part);
5331     ring_buffer_end_ = second_part;
5332   }
5333 }
5334 
5335 void Heap::GetFromRingBuffer(char* buffer) {
5336   size_t copied = 0;
5337   if (ring_buffer_full_) {
5338     copied = kTraceRingBufferSize - ring_buffer_end_;
5339     memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
5340   }
5341   memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
5342 }
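// AddToRingBuffer and GetFromRingBuffer above keep only the most recent
// kTraceRingBufferSize bytes of trace output: writes wrap around a fixed-size
// buffer, and reads emit the oldest bytes first once the buffer has wrapped.
// A small standalone model of the same bookkeeping follows; the type and size
// constant are illustrative only, and messages are assumed to be shorter than
// the buffer (the clamp merely keeps the sketch safe for longer inputs).
#include <algorithm>
#include <cstddef>
#include <cstring>

struct TraceRing {
  static constexpr std::size_t kSize = 512;
  char data[kSize] = {};
  std::size_t end = 0;   // next write position
  bool wrapped = false;  // set once old bytes start being overwritten

  void Append(const char* s) {
    const std::size_t len = std::min(std::strlen(s), kSize);
    const std::size_t first = std::min(len, kSize - end);
    std::memcpy(data + end, s, first);
    end += first;
    if (first < len) {  // ran off the end: wrap to the front
      wrapped = true;
      const std::size_t second = len - first;
      std::memcpy(data, s + first, second);
      end = second;
    }
  }

  // Copies the buffered bytes, oldest first, into out (must hold kSize bytes).
  void Snapshot(char* out) const {
    std::size_t copied = 0;
    if (wrapped) {
      copied = kSize - end;
      std::memcpy(out, data + end, copied);
    }
    std::memcpy(out + copied, data, end);
  }
};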
5343 
5344 void Heap::ConfigureHeapDefault() {
5345   v8::ResourceConstraints constraints;
5346   ConfigureHeap(constraints);
5347 }
5348 
5349 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5350   *stats->start_marker = HeapStats::kStartMarker;
5351   *stats->end_marker = HeapStats::kEndMarker;
5352   *stats->ro_space_size = read_only_space_->Size();
5353   *stats->ro_space_capacity = read_only_space_->Capacity();
5354   *stats->new_space_size = NewSpaceSize();
5355   *stats->new_space_capacity = NewSpaceCapacity();
5356   *stats->old_space_size = old_space_->SizeOfObjects();
5357   *stats->old_space_capacity = old_space_->Capacity();
5358   *stats->code_space_size = code_space_->SizeOfObjects();
5359   *stats->code_space_capacity = code_space_->Capacity();
5360   *stats->map_space_size = map_space_ ? map_space_->SizeOfObjects() : 0;
5361   *stats->map_space_capacity = map_space_ ? map_space_->Capacity() : 0;
5362   *stats->lo_space_size = lo_space_->Size();
5363   *stats->code_lo_space_size = code_lo_space_->Size();
5364   isolate_->global_handles()->RecordStats(stats);
5365   *stats->memory_allocator_size = memory_allocator()->Size();
5366   *stats->memory_allocator_capacity =
5367       memory_allocator()->Size() + memory_allocator()->Available();
5368   *stats->os_error = base::OS::GetLastError();
5369   // TODO(leszeks): Include the string table in both current and peak usage.
5370   *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
5371   *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
5372   if (take_snapshot) {
5373     HeapObjectIterator iterator(this);
5374     for (HeapObject obj = iterator.Next(); !obj.is_null();
5375          obj = iterator.Next()) {
5376       InstanceType type = obj.map().instance_type();
5377       DCHECK(0 <= type && type <= LAST_TYPE);
5378       stats->objects_per_type[type]++;
5379       stats->size_per_type[type] += obj.Size();
5380     }
5381   }
5382   if (stats->last_few_messages != nullptr)
5383     GetFromRingBuffer(stats->last_few_messages);
5384 }
5385 
5386 size_t Heap::OldGenerationSizeOfObjects() {
5387   PagedSpaceIterator spaces(this);
5388   size_t total = 0;
5389   for (PagedSpace* space = spaces.Next(); space != nullptr;
5390        space = spaces.Next()) {
5391     total += space->SizeOfObjects();
5392   }
5393   return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
5394 }
5395 
5396 size_t Heap::EmbedderSizeOfObjects() const {
5397   return local_embedder_heap_tracer()
5398              ? local_embedder_heap_tracer()->used_size()
5399              : 0;
5400 }
5401 
5402 size_t Heap::GlobalSizeOfObjects() {
5403   return OldGenerationSizeOfObjects() + EmbedderSizeOfObjects();
5404 }
5405 
5406 uint64_t Heap::AllocatedExternalMemorySinceMarkCompact() {
5407   return external_memory_.AllocatedSinceMarkCompact();
5408 }
5409 
5410 bool Heap::AllocationLimitOvershotByLargeMargin() {
5411   // This guards against too eager finalization in small heaps.
5412   // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
5413   constexpr size_t kMarginForSmallHeaps = 32u * MB;
5414 
5415   uint64_t size_now =
5416       OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
5417 
5418   const size_t v8_overshoot = old_generation_allocation_limit() < size_now
5419                                   ? size_now - old_generation_allocation_limit()
5420                                   : 0;
5421   const size_t global_overshoot =
5422       global_allocation_limit_ < GlobalSizeOfObjects()
5423           ? GlobalSizeOfObjects() - global_allocation_limit_
5424           : 0;
5425 
5426   // Bail out if the V8 and global sizes are still below their respective
5427   // limits.
5428   if (v8_overshoot == 0 && global_overshoot == 0) {
5429     return false;
5430   }
5431 
5432   // The overshoot margin is 50% of the allocation limit or half-way to the
5433   // max heap size, with special handling for small heaps.
5434   const size_t v8_margin = std::min(
5435       std::max(old_generation_allocation_limit() / 2, kMarginForSmallHeaps),
5436       (max_old_generation_size() - old_generation_allocation_limit()) / 2);
5437   const size_t global_margin =
5438       std::min(std::max(global_allocation_limit_ / 2, kMarginForSmallHeaps),
5439                (max_global_memory_size_ - global_allocation_limit_) / 2);
5440 
5441   return v8_overshoot >= v8_margin || global_overshoot >= global_margin;
5442 }
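// AllocationLimitOvershotByLargeMargin above only reports a "large" overshoot
// when the excess over the limit reaches a margin that is 50% of the current
// limit (but at least 32 MB, to protect small heaps) and at most half of the
// remaining headroom up to the hard maximum. A standalone sketch of that
// margin test follows; it assumes limit <= hard_max, as in the caller above,
// and the helper names are illustrative only.
#include <algorithm>
#include <cstddef>

inline std::size_t OvershootMargin(std::size_t limit, std::size_t hard_max) {
  constexpr std::size_t kMB = 1024u * 1024u;
  constexpr std::size_t kMarginForSmallHeaps = 32u * kMB;
  return std::min(std::max(limit / 2, kMarginForSmallHeaps),
                  (hard_max - limit) / 2);
}

inline bool OvershotByLargeMargin(std::size_t size_now, std::size_t limit,
                                  std::size_t hard_max) {
  const std::size_t overshoot = size_now > limit ? size_now - limit : 0;
  return overshoot > 0 && overshoot >= OvershootMargin(limit, hard_max);
}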
5443 
5444 bool Heap::ShouldOptimizeForLoadTime() {
5445   return isolate()->rail_mode() == PERFORMANCE_LOAD &&
5446          !AllocationLimitOvershotByLargeMargin() &&
5447          MonotonicallyIncreasingTimeInMs() <
5448              isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
5449 }
5450 
5451 // This predicate is called when an old generation space cannot allocate from
5452 // the free list and is about to add a new page. Returning false will cause a
5453 // major GC. It happens when the old generation allocation limit is reached and
5454 // - either we need to optimize for memory usage,
5455 // - or the incremental marking is not in progress and we cannot start it.
5456 bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
5457   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
5458   // We reached the old generation allocation limit.
5459 
5460   // Background threads need to be allowed to allocate without GC after teardown
5461   // has been initiated.
5462   if (gc_state() == TEAR_DOWN) return true;
5463 
5464   // If main thread is parked, it can't perform the GC. Fix the deadlock by
5465   // allowing the allocation.
5466   if (IsMainThreadParked(local_heap)) return true;
5467 
5468   // Make it more likely that a retry of the allocation on a background thread succeeds.
5469   if (IsRetryOfFailedAllocation(local_heap)) return true;
5470 
5471   // A background thread requested a GC; the allocation should fail.
5472   if (CollectionRequested()) return false;
5473 
5474   if (ShouldOptimizeForMemoryUsage()) return false;
5475 
5476   if (ShouldOptimizeForLoadTime()) return true;
5477 
5478   if (incremental_marking()->NeedsFinalization()) {
5479     return !AllocationLimitOvershotByLargeMargin();
5480   }
5481 
5482   if (incremental_marking()->IsStopped() &&
5483       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
5484     // We cannot start incremental marking.
5485     return false;
5486   }
5487   return true;
5488 }
5489 
5490 bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
5491   if (!local_heap) return false;
5492   return local_heap->allocation_failed_;
5493 }
5494 
5495 bool Heap::IsMainThreadParked(LocalHeap* local_heap) {
5496   if (!local_heap) return false;
5497   return local_heap->main_thread_parked_;
5498 }
5499 
5500 Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
5501   if (ShouldReduceMemory() || FLAG_stress_compaction) {
5502     return Heap::HeapGrowingMode::kMinimal;
5503   }
5504 
5505   if (ShouldOptimizeForMemoryUsage()) {
5506     return Heap::HeapGrowingMode::kConservative;
5507   }
5508 
5509   if (memory_reducer()->ShouldGrowHeapSlowly()) {
5510     return Heap::HeapGrowingMode::kSlow;
5511   }
5512 
5513   return Heap::HeapGrowingMode::kDefault;
5514 }
5515 
5516 base::Optional<size_t> Heap::GlobalMemoryAvailable() {
5517   if (!UseGlobalMemoryScheduling()) return {};
5518 
5519   size_t global_size = GlobalSizeOfObjects();
5520 
5521   if (global_size < global_allocation_limit_)
5522     return global_allocation_limit_ - global_size;
5523 
5524   return 0;
5525 }
5526 
5527 double Heap::PercentToOldGenerationLimit() {
5528   double size_at_gc = old_generation_size_at_last_gc_;
5529   double size_now =
5530       OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
5531   double current_bytes = size_now - size_at_gc;
5532   double total_bytes = old_generation_allocation_limit() - size_at_gc;
5533   return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
5534 }
5535 
5536 double Heap::PercentToGlobalMemoryLimit() {
5537   double size_at_gc = old_generation_size_at_last_gc_;
5538   double size_now =
5539       OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
5540   double current_bytes = size_now - size_at_gc;
5541   double total_bytes = old_generation_allocation_limit() - size_at_gc;
5542   return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
5543 }
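// PercentToOldGenerationLimit and PercentToGlobalMemoryLimit above both
// express how far allocation has progressed since the last GC towards the
// current allocation limit, as a percentage of the distance between the two.
// The same arithmetic as a standalone helper (the name is illustrative only):
inline double PercentTowardsLimit(double size_at_last_gc, double size_now,
                                  double limit) {
  const double current_bytes = size_now - size_at_last_gc;
  const double total_bytes = limit - size_at_last_gc;
  return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
}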
5544 
5545 // - kNoLimit means that either incremental marking is disabled or it is too
5546 // early to start incremental marking.
5547 // - kSoftLimit means that incremental marking should be started soon.
5548 // - kHardLimit means that incremental marking should be started immediately.
5549 // - kFallbackForEmbedderLimit means that incremental marking should be
5550 // started as soon as the embedder no longer allocates with high
5551 // throughput.
5552 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
5553   // Code using an AlwaysAllocateScope assumes that the GC state does not
5554   // change; that implies that no marking steps may be performed.
5555   if (!incremental_marking()->CanBeActivated() || always_allocate()) {
5556     // Incremental marking is disabled or it is too early to start.
5557     return IncrementalMarkingLimit::kNoLimit;
5558   }
5559   if (FLAG_stress_incremental_marking) {
5560     return IncrementalMarkingLimit::kHardLimit;
5561   }
5562   if (incremental_marking()->IsBelowActivationThresholds()) {
5563     // Still below the activation thresholds; it is too early to start marking.
5564     return IncrementalMarkingLimit::kNoLimit;
5565   }
5566   if (ShouldStressCompaction() || HighMemoryPressure()) {
5567     // If there is high memory pressure or stress testing is enabled, then
5568     // start marking immediately.
5569     return IncrementalMarkingLimit::kHardLimit;
5570   }
5571 
5572   if (FLAG_stress_marking > 0) {
5573     int current_percent = static_cast<int>(
5574         std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
5575     if (current_percent > 0) {
5576       if (FLAG_trace_stress_marking) {
5577         isolate()->PrintWithTimestamp(
5578             "[IncrementalMarking] %d%% of the memory limit reached\n",
5579             current_percent);
5580       }
5581       if (FLAG_fuzzer_gc_analysis) {
5582         // Skips values >=100% since they already trigger marking.
5583         if (current_percent < 100) {
5584           max_marking_limit_reached_ =
5585               std::max<double>(max_marking_limit_reached_, current_percent);
5586         }
5587       } else if (current_percent >=
5588                  stress_marking_percentage_.load(std::memory_order_relaxed)) {
5589         stress_marking_percentage_.store(NextStressMarkingLimit(),
5590                                          std::memory_order_relaxed);
5591         return IncrementalMarkingLimit::kHardLimit;
5592       }
5593     }
5594   }
5595 
5596   if (FLAG_incremental_marking_soft_trigger > 0 ||
5597       FLAG_incremental_marking_hard_trigger > 0) {
5598     int current_percent = static_cast<int>(
5599         std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
5600     if (current_percent > FLAG_incremental_marking_hard_trigger &&
5601         FLAG_incremental_marking_hard_trigger > 0) {
5602       return IncrementalMarkingLimit::kHardLimit;
5603     }
5604     if (current_percent > FLAG_incremental_marking_soft_trigger &&
5605         FLAG_incremental_marking_soft_trigger > 0) {
5606       return IncrementalMarkingLimit::kSoftLimit;
5607     }
5608     return IncrementalMarkingLimit::kNoLimit;
5609   }
5610 
5611   size_t old_generation_space_available = OldGenerationSpaceAvailable();
5612   const base::Optional<size_t> global_memory_available =
5613       GlobalMemoryAvailable();
5614 
5615   if (old_generation_space_available > NewSpaceCapacity() &&
5616       (!global_memory_available ||
5617        global_memory_available > NewSpaceCapacity())) {
5618     if (local_embedder_heap_tracer()->InUse() &&
5619         !old_generation_size_configured_ && gc_count_ == 0) {
5620       // At this point the embedder memory is above the activation
5621       // threshold. No GC happened so far and it's thus unlikely to get a
5622       // configured heap any time soon. Start a memory reducer in this case
5623       // which will wait until the allocation rate is low to trigger garbage
5624       // collection.
5625       return IncrementalMarkingLimit::kFallbackForEmbedderLimit;
5626     }
5627     return IncrementalMarkingLimit::kNoLimit;
5628   }
5629   if (ShouldOptimizeForMemoryUsage()) {
5630     return IncrementalMarkingLimit::kHardLimit;
5631   }
5632   if (ShouldOptimizeForLoadTime()) {
5633     return IncrementalMarkingLimit::kNoLimit;
5634   }
5635   if (old_generation_space_available == 0) {
5636     return IncrementalMarkingLimit::kHardLimit;
5637   }
5638   if (global_memory_available && *global_memory_available == 0) {
5639     return IncrementalMarkingLimit::kHardLimit;
5640   }
5641   return IncrementalMarkingLimit::kSoftLimit;
5642 }
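// When --incremental-marking-soft-trigger or --incremental-marking-hard-trigger
// are set, the code above compares the current percent-to-limit against those
// thresholds: crossing the hard trigger starts marking immediately, crossing
// only the soft trigger schedules it soon, and anything below returns no
// limit. A simplified standalone sketch of that decision; the enum and names
// are illustrative only.
enum class MarkingTrigger { kNone, kSoft, kHard };

inline MarkingTrigger TriggerForPercent(int current_percent, int soft_trigger,
                                        int hard_trigger) {
  if (hard_trigger > 0 && current_percent > hard_trigger) {
    return MarkingTrigger::kHard;
  }
  if (soft_trigger > 0 && current_percent > soft_trigger) {
    return MarkingTrigger::kSoft;
  }
  return MarkingTrigger::kNone;
}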
5643 
5644 bool Heap::ShouldStressCompaction() const {
5645   return FLAG_stress_compaction && (gc_count_ & 1) != 0;
5646 }
5647 
5648 void Heap::EnableInlineAllocation() {
5649   // Update inline allocation limit for new space.
5650   if (new_space()) {
5651     new_space()->EnableInlineAllocation();
5652   }
5653   // Update inline allocation limit for old spaces.
5654   PagedSpaceIterator spaces(this);
5655   for (PagedSpace* space = spaces.Next(); space != nullptr;
5656        space = spaces.Next()) {
5657     base::MutexGuard guard(space->mutex());
5658     space->EnableInlineAllocation();
5659   }
5660 }
5661 
5662 void Heap::DisableInlineAllocation() {
5663   // Update inline allocation limit for new space.
5664   if (new_space()) {
5665     new_space()->DisableInlineAllocation();
5666   }
5667   // Update inline allocation limit for old spaces.
5668   PagedSpaceIterator spaces(this);
5669   CodePageCollectionMemoryModificationScope modification_scope(this);
5670   for (PagedSpace* space = spaces.Next(); space != nullptr;
5671        space = spaces.Next()) {
5672     base::MutexGuard guard(space->mutex());
5673     space->DisableInlineAllocation();
5674   }
5675 }
5676 
5677 void Heap::SetUp(LocalHeap* main_thread_local_heap) {
5678   DCHECK_NULL(main_thread_local_heap_);
5679   main_thread_local_heap_ = main_thread_local_heap;
5680 
5681 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
5682   heap_allocator_.UpdateAllocationTimeout();
5683 #endif  // V8_ENABLE_ALLOCATION_TIMEOUT
5684 
5685 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
5686   tp_heap_ = third_party_heap::Heap::New(isolate());
5687 #endif
5688 
5689   // Initialize heap spaces and initial maps and objects.
5690   //
5691   // If the heap is not yet configured (e.g. through the API), configure it.
5692   // Configuration is based on the flags new-space-size (really the semispace
5693   // size) and old-space-size if set or the initial values of semispace_size_
5694   // size) and old-space-size if set, or the initial values of semispace_size_
5695   if (!configured_) ConfigureHeapDefault();
5696 
5697   mmap_region_base_ =
5698       reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
5699       ~kMmapRegionMask;
5700 
5701   v8::PageAllocator* code_page_allocator;
5702   if (isolate_->RequiresCodeRange() || code_range_size_ != 0) {
5703     const size_t requested_size =
5704         code_range_size_ == 0 ? kMaximalCodeRangeSize : code_range_size_;
5705     // When a target requires the code range feature, we put all code objects in
5706     // a contiguous range of virtual address space, so that they can call each
5707     // other with near calls.
5708     if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
5709       // When sharing a pointer cage among Isolates, also share the
5710       // CodeRange. isolate_->page_allocator() is the process-wide pointer
5711       // compression cage's PageAllocator.
5712       code_range_ = CodeRange::EnsureProcessWideCodeRange(
5713           isolate_->page_allocator(), requested_size);
5714     } else {
5715       code_range_ = std::make_shared<CodeRange>();
5716       if (!code_range_->InitReservation(isolate_->page_allocator(),
5717                                         requested_size)) {
5718         V8::FatalProcessOutOfMemory(
5719             isolate_, "Failed to reserve virtual memory for CodeRange");
5720       }
5721     }
5722 
5723     LOG(isolate_,
5724         NewEvent("CodeRange",
5725                  reinterpret_cast<void*>(code_range_->reservation()->address()),
5726                  code_range_size_));
5727 
5728     isolate_->AddCodeRange(code_range_->reservation()->region().begin(),
5729                            code_range_->reservation()->region().size());
5730     code_page_allocator = code_range_->page_allocator();
5731   } else {
5732     code_page_allocator = isolate_->page_allocator();
5733   }
5734 
5735   // Set up memory allocator.
5736   memory_allocator_.reset(
5737       new MemoryAllocator(isolate_, code_page_allocator, MaxReserved()));
5738 
5739   mark_compact_collector_.reset(new MarkCompactCollector(this));
5740 
5741   scavenger_collector_.reset(new ScavengerCollector(this));
5742   minor_mark_compact_collector_.reset(new MinorMarkCompactCollector(this));
5743 
5744   incremental_marking_.reset(
5745       new IncrementalMarking(this, mark_compact_collector_->weak_objects()));
5746 
5747   if (FLAG_concurrent_marking || FLAG_parallel_marking) {
5748     concurrent_marking_.reset(new ConcurrentMarking(
5749         this, mark_compact_collector_->marking_worklists(),
5750         mark_compact_collector_->weak_objects()));
5751   } else {
5752     concurrent_marking_.reset(new ConcurrentMarking(this, nullptr, nullptr));
5753   }
5754 
5755   marking_barrier_.reset(new MarkingBarrier(this));
5756 
5757   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
5758     space_[i] = nullptr;
5759   }
5760 
5761   // Set up layout tracing callback.
5762   if (V8_UNLIKELY(FLAG_trace_gc_heap_layout)) {
5763     v8::GCType gc_type = kGCTypeMarkSweepCompact;
5764     if (V8_UNLIKELY(!FLAG_trace_gc_heap_layout_ignore_minor_gc)) {
5765       gc_type = static_cast<v8::GCType>(gc_type | kGCTypeScavenge |
5766                                         kGCTypeMinorMarkCompact);
5767     }
5768     AddGCPrologueCallback(HeapLayoutTracer::GCProloguePrintHeapLayout, gc_type,
5769                           nullptr);
5770     AddGCEpilogueCallback(HeapLayoutTracer::GCEpiloguePrintHeapLayout, gc_type,
5771                           nullptr);
5772   }
5773 }
5774 
5775 void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
5776   DCHECK_NOT_NULL(ro_heap);
5777   DCHECK_IMPLIES(read_only_space_ != nullptr,
5778                  read_only_space_ == ro_heap->read_only_space());
5779   space_[RO_SPACE] = nullptr;
5780   read_only_space_ = ro_heap->read_only_space();
5781   heap_allocator_.SetReadOnlySpace(read_only_space_);
5782 }
5783 
5784 void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
5785   CHECK(V8_SHARED_RO_HEAP_BOOL);
5786   if (read_only_space_) {
5787     read_only_space_->TearDown(memory_allocator());
5788     delete read_only_space_;
5789   }
5790 
5791   read_only_space_ = space;
5792   heap_allocator_.SetReadOnlySpace(read_only_space_);
5793 }
5794 
5795 class StressConcurrentAllocationObserver : public AllocationObserver {
5796  public:
5797   explicit StressConcurrentAllocationObserver(Heap* heap)
5798       : AllocationObserver(1024), heap_(heap) {}
5799 
5800   void Step(int bytes_allocated, Address, size_t) override {
5801     DCHECK(heap_->deserialization_complete());
5802     if (FLAG_stress_concurrent_allocation) {
5803       // Only schedule the task if --stress-concurrent-allocation is enabled. This
5804       // allows tests to disable the flag even when the Isolate was already initialized.
5805       StressConcurrentAllocatorTask::Schedule(heap_->isolate());
5806     }
5807     heap_->RemoveAllocationObserversFromAllSpaces(this, this);
5808     heap_->need_to_remove_stress_concurrent_allocation_observer_ = false;
5809   }
5810 
5811  private:
5812   Heap* heap_;
5813 };
5814 
5815 void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
5816                        LinearAllocationArea* old_allocation_info) {
5817   // Ensure SetUpFromReadOnlyHeap has been run.
5818   DCHECK_NOT_NULL(read_only_space_);
5819   const bool has_young_gen = !FLAG_single_generation && !IsShared();
5820   if (has_young_gen) {
5821     space_[NEW_SPACE] = new_space_ = new NewSpace(
5822         this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
5823         max_semi_space_size_, new_allocation_info);
5824     space_[NEW_LO_SPACE] = new_lo_space_ =
5825         new NewLargeObjectSpace(this, NewSpaceCapacity());
5826   }
5827   space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info);
5828   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
5829   if (FLAG_use_map_space) {
5830     space_[MAP_SPACE] = map_space_ = new MapSpace(this);
5831   }
5832   space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
5833   space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
5834 
5835   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
5836        i++) {
5837     deferred_counters_[i] = 0;
5838   }
5839 
5840   tracer_.reset(new GCTracer(this));
5841   array_buffer_sweeper_.reset(new ArrayBufferSweeper(this));
5842   gc_idle_time_handler_.reset(new GCIdleTimeHandler());
5843   memory_measurement_.reset(new MemoryMeasurement(isolate()));
5844   memory_reducer_.reset(new MemoryReducer(this));
5845   if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
5846     live_object_stats_.reset(new ObjectStats(this));
5847     dead_object_stats_.reset(new ObjectStats(this));
5848   }
5849   local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
5850   embedder_roots_handler_ =
5851       &local_embedder_heap_tracer()->default_embedder_roots_handler();
5852   if (Heap::AllocationTrackerForDebugging::IsNeeded()) {
5853     allocation_tracker_for_debugging_ =
5854         std::make_unique<Heap::AllocationTrackerForDebugging>(this);
5855   }
5856 
5857   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5858   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5859 
5860   mark_compact_collector()->SetUp();
5861   if (minor_mark_compact_collector_) {
5862     minor_mark_compact_collector_->SetUp();
5863   }
5864 
5865   if (new_space()) {
5866     scavenge_job_.reset(new ScavengeJob());
5867     scavenge_task_observer_.reset(new ScavengeTaskObserver(
5868         this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
5869     new_space()->AddAllocationObserver(scavenge_task_observer_.get());
5870   }
5871 
5872   SetGetExternallyAllocatedMemoryInBytesCallback(
5873       DefaultGetExternallyAllocatedMemoryInBytesCallback);
5874 
5875   if (FLAG_stress_marking > 0) {
5876     stress_marking_percentage_ = NextStressMarkingLimit();
5877     stress_marking_observer_ = new StressMarkingObserver(this);
5878     AddAllocationObserversToAllSpaces(stress_marking_observer_,
5879                                       stress_marking_observer_);
5880   }
5881   if (IsStressingScavenge()) {
5882     stress_scavenge_observer_ = new StressScavengeObserver(this);
5883     new_space()->AddAllocationObserver(stress_scavenge_observer_);
5884   }
5885 
5886   write_protect_code_memory_ = FLAG_write_protect_code_memory;
5887 
5888   if (isolate()->shared_isolate()) {
5889     Heap* shared_heap = isolate()->shared_isolate()->heap();
5890 
5891     shared_old_space_ = shared_heap->old_space();
5892     shared_old_allocator_.reset(
5893         new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
5894 
5895     if (shared_heap->map_space()) {
5896       shared_map_space_ = shared_heap->map_space();
5897       shared_map_allocator_.reset(
5898           new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
5899     }
5900   }
5901 
5902   main_thread_local_heap()->SetUpMainThread();
5903   heap_allocator_.Setup();
5904 }
5905 
5906 void Heap::InitializeHashSeed() {
5907   DCHECK(!deserialization_complete_);
5908   uint64_t new_hash_seed;
5909   if (FLAG_hash_seed == 0) {
5910     int64_t rnd = isolate()->random_number_generator()->NextInt64();
5911     new_hash_seed = static_cast<uint64_t>(rnd);
5912   } else {
5913     new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
5914   }
5915   ReadOnlyRoots(this).hash_seed().copy_in(
5916       0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
5917 }
5918 
5919 // static
5920 void Heap::InitializeOncePerProcess() {
5921   MemoryAllocator::InitializeOncePerProcess();
5922 }
5923 
5924 void Heap::PrintMaxMarkingLimitReached() {
5925   PrintF("\n### Maximum marking limit reached = %.02lf\n",
5926          max_marking_limit_reached_);
5927 }
5928 
5929 void Heap::PrintMaxNewSpaceSizeReached() {
5930   PrintF("\n### Maximum new space size reached = %.02lf\n",
5931          stress_scavenge_observer_->MaxNewSpaceSizeReached());
5932 }
5933 
5934 int Heap::NextStressMarkingLimit() {
5935   // Reuse Heap-global mutex as this getter is called from different threads on
5936   // allocation slow paths.
5937   base::MutexGuard guard(relocation_mutex());
5938   return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
5939 }
5940 
5941 void Heap::NotifyDeserializationComplete() {
5942   PagedSpaceIterator spaces(this);
5943   for (PagedSpace* s = spaces.Next(); s != nullptr; s = spaces.Next()) {
5944     if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
5945 #ifdef DEBUG
5946     // All pages right after bootstrapping must be marked as never-evacuate.
5947     for (Page* p : *s) {
5948       DCHECK(p->NeverEvacuate());
5949     }
5950 #endif  // DEBUG
5951   }
5952 
5953   if (FLAG_stress_concurrent_allocation) {
5954     stress_concurrent_allocation_observer_.reset(
5955         new StressConcurrentAllocationObserver(this));
5956     AddAllocationObserversToAllSpaces(
5957         stress_concurrent_allocation_observer_.get(),
5958         stress_concurrent_allocation_observer_.get());
5959     need_to_remove_stress_concurrent_allocation_observer_ = true;
5960   }
5961 
5962   deserialization_complete_ = true;
5963 }
5964 
5965 void Heap::NotifyBootstrapComplete() {
5966   // This function is invoked for each native context creation. We are
5967   // interested only in the first native context.
5968   if (old_generation_capacity_after_bootstrap_ == 0) {
5969     old_generation_capacity_after_bootstrap_ = OldGenerationCapacity();
5970   }
5971 }
5972 
5973 void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
5974                                         MemoryChunk* chunk) {
5975   // Pages created during bootstrapping may contain immortal immovable objects.
5976   if (!deserialization_complete()) {
5977     chunk->MarkNeverEvacuate();
5978   }
5979   if (space == CODE_SPACE || space == CODE_LO_SPACE) {
5980     isolate()->AddCodeMemoryChunk(chunk);
5981   }
5982   const size_t kMemoryReducerActivationThreshold = 1 * MB;
5983   if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
5984       OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
5985                                      kMemoryReducerActivationThreshold &&
5986       FLAG_memory_reducer_for_small_heaps) {
5987     MemoryReducer::Event event;
5988     event.type = MemoryReducer::kPossibleGarbage;
5989     event.time_ms = MonotonicallyIncreasingTimeInMs();
5990     memory_reducer()->NotifyPossibleGarbage(event);
5991   }
5992 }
5993 
5994 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
5995   DCHECK_EQ(gc_state(), HeapState::NOT_IN_GC);
5996   // Setting a tracer is only supported when CppHeap is not used.
5997   DCHECK_IMPLIES(tracer, !cpp_heap_);
5998   local_embedder_heap_tracer()->SetRemoteTracer(tracer);
5999 }
6000 
6001 void Heap::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
6002   embedder_roots_handler_ = handler;
6003 }
6004 
6005 EmbedderRootsHandler* Heap::GetEmbedderRootsHandler() const {
6006   return embedder_roots_handler_;
6007 }
6008 
6009 EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
6010   return local_embedder_heap_tracer()->remote_tracer();
6011 }
6012 
6013 void Heap::AttachCppHeap(v8::CppHeap* cpp_heap) {
6014   CppHeap::From(cpp_heap)->AttachIsolate(isolate());
6015   cpp_heap_ = cpp_heap;
6016   local_embedder_heap_tracer()->SetCppHeap(CppHeap::From(cpp_heap));
6017 }
6018 
6019 void Heap::DetachCppHeap() {
6020   CppHeap::From(cpp_heap_)->DetachIsolate();
6021   cpp_heap_ = nullptr;
6022   local_embedder_heap_tracer()->SetCppHeap(nullptr);
6023 }
6024 
6025 EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
6026   if (is_current_gc_forced()) {
6027     return EmbedderHeapTracer::TraceFlags::kForced;
6028   } else if (ShouldReduceMemory()) {
6029     return EmbedderHeapTracer::TraceFlags::kReduceMemory;
6030   }
6031   return EmbedderHeapTracer::TraceFlags::kNoFlags;
6032 }
6033 
6034 const cppgc::EmbedderStackState* Heap::overriden_stack_state() const {
6035   const auto* cpp_heap = CppHeap::From(cpp_heap_);
6036   return cpp_heap ? cpp_heap->override_stack_state() : nullptr;
6037 }
6038 
6039 void Heap::RegisterExternallyReferencedObject(Address* location) {
6040   GlobalHandles::MarkTraced(location);
6041   Object object(*location);
6042   if (!object.IsHeapObject()) {
6043     // The embedder is not aware of whether numbers are materialized as heap
6044     // objects or just passed around as Smis.
6045     return;
6046   }
6047   HeapObject heap_object = HeapObject::cast(object);
6048   DCHECK(IsValidHeapObject(this, heap_object));
6049   if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
6050     incremental_marking()->WhiteToGreyAndPush(heap_object);
6051   } else {
6052     DCHECK(mark_compact_collector()->in_use());
6053     mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
6054   }
6055 }
6056 
6057 void Heap::StartTearDown() {
6058   // Finish any ongoing sweeping to avoid stray background tasks still accessing
6059   // the heap during teardown.
6060   CompleteSweepingFull();
6061 
6062   memory_allocator()->unmapper()->EnsureUnmappingCompleted();
6063 
6064   SetGCState(TEAR_DOWN);
6065 
6066   // Background threads may allocate and block until a GC is performed. However,
6067   // this might never happen when the main thread tries to quit and no longer
6068   // processes the event queue. Avoid this deadlock by allowing all allocations
6069   // after teardown has been requested, so that all background threads can
6070   // finish.
6071   collection_barrier_->NotifyShutdownRequested();
6072 
6073   // Main thread isn't going to allocate anymore.
6074   main_thread_local_heap()->FreeLinearAllocationArea();
6075 
6076   FreeMainThreadSharedLinearAllocationAreas();
6077 
6078 #ifdef VERIFY_HEAP
6079   // {StartTearDown} is called fairly early during Isolate teardown, so it's
6080   // a good time to run heap verification (if requested), before starting to
6081   // tear down parts of the Isolate.
6082   if (FLAG_verify_heap) {
6083     AllowGarbageCollection allow_gc;
6084     IgnoreLocalGCRequests ignore_gc_requests(this);
6085     SafepointScope scope(this);
6086     Verify();
6087   }
6088 #endif
6089 }
6090 
6091 void Heap::TearDown() {
6092   DCHECK_EQ(gc_state(), TEAR_DOWN);
6093 
6094   if (FLAG_concurrent_marking || FLAG_parallel_marking)
6095     concurrent_marking_->Pause();
6096 
6097   // It's too late for Heap::Verify() here, as parts of the Isolate are
6098   // already gone by the time this is called.
6099 
6100   UpdateMaximumCommitted();
6101 
6102   if (FLAG_fuzzer_gc_analysis) {
6103     if (FLAG_stress_marking > 0) {
6104       PrintMaxMarkingLimitReached();
6105     }
6106     if (IsStressingScavenge()) {
6107       PrintMaxNewSpaceSizeReached();
6108     }
6109   }
6110 
6111   if (new_space()) {
6112     new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
6113   }
6114 
6115   scavenge_task_observer_.reset();
6116   scavenge_job_.reset();
6117 
6118   if (need_to_remove_stress_concurrent_allocation_observer_) {
6119     RemoveAllocationObserversFromAllSpaces(
6120         stress_concurrent_allocation_observer_.get(),
6121         stress_concurrent_allocation_observer_.get());
6122   }
6123   stress_concurrent_allocation_observer_.reset();
6124 
6125   if (FLAG_stress_marking > 0) {
6126     RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
6127                                            stress_marking_observer_);
6128     delete stress_marking_observer_;
6129     stress_marking_observer_ = nullptr;
6130   }
6131   if (IsStressingScavenge()) {
6132     new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
6133     delete stress_scavenge_observer_;
6134     stress_scavenge_observer_ = nullptr;
6135   }
6136 
6137   if (mark_compact_collector_) {
6138     mark_compact_collector_->TearDown();
6139     mark_compact_collector_.reset();
6140   }
6141 
6142   if (minor_mark_compact_collector_) {
6143     minor_mark_compact_collector_->TearDown();
6144     minor_mark_compact_collector_.reset();
6145   }
6146 
6147   scavenger_collector_.reset();
6148   array_buffer_sweeper_.reset();
6149   incremental_marking_.reset();
6150   concurrent_marking_.reset();
6151 
6152   gc_idle_time_handler_.reset();
6153   memory_measurement_.reset();
6154   allocation_tracker_for_debugging_.reset();
6155 
6156   if (memory_reducer_ != nullptr) {
6157     memory_reducer_->TearDown();
6158     memory_reducer_.reset();
6159   }
6160 
6161   live_object_stats_.reset();
6162   dead_object_stats_.reset();
6163 
6164   local_embedder_heap_tracer_.reset();
6165   embedder_roots_handler_ = nullptr;
6166 
6167   if (cpp_heap_) {
6168     CppHeap::From(cpp_heap_)->DetachIsolate();
6169     cpp_heap_ = nullptr;
6170   }
6171 
6172   external_string_table_.TearDown();
6173 
6174   tracer_.reset();
6175 
6176   allocation_sites_to_pretenure_.reset();
6177 
6178   shared_old_space_ = nullptr;
6179   shared_old_allocator_.reset();
6180 
6181   shared_map_space_ = nullptr;
6182   shared_map_allocator_.reset();
6183 
6184   for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
6185     delete space_[i];
6186     space_[i] = nullptr;
6187   }
6188 
6189   isolate()->read_only_heap()->OnHeapTearDown(this);
6190   read_only_space_ = nullptr;
6191 
6192   memory_allocator()->TearDown();
6193 
6194   StrongRootsEntry* next = nullptr;
6195   for (StrongRootsEntry* current = strong_roots_head_; current;
6196        current = next) {
6197     next = current->next;
6198     delete current;
6199   }
6200   strong_roots_head_ = nullptr;
6201 
6202   memory_allocator_.reset();
6203 }
6204 
6205 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
6206                                  GCType gc_type, void* data) {
6207   DCHECK_NOT_NULL(callback);
6208   DCHECK(gc_prologue_callbacks_.end() ==
6209          std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
6210                    GCCallbackTuple(callback, gc_type, data)));
6211   gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
6212 }
6213 
6214 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
6215                                     void* data) {
6216   DCHECK_NOT_NULL(callback);
6217   for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
6218     if (gc_prologue_callbacks_[i].callback == callback &&
6219         gc_prologue_callbacks_[i].data == data) {
6220       gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
6221       gc_prologue_callbacks_.pop_back();
6222       return;
6223     }
6224   }
6225   UNREACHABLE();
6226 }
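// RemoveGCPrologueCallback above (and RemoveGCEpilogueCallback below) delete
// an entry from an unordered vector by overwriting it with the last element
// and popping the back: O(1) removal at the cost of element order. The same
// idiom on a plain std::vector, as a standalone sketch (SwapRemoveIf is
// illustrative only):
#include <cstddef>
#include <vector>

template <typename T, typename Pred>
bool SwapRemoveIf(std::vector<T>& v, Pred pred) {
  for (std::size_t i = 0; i < v.size(); ++i) {
    if (pred(v[i])) {
      v[i] = v.back();  // element order is not preserved
      v.pop_back();
      return true;  // remove at most one matching entry, like the code above
    }
  }
  return false;
}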
6227 
6228 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
6229                                  GCType gc_type, void* data) {
6230   DCHECK_NOT_NULL(callback);
6231   DCHECK(gc_epilogue_callbacks_.end() ==
6232          std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
6233                    GCCallbackTuple(callback, gc_type, data)));
6234   gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
6235 }
6236 
6237 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
6238                                     void* data) {
6239   DCHECK_NOT_NULL(callback);
6240   for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
6241     if (gc_epilogue_callbacks_[i].callback == callback &&
6242         gc_epilogue_callbacks_[i].data == data) {
6243       gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
6244       gc_epilogue_callbacks_.pop_back();
6245       return;
6246     }
6247   }
6248   UNREACHABLE();
6249 }
6250 
6251 namespace {
6252 Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
6253                                            Handle<WeakArrayList> array,
6254                                            AllocationType allocation) {
6255   if (array->length() == 0) {
6256     return array;
6257   }
6258   int new_length = array->CountLiveWeakReferences();
6259   if (new_length == array->length()) {
6260     return array;
6261   }
6262 
6263   Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
6264       heap->isolate(),
6265       handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
6266       new_length, allocation);
6267   // Allocation might have caused GC and turned some of the elements into
6268   // cleared weak heap objects. Count the number of live references again and
6269   // fill in the new array.
6270   int copy_to = 0;
6271   for (int i = 0; i < array->length(); i++) {
6272     MaybeObject element = array->Get(i);
6273     if (element->IsCleared()) continue;
6274     new_array->Set(copy_to++, element);
6275   }
6276   new_array->set_length(copy_to);
6277   return new_array;
6278 }
6279 
6280 }  // anonymous namespace
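// CompactWeakArrayList above and CompactRetainedMaps further down share the
// same pattern: walk the array once, copy each still-live element forward to
// the next free slot, and shrink the length to the number of survivors. The
// core of that in-place compaction on a plain vector, with a caller-supplied
// predicate standing in for "is a cleared weak reference" (names are
// illustrative only):
#include <cstddef>
#include <vector>

template <typename T, typename IsCleared>
void CompactInPlace(std::vector<T>& entries, IsCleared is_cleared) {
  std::size_t copy_to = 0;
  for (std::size_t i = 0; i < entries.size(); ++i) {
    if (is_cleared(entries[i])) continue;  // drop cleared entries
    entries[copy_to++] = entries[i];
  }
  entries.resize(copy_to);  // equivalent of set_length(copy_to)
}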
6281 
6282 void Heap::CompactWeakArrayLists() {
6283   // Find known PrototypeUsers and compact them.
6284   std::vector<Handle<PrototypeInfo>> prototype_infos;
6285   {
6286     HeapObjectIterator iterator(this);
6287     for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
6288       if (o.IsPrototypeInfo()) {
6289         PrototypeInfo prototype_info = PrototypeInfo::cast(o);
6290         if (prototype_info.prototype_users().IsWeakArrayList()) {
6291           prototype_infos.emplace_back(handle(prototype_info, isolate()));
6292         }
6293       }
6294     }
6295   }
6296   for (auto& prototype_info : prototype_infos) {
6297     Handle<WeakArrayList> array(
6298         WeakArrayList::cast(prototype_info->prototype_users()), isolate());
6299     DCHECK(InOldSpace(*array) ||
6300            *array == ReadOnlyRoots(this).empty_weak_array_list());
6301     WeakArrayList new_array = PrototypeUsers::Compact(
6302         array, this, JSObject::PrototypeRegistryCompactionCallback,
6303         AllocationType::kOld);
6304     prototype_info->set_prototype_users(new_array);
6305   }
6306 
6307   // Find known WeakArrayLists and compact them.
6308   Handle<WeakArrayList> scripts(script_list(), isolate());
6309   DCHECK_IMPLIES(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL, InOldSpace(*scripts));
6310   scripts = CompactWeakArrayList(this, scripts, AllocationType::kOld);
6311   set_script_list(*scripts);
6312 }
6313 
6314 void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
6315   if (map->is_in_retained_map_list() || map->InSharedWritableHeap()) {
6316     return;
6317   }
6318 
6319   Handle<WeakArrayList> array(WeakArrayList::cast(context->retained_maps()),
6320                               isolate());
6321   if (array->IsFull()) {
6322     CompactRetainedMaps(*array);
6323   }
6324   array =
6325       WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
6326   array = WeakArrayList::AddToEnd(
6327       isolate(), array,
6328       MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
6329   if (*array != context->retained_maps()) {
6330     context->set_retained_maps(*array);
6331   }
6332   map->set_is_in_retained_map_list(true);
6333 }
6334 
6335 void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
6336   int length = retained_maps.length();
6337   int new_length = 0;
6338   // This loop compacts the array by removing cleared weak cells.
6339   for (int i = 0; i < length; i += 2) {
6340     MaybeObject maybe_object = retained_maps.Get(i);
6341     if (maybe_object->IsCleared()) {
6342       continue;
6343     }
6344 
6345     DCHECK(maybe_object->IsWeak());
6346 
6347     MaybeObject age = retained_maps.Get(i + 1);
6348     DCHECK(age->IsSmi());
6349     if (i != new_length) {
6350       retained_maps.Set(new_length, maybe_object);
6351       retained_maps.Set(new_length + 1, age);
6352     }
6353     new_length += 2;
6354   }
6355   HeapObject undefined = ReadOnlyRoots(this).undefined_value();
6356   for (int i = new_length; i < length; i++) {
6357     retained_maps.Set(i, HeapObjectReference::Strong(undefined));
6358   }
6359   if (new_length != length) retained_maps.set_length(new_length);
6360 }
6361 
6362 void Heap::FatalProcessOutOfMemory(const char* location) {
6363   v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
6364 }
6365 
6366 #ifdef DEBUG
6367 
6368 class PrintHandleVisitor : public RootVisitor {
6369  public:
6370   void VisitRootPointers(Root root, const char* description,
6371                          FullObjectSlot start, FullObjectSlot end) override {
6372     for (FullObjectSlot p = start; p < end; ++p)
6373       PrintF("  handle %p to %p\n", p.ToVoidPtr(),
6374              reinterpret_cast<void*>((*p).ptr()));
6375   }
6376 };
6377 
6378 void Heap::PrintHandles() {
6379   PrintF("Handles:\n");
6380   PrintHandleVisitor v;
6381   isolate_->handle_scope_implementer()->Iterate(&v);
6382 }
6383 
6384 #endif
6385 
6386 class CheckHandleCountVisitor : public RootVisitor {
6387  public:
6388   CheckHandleCountVisitor() : handle_count_(0) {}
6389   ~CheckHandleCountVisitor() override {
6390     CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
6391   }
6392   void VisitRootPointers(Root root, const char* description,
6393                          FullObjectSlot start, FullObjectSlot end) override {
6394     handle_count_ += end - start;
6395   }
6396 
6397  private:
6398   ptrdiff_t handle_count_;
6399 };
6400 
6401 void Heap::CheckHandleCount() {
6402   CheckHandleCountVisitor v;
6403   isolate_->handle_scope_implementer()->Iterate(&v);
6404 }
6405 
6406 void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
6407 #ifndef V8_DISABLE_WRITE_BARRIERS
6408   DCHECK(!IsLargeObject(object));
6409   Page* page = Page::FromAddress(slot.address());
6410   if (!page->InYoungGeneration()) {
6411     DCHECK_EQ(page->owner_identity(), OLD_SPACE);
6412 
6413     if (!page->SweepingDone()) {
6414       RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
6415     }
6416   }
6417 #endif
6418 }
6419 
6420 // static
6421 int Heap::InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot) {
6422   RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
6423   return 0;
6424 }
6425 
6426 #ifdef DEBUG
6427 void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
6428 #ifndef V8_DISABLE_WRITE_BARRIERS
6429   DCHECK(!IsLargeObject(object));
6430   if (InYoungGeneration(object)) return;
6431   Page* page = Page::FromAddress(slot.address());
6432   DCHECK_EQ(page->owner_identity(), OLD_SPACE);
6433   // Slots are filtered with invalidated slots.
6434   CHECK_IMPLIES(RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()),
6435                 page->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
6436   CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
6437                 page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
6438 #endif
6439 }
6440 
6441 void Heap::VerifySlotRangeHasNoRecordedSlots(Address start, Address end) {
6442 #ifndef V8_DISABLE_WRITE_BARRIERS
6443   Page* page = Page::FromAddress(start);
6444   DCHECK(!page->InYoungGeneration());
6445   RememberedSet<OLD_TO_NEW>::CheckNoneInRange(page, start, end);
6446 #endif
6447 }
6448 #endif
6449 
6450 void Heap::ClearRecordedSlotRange(Address start, Address end) {
6451 #ifndef V8_DISABLE_WRITE_BARRIERS
6452   Page* page = Page::FromAddress(start);
6453   DCHECK(!page->IsLargePage());
6454   if (!page->InYoungGeneration()) {
6455     DCHECK_EQ(page->owner_identity(), OLD_SPACE);
6456 
6457     if (!page->SweepingDone()) {
6458       RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
6459                                              SlotSet::KEEP_EMPTY_BUCKETS);
6460     }
6461   }
6462 #endif
6463 }
6464 
6465 PagedSpace* PagedSpaceIterator::Next() {
6466   DCHECK_GE(counter_, FIRST_GROWABLE_PAGED_SPACE);
6467   if (counter_ > LAST_GROWABLE_PAGED_SPACE) return nullptr;
6468   return heap_->paged_space(counter_++);
6469 }
6470 
6471 SpaceIterator::SpaceIterator(Heap* heap)
6472     : heap_(heap), current_space_(FIRST_MUTABLE_SPACE) {}
6473 
6474 SpaceIterator::~SpaceIterator() = default;
6475 
6476 bool SpaceIterator::HasNext() {
6477   while (current_space_ <= LAST_MUTABLE_SPACE) {
6478     Space* space = heap_->space(current_space_);
6479     if (space) return true;
6480     ++current_space_;
6481   }
6482 
6483   // No more spaces left.
6484   return false;
6485 }
6486 
6487 Space* SpaceIterator::Next() {
6488   DCHECK_LE(current_space_, LAST_MUTABLE_SPACE);
6489   Space* space = heap_->space(current_space_++);
6490   DCHECK_NOT_NULL(space);
6491   return space;
6492 }
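
// Illustrative usage sketch (an added assumption, mirroring how
// HeapObjectIterator::NextObject() below drives this iterator):
//
//   SpaceIterator it(heap);
//   while (it.HasNext()) {
//     Space* space = it.Next();
//     // ... visit |space| ...
//   }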
6493 
6494 class HeapObjectsFilter {
6495  public:
6496   virtual ~HeapObjectsFilter() = default;
6497   virtual bool SkipObject(HeapObject object) = 0;
6498 };
6499 
6500 class UnreachableObjectsFilter : public HeapObjectsFilter {
6501  public:
6502   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
6503     MarkReachableObjects();
6504   }
6505 
6506   ~UnreachableObjectsFilter() override {
6507     for (auto it : reachable_) {
6508       delete it.second;
6509       it.second = nullptr;
6510     }
6511   }
6512 
6513   bool SkipObject(HeapObject object) override {
6514     if (object.IsFreeSpaceOrFiller()) return true;
6515     Address chunk = object.ptr() & ~kLogicalChunkAlignmentMask;
6516     if (reachable_.count(chunk) == 0) return true;
6517     return reachable_[chunk]->count(object) == 0;
6518   }
6519 
6520  private:
6521   bool MarkAsReachable(HeapObject object) {
6522     Address chunk = object.ptr() & ~kLogicalChunkAlignmentMask;
6523     if (reachable_.count(chunk) == 0) {
6524       reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
6525     }
6526     if (reachable_[chunk]->count(object)) return false;
6527     reachable_[chunk]->insert(object);
6528     return true;
6529   }
6530 
6531   static constexpr intptr_t kLogicalChunkAlignment =
6532       (static_cast<uintptr_t>(1) << kPageSizeBits);
6533 
6534   static constexpr intptr_t kLogicalChunkAlignmentMask =
6535       kLogicalChunkAlignment - 1;
6536 
6537   class MarkingVisitor : public ObjectVisitorWithCageBases, public RootVisitor {
6538    public:
6539     explicit MarkingVisitor(UnreachableObjectsFilter* filter)
6540         : ObjectVisitorWithCageBases(filter->heap_), filter_(filter) {}
6541 
6542     void VisitMapPointer(HeapObject object) override {
6543       MarkHeapObject(Map::unchecked_cast(object.map(cage_base())));
6544     }
6545     void VisitPointers(HeapObject host, ObjectSlot start,
6546                        ObjectSlot end) override {
6547       MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
6548     }
6549 
6550     void VisitPointers(HeapObject host, MaybeObjectSlot start,
6551                        MaybeObjectSlot end) final {
6552       MarkPointers(start, end);
6553     }
6554 
6555     void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
6556       CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
6557       HeapObject code = HeapObject::unchecked_cast(slot.load(code_cage_base()));
6558       MarkHeapObject(code);
6559     }
6560 
6561     void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
6562       Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
6563       MarkHeapObject(target);
6564     }
6565     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
6566       MarkHeapObject(rinfo->target_object(cage_base()));
6567     }
6568 
6569     void VisitRootPointers(Root root, const char* description,
6570                            FullObjectSlot start, FullObjectSlot end) override {
6571       MarkPointersImpl(start, end);
6572     }
6573     void VisitRootPointers(Root root, const char* description,
6574                            OffHeapObjectSlot start,
6575                            OffHeapObjectSlot end) override {
6576       MarkPointersImpl(start, end);
6577     }
6578 
6579     void TransitiveClosure() {
6580       while (!marking_stack_.empty()) {
6581         HeapObject obj = marking_stack_.back();
6582         marking_stack_.pop_back();
6583         obj.Iterate(cage_base(), this);
6584       }
6585     }
6586 
6587    private:
6588     void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
6589       MarkPointersImpl(start, end);
6590     }
6591 
6592     template <typename TSlot>
6593     V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
6594       // Treat weak references as strong.
6595       for (TSlot p = start; p < end; ++p) {
6596         typename TSlot::TObject object = p.load(cage_base());
6597         HeapObject heap_object;
6598         if (object.GetHeapObject(&heap_object)) {
6599           MarkHeapObject(heap_object);
6600         }
6601       }
6602     }
6603 
6604     V8_INLINE void MarkHeapObject(HeapObject heap_object) {
6605       if (filter_->MarkAsReachable(heap_object)) {
6606         marking_stack_.push_back(heap_object);
6607       }
6608     }
6609 
6610     UnreachableObjectsFilter* filter_;
6611     std::vector<HeapObject> marking_stack_;
6612   };
6613 
6614   friend class MarkingVisitor;
6615 
6616   void MarkReachableObjects() {
6617     MarkingVisitor visitor(this);
6618     heap_->IterateRoots(&visitor, {});
6619     visitor.TransitiveClosure();
6620   }
6621 
6622   Heap* heap_;
6623   DISALLOW_GARBAGE_COLLECTION(no_gc_)
6624   std::unordered_map<Address, std::unordered_set<HeapObject, Object::Hasher>*>
6625       reachable_;
6626 };
6627 
6628 HeapObjectIterator::HeapObjectIterator(
6629     Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
6630     : heap_(heap),
6631       safepoint_scope_(std::make_unique<SafepointScope>(heap)),
6632       filtering_(filtering),
6633       filter_(nullptr),
6634       space_iterator_(nullptr),
6635       object_iterator_(nullptr) {
6636   heap_->MakeHeapIterable();
6637   // Start the iteration.
6638   space_iterator_ = new SpaceIterator(heap_);
6639   switch (filtering_) {
6640     case kFilterUnreachable:
6641       filter_ = new UnreachableObjectsFilter(heap_);
6642       break;
6643     default:
6644       break;
6645   }
6646   // By not calling |space_iterator_->HasNext()|, we assume that the old
6647   // space is first returned and that it has been set up.
6648   object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
6649   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) heap_->tp_heap_->ResetIterator();
6650 }
6651 
6652 HeapObjectIterator::~HeapObjectIterator() {
6653 #ifdef DEBUG
6654   // Assert that in filtering mode we have iterated through all
6655   // objects. Otherwise, the heap will be left in an inconsistent state.
6656   if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL && filtering_ != kNoFiltering) {
6657     DCHECK_NULL(object_iterator_);
6658   }
6659 #endif
6660   delete space_iterator_;
6661   delete filter_;
6662 }
6663 
6664 HeapObject HeapObjectIterator::Next() {
6665   if (filter_ == nullptr) return NextObject();
6666 
6667   HeapObject obj = NextObject();
6668   while (!obj.is_null() && (filter_->SkipObject(obj))) obj = NextObject();
6669   return obj;
6670 }
6671 
6672 HeapObject HeapObjectIterator::NextObject() {
6673   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return heap_->tp_heap_->NextObject();
6674   // No iterator means we are done.
6675   if (object_iterator_.get() == nullptr) return HeapObject();
6676 
6677   HeapObject obj = object_iterator_.get()->Next();
6678   if (!obj.is_null()) {
6679     // If the current iterator has more objects we are fine.
6680     return obj;
6681   } else {
6682     // Go through the spaces looking for one that has objects.
6683     while (space_iterator_->HasNext()) {
6684       object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
6685       obj = object_iterator_.get()->Next();
6686       if (!obj.is_null()) {
6687         return obj;
6688       }
6689     }
6690   }
6691   // Done with the last space.
6692   object_iterator_.reset(nullptr);
6693   return HeapObject();
6694 }
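
// Illustrative usage sketch (added, not part of the original source): walk all
// live objects with unreachable ones filtered out, the mode selected by the
// kFilterUnreachable case in the constructor above:
//
//   HeapObjectIterator iterator(heap, HeapObjectIterator::kFilterUnreachable);
//   for (HeapObject obj = iterator.Next(); !obj.is_null();
//        obj = iterator.Next()) {
//     // ... inspect |obj| ...
//   }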
6695 
6696 void Heap::UpdateTotalGCTime(double duration) {
6697   if (FLAG_trace_gc_verbose) {
6698     total_gc_time_ms_ += duration;
6699   }
6700 }
6701 
6702 void Heap::ExternalStringTable::CleanUpYoung() {
6703   int last = 0;
6704   Isolate* isolate = heap_->isolate();
6705   for (size_t i = 0; i < young_strings_.size(); ++i) {
6706     Object o = young_strings_[i];
6707     if (o.IsTheHole(isolate)) {
6708       continue;
6709     }
6710     // The real external string is already in one of these vectors and was or
6711     // will be processed. Re-processing it will add a duplicate to the vector.
6712     if (o.IsThinString()) continue;
6713     DCHECK(o.IsExternalString());
6714     if (InYoungGeneration(o)) {
6715       young_strings_[last++] = o;
6716     } else {
6717       old_strings_.push_back(o);
6718     }
6719   }
6720   young_strings_.resize(last);
6721 }
6722 
6723 void Heap::ExternalStringTable::CleanUpAll() {
6724   CleanUpYoung();
6725   int last = 0;
6726   Isolate* isolate = heap_->isolate();
6727   for (size_t i = 0; i < old_strings_.size(); ++i) {
6728     Object o = old_strings_[i];
6729     if (o.IsTheHole(isolate)) {
6730       continue;
6731     }
6732     // The real external string is already in one of these vectors and was or
6733     // will be processed. Re-processing it will add a duplicate to the vector.
6734     if (o.IsThinString()) continue;
6735     DCHECK(o.IsExternalString());
6736     DCHECK(!InYoungGeneration(o));
6737     old_strings_[last++] = o;
6738   }
6739   old_strings_.resize(last);
6740 #ifdef VERIFY_HEAP
6741   if (FLAG_verify_heap && !FLAG_enable_third_party_heap) {
6742     Verify();
6743   }
6744 #endif
6745 }
6746 
6747 void Heap::ExternalStringTable::TearDown() {
6748   for (size_t i = 0; i < young_strings_.size(); ++i) {
6749     Object o = young_strings_[i];
6750     // Don't finalize thin strings.
6751     if (o.IsThinString()) continue;
6752     heap_->FinalizeExternalString(ExternalString::cast(o));
6753   }
6754   young_strings_.clear();
6755   for (size_t i = 0; i < old_strings_.size(); ++i) {
6756     Object o = old_strings_[i];
6757     // Don't finalize thin strings.
6758     if (o.IsThinString()) continue;
6759     heap_->FinalizeExternalString(ExternalString::cast(o));
6760   }
6761   old_strings_.clear();
6762 }
6763 
6764 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6765   // Tag the page pointer to make it findable in the dump file.
6766   if (compacted) {
6767     page ^= 0xC1EAD & (Page::kPageSize - 1);  // Cleared.
6768   } else {
6769     page ^= 0x1D1ED & (Page::kPageSize - 1);  // I died.
6770   }
6771   remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
6772   remembered_unmapped_pages_index_++;
6773   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6774 }
6775 
6776 size_t Heap::YoungArrayBufferBytes() {
6777   return array_buffer_sweeper()->YoungBytes();
6778 }
6779 
6780 size_t Heap::OldArrayBufferBytes() {
6781   return array_buffer_sweeper()->OldBytes();
6782 }
6783 
6784 StrongRootsEntry* Heap::RegisterStrongRoots(const char* label,
6785                                             FullObjectSlot start,
6786                                             FullObjectSlot end) {
6787   base::MutexGuard guard(&strong_roots_mutex_);
6788 
6789   StrongRootsEntry* entry = new StrongRootsEntry(label);
6790   entry->start = start;
6791   entry->end = end;
6792   entry->prev = nullptr;
6793   entry->next = strong_roots_head_;
6794 
6795   if (strong_roots_head_) {
6796     DCHECK_NULL(strong_roots_head_->prev);
6797     strong_roots_head_->prev = entry;
6798   }
6799   strong_roots_head_ = entry;
6800 
6801   return entry;
6802 }
6803 
6804 void Heap::UpdateStrongRoots(StrongRootsEntry* entry, FullObjectSlot start,
6805                              FullObjectSlot end) {
6806   entry->start = start;
6807   entry->end = end;
6808 }
6809 
6810 void Heap::UnregisterStrongRoots(StrongRootsEntry* entry) {
6811   base::MutexGuard guard(&strong_roots_mutex_);
6812 
6813   StrongRootsEntry* prev = entry->prev;
6814   StrongRootsEntry* next = entry->next;
6815 
6816   if (prev) prev->next = next;
6817   if (next) next->prev = prev;
6818 
6819   if (strong_roots_head_ == entry) {
6820     DCHECK_NULL(prev);
6821     strong_roots_head_ = next;
6822   }
6823 
6824   delete entry;
6825 }
6826 
6827 void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
6828   set_builtins_constants_table(cache);
6829 }
6830 
6831 void Heap::SetDetachedContexts(WeakArrayList detached_contexts) {
6832   set_detached_contexts(detached_contexts);
6833 }
6834 
6835 void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
6836   DCHECK_EQ(Builtin::kInterpreterEntryTrampoline, code.builtin_id());
6837   set_interpreter_entry_trampoline_for_profiling(code);
6838 }
6839 
6840 void Heap::PostFinalizationRegistryCleanupTaskIfNeeded() {
6841   // Only one cleanup task is posted at a time.
6842   if (!HasDirtyJSFinalizationRegistries() ||
6843       is_finalization_registry_cleanup_task_posted_) {
6844     return;
6845   }
6846   auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
6847       reinterpret_cast<v8::Isolate*>(isolate()));
6848   auto task = std::make_unique<FinalizationRegistryCleanupTask>(this);
6849   taskrunner->PostNonNestableTask(std::move(task));
6850   is_finalization_registry_cleanup_task_posted_ = true;
6851 }
6852 
6853 void Heap::EnqueueDirtyJSFinalizationRegistry(
6854     JSFinalizationRegistry finalization_registry,
6855     std::function<void(HeapObject object, ObjectSlot slot, Object target)>
6856         gc_notify_updated_slot) {
6857   // Add a FinalizationRegistry to the tail of the dirty list.
6858   DCHECK(!HasDirtyJSFinalizationRegistries() ||
6859          dirty_js_finalization_registries_list().IsJSFinalizationRegistry());
6860   DCHECK(finalization_registry.next_dirty().IsUndefined(isolate()));
6861   DCHECK(!finalization_registry.scheduled_for_cleanup());
6862   finalization_registry.set_scheduled_for_cleanup(true);
6863   if (dirty_js_finalization_registries_list_tail().IsUndefined(isolate())) {
6864     DCHECK(dirty_js_finalization_registries_list().IsUndefined(isolate()));
6865     set_dirty_js_finalization_registries_list(finalization_registry);
6866     // dirty_js_finalization_registries_list_ is rescanned by
6867     // ProcessWeakListRoots.
6868   } else {
6869     JSFinalizationRegistry tail = JSFinalizationRegistry::cast(
6870         dirty_js_finalization_registries_list_tail());
6871     tail.set_next_dirty(finalization_registry);
6872     gc_notify_updated_slot(
6873         tail, tail.RawField(JSFinalizationRegistry::kNextDirtyOffset),
6874         finalization_registry);
6875   }
6876   set_dirty_js_finalization_registries_list_tail(finalization_registry);
6877   // dirty_js_finalization_registries_list_tail_ is rescanned by
6878   // ProcessWeakListRoots.
6879 }
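
// Hedged illustration of the enqueue above (R1..R3 are assumed registries):
// with a dirty list of  head -> R1 -> R2 (tail), enqueueing R3 sets
// R2.next_dirty = R3 and the tail root to R3, yielding
// head -> R1 -> R2 -> R3 (tail). On an empty list, both the head and the tail
// roots are simply set to R3.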
6880 
6881 MaybeHandle<JSFinalizationRegistry> Heap::DequeueDirtyJSFinalizationRegistry() {
6882   // Take a FinalizationRegistry from the head of the dirty list for fairness.
6883   if (HasDirtyJSFinalizationRegistries()) {
6884     Handle<JSFinalizationRegistry> head(
6885         JSFinalizationRegistry::cast(dirty_js_finalization_registries_list()),
6886         isolate());
6887     set_dirty_js_finalization_registries_list(head->next_dirty());
6888     head->set_next_dirty(ReadOnlyRoots(this).undefined_value());
6889     if (*head == dirty_js_finalization_registries_list_tail()) {
6890       set_dirty_js_finalization_registries_list_tail(
6891           ReadOnlyRoots(this).undefined_value());
6892     }
6893     return head;
6894   }
6895   return {};
6896 }
6897 
6898 void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
6899   DisallowGarbageCollection no_gc;
6900 
6901   Isolate* isolate = this->isolate();
6902   Object prev = ReadOnlyRoots(isolate).undefined_value();
6903   Object current = dirty_js_finalization_registries_list();
6904   while (!current.IsUndefined(isolate)) {
6905     JSFinalizationRegistry finalization_registry =
6906         JSFinalizationRegistry::cast(current);
6907     if (finalization_registry.native_context() == context) {
6908       if (prev.IsUndefined(isolate)) {
6909         set_dirty_js_finalization_registries_list(
6910             finalization_registry.next_dirty());
6911       } else {
6912         JSFinalizationRegistry::cast(prev).set_next_dirty(
6913             finalization_registry.next_dirty());
6914       }
6915       finalization_registry.set_scheduled_for_cleanup(false);
6916       current = finalization_registry.next_dirty();
6917       finalization_registry.set_next_dirty(
6918           ReadOnlyRoots(isolate).undefined_value());
6919     } else {
6920       prev = current;
6921       current = finalization_registry.next_dirty();
6922     }
6923   }
6924   set_dirty_js_finalization_registries_list_tail(prev);
6925 }
6926 
6927 void Heap::KeepDuringJob(Handle<HeapObject> target) {
6928   DCHECK(weak_refs_keep_during_job().IsUndefined() ||
6929          weak_refs_keep_during_job().IsOrderedHashSet());
6930   Handle<OrderedHashSet> table;
6931   if (weak_refs_keep_during_job().IsUndefined(isolate())) {
6932     table = isolate()->factory()->NewOrderedHashSet();
6933   } else {
6934     table =
6935         handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
6936   }
6937   table = OrderedHashSet::Add(isolate(), table, target).ToHandleChecked();
6938   set_weak_refs_keep_during_job(*table);
6939 }
6940 
6941 void Heap::ClearKeptObjects() {
6942   set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
6943 }
6944 
6945 size_t Heap::NumberOfTrackedHeapObjectTypes() {
6946   return ObjectStats::OBJECT_STATS_COUNT;
6947 }
6948 
6949 size_t Heap::ObjectCountAtLastGC(size_t index) {
6950   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6951     return 0;
6952   return live_object_stats_->object_count_last_gc(index);
6953 }
6954 
6955 size_t Heap::ObjectSizeAtLastGC(size_t index) {
6956   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6957     return 0;
6958   return live_object_stats_->object_size_last_gc(index);
6959 }
6960 
6961 bool Heap::GetObjectTypeName(size_t index, const char** object_type,
6962                              const char** object_sub_type) {
6963   if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
6964 
6965   switch (static_cast<int>(index)) {
6966 #define COMPARE_AND_RETURN_NAME(name) \
6967   case name:                          \
6968     *object_type = #name;             \
6969     *object_sub_type = "";            \
6970     return true;
6971     INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6972 #undef COMPARE_AND_RETURN_NAME
6973 
6974 #define COMPARE_AND_RETURN_NAME(name)                       \
6975   case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
6976     *object_type = #name;                                   \
6977     *object_sub_type = "";                                  \
6978     return true;
6979     VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6980 #undef COMPARE_AND_RETURN_NAME
6981   }
6982   return false;
6983 }
6984 
6985 size_t Heap::NumberOfNativeContexts() {
6986   int result = 0;
6987   Object context = native_contexts_list();
6988   while (!context.IsUndefined(isolate())) {
6989     ++result;
6990     Context native_context = Context::cast(context);
6991     context = native_context.next_context_link();
6992   }
6993   return result;
6994 }
6995 
6996 std::vector<Handle<NativeContext>> Heap::FindAllNativeContexts() {
6997   std::vector<Handle<NativeContext>> result;
6998   Object context = native_contexts_list();
6999   while (!context.IsUndefined(isolate())) {
7000     NativeContext native_context = NativeContext::cast(context);
7001     result.push_back(handle(native_context, isolate()));
7002     context = native_context.next_context_link();
7003   }
7004   return result;
7005 }
7006 
7007 std::vector<WeakArrayList> Heap::FindAllRetainedMaps() {
7008   std::vector<WeakArrayList> result;
7009   Object context = native_contexts_list();
7010   while (!context.IsUndefined(isolate())) {
7011     NativeContext native_context = NativeContext::cast(context);
7012     result.push_back(WeakArrayList::cast(native_context.retained_maps()));
7013     context = native_context.next_context_link();
7014   }
7015   return result;
7016 }
7017 
7018 size_t Heap::NumberOfDetachedContexts() {
7019   // The detached_contexts() array has two entries per detached context.
7020   return detached_contexts().length() / 2;
7021 }
7022 
7023 void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
7024                                           ObjectSlot end) {
7025   VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
7026 }
7027 
7028 void VerifyPointersVisitor::VisitPointers(HeapObject host,
7029                                           MaybeObjectSlot start,
7030                                           MaybeObjectSlot end) {
7031   VerifyPointers(host, start, end);
7032 }
7033 
7034 void VerifyPointersVisitor::VisitCodePointer(HeapObject host,
7035                                              CodeObjectSlot slot) {
7036   CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
7037   Object maybe_code = slot.load(code_cage_base());
7038   HeapObject code;
7039   // The slot might contain smi during CodeDataContainer creation.
7040   if (maybe_code.GetHeapObject(&code)) {
7041     VerifyCodeObjectImpl(code);
7042   } else {
7043     CHECK(maybe_code.IsSmi());
7044   }
7045 }
7046 
7047 void VerifyPointersVisitor::VisitRootPointers(Root root,
7048                                               const char* description,
7049                                               FullObjectSlot start,
7050                                               FullObjectSlot end) {
7051   VerifyPointersImpl(start, end);
7052 }
7053 
7054 void VerifyPointersVisitor::VisitRootPointers(Root root,
7055                                               const char* description,
7056                                               OffHeapObjectSlot start,
7057                                               OffHeapObjectSlot end) {
7058   VerifyPointersImpl(start, end);
7059 }
7060 
7061 void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
7062   CHECK(IsValidHeapObject(heap_, heap_object));
7063   CHECK(heap_object.map(cage_base()).IsMap());
7064 }
7065 
7066 void VerifyPointersVisitor::VerifyCodeObjectImpl(HeapObject heap_object) {
7067   CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
7068   CHECK(IsValidCodeObject(heap_, heap_object));
7069   CHECK(heap_object.map(cage_base()).IsMap());
7070   CHECK(heap_object.map(cage_base()).instance_type() == CODE_TYPE);
7071 }
7072 
7073 template <typename TSlot>
7074 void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
7075   for (TSlot slot = start; slot < end; ++slot) {
7076     typename TSlot::TObject object = slot.load(cage_base());
7077     HeapObject heap_object;
7078     if (object.GetHeapObject(&heap_object)) {
7079       VerifyHeapObjectImpl(heap_object);
7080     } else {
7081       CHECK(object.IsSmi() || object.IsCleared() ||
7082             MapWord::IsPacked(object.ptr()));
7083     }
7084   }
7085 }
7086 
7087 void VerifyPointersVisitor::VerifyPointers(HeapObject host,
7088                                            MaybeObjectSlot start,
7089                                            MaybeObjectSlot end) {
7090   // If this DCHECK fires then you probably added a pointer field
7091   // to one of the objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
7092   // this by moving that object to POINTER_VISITOR_ID_LIST.
7093   DCHECK_EQ(ObjectFields::kMaybePointers,
7094             Map::ObjectFieldsFrom(host.map(cage_base()).visitor_id()));
7095   VerifyPointersImpl(start, end);
7096 }
7097 
7098 void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
7099   Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
7100   VerifyHeapObjectImpl(target);
7101 }
7102 
7103 void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
7104   VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
7105 }
7106 
7107 void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
7108                                           FullObjectSlot start,
7109                                           FullObjectSlot end) {
7110   for (FullObjectSlot current = start; current < end; ++current) {
7111     CHECK((*current).IsSmi());
7112   }
7113 }
7114 
7115 bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
7116   // Object migration is governed by the following rules:
7117   //
7118   // 1) Objects in new-space can be migrated to the old space
7119   //    that matches their target space or they stay in new-space.
7120   // 2) Objects in old-space stay in the same space when migrating.
7121   // 3) Fillers (two or more words) can migrate due to left-trimming of
7122   //    fixed arrays in new-space or old space.
7123   // 4) Fillers (one word) can never migrate; they are skipped by
7124   //    incremental marking explicitly to prevent an invalid pattern.
7125   //
7126   // Since this function is used for debugging only, we do not place
7127   // asserts here, but check everything explicitly.
7128   if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
7129   InstanceType type = map.instance_type();
7130   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
7131   AllocationSpace src = chunk->owner_identity();
7132   switch (src) {
7133     case NEW_SPACE:
7134       return dst == NEW_SPACE || dst == OLD_SPACE;
7135     case OLD_SPACE:
7136       return dst == OLD_SPACE;
7137     case CODE_SPACE:
7138       return dst == CODE_SPACE && type == CODE_TYPE;
7139     case MAP_SPACE:
7140       return dst == MAP_SPACE && type == MAP_TYPE;
7141     case LO_SPACE:
7142     case CODE_LO_SPACE:
7143     case NEW_LO_SPACE:
7144     case RO_SPACE:
7145       return false;
7146   }
7147   UNREACHABLE();
7148 }
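
// Worked reading of the rules above (illustrative): a regular object on a
// NEW_SPACE page may be copied within NEW_SPACE or promoted to OLD_SPACE; a
// Code object may only move within CODE_SPACE; objects in the large-object and
// read-only spaces are never migrated.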
7149 
7150 size_t Heap::EmbedderAllocationCounter() const {
7151   return local_embedder_heap_tracer()
7152              ? local_embedder_heap_tracer()->allocated_size()
7153              : 0;
7154 }
7155 
7156 void Heap::CreateObjectStats() {
7157   if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
7158   if (!live_object_stats_) {
7159     live_object_stats_.reset(new ObjectStats(this));
7160   }
7161   if (!dead_object_stats_) {
7162     dead_object_stats_.reset(new ObjectStats(this));
7163   }
7164 }
7165 
7166 Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
7167   PtrComprCageBase cage_base(isolate());
7168   MapWord map_word = object.map_word(cage_base, kRelaxedLoad);
7169   if (map_word.IsForwardingAddress()) {
7170 #ifdef V8_EXTERNAL_CODE_SPACE
7171     PtrComprCageBase code_cage_base(isolate()->code_cage_base());
7172 #else
7173     PtrComprCageBase code_cage_base = cage_base;
7174 #endif
7175     return map_word.ToForwardingAddress(code_cage_base).map(cage_base);
7176   }
7177   return map_word.ToMap();
7178 }
7179 
7180 Code Heap::GcSafeCastToCode(HeapObject object, Address inner_pointer) {
7181   Code code = Code::unchecked_cast(object);
7182   DCHECK(!code.is_null());
7183   DCHECK(GcSafeCodeContains(code, inner_pointer));
7184   return code;
7185 }
7186 
7187 bool Heap::GcSafeCodeContains(Code code, Address addr) {
7188   Map map = GcSafeMapOfCodeSpaceObject(code);
7189   DCHECK(map == ReadOnlyRoots(this).code_map());
7190   Builtin maybe_builtin =
7191       OffHeapInstructionStream::TryLookupCode(isolate(), addr);
7192   if (Builtins::IsBuiltinId(maybe_builtin) &&
7193       code.builtin_id() == maybe_builtin) {
7194     return true;
7195   }
7196   Address start = code.address();
7197   Address end = code.address() + code.SizeFromMap(map);
7198   return start <= addr && addr < end;
7199 }
7200 
7201 Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
7202   Builtin maybe_builtin =
7203       OffHeapInstructionStream::TryLookupCode(isolate(), inner_pointer);
7204   if (Builtins::IsBuiltinId(maybe_builtin)) {
7205     return FromCodeT(isolate()->builtins()->code(maybe_builtin));
7206   }
7207 
7208   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
7209     Address start = tp_heap_->GetObjectFromInnerPointer(inner_pointer);
7210     return GcSafeCastToCode(HeapObject::FromAddress(start), inner_pointer);
7211   }
7212 
7213   // Check if the inner pointer points into a large object chunk.
7214   LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
7215   if (large_page != nullptr) {
7216     return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
7217   }
7218 
7219   if (V8_LIKELY(code_space()->Contains(inner_pointer))) {
7220     // Iterate through the page until we reach the end or find an object
7221     // starting after the inner pointer.
7222     Page* page = Page::FromAddress(inner_pointer);
7223 
7224     Address start =
7225         page->GetCodeObjectRegistry()->GetCodeObjectStartFromInnerAddress(
7226             inner_pointer);
7227     return GcSafeCastToCode(HeapObject::FromAddress(start), inner_pointer);
7228   }
7229 
7230   // It can only fall through to here during debugging, where for instance "jco"
7231   // was called on an address within a RO_SPACE builtin. It cannot reach here
7232   // during stack iteration as RO_SPACE memory is not executable so cannot
7233   // appear on the stack as an instruction address.
7234   DCHECK(ReadOnlyHeap::Contains(
7235       HeapObject::FromAddress(inner_pointer & ~kHeapObjectTagMask)));
7236 
7237   // TODO(delphick): Possibly optimize this as it iterates over all pages in
7238   // RO_SPACE instead of just the one containing the address.
7239   ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
7240   for (HeapObject object = iterator.Next(); !object.is_null();
7241        object = iterator.Next()) {
7242     if (!object.IsCode()) continue;
7243     Code code = Code::cast(object);
7244     if (inner_pointer >= code.address() &&
7245         inner_pointer < code.address() + code.Size()) {
7246       return code;
7247     }
7248   }
7249   // TODO(1241665): Remove once the issue is solved.
7250   std::shared_ptr<CodeRange> code_range = CodeRange::GetProcessWideCodeRange();
7251   void* code_range_embedded_blob_code_copy =
7252       code_range ? code_range->embedded_blob_code_copy() : nullptr;
7253   Address flags = (isolate()->is_short_builtin_calls_enabled() ? 1 : 0) |
7254                   (code_range ? 2 : 0) |
7255                   static_cast<Address>(max_old_generation_size());
7256 
7257   isolate()->PushParamsAndDie(
7258       reinterpret_cast<void*>(inner_pointer),
7259       const_cast<uint8_t*>(isolate()->embedded_blob_code()),
7260       const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
7261       code_range_embedded_blob_code_copy,
7262       reinterpret_cast<void*>(Isolate::CurrentEmbeddedBlobCodeSize()),
7263       reinterpret_cast<void*>(flags));
7264 
7265   UNREACHABLE();
7266 }
7267 
7268 void Heap::WriteBarrierForCodeSlow(Code code) {
7269   PtrComprCageBase cage_base = code.main_cage_base();
7270   for (RelocIterator it(code, RelocInfo::EmbeddedObjectModeMask()); !it.done();
7271        it.next()) {
7272     HeapObject target_object = it.rinfo()->target_object(cage_base);
7273     GenerationalBarrierForCode(code, it.rinfo(), target_object);
7274     WriteBarrier::Marking(code, it.rinfo(), target_object);
7275   }
7276 }
7277 
7278 void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
7279                                    HeapObject value) {
7280   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
7281   RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
7282 }
7283 
7284 void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
7285   DCHECK(ObjectInYoungGeneration(HeapObjectSlot(slot).ToHeapObject()));
7286   if (FLAG_minor_mc) {
7287     // Minor MC lacks support for specialized generational ephemeron barriers.
7288     // The regular write barrier works as well but keeps more memory alive.
7289     MemoryChunk* chunk = MemoryChunk::FromHeapObject(table);
7290     RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
7291   } else {
7292     int slot_index = EphemeronHashTable::SlotToIndex(table.address(), slot);
7293     InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
7294     auto it =
7295         ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
7296     it.first->second.insert(entry.as_int());
7297   }
7298 }
7299 
7300 void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
7301                                             Address key_slot_address,
7302                                             Isolate* isolate) {
7303   EphemeronHashTable table = EphemeronHashTable::cast(Object(raw_object));
7304   MaybeObjectSlot key_slot(key_slot_address);
7305   MaybeObject maybe_key = *key_slot;
7306   HeapObject key;
7307   if (!maybe_key.GetHeapObject(&key)) return;
7308   if (!ObjectInYoungGeneration(table) && ObjectInYoungGeneration(key)) {
7309     isolate->heap()->RecordEphemeronKeyWrite(table, key_slot_address);
7310   }
7311   WriteBarrier::Marking(table, key_slot, maybe_key);
7312 }
7313 
7314 enum RangeWriteBarrierMode {
7315   kDoGenerational = 1 << 0,
7316   kDoMarking = 1 << 1,
7317   kDoEvacuationSlotRecording = 1 << 2,
7318 };
7319 
7320 template <int kModeMask, typename TSlot>
7321 void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
7322                                     TSlot start_slot, TSlot end_slot) {
7323   // At least one of generational or marking write barrier should be requested.
7324   STATIC_ASSERT(kModeMask & (kDoGenerational | kDoMarking));
7325   // kDoEvacuationSlotRecording implies kDoMarking.
7326   STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) ||
7327                 (kModeMask & kDoMarking));
7328 
7329   MarkingBarrier* marking_barrier = WriteBarrier::CurrentMarkingBarrier(this);
7330   MarkCompactCollector* collector = this->mark_compact_collector();
7331 
7332   for (TSlot slot = start_slot; slot < end_slot; ++slot) {
7333     typename TSlot::TObject value = *slot;
7334     HeapObject value_heap_object;
7335     if (!value.GetHeapObject(&value_heap_object)) continue;
7336 
7337     if ((kModeMask & kDoGenerational) &&
7338         Heap::InYoungGeneration(value_heap_object)) {
7339       RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(source_page,
7340                                                                 slot.address());
7341     }
7342 
7343     if ((kModeMask & kDoMarking) &&
7344         marking_barrier->MarkValue(object, value_heap_object)) {
7345       if (kModeMask & kDoEvacuationSlotRecording) {
7346         collector->RecordSlot(source_page, HeapObjectSlot(slot),
7347                               value_heap_object);
7348       }
7349     }
7350   }
7351 }
7352 
7353 // Instantiate Heap::WriteBarrierForRange() for ObjectSlot and MaybeObjectSlot.
7354 template void Heap::WriteBarrierForRange<ObjectSlot>(HeapObject object,
7355                                                      ObjectSlot start_slot,
7356                                                      ObjectSlot end_slot);
7357 template void Heap::WriteBarrierForRange<MaybeObjectSlot>(
7358     HeapObject object, MaybeObjectSlot start_slot, MaybeObjectSlot end_slot);
7359 
7360 template <typename TSlot>
7361 void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
7362                                 TSlot end_slot) {
7363   if (FLAG_disable_write_barriers) return;
7364   MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
7365   base::Flags<RangeWriteBarrierMode> mode;
7366 
7367   if (!source_page->InYoungGeneration()) {
7368     mode |= kDoGenerational;
7369   }
7370 
7371   if (incremental_marking()->IsMarking()) {
7372     mode |= kDoMarking;
7373     if (!source_page->ShouldSkipEvacuationSlotRecording()) {
7374       mode |= kDoEvacuationSlotRecording;
7375     }
7376   }
7377 
7378   switch (mode) {
7379     // Nothing to be done.
7380     case 0:
7381       return;
7382 
7383     // Generational only.
7384     case kDoGenerational:
7385       return WriteBarrierForRangeImpl<kDoGenerational>(source_page, object,
7386                                                        start_slot, end_slot);
7387     // Marking, no evacuation slot recording.
7388     case kDoMarking:
7389       return WriteBarrierForRangeImpl<kDoMarking>(source_page, object,
7390                                                   start_slot, end_slot);
7391     // Marking with evacuation slot recording.
7392     case kDoMarking | kDoEvacuationSlotRecording:
7393       return WriteBarrierForRangeImpl<kDoMarking | kDoEvacuationSlotRecording>(
7394           source_page, object, start_slot, end_slot);
7395 
7396     // Generational and marking, no evacuation slot recording.
7397     case kDoGenerational | kDoMarking:
7398       return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking>(
7399           source_page, object, start_slot, end_slot);
7400 
7401     // Generational and marking with evacuation slot recording.
7402     case kDoGenerational | kDoMarking | kDoEvacuationSlotRecording:
7403       return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking |
7404                                       kDoEvacuationSlotRecording>(
7405           source_page, object, start_slot, end_slot);
7406 
7407     default:
7408       UNREACHABLE();
7409   }
7410 }
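
// Worked example of the dispatch above (illustrative): for an old-generation
// object on a page that records evacuation slots while incremental marking is
// active, |mode| becomes
// kDoGenerational | kDoMarking | kDoEvacuationSlotRecording, selecting the
// fully instrumented instantiation; for a young-generation object with marking
// off, |mode| stays 0 and the barrier is a no-op.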
7411 
7412 void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
7413                                           HeapObject object) {
7414   DCHECK(InYoungGeneration(object));
7415   const MarkCompactCollector::RecordRelocSlotInfo info =
7416       MarkCompactCollector::ProcessRelocInfo(host, rinfo, object);
7417 
7418   RememberedSet<OLD_TO_NEW>::InsertTyped(info.memory_chunk, info.slot_type,
7419                                          info.offset);
7420 }
7421 
7422 bool Heap::PageFlagsAreConsistent(HeapObject object) {
7423   if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
7424     return true;
7425   }
7426   BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
7427   heap_internals::MemoryChunk* slim_chunk =
7428       heap_internals::MemoryChunk::FromHeapObject(object);
7429 
7430   // Slim chunk flags consistency.
7431   CHECK_EQ(chunk->InYoungGeneration(), slim_chunk->InYoungGeneration());
7432   CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
7433            slim_chunk->IsMarking());
7434 
7435   AllocationSpace identity = chunk->owner()->identity();
7436 
7437   // Generation consistency.
7438   CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
7439            slim_chunk->InYoungGeneration());
7440   // Read-only consistency.
7441   CHECK_EQ(chunk->InReadOnlySpace(), slim_chunk->InReadOnlySpace());
7442 
7443   // Marking consistency.
7444   if (chunk->IsWritable()) {
7445     // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
7446     // find a heap. The exception is when the ReadOnlySpace is writeable, during
7447     // bootstrapping, so explicitly allow this case.
7448     Heap* heap = Heap::FromWritableHeapObject(object);
7449     CHECK_EQ(slim_chunk->IsMarking(), heap->incremental_marking()->IsMarking());
7450   } else {
7451     // Non-writable RO_SPACE must never have marking flag set.
7452     CHECK(!slim_chunk->IsMarking());
7453   }
7454   return true;
7455 }
7456 
7457 #ifdef DEBUG
7458 void Heap::IncrementObjectCounters() {
7459   isolate_->counters()->objs_since_last_full()->Increment();
7460   isolate_->counters()->objs_since_last_young()->Increment();
7461 }
7462 #endif  // DEBUG
7463 
7464 bool Heap::IsStressingScavenge() {
7465   return FLAG_stress_scavenge > 0 && new_space();
7466 }
7467 
7468 // StrongRootBlocks are allocated as a block of addresses, prefixed with a
7469 // StrongRootsEntry pointer:
7470 //
7471 //   | StrongRootsEntry*
7472 //   | Address 1
7473 //   | ...
7474 //   | Address N
7475 //
7476 // The allocate method registers the range "Address 1" to "Address N" with the
7477 // heap as a strong root array, saves that entry in StrongRootsEntry*, and
7478 // returns a pointer to Address 1.
7479 Address* StrongRootBlockAllocator::allocate(size_t n) {
7480   void* block = base::Malloc(sizeof(StrongRootsEntry*) + n * sizeof(Address));
7481 
7482   StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
7483   Address* ret = reinterpret_cast<Address*>(reinterpret_cast<char*>(block) +
7484                                             sizeof(StrongRootsEntry*));
7485 
7486   memset(ret, kNullAddress, n * sizeof(Address));
7487   *header = heap_->RegisterStrongRoots(
7488       "StrongRootBlockAllocator", FullObjectSlot(ret), FullObjectSlot(ret + n));
7489 
7490   return ret;
7491 }
7492 
7493 void StrongRootBlockAllocator::deallocate(Address* p, size_t n) noexcept {
7494   // The allocate method returns a pointer to Address 1, so the deallocate
7495   // method has to offset that pointer back by sizeof(StrongRootsEntry*).
7496   void* block = reinterpret_cast<char*>(p) - sizeof(StrongRootsEntry*);
7497   StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
7498 
7499   heap_->UnregisterStrongRoots(*header);
7500 
7501   base::Free(block);
7502 }
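
// Hedged usage sketch (illustrative; a StrongRootBlockAllocator constructor
// taking a Heap* is assumed from the use of heap_ above):
//
//   StrongRootBlockAllocator alloc(heap);
//   Address* block = alloc.allocate(4);  // block[0..3] registered as strong roots
//   // ... store object pointers into block[0..3]; the GC treats them as roots ...
//   alloc.deallocate(block, 4);          // unregisters the range and frees the block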
7503 
7504 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
7505 void Heap::set_allocation_timeout(int allocation_timeout) {
7506   heap_allocator_.SetAllocationTimeout(allocation_timeout);
7507 }
7508 #endif  // V8_ENABLE_ALLOCATION_TIMEOUT
7509 
7510 EmbedderStackStateScope::EmbedderStackStateScope(
7511     Heap* heap, Origin origin,
7512     EmbedderHeapTracer::EmbedderStackState stack_state)
7513     : local_tracer_(heap->local_embedder_heap_tracer()),
7514       old_stack_state_(local_tracer_->embedder_stack_state_) {
7515   if (origin == kImplicitThroughTask && heap->overriden_stack_state()) {
7516     stack_state = *heap->overriden_stack_state();
7517   }
7518 
7519   local_tracer_->embedder_stack_state_ = stack_state;
7520   if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
7521     local_tracer_->NotifyEmptyEmbedderStack();
7522 }
7523 
7524 // static
7525 EmbedderStackStateScope EmbedderStackStateScope::ExplicitScopeForTesting(
7526     LocalEmbedderHeapTracer* local_tracer,
7527     EmbedderHeapTracer::EmbedderStackState stack_state) {
7528   return EmbedderStackStateScope(local_tracer, stack_state);
7529 }
7530 
7531 EmbedderStackStateScope::EmbedderStackStateScope(
7532     LocalEmbedderHeapTracer* local_tracer,
7533     EmbedderHeapTracer::EmbedderStackState stack_state)
7534     : local_tracer_(local_tracer),
7535       old_stack_state_(local_tracer_->embedder_stack_state_) {
7536   local_tracer_->embedder_stack_state_ = stack_state;
7537   if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
7538     local_tracer_->NotifyEmptyEmbedderStack();
7539 }
7540 
7541 EmbedderStackStateScope::~EmbedderStackStateScope() {
7542   local_tracer_->embedder_stack_state_ = old_stack_state_;
7543 }
7544 
7545 }  // namespace internal
7546 }  // namespace v8
7547