1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/heap.h"
6
7 #include <atomic>
8 #include <cinttypes>
9 #include <iomanip>
10 #include <memory>
11 #include <unordered_map>
12 #include <unordered_set>
13
14 #include "src/api/api-inl.h"
15 #include "src/base/bits.h"
16 #include "src/base/flags.h"
17 #include "src/base/once.h"
18 #include "src/base/platform/mutex.h"
19 #include "src/base/utils/random-number-generator.h"
20 #include "src/builtins/accessors.h"
21 #include "src/codegen/assembler-inl.h"
22 #include "src/codegen/compilation-cache.h"
23 #include "src/common/assert-scope.h"
24 #include "src/common/globals.h"
25 #include "src/debug/debug.h"
26 #include "src/deoptimizer/deoptimizer.h"
27 #include "src/execution/isolate-utils-inl.h"
28 #include "src/execution/microtask-queue.h"
29 #include "src/execution/runtime-profiler.h"
30 #include "src/execution/v8threads.h"
31 #include "src/execution/vm-state-inl.h"
32 #include "src/handles/global-handles.h"
33 #include "src/heap/array-buffer-sweeper.h"
34 #include "src/heap/barrier.h"
35 #include "src/heap/base/stack.h"
36 #include "src/heap/code-object-registry.h"
37 #include "src/heap/code-stats.h"
38 #include "src/heap/collection-barrier.h"
39 #include "src/heap/combined-heap.h"
40 #include "src/heap/concurrent-allocator.h"
41 #include "src/heap/concurrent-marking.h"
42 #include "src/heap/embedder-tracing.h"
43 #include "src/heap/finalization-registry-cleanup-task.h"
44 #include "src/heap/gc-idle-time-handler.h"
45 #include "src/heap/gc-tracer.h"
46 #include "src/heap/heap-controller.h"
47 #include "src/heap/heap-write-barrier-inl.h"
48 #include "src/heap/incremental-marking-inl.h"
49 #include "src/heap/incremental-marking.h"
50 #include "src/heap/large-spaces.h"
51 #include "src/heap/local-heap.h"
52 #include "src/heap/mark-compact-inl.h"
53 #include "src/heap/mark-compact.h"
54 #include "src/heap/marking-barrier-inl.h"
55 #include "src/heap/marking-barrier.h"
56 #include "src/heap/memory-chunk-inl.h"
57 #include "src/heap/memory-measurement.h"
58 #include "src/heap/memory-reducer.h"
59 #include "src/heap/object-stats.h"
60 #include "src/heap/objects-visiting-inl.h"
61 #include "src/heap/objects-visiting.h"
62 #include "src/heap/paged-spaces-inl.h"
63 #include "src/heap/read-only-heap.h"
64 #include "src/heap/remembered-set.h"
65 #include "src/heap/safepoint.h"
66 #include "src/heap/scavenge-job.h"
67 #include "src/heap/scavenger-inl.h"
68 #include "src/heap/stress-marking-observer.h"
69 #include "src/heap/stress-scavenge-observer.h"
70 #include "src/heap/sweeper.h"
71 #include "src/init/bootstrapper.h"
72 #include "src/init/v8.h"
73 #include "src/interpreter/interpreter.h"
74 #include "src/logging/log.h"
75 #include "src/numbers/conversions.h"
76 #include "src/objects/data-handler.h"
77 #include "src/objects/feedback-vector.h"
78 #include "src/objects/free-space-inl.h"
79 #include "src/objects/hash-table-inl.h"
80 #include "src/objects/maybe-object.h"
81 #include "src/objects/shared-function-info.h"
82 #include "src/objects/slots-atomic-inl.h"
83 #include "src/objects/slots-inl.h"
84 #include "src/regexp/regexp.h"
85 #include "src/snapshot/embedded/embedded-data.h"
86 #include "src/snapshot/serializer-deserializer.h"
87 #include "src/snapshot/snapshot.h"
88 #include "src/strings/string-stream.h"
89 #include "src/strings/unicode-decoder.h"
90 #include "src/strings/unicode-inl.h"
91 #include "src/tracing/trace-event.h"
92 #include "src/utils/utils-inl.h"
93 #include "src/utils/utils.h"
94
95 #ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
96 #include "src/heap/conservative-stack-visitor.h"
97 #endif
98
99 // Has to be the last include (doesn't have include guards):
100 #include "src/objects/object-macros.h"
101
102 namespace v8 {
103 namespace internal {
104
105 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
106 Isolate* Heap::GetIsolateFromWritableObject(HeapObject object) {
107 return reinterpret_cast<Isolate*>(
108 third_party_heap::Heap::GetIsolate(object.address()));
109 }
110 #endif
111
112 // These are outside the Heap class so they can be forward-declared
113 // in heap-write-barrier-inl.h.
114 bool Heap_PageFlagsAreConsistent(HeapObject object) {
115 return Heap::PageFlagsAreConsistent(object);
116 }
117
118 void Heap_GenerationalBarrierSlow(HeapObject object, Address slot,
119 HeapObject value) {
120 Heap::GenerationalBarrierSlow(object, slot, value);
121 }
122
123 void Heap_WriteBarrierForCodeSlow(Code host) {
124 Heap::WriteBarrierForCodeSlow(host);
125 }
126
127 void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
128 HeapObject object) {
129 Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
130 }
131
132 void Heap_GenerationalEphemeronKeyBarrierSlow(Heap* heap,
133 EphemeronHashTable table,
134 Address slot) {
135 heap->RecordEphemeronKeyWrite(table, slot);
136 }
137
138 void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
139 DCHECK_EQ(Smi::zero(), arguments_adaptor_deopt_pc_offset());
140 set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
141 }
142
143 void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
144 DCHECK_EQ(Smi::zero(), construct_stub_create_deopt_pc_offset());
145 set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
146 }
147
148 void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
149 DCHECK_EQ(Smi::zero(), construct_stub_invoke_deopt_pc_offset());
150 set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
151 }
152
153 void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
154 DCHECK_EQ(Smi::zero(), interpreter_entry_return_pc_offset());
155 set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
156 }
157
158 void Heap::SetSerializedObjects(FixedArray objects) {
159 DCHECK(isolate()->serializer_enabled());
160 set_serialized_objects(objects);
161 }
162
163 void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
164 DCHECK(isolate()->serializer_enabled());
165 set_serialized_global_proxy_sizes(sizes);
166 }
167
168 void Heap::SetBasicBlockProfilingData(Handle<ArrayList> list) {
169 set_basic_block_profiling_data(*list);
170 }
171
172 bool Heap::GCCallbackTuple::operator==(
173 const Heap::GCCallbackTuple& other) const {
174 return other.callback == callback && other.data == data;
175 }
176
177 Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
178 const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default;
179
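// Allocation observer that schedules a scavenge task once the configured
// step size of bytes has been allocated in the new space (see Step() below).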
180 class ScavengeTaskObserver : public AllocationObserver {
181 public:
182   ScavengeTaskObserver(Heap* heap, intptr_t step_size)
183 : AllocationObserver(step_size), heap_(heap) {}
184
185   void Step(int bytes_allocated, Address, size_t) override {
186 heap_->ScheduleScavengeTaskIfNeeded();
187 }
188
189 private:
190 Heap* heap_;
191 };
192
193 Heap::Heap()
194 : isolate_(isolate()),
195 memory_pressure_level_(MemoryPressureLevel::kNone),
196 global_pretenuring_feedback_(kInitialFeedbackCapacity),
197 safepoint_(new GlobalSafepoint(this)),
198 external_string_table_(this),
199 collection_barrier_(new CollectionBarrier(this)) {
200 // Ensure max_old_generation_size_ is a multiple of Page::kPageSize.
201 DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));
202
203 set_native_contexts_list(Smi::zero());
204 set_allocation_sites_list(Smi::zero());
205 set_dirty_js_finalization_registries_list(Smi::zero());
206 set_dirty_js_finalization_registries_list_tail(Smi::zero());
207 // Put a dummy entry in the remembered pages so we can find the list in
208 // the minidump even if there are no real unmapped pages.
209 RememberUnmappedPage(kNullAddress, false);
210 }
211
212 Heap::~Heap() = default;
213
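// Upper bound on memory the heap may reserve: both semi-spaces of the young
// generation, a new large object space as large as a semi-space, and the
// old generation.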
214 size_t Heap::MaxReserved() {
215 const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
216 return static_cast<size_t>(2 * max_semi_space_size_ +
217 kMaxNewLargeObjectSpaceSize +
218 max_old_generation_size());
219 }
220
221 size_t Heap::YoungGenerationSizeFromOldGenerationSize(size_t old_generation) {
222 // Compute the semi space size and cap it.
223 size_t ratio = old_generation <= kOldGenerationLowMemory
224 ? kOldGenerationToSemiSpaceRatioLowMemory
225 : kOldGenerationToSemiSpaceRatio;
226 size_t semi_space = old_generation / ratio;
227 semi_space = Min<size_t>(semi_space, kMaxSemiSpaceSize);
228 semi_space = Max<size_t>(semi_space, kMinSemiSpaceSize);
229 semi_space = RoundUp(semi_space, Page::kPageSize);
230 return YoungGenerationSizeFromSemiSpaceSize(semi_space);
231 }
232
233 size_t Heap::HeapSizeFromPhysicalMemory(uint64_t physical_memory) {
234 // Compute the old generation size and cap it.
235 uint64_t old_generation = physical_memory /
236 kPhysicalMemoryToOldGenerationRatio *
237 kHeapLimitMultiplier;
238 old_generation =
239 Min<uint64_t>(old_generation, MaxOldGenerationSize(physical_memory));
240 old_generation = Max<uint64_t>(old_generation, V8HeapTrait::kMinSize);
241 old_generation = RoundUp(old_generation, Page::kPageSize);
242
243 size_t young_generation = YoungGenerationSizeFromOldGenerationSize(
244 static_cast<size_t>(old_generation));
245 return static_cast<size_t>(old_generation) + young_generation;
246 }
247
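// Splits a total heap size into old and young generation budgets; this is the
// inverse of the size computation above and is solved by binary search below.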
248 void Heap::GenerationSizesFromHeapSize(size_t heap_size,
249 size_t* young_generation_size,
250 size_t* old_generation_size) {
251 // Initialize values for the case when the given heap size is too small.
252 *young_generation_size = 0;
253 *old_generation_size = 0;
254 // Binary search for the largest old generation size that fits into the
255 // given heap limit, considering the correspondingly sized young generation.
256 size_t lower = 0, upper = heap_size;
257 while (lower + 1 < upper) {
258 size_t old_generation = lower + (upper - lower) / 2;
259 size_t young_generation =
260 YoungGenerationSizeFromOldGenerationSize(old_generation);
261 if (old_generation + young_generation <= heap_size) {
262 // This size configuration fits into the given heap limit.
263 *young_generation_size = young_generation;
264 *old_generation_size = old_generation;
265 lower = old_generation;
266 } else {
267 upper = old_generation;
268 }
269 }
270 }
271
272 size_t Heap::MinYoungGenerationSize() {
273 return YoungGenerationSizeFromSemiSpaceSize(kMinSemiSpaceSize);
274 }
275
276 size_t Heap::MinOldGenerationSize() {
277 size_t paged_space_count =
278 LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
279 return paged_space_count * Page::kPageSize;
280 }
281
282 size_t Heap::AllocatorLimitOnMaxOldGenerationSize() {
283 #ifdef V8_COMPRESS_POINTERS
284 // Isolate and the young generation are also allocated on the heap.
285 return kPtrComprHeapReservationSize -
286 YoungGenerationSizeFromSemiSpaceSize(kMaxSemiSpaceSize) -
287 RoundUp(sizeof(Isolate), size_t{1} << kPageSizeBits);
288 #endif
289 return std::numeric_limits<size_t>::max();
290 }
291
292 size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
293 size_t max_size = V8HeapTrait::kMaxSize;
294 // Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
295 // systems with physical memory bigger than 16GB. The physical memory
296 // is rounded up to GB.
297 constexpr bool x64_bit = Heap::kHeapLimitMultiplier >= 2;
298 if (FLAG_huge_max_old_generation_size && x64_bit &&
299 (physical_memory + 512 * MB) / GB >= 16) {
300 DCHECK_EQ(max_size / GB, 2);
301 max_size *= 2;
302 }
303 return Min(max_size, AllocatorLimitOnMaxOldGenerationSize());
304 }
305
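// The young generation consists of two semi-spaces plus the new large object
// space, which is sized at kNewLargeObjectSpaceToSemiSpaceRatio times the
// semi-space size. The two helpers below convert between these sizes.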
306 size_t Heap::YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size) {
307 return semi_space_size * (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
308 }
309
310 size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
311 size_t young_generation_size) {
312 return young_generation_size / (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
313 }
314
315 size_t Heap::Capacity() {
316 if (!HasBeenSetUp()) return 0;
317
318 return new_space_->Capacity() + OldGenerationCapacity();
319 }
320
321 size_t Heap::OldGenerationCapacity() {
322 if (!HasBeenSetUp()) return 0;
323 PagedSpaceIterator spaces(this);
324 size_t total = 0;
325 for (PagedSpace* space = spaces.Next(); space != nullptr;
326 space = spaces.Next()) {
327 total += space->Capacity();
328 }
329 return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
330 }
331
332 size_t Heap::CommittedOldGenerationMemory() {
333 if (!HasBeenSetUp()) return 0;
334
335 PagedSpaceIterator spaces(this);
336 size_t total = 0;
337 for (PagedSpace* space = spaces.Next(); space != nullptr;
338 space = spaces.Next()) {
339 total += space->CommittedMemory();
340 }
341 return total + lo_space_->Size() + code_lo_space_->Size();
342 }
343
344 size_t Heap::CommittedMemoryOfUnmapper() {
345 if (!HasBeenSetUp()) return 0;
346
347 return memory_allocator()->unmapper()->CommittedBufferedMemory();
348 }
349
350 size_t Heap::CommittedMemory() {
351 if (!HasBeenSetUp()) return 0;
352
353 return new_space_->CommittedMemory() + new_lo_space_->Size() +
354 CommittedOldGenerationMemory();
355 }
356
357
358 size_t Heap::CommittedPhysicalMemory() {
359 if (!HasBeenSetUp()) return 0;
360
361 size_t total = 0;
362 for (SpaceIterator it(this); it.HasNext();) {
363 total += it.Next()->CommittedPhysicalMemory();
364 }
365
366 return total;
367 }
368
369 size_t Heap::CommittedMemoryExecutable() {
370 if (!HasBeenSetUp()) return 0;
371
372 return static_cast<size_t>(memory_allocator()->SizeExecutable());
373 }
374
375
376 void Heap::UpdateMaximumCommitted() {
377 if (!HasBeenSetUp()) return;
378
379 const size_t current_committed_memory = CommittedMemory();
380 if (current_committed_memory > maximum_committed_) {
381 maximum_committed_ = current_committed_memory;
382 }
383 }
384
385 size_t Heap::Available() {
386 if (!HasBeenSetUp()) return 0;
387
388 size_t total = 0;
389
390 for (SpaceIterator it(this); it.HasNext();) {
391 total += it.Next()->Available();
392 }
393
394 total += memory_allocator()->Available();
395 return total;
396 }
397
398 bool Heap::CanExpandOldGeneration(size_t size) {
399 if (force_oom_ || force_gc_on_next_allocation_) return false;
400 if (OldGenerationCapacity() + size > max_old_generation_size()) return false;
401 // The OldGenerationCapacity does not account for compaction spaces used
402 // during evacuation. Ensure that expanding the old generation does not push
403 // the total allocated memory size over the maximum heap size.
404 return memory_allocator()->Size() + size <= MaxReserved();
405 }
406
407 bool Heap::CanExpandOldGenerationBackground(size_t size) {
408 if (force_oom_) return false;
409 // While the heap is tearing down, GC requests from background threads are
410 // not served and the threads are allowed to expand the heap to avoid OOM.
411 return gc_state() == TEAR_DOWN ||
412 memory_allocator()->Size() + size <= MaxReserved();
413 }
414
415 bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) {
416 // Over-estimate the new space size using capacity to allow some slack.
417 return CanExpandOldGeneration(size + new_space_->Capacity() +
418 new_lo_space_->Size());
419 }
420
421 bool Heap::HasBeenSetUp() const {
422 // We will always have a new space when the heap is set up.
423 return new_space_ != nullptr;
424 }
425
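// Chooses the collector for this cycle: a request for an old-space collection,
// stress/forced compaction, pending incremental-marking finalization, or the
// risk that a scavenge might not succeed all force a full mark-compact;
// otherwise the young generation collector is used.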
426 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
427 const char** reason) {
428 // Is global GC requested?
429 if (space != NEW_SPACE && space != NEW_LO_SPACE) {
430 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
431 *reason = "GC in old space requested";
432 return MARK_COMPACTOR;
433 }
434
435 if (FLAG_gc_global || ShouldStressCompaction()) {
436 *reason = "GC in old space forced by flags";
437 return MARK_COMPACTOR;
438 }
439
440 if (incremental_marking()->NeedsFinalization() &&
441 AllocationLimitOvershotByLargeMargin()) {
442 *reason = "Incremental marking needs finalization";
443 return MARK_COMPACTOR;
444 }
445
446 if (!CanPromoteYoungAndExpandOldGeneration(0)) {
447 isolate_->counters()
448 ->gc_compactor_caused_by_oldspace_exhaustion()
449 ->Increment();
450 *reason = "scavenge might not succeed";
451 return MARK_COMPACTOR;
452 }
453
454 // Default
455 *reason = nullptr;
456 return YoungGenerationCollector();
457 }
458
459 void Heap::SetGCState(HeapState state) {
460 gc_state_.store(state, std::memory_order_relaxed);
461 }
462
463 void Heap::PrintShortHeapStatistics() {
464 if (!FLAG_trace_gc_verbose) return;
465 PrintIsolate(isolate_,
466 "Memory allocator, used: %6zu KB,"
467 " available: %6zu KB\n",
468 memory_allocator()->Size() / KB,
469 memory_allocator()->Available() / KB);
470 PrintIsolate(isolate_,
471 "Read-only space, used: %6zu KB"
472 ", available: %6zu KB"
473 ", committed: %6zu KB\n",
474 read_only_space_->Size() / KB, size_t{0},
475 read_only_space_->CommittedMemory() / KB);
476 PrintIsolate(isolate_,
477 "New space, used: %6zu KB"
478 ", available: %6zu KB"
479 ", committed: %6zu KB\n",
480 new_space_->Size() / KB, new_space_->Available() / KB,
481 new_space_->CommittedMemory() / KB);
482 PrintIsolate(isolate_,
483 "New large object space, used: %6zu KB"
484 ", available: %6zu KB"
485 ", committed: %6zu KB\n",
486 new_lo_space_->SizeOfObjects() / KB,
487 new_lo_space_->Available() / KB,
488 new_lo_space_->CommittedMemory() / KB);
489 PrintIsolate(isolate_,
490 "Old space, used: %6zu KB"
491 ", available: %6zu KB"
492 ", committed: %6zu KB\n",
493 old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
494 old_space_->CommittedMemory() / KB);
495 PrintIsolate(isolate_,
496 "Code space, used: %6zu KB"
497 ", available: %6zu KB"
498 ", committed: %6zu KB\n",
499 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
500 code_space_->CommittedMemory() / KB);
501 PrintIsolate(isolate_,
502 "Map space, used: %6zu KB"
503 ", available: %6zu KB"
504 ", committed: %6zu KB\n",
505 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
506 map_space_->CommittedMemory() / KB);
507 PrintIsolate(isolate_,
508 "Large object space, used: %6zu KB"
509 ", available: %6zu KB"
510 ", committed: %6zu KB\n",
511 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
512 lo_space_->CommittedMemory() / KB);
513 PrintIsolate(isolate_,
514 "Code large object space, used: %6zu KB"
515 ", available: %6zu KB"
516 ", committed: %6zu KB\n",
517 code_lo_space_->SizeOfObjects() / KB,
518 code_lo_space_->Available() / KB,
519 code_lo_space_->CommittedMemory() / KB);
520 ReadOnlySpace* const ro_space = read_only_space_;
521 PrintIsolate(isolate_,
522 "All spaces, used: %6zu KB"
523 ", available: %6zu KB"
524 ", committed: %6zu KB\n",
525 (this->SizeOfObjects() + ro_space->Size()) / KB,
526 (this->Available()) / KB,
527 (this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
528 PrintIsolate(isolate_,
529 "Unmapper buffering %zu chunks of committed: %6zu KB\n",
530 memory_allocator()->unmapper()->NumberOfCommittedChunks(),
531 CommittedMemoryOfUnmapper() / KB);
532 PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
533 external_memory_.total() / KB);
534 PrintIsolate(isolate_, "Backing store memory: %6zu KB\n",
535 backing_store_bytes_ / KB);
536 PrintIsolate(isolate_, "External memory global %zu KB\n",
537 external_memory_callback_() / KB);
538 PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
539 total_gc_time_ms_);
540 }
541
542 void Heap::PrintFreeListsStats() {
543 DCHECK(FLAG_trace_gc_freelists);
544
545 if (FLAG_trace_gc_freelists_verbose) {
546 PrintIsolate(isolate_,
547 "Freelists statistics per Page: "
548 "[category: length || total free bytes]\n");
549 }
550
551 std::vector<int> categories_lengths(
552 old_space()->free_list()->number_of_categories(), 0);
553 std::vector<size_t> categories_sums(
554 old_space()->free_list()->number_of_categories(), 0);
555 unsigned int pageCnt = 0;
556
557 // This loop computes freelist lengths and sums.
558 // If FLAG_trace_gc_freelists_verbose is enabled, it also prints
559 // the stats of each FreeListCategory of each Page.
560 for (Page* page : *old_space()) {
561 std::ostringstream out_str;
562
563 if (FLAG_trace_gc_freelists_verbose) {
564 out_str << "Page " << std::setw(4) << pageCnt;
565 }
566
567 for (int cat = kFirstCategory;
568 cat <= old_space()->free_list()->last_category(); cat++) {
569 FreeListCategory* free_list =
570 page->free_list_category(static_cast<FreeListCategoryType>(cat));
571 int length = free_list->FreeListLength();
572 size_t sum = free_list->SumFreeList();
573
574 if (FLAG_trace_gc_freelists_verbose) {
575 out_str << "[" << cat << ": " << std::setw(4) << length << " || "
576 << std::setw(6) << sum << " ]"
577 << (cat == old_space()->free_list()->last_category() ? "\n"
578 : ", ");
579 }
580 categories_lengths[cat] += length;
581 categories_sums[cat] += sum;
582 }
583
584 if (FLAG_trace_gc_freelists_verbose) {
585 PrintIsolate(isolate_, "%s", out_str.str().c_str());
586 }
587
588 pageCnt++;
589 }
590
591 // Print statistics about old_space (pages, free/wasted/used memory...).
592 PrintIsolate(
593 isolate_,
594 "%d pages. Free space: %.1f MB (waste: %.2f). "
595 "Usage: %.1f/%.1f (MB) -> %.2f%%.\n",
596 pageCnt, static_cast<double>(old_space_->Available()) / MB,
597 static_cast<double>(old_space_->Waste()) / MB,
598 static_cast<double>(old_space_->Size()) / MB,
599 static_cast<double>(old_space_->Capacity()) / MB,
600 static_cast<double>(old_space_->Size()) / old_space_->Capacity() * 100);
601
602 // Print global statistics of each FreeListCategory (length & sum).
603 PrintIsolate(isolate_,
604 "FreeLists global statistics: "
605 "[category: length || total free KB]\n");
606 std::ostringstream out_str;
607 for (int cat = kFirstCategory;
608 cat <= old_space()->free_list()->last_category(); cat++) {
609 out_str << "[" << cat << ": " << categories_lengths[cat] << " || "
610 << std::fixed << std::setprecision(2)
611 << static_cast<double>(categories_sums[cat]) / KB << " KB]"
612 << (cat == old_space()->free_list()->last_category() ? "\n" : ", ");
613 }
614 PrintIsolate(isolate_, "%s", out_str.str().c_str());
615 }
616
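// Dumps heap-wide and per-space statistics as a single JSON dictionary, based
// on v8::Isolate::GetHeapStatistics() and GetHeapSpaceStatistics().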
617 void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
618 HeapStatistics stats;
619 reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats);
620
621 // clang-format off
622 #define DICT(s) "{" << s << "}"
623 #define LIST(s) "[" << s << "]"
624 #define ESCAPE(s) "\"" << s << "\""
625 #define MEMBER(s) ESCAPE(s) << ":"
626
627 auto SpaceStatistics = [this](int space_index) {
628 HeapSpaceStatistics space_stats;
629 reinterpret_cast<v8::Isolate*>(isolate())->GetHeapSpaceStatistics(
630 &space_stats, space_index);
631 std::stringstream stream;
632 stream << DICT(
633 MEMBER("name")
634 << ESCAPE(BaseSpace::GetSpaceName(
635 static_cast<AllocationSpace>(space_index)))
636 << ","
637 MEMBER("size") << space_stats.space_size() << ","
638 MEMBER("used_size") << space_stats.space_used_size() << ","
639 MEMBER("available_size") << space_stats.space_available_size() << ","
640 MEMBER("physical_size") << space_stats.physical_space_size());
641 return stream.str();
642 };
643
644 stream << DICT(
645 MEMBER("isolate") << ESCAPE(reinterpret_cast<void*>(isolate())) << ","
646 MEMBER("id") << gc_count() << ","
647 MEMBER("time_ms") << isolate()->time_millis_since_init() << ","
648 MEMBER("total_heap_size") << stats.total_heap_size() << ","
649 MEMBER("total_heap_size_executable")
650 << stats.total_heap_size_executable() << ","
651 MEMBER("total_physical_size") << stats.total_physical_size() << ","
652 MEMBER("total_available_size") << stats.total_available_size() << ","
653 MEMBER("used_heap_size") << stats.used_heap_size() << ","
654 MEMBER("heap_size_limit") << stats.heap_size_limit() << ","
655 MEMBER("malloced_memory") << stats.malloced_memory() << ","
656 MEMBER("external_memory") << stats.external_memory() << ","
657 MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << ","
658 MEMBER("spaces") << LIST(
659 SpaceStatistics(RO_SPACE) << "," <<
660 SpaceStatistics(NEW_SPACE) << "," <<
661 SpaceStatistics(OLD_SPACE) << "," <<
662 SpaceStatistics(CODE_SPACE) << "," <<
663 SpaceStatistics(MAP_SPACE) << "," <<
664 SpaceStatistics(LO_SPACE) << "," <<
665 SpaceStatistics(CODE_LO_SPACE) << "," <<
666 SpaceStatistics(NEW_LO_SPACE)));
667
668 #undef DICT
669 #undef LIST
670 #undef ESCAPE
671 #undef MEMBER
672 // clang-format on
673 }
674
675 void Heap::ReportStatisticsAfterGC() {
676 for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
677 ++i) {
678 int count = deferred_counters_[i];
679 deferred_counters_[i] = 0;
680 while (count > 0) {
681 count--;
682 isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
683 }
684 }
685 }
686
687 void Heap::AddHeapObjectAllocationTracker(
688 HeapObjectAllocationTracker* tracker) {
689 if (allocation_trackers_.empty() && FLAG_inline_new) {
690 DisableInlineAllocation();
691 }
692 allocation_trackers_.push_back(tracker);
693 }
694
695 void Heap::RemoveHeapObjectAllocationTracker(
696 HeapObjectAllocationTracker* tracker) {
697 allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
698 allocation_trackers_.end(), tracker),
699 allocation_trackers_.end());
700 if (allocation_trackers_.empty() && FLAG_inline_new) {
701 EnableInlineAllocation();
702 }
703 }
704
705 void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
706 RetainingPathOption option) {
707 if (!FLAG_track_retaining_path) {
708 PrintF("Retaining path tracking requires --track-retaining-path\n");
709 } else {
710 Handle<WeakArrayList> array(retaining_path_targets(), isolate());
711 int index = array->length();
712 array = WeakArrayList::AddToEnd(isolate(), array,
713 MaybeObjectHandle::Weak(object));
714 set_retaining_path_targets(*array);
715 DCHECK_EQ(array->length(), index + 1);
716 retaining_path_target_option_[index] = option;
717 }
718 }
719
720 bool Heap::IsRetainingPathTarget(HeapObject object,
721 RetainingPathOption* option) {
722 WeakArrayList targets = retaining_path_targets();
723 int length = targets.length();
724 MaybeObject object_to_check = HeapObjectReference::Weak(object);
725 for (int i = 0; i < length; i++) {
726 MaybeObject target = targets.Get(i);
727 DCHECK(target->IsWeakOrCleared());
728 if (target == object_to_check) {
729 DCHECK(retaining_path_target_option_.count(i));
730 *option = retaining_path_target_option_[i];
731 return true;
732 }
733 }
734 return false;
735 }
736
737 void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
738 PrintF("\n\n\n");
739 PrintF("#################################################\n");
740 PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target.ptr()));
741 HeapObject object = target;
742 std::vector<std::pair<HeapObject, bool>> retaining_path;
743 Root root = Root::kUnknown;
744 bool ephemeron = false;
745 while (true) {
746 retaining_path.push_back(std::make_pair(object, ephemeron));
747 if (option == RetainingPathOption::kTrackEphemeronPath &&
748 ephemeron_retainer_.count(object)) {
749 object = ephemeron_retainer_[object];
750 ephemeron = true;
751 } else if (retainer_.count(object)) {
752 object = retainer_[object];
753 ephemeron = false;
754 } else {
755 if (retaining_root_.count(object)) {
756 root = retaining_root_[object];
757 }
758 break;
759 }
760 }
761 int distance = static_cast<int>(retaining_path.size());
762 for (auto node : retaining_path) {
763 HeapObject object = node.first;
764 bool ephemeron = node.second;
765 PrintF("\n");
766 PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
767 PrintF("Distance from root %d%s: ", distance,
768 ephemeron ? " (ephemeron)" : "");
769 object.ShortPrint();
770 PrintF("\n");
771 #ifdef OBJECT_PRINT
772 object.Print();
773 PrintF("\n");
774 #endif
775 --distance;
776 }
777 PrintF("\n");
778 PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
779 PrintF("Root: %s\n", RootVisitor::RootName(root));
780 PrintF("-------------------------------------------------\n");
781 }
782
783 void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
784 if (retainer_.count(object)) return;
785 retainer_[object] = retainer;
786 RetainingPathOption option = RetainingPathOption::kDefault;
787 if (IsRetainingPathTarget(object, &option)) {
788 // Check if the retaining path was already printed in
789 // AddEphemeronRetainer().
790 if (ephemeron_retainer_.count(object) == 0 ||
791 option == RetainingPathOption::kDefault) {
792 PrintRetainingPath(object, option);
793 }
794 }
795 }
796
797 void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) {
798 if (ephemeron_retainer_.count(object)) return;
799 ephemeron_retainer_[object] = retainer;
800 RetainingPathOption option = RetainingPathOption::kDefault;
801 if (IsRetainingPathTarget(object, &option) &&
802 option == RetainingPathOption::kTrackEphemeronPath) {
803 // Check if the retaining path was already printed in AddRetainer().
804 if (retainer_.count(object) == 0) {
805 PrintRetainingPath(object, option);
806 }
807 }
808 }
809
810 void Heap::AddRetainingRoot(Root root, HeapObject object) {
811 if (retaining_root_.count(object)) return;
812 retaining_root_[object] = root;
813 RetainingPathOption option = RetainingPathOption::kDefault;
814 if (IsRetainingPathTarget(object, &option)) {
815 PrintRetainingPath(object, option);
816 }
817 }
818
819 void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
820 deferred_counters_[feature]++;
821 }
822
823 bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
824
825 void Heap::GarbageCollectionPrologue() {
826 TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
827
828 // Reset GC statistics.
829 promoted_objects_size_ = 0;
830 previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
831 semi_space_copied_object_size_ = 0;
832 nodes_died_in_new_space_ = 0;
833 nodes_copied_in_new_space_ = 0;
834 nodes_promoted_ = 0;
835
836 UpdateMaximumCommitted();
837
838 #ifdef DEBUG
839 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state() == NOT_IN_GC);
840
841 if (FLAG_gc_verbose) Print();
842 #endif // DEBUG
843
844 if (new_space_->IsAtMaximumCapacity()) {
845 maximum_size_scavenges_++;
846 } else {
847 maximum_size_scavenges_ = 0;
848 }
849 if (FLAG_track_retaining_path) {
850 retainer_.clear();
851 ephemeron_retainer_.clear();
852 retaining_root_.clear();
853 }
854 memory_allocator()->unmapper()->PrepareForGC();
855 }
856
857 void Heap::GarbageCollectionPrologueInSafepoint() {
858 TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE_SAFEPOINT);
859 gc_count_++;
860
861 UpdateNewSpaceAllocationCounter();
862 CheckNewSpaceExpansionCriteria();
863 }
864
865 size_t Heap::SizeOfObjects() {
866 size_t total = 0;
867
868 for (SpaceIterator it(this); it.HasNext();) {
869 total += it.Next()->SizeOfObjects();
870 }
871 return total;
872 }
873
874 size_t Heap::TotalGlobalHandlesSize() {
875 return isolate_->global_handles()->TotalSize();
876 }
877
878 size_t Heap::UsedGlobalHandlesSize() {
879 return isolate_->global_handles()->UsedSize();
880 }
881
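// Merges locally collected pretenuring feedback into the global feedback map,
// following forwarding addresses and skipping invalid or zombie sites.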
882 void Heap::MergeAllocationSitePretenuringFeedback(
883 const PretenuringFeedbackMap& local_pretenuring_feedback) {
884 AllocationSite site;
885 for (auto& site_and_count : local_pretenuring_feedback) {
886 site = site_and_count.first;
887 MapWord map_word = site_and_count.first.map_word();
888 if (map_word.IsForwardingAddress()) {
889 site = AllocationSite::cast(map_word.ToForwardingAddress());
890 }
891
892 // We have not validated the allocation site yet, since we have not
893 // dereferenced the site while collecting the information.
894 // This is an inlined check of AllocationMemento::IsValid.
895 if (!site.IsAllocationSite() || site.IsZombie()) continue;
896
897 const int value = static_cast<int>(site_and_count.second);
898 DCHECK_LT(0, value);
899 if (site.IncrementMementoFoundCount(value)) {
900 // For sites in the global map the count is accessed through the site.
901 global_pretenuring_feedback_.insert(std::make_pair(site, 0));
902 }
903 }
904 }
905
906 void Heap::AddAllocationObserversToAllSpaces(
907 AllocationObserver* observer, AllocationObserver* new_space_observer) {
908 DCHECK(observer && new_space_observer);
909 SafepointScope scope(this);
910
911 for (SpaceIterator it(this); it.HasNext();) {
912 Space* space = it.Next();
913 if (space == new_space()) {
914 space->AddAllocationObserver(new_space_observer);
915 } else {
916 space->AddAllocationObserver(observer);
917 }
918 }
919 }
920
921 void Heap::RemoveAllocationObserversFromAllSpaces(
922 AllocationObserver* observer, AllocationObserver* new_space_observer) {
923 DCHECK(observer && new_space_observer);
924 SafepointScope scope(this);
925
926 for (SpaceIterator it(this); it.HasNext();) {
927 Space* space = it.Next();
928 if (space == new_space()) {
929 space->RemoveAllocationObserver(new_space_observer);
930 } else {
931 space->RemoveAllocationObserver(observer);
932 }
933 }
934 }
935
936 namespace {
937 inline bool MakePretenureDecision(
938 AllocationSite site, AllocationSite::PretenureDecision current_decision,
939 double ratio, bool maximum_size_scavenge) {
940 // Here we just allow state transitions from undecided or maybe tenure
941 // to don't tenure, maybe tenure, or tenure.
942 if ((current_decision == AllocationSite::kUndecided ||
943 current_decision == AllocationSite::kMaybeTenure)) {
944 if (ratio >= AllocationSite::kPretenureRatio) {
945 // We only transition into the tenure state when the semi-space was at
946 // maximum capacity.
947 if (maximum_size_scavenge) {
948 site.set_deopt_dependent_code(true);
949 site.set_pretenure_decision(AllocationSite::kTenure);
950 // Currently we just need to deopt when we make a state transition to
951 // tenure.
952 return true;
953 }
954 site.set_pretenure_decision(AllocationSite::kMaybeTenure);
955 } else {
956 site.set_pretenure_decision(AllocationSite::kDontTenure);
957 }
958 }
959 return false;
960 }
961
962 inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
963 bool maximum_size_scavenge) {
964 bool deopt = false;
965 int create_count = site.memento_create_count();
966 int found_count = site.memento_found_count();
967 bool minimum_mementos_created =
968 create_count >= AllocationSite::kPretenureMinimumCreated;
969 double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
970 ? static_cast<double>(found_count) / create_count
971 : 0.0;
972 AllocationSite::PretenureDecision current_decision =
973 site.pretenure_decision();
974
975 if (minimum_mementos_created) {
976 deopt = MakePretenureDecision(site, current_decision, ratio,
977 maximum_size_scavenge);
978 }
979
980 if (FLAG_trace_pretenuring_statistics) {
981 PrintIsolate(isolate,
982 "pretenuring: AllocationSite(%p): (created, found, ratio) "
983 "(%d, %d, %f) %s => %s\n",
984 reinterpret_cast<void*>(site.ptr()), create_count, found_count,
985 ratio, site.PretenureDecisionName(current_decision),
986 site.PretenureDecisionName(site.pretenure_decision()));
987 }
988
989 // Clear feedback calculation fields until the next gc.
990 site.set_memento_found_count(0);
991 site.set_memento_create_count(0);
992 return deopt;
993 }
994 } // namespace
995
996 void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
997 global_pretenuring_feedback_.erase(site);
998 }
999
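// Maybe-tenured sites are deoptimized when the new space has reached its
// maximum capacity but no maximum-capacity scavenge has been recorded yet.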
1000 bool Heap::DeoptMaybeTenuredAllocationSites() {
1001 return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
1002 }
1003
1004 void Heap::ProcessPretenuringFeedback() {
1005 bool trigger_deoptimization = false;
1006 if (FLAG_allocation_site_pretenuring) {
1007 int tenure_decisions = 0;
1008 int dont_tenure_decisions = 0;
1009 int allocation_mementos_found = 0;
1010 int allocation_sites = 0;
1011 int active_allocation_sites = 0;
1012
1013 AllocationSite site;
1014
1015 // Step 1: Digest feedback for recorded allocation sites.
1016 bool maximum_size_scavenge = MaximumSizeScavenge();
1017 for (auto& site_and_count : global_pretenuring_feedback_) {
1018 allocation_sites++;
1019 site = site_and_count.first;
1020 // The count is always accessed through the site.
1021 DCHECK_EQ(0, site_and_count.second);
1022 int found_count = site.memento_found_count();
1023 // An entry in the storage does not imply that the count is > 0 because
1024 // allocation sites might have been reset due to too many objects dying
1025 // in old space.
1026 if (found_count > 0) {
1027 DCHECK(site.IsAllocationSite());
1028 active_allocation_sites++;
1029 allocation_mementos_found += found_count;
1030 if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
1031 trigger_deoptimization = true;
1032 }
1033 if (site.GetAllocationType() == AllocationType::kOld) {
1034 tenure_decisions++;
1035 } else {
1036 dont_tenure_decisions++;
1037 }
1038 }
1039 }
1040
1041 // Step 2: Deopt maybe tenured allocation sites if necessary.
1042 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
1043 if (deopt_maybe_tenured) {
1044 ForeachAllocationSite(
1045 allocation_sites_list(),
1046 [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
1047 DCHECK(site.IsAllocationSite());
1048 allocation_sites++;
1049 if (site.IsMaybeTenure()) {
1050 site.set_deopt_dependent_code(true);
1051 trigger_deoptimization = true;
1052 }
1053 });
1054 }
1055
1056 if (trigger_deoptimization) {
1057 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1058 }
1059
1060 if (FLAG_trace_pretenuring_statistics &&
1061 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
1062 dont_tenure_decisions > 0)) {
1063 PrintIsolate(isolate(),
1064 "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
1065 "active_sites=%d "
1066 "mementos=%d tenured=%d not_tenured=%d\n",
1067 deopt_maybe_tenured ? 1 : 0, allocation_sites,
1068 active_allocation_sites, allocation_mementos_found,
1069 tenure_decisions, dont_tenure_decisions);
1070 }
1071
1072 global_pretenuring_feedback_.clear();
1073 global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
1074 }
1075 }
1076
1077 void Heap::InvalidateCodeDeoptimizationData(Code code) {
1078 CodePageMemoryModificationScope modification_scope(code);
1079 code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
1080 }
1081
1082 void Heap::DeoptMarkedAllocationSites() {
1083 // TODO(hpayer): If iterating over the allocation sites list becomes a
1084 // performance issue, use a cache data structure in heap instead.
1085
1086 ForeachAllocationSite(allocation_sites_list(), [](AllocationSite site) {
1087 if (site.deopt_dependent_code()) {
1088 site.dependent_code().MarkCodeForDeoptimization(
1089 DependentCode::kAllocationSiteTenuringChangedGroup);
1090 site.set_deopt_dependent_code(false);
1091 }
1092 });
1093
1094 Deoptimizer::DeoptimizeMarkedCode(isolate_);
1095 }
1096
1097 void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
1098 if (collector == MARK_COMPACTOR) {
1099 memory_pressure_level_.store(MemoryPressureLevel::kNone,
1100 std::memory_order_relaxed);
1101 }
1102
1103 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_SAFEPOINT);
1104
1105 #define UPDATE_COUNTERS_FOR_SPACE(space) \
1106 isolate_->counters()->space##_bytes_available()->Set( \
1107 static_cast<int>(space()->Available())); \
1108 isolate_->counters()->space##_bytes_committed()->Set( \
1109 static_cast<int>(space()->CommittedMemory())); \
1110 isolate_->counters()->space##_bytes_used()->Set( \
1111 static_cast<int>(space()->SizeOfObjects()));
1112 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
1113 if (space()->CommittedMemory() > 0) { \
1114 isolate_->counters()->external_fragmentation_##space()->AddSample( \
1115 static_cast<int>(100 - (space()->SizeOfObjects() * 100.0) / \
1116 space()->CommittedMemory())); \
1117 }
1118 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
1119 UPDATE_COUNTERS_FOR_SPACE(space) \
1120 UPDATE_FRAGMENTATION_FOR_SPACE(space)
1121
1122 UPDATE_COUNTERS_FOR_SPACE(new_space)
1123 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
1124 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
1125 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
1126 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
1127 #undef UPDATE_COUNTERS_FOR_SPACE
1128 #undef UPDATE_FRAGMENTATION_FOR_SPACE
1129 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
1130
1131 #ifdef DEBUG
1132 // Old-to-new slot sets must be empty after each collection.
1133 for (SpaceIterator it(this); it.HasNext();) {
1134 Space* space = it.Next();
1135
1136 for (MemoryChunk* chunk = space->first_page(); chunk != space->last_page();
1137 chunk = chunk->list_node().next())
1138 DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
1139 }
1140
1141 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
1142 if (FLAG_print_handles) PrintHandles();
1143 if (FLAG_gc_verbose) Print();
1144 if (FLAG_code_stats) ReportCodeStatistics("After GC");
1145 if (FLAG_check_handle_count) CheckHandleCount();
1146 #endif
1147
1148 if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
1149 ZapFromSpace();
1150 }
1151
1152 {
1153 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
1154 ReduceNewSpaceSize();
1155 }
1156
1157 // Resume all threads waiting for the GC.
1158 collection_barrier_->ResumeThreadsAwaitingCollection();
1159 }
1160
1161 void Heap::GarbageCollectionEpilogue() {
1162 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
1163 AllowHeapAllocation for_the_rest_of_the_epilogue;
1164
1165 UpdateMaximumCommitted();
1166
1167 isolate_->counters()->alive_after_last_gc()->Set(
1168 static_cast<int>(SizeOfObjects()));
1169
1170 isolate_->counters()->string_table_capacity()->Set(
1171 isolate()->string_table()->Capacity());
1172 isolate_->counters()->number_of_symbols()->Set(
1173 isolate()->string_table()->NumberOfElements());
1174
1175 if (CommittedMemory() > 0) {
1176 isolate_->counters()->external_fragmentation_total()->AddSample(
1177 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
1178
1179 isolate_->counters()->heap_sample_total_committed()->AddSample(
1180 static_cast<int>(CommittedMemory() / KB));
1181 isolate_->counters()->heap_sample_total_used()->AddSample(
1182 static_cast<int>(SizeOfObjects() / KB));
1183 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
1184 static_cast<int>(map_space()->CommittedMemory() / KB));
1185 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
1186 static_cast<int>(code_space()->CommittedMemory() / KB));
1187
1188 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
1189 static_cast<int>(MaximumCommittedMemory() / KB));
1190 }
1191
1192 #ifdef DEBUG
1193 ReportStatisticsAfterGC();
1194 #endif // DEBUG
1195
1196 last_gc_time_ = MonotonicallyIncreasingTimeInMs();
1197 }
1198
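// Tracks the nesting depth of GC callback invocations; CheckReenter() is true
// only at the outermost level so callers can avoid re-entrant callback
// dispatch.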
1199 class GCCallbacksScope {
1200 public:
1201   explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
1202 heap_->gc_callbacks_depth_++;
1203 }
1204   ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
1205
1206   bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
1207
1208 private:
1209 Heap* heap_;
1210 };
1211
1212
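// Serves GC requests raised via the stack guard: stress-scavenge requests,
// high memory pressure, requested collections, and completion or finalization
// of incremental marking.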
1213 void Heap::HandleGCRequest() {
1214 if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
1215 CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
1216 stress_scavenge_observer_->RequestedGCDone();
1217 } else if (HighMemoryPressure()) {
1218 incremental_marking()->reset_request_type();
1219 CheckMemoryPressure();
1220 } else if (CollectionRequested()) {
1221 CheckCollectionRequested();
1222 } else if (incremental_marking()->request_type() ==
1223 IncrementalMarking::COMPLETE_MARKING) {
1224 incremental_marking()->reset_request_type();
1225 CollectAllGarbage(current_gc_flags_,
1226 GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
1227 current_gc_callback_flags_);
1228 } else if (incremental_marking()->request_type() ==
1229 IncrementalMarking::FINALIZATION &&
1230 incremental_marking()->IsMarking() &&
1231 !incremental_marking()->finalize_marking_completed()) {
1232 incremental_marking()->reset_request_type();
1233 FinalizeIncrementalMarkingIncrementally(
1234 GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
1235 }
1236 }
1237
1238 void Heap::ScheduleScavengeTaskIfNeeded() {
1239 DCHECK_NOT_NULL(scavenge_job_);
1240 scavenge_job_->ScheduleTaskIfNeeded(this);
1241 }
1242
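// The two helpers below pick the timer histogram for this GC based on the
// collector type, whether the isolate is in the background, and whether
// incremental marking or memory reduction is active.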
1243 TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
1244 if (IsYoungGenerationCollector(collector)) {
1245 if (isolate_->IsIsolateInBackground()) {
1246 return isolate_->counters()->gc_scavenger_background();
1247 }
1248 return isolate_->counters()->gc_scavenger_foreground();
1249 } else {
1250 if (!incremental_marking()->IsStopped()) {
1251 if (ShouldReduceMemory()) {
1252 if (isolate_->IsIsolateInBackground()) {
1253 return isolate_->counters()->gc_finalize_reduce_memory_background();
1254 }
1255 return isolate_->counters()->gc_finalize_reduce_memory_foreground();
1256 } else {
1257 if (isolate_->IsIsolateInBackground()) {
1258 return isolate_->counters()->gc_finalize_background();
1259 }
1260 return isolate_->counters()->gc_finalize_foreground();
1261 }
1262 } else {
1263 if (isolate_->IsIsolateInBackground()) {
1264 return isolate_->counters()->gc_compactor_background();
1265 }
1266 return isolate_->counters()->gc_compactor_foreground();
1267 }
1268 }
1269 }
1270
1271 TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
1272 if (IsYoungGenerationCollector(collector)) {
1273 return isolate_->counters()->gc_scavenger();
1274 }
1275 if (incremental_marking()->IsStopped()) {
1276 return isolate_->counters()->gc_compactor();
1277 }
1278 if (ShouldReduceMemory()) {
1279 return isolate_->counters()->gc_finalize_reduce_memory();
1280 }
1281 if (incremental_marking()->IsMarking() &&
1282 incremental_marking()->local_marking_worklists()->IsPerContextMode()) {
1283 return isolate_->counters()->gc_finalize_measure_memory();
1284 }
1285 return isolate_->counters()->gc_finalize();
1286 }
1287
1288 void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
1289 const v8::GCCallbackFlags gc_callback_flags) {
1290 // Since we are ignoring the return value, the exact choice of space does
1291 // not matter, so long as we do not specify NEW_SPACE, which would not
1292 // cause a full GC.
1293 set_current_gc_flags(flags);
1294 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
1295 set_current_gc_flags(kNoGCFlags);
1296 }
1297
1298 namespace {
1299
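// Helpers for --trace-duplicate-threshold-kb: CompareWords orders equal-sized
// objects by their tagged contents so that ReportDuplicates can group
// byte-identical objects and print groups exceeding the threshold.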
1300 intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
1301 int slots = size / kTaggedSize;
1302 DCHECK_EQ(a.Size(), size);
1303 DCHECK_EQ(b.Size(), size);
1304 Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a.address());
1305 Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b.address());
1306 for (int i = 0; i < slots; i++) {
1307 if (*slot_a != *slot_b) {
1308 return *slot_a - *slot_b;
1309 }
1310 slot_a++;
1311 slot_b++;
1312 }
1313 return 0;
1314 }
1315
1316 void ReportDuplicates(int size, std::vector<HeapObject>* objects) {
1317 if (objects->size() == 0) return;
1318
1319 sort(objects->begin(), objects->end(), [size](HeapObject a, HeapObject b) {
1320 intptr_t c = CompareWords(size, a, b);
1321 if (c != 0) return c < 0;
1322 return a < b;
1323 });
1324
1325 std::vector<std::pair<int, HeapObject>> duplicates;
1326 HeapObject current = (*objects)[0];
1327 int count = 1;
1328 for (size_t i = 1; i < objects->size(); i++) {
1329 if (CompareWords(size, current, (*objects)[i]) == 0) {
1330 count++;
1331 } else {
1332 if (count > 1) {
1333 duplicates.push_back(std::make_pair(count - 1, current));
1334 }
1335 count = 1;
1336 current = (*objects)[i];
1337 }
1338 }
1339 if (count > 1) {
1340 duplicates.push_back(std::make_pair(count - 1, current));
1341 }
1342
1343 int threshold = FLAG_trace_duplicate_threshold_kb * KB;
1344
1345 sort(duplicates.begin(), duplicates.end());
1346 for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
1347 int duplicate_bytes = it->first * size;
1348 if (duplicate_bytes < threshold) break;
1349 PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
1350 duplicate_bytes / KB);
1351 PrintF("Sample object: ");
1352 it->second.Print();
1353 PrintF("============================\n");
1354 }
1355 }
1356 } // anonymous namespace
1357
1358 void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
1359 // Since we are ignoring the return value, the exact choice of space does
1360 // not matter, so long as we do not specify NEW_SPACE, which would not
1361 // cause a full GC.
1362 // Major GC would invoke weak handle callbacks on weakly reachable
1363 // handles, but won't collect weakly reachable objects until next
1364 // major GC. Therefore, if we collect aggressively and a weak handle callback
1365 // has been invoked, we rerun major GC to release objects which have become
1366 // garbage.
1367 // Note: as weak callbacks can execute arbitrary code, we cannot
1368 // hope that eventually there will be no weak callback invocations.
1369 // Therefore stop recollecting after several attempts.
1370 if (gc_reason == GarbageCollectionReason::kLastResort) {
1371 InvokeNearHeapLimitCallback();
1372 }
1373 RuntimeCallTimerScope runtime_timer(
1374 isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
1375
1376 // The optimizing compiler may be unnecessarily holding on to memory.
1377 isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1378 isolate()->ClearSerializerData();
1379 set_current_gc_flags(
1380 kReduceMemoryFootprintMask |
1381 (gc_reason == GarbageCollectionReason::kLowMemoryNotification ? kForcedGC
1382 : 0));
1383 isolate_->compilation_cache()->Clear();
1384 const int kMaxNumberOfAttempts = 7;
1385 const int kMinNumberOfAttempts = 2;
1386 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
1387 if (!CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags) &&
1388 attempt + 1 >= kMinNumberOfAttempts) {
1389 break;
1390 }
1391 }
1392
1393 set_current_gc_flags(kNoGCFlags);
1394 new_space_->Shrink();
1395 new_lo_space_->SetCapacity(new_space_->Capacity() *
1396 kNewLargeObjectSpaceToSemiSpaceRatio);
1397 UncommitFromSpace();
1398 EagerlyFreeExternalMemory();
1399
1400 if (FLAG_trace_duplicate_threshold_kb) {
1401 std::map<int, std::vector<HeapObject>> objects_by_size;
1402 PagedSpaceIterator spaces(this);
1403 for (PagedSpace* space = spaces.Next(); space != nullptr;
1404 space = spaces.Next()) {
1405 PagedSpaceObjectIterator it(this, space);
1406 for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
1407 objects_by_size[obj.Size()].push_back(obj);
1408 }
1409 }
1410 {
1411 LargeObjectSpaceObjectIterator it(lo_space());
1412 for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
1413 objects_by_size[obj.Size()].push_back(obj);
1414 }
1415 }
1416 for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
1417 ++it) {
1418 ReportDuplicates(it->first, &it->second);
1419 }
1420 }
1421 }
1422
1423 void Heap::PreciseCollectAllGarbage(int flags,
1424 GarbageCollectionReason gc_reason,
1425 const GCCallbackFlags gc_callback_flags) {
1426 if (!incremental_marking()->IsStopped()) {
1427 FinalizeIncrementalMarkingAtomically(gc_reason);
1428 }
1429 CollectAllGarbage(flags, gc_reason, gc_callback_flags);
1430 }
1431
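// Responds to growth of embedder-reported external memory: beyond the hard
// limit a full GC is triggered immediately; otherwise incremental marking is
// started if possible, or advanced by a small step.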
1432 void Heap::ReportExternalMemoryPressure() {
1433 const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
1434 static_cast<GCCallbackFlags>(
1435 kGCCallbackFlagSynchronousPhantomCallbackProcessing |
1436 kGCCallbackFlagCollectAllExternalMemory);
1437 int64_t current = external_memory_.total();
1438 int64_t baseline = external_memory_.low_since_mark_compact();
1439 int64_t limit = external_memory_.limit();
1440 TRACE_EVENT2(
1441 "devtools.timeline,v8", "V8.ExternalMemoryPressure", "external_memory_mb",
1442 static_cast<int>((current - baseline) / MB), "external_memory_limit_mb",
1443 static_cast<int>((limit - baseline) / MB));
1444 if (current > baseline + external_memory_hard_limit()) {
1445 CollectAllGarbage(
1446 kReduceMemoryFootprintMask,
1447 GarbageCollectionReason::kExternalMemoryPressure,
1448 static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
1449 kGCCallbackFlagsForExternalMemory));
1450 return;
1451 }
1452 if (incremental_marking()->IsStopped()) {
1453 if (incremental_marking()->CanBeActivated()) {
1454 StartIncrementalMarking(GCFlagsForIncrementalMarking(),
1455 GarbageCollectionReason::kExternalMemoryPressure,
1456 kGCCallbackFlagsForExternalMemory);
1457 } else {
1458 CollectAllGarbage(i::Heap::kNoGCFlags,
1459 GarbageCollectionReason::kExternalMemoryPressure,
1460 kGCCallbackFlagsForExternalMemory);
1461 }
1462 } else {
1463 // Incremental marking is turned on and has already been started.
1464 const double kMinStepSize = 5;
1465 const double kMaxStepSize = 10;
1466 const double ms_step = Min(
1467 kMaxStepSize,
1468 Max(kMinStepSize, static_cast<double>(current) / limit * kMinStepSize));
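// Worked example (numbers illustrative only): with current == 1.5 * limit,
// ms_step = Min(10, Max(5, 1.5 * 5)) = 7.5 ms, i.e. the marking step grows
// with external memory pressure but stays clamped to [kMinStepSize,
// kMaxStepSize].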
1469 const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
1470 // Extend the gc callback flags with external memory flags.
1471 current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
1472 current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
1473 incremental_marking()->AdvanceWithDeadline(
1474 deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
1475 }
1476 }
1477
1478 int64_t Heap::external_memory_limit() { return external_memory_.limit(); }
1479
1480 void Heap::EnsureFillerObjectAtTop() {
1481 // There may be an allocation memento behind objects in new space. Upon
1482 // evacuation of a non-full new space (or if we are on the last page) there
1483 // may be uninitialized memory behind top. We fill the remainder of the page
1484 // with a filler.
1485 Address to_top = new_space_->top();
1486 Page* page = Page::FromAddress(to_top - kTaggedSize);
1487 if (page->Contains(to_top)) {
1488 int remaining_in_page = static_cast<int>(page->area_end() - to_top);
1489 CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
1490 }
1491 }
1492
1493 Heap::DevToolsTraceEventScope::DevToolsTraceEventScope(Heap* heap,
1494 const char* event_name,
1495 const char* event_type)
1496 : heap_(heap), event_name_(event_name) {
1497 TRACE_EVENT_BEGIN2("devtools.timeline,v8", event_name_, "usedHeapSizeBefore",
1498 heap_->SizeOfObjects(), "type", event_type);
1499 }
1500
1501 Heap::DevToolsTraceEventScope::~DevToolsTraceEventScope() {
1502 TRACE_EVENT_END1("devtools.timeline,v8", event_name_, "usedHeapSizeAfter",
1503 heap_->SizeOfObjects());
1504 }
1505
1506 bool Heap::CollectGarbage(AllocationSpace space,
1507 GarbageCollectionReason gc_reason,
1508 const v8::GCCallbackFlags gc_callback_flags) {
1509 const char* collector_reason = nullptr;
1510 GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
1511 is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
1512 current_gc_flags_ & kForcedGC ||
1513 force_gc_on_next_allocation_;
1514 if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
1515
1516 DevToolsTraceEventScope devtools_trace_event_scope(
1517 this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
1518 GarbageCollectionReasonToString(gc_reason));
1519
1520 // Filter out on-stack references below this method.
1521 isolate()
1522 ->global_handles()
1523 ->CleanupOnStackReferencesBelowCurrentStackPosition();
1524
1525 // Ensure that all pending phantom callbacks are invoked.
1526 isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
1527
1528 // The VM is in the GC state until exiting this function.
1529 VMState<GC> state(isolate());
1530
1531 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1532 // Reset the allocation timeout, but make sure to allow at least a few
1533 // allocations after a collection. The reason for this is that we have a lot
1534 // of allocation sequences and we assume that a garbage collection will allow
1535 // the subsequent allocation attempts to go through.
1536 if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
1537 allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
1538 }
1539 #endif
1540
1541 EnsureFillerObjectAtTop();
1542
1543 if (IsYoungGenerationCollector(collector) &&
1544 !incremental_marking()->IsStopped()) {
1545 if (FLAG_trace_incremental_marking) {
1546 isolate()->PrintWithTimestamp(
1547 "[IncrementalMarking] Scavenge during marking.\n");
1548 }
1549 }
1550
1551 size_t freed_global_handles = 0;
1552
1553 size_t committed_memory_before = 0;
1554
1555 if (collector == MARK_COMPACTOR) {
1556 committed_memory_before = CommittedOldGenerationMemory();
1557 }
1558
1559 {
1560 tracer()->Start(collector, gc_reason, collector_reason);
1561 DCHECK(AllowHeapAllocation::IsAllowed());
1562 DCHECK(AllowGarbageCollection::IsAllowed());
1563 DisallowHeapAllocation no_allocation_during_gc;
1564 DisallowGarbageCollection no_gc_during_gc;
1565 GarbageCollectionPrologue();
1566
1567 {
1568 TimedHistogram* gc_type_timer = GCTypeTimer(collector);
1569 TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
1570 TRACE_EVENT0("v8", gc_type_timer->name());
1571
1572 TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
1573 OptionalTimedHistogramScopeMode mode =
1574 isolate_->IsMemorySavingsModeActive()
1575 ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
1576 : OptionalTimedHistogramScopeMode::TAKE_TIME;
1577 OptionalTimedHistogramScope histogram_timer_priority_scope(
1578 gc_type_priority_timer, isolate_, mode);
1579
1580 if (!IsYoungGenerationCollector(collector)) {
1581 PROFILE(isolate_, CodeMovingGCEvent());
1582 }
1583
1584 GCType gc_type = collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact
1585 : kGCTypeScavenge;
1586 {
1587 GCCallbacksScope scope(this);
1588 // Temporarily override any embedder stack state, as callbacks may create
1589 // their own state on the stack and recursively trigger GC.
1590 EmbedderStackStateScope embedder_scope(
1591 local_embedder_heap_tracer(),
1592 EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
1593 if (scope.CheckReenter()) {
1594 AllowHeapAllocation allow_allocation;
1595 AllowGarbageCollection allow_gc;
1596 AllowJavascriptExecution allow_js(isolate());
1597 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
1598 VMState<EXTERNAL> state(isolate_);
1599 HandleScope handle_scope(isolate_);
1600 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1601 }
1602 }
1603
1604 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
1605 tp_heap_->CollectGarbage();
1606 } else {
1607 freed_global_handles +=
1608 PerformGarbageCollection(collector, gc_callback_flags);
1609 }
1610 // Clear is_current_gc_forced now that the current GC is complete. Do this
1611 // before GarbageCollectionEpilogue() since that could trigger another
1612 // unforced GC.
1613 is_current_gc_forced_ = false;
1614
1615 {
1616 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
1617 gc_post_processing_depth_++;
1618 {
1619 AllowHeapAllocation allow_allocation;
1620 AllowGarbageCollection allow_gc;
1621 AllowJavascriptExecution allow_js(isolate());
1622 freed_global_handles +=
1623 isolate_->global_handles()->PostGarbageCollectionProcessing(
1624 collector, gc_callback_flags);
1625 }
1626 gc_post_processing_depth_--;
1627 }
1628
1629 {
1630 GCCallbacksScope scope(this);
1631 if (scope.CheckReenter()) {
1632 AllowHeapAllocation allow_allocation;
1633 AllowGarbageCollection allow_gc;
1634 AllowJavascriptExecution allow_js(isolate());
1635 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
1636 VMState<EXTERNAL> state(isolate_);
1637 HandleScope handle_scope(isolate_);
1638 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1639 }
1640 }
1641 if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
1642 tracer()->RecordGCPhasesHistograms(gc_type_timer);
1643 }
1644 }
1645
1646 GarbageCollectionEpilogue();
1647 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
1648 isolate()->CheckDetachedContextsAfterGC();
1649 }
1650
1651 if (collector == MARK_COMPACTOR) {
1652 // Calculate used memory first, then committed memory. The following code
1653 // assumes that committed >= used, which might not hold when these values
1654 // are calculated in the wrong order and background threads allocate
1655 // in between.
1656 size_t used_memory_after = OldGenerationSizeOfObjects();
1657 size_t committed_memory_after = CommittedOldGenerationMemory();
1658 MemoryReducer::Event event;
1659 event.type = MemoryReducer::kMarkCompact;
1660 event.time_ms = MonotonicallyIncreasingTimeInMs();
1661 // Trigger one more GC if
1662 // - this GC decreased committed memory, or
1663 // - there is high fragmentation.
1664 event.next_gc_likely_to_collect_more =
1665 (committed_memory_before > committed_memory_after + MB) ||
1666 HasHighFragmentation(used_memory_after, committed_memory_after);
1667 event.committed_memory = committed_memory_after;
1668 if (deserialization_complete_) {
1669 memory_reducer_->NotifyMarkCompact(event);
1670 }
1671 if (initial_max_old_generation_size_ < max_old_generation_size() &&
1672 used_memory_after < initial_max_old_generation_size_threshold_) {
1673 set_max_old_generation_size(initial_max_old_generation_size_);
1674 }
1675 }
1676
1677 tracer()->Stop(collector);
1678 }
1679
1680 if (collector == MARK_COMPACTOR &&
1681 (gc_callback_flags & (kGCCallbackFlagForced |
1682 kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
1683 isolate()->CountUsage(v8::Isolate::kForcedGC);
1684 }
1685
1686 // Start incremental marking for the next cycle. We do this only for the
1687 // scavenger to avoid a loop where mark-compact causes another mark-compact.
1688 if (IsYoungGenerationCollector(collector)) {
1689 StartIncrementalMarkingIfAllocationLimitIsReached(
1690 GCFlagsForIncrementalMarking(),
1691 kGCCallbackScheduleIdleGarbageCollection);
1692 }
1693
1694 if (!CanExpandOldGeneration(0)) {
1695 InvokeNearHeapLimitCallback();
1696 if (!CanExpandOldGeneration(0)) {
1697 FatalProcessOutOfMemory("Reached heap limit");
1698 }
1699 }
1700
1701 return freed_global_handles > 0;
1702 }
1703
1704
1705 int Heap::NotifyContextDisposed(bool dependant_context) {
1706 if (!dependant_context) {
1707 tracer()->ResetSurvivalEvents();
1708 old_generation_size_configured_ = false;
1709 set_old_generation_allocation_limit(initial_old_generation_size_);
1710 MemoryReducer::Event event;
1711 event.type = MemoryReducer::kPossibleGarbage;
1712 event.time_ms = MonotonicallyIncreasingTimeInMs();
1713 memory_reducer_->NotifyPossibleGarbage(event);
1714 }
1715 isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1716 if (!isolate()->context().is_null()) {
1717 RemoveDirtyFinalizationRegistriesOnContext(isolate()->raw_native_context());
1718 isolate()->raw_native_context().set_retained_maps(
1719 ReadOnlyRoots(this).empty_weak_array_list());
1720 }
1721
1722 tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
1723 return ++contexts_disposed_;
1724 }
1725
1726 void Heap::StartIncrementalMarking(int gc_flags,
1727 GarbageCollectionReason gc_reason,
1728 GCCallbackFlags gc_callback_flags) {
1729 DCHECK(incremental_marking()->IsStopped());
1730 SafepointScope safepoint(this);
1731 set_current_gc_flags(gc_flags);
1732 current_gc_callback_flags_ = gc_callback_flags;
1733 incremental_marking()->Start(gc_reason);
1734 }
1735
1736 void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
1737 int gc_flags, const GCCallbackFlags gc_callback_flags) {
1738 if (incremental_marking()->IsStopped()) {
1739 IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
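// The soft limit merely schedules an incremental marking job (marking then
// starts from a posted task), whereas the hard limit starts incremental
// marking immediately on this thread.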
1740 if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
1741 incremental_marking()->incremental_marking_job()->ScheduleTask(this);
1742 } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
1743 StartIncrementalMarking(
1744 gc_flags,
1745 OldGenerationSpaceAvailable() <= new_space_->Capacity()
1746 ? GarbageCollectionReason::kAllocationLimit
1747 : GarbageCollectionReason::kGlobalAllocationLimit,
1748 gc_callback_flags);
1749 }
1750 }
1751 }
1752
1753 void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
1754 if (!incremental_marking()->IsStopped() ||
1755 !incremental_marking()->CanBeActivated()) {
1756 return;
1757 }
1758
1759 const size_t old_generation_space_available = OldGenerationSpaceAvailable();
1760 const base::Optional<size_t> global_memory_available =
1761 GlobalMemoryAvailable();
1762
1763 if (old_generation_space_available < new_space_->Capacity() ||
1764 (global_memory_available &&
1765 *global_memory_available < new_space_->Capacity())) {
1766 incremental_marking()->incremental_marking_job()->ScheduleTask(this);
1767 }
1768 }
1769
1770 void Heap::StartIdleIncrementalMarking(
1771 GarbageCollectionReason gc_reason,
1772 const GCCallbackFlags gc_callback_flags) {
1773 StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
1774 gc_callback_flags);
1775 }
1776
1777 void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot,
1778 const ObjectSlot src_slot, int len,
1779 WriteBarrierMode mode) {
1780 DCHECK_NE(len, 0);
1781 DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
1782 const ObjectSlot dst_end(dst_slot + len);
1783 // Ensure no range overflow.
1784 DCHECK(dst_slot < dst_end);
1785 DCHECK(src_slot < src_slot + len);
1786
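// MoveRange() allows the source and destination ranges to overlap. When the
// destination starts below the source we copy forwards; otherwise we copy
// backwards so overlapping source slots are read before they are overwritten
// (mirroring memmove semantics).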
1787 if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
1788 if (dst_slot < src_slot) {
1789 // Copy tagged values forward using relaxed load/stores that do not
1790 // involve value decompression.
1791 const AtomicSlot atomic_dst_end(dst_end);
1792 AtomicSlot dst(dst_slot);
1793 AtomicSlot src(src_slot);
1794 while (dst < atomic_dst_end) {
1795 *dst = *src;
1796 ++dst;
1797 ++src;
1798 }
1799 } else {
1800 // Copy tagged values backwards using relaxed load/stores that do not
1801 // involve value decompression.
1802 const AtomicSlot atomic_dst_begin(dst_slot);
1803 AtomicSlot dst(dst_slot + len - 1);
1804 AtomicSlot src(src_slot + len - 1);
1805 while (dst >= atomic_dst_begin) {
1806 *dst = *src;
1807 --dst;
1808 --src;
1809 }
1810 }
1811 } else {
1812 MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
1813 }
1814 if (mode == SKIP_WRITE_BARRIER) return;
1815 WriteBarrierForRange(dst_object, dst_slot, dst_end);
1816 }
1817
1818 // Instantiate Heap::CopyRange() for ObjectSlot and MaybeObjectSlot.
1819 template void Heap::CopyRange<ObjectSlot>(HeapObject dst_object,
1820 ObjectSlot dst_slot,
1821 ObjectSlot src_slot, int len,
1822 WriteBarrierMode mode);
1823 template void Heap::CopyRange<MaybeObjectSlot>(HeapObject dst_object,
1824 MaybeObjectSlot dst_slot,
1825 MaybeObjectSlot src_slot,
1826 int len, WriteBarrierMode mode);
1827
1828 template <typename TSlot>
1829 void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot,
1830 const TSlot src_slot, int len, WriteBarrierMode mode) {
1831 DCHECK_NE(len, 0);
1832
1833 DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
1834 const TSlot dst_end(dst_slot + len);
1835 // Ensure ranges do not overlap.
1836 DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot);
1837
1838 if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
1839 // Copy tagged values using relaxed load/stores that do not involve value
1840 // decompression.
1841 const AtomicSlot atomic_dst_end(dst_end);
1842 AtomicSlot dst(dst_slot);
1843 AtomicSlot src(src_slot);
1844 while (dst < atomic_dst_end) {
1845 *dst = *src;
1846 ++dst;
1847 ++src;
1848 }
1849 } else {
1850 MemCopy(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
1851 }
1852 if (mode == SKIP_WRITE_BARRIER) return;
1853 WriteBarrierForRange(dst_object, dst_slot, dst_end);
1854 }
1855
1856 #ifdef VERIFY_HEAP
1857 // Helper class for verifying the string table.
1858 class StringTableVerifier : public RootVisitor {
1859 public:
1860   explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
1861
1862   void VisitRootPointers(Root root, const char* description,
1863 FullObjectSlot start, FullObjectSlot end) override {
1864 UNREACHABLE();
1865 }
1866   void VisitRootPointers(Root root, const char* description,
1867 OffHeapObjectSlot start,
1868 OffHeapObjectSlot end) override {
1869 // Visit all HeapObject pointers in [start, end).
1870 for (OffHeapObjectSlot p = start; p < end; ++p) {
1871 Object o = p.load(isolate_);
1872 DCHECK(!HasWeakHeapObjectTag(o));
1873 if (o.IsHeapObject()) {
1874 HeapObject object = HeapObject::cast(o);
1875 // Check that the string is actually internalized.
1876 CHECK(object.IsInternalizedString());
1877 }
1878 }
1879 }
1880
1881 private:
1882 Isolate* isolate_;
1883 };
1884
1885 static void VerifyStringTable(Isolate* isolate) {
1886 StringTableVerifier verifier(isolate);
1887 isolate->string_table()->IterateElements(&verifier);
1888 }
1889 #endif // VERIFY_HEAP
1890
1891 void Heap::EnsureFromSpaceIsCommitted() {
1892 if (new_space_->CommitFromSpaceIfNeeded()) return;
1893
1894 // Committing memory to from space failed.
1895 // Memory is exhausted and we will die.
1896 FatalProcessOutOfMemory("Committing semi space failed.");
1897 }
1898
1899 bool Heap::CollectionRequested() {
1900 return collection_barrier_->CollectionRequested();
1901 }
1902
1903 void Heap::RequestCollectionBackground(LocalHeap* local_heap) {
1904 if (local_heap->is_main_thread()) {
1905 CollectAllGarbage(current_gc_flags_,
1906 GarbageCollectionReason::kBackgroundAllocationFailure,
1907 current_gc_callback_flags_);
1908 } else {
1909 collection_barrier_->AwaitCollectionBackground();
1910 }
1911 }
1912
1913 void Heap::CheckCollectionRequested() {
1914 if (!collection_barrier_->CollectionRequested()) return;
1915
1916 CollectAllGarbage(current_gc_flags_,
1917 GarbageCollectionReason::kBackgroundAllocationFailure,
1918 current_gc_callback_flags_);
1919 }
1920
1921 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1922 if (start_new_space_size == 0) return;
1923
1924 promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1925 static_cast<double>(start_new_space_size) * 100);
1926
1927 if (previous_semi_space_copied_object_size_ > 0) {
1928 promotion_rate_ =
1929 (static_cast<double>(promoted_objects_size_) /
1930 static_cast<double>(previous_semi_space_copied_object_size_) * 100);
1931 } else {
1932 promotion_rate_ = 0;
1933 }
1934
1935 semi_space_copied_rate_ =
1936 (static_cast<double>(semi_space_copied_object_size_) /
1937 static_cast<double>(start_new_space_size) * 100);
1938
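// Illustrative example: if 8 MB was live in the young generation at the start
// of the GC, 2 MB was promoted and 1 MB was copied within the semi-spaces,
// then promotion_ratio_ = 25%, semi_space_copied_rate_ = 12.5%, and the
// survival ratio reported below is 37.5%.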
1939 double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1940 tracer()->AddSurvivalRatio(survival_rate);
1941 }
1942
1943 size_t Heap::PerformGarbageCollection(
1944 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1945 DisallowJavascriptExecution no_js(isolate());
1946 base::Optional<SafepointScope> optional_safepoint_scope;
1947
1948 // Stop time-to-collection timer before safepoint - we do not want to measure
1949 // time for safepointing.
1950 collection_barrier_->StopTimeToCollectionTimer();
1951
1952 if (FLAG_local_heaps) {
1953 optional_safepoint_scope.emplace(this);
1954 }
1955 #ifdef VERIFY_HEAP
1956 if (FLAG_verify_heap) {
1957 Verify();
1958 }
1959 #endif
1960 tracer()->StartInSafepoint();
1961
1962 GarbageCollectionPrologueInSafepoint();
1963
1964 EnsureFromSpaceIsCommitted();
1965
1966 size_t start_young_generation_size =
1967 Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
1968
1969 switch (collector) {
1970 case MARK_COMPACTOR:
1971 MarkCompact();
1972 break;
1973 case MINOR_MARK_COMPACTOR:
1974 MinorMarkCompact();
1975 break;
1976 case SCAVENGER:
1977 Scavenge();
1978 break;
1979 }
1980
1981 ProcessPretenuringFeedback();
1982
1983 UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
1984 ConfigureInitialOldGenerationSize();
1985
1986 if (collector != MARK_COMPACTOR) {
1987 // Objects that died in the new space might have been accounted
1988 // as bytes marked ahead of schedule by the incremental marker.
1989 incremental_marking()->UpdateMarkedBytesAfterScavenge(
1990 start_young_generation_size - SurvivedYoungObjectSize());
1991 }
1992
1993 if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
1994 ComputeFastPromotionMode();
1995 }
1996
1997 isolate_->counters()->objs_since_last_young()->Set(0);
1998
1999 isolate_->eternal_handles()->PostGarbageCollectionProcessing();
2000
2001 // Update relocatables.
2002 Relocatable::PostGarbageCollectionProcessing(isolate_);
2003
2004 size_t freed_global_handles;
2005
2006 {
2007 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
2008 // First-round weak callbacks are not supposed to allocate or trigger
2009 // nested GCs.
2010 freed_global_handles =
2011 isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
2012 }
2013
2014 if (collector == MARK_COMPACTOR) {
2015 TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
2016 // TraceEpilogue may trigger operations that invalidate global handles. It
2017 // has to be called *after* all other operations that potentially touch and
2018 // reset global handles. It is also still part of the main garbage
2019 // collection pause and thus needs to be called *before* any operation that
2020 // can potentially trigger recursive garbage collection.
2021 local_embedder_heap_tracer()->TraceEpilogue();
2022 }
2023
2024 #ifdef VERIFY_HEAP
2025 if (FLAG_verify_heap) {
2026 Verify();
2027 }
2028 #endif
2029
2030 RecomputeLimits(collector);
2031
2032 GarbageCollectionEpilogueInSafepoint(collector);
2033
2034 tracer()->StopInSafepoint();
2035
2036 return freed_global_handles;
2037 }
2038
2039 void Heap::RecomputeLimits(GarbageCollector collector) {
2040 if (!((collector == MARK_COMPACTOR) ||
2041 (HasLowYoungGenerationAllocationRate() &&
2042 old_generation_size_configured_))) {
2043 return;
2044 }
2045
2046 double v8_gc_speed =
2047 tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
2048 double v8_mutator_speed =
2049 tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
2050 double v8_growing_factor = MemoryController<V8HeapTrait>::GrowingFactor(
2051 this, max_old_generation_size(), v8_gc_speed, v8_mutator_speed);
2052 double global_growing_factor = 0;
2053 if (UseGlobalMemoryScheduling()) {
2054 DCHECK_NOT_NULL(local_embedder_heap_tracer());
2055 double embedder_gc_speed = tracer()->EmbedderSpeedInBytesPerMillisecond();
2056 double embedder_speed =
2057 tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond();
2058 double embedder_growing_factor =
2059 (embedder_gc_speed > 0 && embedder_speed > 0)
2060 ? MemoryController<GlobalMemoryTrait>::GrowingFactor(
2061 this, max_global_memory_size_, embedder_gc_speed,
2062 embedder_speed)
2063 : 0;
2064 global_growing_factor = Max(v8_growing_factor, embedder_growing_factor);
2065 }
2066
2067 size_t old_gen_size = OldGenerationSizeOfObjects();
2068 size_t new_space_capacity = new_space()->Capacity();
2069 HeapGrowingMode mode = CurrentHeapGrowingMode();
2070
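// Roughly speaking, the controllers below derive the new allocation limits as
// "current size * growing factor + new space capacity", clamped to the
// configured min/max bounds; see MemoryController::CalculateAllocationLimit
// for the exact formula (this description is a simplification).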
2071 if (collector == MARK_COMPACTOR) {
2072 external_memory_.ResetAfterGC();
2073
2074 set_old_generation_allocation_limit(
2075 MemoryController<V8HeapTrait>::CalculateAllocationLimit(
2076 this, old_gen_size, min_old_generation_size_,
2077 max_old_generation_size(), new_space_capacity, v8_growing_factor,
2078 mode));
2079 if (UseGlobalMemoryScheduling()) {
2080 DCHECK_GT(global_growing_factor, 0);
2081 global_allocation_limit_ =
2082 MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
2083 this, GlobalSizeOfObjects(), min_global_memory_size_,
2084 max_global_memory_size_, new_space_capacity,
2085 global_growing_factor, mode);
2086 }
2087 CheckIneffectiveMarkCompact(
2088 old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
2089 } else if (HasLowYoungGenerationAllocationRate() &&
2090 old_generation_size_configured_) {
2091 size_t new_old_generation_limit =
2092 MemoryController<V8HeapTrait>::CalculateAllocationLimit(
2093 this, old_gen_size, min_old_generation_size_,
2094 max_old_generation_size(), new_space_capacity, v8_growing_factor,
2095 mode);
2096 if (new_old_generation_limit < old_generation_allocation_limit()) {
2097 set_old_generation_allocation_limit(new_old_generation_limit);
2098 }
2099 if (UseGlobalMemoryScheduling()) {
2100 DCHECK_GT(global_growing_factor, 0);
2101 size_t new_global_limit =
2102 MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
2103 this, GlobalSizeOfObjects(), min_global_memory_size_,
2104 max_global_memory_size_, new_space_capacity,
2105 global_growing_factor, mode);
2106 if (new_global_limit < global_allocation_limit_) {
2107 global_allocation_limit_ = new_global_limit;
2108 }
2109 }
2110 }
2111 }
2112
2113 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
2114 RuntimeCallTimerScope runtime_timer(
2115 isolate(), RuntimeCallCounterId::kGCPrologueCallback);
2116 for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
2117 if (gc_type & info.gc_type) {
2118 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
2119 info.callback(isolate, gc_type, flags, info.data);
2120 }
2121 }
2122 }
2123
2124 void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
2125 RuntimeCallTimerScope runtime_timer(
2126 isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
2127 for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
2128 if (gc_type & info.gc_type) {
2129 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
2130 info.callback(isolate, gc_type, flags, info.data);
2131 }
2132 }
2133 }
2134
2135
2136 void Heap::MarkCompact() {
2137 PauseAllocationObserversScope pause_observers(this);
2138
2139 SetGCState(MARK_COMPACT);
2140
2141 LOG(isolate_, ResourceEvent("markcompact", "begin"));
2142
2143 CodeSpaceMemoryModificationScope code_modifcation(this);
2144
2145 UpdateOldGenerationAllocationCounter();
2146 uint64_t size_of_objects_before_gc = SizeOfObjects();
2147
2148 mark_compact_collector()->Prepare();
2149
2150 ms_count_++;
2151
2152 MarkCompactPrologue();
2153
2154 mark_compact_collector()->CollectGarbage();
2155
2156 LOG(isolate_, ResourceEvent("markcompact", "end"));
2157
2158 MarkCompactEpilogue();
2159
2160 if (FLAG_allocation_site_pretenuring) {
2161 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
2162 }
2163 old_generation_size_configured_ = true;
2164 // This should be updated before PostGarbageCollectionProcessing, which
2165 // can cause another GC. Take into account the objects promoted during
2166 // GC.
2167 old_generation_allocation_counter_at_last_gc_ +=
2168 static_cast<size_t>(promoted_objects_size_);
2169 old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
2170 global_memory_at_last_gc_ = GlobalSizeOfObjects();
2171 }
2172
2173 void Heap::MinorMarkCompact() {
2174 #ifdef ENABLE_MINOR_MC
2175 DCHECK(FLAG_minor_mc);
2176
2177 PauseAllocationObserversScope pause_observers(this);
2178 SetGCState(MINOR_MARK_COMPACT);
2179 LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
2180
2181 TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
2182 AlwaysAllocateScope always_allocate(this);
2183 IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
2184 incremental_marking());
2185 ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
2186
2187 minor_mark_compact_collector()->CollectGarbage();
2188
2189 LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
2190 SetGCState(NOT_IN_GC);
2191 #else
2192 UNREACHABLE();
2193 #endif // ENABLE_MINOR_MC
2194 }
2195
2196 void Heap::MarkCompactEpilogue() {
2197 TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
2198 SetGCState(NOT_IN_GC);
2199
2200 isolate_->counters()->objs_since_last_full()->Set(0);
2201
2202 incremental_marking()->Epilogue();
2203
2204 DCHECK(incremental_marking()->IsStopped());
2205 }
2206
2207
2208 void Heap::MarkCompactPrologue() {
2209 TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
2210 isolate_->descriptor_lookup_cache()->Clear();
2211 RegExpResultsCache::Clear(string_split_cache());
2212 RegExpResultsCache::Clear(regexp_multiple_cache());
2213
2214 isolate_->compilation_cache()->MarkCompactPrologue();
2215
2216 FlushNumberStringCache();
2217 }
2218
2219
2220 void Heap::CheckNewSpaceExpansionCriteria() {
2221 if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
2222 survived_since_last_expansion_ > new_space_->TotalCapacity()) {
2223 // Grow the size of new space if there is room to grow, and enough data
2224 // has survived scavenge since the last expansion.
2225 new_space_->Grow();
2226 survived_since_last_expansion_ = 0;
2227 }
2228 new_lo_space()->SetCapacity(new_space()->Capacity());
2229 }
2230
2231 void Heap::EvacuateYoungGeneration() {
2232 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
2233 base::MutexGuard guard(relocation_mutex());
2234 ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
2235 if (!FLAG_concurrent_marking) {
2236 DCHECK(fast_promotion_mode_);
2237 DCHECK(CanPromoteYoungAndExpandOldGeneration(0));
2238 }
2239
2240 mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
2241
2242 SetGCState(SCAVENGE);
2243 LOG(isolate_, ResourceEvent("scavenge", "begin"));
2244
2245 // Move pages from new->old generation.
2246 PageRange range(new_space()->first_allocatable_address(), new_space()->top());
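// The iterator is advanced past a page before that page is removed from the
// space and converted, so the removal below does not invalidate the loop
// iterator.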
2247 for (auto it = range.begin(); it != range.end();) {
2248 Page* p = (*++it)->prev_page();
2249 new_space()->from_space().RemovePage(p);
2250 Page::ConvertNewToOld(p);
2251 if (incremental_marking()->IsMarking())
2252 mark_compact_collector()->RecordLiveSlotsOnPage(p);
2253 }
2254
2255 // Reset new space.
2256 if (!new_space()->Rebalance()) {
2257 FatalProcessOutOfMemory("NewSpace::Rebalance");
2258 }
2259 new_space()->ResetLinearAllocationArea();
2260 new_space()->set_age_mark(new_space()->top());
2261
2262 for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) {
2263 LargePage* page = *it;
2264 // Increment has to happen after we save the page, because it is going to
2265 // be removed below.
2266 it++;
2267 lo_space()->PromoteNewLargeObject(page);
2268 }
2269
2270 // Fix up special trackers.
2271 external_string_table_.PromoteYoung();
2272 // GlobalHandles are updated in PostGarbageCollectionProcessing.
2273
2274 size_t promoted = new_space()->Size() + new_lo_space()->Size();
2275 IncrementYoungSurvivorsCounter(promoted);
2276 IncrementPromotedObjectsSize(promoted);
2277 IncrementSemiSpaceCopiedObjectSize(0);
2278
2279 LOG(isolate_, ResourceEvent("scavenge", "end"));
2280 SetGCState(NOT_IN_GC);
2281 }
2282
2283 void Heap::Scavenge() {
2284 if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
2285 tracer()->NotifyYoungGenerationHandling(
2286 YoungGenerationHandling::kFastPromotionDuringScavenge);
2287 EvacuateYoungGeneration();
2288 return;
2289 }
2290 tracer()->NotifyYoungGenerationHandling(
2291 YoungGenerationHandling::kRegularScavenge);
2292
2293 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
2294 base::MutexGuard guard(relocation_mutex());
2295 ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
2296 // There are soft limits in the allocation code, designed to trigger a mark
2297 // sweep collection by failing allocations. There is no sense in trying to
2298 // trigger one during scavenge: scavenge allocations should always succeed.
2299 AlwaysAllocateScope scope(this);
2300
2301 // Bump-pointer allocations done during scavenge are not real allocations.
2302 // Pause the inline allocation steps.
2303 PauseAllocationObserversScope pause_observers(this);
2304 IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
2305 incremental_marking());
2306
2307
2308 mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
2309
2310 SetGCState(SCAVENGE);
2311
2312 // Flip the semispaces. After flipping, to-space is empty and from-space
2313 // holds the live objects.
2314 new_space()->Flip();
2315 new_space()->ResetLinearAllocationArea();
2316
2317 // We also flip the young generation large object space. All large objects
2318 // will be in the from space.
2319 new_lo_space()->Flip();
2320 new_lo_space()->ResetPendingObject();
2321
2322 // Implements Cheney's copying algorithm
2323 LOG(isolate_, ResourceEvent("scavenge", "begin"));
2324
2325 scavenger_collector_->CollectGarbage();
2326
2327 LOG(isolate_, ResourceEvent("scavenge", "end"));
2328
2329 SetGCState(NOT_IN_GC);
2330 }
2331
2332 void Heap::ComputeFastPromotionMode() {
2333 const size_t survived_in_new_space =
2334 survived_last_scavenge_ * 100 / new_space_->Capacity();
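// Illustrative example: if 1.5 MB out of a 2 MB new space survived the last
// scavenge, survived_in_new_space is 75; fast promotion mode is then enabled
// only if that is at least kMinPromotedPercentForFastPromotionMode and the
// other conditions below hold.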
2335 fast_promotion_mode_ =
2336 !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
2337 !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
2338 survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
2339 if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
2340 PrintIsolate(isolate(), "Fast promotion mode: %s survival rate: %zu%%\n",
2341 fast_promotion_mode_ ? "true" : "false",
2342 survived_in_new_space);
2343 }
2344 }
2345
2346 void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
2347 if (unprotected_memory_chunks_registry_enabled_) {
2348 base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
2349 if (unprotected_memory_chunks_.insert(chunk).second) {
2350 chunk->SetReadAndWritable();
2351 }
2352 }
2353 }
2354
2355 void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object) {
2356 UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object));
2357 }
2358
2359 void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
2360 unprotected_memory_chunks_.erase(chunk);
2361 }
2362
2363 void Heap::ProtectUnprotectedMemoryChunks() {
2364 DCHECK(unprotected_memory_chunks_registry_enabled_);
2365 for (auto chunk = unprotected_memory_chunks_.begin();
2366 chunk != unprotected_memory_chunks_.end(); chunk++) {
2367 CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
2368 (*chunk)->SetDefaultCodePermissions();
2369 }
2370 unprotected_memory_chunks_.clear();
2371 }
2372
2373 bool Heap::ExternalStringTable::Contains(String string) {
2374 for (size_t i = 0; i < young_strings_.size(); ++i) {
2375 if (young_strings_[i] == string) return true;
2376 }
2377 for (size_t i = 0; i < old_strings_.size(); ++i) {
2378 if (old_strings_[i] == string) return true;
2379 }
2380 return false;
2381 }
2382
2383 void Heap::UpdateExternalString(String string, size_t old_payload,
2384 size_t new_payload) {
2385 DCHECK(string.IsExternalString());
2386 Page* page = Page::FromHeapObject(string);
2387
2388 if (old_payload > new_payload) {
2389 page->DecrementExternalBackingStoreBytes(
2390 ExternalBackingStoreType::kExternalString, old_payload - new_payload);
2391 } else {
2392 page->IncrementExternalBackingStoreBytes(
2393 ExternalBackingStoreType::kExternalString, new_payload - old_payload);
2394 }
2395 }
2396
2397 String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
2398 FullObjectSlot p) {
2399 HeapObject obj = HeapObject::cast(*p);
2400 MapWord first_word = obj.map_word();
2401
2402 String new_string;
2403
2404 if (InFromPage(obj)) {
2405 if (!first_word.IsForwardingAddress()) {
2406 // Unreachable external string can be finalized.
2407 String string = String::cast(obj);
2408 if (!string.IsExternalString()) {
2409 // Original external string has been internalized.
2410 DCHECK(string.IsThinString());
2411 return String();
2412 }
2413 heap->FinalizeExternalString(string);
2414 return String();
2415 }
2416 new_string = String::cast(first_word.ToForwardingAddress());
2417 } else {
2418 new_string = String::cast(obj);
2419 }
2420
2421 // String is still reachable.
2422 if (new_string.IsThinString()) {
2423 // Filtering Thin strings out of the external string table.
2424 return String();
2425 } else if (new_string.IsExternalString()) {
2426 MemoryChunk::MoveExternalBackingStoreBytes(
2427 ExternalBackingStoreType::kExternalString,
2428 Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
2429 ExternalString::cast(new_string).ExternalPayloadSize());
2430 return new_string;
2431 }
2432
2433 // Internalization can replace external strings with non-external strings.
2434 return new_string.IsExternalString() ? new_string : String();
2435 }
2436
2437 void Heap::ExternalStringTable::VerifyYoung() {
2438 #ifdef DEBUG
2439 std::set<String> visited_map;
2440 std::map<MemoryChunk*, size_t> size_map;
2441 ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2442 for (size_t i = 0; i < young_strings_.size(); ++i) {
2443 String obj = String::cast(young_strings_[i]);
2444 MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2445 DCHECK(mc->InYoungGeneration());
2446 DCHECK(heap_->InYoungGeneration(obj));
2447 DCHECK(!obj.IsTheHole(heap_->isolate()));
2448 DCHECK(obj.IsExternalString());
2449 // Note: we can have repeated elements in the table.
2450 DCHECK_EQ(0, visited_map.count(obj));
2451 visited_map.insert(obj);
2452 size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize();
2453 }
2454 for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2455 it != size_map.end(); it++)
2456 DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2457 #endif
2458 }
2459
2460 void Heap::ExternalStringTable::Verify() {
2461 #ifdef DEBUG
2462 std::set<String> visited_map;
2463 std::map<MemoryChunk*, size_t> size_map;
2464 ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2465 VerifyYoung();
2466 for (size_t i = 0; i < old_strings_.size(); ++i) {
2467 String obj = String::cast(old_strings_[i]);
2468 MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2469 DCHECK(!mc->InYoungGeneration());
2470 DCHECK(!heap_->InYoungGeneration(obj));
2471 DCHECK(!obj.IsTheHole(heap_->isolate()));
2472 DCHECK(obj.IsExternalString());
2473 // Note: we can have repeated elements in the table.
2474 DCHECK_EQ(0, visited_map.count(obj));
2475 visited_map.insert(obj);
2476 size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize();
2477 }
2478 for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2479 it != size_map.end(); it++)
2480 DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2481 #endif
2482 }
2483
2484 void Heap::ExternalStringTable::UpdateYoungReferences(
2485 Heap::ExternalStringTableUpdaterCallback updater_func) {
2486 if (young_strings_.empty()) return;
2487
2488 FullObjectSlot start(young_strings_.data());
2489 FullObjectSlot end(young_strings_.data() + young_strings_.size());
2490 FullObjectSlot last = start;
2491
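// The loop below compacts young_strings_ in place: "last" is the write cursor
// for entries that remain young, promoted strings are appended to
// old_strings_, and dead entries are dropped entirely.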
2492 for (FullObjectSlot p = start; p < end; ++p) {
2493 String target = updater_func(heap_, p);
2494
2495 if (target.is_null()) continue;
2496
2497 DCHECK(target.IsExternalString());
2498
2499 if (InYoungGeneration(target)) {
2500 // String is still in new space. Update the table entry.
2501 last.store(target);
2502 ++last;
2503 } else {
2504 // String got promoted. Move it to the old string list.
2505 old_strings_.push_back(target);
2506 }
2507 }
2508
2509 DCHECK(last <= end);
2510 young_strings_.resize(last - start);
2511 #ifdef VERIFY_HEAP
2512 if (FLAG_verify_heap) {
2513 VerifyYoung();
2514 }
2515 #endif
2516 }
2517
2518 void Heap::ExternalStringTable::PromoteYoung() {
2519 old_strings_.reserve(old_strings_.size() + young_strings_.size());
2520 std::move(std::begin(young_strings_), std::end(young_strings_),
2521 std::back_inserter(old_strings_));
2522 young_strings_.clear();
2523 }
2524
2525 void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
2526 if (!young_strings_.empty()) {
2527 v->VisitRootPointers(
2528 Root::kExternalStringsTable, nullptr,
2529 FullObjectSlot(young_strings_.data()),
2530 FullObjectSlot(young_strings_.data() + young_strings_.size()));
2531 }
2532 }
2533
2534 void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
2535 IterateYoung(v);
2536 if (!old_strings_.empty()) {
2537 v->VisitRootPointers(
2538 Root::kExternalStringsTable, nullptr,
2539 FullObjectSlot(old_strings_.data()),
2540 FullObjectSlot(old_strings_.data() + old_strings_.size()));
2541 }
2542 }
2543
2544 void Heap::UpdateYoungReferencesInExternalStringTable(
2545 ExternalStringTableUpdaterCallback updater_func) {
2546 external_string_table_.UpdateYoungReferences(updater_func);
2547 }
2548
2549 void Heap::ExternalStringTable::UpdateReferences(
2550 Heap::ExternalStringTableUpdaterCallback updater_func) {
2551 if (old_strings_.size() > 0) {
2552 FullObjectSlot start(old_strings_.data());
2553 FullObjectSlot end(old_strings_.data() + old_strings_.size());
2554 for (FullObjectSlot p = start; p < end; ++p)
2555 p.store(updater_func(heap_, p));
2556 }
2557
2558 UpdateYoungReferences(updater_func);
2559 }
2560
2561 void Heap::UpdateReferencesInExternalStringTable(
2562 ExternalStringTableUpdaterCallback updater_func) {
2563 external_string_table_.UpdateReferences(updater_func);
2564 }
2565
2566
2567 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
2568 ProcessNativeContexts(retainer);
2569 ProcessAllocationSites(retainer);
2570 ProcessDirtyJSFinalizationRegistries(retainer);
2571 }
2572
2573
2574 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
2575 ProcessNativeContexts(retainer);
2576 }
2577
2578
2579 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
2580 Object head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
2581 // Update the head of the list of contexts.
2582 set_native_contexts_list(head);
2583 }
2584
2585
2586 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
2587 Object allocation_site_obj =
2588 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
2589 set_allocation_sites_list(allocation_site_obj);
2590 }
2591
2592 void Heap::ProcessDirtyJSFinalizationRegistries(WeakObjectRetainer* retainer) {
2593 Object head = VisitWeakList<JSFinalizationRegistry>(
2594 this, dirty_js_finalization_registries_list(), retainer);
2595 set_dirty_js_finalization_registries_list(head);
2596 // If the list is empty, set the tail to undefined. Otherwise the tail is set
2597 // by WeakListVisitor<JSFinalizationRegistry>::VisitLiveObject.
2598 if (head.IsUndefined(isolate())) {
2599 set_dirty_js_finalization_registries_list_tail(head);
2600 }
2601 }
2602
2603 void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
2604 set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
2605 set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
2606 set_dirty_js_finalization_registries_list(
2607 retainer->RetainAs(dirty_js_finalization_registries_list()));
2608 set_dirty_js_finalization_registries_list_tail(
2609 retainer->RetainAs(dirty_js_finalization_registries_list_tail()));
2610 }
2611
2612 void Heap::ForeachAllocationSite(
2613 Object list, const std::function<void(AllocationSite)>& visitor) {
2614 DisallowHeapAllocation disallow_heap_allocation;
2615 Object current = list;
2616 while (current.IsAllocationSite()) {
2617 AllocationSite site = AllocationSite::cast(current);
2618 visitor(site);
2619 Object current_nested = site.nested_site();
2620 while (current_nested.IsAllocationSite()) {
2621 AllocationSite nested_site = AllocationSite::cast(current_nested);
2622 visitor(nested_site);
2623 current_nested = nested_site.nested_site();
2624 }
2625 current = site.weak_next();
2626 }
2627 }
2628
2629 void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
2630 DisallowHeapAllocation no_allocation_scope;
2631 bool marked = false;
2632
2633 ForeachAllocationSite(allocation_sites_list(),
2634 [&marked, allocation, this](AllocationSite site) {
2635 if (site.GetAllocationType() == allocation) {
2636 site.ResetPretenureDecision();
2637 site.set_deopt_dependent_code(true);
2638 marked = true;
2639 RemoveAllocationSitePretenuringFeedback(site);
2640 return;
2641 }
2642 });
2643 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
2644 }
2645
2646 void Heap::EvaluateOldSpaceLocalPretenuring(
2647 uint64_t size_of_objects_before_gc) {
2648 uint64_t size_of_objects_after_gc = SizeOfObjects();
2649 double old_generation_survival_rate =
2650 (static_cast<double>(size_of_objects_after_gc) * 100) /
2651 static_cast<double>(size_of_objects_before_gc);
2652
2653 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2654 // Too many objects died in the old generation; pretenuring of the wrong
2655 // allocation sites may be the cause. We have to deopt all
2656 // dependent code registered in the allocation sites to re-evaluate
2657 // our pretenuring decisions.
2658 ResetAllAllocationSitesDependentCode(AllocationType::kOld);
2659 if (FLAG_trace_pretenuring) {
2660 PrintF(
2661 "Deopt all allocation sites dependent code due to low survival "
2662 "rate in the old generation %f\n",
2663 old_generation_survival_rate);
2664 }
2665 }
2666 }
2667
2668
2669 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
2670 DisallowHeapAllocation no_allocation;
2671 // All external strings are listed in the external string table.
2672
2673 class ExternalStringTableVisitorAdapter : public RootVisitor {
2674 public:
2675 explicit ExternalStringTableVisitorAdapter(
2676 Isolate* isolate, v8::ExternalResourceVisitor* visitor)
2677 : isolate_(isolate), visitor_(visitor) {}
2678 void VisitRootPointers(Root root, const char* description,
2679 FullObjectSlot start, FullObjectSlot end) override {
2680 for (FullObjectSlot p = start; p < end; ++p) {
2681 DCHECK((*p).IsExternalString());
2682 visitor_->VisitExternalString(
2683 Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
2684 }
2685 }
2686
2687 private:
2688 Isolate* isolate_;
2689 v8::ExternalResourceVisitor* visitor_;
2690 } external_string_table_visitor(isolate(), visitor);
2691
2692 external_string_table_.IterateAll(&external_string_table_visitor);
2693 }
2694
2695 STATIC_ASSERT(IsAligned(FixedDoubleArray::kHeaderSize, kDoubleAlignment));
2696
2697 #ifdef V8_COMPRESS_POINTERS
2698 // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
2699 // is only kTaggedSize aligned but we can keep using unaligned access since
2700 // both x64 and arm64 architectures (where pointer compression is supported)
2701 // allow unaligned access to doubles.
2702 STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kTaggedSize));
2703 #else
2704 STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
2705 #endif
2706
2707 #ifdef V8_HOST_ARCH_32_BIT
2708 // NOLINTNEXTLINE(runtime/references) (false positive)
2709 STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
2710 #endif
2711
2712
2713 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
2714 switch (alignment) {
2715 case kWordAligned:
2716 return 0;
2717 case kDoubleAligned:
2718 case kDoubleUnaligned:
2719 return kDoubleSize - kTaggedSize;
2720 default:
2721 UNREACHABLE();
2722 }
2723 return 0;
2724 }
2725
2726 // static
2727 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
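// Illustrative example (assuming kTaggedSize == 4 and kDoubleSize == 8): an
// address ending in ...4 needs kTaggedSize bytes of fill to become double
// aligned, while an already double-aligned address needs
// kDoubleSize - kTaggedSize bytes of fill for a kDoubleUnaligned request.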
2728 if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
2729 return kTaggedSize;
2730 if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
2731 return kDoubleSize - kTaggedSize; // No fill if double is always aligned.
2732 return 0;
2733 }
2734
2735 size_t Heap::GetCodeRangeReservedAreaSize() {
2736 return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
2737 }
2738
2739 // static
2740 HeapObject Heap::PrecedeWithFiller(ReadOnlyRoots roots, HeapObject object,
2741 int filler_size) {
2742 CreateFillerObjectAt(roots, object.address(), filler_size,
2743 ClearFreedMemoryMode::kDontClearFreedMemory);
2744 return HeapObject::FromAddress(object.address() + filler_size);
2745 }
2746
2747 // static
2748 HeapObject Heap::AlignWithFiller(ReadOnlyRoots roots, HeapObject object,
2749 int object_size, int allocation_size,
2750 AllocationAlignment alignment) {
2751 int filler_size = allocation_size - object_size;
2752 DCHECK_LT(0, filler_size);
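// Sketch of the layout produced below: an optional pre-filler of
// GetFillToAlign() bytes precedes the object to fix its alignment, and any
// remaining filler_size bytes are placed after the (moved) object so that
// exactly allocation_size bytes are consumed in total.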
2753 int pre_filler = GetFillToAlign(object.address(), alignment);
2754 if (pre_filler) {
2755 object = PrecedeWithFiller(roots, object, pre_filler);
2756 filler_size -= pre_filler;
2757 }
2758 if (filler_size) {
2759 CreateFillerObjectAt(roots, object.address() + object_size, filler_size,
2760 ClearFreedMemoryMode::kDontClearFreedMemory);
2761 }
2762 return object;
2763 }
2764
2765 void* Heap::AllocateExternalBackingStore(
2766 const std::function<void*(size_t)>& allocate, size_t byte_length) {
2767 if (!always_allocate()) {
2768 size_t new_space_backing_store_bytes =
2769 new_space()->ExternalBackingStoreBytes();
2770 if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
2771 new_space_backing_store_bytes >= byte_length) {
2772 // Performing a young generation GC amortizes over the allocated backing
2773 // store bytes and may free enough external bytes for this allocation.
2774 CollectGarbage(NEW_SPACE,
2775 GarbageCollectionReason::kExternalMemoryPressure);
2776 }
2777 }
2778 // TODO(ulan): Perform GCs proactively based on the byte_length and
2779 // the current external backing store counters.
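// Retry strategy (as implemented below): try the allocation once, then fall
// back to up to two full GCs with a retry after each, and finally a
// last-resort CollectAllAvailableGarbage before the final attempt.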
2780 void* result = allocate(byte_length);
2781 if (result) return result;
2782 if (!always_allocate()) {
2783 for (int i = 0; i < 2; i++) {
2784 CollectGarbage(OLD_SPACE,
2785 GarbageCollectionReason::kExternalMemoryPressure);
2786 result = allocate(byte_length);
2787 if (result) return result;
2788 }
2789 isolate()->counters()->gc_last_resort_from_handles()->Increment();
2790 CollectAllAvailableGarbage(
2791 GarbageCollectionReason::kExternalMemoryPressure);
2792 }
2793 return allocate(byte_length);
2794 }
2795
2796 void Heap::ConfigureInitialOldGenerationSize() {
2797 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2798 const size_t minimum_growing_step =
2799 MemoryController<V8HeapTrait>::MinimumAllocationLimitGrowingStep(
2800 CurrentHeapGrowingMode());
2801 const size_t new_old_generation_allocation_limit =
2802 Max(OldGenerationSizeOfObjects() + minimum_growing_step,
2803 static_cast<size_t>(
2804 static_cast<double>(old_generation_allocation_limit()) *
2805 (tracer()->AverageSurvivalRatio() / 100)));
2806 if (new_old_generation_allocation_limit <
2807 old_generation_allocation_limit()) {
2808 set_old_generation_allocation_limit(new_old_generation_allocation_limit);
2809 } else {
2810 old_generation_size_configured_ = true;
2811 }
2812 if (UseGlobalMemoryScheduling()) {
2813 const size_t new_global_memory_limit = Max(
2814 GlobalSizeOfObjects() + minimum_growing_step,
2815 static_cast<size_t>(static_cast<double>(global_allocation_limit_) *
2816 (tracer()->AverageSurvivalRatio() / 100)));
2817 if (new_global_memory_limit < global_allocation_limit_) {
2818 global_allocation_limit_ = new_global_memory_limit;
2819 }
2820 }
2821 }
2822 }
2823
2824 void Heap::FlushNumberStringCache() {
2825 // Flush the number to string cache.
2826 int len = number_string_cache().length();
2827 for (int i = 0; i < len; i++) {
2828 number_string_cache().set_undefined(i);
2829 }
2830 }
2831
2832 namespace {
2833
2834 HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
2835 ClearFreedMemoryMode clear_memory_mode) {
2836 if (size == 0) return HeapObject();
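// Fillers come in three shapes, chosen by size: a one-pointer filler (map
// only), a two-pointer filler (map plus one slot, optionally cleared), and a
// FreeSpace object (map plus a length field) for anything larger.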
2837 HeapObject filler = HeapObject::FromAddress(addr);
2838 if (size == kTaggedSize) {
2839 filler.set_map_after_allocation(roots.unchecked_one_pointer_filler_map(),
2840 SKIP_WRITE_BARRIER);
2841 } else if (size == 2 * kTaggedSize) {
2842 filler.set_map_after_allocation(roots.unchecked_two_pointer_filler_map(),
2843 SKIP_WRITE_BARRIER);
2844 if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
2845 AtomicSlot slot(ObjectSlot(addr) + 1);
2846 *slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
2847 }
2848 } else {
2849 DCHECK_GT(size, 2 * kTaggedSize);
2850 filler.set_map_after_allocation(roots.unchecked_free_space_map(),
2851 SKIP_WRITE_BARRIER);
2852 FreeSpace::cast(filler).relaxed_write_size(size);
2853 if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
2854 MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
2855 (size / kTaggedSize) - 2);
2856 }
2857 }
2858
2859 // At this point we may be deserializing the heap from a snapshot, in which
2860 // case the maps may not have been created yet and are still nullptr.
2861 DCHECK((filler.map_slot().contains_value(kNullAddress) &&
2862 !Heap::FromWritableHeapObject(filler)->deserialization_complete()) ||
2863 filler.map().IsMap());
2864
2865 return filler;
2866 }
2867
2868 #ifdef DEBUG
2869 void VerifyNoNeedToClearSlots(Address start, Address end) {
2870 BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
2871 if (basic_chunk->InReadOnlySpace()) return;
2872 MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
2873 // TODO(ulan): Support verification of large pages.
2874 if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
2875 BaseSpace* space = chunk->owner();
2876 space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
2877 }
2878 #else
2879 void VerifyNoNeedToClearSlots(Address start, Address end) {}
2880 #endif // DEBUG
2881
2882 } // namespace
2883
2884 // static
2885 HeapObject Heap::CreateFillerObjectAt(ReadOnlyRoots roots, Address addr,
2886 int size,
2887 ClearFreedMemoryMode clear_memory_mode) {
2888 // TODO(leszeks): Verify that no slots need to be recorded.
2889 HeapObject filler =
2890 CreateFillerObjectAtImpl(roots, addr, size, clear_memory_mode);
2891 VerifyNoNeedToClearSlots(addr, addr + size);
2892 return filler;
2893 }
2894
2895 void Heap::CreateFillerObjectAtBackground(
2896 Address addr, int size, ClearFreedMemoryMode clear_memory_mode) {
2897 CreateFillerObjectAtImpl(ReadOnlyRoots(this), addr, size, clear_memory_mode);
2898 // Do not verify whether slots are cleared here: the concurrent sweeper is not
2899 // allowed to access the main thread's remembered set.
2900 }
2901
2902 HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
2903 ClearRecordedSlots clear_slots_mode) {
2904 if (size == 0) return HeapObject();
2905 HeapObject filler = CreateFillerObjectAtImpl(
2906 ReadOnlyRoots(this), addr, size,
2907 clear_slots_mode == ClearRecordedSlots::kYes
2908 ? ClearFreedMemoryMode::kClearFreedMemory
2909 : ClearFreedMemoryMode::kDontClearFreedMemory);
2910 if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
2911 if (clear_slots_mode == ClearRecordedSlots::kYes) {
2912 ClearRecordedSlotRange(addr, addr + size);
2913 } else {
2914 VerifyNoNeedToClearSlots(addr, addr + size);
2915 }
2916 }
2917 return filler;
2918 }
2919
2920 bool Heap::CanMoveObjectStart(HeapObject object) {
2921 if (!FLAG_move_object_start) return false;
2922
2923 // Sampling heap profiler may have a reference to the object.
2924 if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
2925
2926 if (IsLargeObject(object)) return false;
2927
2928 // We can move the object start if the page was already swept.
2929 return Page::FromHeapObject(object)->SweepingDone();
2930 }
2931
2932 bool Heap::IsImmovable(HeapObject object) {
2933 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
2934 // TODO(steveblackburn): For now all objects are immovable.
2935 // Will need to revisit once moving is supported.
2936 return true;
2937 }
2938
2939 BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
2940 return chunk->NeverEvacuate() || IsLargeObject(object);
2941 }
2942
2943 bool Heap::IsLargeObject(HeapObject object) {
2944 return BasicMemoryChunk::FromHeapObject(object)->IsLargePage();
2945 }
2946
2947 #ifdef ENABLE_SLOW_DCHECKS
2948 namespace {
2949
2950 class LeftTrimmerVerifierRootVisitor : public RootVisitor {
2951 public:
2952 explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
2953 : to_check_(to_check) {}
2954
2955 void VisitRootPointers(Root root, const char* description,
2956 FullObjectSlot start, FullObjectSlot end) override {
2957 for (FullObjectSlot p = start; p < end; ++p) {
2958 DCHECK_NE(*p, to_check_);
2959 }
2960 }
2961
2962 void VisitRootPointers(Root root, const char* description,
2963 OffHeapObjectSlot start,
2964 OffHeapObjectSlot end) override {
2965 DCHECK_EQ(root, Root::kStringTable);
2966 // We can skip iterating the string table; it doesn't point to any fixed
2967 // arrays.
2968 }
2969
2970 private:
2971 FixedArrayBase to_check_;
2972
2973 DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
2974 };
2975 } // namespace
2976 #endif // ENABLE_SLOW_DCHECKS
2977
2978 namespace {
2979 bool MayContainRecordedSlots(HeapObject object) {
2980 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
2981 // New space objects do not have recorded slots.
2982 if (BasicMemoryChunk::FromHeapObject(object)->InYoungGeneration())
2983 return false;
2984 // Allowlist objects that definitely do not have pointers.
2985 if (object.IsByteArray() || object.IsFixedDoubleArray()) return false;
2986 // Conservatively return true for other objects.
2987 return true;
2988 }
2989 } // namespace
2990
2991 void Heap::OnMoveEvent(HeapObject target, HeapObject source,
2992 int size_in_bytes) {
2993 HeapProfiler* heap_profiler = isolate_->heap_profiler();
2994 if (heap_profiler->is_tracking_object_moves()) {
2995 heap_profiler->ObjectMoveEvent(source.address(), target.address(),
2996 size_in_bytes);
2997 }
2998 for (auto& tracker : allocation_trackers_) {
2999 tracker->MoveEvent(source.address(), target.address(), size_in_bytes);
3000 }
3001 if (target.IsSharedFunctionInfo()) {
3002 LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
3003 target.address()));
3004 }
3005
3006 if (FLAG_verify_predictable) {
3007 ++allocations_count_;
3008 // Advance synthetic time by making a time request.
3009 MonotonicallyIncreasingTimeInMs();
3010
3011 UpdateAllocationsHash(source);
3012 UpdateAllocationsHash(target);
3013 UpdateAllocationsHash(size_in_bytes);
3014
3015 if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
3016 PrintAllocationsHash();
3017 }
3018 } else if (FLAG_fuzzer_gc_analysis) {
3019 ++allocations_count_;
3020 }
3021 }
3022
3023 FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
3024 int elements_to_trim) {
3025 if (elements_to_trim == 0) {
3026 // This simplifies reasoning in the rest of the function.
3027 return object;
3028 }
3029 CHECK(!object.is_null());
3030 DCHECK(CanMoveObjectStart(object));
3031 // Add a custom visitor to the concurrent marker if a new left-trimmable
3032 // type is added.
3033 DCHECK(object.IsFixedArray() || object.IsFixedDoubleArray());
3034 const int element_size = object.IsFixedArray() ? kTaggedSize : kDoubleSize;
3035 const int bytes_to_trim = elements_to_trim * element_size;
3036 Map map = object.map();
3037
3038 // For now this trick is only applied to fixed arrays which may be in new
3039 // space or old space. In a large object space the object's start must
3040 // coincide with the chunk start, and thus the trick is not applicable.
3041 DCHECK(!IsLargeObject(object));
3042 DCHECK(object.map() != ReadOnlyRoots(this).fixed_cow_array_map());
3043
3044 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3045 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kTaggedSize);
3046 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
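// The asserts above guarantee that the map word and the length Smi are the
// first two tagged fields, so writing them at the new start below yields a
// valid FixedArrayBase header.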
3047
3048 const int len = object.length();
3049 DCHECK(elements_to_trim <= len);
3050
3051 // Calculate location of new array start.
3052 Address old_start = object.address();
3053 Address new_start = old_start + bytes_to_trim;
3054
3055 if (incremental_marking()->IsMarking()) {
3056 incremental_marking()->NotifyLeftTrimming(
3057 object, HeapObject::FromAddress(new_start));
3058 }
3059
3060 #ifdef DEBUG
3061 if (MayContainRecordedSlots(object)) {
3062 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
3063 DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
3064 DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
3065 }
3066 #endif
3067
3068 // Technically in new space this write might be omitted (except for
3069 // debug mode, which iterates through the heap), but to be on the safe side
3070 // we still do it.
3071 CreateFillerObjectAt(old_start, bytes_to_trim,
3072 MayContainRecordedSlots(object)
3073 ? ClearRecordedSlots::kYes
3074 : ClearRecordedSlots::kNo);
3075
3076 // Initialize header of the trimmed array. Since left trimming is only
3077 // performed on pages which are not concurrently swept, creating a filler
3078 // object does not require synchronization.
3079 RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
3080 RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
3081 Smi::FromInt(len - elements_to_trim));
3082
3083 FixedArrayBase new_object =
3084 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3085
3086 // Notify the heap profiler of change in object layout.
3087 OnMoveEvent(new_object, object, new_object.Size());
3088
3089 #ifdef ENABLE_SLOW_DCHECKS
3090 if (FLAG_enable_slow_asserts) {
3091 // Make sure the stack or other roots (e.g., Handles) don't contain pointers
3092 // to the original FixedArray (which is now the filler object).
3093 SafepointScope scope(this);
3094 LeftTrimmerVerifierRootVisitor root_visitor(object);
3095 ReadOnlyRoots(this).Iterate(&root_visitor);
3096 IterateRoots(&root_visitor, {});
3097 }
3098 #endif // ENABLE_SLOW_DCHECKS
3099
3100 return new_object;
3101 }
3102
3103 void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
3104 const int len = object.length();
3105 DCHECK_LE(elements_to_trim, len);
3106 DCHECK_GE(elements_to_trim, 0);
3107
3108 int bytes_to_trim;
3109 if (object.IsByteArray()) {
3110 int new_size = ByteArray::SizeFor(len - elements_to_trim);
3111 bytes_to_trim = ByteArray::SizeFor(len) - new_size;
3112 DCHECK_GE(bytes_to_trim, 0);
3113 } else if (object.IsFixedArray()) {
3114 CHECK_NE(elements_to_trim, len);
3115 bytes_to_trim = elements_to_trim * kTaggedSize;
3116 } else {
3117 DCHECK(object.IsFixedDoubleArray());
3118 CHECK_NE(elements_to_trim, len);
3119 bytes_to_trim = elements_to_trim * kDoubleSize;
3120 }
3121
3122 CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
3123 }
3124
3125 void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
3126 int elements_to_trim) {
3127 // This function is safe to use only at the end of the mark compact
3128 // collection: When marking, we record the weak slots, and shrinking
3129 // invalidates them.
3130 DCHECK_EQ(gc_state(), MARK_COMPACT);
3131 CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
3132 elements_to_trim * kTaggedSize);
3133 }
3134
3135 template <typename T>
3136 void Heap::CreateFillerForArray(T object, int elements_to_trim,
3137 int bytes_to_trim) {
3138 DCHECK(object.IsFixedArrayBase() || object.IsByteArray() ||
3139 object.IsWeakFixedArray());
3140
3141 // For now this trick is only applied to objects in new and paged space.
3142 DCHECK(object.map() != ReadOnlyRoots(this).fixed_cow_array_map());
3143
3144 if (bytes_to_trim == 0) {
3145 DCHECK_EQ(elements_to_trim, 0);
3146 // No need to create filler and update live bytes counters.
3147 return;
3148 }
3149
3150 // Calculate location of new array end.
3151 int old_size = object.Size();
3152 Address old_end = object.address() + old_size;
3153 Address new_end = old_end - bytes_to_trim;
3154
3155 #ifdef DEBUG
3156 if (MayContainRecordedSlots(object)) {
3157 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
3158 DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
3159 DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
3160 }
3161 #endif
3162
3163 bool clear_slots = MayContainRecordedSlots(object);
3164
3165 // Technically in new space this write might be omitted (except for
3166 // debug mode, which iterates through the heap), but to be on the safe side
3167 // we still do it.
3168 // We do not create a filler for objects in a large object space.
3169 if (!IsLargeObject(object)) {
3170 HeapObject filler = CreateFillerObjectAt(
3171 new_end, bytes_to_trim,
3172 clear_slots ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
3173 DCHECK(!filler.is_null());
3174 // Clear the mark bits of the black area that belongs now to the filler.
3175 // This is an optimization. The sweeper will release black fillers anyway.
3176 if (incremental_marking()->black_allocation() &&
3177 incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
3178 Page* page = Page::FromAddress(new_end);
3179 incremental_marking()->marking_state()->bitmap(page)->ClearRange(
3180 page->AddressToMarkbitIndex(new_end),
3181 page->AddressToMarkbitIndex(new_end + bytes_to_trim));
3182 }
3183 } else if (clear_slots) {
3184 // Large objects are not swept, so it is not necessary to clear the
3185 // recorded slot.
3186 MemsetTagged(ObjectSlot(new_end), Object(kClearedFreeMemoryValue),
3187 (old_end - new_end) / kTaggedSize);
3188 }
3189
3190 // Initialize header of the trimmed array. We are storing the new length
3191 // using release store after creating a filler for the left-over space to
3192 // avoid races with the sweeper thread.
3193 object.synchronized_set_length(object.length() - elements_to_trim);
3194
3195 // Notify the heap object allocation tracker of change in object layout. The
3196 // array may not be moved during GC, but its size still has to be adjusted.
3197 for (auto& tracker : allocation_trackers_) {
3198 tracker->UpdateObjectSizeEvent(object.address(), object.Size());
3199 }
3200 }
3201
3202 void Heap::MakeHeapIterable() {
3203 mark_compact_collector()->EnsureSweepingCompleted();
3204
3205 MakeLocalHeapLabsIterable();
3206 }
3207
3208 void Heap::MakeLocalHeapLabsIterable() {
3209 if (!FLAG_local_heaps) return;
3210 safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
3211 local_heap->MakeLinearAllocationAreaIterable();
3212 });
3213 }
3214
3215 namespace {
3216
3217 double ComputeMutatorUtilizationImpl(double mutator_speed, double gc_speed) {
3218 constexpr double kMinMutatorUtilization = 0.0;
3219 constexpr double kConservativeGcSpeedInBytesPerMillisecond = 200000;
3220 if (mutator_speed == 0) return kMinMutatorUtilization;
3221 if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
3222 // Derivation:
3223 // mutator_utilization = mutator_time / (mutator_time + gc_time)
3224 // mutator_time = 1 / mutator_speed
3225 // gc_time = 1 / gc_speed
3226 // mutator_utilization = (1 / mutator_speed) /
3227 // (1 / mutator_speed + 1 / gc_speed)
3228 // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
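// Example (hypothetical numbers): mutator_speed = 800000 bytes/ms and
// gc_speed = 200000 bytes/ms give 200000 / 1000000 = 0.2, i.e. for each byte
// allocated and collected, the mutator accounts for ~20% of the combined time.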
3229 return gc_speed / (mutator_speed + gc_speed);
3230 }
3231
3232 } // namespace
3233
3234 double Heap::ComputeMutatorUtilization(const char* tag, double mutator_speed,
3235 double gc_speed) {
3236 double result = ComputeMutatorUtilizationImpl(mutator_speed, gc_speed);
3237 if (FLAG_trace_mutator_utilization) {
3238 isolate()->PrintWithTimestamp(
3239 "%s mutator utilization = %.3f ("
3240 "mutator_speed=%.f, gc_speed=%.f)\n",
3241 tag, result, mutator_speed, gc_speed);
3242 }
3243 return result;
3244 }
3245
3246 bool Heap::HasLowYoungGenerationAllocationRate() {
3247 double mu = ComputeMutatorUtilization(
3248 "Young generation",
3249 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond(),
3250 tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
3251 constexpr double kHighMutatorUtilization = 0.993;
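// A utilization above 0.993 means the allocation throughput is below roughly
// 0.7% of the scavenge speed, i.e. the GC can easily keep up.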
3252 return mu > kHighMutatorUtilization;
3253 }
3254
3255 bool Heap::HasLowOldGenerationAllocationRate() {
3256 double mu = ComputeMutatorUtilization(
3257 "Old generation",
3258 tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond(),
3259 tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
3260 const double kHighMutatorUtilization = 0.993;
3261 return mu > kHighMutatorUtilization;
3262 }
3263
3264 bool Heap::HasLowEmbedderAllocationRate() {
3265 if (!UseGlobalMemoryScheduling()) return true;
3266
3267 DCHECK_NOT_NULL(local_embedder_heap_tracer());
3268 double mu = ComputeMutatorUtilization(
3269 "Embedder",
3270 tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond(),
3271 tracer()->EmbedderSpeedInBytesPerMillisecond());
3272 const double kHighMutatorUtilization = 0.993;
3273 return mu > kHighMutatorUtilization;
3274 }
3275
3276 bool Heap::HasLowAllocationRate() {
3277 return HasLowYoungGenerationAllocationRate() &&
3278 HasLowOldGenerationAllocationRate() && HasLowEmbedderAllocationRate();
3279 }
3280
3281 bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
3282 double mutator_utilization) {
3283 const double kHighHeapPercentage = 0.8;
3284 const double kLowMutatorUtilization = 0.4;
3285 return old_generation_size >=
3286 kHighHeapPercentage * max_old_generation_size() &&
3287 mutator_utilization < kLowMutatorUtilization;
3288 }
3289
3290 void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
3291 double mutator_utilization) {
3292 const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
3293 if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
3294 if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
3295 consecutive_ineffective_mark_compacts_ = 0;
3296 return;
3297 }
3298 ++consecutive_ineffective_mark_compacts_;
3299 if (consecutive_ineffective_mark_compacts_ ==
3300 kMaxConsecutiveIneffectiveMarkCompacts) {
3301 if (InvokeNearHeapLimitCallback()) {
3302 // The callback increased the heap limit.
3303 consecutive_ineffective_mark_compacts_ = 0;
3304 return;
3305 }
3306 FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
3307 }
3308 }
3309
3310 bool Heap::HasHighFragmentation() {
3311 size_t used = OldGenerationSizeOfObjects();
3312 size_t committed = CommittedOldGenerationMemory();
3313 return HasHighFragmentation(used, committed);
3314 }
3315
3316 bool Heap::HasHighFragmentation(size_t used, size_t committed) {
3317 const size_t kSlack = 16 * MB;
3318 // Fragmentation is high if committed > 2 * used + kSlack.
3319 // Rewrite the expression to avoid overflow.
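// E.g. (hypothetical numbers) used = 40 MB, committed = 100 MB:
// 100 - 40 = 60 MB exceeds 40 + 16 = 56 MB, so fragmentation counts as high.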
3320 DCHECK_GE(committed, used);
3321 return committed - used > used + kSlack;
3322 }
3323
3324 bool Heap::ShouldOptimizeForMemoryUsage() {
3325 const size_t kOldGenerationSlack = max_old_generation_size() / 8;
3326 return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
3327 isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
3328 !CanExpandOldGeneration(kOldGenerationSlack);
3329 }
3330
3331 void Heap::ActivateMemoryReducerIfNeeded() {
3332 // Activate memory reducer when switching to background if
3333 // - there was no mark compact since the start.
3334 // - the committed memory can be potentially reduced.
3335 // 2 pages for the old, code, and map space + 1 page for new space.
3336 const int kMinCommittedMemory = 7 * Page::kPageSize;
3337 if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
3338 isolate()->IsIsolateInBackground()) {
3339 MemoryReducer::Event event;
3340 event.type = MemoryReducer::kPossibleGarbage;
3341 event.time_ms = MonotonicallyIncreasingTimeInMs();
3342 memory_reducer_->NotifyPossibleGarbage(event);
3343 }
3344 }
3345
3346 void Heap::ReduceNewSpaceSize() {
3347 // TODO(ulan): Unify this constant with the similar constant in
3348 // GCIdleTimeHandler once the change is merged to 4.5.
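// The threshold below is in bytes per millisecond, i.e. roughly 1 MB/s.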
3349 static const size_t kLowAllocationThroughput = 1000;
3350 const double allocation_throughput =
3351 tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
3352
3353 if (FLAG_predictable) return;
3354
3355 if (ShouldReduceMemory() ||
3356 ((allocation_throughput != 0) &&
3357 (allocation_throughput < kLowAllocationThroughput))) {
3358 new_space_->Shrink();
3359 new_lo_space_->SetCapacity(new_space_->Capacity());
3360 UncommitFromSpace();
3361 }
3362 }
3363
3364 void Heap::FinalizeIncrementalMarkingIfComplete(
3365 GarbageCollectionReason gc_reason) {
3366 if (incremental_marking()->IsMarking() &&
3367 (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
3368 (!incremental_marking()->finalize_marking_completed() &&
3369 mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
3370 local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
3371 FinalizeIncrementalMarkingIncrementally(gc_reason);
3372 } else if (incremental_marking()->IsComplete() ||
3373 (incremental_marking()->IsMarking() &&
3374 mark_compact_collector()->local_marking_worklists()->IsEmpty() &&
3375 local_embedder_heap_tracer()
3376 ->ShouldFinalizeIncrementalMarking())) {
3377 CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
3378 }
3379 }
3380
3381 void Heap::FinalizeIncrementalMarkingAtomically(
3382 GarbageCollectionReason gc_reason) {
3383 DCHECK(!incremental_marking()->IsStopped());
3384 CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
3385 }
3386
3387 void Heap::InvokeIncrementalMarkingPrologueCallbacks() {
3388 GCCallbacksScope scope(this);
3389 if (scope.CheckReenter()) {
3390 AllowHeapAllocation allow_allocation;
3391 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
3392 VMState<EXTERNAL> state(isolate_);
3393 HandleScope handle_scope(isolate_);
3394 CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
3395 }
3396 }
3397
3398 void Heap::InvokeIncrementalMarkingEpilogueCallbacks() {
3399 GCCallbacksScope scope(this);
3400 if (scope.CheckReenter()) {
3401 AllowHeapAllocation allow_allocation;
3402 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
3403 VMState<EXTERNAL> state(isolate_);
3404 HandleScope handle_scope(isolate_);
3405 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
3406 }
3407 }
3408
3409 void Heap::FinalizeIncrementalMarkingIncrementally(
3410 GarbageCollectionReason gc_reason) {
3411 if (FLAG_trace_incremental_marking) {
3412 isolate()->PrintWithTimestamp(
3413 "[IncrementalMarking] (%s).\n",
3414 Heap::GarbageCollectionReasonToString(gc_reason));
3415 }
3416
3417 DevToolsTraceEventScope devtools_trace_event_scope(
3418 this, "MajorGC", "incremental finalization step");
3419
3420 HistogramTimerScope incremental_marking_scope(
3421 isolate()->counters()->gc_incremental_marking_finalize());
3422 TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
3423 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
3424
3425 SafepointScope safepoint(this);
3426 InvokeIncrementalMarkingPrologueCallbacks();
3427 incremental_marking()->FinalizeIncrementally();
3428 InvokeIncrementalMarkingEpilogueCallbacks();
3429 }
3430
3431 void Heap::NotifyObjectLayoutChange(
3432 HeapObject object, const DisallowHeapAllocation&,
3433 InvalidateRecordedSlots invalidate_recorded_slots) {
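// If requested, register the object with the invalidated-slots sets so that
// slots recorded inside its old layout are re-checked rather than trusted.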
3434 if (incremental_marking()->IsMarking()) {
3435 incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
3436 if (incremental_marking()->IsCompacting() &&
3437 invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
3438 MayContainRecordedSlots(object)) {
3439 MemoryChunk::FromHeapObject(object)
3440 ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
3441 }
3442 }
3443 if (invalidate_recorded_slots == InvalidateRecordedSlots::kYes &&
3444 MayContainRecordedSlots(object)) {
3445 MemoryChunk::FromHeapObject(object)
3446 ->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
3447 }
3448 #ifdef VERIFY_HEAP
3449 if (FLAG_verify_heap) {
3450 DCHECK(pending_layout_change_object_.is_null());
3451 pending_layout_change_object_ = object;
3452 }
3453 #endif
3454 }
3455
3456 #ifdef VERIFY_HEAP
3457 // Helper class for collecting slot addresses.
3458 class SlotCollectingVisitor final : public ObjectVisitor {
3459 public:
3460 void VisitPointers(HeapObject host, ObjectSlot start,
3461 ObjectSlot end) override {
3462 VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
3463 }
3464 void VisitPointers(HeapObject host, MaybeObjectSlot start,
3465 MaybeObjectSlot end) final {
3466 for (MaybeObjectSlot p = start; p < end; ++p) {
3467 slots_.push_back(p);
3468 }
3469 }
3470
3471 void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
3472
3473 void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
3474 UNREACHABLE();
3475 }
3476
3477 int number_of_slots() { return static_cast<int>(slots_.size()); }
3478
3479 MaybeObjectSlot slot(int i) { return slots_[i]; }
3480
3481 private:
3482 std::vector<MaybeObjectSlot> slots_;
3483 };
3484
3485 void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
3486 if (!FLAG_verify_heap) return;
3487
3488 // Check that Heap::NotifyObjectLayoutChange was called for object transitions
3489 // that are not safe for concurrent marking.
3490 // If you see this check triggering for a freshly allocated object,
3491 // use object->set_map_after_allocation() to initialize its map.
3492 if (pending_layout_change_object_.is_null()) {
3493 if (object.IsJSObject()) {
3494 DCHECK(!object.map().TransitionRequiresSynchronizationWithGC(new_map));
3495 } else if (object.IsString() &&
3496 (new_map == ReadOnlyRoots(this).thin_string_map() ||
3497 new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
3498 // When transitioning a string to ThinString,
3499 // Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
3500 // tagged fields are introduced.
3501 } else {
3502 // Check that the set of slots before and after the transition match.
3503 SlotCollectingVisitor old_visitor;
3504 object.IterateFast(&old_visitor);
3505 MapWord old_map_word = object.map_word();
3506 // Temporarily set the new map to iterate new slots.
3507 object.set_map_word(MapWord::FromMap(new_map));
3508 SlotCollectingVisitor new_visitor;
3509 object.IterateFast(&new_visitor);
3510 // Restore the old map.
3511 object.set_map_word(old_map_word);
3512 DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
3513 for (int i = 0; i < new_visitor.number_of_slots(); i++) {
3514 DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
3515 }
3516 }
3517 } else {
3518 DCHECK_EQ(pending_layout_change_object_, object);
3519 pending_layout_change_object_ = HeapObject();
3520 }
3521 }
3522 #endif
3523
3524 GCIdleTimeHeapState Heap::ComputeHeapState() {
3525 GCIdleTimeHeapState heap_state;
3526 heap_state.contexts_disposed = contexts_disposed_;
3527 heap_state.contexts_disposal_rate =
3528 tracer()->ContextDisposalRateInMilliseconds();
3529 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
3530 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
3531 return heap_state;
3532 }
3533
3534
3535 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
3536 GCIdleTimeHeapState heap_state,
3537 double deadline_in_ms) {
3538 bool result = false;
3539 switch (action) {
3540 case GCIdleTimeAction::kDone:
3541 result = true;
3542 break;
3543 case GCIdleTimeAction::kIncrementalStep: {
3544 incremental_marking()->AdvanceWithDeadline(
3545 deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
3546 StepOrigin::kTask);
3547 FinalizeIncrementalMarkingIfComplete(
3548 GarbageCollectionReason::kFinalizeMarkingViaTask);
3549 result = incremental_marking()->IsStopped();
3550 break;
3551 }
3552 case GCIdleTimeAction::kFullGC: {
3553 DCHECK_LT(0, contexts_disposed_);
3554 HistogramTimerScope scope(isolate_->counters()->gc_context());
3555 TRACE_EVENT0("v8", "V8.GCContext");
3556 CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
3557 break;
3558 }
3559 }
3560
3561 return result;
3562 }
3563
3564 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
3565 GCIdleTimeHeapState heap_state,
3566 double start_ms, double deadline_in_ms) {
3567 double idle_time_in_ms = deadline_in_ms - start_ms;
3568 double current_time = MonotonicallyIncreasingTimeInMs();
3569 last_idle_notification_time_ = current_time;
3570 double deadline_difference = deadline_in_ms - current_time;
3571
3572 contexts_disposed_ = 0;
3573
3574 if (FLAG_trace_idle_notification) {
3575 isolate_->PrintWithTimestamp(
3576 "Idle notification: requested idle time %.2f ms, used idle time %.2f "
3577 "ms, deadline usage %.2f ms [",
3578 idle_time_in_ms, idle_time_in_ms - deadline_difference,
3579 deadline_difference);
3580 switch (action) {
3581 case GCIdleTimeAction::kDone:
3582 PrintF("done");
3583 break;
3584 case GCIdleTimeAction::kIncrementalStep:
3585 PrintF("incremental step");
3586 break;
3587 case GCIdleTimeAction::kFullGC:
3588 PrintF("full GC");
3589 break;
3590 }
3591 PrintF("]");
3592 if (FLAG_trace_idle_notification_verbose) {
3593 PrintF("[");
3594 heap_state.Print();
3595 PrintF("]");
3596 }
3597 PrintF("\n");
3598 }
3599 }
3600
3601
3602 double Heap::MonotonicallyIncreasingTimeInMs() {
3603 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
3604 static_cast<double>(base::Time::kMillisecondsPerSecond);
3605 }
3606
3607 void Heap::VerifyNewSpaceTop() { new_space()->VerifyTop(); }
3608
3609 bool Heap::IdleNotification(int idle_time_in_ms) {
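// Convert the millisecond budget into an absolute deadline in seconds, as
// expected by the double overload below.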
3610 return IdleNotification(
3611 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
3612 (static_cast<double>(idle_time_in_ms) /
3613 static_cast<double>(base::Time::kMillisecondsPerSecond)));
3614 }
3615
3616
3617 bool Heap::IdleNotification(double deadline_in_seconds) {
3618 CHECK(HasBeenSetUp());
3619 double deadline_in_ms =
3620 deadline_in_seconds *
3621 static_cast<double>(base::Time::kMillisecondsPerSecond);
3622 HistogramTimerScope idle_notification_scope(
3623 isolate_->counters()->gc_idle_notification());
3624 TRACE_EVENT0("v8", "V8.GCIdleNotification");
3625 double start_ms = MonotonicallyIncreasingTimeInMs();
3626 double idle_time_in_ms = deadline_in_ms - start_ms;
3627
3628 tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
3629 OldGenerationAllocationCounter(),
3630 EmbedderAllocationCounter());
3631
3632 GCIdleTimeHeapState heap_state = ComputeHeapState();
3633
3634 GCIdleTimeAction action =
3635 gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
3636
3637 bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
3638
3639 IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
3640 return result;
3641 }
3642
3643
3644 bool Heap::RecentIdleNotificationHappened() {
3645 return (last_idle_notification_time_ +
3646 GCIdleTimeHandler::kMaxScheduledIdleTime) >
3647 MonotonicallyIncreasingTimeInMs();
3648 }
3649
3650 class MemoryPressureInterruptTask : public CancelableTask {
3651 public:
3652 explicit MemoryPressureInterruptTask(Heap* heap)
3653 : CancelableTask(heap->isolate()), heap_(heap) {}
3654
3655 ~MemoryPressureInterruptTask() override = default;
3656
3657 private:
3658 // v8::internal::CancelableTask overrides.
3659 void RunInternal() override { heap_->CheckMemoryPressure(); }
3660
3661 Heap* heap_;
3662 DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
3663 };
3664
3665 void Heap::CheckMemoryPressure() {
3666 if (HighMemoryPressure()) {
3667 // The optimizing compiler may be unnecessarily holding on to memory.
3668 isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
3669 }
3670 // Reset the memory pressure level to avoid recursive GCs triggered by
3671 // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
3672 // the finalizers.
3673 MemoryPressureLevel memory_pressure_level = memory_pressure_level_.exchange(
3674 MemoryPressureLevel::kNone, std::memory_order_relaxed);
3675 if (memory_pressure_level == MemoryPressureLevel::kCritical) {
3676 TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
3677 CollectGarbageOnMemoryPressure();
3678 } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
3679 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
3680 TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
3681 StartIncrementalMarking(kReduceMemoryFootprintMask,
3682 GarbageCollectionReason::kMemoryPressure);
3683 }
3684 }
3685 }
3686
3687 void Heap::CollectGarbageOnMemoryPressure() {
3688 const int kGarbageThresholdInBytes = 8 * MB;
3689 const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
3690 // This constant is the maximum response time in the RAIL performance model.
3691 const double kMaxMemoryPressurePauseMs = 100;
3692
3693 double start = MonotonicallyIncreasingTimeInMs();
3694 CollectAllGarbage(kReduceMemoryFootprintMask,
3695 GarbageCollectionReason::kMemoryPressure,
3696 kGCCallbackFlagCollectAllAvailableGarbage);
3697 EagerlyFreeExternalMemory();
3698 double end = MonotonicallyIncreasingTimeInMs();
3699
3700 // Estimate how much memory we can free.
3701 int64_t potential_garbage =
3702 (CommittedMemory() - SizeOfObjects()) + external_memory_.total();
3703 // If we can potentially free a large amount of memory, then start a GC
3704 // right away instead of waiting for the memory reducer.
3705 if (potential_garbage >= kGarbageThresholdInBytes &&
3706 potential_garbage >=
3707 CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
3708 // If we spent less than half of the time budget, then perform a full GC.
3709 // Otherwise, start incremental marking.
3710 if (end - start < kMaxMemoryPressurePauseMs / 2) {
3711 CollectAllGarbage(kReduceMemoryFootprintMask,
3712 GarbageCollectionReason::kMemoryPressure,
3713 kGCCallbackFlagCollectAllAvailableGarbage);
3714 } else {
3715 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
3716 StartIncrementalMarking(kReduceMemoryFootprintMask,
3717 GarbageCollectionReason::kMemoryPressure);
3718 }
3719 }
3720 }
3721 }
3722
3723 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
3724 bool is_isolate_locked) {
3725 TRACE_EVENT1("devtools.timeline,v8", "V8.MemoryPressureNotification", "level",
3726 static_cast<int>(level));
3727 MemoryPressureLevel previous =
3728 memory_pressure_level_.exchange(level, std::memory_order_relaxed);
3729 if ((previous != MemoryPressureLevel::kCritical &&
3730 level == MemoryPressureLevel::kCritical) ||
3731 (previous == MemoryPressureLevel::kNone &&
3732 level == MemoryPressureLevel::kModerate)) {
3733 if (is_isolate_locked) {
3734 CheckMemoryPressure();
3735 } else {
3736 ExecutionAccess access(isolate());
3737 isolate()->stack_guard()->RequestGC();
3738 auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
3739 reinterpret_cast<v8::Isolate*>(isolate()));
3740 taskrunner->PostTask(std::make_unique<MemoryPressureInterruptTask>(this));
3741 }
3742 }
3743 }
3744
3745 void Heap::EagerlyFreeExternalMemory() {
3746 array_buffer_sweeper()->EnsureFinished();
3747 memory_allocator()->unmapper()->EnsureUnmappingCompleted();
3748 }
3749
3750 void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
3751 void* data) {
3752 const size_t kMaxCallbacks = 100;
3753 CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
3754 for (auto callback_data : near_heap_limit_callbacks_) {
3755 CHECK_NE(callback_data.first, callback);
3756 }
3757 near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
3758 }
3759
3760 void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
3761 size_t heap_limit) {
3762 for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
3763 if (near_heap_limit_callbacks_[i].first == callback) {
3764 near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
3765 if (heap_limit) {
3766 RestoreHeapLimit(heap_limit);
3767 }
3768 return;
3769 }
3770 }
3771 UNREACHABLE();
3772 }
3773
3774 void Heap::AppendArrayBufferExtension(JSArrayBuffer object,
3775 ArrayBufferExtension* extension) {
3776 array_buffer_sweeper_->Append(object, extension);
3777 }
3778
3779 void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
3780 initial_max_old_generation_size_threshold_ =
3781 initial_max_old_generation_size_ * threshold_percent;
3782 }
3783
3784 bool Heap::InvokeNearHeapLimitCallback() {
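// Only the most recently registered callback is consulted; if it raises the
// limit, the new limit is clamped to what the allocator can support.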
3785 if (near_heap_limit_callbacks_.size() > 0) {
3786 HandleScope scope(isolate());
3787 v8::NearHeapLimitCallback callback =
3788 near_heap_limit_callbacks_.back().first;
3789 void* data = near_heap_limit_callbacks_.back().second;
3790 size_t heap_limit = callback(data, max_old_generation_size(),
3791 initial_max_old_generation_size_);
3792 if (heap_limit > max_old_generation_size()) {
3793 set_max_old_generation_size(
3794 Min(heap_limit, AllocatorLimitOnMaxOldGenerationSize()));
3795 return true;
3796 }
3797 }
3798 return false;
3799 }
3800
3801 bool Heap::MeasureMemory(std::unique_ptr<v8::MeasureMemoryDelegate> delegate,
3802 v8::MeasureMemoryExecution execution) {
3803 HandleScope handle_scope(isolate());
3804 std::vector<Handle<NativeContext>> contexts = FindAllNativeContexts();
3805 std::vector<Handle<NativeContext>> to_measure;
3806 for (auto& current : contexts) {
3807 if (delegate->ShouldMeasure(
3808 v8::Utils::ToLocal(Handle<Context>::cast(current)))) {
3809 to_measure.push_back(current);
3810 }
3811 }
3812 return memory_measurement_->EnqueueRequest(std::move(delegate), execution,
3813 to_measure);
3814 }
3815
3816 std::unique_ptr<v8::MeasureMemoryDelegate> Heap::MeasureMemoryDelegate(
3817 Handle<NativeContext> context, Handle<JSPromise> promise,
3818 v8::MeasureMemoryMode mode) {
3819 return i::MemoryMeasurement::DefaultDelegate(isolate_, context, promise,
3820 mode);
3821 }
3822
3823 void Heap::CollectCodeStatistics() {
3824 TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
3825 CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
3826 // We do not look for code in new space or map space. If code
3827 // somehow ends up in those spaces, we would miss it here.
3828 CodeStatistics::CollectCodeStatistics(code_space_, isolate());
3829 CodeStatistics::CollectCodeStatistics(old_space_, isolate());
3830 CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
3831 }
3832
3833 #ifdef DEBUG
3834
3835 void Heap::Print() {
3836 if (!HasBeenSetUp()) return;
3837 isolate()->PrintStack(stdout);
3838
3839 for (SpaceIterator it(this); it.HasNext();) {
3840 it.Next()->Print();
3841 }
3842 }
3843
3844
3845 void Heap::ReportCodeStatistics(const char* title) {
3846 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3847 CollectCodeStatistics();
3848 CodeStatistics::ReportCodeStatistics(isolate());
3849 }
3850
3851 #endif // DEBUG
3852
3853 const char* Heap::GarbageCollectionReasonToString(
3854 GarbageCollectionReason gc_reason) {
3855 switch (gc_reason) {
3856 case GarbageCollectionReason::kAllocationFailure:
3857 return "allocation failure";
3858 case GarbageCollectionReason::kAllocationLimit:
3859 return "allocation limit";
3860 case GarbageCollectionReason::kContextDisposal:
3861 return "context disposal";
3862 case GarbageCollectionReason::kCountersExtension:
3863 return "counters extension";
3864 case GarbageCollectionReason::kDebugger:
3865 return "debugger";
3866 case GarbageCollectionReason::kDeserializer:
3867 return "deserialize";
3868 case GarbageCollectionReason::kExternalMemoryPressure:
3869 return "external memory pressure";
3870 case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
3871 return "finalize incremental marking via stack guard";
3872 case GarbageCollectionReason::kFinalizeMarkingViaTask:
3873 return "finalize incremental marking via task";
3874 case GarbageCollectionReason::kFullHashtable:
3875 return "full hash-table";
3876 case GarbageCollectionReason::kHeapProfiler:
3877 return "heap profiler";
3878 case GarbageCollectionReason::kTask:
3879 return "task";
3880 case GarbageCollectionReason::kLastResort:
3881 return "last resort";
3882 case GarbageCollectionReason::kLowMemoryNotification:
3883 return "low memory notification";
3884 case GarbageCollectionReason::kMakeHeapIterable:
3885 return "make heap iterable";
3886 case GarbageCollectionReason::kMemoryPressure:
3887 return "memory pressure";
3888 case GarbageCollectionReason::kMemoryReducer:
3889 return "memory reducer";
3890 case GarbageCollectionReason::kRuntime:
3891 return "runtime";
3892 case GarbageCollectionReason::kSamplingProfiler:
3893 return "sampling profiler";
3894 case GarbageCollectionReason::kSnapshotCreator:
3895 return "snapshot creator";
3896 case GarbageCollectionReason::kTesting:
3897 return "testing";
3898 case GarbageCollectionReason::kExternalFinalize:
3899 return "external finalize";
3900 case GarbageCollectionReason::kGlobalAllocationLimit:
3901 return "global allocation limit";
3902 case GarbageCollectionReason::kMeasureMemory:
3903 return "measure memory";
3904 case GarbageCollectionReason::kUnknown:
3905 return "unknown";
3906 case GarbageCollectionReason::kBackgroundAllocationFailure:
3907 return "background allocation failure";
3908 }
3909 UNREACHABLE();
3910 }
3911
3912 bool Heap::Contains(HeapObject value) const {
3913 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
3914 return true;
3915 }
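// Objects in the read-only heap are owned by ReadOnlyHeap and are
// intentionally reported as not contained in this heap.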
3916 if (ReadOnlyHeap::Contains(value)) {
3917 return false;
3918 }
3919 if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
3920 return false;
3921 }
3922 return HasBeenSetUp() &&
3923 (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
3924 code_space_->Contains(value) || map_space_->Contains(value) ||
3925 lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
3926 new_lo_space_->Contains(value));
3927 }
3928
3929 bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
3930 if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
3931 return false;
3932 }
3933 if (!HasBeenSetUp()) return false;
3934
3935 switch (space) {
3936 case NEW_SPACE:
3937 return new_space_->ToSpaceContains(value);
3938 case OLD_SPACE:
3939 return old_space_->Contains(value);
3940 case CODE_SPACE:
3941 return code_space_->Contains(value);
3942 case MAP_SPACE:
3943 return map_space_->Contains(value);
3944 case LO_SPACE:
3945 return lo_space_->Contains(value);
3946 case CODE_LO_SPACE:
3947 return code_lo_space_->Contains(value);
3948 case NEW_LO_SPACE:
3949 return new_lo_space_->Contains(value);
3950 case RO_SPACE:
3951 return ReadOnlyHeap::Contains(value);
3952 }
3953 UNREACHABLE();
3954 }
3955
3956 bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
3957 if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
3958 return false;
3959 }
3960 if (!HasBeenSetUp()) return false;
3961
3962 switch (space) {
3963 case NEW_SPACE:
3964 return new_space_->ToSpaceContainsSlow(addr);
3965 case OLD_SPACE:
3966 return old_space_->ContainsSlow(addr);
3967 case CODE_SPACE:
3968 return code_space_->ContainsSlow(addr);
3969 case MAP_SPACE:
3970 return map_space_->ContainsSlow(addr);
3971 case LO_SPACE:
3972 return lo_space_->ContainsSlow(addr);
3973 case CODE_LO_SPACE:
3974 return code_lo_space_->ContainsSlow(addr);
3975 case NEW_LO_SPACE:
3976 return new_lo_space_->ContainsSlow(addr);
3977 case RO_SPACE:
3978 return read_only_space_->ContainsSlow(addr);
3979 }
3980 UNREACHABLE();
3981 }
3982
3983 bool Heap::IsValidAllocationSpace(AllocationSpace space) {
3984 switch (space) {
3985 case NEW_SPACE:
3986 case OLD_SPACE:
3987 case CODE_SPACE:
3988 case MAP_SPACE:
3989 case LO_SPACE:
3990 case NEW_LO_SPACE:
3991 case CODE_LO_SPACE:
3992 case RO_SPACE:
3993 return true;
3994 default:
3995 return false;
3996 }
3997 }
3998
3999 #ifdef VERIFY_HEAP
4000 void Heap::Verify() {
4001 CHECK(HasBeenSetUp());
4002 SafepointScope safepoint_scope(this);
4003 HandleScope scope(isolate());
4004
4005 MakeLocalHeapLabsIterable();
4006
4007 // We have to wait here for the sweeper threads to have an iterable heap.
4008 mark_compact_collector()->EnsureSweepingCompleted();
4009
4010 array_buffer_sweeper()->EnsureFinished();
4011
4012 VerifyPointersVisitor visitor(this);
4013 IterateRoots(&visitor, {});
4014
4015 if (!isolate()->context().is_null() &&
4016 !isolate()->normalized_map_cache()->IsUndefined(isolate())) {
4017 NormalizedMapCache::cast(*isolate()->normalized_map_cache())
4018 .NormalizedMapCacheVerify(isolate());
4019 }
4020
4021 // The heap verifier can't deal with partially deserialized objects, so
4022 // disable it if a deserializer is active.
4023 // TODO(leszeks): Enable verification during deserialization, e.g. by only
4024 // blocklisting objects that are in a partially deserialized state.
4025 if (isolate()->has_active_deserializer()) return;
4026
4027 VerifySmisVisitor smis_visitor;
4028 IterateSmiRoots(&smis_visitor);
4029
4030 new_space_->Verify(isolate());
4031
4032 old_space_->Verify(isolate(), &visitor);
4033 map_space_->Verify(isolate(), &visitor);
4034
4035 VerifyPointersVisitor no_dirty_regions_visitor(this);
4036 code_space_->Verify(isolate(), &no_dirty_regions_visitor);
4037
4038 lo_space_->Verify(isolate());
4039 code_lo_space_->Verify(isolate());
4040 new_lo_space_->Verify(isolate());
4041 VerifyStringTable(isolate());
4042 }
4043
4044 void Heap::VerifyReadOnlyHeap() {
4045 CHECK(!read_only_space_->writable());
4046 read_only_space_->Verify(isolate());
4047 }
4048
4049 class SlotVerifyingVisitor : public ObjectVisitor {
4050 public:
4051 SlotVerifyingVisitor(std::set<Address>* untyped,
4052 std::set<std::pair<SlotType, Address> >* typed)
4053 : untyped_(untyped), typed_(typed) {}
4054
4055 virtual bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) = 0;
4056
4057 void VisitPointers(HeapObject host, ObjectSlot start,
4058 ObjectSlot end) override {
4059 #ifdef DEBUG
4060 for (ObjectSlot slot = start; slot < end; ++slot) {
4061 DCHECK(!HasWeakHeapObjectTag(*slot));
4062 }
4063 #endif // DEBUG
4064 VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
4065 }
4066
4067 void VisitPointers(HeapObject host, MaybeObjectSlot start,
4068 MaybeObjectSlot end) final {
4069 for (MaybeObjectSlot slot = start; slot < end; ++slot) {
4070 if (ShouldHaveBeenRecorded(host, *slot)) {
4071 CHECK_GT(untyped_->count(slot.address()), 0);
4072 }
4073 }
4074 }
4075
4076 void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
4077 Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
4078 if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
4079 CHECK(
4080 InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
4081 (rinfo->IsInConstantPool() &&
4082 InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
4083 }
4084 }
4085
4086 void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4087 Object target = rinfo->target_object();
4088 if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
4089 CHECK(
4090 InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
4091 InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
4092 (rinfo->IsInConstantPool() &&
4093 InTypedSet(COMPRESSED_OBJECT_SLOT,
4094 rinfo->constant_pool_entry_address())) ||
4095 (rinfo->IsInConstantPool() &&
4096 InTypedSet(FULL_OBJECT_SLOT, rinfo->constant_pool_entry_address())));
4097 }
4098 }
4099
4100 protected:
4101 bool InUntypedSet(ObjectSlot slot) {
4102 return untyped_->count(slot.address()) > 0;
4103 }
4104
4105 private:
4106 bool InTypedSet(SlotType type, Address slot) {
4107 return typed_->count(std::make_pair(type, slot)) > 0;
4108 }
4109 std::set<Address>* untyped_;
4110 std::set<std::pair<SlotType, Address> >* typed_;
4111 };
4112
4113 class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
4114 public:
4115 OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
4116 std::set<std::pair<SlotType, Address>>* typed,
4117 EphemeronRememberedSet* ephemeron_remembered_set)
4118 : SlotVerifyingVisitor(untyped, typed),
4119 ephemeron_remembered_set_(ephemeron_remembered_set) {}
4120
4121 bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
4122 DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InYoungGeneration(target),
4123 Heap::InToPage(target));
4124 return target->IsStrongOrWeak() && Heap::InYoungGeneration(target) &&
4125 !Heap::InYoungGeneration(host);
4126 }
4127
4128 void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
4129 ObjectSlot target) override {
4130 VisitPointer(host, target);
4131 #ifdef ENABLE_MINOR_MC
4132 if (FLAG_minor_mc) return VisitPointer(host, target);
4133 #endif
4134 // Keys are handled separately and should never appear in this set.
4135 CHECK(!InUntypedSet(key));
4136 Object k = *key;
4137 if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
4138 EphemeronHashTable table = EphemeronHashTable::cast(host);
4139 auto it = ephemeron_remembered_set_->find(table);
4140 CHECK(it != ephemeron_remembered_set_->end());
4141 int slot_index =
4142 EphemeronHashTable::SlotToIndex(table.address(), key.address());
4143 InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
4144 CHECK(it->second.find(entry.as_int()) != it->second.end());
4145 }
4146 }
4147
4148 private:
4149 EphemeronRememberedSet* ephemeron_remembered_set_;
4150 };
4151
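// Collects the addresses of all untyped and typed slots recorded for |chunk|
// within [start, end) into the given sets, so VerifyRememberedSetFor can check
// them against the slots found when re-visiting the object body.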
4152 template <RememberedSetType direction>
4153 void CollectSlots(MemoryChunk* chunk, Address start, Address end,
4154 std::set<Address>* untyped,
4155 std::set<std::pair<SlotType, Address> >* typed) {
4156 RememberedSet<direction>::Iterate(
4157 chunk,
4158 [start, end, untyped](MaybeObjectSlot slot) {
4159 if (start <= slot.address() && slot.address() < end) {
4160 untyped->insert(slot.address());
4161 }
4162 return KEEP_SLOT;
4163 },
4164 SlotSet::FREE_EMPTY_BUCKETS);
4165 if (direction == OLD_TO_NEW) {
4166 CHECK(chunk->SweepingDone());
4167 RememberedSetSweeping::Iterate(
4168 chunk,
4169 [start, end, untyped](MaybeObjectSlot slot) {
4170 if (start <= slot.address() && slot.address() < end) {
4171 untyped->insert(slot.address());
4172 }
4173 return KEEP_SLOT;
4174 },
4175 SlotSet::FREE_EMPTY_BUCKETS);
4176 }
4177 RememberedSet<direction>::IterateTyped(
4178 chunk, [=](SlotType type, Address slot) {
4179 if (start <= slot && slot < end) {
4180 typed->insert(std::make_pair(type, slot));
4181 }
4182 return KEEP_SLOT;
4183 });
4184 }
4185
4186 void Heap::VerifyRememberedSetFor(HeapObject object) {
4187 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
4188 DCHECK_IMPLIES(chunk->mutex() == nullptr, ReadOnlyHeap::Contains(object));
4189 // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
4190 base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
4191 chunk->mutex());
4192 Address start = object.address();
4193 Address end = start + object.Size();
4194 std::set<Address> old_to_new;
4195 std::set<std::pair<SlotType, Address> > typed_old_to_new;
4196 if (!InYoungGeneration(object)) {
4197 CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
4198 OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new,
4199 &this->ephemeron_remembered_set_);
4200 object.IterateBody(&visitor);
4201 }
4202 // TODO(ulan): Add old to old slot set verification once all weak objects
4203 // have their own instance types and slots are recorded for all weak fields.
4204 }
4205 #endif
4206
4207 #ifdef DEBUG
4208 void Heap::VerifyCountersAfterSweeping() {
4209 MakeLocalHeapLabsIterable();
4210
4211 PagedSpaceIterator spaces(this);
4212 for (PagedSpace* space = spaces.Next(); space != nullptr;
4213 space = spaces.Next()) {
4214 space->VerifyCountersAfterSweeping(this);
4215 }
4216 }
4217
4218 void Heap::VerifyCountersBeforeConcurrentSweeping() {
4219 PagedSpaceIterator spaces(this);
4220 for (PagedSpace* space = spaces.Next(); space != nullptr;
4221 space = spaces.Next()) {
4222 space->VerifyCountersBeforeConcurrentSweeping();
4223 }
4224 }
4225 #endif
4226
4227 void Heap::ZapFromSpace() {
4228 if (!new_space_->IsFromSpaceCommitted()) return;
4229 for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
4230 memory_allocator()->ZapBlock(page->area_start(),
4231 page->HighWaterMark() - page->area_start(),
4232 ZapValue());
4233 }
4234 }
4235
4236 void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
4237 #ifdef DEBUG
4238 DCHECK(IsAligned(start_address, kIntSize));
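// Overwrite the body with kCodeZapValue one int at a time so stale code is
// easy to recognize; the whole function is a no-op outside DEBUG builds.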
4239 for (int i = 0; i < size_in_bytes / kIntSize; i++) {
4240 Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
4241 }
4242 #endif
4243 }
4244
4245 // TODO(ishell): move builtin accessors out from Heap.
4246 Code Heap::builtin(int index) {
4247 DCHECK(Builtins::IsBuiltinId(index));
4248 return Code::cast(Object(isolate()->builtins_table()[index]));
4249 }
4250
4251 Address Heap::builtin_address(int index) {
4252 DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
4253 return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
4254 }
4255
4256 void Heap::set_builtin(int index, Code builtin) {
4257 DCHECK(Builtins::IsBuiltinId(index));
4258 DCHECK(Internals::HasHeapObjectTag(builtin.ptr()));
4259 // The given builtin may be completely uninitialized thus we cannot check its
4260 // type here.
4261 isolate()->builtins_table()[index] = builtin.ptr();
4262 }
4263
4264 void Heap::IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
4265 DCHECK(!options.contains(SkipRoot::kWeak));
4266
4267 if (!options.contains(SkipRoot::kOldGeneration) &&
4268 !options.contains(SkipRoot::kUnserializable)) {
4269 // Do not visit for serialization, since the string table is custom
4270 // serialized. Also do not visit if we are skipping old generation.
4271 isolate()->string_table()->IterateElements(v);
4272 }
4273 v->Synchronize(VisitorSynchronization::kStringTable);
4274 if (!options.contains(SkipRoot::kExternalStringTable) &&
4275 !options.contains(SkipRoot::kUnserializable)) {
4276 // Scavenge collections have special processing for this.
4277 // Do not visit for serialization, since the external string table will
4278 // be populated from scratch upon deserialization.
4279 external_string_table_.IterateAll(v);
4280 }
4281 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4282 }
4283
4284 void Heap::IterateSmiRoots(RootVisitor* v) {
4285 // Acquire execution access since we are going to read stack limit values.
4286 ExecutionAccess access(isolate());
4287 v->VisitRootPointers(Root::kSmiRootList, nullptr,
4288 roots_table().smi_roots_begin(),
4289 roots_table().smi_roots_end());
4290 v->Synchronize(VisitorSynchronization::kSmiRootList);
4291 }
4292
4293 // We cannot avoid stale handles to left-trimmed objects, but can only make
4294 // sure all handles still needed are updated. Filter out a stale pointer
4295 // and clear the slot to allow post processing of handles (needed because
4296 // the sweeper might actually free the underlying page).
4297 class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
4298 public:
4299 explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
4300 USE(heap_);
4301 }
4302
4303 void VisitRootPointer(Root root, const char* description,
4304 FullObjectSlot p) override {
4305 FixHandle(p);
4306 }
4307
4308 void VisitRootPointers(Root root, const char* description,
4309 FullObjectSlot start, FullObjectSlot end) override {
4310 for (FullObjectSlot p = start; p < end; ++p) FixHandle(p);
4311 }
4312
4313 private:
4314 inline void FixHandle(FullObjectSlot p) {
4315 if (!(*p).IsHeapObject()) return;
4316 HeapObject current = HeapObject::cast(*p);
4317 if (!current.map_word().IsForwardingAddress() &&
4318 current.IsFreeSpaceOrFiller()) {
4319 #ifdef DEBUG
4320 // We need to find a FixedArrayBase map after walking the fillers.
4321 while (!current.map_word().IsForwardingAddress() &&
4322 current.IsFreeSpaceOrFiller()) {
4323 Address next = current.ptr();
4324 if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
4325 next += kTaggedSize;
4326 } else if (current.map() ==
4327 ReadOnlyRoots(heap_).two_pointer_filler_map()) {
4328 next += 2 * kTaggedSize;
4329 } else {
4330 next += current.Size();
4331 }
4332 current = HeapObject::cast(Object(next));
4333 }
4334 DCHECK(current.map_word().IsForwardingAddress() ||
4335 current.IsFixedArrayBase());
4336 #endif // DEBUG
4337 p.store(Smi::zero());
4338 }
4339 }
4340
4341 Heap* heap_;
4342 };
4343
4344 void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
4345 v->VisitRootPointers(Root::kStrongRootList, nullptr,
4346 roots_table().strong_roots_begin(),
4347 roots_table().strong_roots_end());
4348 v->Synchronize(VisitorSynchronization::kStrongRootList);
4349
4350 isolate_->bootstrapper()->Iterate(v);
4351 v->Synchronize(VisitorSynchronization::kBootstrapper);
4352 Relocatable::Iterate(isolate_, v);
4353 v->Synchronize(VisitorSynchronization::kRelocatable);
4354 isolate_->debug()->Iterate(v);
4355 v->Synchronize(VisitorSynchronization::kDebug);
4356
4357 isolate_->compilation_cache()->Iterate(v);
4358 v->Synchronize(VisitorSynchronization::kCompilationCache);
4359
4360 if (!options.contains(SkipRoot::kOldGeneration)) {
4361 IterateBuiltins(v);
4362 v->Synchronize(VisitorSynchronization::kBuiltins);
4363 }
4364
4365 // Iterate over pointers being held by inactive threads.
4366 isolate_->thread_manager()->Iterate(v);
4367 v->Synchronize(VisitorSynchronization::kThreadManager);
4368
4369 // Visitors in this block only run when not serializing. These include:
4370 //
4371 // - Thread-local and stack.
4372 // - Handles.
4373 // - Microtasks.
4374 // - The startup object cache.
4375 //
4376 // When creating a real startup snapshot, these areas are expected to be empty.
4377 // It is also possible to create a snapshot of a *running* isolate for testing
4378 // purposes. In this case, these areas are likely not empty and will simply be
4379 // skipped.
4380 //
4381 // The general guideline for adding visitors to this section vs. adding them
4382 // above is that non-transient heap state is always visited, transient heap
4383 // state is visited only when not serializing.
4384 if (!options.contains(SkipRoot::kUnserializable)) {
4385 if (!options.contains(SkipRoot::kGlobalHandles)) {
4386 if (options.contains(SkipRoot::kWeak)) {
4387 if (options.contains(SkipRoot::kOldGeneration)) {
4388 // Skip handles that are either weak or old.
4389 isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
4390 } else {
4391 // Skip handles that are weak.
4392 isolate_->global_handles()->IterateStrongRoots(v);
4393 }
4394 } else {
4395 // Do not skip weak handles.
4396 if (options.contains(SkipRoot::kOldGeneration)) {
4397 // Skip handles that are old.
4398 isolate_->global_handles()->IterateAllYoungRoots(v);
4399 } else {
4400 // Do not skip any handles.
4401 isolate_->global_handles()->IterateAllRoots(v);
4402 }
4403 }
4404 }
4405 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4406
4407 if (!options.contains(SkipRoot::kStack)) {
4408 IterateStackRoots(v);
4409 v->Synchronize(VisitorSynchronization::kTop);
4410 }
4411
4412 // Iterate over local handles in handle scopes.
4413 FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
4414 #ifndef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
4415 isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
4416 isolate_->handle_scope_implementer()->Iterate(v);
4417 #endif
4418
4419 if (FLAG_local_heaps) {
4420 safepoint_->Iterate(&left_trim_visitor);
4421 safepoint_->Iterate(v);
4422 }
4423
4424 isolate_->persistent_handles_list()->Iterate(&left_trim_visitor, isolate_);
4425 isolate_->persistent_handles_list()->Iterate(v, isolate_);
4426
4427 v->Synchronize(VisitorSynchronization::kHandleScope);
4428
4429 if (options.contains(SkipRoot::kOldGeneration)) {
4430 isolate_->eternal_handles()->IterateYoungRoots(v);
4431 } else {
4432 isolate_->eternal_handles()->IterateAllRoots(v);
4433 }
4434 v->Synchronize(VisitorSynchronization::kEternalHandles);
4435
4436 // Iterate over pending Microtasks stored in MicrotaskQueues.
4437 MicrotaskQueue* default_microtask_queue =
4438 isolate_->default_microtask_queue();
4439 if (default_microtask_queue) {
4440 MicrotaskQueue* microtask_queue = default_microtask_queue;
4441 do {
4442 microtask_queue->IterateMicrotasks(v);
4443 microtask_queue = microtask_queue->next();
4444 } while (microtask_queue != default_microtask_queue);
4445 }
4446
4447 // Iterate over other strong roots (currently only identity maps and
4448 // deoptimization entries).
4449 for (StrongRootsEntry* current = strong_roots_head_; current;
4450 current = current->next) {
4451 v->VisitRootPointers(Root::kStrongRoots, nullptr, current->start,
4452 current->end);
4453 }
4454 v->Synchronize(VisitorSynchronization::kStrongRoots);
4455
4456 // Iterate over the startup object cache unless serializing or
4457 // deserializing.
4458 SerializerDeserializer::Iterate(isolate_, v);
4459 v->Synchronize(VisitorSynchronization::kStartupObjectCache);
4460 }
4461
4462 if (!options.contains(SkipRoot::kWeak)) {
4463 IterateWeakRoots(v, options);
4464 }
4465 }
4466
4467 void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
4468 isolate_->global_handles()->IterateWeakRoots(v);
4469 }
4470
4471 void Heap::IterateBuiltins(RootVisitor* v) {
4472 for (int i = 0; i < Builtins::builtin_count; i++) {
4473 v->VisitRootPointer(Root::kBuiltins, Builtins::name(i),
4474 FullObjectSlot(builtin_address(i)));
4475 }
4476
4477 // The entry table doesn't need to be updated since all builtins are embedded.
4478 STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
4479 }
4480
4481 void Heap::IterateStackRoots(RootVisitor* v) {
4482 isolate_->Iterate(v);
4483 isolate_->global_handles()->IterateStrongStackRoots(v);
4484 }
4485
4486 namespace {
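// Derives the combined (V8 + embedder) memory limit from a V8 heap size using a
// fixed 2x ratio, clamped to the size_t range; e.g. a 1 GB V8 limit maps to a
// 2 GB global limit.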
4487 size_t GlobalMemorySizeFromV8Size(size_t v8_size) {
4488 const size_t kGlobalMemoryToV8Ratio = 2;
4489 return Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
4490 static_cast<uint64_t>(v8_size) * kGlobalMemoryToV8Ratio);
4491 }
4492 } // anonymous namespace
4493
4494 void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
4495 // Initialize max_semi_space_size_.
4496 {
4497 max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
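// For example, this default evaluates to 16 MB on 64-bit targets
// (kSystemPointerSize == 8) and to 8 MB on 32-bit targets.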
4498 if (constraints.max_young_generation_size_in_bytes() > 0) {
4499 max_semi_space_size_ = SemiSpaceSizeFromYoungGenerationSize(
4500 constraints.max_young_generation_size_in_bytes());
4501 }
4502 if (FLAG_max_semi_space_size > 0) {
4503 max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
4504 } else if (FLAG_max_heap_size > 0) {
4505 size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
4506 size_t young_generation_size, old_generation_size;
4507 if (FLAG_max_old_space_size > 0) {
4508 old_generation_size = static_cast<size_t>(FLAG_max_old_space_size) * MB;
4509 young_generation_size = max_heap_size > old_generation_size
4510 ? max_heap_size - old_generation_size
4511 : 0;
4512 } else {
4513 GenerationSizesFromHeapSize(max_heap_size, &young_generation_size,
4514 &old_generation_size);
4515 }
4516 max_semi_space_size_ =
4517 SemiSpaceSizeFromYoungGenerationSize(young_generation_size);
4518 }
4519 if (FLAG_stress_compaction) {
4520 // This will cause more frequent GCs when stressing.
4521 max_semi_space_size_ = MB;
4522 }
4523 // TODO(dinfuehr): Rounding to a power of 2 is no longer needed. Remove it.
4524 max_semi_space_size_ =
4525 static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
4526 static_cast<uint64_t>(max_semi_space_size_)));
4527 max_semi_space_size_ = Max(max_semi_space_size_, kMinSemiSpaceSize);
4528 max_semi_space_size_ = RoundDown<Page::kPageSize>(max_semi_space_size_);
4529 }
4530
4531 // Initialize max_old_generation_size_ and max_global_memory_.
4532 {
4533 size_t max_old_generation_size = 700ul * (kSystemPointerSize / 4) * MB;
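// For example, this default evaluates to 1400 MB on 64-bit targets and to
// 700 MB on 32-bit targets.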
4534 if (constraints.max_old_generation_size_in_bytes() > 0) {
4535 max_old_generation_size = constraints.max_old_generation_size_in_bytes();
4536 }
4537 if (FLAG_max_old_space_size > 0) {
4538 max_old_generation_size =
4539 static_cast<size_t>(FLAG_max_old_space_size) * MB;
4540 } else if (FLAG_max_heap_size > 0) {
4541 size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
4542 size_t young_generation_size =
4543 YoungGenerationSizeFromSemiSpaceSize(max_semi_space_size_);
4544 max_old_generation_size = max_heap_size > young_generation_size
4545 ? max_heap_size - young_generation_size
4546 : 0;
4547 }
4548 max_old_generation_size =
4549 Max(max_old_generation_size, MinOldGenerationSize());
4550 max_old_generation_size =
4551 Min(max_old_generation_size, AllocatorLimitOnMaxOldGenerationSize());
4552 max_old_generation_size =
4553 RoundDown<Page::kPageSize>(max_old_generation_size);
4554
4555 max_global_memory_size_ =
4556 GlobalMemorySizeFromV8Size(max_old_generation_size);
4557 set_max_old_generation_size(max_old_generation_size);
4558 }
4559
4560 CHECK_IMPLIES(FLAG_max_heap_size > 0,
4561 FLAG_max_semi_space_size == 0 || FLAG_max_old_space_size == 0);
4562
4563 // Initialize initial_semispace_size_.
4564 {
4565 initial_semispace_size_ = kMinSemiSpaceSize;
4566 if (max_semi_space_size_ == kMaxSemiSpaceSize) {
4567 // Start with at least 1*MB semi-space on machines with a lot of memory.
4568 initial_semispace_size_ =
4569 Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
4570 }
4571 if (constraints.initial_young_generation_size_in_bytes() > 0) {
4572 initial_semispace_size_ = SemiSpaceSizeFromYoungGenerationSize(
4573 constraints.initial_young_generation_size_in_bytes());
4574 }
4575 if (FLAG_initial_heap_size > 0) {
4576 size_t young_generation, old_generation;
4577 Heap::GenerationSizesFromHeapSize(
4578 static_cast<size_t>(FLAG_initial_heap_size) * MB, &young_generation,
4579 &old_generation);
4580 initial_semispace_size_ =
4581 SemiSpaceSizeFromYoungGenerationSize(young_generation);
4582 }
4583 if (FLAG_min_semi_space_size > 0) {
4584 initial_semispace_size_ =
4585 static_cast<size_t>(FLAG_min_semi_space_size) * MB;
4586 }
4587 initial_semispace_size_ =
4588 Min(initial_semispace_size_, max_semi_space_size_);
4589 initial_semispace_size_ =
4590 RoundDown<Page::kPageSize>(initial_semispace_size_);
4591 }
4592
4593 if (FLAG_lazy_new_space_shrinking) {
4594 initial_semispace_size_ = max_semi_space_size_;
4595 }
4596
4597 // Initialize initial_old_space_size_.
4598 {
4599 initial_old_generation_size_ = kMaxInitialOldGenerationSize;
4600 if (constraints.initial_old_generation_size_in_bytes() > 0) {
4601 initial_old_generation_size_ =
4602 constraints.initial_old_generation_size_in_bytes();
4603 old_generation_size_configured_ = true;
4604 }
4605 if (FLAG_initial_heap_size > 0) {
4606 size_t initial_heap_size =
4607 static_cast<size_t>(FLAG_initial_heap_size) * MB;
4608 size_t young_generation_size =
4609 YoungGenerationSizeFromSemiSpaceSize(initial_semispace_size_);
4610 initial_old_generation_size_ =
4611 initial_heap_size > young_generation_size
4612 ? initial_heap_size - young_generation_size
4613 : 0;
4614 old_generation_size_configured_ = true;
4615 }
4616 if (FLAG_initial_old_space_size > 0) {
4617 initial_old_generation_size_ =
4618 static_cast<size_t>(FLAG_initial_old_space_size) * MB;
4619 old_generation_size_configured_ = true;
4620 }
4621 initial_old_generation_size_ =
4622 Min(initial_old_generation_size_, max_old_generation_size() / 2);
4623 initial_old_generation_size_ =
4624 RoundDown<Page::kPageSize>(initial_old_generation_size_);
4625 }
4626
4627 if (old_generation_size_configured_) {
4628 // If the embedder pre-configures the initial old generation size,
4629 // then allow V8 to skip full GCs below that threshold.
4630 min_old_generation_size_ = initial_old_generation_size_;
4631 min_global_memory_size_ =
4632 GlobalMemorySizeFromV8Size(min_old_generation_size_);
4633 }
4634
4635 if (FLAG_semi_space_growth_factor < 2) {
4636 FLAG_semi_space_growth_factor = 2;
4637 }
4638
4639 set_old_generation_allocation_limit(initial_old_generation_size_);
4640 global_allocation_limit_ =
4641 GlobalMemorySizeFromV8Size(old_generation_allocation_limit());
4642 initial_max_old_generation_size_ = max_old_generation_size();
4643
4644 // We rely on being able to allocate new arrays in paged spaces.
4645 DCHECK(kMaxRegularHeapObjectSize >=
4646 (JSArray::kHeaderSize +
4647 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
4648 AllocationMemento::kSize));
4649
4650 code_range_size_ = constraints.code_range_size_in_bytes();
4651
4652 configured_ = true;
4653 }
4654
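// Appends a message to the fixed-size trace ring buffer. If the string does not
// fit at the end, the remainder wraps around to the start and the buffer is
// marked as full.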
4655 void Heap::AddToRingBuffer(const char* string) {
4656 size_t first_part =
4657 Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
4658 memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
4659 ring_buffer_end_ += first_part;
4660 if (first_part < strlen(string)) {
4661 ring_buffer_full_ = true;
4662 size_t second_part = strlen(string) - first_part;
4663 memcpy(trace_ring_buffer_, string + first_part, second_part);
4664 ring_buffer_end_ = second_part;
4665 }
4666 }
4667
4668
4669 void Heap::GetFromRingBuffer(char* buffer) {
4670 size_t copied = 0;
4671 if (ring_buffer_full_) {
4672 copied = kTraceRingBufferSize - ring_buffer_end_;
4673 memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
4674 }
4675 memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
4676 }
4677
4678 void Heap::ConfigureHeapDefault() {
4679 v8::ResourceConstraints constraints;
4680 ConfigureHeap(constraints);
4681 }
4682
4683 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4684 *stats->start_marker = HeapStats::kStartMarker;
4685 *stats->end_marker = HeapStats::kEndMarker;
4686 *stats->ro_space_size = read_only_space_->Size();
4687 *stats->ro_space_capacity = read_only_space_->Capacity();
4688 *stats->new_space_size = new_space_->Size();
4689 *stats->new_space_capacity = new_space_->Capacity();
4690 *stats->old_space_size = old_space_->SizeOfObjects();
4691 *stats->old_space_capacity = old_space_->Capacity();
4692 *stats->code_space_size = code_space_->SizeOfObjects();
4693 *stats->code_space_capacity = code_space_->Capacity();
4694 *stats->map_space_size = map_space_->SizeOfObjects();
4695 *stats->map_space_capacity = map_space_->Capacity();
4696 *stats->lo_space_size = lo_space_->Size();
4697 *stats->code_lo_space_size = code_lo_space_->Size();
4698 isolate_->global_handles()->RecordStats(stats);
4699 *stats->memory_allocator_size = memory_allocator()->Size();
4700 *stats->memory_allocator_capacity =
4701 memory_allocator()->Size() + memory_allocator()->Available();
4702 *stats->os_error = base::OS::GetLastError();
4703 // TODO(leszeks): Include the string table in both current and peak usage.
4704 *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
4705 *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
4706 if (take_snapshot) {
4707 HeapObjectIterator iterator(this);
4708 for (HeapObject obj = iterator.Next(); !obj.is_null();
4709 obj = iterator.Next()) {
4710 InstanceType type = obj.map().instance_type();
4711 DCHECK(0 <= type && type <= LAST_TYPE);
4712 stats->objects_per_type[type]++;
4713 stats->size_per_type[type] += obj.Size();
4714 }
4715 }
4716 if (stats->last_few_messages != nullptr)
4717 GetFromRingBuffer(stats->last_few_messages);
4718 }
4719
4720 size_t Heap::OldGenerationSizeOfObjects() {
4721 PagedSpaceIterator spaces(this);
4722 size_t total = 0;
4723 for (PagedSpace* space = spaces.Next(); space != nullptr;
4724 space = spaces.Next()) {
4725 total += space->SizeOfObjects();
4726 }
4727 return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
4728 }
4729
4730 size_t Heap::GlobalSizeOfObjects() {
4731 const size_t on_heap_size = OldGenerationSizeOfObjects();
4732 const size_t embedder_size = local_embedder_heap_tracer()
4733 ? local_embedder_heap_tracer()->used_size()
4734 : 0;
4735 return on_heap_size + embedder_size;
4736 }
4737
4738 uint64_t Heap::AllocatedExternalMemorySinceMarkCompact() {
4739 return external_memory_.AllocatedSinceMarkCompact();
4740 }
4741
4742 bool Heap::AllocationLimitOvershotByLargeMargin() {
4743 // This guards against too eager finalization in small heaps.
4744 // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
4745 constexpr size_t kMarginForSmallHeaps = 32u * MB;
4746
4747 uint64_t size_now =
4748 OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
4749
4750 const size_t v8_overshoot = old_generation_allocation_limit() < size_now
4751 ? size_now - old_generation_allocation_limit()
4752 : 0;
4753 const size_t global_overshoot =
4754 global_allocation_limit_ < GlobalSizeOfObjects()
4755 ? GlobalSizeOfObjects() - global_allocation_limit_
4756 : 0;
4757
4758 // Bail out if the V8 and global sizes are still below their respective
4759 // limits.
4760 if (v8_overshoot == 0 && global_overshoot == 0) {
4761 return false;
4762 }
4763
4764 // Overshoot margin is 50% of allocation limit or half-way to the max heap
4765 // with special handling of small heaps.
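// For example, with a 256 MB allocation limit and a 512 MB max old generation,
// the margin is min(max(128 MB, 32 MB), 128 MB) = 128 MB.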
4766 const size_t v8_margin =
4767 Min(Max(old_generation_allocation_limit() / 2, kMarginForSmallHeaps),
4768 (max_old_generation_size() - old_generation_allocation_limit()) / 2);
4769 const size_t global_margin =
4770 Min(Max(global_allocation_limit_ / 2, kMarginForSmallHeaps),
4771 (max_global_memory_size_ - global_allocation_limit_) / 2);
4772
4773 return v8_overshoot >= v8_margin || global_overshoot >= global_margin;
4774 }
4775
4776 // static
4777 int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
4778 if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
4779 (allocation == AllocationType::kCode)) {
4780 return MemoryChunkLayout::MaxRegularCodeObjectSize();
4781 }
4782 return kMaxRegularHeapObjectSize;
4783 }
4784
4785 bool Heap::ShouldOptimizeForLoadTime() {
4786 return isolate()->rail_mode() == PERFORMANCE_LOAD &&
4787 !AllocationLimitOvershotByLargeMargin() &&
4788 MonotonicallyIncreasingTimeInMs() <
4789 isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
4790 }
4791
4792 // This predicate is called when an old generation space cannot allocate from
4793 // the free list and is about to add a new page. Returning false will cause a
4794 // major GC. It happens when the old generation allocation limit is reached and
4795 // - either we need to optimize for memory usage,
4796 // - or the incremental marking is not in progress and we cannot start it.
4797 bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
4798 if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
4799 // We reached the old generation allocation limit.
4800
4801 // Background threads need to be allowed to allocate without GC after teardown
4802 // was initiated.
4803 if (gc_state() == TEAR_DOWN) return true;
4804
4805 // Ensure that a retry of a failed allocation on a background thread succeeds.
4806 if (IsRetryOfFailedAllocation(local_heap)) return true;
4807
4808 // A background thread requested a GC; the allocation should fail.
4809 if (CollectionRequested()) return false;
4810
4811 if (ShouldOptimizeForMemoryUsage()) return false;
4812
4813 if (ShouldOptimizeForLoadTime()) return true;
4814
4815 if (incremental_marking()->NeedsFinalization()) {
4816 return !AllocationLimitOvershotByLargeMargin();
4817 }
4818
4819 if (incremental_marking()->IsStopped() &&
4820 IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
4821 // We cannot start incremental marking.
4822 return false;
4823 }
4824 return true;
4825 }
4826
4827 bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
4828 if (!local_heap) return false;
4829 return local_heap->allocation_failed_;
4830 }
4831
4832 Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
4833 if (ShouldReduceMemory() || FLAG_stress_compaction) {
4834 return Heap::HeapGrowingMode::kMinimal;
4835 }
4836
4837 if (ShouldOptimizeForMemoryUsage()) {
4838 return Heap::HeapGrowingMode::kConservative;
4839 }
4840
4841 if (memory_reducer()->ShouldGrowHeapSlowly()) {
4842 return Heap::HeapGrowingMode::kSlow;
4843 }
4844
4845 return Heap::HeapGrowingMode::kDefault;
4846 }
4847
4848 base::Optional<size_t> Heap::GlobalMemoryAvailable() {
4849 if (!UseGlobalMemoryScheduling()) return {};
4850
4851 size_t global_size = GlobalSizeOfObjects();
4852
4853 if (global_size < global_allocation_limit_)
4854 return global_allocation_limit_ - global_size;
4855
4856 return 0;
4857 }
4858
4859 double Heap::PercentToOldGenerationLimit() {
4860 double size_at_gc = old_generation_size_at_last_gc_;
4861 double size_now =
4862 OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
4863 double current_bytes = size_now - size_at_gc;
4864 double total_bytes = old_generation_allocation_limit() - size_at_gc;
4865 return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
4866 }
4867
4868 double Heap::PercentToGlobalMemoryLimit() {
4869 double size_at_gc = old_generation_size_at_last_gc_;
4870 double size_now =
4871 OldGenerationSizeOfObjects() + AllocatedExternalMemorySinceMarkCompact();
4872 double current_bytes = size_now - size_at_gc;
4873 double total_bytes = old_generation_allocation_limit() - size_at_gc;
4874 return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
4875 }
4876
4877 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
4878 // The kNoLimit means that either incremental marking is disabled or it is too
4879 // early to start incremental marking.
4880 // The kSoftLimit means that incremental marking should be started soon.
4881 // The kHardLimit means that incremental marking should be started immediately.
4882 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
4883 // Code using an AlwaysAllocateScope assumes that the GC state does not
4884 // change; that implies that no marking steps must be performed.
4885 if (!incremental_marking()->CanBeActivated() || always_allocate()) {
4886 // Incremental marking is disabled or it is too early to start.
4887 return IncrementalMarkingLimit::kNoLimit;
4888 }
4889 if (FLAG_stress_incremental_marking) {
4890 return IncrementalMarkingLimit::kHardLimit;
4891 }
4892 if (incremental_marking()->IsBelowActivationThresholds()) {
4893 // The heap is still below the activation thresholds, so it is too early to start.
4894 return IncrementalMarkingLimit::kNoLimit;
4895 }
4896 if (ShouldStressCompaction() || HighMemoryPressure()) {
4897 // If there is high memory pressure or stress testing is enabled, then
4898 // start marking immediately.
4899 return IncrementalMarkingLimit::kHardLimit;
4900 }
4901
4902 if (FLAG_stress_marking > 0) {
4903 int current_percent = static_cast<int>(
4904 std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
4905 if (current_percent > 0) {
4906 if (FLAG_trace_stress_marking) {
4907 isolate()->PrintWithTimestamp(
4908 "[IncrementalMarking] %d%% of the memory limit reached\n",
4909 current_percent);
4910 }
4911 if (FLAG_fuzzer_gc_analysis) {
4912 // Skips values >=100% since they already trigger marking.
4913 if (current_percent < 100) {
4914 max_marking_limit_reached_ =
4915 std::max<double>(max_marking_limit_reached_, current_percent);
4916 }
4917 } else if (current_percent >= stress_marking_percentage_) {
4918 stress_marking_percentage_ = NextStressMarkingLimit();
4919 return IncrementalMarkingLimit::kHardLimit;
4920 }
4921 }
4922 }
4923
4924 if (FLAG_incremental_marking_soft_trigger > 0 ||
4925 FLAG_incremental_marking_hard_trigger > 0) {
4926 int current_percent = static_cast<int>(
4927 std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
4928 if (current_percent > FLAG_incremental_marking_hard_trigger &&
4929 FLAG_incremental_marking_hard_trigger > 0) {
4930 return IncrementalMarkingLimit::kHardLimit;
4931 }
4932 if (current_percent > FLAG_incremental_marking_soft_trigger &&
4933 FLAG_incremental_marking_soft_trigger > 0) {
4934 return IncrementalMarkingLimit::kSoftLimit;
4935 }
4936 return IncrementalMarkingLimit::kNoLimit;
4937 }
4938
4939 size_t old_generation_space_available = OldGenerationSpaceAvailable();
4940 const base::Optional<size_t> global_memory_available =
4941 GlobalMemoryAvailable();
4942
4943 if (old_generation_space_available > new_space_->Capacity() &&
4944 (!global_memory_available ||
4945 global_memory_available > new_space_->Capacity())) {
4946 return IncrementalMarkingLimit::kNoLimit;
4947 }
4948 if (ShouldOptimizeForMemoryUsage()) {
4949 return IncrementalMarkingLimit::kHardLimit;
4950 }
4951 if (ShouldOptimizeForLoadTime()) {
4952 return IncrementalMarkingLimit::kNoLimit;
4953 }
4954 if (old_generation_space_available == 0) {
4955 return IncrementalMarkingLimit::kHardLimit;
4956 }
4957 if (global_memory_available && *global_memory_available == 0) {
4958 return IncrementalMarkingLimit::kHardLimit;
4959 }
4960 return IncrementalMarkingLimit::kSoftLimit;
4961 }
4962
4963 bool Heap::ShouldStressCompaction() const {
4964 return FLAG_stress_compaction && (gc_count_ & 1) != 0;
4965 }
4966
4967 void Heap::EnableInlineAllocation() {
4968 if (!inline_allocation_disabled_) return;
4969 inline_allocation_disabled_ = false;
4970
4971 // Update inline allocation limit for new space.
4972 new_space()->AdvanceAllocationObservers();
4973 new_space()->UpdateInlineAllocationLimit(0);
4974 }
4975
4976
4977 void Heap::DisableInlineAllocation() {
4978 if (inline_allocation_disabled_) return;
4979 inline_allocation_disabled_ = true;
4980
4981 // Update inline allocation limit for new space.
4982 new_space()->UpdateInlineAllocationLimit(0);
4983
4984 // Update inline allocation limit for old spaces.
4985 PagedSpaceIterator spaces(this);
4986 CodeSpaceMemoryModificationScope modification_scope(this);
4987 for (PagedSpace* space = spaces.Next(); space != nullptr;
4988 space = spaces.Next()) {
4989 base::MutexGuard guard(space->mutex());
4990 space->FreeLinearAllocationArea();
4991 }
4992 }
4993
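// Slow path for a failed raw allocation: retries after up to two garbage
// collections and returns a null HeapObject if the allocation still fails.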
4994 HeapObject Heap::AllocateRawWithLightRetrySlowPath(
4995 int size, AllocationType allocation, AllocationOrigin origin,
4996 AllocationAlignment alignment) {
4997 HeapObject result;
4998 AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
4999 if (alloc.To(&result)) {
5000 // DCHECK that the successful allocation is not "exception". The one
5001 // exception to this is when allocating the "exception" object itself, in
5002 // which case this must be an ROSpace allocation and the exception object
5003 // in the roots has to be unset.
5004 DCHECK((CanAllocateInReadOnlySpace() &&
5005 allocation == AllocationType::kReadOnly &&
5006 ReadOnlyRoots(this).unchecked_exception() == Smi::zero()) ||
5007 result != ReadOnlyRoots(this).exception());
5008 return result;
5009 }
5010 // Two GCs before panicking. In new space this will almost always succeed.
5011 for (int i = 0; i < 2; i++) {
5012 CollectGarbage(alloc.RetrySpace(),
5013 GarbageCollectionReason::kAllocationFailure);
5014 alloc = AllocateRaw(size, allocation, origin, alignment);
5015 if (alloc.To(&result)) {
5016 DCHECK(result != ReadOnlyRoots(this).exception());
5017 return result;
5018 }
5019 }
5020 return HeapObject();
5021 }
5022
5023 HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
5024 int size, AllocationType allocation, AllocationOrigin origin,
5025 AllocationAlignment alignment) {
5026 AllocationResult alloc;
5027 HeapObject result =
5028 AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
5029 if (!result.is_null()) return result;
5030
5031 isolate()->counters()->gc_last_resort_from_handles()->Increment();
5032 CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
5033 {
5034 AlwaysAllocateScope scope(this);
5035 alloc = AllocateRaw(size, allocation, origin, alignment);
5036 }
5037 if (alloc.To(&result)) {
5038 DCHECK(result != ReadOnlyRoots(this).exception());
5039 return result;
5040 }
5041 // TODO(1181417): Fix this.
5042 FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
5043 return HeapObject();
5044 }
5045
5046 void Heap::SetUp() {
5047 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
5048 allocation_timeout_ = NextAllocationTimeout();
5049 #endif
5050
5051 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
5052 tp_heap_ = third_party_heap::Heap::New(isolate());
5053 #endif
5054
5055 // Initialize heap spaces and initial maps and objects.
5056 //
5057 // If the heap is not yet configured (e.g. through the API), configure it.
5058 // Configuration is based on the flags new-space-size (really the semispace
5059 // size) and old-space-size if set or the initial values of semispace_size_
5060 // and old_generation_size_ otherwise.
5061 if (!configured_) ConfigureHeapDefault();
5062
5063 mmap_region_base_ =
5064 reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
5065 ~kMmapRegionMask;
5066
5067 // Set up memory allocator.
5068 memory_allocator_.reset(
5069 new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
5070
5071 mark_compact_collector_.reset(new MarkCompactCollector(this));
5072
5073 scavenger_collector_.reset(new ScavengerCollector(this));
5074
5075 incremental_marking_.reset(
5076 new IncrementalMarking(this, mark_compact_collector_->weak_objects()));
5077
5078 if (FLAG_concurrent_marking || FLAG_parallel_marking) {
5079 concurrent_marking_.reset(new ConcurrentMarking(
5080 this, mark_compact_collector_->marking_worklists(),
5081 mark_compact_collector_->weak_objects()));
5082 } else {
5083 concurrent_marking_.reset(new ConcurrentMarking(this, nullptr, nullptr));
5084 }
5085
5086 marking_barrier_.reset(new MarkingBarrier(this));
5087
5088 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
5089 space_[i] = nullptr;
5090 }
5091 }
5092
5093 void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
5094 DCHECK_NOT_NULL(ro_heap);
5095 DCHECK_IMPLIES(read_only_space_ != nullptr,
5096 read_only_space_ == ro_heap->read_only_space());
5097 space_[RO_SPACE] = nullptr;
5098 read_only_space_ = ro_heap->read_only_space();
5099 }
5100
5101 void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
5102 CHECK(V8_SHARED_RO_HEAP_BOOL);
5103 if (read_only_space_) {
5104 read_only_space_->TearDown(memory_allocator());
5105 delete read_only_space_;
5106 }
5107
5108 read_only_space_ = space;
5109 }
5110
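// Used with --stress-concurrent-allocation: after roughly 1 KB of allocation it
// schedules a StressConcurrentAllocatorTask and then removes itself from all
// spaces.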
5111 class StressConcurrentAllocationObserver : public AllocationObserver {
5112 public:
5113 explicit StressConcurrentAllocationObserver(Heap* heap)
5114 : AllocationObserver(1024), heap_(heap) {}
5115
5116 void Step(int bytes_allocated, Address, size_t) override {
5117 DCHECK(heap_->deserialization_complete());
5118 if (FLAG_stress_concurrent_allocation) {
5119 // Only schedule task if --stress-concurrent-allocation is enabled. This
5120 // allows tests to disable the flag even when the Isolate was already initialized.
5121 StressConcurrentAllocatorTask::Schedule(heap_->isolate());
5122 }
5123 heap_->RemoveAllocationObserversFromAllSpaces(this, this);
5124 heap_->need_to_remove_stress_concurrent_allocation_observer_ = false;
5125 }
5126
5127 private:
5128 Heap* heap_;
5129 };
5130
5131 void Heap::SetUpSpaces() {
5132 // Ensure SetUpFromReadOnlyHeap has been run.
5133 DCHECK_NOT_NULL(read_only_space_);
5134 space_[NEW_SPACE] = new_space_ =
5135 new NewSpace(this, memory_allocator_->data_page_allocator(),
5136 initial_semispace_size_, max_semi_space_size_);
5137 space_[OLD_SPACE] = old_space_ = new OldSpace(this);
5138 space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
5139 space_[MAP_SPACE] = map_space_ = new MapSpace(this);
5140 space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
5141 space_[NEW_LO_SPACE] = new_lo_space_ =
5142 new NewLargeObjectSpace(this, new_space_->Capacity());
5143 space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
5144
5145 for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
5146 i++) {
5147 deferred_counters_[i] = 0;
5148 }
5149
5150 tracer_.reset(new GCTracer(this));
5151 #ifdef ENABLE_MINOR_MC
5152 minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
5153 #else
5154 minor_mark_compact_collector_ = nullptr;
5155 #endif // ENABLE_MINOR_MC
5156 array_buffer_sweeper_.reset(new ArrayBufferSweeper(this));
5157 gc_idle_time_handler_.reset(new GCIdleTimeHandler());
5158 memory_measurement_.reset(new MemoryMeasurement(isolate()));
5159 memory_reducer_.reset(new MemoryReducer(this));
5160 if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
5161 live_object_stats_.reset(new ObjectStats(this));
5162 dead_object_stats_.reset(new ObjectStats(this));
5163 }
5164 local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
5165
5166 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5167 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5168
5169 mark_compact_collector()->SetUp();
5170 #ifdef ENABLE_MINOR_MC
5171 if (minor_mark_compact_collector() != nullptr) {
5172 minor_mark_compact_collector()->SetUp();
5173 }
5174 #endif // ENABLE_MINOR_MC
5175
5176 scavenge_job_.reset(new ScavengeJob());
5177 scavenge_task_observer_.reset(new ScavengeTaskObserver(
5178 this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
5179 new_space()->AddAllocationObserver(scavenge_task_observer_.get());
5180
5181 SetGetExternallyAllocatedMemoryInBytesCallback(
5182 DefaultGetExternallyAllocatedMemoryInBytesCallback);
5183
5184 if (FLAG_stress_marking > 0) {
5185 stress_marking_percentage_ = NextStressMarkingLimit();
5186 stress_marking_observer_ = new StressMarkingObserver(this);
5187 AddAllocationObserversToAllSpaces(stress_marking_observer_,
5188 stress_marking_observer_);
5189 }
5190 if (FLAG_stress_scavenge > 0) {
5191 stress_scavenge_observer_ = new StressScavengeObserver(this);
5192 new_space()->AddAllocationObserver(stress_scavenge_observer_);
5193 }
5194
5195 write_protect_code_memory_ = FLAG_write_protect_code_memory;
5196 }
5197
5198 void Heap::InitializeHashSeed() {
5199 DCHECK(!deserialization_complete_);
5200 uint64_t new_hash_seed;
5201 if (FLAG_hash_seed == 0) {
5202 int64_t rnd = isolate()->random_number_generator()->NextInt64();
5203 new_hash_seed = static_cast<uint64_t>(rnd);
5204 } else {
5205 new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
5206 }
5207 ReadOnlyRoots(this).hash_seed().copy_in(
5208 0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
5209 }
5210
5211 int Heap::NextAllocationTimeout(int current_timeout) {
5212 if (FLAG_random_gc_interval > 0) {
5213 // If the current timeout hasn't reached 0, the GC was caused by something
5214 // other than the --stress-atomic-gc flag, so don't update the timeout.
5215 if (current_timeout <= 0) {
5216 return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
5217 } else {
5218 return current_timeout;
5219 }
5220 }
5221 return FLAG_gc_interval;
5222 }
5223
5224 void Heap::PrintAllocationsHash() {
5225 uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
5226 PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
5227 }
5228
5229 void Heap::PrintMaxMarkingLimitReached() {
5230 PrintF("\n### Maximum marking limit reached = %.02lf\n",
5231 max_marking_limit_reached_);
5232 }
5233
5234 void Heap::PrintMaxNewSpaceSizeReached() {
5235 PrintF("\n### Maximum new space size reached = %.02lf\n",
5236 stress_scavenge_observer_->MaxNewSpaceSizeReached());
5237 }
5238
5239 int Heap::NextStressMarkingLimit() {
5240 return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
5241 }
5242
5243 void Heap::NotifyDeserializationComplete() {
5244 PagedSpaceIterator spaces(this);
5245 for (PagedSpace* s = spaces.Next(); s != nullptr; s = spaces.Next()) {
5246 if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
5247 #ifdef DEBUG
5248 // All pages right after bootstrapping must be marked as never-evacuate.
5249 for (Page* p : *s) {
5250 DCHECK(p->NeverEvacuate());
5251 }
5252 #endif // DEBUG
5253 }
5254
5255 if (FLAG_stress_concurrent_allocation) {
5256 stress_concurrent_allocation_observer_.reset(
5257 new StressConcurrentAllocationObserver(this));
5258 AddAllocationObserversToAllSpaces(
5259 stress_concurrent_allocation_observer_.get(),
5260 stress_concurrent_allocation_observer_.get());
5261 need_to_remove_stress_concurrent_allocation_observer_ = true;
5262 }
5263
5264 deserialization_complete_ = true;
5265 }
5266
5267 void Heap::NotifyBootstrapComplete() {
5268 // This function is invoked for each native context creation. We are
5269 // interested only in the first native context.
5270 if (old_generation_capacity_after_bootstrap_ == 0) {
5271 old_generation_capacity_after_bootstrap_ = OldGenerationCapacity();
5272 }
5273 }
5274
5275 void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
5276 MemoryChunk* chunk) {
5277 // Pages created during bootstrapping may contain immortal immovable objects.
5278 if (!deserialization_complete()) {
5279 chunk->MarkNeverEvacuate();
5280 }
5281 if (space == CODE_SPACE || space == CODE_LO_SPACE) {
5282 isolate()->AddCodeMemoryChunk(chunk);
5283 }
5284 const size_t kMemoryReducerActivationThreshold = 1 * MB;
5285 if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
5286 OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
5287 kMemoryReducerActivationThreshold &&
5288 FLAG_memory_reducer_for_small_heaps) {
5289 MemoryReducer::Event event;
5290 event.type = MemoryReducer::kPossibleGarbage;
5291 event.time_ms = MonotonicallyIncreasingTimeInMs();
5292 memory_reducer()->NotifyPossibleGarbage(event);
5293 }
5294 }
5295
5296 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
5297 DCHECK_EQ(gc_state(), HeapState::NOT_IN_GC);
5298 local_embedder_heap_tracer()->SetRemoteTracer(tracer);
5299 }
5300
5301 EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
5302 return local_embedder_heap_tracer()->remote_tracer();
5303 }
5304
5305 EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
5306 if (is_current_gc_forced()) {
5307 return EmbedderHeapTracer::TraceFlags::kForced;
5308 } else if (ShouldReduceMemory()) {
5309 return EmbedderHeapTracer::TraceFlags::kReduceMemory;
5310 }
5311 return EmbedderHeapTracer::TraceFlags::kNoFlags;
5312 }
5313
5314 void Heap::RegisterExternallyReferencedObject(Address* location) {
5315 GlobalHandles::MarkTraced(location);
5316 Object object(*location);
5317 if (!object.IsHeapObject()) {
5318 // The embedder is not aware of whether numbers are materialized as heap
5319 // objects or are just passed around as Smis.
5320 return;
5321 }
5322 HeapObject heap_object = HeapObject::cast(object);
5323 DCHECK(IsValidHeapObject(this, heap_object));
5324 if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
5325 incremental_marking()->WhiteToGreyAndPush(heap_object);
5326 } else {
5327 DCHECK(mark_compact_collector()->in_use());
5328 mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
5329 }
5330 }
5331
5332 void Heap::StartTearDown() {
5333 SetGCState(TEAR_DOWN);
5334
5335 // Background threads may allocate and block until GC is performed. However
5336 // this might never happen when the main thread tries to quit and doesn't
5337 // process the event queue anymore. Avoid this deadlock by allowing all
5338 // allocations after tear down was requested to make sure all background
5339 // threads finish.
5340 collection_barrier_->ShutdownRequested();
5341
5342 #ifdef VERIFY_HEAP
5343 // {StartTearDown} is called fairly early during Isolate teardown, so it's
5344 // a good time to run heap verification (if requested), before starting to
5345 // tear down parts of the Isolate.
5346 if (FLAG_verify_heap) {
5347 SafepointScope scope(this);
5348 Verify();
5349 }
5350 #endif
5351 }
5352
5353 void Heap::TearDown() {
5354 DCHECK_EQ(gc_state(), TEAR_DOWN);
5355
5356 if (FLAG_concurrent_marking || FLAG_parallel_marking)
5357 concurrent_marking_->Pause();
5358
5359 // It's too late for Heap::Verify() here, as parts of the Isolate are
5360 // already gone by the time this is called.
5361
5362 UpdateMaximumCommitted();
5363
5364 if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
5365 PrintAllocationsHash();
5366 }
5367
5368 if (FLAG_fuzzer_gc_analysis) {
5369 if (FLAG_stress_marking > 0) {
5370 PrintMaxMarkingLimitReached();
5371 }
5372 if (FLAG_stress_scavenge > 0) {
5373 PrintMaxNewSpaceSizeReached();
5374 }
5375 }
5376
5377 new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
5378 scavenge_task_observer_.reset();
5379 scavenge_job_.reset();
5380
5381 if (need_to_remove_stress_concurrent_allocation_observer_) {
5382 RemoveAllocationObserversFromAllSpaces(
5383 stress_concurrent_allocation_observer_.get(),
5384 stress_concurrent_allocation_observer_.get());
5385 }
5386 stress_concurrent_allocation_observer_.reset();
5387
5388 if (FLAG_stress_marking > 0) {
5389 RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
5390 stress_marking_observer_);
5391 delete stress_marking_observer_;
5392 stress_marking_observer_ = nullptr;
5393 }
5394 if (FLAG_stress_scavenge > 0) {
5395 new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
5396 delete stress_scavenge_observer_;
5397 stress_scavenge_observer_ = nullptr;
5398 }
5399
5400 if (mark_compact_collector_) {
5401 mark_compact_collector_->TearDown();
5402 mark_compact_collector_.reset();
5403 }
5404
5405 #ifdef ENABLE_MINOR_MC
5406 if (minor_mark_compact_collector_ != nullptr) {
5407 minor_mark_compact_collector_->TearDown();
5408 delete minor_mark_compact_collector_;
5409 minor_mark_compact_collector_ = nullptr;
5410 }
5411 #endif // ENABLE_MINOR_MC
5412
5413 scavenger_collector_.reset();
5414 array_buffer_sweeper_.reset();
5415 incremental_marking_.reset();
5416 concurrent_marking_.reset();
5417
5418 gc_idle_time_handler_.reset();
5419
5420 memory_measurement_.reset();
5421
5422 if (memory_reducer_ != nullptr) {
5423 memory_reducer_->TearDown();
5424 memory_reducer_.reset();
5425 }
5426
5427 live_object_stats_.reset();
5428 dead_object_stats_.reset();
5429
5430 local_embedder_heap_tracer_.reset();
5431
5432 external_string_table_.TearDown();
5433
5434 tracer_.reset();
5435
5436 for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
5437 delete space_[i];
5438 space_[i] = nullptr;
5439 }
5440
5441 isolate()->read_only_heap()->OnHeapTearDown(this);
5442 read_only_space_ = nullptr;
5443
5444 memory_allocator()->TearDown();
5445
5446 StrongRootsEntry* next = nullptr;
5447 for (StrongRootsEntry* current = strong_roots_head_; current;
5448 current = next) {
5449 next = current->next;
5450 delete current;
5451 }
5452 strong_roots_head_ = nullptr;
5453
5454 memory_allocator_.reset();
5455 }
5456
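// GC prologue/epilogue callbacks are stored as (callback, gc_type, data) tuples;
// the DCHECKs below require each tuple to be registered at most once. Embedders
// typically register these through the corresponding v8::Isolate callback APIs.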
5457 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
5458 GCType gc_type, void* data) {
5459 DCHECK_NOT_NULL(callback);
5460 DCHECK(gc_prologue_callbacks_.end() ==
5461 std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
5462 GCCallbackTuple(callback, gc_type, data)));
5463 gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
5464 }
5465
5466 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
5467 void* data) {
5468 DCHECK_NOT_NULL(callback);
5469 for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
5470 if (gc_prologue_callbacks_[i].callback == callback &&
5471 gc_prologue_callbacks_[i].data == data) {
5472 gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
5473 gc_prologue_callbacks_.pop_back();
5474 return;
5475 }
5476 }
5477 UNREACHABLE();
5478 }
5479
5480 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
5481 GCType gc_type, void* data) {
5482 DCHECK_NOT_NULL(callback);
5483 DCHECK(gc_epilogue_callbacks_.end() ==
5484 std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
5485 GCCallbackTuple(callback, gc_type, data)));
5486 gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
5487 }
5488
5489 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
5490 void* data) {
5491 DCHECK_NOT_NULL(callback);
5492 for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
5493 if (gc_epilogue_callbacks_[i].callback == callback &&
5494 gc_epilogue_callbacks_[i].data == data) {
5495 gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
5496 gc_epilogue_callbacks_.pop_back();
5497 return;
5498 }
5499 }
5500 UNREACHABLE();
5501 }
5502
5503 namespace {
5504 Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
5505 Handle<WeakArrayList> array,
5506 AllocationType allocation) {
5507 if (array->length() == 0) {
5508 return array;
5509 }
5510 int new_length = array->CountLiveWeakReferences();
5511 if (new_length == array->length()) {
5512 return array;
5513 }
5514
5515 Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
5516 heap->isolate(),
5517 handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
5518 new_length, allocation);
5519 // Allocation might have caused GC and turned some of the elements into
5520 // cleared weak heap objects. Count the number of live references again and
5521 // fill in the new array.
5522 int copy_to = 0;
5523 for (int i = 0; i < array->length(); i++) {
5524 MaybeObject element = array->Get(i);
5525 if (element->IsCleared()) continue;
5526 new_array->Set(copy_to++, element);
5527 }
5528 new_array->set_length(copy_to);
5529 return new_array;
5530 }
5531
5532 } // anonymous namespace
5533
5534 void Heap::CompactWeakArrayLists(AllocationType allocation) {
5535 // Find known PrototypeUsers and compact them.
5536 std::vector<Handle<PrototypeInfo>> prototype_infos;
5537 {
5538 HeapObjectIterator iterator(this);
5539 for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
5540 if (o.IsPrototypeInfo()) {
5541 PrototypeInfo prototype_info = PrototypeInfo::cast(o);
5542 if (prototype_info.prototype_users().IsWeakArrayList()) {
5543 prototype_infos.emplace_back(handle(prototype_info, isolate()));
5544 }
5545 }
5546 }
5547 }
5548 for (auto& prototype_info : prototype_infos) {
5549 Handle<WeakArrayList> array(
5550 WeakArrayList::cast(prototype_info->prototype_users()), isolate());
5551 DCHECK_IMPLIES(allocation == AllocationType::kOld,
5552 InOldSpace(*array) ||
5553 *array == ReadOnlyRoots(this).empty_weak_array_list());
5554 WeakArrayList new_array = PrototypeUsers::Compact(
5555 array, this, JSObject::PrototypeRegistryCompactionCallback, allocation);
5556 prototype_info->set_prototype_users(new_array);
5557 }
5558
5559 // Find known WeakArrayLists and compact them.
5560 Handle<WeakArrayList> scripts(script_list(), isolate());
5561 DCHECK_IMPLIES(
5562 !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && allocation == AllocationType::kOld,
5563 InOldSpace(*scripts));
5564 scripts = CompactWeakArrayList(this, scripts, allocation);
5565 set_script_list(*scripts);
5566 }
5567
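// Retained maps are kept in the native context's retained_maps list as pairs:
// a weak reference to the map followed by a Smi age, initialized to
// FLAG_retain_maps_for_n_gc.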
5568 void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
5569 if (map->is_in_retained_map_list()) {
5570 return;
5571 }
5572 Handle<WeakArrayList> array(context->retained_maps(), isolate());
5573 if (array->IsFull()) {
5574 CompactRetainedMaps(*array);
5575 }
5576 array =
5577 WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
5578 array = WeakArrayList::AddToEnd(
5579 isolate(), array,
5580 MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
5581 if (*array != context->retained_maps()) {
5582 context->set_retained_maps(*array);
5583 }
5584 map->set_is_in_retained_map_list(true);
5585 }
5586
5587 void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
5588 int length = retained_maps.length();
5589 int new_length = 0;
5590 // This loop compacts the array by removing cleared weak cells.
5591 for (int i = 0; i < length; i += 2) {
5592 MaybeObject maybe_object = retained_maps.Get(i);
5593 if (maybe_object->IsCleared()) {
5594 continue;
5595 }
5596
5597 DCHECK(maybe_object->IsWeak());
5598
5599 MaybeObject age = retained_maps.Get(i + 1);
5600 DCHECK(age->IsSmi());
5601 if (i != new_length) {
5602 retained_maps.Set(new_length, maybe_object);
5603 retained_maps.Set(new_length + 1, age);
5604 }
5605 new_length += 2;
5606 }
5607 HeapObject undefined = ReadOnlyRoots(this).undefined_value();
5608 for (int i = new_length; i < length; i++) {
5609 retained_maps.Set(i, HeapObjectReference::Strong(undefined));
5610 }
5611 if (new_length != length) retained_maps.set_length(new_length);
5612 }
5613
5614 void Heap::FatalProcessOutOfMemory(const char* location) {
5615 v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
5616 }
5617
5618 #ifdef DEBUG
5619
5620 class PrintHandleVisitor : public RootVisitor {
5621 public:
5622 void VisitRootPointers(Root root, const char* description,
5623 FullObjectSlot start, FullObjectSlot end) override {
5624 for (FullObjectSlot p = start; p < end; ++p)
5625 PrintF(" handle %p to %p\n", p.ToVoidPtr(),
5626 reinterpret_cast<void*>((*p).ptr()));
5627 }
5628 };
5629
5630
5631 void Heap::PrintHandles() {
5632 PrintF("Handles:\n");
5633 PrintHandleVisitor v;
5634 isolate_->handle_scope_implementer()->Iterate(&v);
5635 }
5636
5637 #endif
5638
5639 class CheckHandleCountVisitor : public RootVisitor {
5640 public:
5641 CheckHandleCountVisitor() : handle_count_(0) {}
5642 ~CheckHandleCountVisitor() override {
5643 CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
5644 }
5645 void VisitRootPointers(Root root, const char* description,
5646 FullObjectSlot start, FullObjectSlot end) override {
5647 handle_count_ += end - start;
5648 }
5649
5650 private:
5651 ptrdiff_t handle_count_;
5652 };
5653
5654
5655 void Heap::CheckHandleCount() {
5656 CheckHandleCountVisitor v;
5657 isolate_->handle_scope_implementer()->Iterate(&v);
5658 }
5659
5660 void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
5661 #ifndef V8_DISABLE_WRITE_BARRIERS
5662 DCHECK(!IsLargeObject(object));
5663 Page* page = Page::FromAddress(slot.address());
5664 if (!page->InYoungGeneration()) {
5665 DCHECK_EQ(page->owner_identity(), OLD_SPACE);
5666
5667 if (!page->SweepingDone()) {
5668 RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
5669 }
5670 }
5671 #endif
5672 }
5673
5674 // static
5675 int Heap::InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot) {
5676 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
5677 return 0;
5678 }
5679
5680 #ifdef DEBUG
5681 void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
5682 #ifndef V8_DISABLE_WRITE_BARRIERS
5683 DCHECK(!IsLargeObject(object));
5684 if (InYoungGeneration(object)) return;
5685 Page* page = Page::FromAddress(slot.address());
5686 DCHECK_EQ(page->owner_identity(), OLD_SPACE);
5687 // Slots are filtered with invalidated slots.
5688 CHECK_IMPLIES(RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()),
5689 page->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
5690 CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
5691 page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
5692 #endif
5693 }
5694
5695 void Heap::VerifySlotRangeHasNoRecordedSlots(Address start, Address end) {
5696 #ifndef V8_DISABLE_WRITE_BARRIERS
5697 Page* page = Page::FromAddress(start);
5698 DCHECK(!page->IsLargePage());
5699 DCHECK(!page->InYoungGeneration());
5700 RememberedSet<OLD_TO_NEW>::CheckNoneInRange(page, start, end);
5701 #endif
5702 }
5703 #endif
5704
5705 void Heap::ClearRecordedSlotRange(Address start, Address end) {
5706 #ifndef V8_DISABLE_WRITE_BARRIERS
5707 Page* page = Page::FromAddress(start);
5708 DCHECK(!page->IsLargePage());
5709 if (!page->InYoungGeneration()) {
5710 DCHECK_EQ(page->owner_identity(), OLD_SPACE);
5711
5712 if (!page->SweepingDone()) {
5713 RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
5714 SlotSet::KEEP_EMPTY_BUCKETS);
5715 }
5716 }
5717 #endif
5718 }
5719
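// Returns the growable paged spaces (old, code and map space) in order and
// nullptr once all of them have been visited.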
5720 PagedSpace* PagedSpaceIterator::Next() {
5721 int space = counter_++;
5722 switch (space) {
5723 case RO_SPACE:
5724 UNREACHABLE();
5725 case OLD_SPACE:
5726 return heap_->old_space();
5727 case CODE_SPACE:
5728 return heap_->code_space();
5729 case MAP_SPACE:
5730 return heap_->map_space();
5731 default:
5732 DCHECK_GT(space, LAST_GROWABLE_PAGED_SPACE);
5733 return nullptr;
5734 }
5735 }
5736
5737 SpaceIterator::SpaceIterator(Heap* heap)
5738 : heap_(heap), current_space_(FIRST_MUTABLE_SPACE - 1) {}
5739
5740 SpaceIterator::~SpaceIterator() = default;
5741
5742 bool SpaceIterator::HasNext() {
5743 // Iterate until no more spaces.
5744 return current_space_ != LAST_SPACE;
5745 }
5746
5747 Space* SpaceIterator::Next() {
5748 DCHECK(HasNext());
5749 return heap_->space(++current_space_);
5750 }
5751
5752 class HeapObjectsFilter {
5753 public:
5754 virtual ~HeapObjectsFilter() = default;
5755 virtual bool SkipObject(HeapObject object) = 0;
5756 };
5757
5758
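// Computes the set of reachable objects eagerly: the constructor marks
// everything reachable from the roots (treating weak references as strong),
// and SkipObject() then filters out free space, fillers, and any object that
// was not marked.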
5759 class UnreachableObjectsFilter : public HeapObjectsFilter {
5760 public:
5761   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5762 MarkReachableObjects();
5763 }
5764
5765   ~UnreachableObjectsFilter() override {
5766 for (auto it : reachable_) {
5767 delete it.second;
5768 it.second = nullptr;
5769 }
5770 }
5771
5772   bool SkipObject(HeapObject object) override {
5773 if (object.IsFreeSpaceOrFiller()) return true;
5774 BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
5775 if (reachable_.count(chunk) == 0) return true;
5776 return reachable_[chunk]->count(object) == 0;
5777 }
5778
5779 private:
5780   bool MarkAsReachable(HeapObject object) {
5781 BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
5782 if (reachable_.count(chunk) == 0) {
5783 reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
5784 }
5785 if (reachable_[chunk]->count(object)) return false;
5786 reachable_[chunk]->insert(object);
5787 return true;
5788 }
5789
5790 class MarkingVisitor : public ObjectVisitor, public RootVisitor {
5791 public:
5792     explicit MarkingVisitor(UnreachableObjectsFilter* filter)
5793 : filter_(filter) {}
5794
5795     void VisitPointers(HeapObject host, ObjectSlot start,
5796 ObjectSlot end) override {
5797 MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
5798 }
5799
5800     void VisitPointers(HeapObject host, MaybeObjectSlot start,
5801 MaybeObjectSlot end) final {
5802 MarkPointers(start, end);
5803 }
5804
5805     void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
5806 Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
5807 MarkHeapObject(target);
5808 }
5809     void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
5810 MarkHeapObject(rinfo->target_object());
5811 }
5812
5813     void VisitRootPointers(Root root, const char* description,
5814 FullObjectSlot start, FullObjectSlot end) override {
5815 MarkPointersImpl(start, end);
5816 }
5817     void VisitRootPointers(Root root, const char* description,
5818 OffHeapObjectSlot start,
5819 OffHeapObjectSlot end) override {
5820 MarkPointersImpl(start, end);
5821 }
5822
5823     void TransitiveClosure() {
5824 while (!marking_stack_.empty()) {
5825 HeapObject obj = marking_stack_.back();
5826 marking_stack_.pop_back();
5827 obj.Iterate(this);
5828 }
5829 }
5830
5831 private:
5832     void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
5833 MarkPointersImpl(start, end);
5834 }
5835
5836 template <typename TSlot>
5837     V8_INLINE void MarkPointersImpl(TSlot start, TSlot end) {
5838 // Treat weak references as strong.
5839 Isolate* isolate = filter_->heap_->isolate();
5840 for (TSlot p = start; p < end; ++p) {
5841 typename TSlot::TObject object = p.load(isolate);
5842 HeapObject heap_object;
5843 if (object.GetHeapObject(&heap_object)) {
5844 MarkHeapObject(heap_object);
5845 }
5846 }
5847 }
5848
5849     V8_INLINE void MarkHeapObject(HeapObject heap_object) {
5850 if (filter_->MarkAsReachable(heap_object)) {
5851 marking_stack_.push_back(heap_object);
5852 }
5853 }
5854
5855 UnreachableObjectsFilter* filter_;
5856 std::vector<HeapObject> marking_stack_;
5857 };
5858
5859 friend class MarkingVisitor;
5860
5861   void MarkReachableObjects() {
5862 MarkingVisitor visitor(this);
5863 heap_->IterateRoots(&visitor, {});
5864 visitor.TransitiveClosure();
5865 }
5866
5867 Heap* heap_;
5868 DisallowHeapAllocation no_allocation_;
5869 std::unordered_map<BasicMemoryChunk*,
5870 std::unordered_set<HeapObject, Object::Hasher>*>
5871 reachable_;
5872 };
5873
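// Enters a safepoint, makes the heap iterable, and optionally installs an
// UnreachableObjectsFilter before starting with the first space's objects.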
5874 HeapObjectIterator::HeapObjectIterator(
5875 Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
5876 : heap_(heap),
5877 safepoint_scope_(std::make_unique<SafepointScope>(heap)),
5878 filtering_(filtering),
5879 filter_(nullptr),
5880 space_iterator_(nullptr),
5881 object_iterator_(nullptr) {
5882 heap_->MakeHeapIterable();
5883 // Start the iteration.
5884 space_iterator_ = new SpaceIterator(heap_);
5885 switch (filtering_) {
5886 case kFilterUnreachable:
5887 filter_ = new UnreachableObjectsFilter(heap_);
5888 break;
5889 default:
5890 break;
5891 }
5892 object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
5893 }
5894
5895 HeapObjectIterator::~HeapObjectIterator() {
5896 #ifdef DEBUG
5897 // Assert that in filtering mode we have iterated through all
5898   // objects. Otherwise, the heap will be left in an inconsistent state.
5899 if (filtering_ != kNoFiltering) {
5900 DCHECK_NULL(object_iterator_);
5901 }
5902 #endif
5903 delete space_iterator_;
5904 delete filter_;
5905 }
5906
5907 HeapObject HeapObjectIterator::Next() {
5908 if (filter_ == nullptr) return NextObject();
5909
5910 HeapObject obj = NextObject();
5911 while (!obj.is_null() && (filter_->SkipObject(obj))) obj = NextObject();
5912 return obj;
5913 }
5914
5915 HeapObject HeapObjectIterator::NextObject() {
5916 // No iterator means we are done.
5917 if (object_iterator_.get() == nullptr) return HeapObject();
5918
5919 HeapObject obj = object_iterator_.get()->Next();
5920 if (!obj.is_null()) {
5921 // If the current iterator has more objects we are fine.
5922 return obj;
5923 } else {
5924     // Go through the spaces looking for one that has objects.
5925 while (space_iterator_->HasNext()) {
5926 object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
5927 obj = object_iterator_.get()->Next();
5928 if (!obj.is_null()) {
5929 return obj;
5930 }
5931 }
5932 }
5933 // Done with the last space.
5934 object_iterator_.reset(nullptr);
5935 return HeapObject();
5936 }
5937
5938 void Heap::UpdateTotalGCTime(double duration) {
5939 if (FLAG_trace_gc_verbose) {
5940 total_gc_time_ms_ += duration;
5941 }
5942 }
5943
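// Compacts the young external string list: holes and thin strings are
// dropped, strings that have left the young generation are moved to the old
// list, and the remaining entries are kept.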
5944 void Heap::ExternalStringTable::CleanUpYoung() {
5945 int last = 0;
5946 Isolate* isolate = heap_->isolate();
5947 for (size_t i = 0; i < young_strings_.size(); ++i) {
5948 Object o = young_strings_[i];
5949 if (o.IsTheHole(isolate)) {
5950 continue;
5951 }
5952 // The real external string is already in one of these vectors and was or
5953 // will be processed. Re-processing it will add a duplicate to the vector.
5954 if (o.IsThinString()) continue;
5955 DCHECK(o.IsExternalString());
5956 if (InYoungGeneration(o)) {
5957 young_strings_[last++] = o;
5958 } else {
5959 old_strings_.push_back(o);
5960 }
5961 }
5962 young_strings_.resize(last);
5963 }
5964
5965 void Heap::ExternalStringTable::CleanUpAll() {
5966 CleanUpYoung();
5967 int last = 0;
5968 Isolate* isolate = heap_->isolate();
5969 for (size_t i = 0; i < old_strings_.size(); ++i) {
5970 Object o = old_strings_[i];
5971 if (o.IsTheHole(isolate)) {
5972 continue;
5973 }
5974 // The real external string is already in one of these vectors and was or
5975 // will be processed. Re-processing it will add a duplicate to the vector.
5976 if (o.IsThinString()) continue;
5977 DCHECK(o.IsExternalString());
5978 DCHECK(!InYoungGeneration(o));
5979 old_strings_[last++] = o;
5980 }
5981 old_strings_.resize(last);
5982 #ifdef VERIFY_HEAP
5983 if (FLAG_verify_heap) {
5984 Verify();
5985 }
5986 #endif
5987 }
5988
5989 void Heap::ExternalStringTable::TearDown() {
5990 for (size_t i = 0; i < young_strings_.size(); ++i) {
5991 Object o = young_strings_[i];
5992     // Don't finalize thin strings.
5993 if (o.IsThinString()) continue;
5994 heap_->FinalizeExternalString(ExternalString::cast(o));
5995 }
5996 young_strings_.clear();
5997 for (size_t i = 0; i < old_strings_.size(); ++i) {
5998 Object o = old_strings_[i];
5999     // Don't finalize thin strings.
6000 if (o.IsThinString()) continue;
6001 heap_->FinalizeExternalString(ExternalString::cast(o));
6002 }
6003 old_strings_.clear();
6004 }
6005
6006
6007 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6008 // Tag the page pointer to make it findable in the dump file.
6009 if (compacted) {
6010 page ^= 0xC1EAD & (Page::kPageSize - 1); // Cleared.
6011 } else {
6012 page ^= 0x1D1ED & (Page::kPageSize - 1); // I died.
6013 }
6014 remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
6015 remembered_unmapped_pages_index_++;
6016 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6017 }
6018
6019 size_t Heap::YoungArrayBufferBytes() {
6020 return array_buffer_sweeper()->YoungBytes();
6021 }
6022
6023 size_t Heap::OldArrayBufferBytes() {
6024 return array_buffer_sweeper()->OldBytes();
6025 }
6026
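// Registers [start, end) as a strong root range by prepending a new entry to
// the doubly-linked strong-roots list; the returned entry can later be passed
// to UpdateStrongRoots() or UnregisterStrongRoots().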
6027 StrongRootsEntry* Heap::RegisterStrongRoots(FullObjectSlot start,
6028 FullObjectSlot end) {
6029 base::MutexGuard guard(&strong_roots_mutex_);
6030
6031 StrongRootsEntry* entry = new StrongRootsEntry();
6032 entry->start = start;
6033 entry->end = end;
6034 entry->prev = nullptr;
6035 entry->next = strong_roots_head_;
6036
6037 if (strong_roots_head_) {
6038 DCHECK_NULL(strong_roots_head_->prev);
6039 strong_roots_head_->prev = entry;
6040 }
6041 strong_roots_head_ = entry;
6042
6043 return entry;
6044 }
6045
6046 void Heap::UpdateStrongRoots(StrongRootsEntry* entry, FullObjectSlot start,
6047 FullObjectSlot end) {
6048 entry->start = start;
6049 entry->end = end;
6050 }
6051
6052 void Heap::UnregisterStrongRoots(StrongRootsEntry* entry) {
6053 base::MutexGuard guard(&strong_roots_mutex_);
6054
6055 StrongRootsEntry* prev = entry->prev;
6056 StrongRootsEntry* next = entry->next;
6057
6058 if (prev) prev->next = next;
6059 if (next) next->prev = prev;
6060
6061 if (strong_roots_head_ == entry) {
6062 DCHECK_NULL(prev);
6063 strong_roots_head_ = next;
6064 }
6065
6066 delete entry;
6067 }
6068
6069 void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
6070 set_builtins_constants_table(cache);
6071 }
6072
6073 void Heap::SetDetachedContexts(WeakArrayList detached_contexts) {
6074 set_detached_contexts(detached_contexts);
6075 }
6076
6077 void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
6078 DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code.builtin_index());
6079 set_interpreter_entry_trampoline_for_profiling(code);
6080 }
6081
6082 void Heap::PostFinalizationRegistryCleanupTaskIfNeeded() {
6083 // Only one cleanup task is posted at a time.
6084 if (!HasDirtyJSFinalizationRegistries() ||
6085 is_finalization_registry_cleanup_task_posted_) {
6086 return;
6087 }
6088 auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
6089 reinterpret_cast<v8::Isolate*>(isolate()));
6090 auto task = std::make_unique<FinalizationRegistryCleanupTask>(this);
6091 taskrunner->PostNonNestableTask(std::move(task));
6092 is_finalization_registry_cleanup_task_posted_ = true;
6093 }
6094
6095 void Heap::EnqueueDirtyJSFinalizationRegistry(
6096 JSFinalizationRegistry finalization_registry,
6097 std::function<void(HeapObject object, ObjectSlot slot, Object target)>
6098 gc_notify_updated_slot) {
6099 // Add a FinalizationRegistry to the tail of the dirty list.
6100 DCHECK(!HasDirtyJSFinalizationRegistries() ||
6101 dirty_js_finalization_registries_list().IsJSFinalizationRegistry());
6102 DCHECK(finalization_registry.next_dirty().IsUndefined(isolate()));
6103 DCHECK(!finalization_registry.scheduled_for_cleanup());
6104 finalization_registry.set_scheduled_for_cleanup(true);
6105 if (dirty_js_finalization_registries_list_tail().IsUndefined(isolate())) {
6106 DCHECK(dirty_js_finalization_registries_list().IsUndefined(isolate()));
6107 set_dirty_js_finalization_registries_list(finalization_registry);
6108 // dirty_js_finalization_registries_list_ is rescanned by
6109 // ProcessWeakListRoots.
6110 } else {
6111 JSFinalizationRegistry tail = JSFinalizationRegistry::cast(
6112 dirty_js_finalization_registries_list_tail());
6113 tail.set_next_dirty(finalization_registry);
6114 gc_notify_updated_slot(
6115 tail, tail.RawField(JSFinalizationRegistry::kNextDirtyOffset),
6116 finalization_registry);
6117 }
6118 set_dirty_js_finalization_registries_list_tail(finalization_registry);
6119 // dirty_js_finalization_registries_list_tail_ is rescanned by
6120 // ProcessWeakListRoots.
6121 }
6122
6123 MaybeHandle<JSFinalizationRegistry> Heap::DequeueDirtyJSFinalizationRegistry() {
6124 // Take a FinalizationRegistry from the head of the dirty list for fairness.
6125 if (HasDirtyJSFinalizationRegistries()) {
6126 Handle<JSFinalizationRegistry> head(
6127 JSFinalizationRegistry::cast(dirty_js_finalization_registries_list()),
6128 isolate());
6129 set_dirty_js_finalization_registries_list(head->next_dirty());
6130 head->set_next_dirty(ReadOnlyRoots(this).undefined_value());
6131 if (*head == dirty_js_finalization_registries_list_tail()) {
6132 set_dirty_js_finalization_registries_list_tail(
6133 ReadOnlyRoots(this).undefined_value());
6134 }
6135 return head;
6136 }
6137 return {};
6138 }
6139
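// Unlinks every dirty JSFinalizationRegistry belonging to |context| from the
// dirty list, clears its scheduled-for-cleanup flag, and fixes up the list's
// tail pointer afterwards.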
6140 void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
6141 if (!FLAG_harmony_weak_refs) return;
6142
6143 DisallowHeapAllocation no_gc;
6144
6145 Isolate* isolate = this->isolate();
6146 Object prev = ReadOnlyRoots(isolate).undefined_value();
6147 Object current = dirty_js_finalization_registries_list();
6148 while (!current.IsUndefined(isolate)) {
6149 JSFinalizationRegistry finalization_registry =
6150 JSFinalizationRegistry::cast(current);
6151 if (finalization_registry.native_context() == context) {
6152 if (prev.IsUndefined(isolate)) {
6153 set_dirty_js_finalization_registries_list(
6154 finalization_registry.next_dirty());
6155 } else {
6156 JSFinalizationRegistry::cast(prev).set_next_dirty(
6157 finalization_registry.next_dirty());
6158 }
6159 finalization_registry.set_scheduled_for_cleanup(false);
6160 current = finalization_registry.next_dirty();
6161 finalization_registry.set_next_dirty(
6162 ReadOnlyRoots(isolate).undefined_value());
6163 } else {
6164 prev = current;
6165 current = finalization_registry.next_dirty();
6166 }
6167 }
6168 set_dirty_js_finalization_registries_list_tail(prev);
6169 }
6170
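// Adds |target| to the weak_refs_keep_during_job set, creating the
// OrderedHashSet lazily; the set is emptied again by ClearKeptObjects().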
6171 void Heap::KeepDuringJob(Handle<JSReceiver> target) {
6172 DCHECK(FLAG_harmony_weak_refs);
6173 DCHECK(weak_refs_keep_during_job().IsUndefined() ||
6174 weak_refs_keep_during_job().IsOrderedHashSet());
6175 Handle<OrderedHashSet> table;
6176 if (weak_refs_keep_during_job().IsUndefined(isolate())) {
6177 table = isolate()->factory()->NewOrderedHashSet();
6178 } else {
6179 table =
6180 handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
6181 }
6182 table = OrderedHashSet::Add(isolate(), table, target).ToHandleChecked();
6183 set_weak_refs_keep_during_job(*table);
6184 }
6185
6186 void Heap::ClearKeptObjects() {
6187 set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
6188 }
6189
6190 size_t Heap::NumberOfTrackedHeapObjectTypes() {
6191 return ObjectStats::OBJECT_STATS_COUNT;
6192 }
6193
6194
6195 size_t Heap::ObjectCountAtLastGC(size_t index) {
6196 if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6197 return 0;
6198 return live_object_stats_->object_count_last_gc(index);
6199 }
6200
6201
6202 size_t Heap::ObjectSizeAtLastGC(size_t index) {
6203 if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6204 return 0;
6205 return live_object_stats_->object_size_last_gc(index);
6206 }
6207
6208
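// Maps an object-stats index to a printable name, covering both real instance
// types and the virtual types tracked by ObjectStats; returns false for
// indices that match neither.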
6209 bool Heap::GetObjectTypeName(size_t index, const char** object_type,
6210 const char** object_sub_type) {
6211 if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
6212
6213 switch (static_cast<int>(index)) {
6214 #define COMPARE_AND_RETURN_NAME(name) \
6215 case name: \
6216 *object_type = #name; \
6217 *object_sub_type = ""; \
6218 return true;
6219 INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6220 #undef COMPARE_AND_RETURN_NAME
6221
6222 #define COMPARE_AND_RETURN_NAME(name) \
6223 case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
6224 *object_type = #name; \
6225 *object_sub_type = ""; \
6226 return true;
6227 VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6228 #undef COMPARE_AND_RETURN_NAME
6229 }
6230 return false;
6231 }
6232
6233 size_t Heap::NumberOfNativeContexts() {
6234 int result = 0;
6235 Object context = native_contexts_list();
6236 while (!context.IsUndefined(isolate())) {
6237 ++result;
6238 Context native_context = Context::cast(context);
6239 context = native_context.next_context_link();
6240 }
6241 return result;
6242 }
6243
6244 std::vector<Handle<NativeContext>> Heap::FindAllNativeContexts() {
6245 std::vector<Handle<NativeContext>> result;
6246 Object context = native_contexts_list();
6247 while (!context.IsUndefined(isolate())) {
6248 NativeContext native_context = NativeContext::cast(context);
6249 result.push_back(handle(native_context, isolate()));
6250 context = native_context.next_context_link();
6251 }
6252 return result;
6253 }
6254
6255 std::vector<WeakArrayList> Heap::FindAllRetainedMaps() {
6256 std::vector<WeakArrayList> result;
6257 Object context = native_contexts_list();
6258 while (!context.IsUndefined(isolate())) {
6259 NativeContext native_context = NativeContext::cast(context);
6260 result.push_back(native_context.retained_maps());
6261 context = native_context.next_context_link();
6262 }
6263 return result;
6264 }
6265
6266 size_t Heap::NumberOfDetachedContexts() {
6267 // The detached_contexts() array has two entries per detached context.
6268 return detached_contexts().length() / 2;
6269 }
6270
6271 void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
6272 ObjectSlot end) {
6273 VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
6274 }
6275
6276 void VerifyPointersVisitor::VisitPointers(HeapObject host,
6277 MaybeObjectSlot start,
6278 MaybeObjectSlot end) {
6279 VerifyPointers(host, start, end);
6280 }
6281
6282 void VerifyPointersVisitor::VisitRootPointers(Root root,
6283 const char* description,
6284 FullObjectSlot start,
6285 FullObjectSlot end) {
6286 VerifyPointersImpl(start, end);
6287 }
6288
6289 void VerifyPointersVisitor::VisitRootPointers(Root root,
6290 const char* description,
6291 OffHeapObjectSlot start,
6292 OffHeapObjectSlot end) {
6293 VerifyPointersImpl(start, end);
6294 }
6295
6296 void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
6297 CHECK(IsValidHeapObject(heap_, heap_object));
6298 CHECK(heap_object.map().IsMap());
6299 }
6300
6301 template <typename TSlot>
6302 void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
6303 Isolate* isolate = heap_->isolate();
6304 for (TSlot slot = start; slot < end; ++slot) {
6305 typename TSlot::TObject object = slot.load(isolate);
6306 HeapObject heap_object;
6307 if (object.GetHeapObject(&heap_object)) {
6308 VerifyHeapObjectImpl(heap_object);
6309 } else {
6310 CHECK(object.IsSmi() || object.IsCleared());
6311 }
6312 }
6313 }
6314
6315 void VerifyPointersVisitor::VerifyPointers(HeapObject host,
6316 MaybeObjectSlot start,
6317 MaybeObjectSlot end) {
6318 // If this DCHECK fires then you probably added a pointer field
6319   // to one of the objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
6320 // this by moving that object to POINTER_VISITOR_ID_LIST.
6321 DCHECK_EQ(ObjectFields::kMaybePointers,
6322 Map::ObjectFieldsFrom(host.map().visitor_id()));
6323 VerifyPointersImpl(start, end);
6324 }
6325
6326 void VerifyPointersVisitor::VisitCodeTarget(Code host, RelocInfo* rinfo) {
6327 Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
6328 VerifyHeapObjectImpl(target);
6329 }
6330
6331 void VerifyPointersVisitor::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
6332 VerifyHeapObjectImpl(rinfo->target_object());
6333 }
6334
6335 void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
6336 FullObjectSlot start,
6337 FullObjectSlot end) {
6338 for (FullObjectSlot current = start; current < end; ++current) {
6339 CHECK((*current).IsSmi());
6340 }
6341 }
6342
6343 bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
6344 // Object migration is governed by the following rules:
6345 //
6346   // 1) Objects in new-space can be migrated to the old space
6347   //    that matches their target space, or they stay in new-space.
6348 // 2) Objects in old-space stay in the same space when migrating.
6349 // 3) Fillers (two or more words) can migrate due to left-trimming of
6350 // fixed arrays in new-space or old space.
6351   // 4) Fillers (one word) can never migrate; they are skipped by
6352   //    incremental marking explicitly to prevent an invalid pattern.
6353 //
6354 // Since this function is used for debugging only, we do not place
6355 // asserts here, but check everything explicitly.
6356 if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
6357 InstanceType type = map.instance_type();
6358 MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
6359 AllocationSpace src = chunk->owner_identity();
6360 switch (src) {
6361 case NEW_SPACE:
6362 return dst == NEW_SPACE || dst == OLD_SPACE;
6363 case OLD_SPACE:
6364 return dst == OLD_SPACE;
6365 case CODE_SPACE:
6366 return dst == CODE_SPACE && type == CODE_TYPE;
6367 case MAP_SPACE:
6368 case LO_SPACE:
6369 case CODE_LO_SPACE:
6370 case NEW_LO_SPACE:
6371 case RO_SPACE:
6372 return false;
6373 }
6374 UNREACHABLE();
6375 }
6376
6377 size_t Heap::EmbedderAllocationCounter() const {
6378 return local_embedder_heap_tracer()
6379 ? local_embedder_heap_tracer()->allocated_size()
6380 : 0;
6381 }
6382
6383 void Heap::CreateObjectStats() {
6384 if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
6385 if (!live_object_stats_) {
6386 live_object_stats_.reset(new ObjectStats(this));
6387 }
6388 if (!dead_object_stats_) {
6389 dead_object_stats_.reset(new ObjectStats(this));
6390 }
6391 }
6392
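// Returns the map of |object| even if a forwarding pointer has already been
// installed in its map word during garbage collection.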
6393 Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
6394 MapWord map_word = object.map_word();
6395 return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress().map()
6396 : map_word.ToMap();
6397 }
6398
6399 Code Heap::GcSafeCastToCode(HeapObject object, Address inner_pointer) {
6400 Code code = Code::unchecked_cast(object);
6401 DCHECK(!code.is_null());
6402 DCHECK(GcSafeCodeContains(code, inner_pointer));
6403 return code;
6404 }
6405
6406 bool Heap::GcSafeCodeContains(Code code, Address addr) {
6407 Map map = GcSafeMapOfCodeSpaceObject(code);
6408 DCHECK(map == ReadOnlyRoots(this).code_map());
6409 if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
6410 Address start = code.address();
6411 Address end = code.address() + code.SizeFromMap(map);
6412 return start <= addr && addr < end;
6413 }
6414
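// Finds the Code object containing |inner_pointer|: embedded builtins are
// tried first, then the third-party heap (if enabled), the code large object
// space, and finally the code space's code object registry. The read-only
// space fallback at the end is only expected to be reached from debugging
// aids such as "jco".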
6415 Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
6416 Code code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
6417 if (!code.is_null()) return code;
6418
6419 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
6420 Address start = tp_heap_->GetObjectFromInnerPointer(inner_pointer);
6421 return GcSafeCastToCode(HeapObject::FromAddress(start), inner_pointer);
6422 }
6423
6424 // Check if the inner pointer points into a large object chunk.
6425 LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
6426 if (large_page != nullptr) {
6427 return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
6428 }
6429
6430 if (V8_LIKELY(code_space()->Contains(inner_pointer))) {
6431     // Use the page's code object registry to find the start of the code
6432     // object that contains the inner pointer.
6433 Page* page = Page::FromAddress(inner_pointer);
6434
6435 Address start =
6436 page->GetCodeObjectRegistry()->GetCodeObjectStartFromInnerAddress(
6437 inner_pointer);
6438 return GcSafeCastToCode(HeapObject::FromAddress(start), inner_pointer);
6439 }
6440
6441 // It can only fall through to here during debugging, where for instance "jco"
6442 // was called on an address within a RO_SPACE builtin. It cannot reach here
6443 // during stack iteration as RO_SPACE memory is not executable so cannot
6444 // appear on the stack as an instruction address.
6445 DCHECK(ReadOnlyHeap::Contains(
6446 HeapObject::FromAddress(inner_pointer & ~kHeapObjectTagMask)));
6447
6448 // TODO(delphick): Possibly optimize this as it iterates over all pages in
6449 // RO_SPACE instead of just the one containing the address.
6450 ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
6451 for (HeapObject object = iterator.Next(); !object.is_null();
6452 object = iterator.Next()) {
6453 if (!object.IsCode()) continue;
6454 Code code = Code::cast(object);
6455 if (inner_pointer >= code.address() &&
6456 inner_pointer < code.address() + code.Size()) {
6457 return code;
6458 }
6459 }
6460 UNREACHABLE();
6461 }
6462
6463 void Heap::WriteBarrierForCodeSlow(Code code) {
6464 for (RelocIterator it(code, RelocInfo::EmbeddedObjectModeMask()); !it.done();
6465 it.next()) {
6466 GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
6467 WriteBarrier::Marking(code, it.rinfo(), it.rinfo()->target_object());
6468 }
6469 }
6470
6471 void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
6472 HeapObject value) {
6473 MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
6474 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
6475 }
6476
6477 void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
6478 DCHECK(ObjectInYoungGeneration(HeapObjectSlot(slot).ToHeapObject()));
6479 int slot_index = EphemeronHashTable::SlotToIndex(table.address(), slot);
6480 InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
6481 auto it =
6482 ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
6483 it.first->second.insert(entry.as_int());
6484 }
6485
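// Slow-path write barrier reached from generated code when a key is stored
// into an EphemeronHashTable: records an old-to-new ephemeron key write when
// needed and then applies the marking barrier to the key slot.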
6486 void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
6487 Address key_slot_address,
6488 Isolate* isolate) {
6489 EphemeronHashTable table = EphemeronHashTable::cast(Object(raw_object));
6490 MaybeObjectSlot key_slot(key_slot_address);
6491 MaybeObject maybe_key = *key_slot;
6492 HeapObject key;
6493 if (!maybe_key.GetHeapObject(&key)) return;
6494 if (!ObjectInYoungGeneration(table) && ObjectInYoungGeneration(key)) {
6495 isolate->heap()->RecordEphemeronKeyWrite(table, key_slot_address);
6496 }
6497 WriteBarrier::Marking(table, key_slot, maybe_key);
6498 }
6499
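// Bit flags selecting which write-barrier actions WriteBarrierForRangeImpl()
// performs; they are combined into its kModeMask template parameter.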
6500 enum RangeWriteBarrierMode {
6501 kDoGenerational = 1 << 0,
6502 kDoMarking = 1 << 1,
6503 kDoEvacuationSlotRecording = 1 << 2,
6504 };
6505
6506 template <int kModeMask, typename TSlot>
6507 void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
6508 TSlot start_slot, TSlot end_slot) {
6509 // At least one of generational or marking write barrier should be requested.
6510 STATIC_ASSERT(kModeMask & (kDoGenerational | kDoMarking));
6511 // kDoEvacuationSlotRecording implies kDoMarking.
6512 STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) ||
6513 (kModeMask & kDoMarking));
6514
6515 MarkingBarrier* marking_barrier = this->marking_barrier();
6516 MarkCompactCollector* collector = this->mark_compact_collector();
6517
6518 for (TSlot slot = start_slot; slot < end_slot; ++slot) {
6519 typename TSlot::TObject value = *slot;
6520 HeapObject value_heap_object;
6521 if (!value.GetHeapObject(&value_heap_object)) continue;
6522
6523 if ((kModeMask & kDoGenerational) &&
6524 Heap::InYoungGeneration(value_heap_object)) {
6525 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(source_page,
6526 slot.address());
6527 }
6528
6529 if ((kModeMask & kDoMarking) &&
6530 marking_barrier->MarkValue(object, value_heap_object)) {
6531 if (kModeMask & kDoEvacuationSlotRecording) {
6532 collector->RecordSlot(source_page, HeapObjectSlot(slot),
6533 value_heap_object);
6534 }
6535 }
6536 }
6537 }
6538
6539 // Instantiate Heap::WriteBarrierForRange() for ObjectSlot and MaybeObjectSlot.
6540 template void Heap::WriteBarrierForRange<ObjectSlot>(HeapObject object,
6541 ObjectSlot start_slot,
6542 ObjectSlot end_slot);
6543 template void Heap::WriteBarrierForRange<MaybeObjectSlot>(
6544 HeapObject object, MaybeObjectSlot start_slot, MaybeObjectSlot end_slot);
6545
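// Decides which barriers are required for |object|'s page and the current
// marking state, then dispatches to the matching WriteBarrierForRangeImpl()
// specialization so the mode checks stay out of the per-slot loop.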
6546 template <typename TSlot>
6547 void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
6548 TSlot end_slot) {
6549 MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
6550 base::Flags<RangeWriteBarrierMode> mode;
6551
6552 if (!source_page->InYoungGeneration()) {
6553 mode |= kDoGenerational;
6554 }
6555
6556 if (incremental_marking()->IsMarking()) {
6557 mode |= kDoMarking;
6558 if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
6559 mode |= kDoEvacuationSlotRecording;
6560 }
6561 }
6562
6563 switch (mode) {
6564 // Nothing to be done.
6565 case 0:
6566 return;
6567
6568 // Generational only.
6569 case kDoGenerational:
6570 return WriteBarrierForRangeImpl<kDoGenerational>(source_page, object,
6571 start_slot, end_slot);
6572 // Marking, no evacuation slot recording.
6573 case kDoMarking:
6574 return WriteBarrierForRangeImpl<kDoMarking>(source_page, object,
6575 start_slot, end_slot);
6576 // Marking with evacuation slot recording.
6577 case kDoMarking | kDoEvacuationSlotRecording:
6578 return WriteBarrierForRangeImpl<kDoMarking | kDoEvacuationSlotRecording>(
6579 source_page, object, start_slot, end_slot);
6580
6581 // Generational and marking, no evacuation slot recording.
6582 case kDoGenerational | kDoMarking:
6583 return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking>(
6584 source_page, object, start_slot, end_slot);
6585
6586 // Generational and marking with evacuation slot recording.
6587 case kDoGenerational | kDoMarking | kDoEvacuationSlotRecording:
6588 return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking |
6589 kDoEvacuationSlotRecording>(
6590 source_page, object, start_slot, end_slot);
6591
6592 default:
6593 UNREACHABLE();
6594 }
6595 }
6596
6597 void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
6598 HeapObject object) {
6599 DCHECK(InYoungGeneration(object));
6600 Page* source_page = Page::FromHeapObject(host);
6601 RelocInfo::Mode rmode = rinfo->rmode();
6602 Address addr = rinfo->pc();
6603 SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
6604 if (rinfo->IsInConstantPool()) {
6605 addr = rinfo->constant_pool_entry_address();
6606 if (RelocInfo::IsCodeTargetMode(rmode)) {
6607 slot_type = CODE_ENTRY_SLOT;
6608 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
6609 slot_type = COMPRESSED_OBJECT_SLOT;
6610 } else {
6611 DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
6612 slot_type = FULL_OBJECT_SLOT;
6613 }
6614 }
6615 uintptr_t offset = addr - source_page->address();
6616 DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
6617 RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
6618 static_cast<uint32_t>(offset));
6619 }
6620
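// Cross-checks the flags exposed through heap_internals::MemoryChunk (the
// slim chunk view) against the full BasicMemoryChunk header; failures are
// reported via CHECKs and the function otherwise returns true.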
6621 bool Heap::PageFlagsAreConsistent(HeapObject object) {
6622 if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
6623 return true;
6624 }
6625 BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
6626 heap_internals::MemoryChunk* slim_chunk =
6627 heap_internals::MemoryChunk::FromHeapObject(object);
6628
6629 // Slim chunk flags consistency.
6630 CHECK_EQ(chunk->InYoungGeneration(), slim_chunk->InYoungGeneration());
6631 CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
6632 slim_chunk->IsMarking());
6633
6634 AllocationSpace identity = chunk->owner()->identity();
6635
6636 // Generation consistency.
6637 CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
6638 slim_chunk->InYoungGeneration());
6639 // Read-only consistency.
6640 CHECK_EQ(chunk->InReadOnlySpace(), slim_chunk->InReadOnlySpace());
6641
6642 // Marking consistency.
6643 if (chunk->IsWritable()) {
6644 // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
6645     // find a heap. The exception is when the ReadOnlySpace is writable, during
6646 // bootstrapping, so explicitly allow this case.
6647 Heap* heap = Heap::FromWritableHeapObject(object);
6648 CHECK_EQ(slim_chunk->IsMarking(), heap->incremental_marking()->IsMarking());
6649 } else {
6650 // Non-writable RO_SPACE must never have marking flag set.
6651 CHECK(!slim_chunk->IsMarking());
6652 }
6653 return true;
6654 }
6655
6656 void Heap::SetEmbedderStackStateForNextFinalization(
6657 EmbedderHeapTracer::EmbedderStackState stack_state) {
6658 local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
6659 stack_state);
6660 }
6661
6662 #ifdef DEBUG
6663 void Heap::IncrementObjectCounters() {
6664 isolate_->counters()->objs_since_last_full()->Increment();
6665 isolate_->counters()->objs_since_last_young()->Increment();
6666 }
6667 #endif // DEBUG
6668
6669 // StrongRootBlocks are allocated as a block of addresses, prefixed with a
6670 // StrongRootsEntry pointer:
6671 //
6672 // | StrongRootsEntry*
6673 // | Address 1
6674 // | ...
6675 // | Address N
6676 //
6677 // The allocate method registers the range "Address 1" to "Address N" with the
6678 // heap as a strong root array, saves that entry in StrongRootsEntry*, and
6679 // returns a pointer to Address 1.
6680 Address* StrongRootBlockAllocator::allocate(size_t n) {
6681 void* block = malloc(sizeof(StrongRootsEntry*) + n * sizeof(Address));
6682
6683 StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
6684 Address* ret = reinterpret_cast<Address*>(reinterpret_cast<char*>(block) +
6685 sizeof(StrongRootsEntry*));
6686
6687 memset(ret, kNullAddress, n * sizeof(Address));
6688 *header =
6689 heap_->RegisterStrongRoots(FullObjectSlot(ret), FullObjectSlot(ret + n));
6690
6691 return ret;
6692 }
6693
6694 void StrongRootBlockAllocator::deallocate(Address* p, size_t n) noexcept {
6695 // The allocate method returns a pointer to Address 1, so the deallocate
6696 // method has to offset that pointer back by sizeof(StrongRootsEntry*).
6697 void* block = reinterpret_cast<char*>(p) - sizeof(StrongRootsEntry*);
6698 StrongRootsEntry** header = reinterpret_cast<StrongRootsEntry**>(block);
6699
6700 heap_->UnregisterStrongRoots(*header);
6701
6702 free(block);
6703 }
6704
6705 } // namespace internal
6706 } // namespace v8
6707