// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/heap.h"

#include "src/accessors.h"
#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/feedback-vector.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-stats.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {


struct Heap::StrongRootsList {
  Object** start;
  Object** end;
  StrongRootsList* next;
};

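// Observes bump-pointer allocation in new space and, once step_size bytes
// have been allocated, asks the heap to schedule an idle-time scavenge task.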
class IdleScavengeObserver : public AllocationObserver {
 public:
  IdleScavengeObserver(Heap& heap, intptr_t step_size)
      : AllocationObserver(step_size), heap_(heap) {}

  void Step(int bytes_allocated, Address, size_t) override {
    heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
  }

 private:
  Heap& heap_;
};

Heap::Heap()
    : external_memory_(0),
      external_memory_limit_(kExternalAllocationSoftLimit),
      external_memory_at_last_mark_compact_(0),
      isolate_(nullptr),
      code_range_size_(0),
      // semispace_size_ should be a power of 2 and old_generation_size_ should
      // be a multiple of Page::kPageSize. On a 64-bit build (kPointerSize == 8)
      // the defaults below work out to a 16 MB max semispace and a 1400 MB max
      // old generation; on 32-bit builds, 8 MB and 700 MB.
      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
      initial_semispace_size_(MB),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      initial_max_old_generation_size_(max_old_generation_size_),
      initial_old_generation_size_(max_old_generation_size_ /
                                   kInitalOldGenerationLimitFactor),
      old_generation_size_configured_(false),
      max_executable_size_(256ul * (kPointerSize / 4) * MB),
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap.
      // Will be 4 * reserved_semispace_size_ to ensure that young
      // generation can be aligned to its size.
      maximum_committed_(0),
      survived_since_last_expansion_(0),
      survived_last_scavenge_(0),
      always_allocate_scope_count_(0),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      out_of_memory_callback_(nullptr),
      out_of_memory_callback_data_(nullptr),
      contexts_disposed_(0),
      number_of_disposed_maps_(0),
      global_ic_age_(0),
      new_space_(nullptr),
      old_space_(nullptr),
      code_space_(nullptr),
      map_space_(nullptr),
      lo_space_(nullptr),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      allocations_count_(0),
      raw_allocations_hash_(0),
      ms_count_(0),
      gc_count_(0),
      remembered_unmapped_pages_index_(0),
#ifdef DEBUG
      allocation_timeout_(0),
#endif  // DEBUG
      old_generation_allocation_limit_(initial_old_generation_size_),
      inline_allocation_disabled_(false),
      tracer_(nullptr),
      promoted_objects_size_(0),
      promotion_ratio_(0),
      semi_space_copied_object_size_(0),
      previous_semi_space_copied_object_size_(0),
      semi_space_copied_rate_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      maximum_size_scavenges_(0),
      last_idle_notification_time_(0.0),
      last_gc_time_(0.0),
      scavenge_collector_(nullptr),
      mark_compact_collector_(nullptr),
      memory_allocator_(nullptr),
      store_buffer_(nullptr),
      incremental_marking_(nullptr),
      gc_idle_time_handler_(nullptr),
      memory_reducer_(nullptr),
      live_object_stats_(nullptr),
      dead_object_stats_(nullptr),
      scavenge_job_(nullptr),
      idle_scavenge_observer_(nullptr),
      new_space_allocation_counter_(0),
      old_generation_allocation_counter_at_last_gc_(0),
      old_generation_size_at_last_gc_(0),
      gcs_since_last_deopt_(0),
      global_pretenuring_feedback_(nullptr),
      ring_buffer_full_(false),
      ring_buffer_end_(0),
      promotion_queue_(this),
      configured_(false),
      current_gc_flags_(Heap::kNoGCFlags),
      current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
      external_string_table_(this),
      gc_callbacks_depth_(0),
      deserialization_complete_(false),
      strong_roots_list_(nullptr),
      heap_iterator_depth_(0),
      local_embedder_heap_tracer_(nullptr),
      fast_promotion_mode_(false),
      force_oom_(false),
      delay_sweeper_tasks_for_testing_(false),
      pending_layout_change_object_(nullptr) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  // Ensure old_generation_size_ is a multiple of kPageSize.
  DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  set_native_contexts_list(nullptr);
  set_allocation_sites_list(Smi::kZero);
  set_encountered_weak_collections(Smi::kZero);
  set_encountered_weak_cells(Smi::kZero);
  set_encountered_transition_arrays(Smi::kZero);
  // Put a dummy entry in the remembered pages so we can find the list in a
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(nullptr, false);
}


size_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_->Capacity() + OldGenerationCapacity();
}

size_t Heap::OldGenerationCapacity() {
  if (!HasBeenSetUp()) return 0;

  return old_space_->Capacity() + code_space_->Capacity() +
         map_space_->Capacity() + lo_space_->SizeOfObjects();
}

size_t Heap::CommittedOldGenerationMemory() {
  if (!HasBeenSetUp()) return 0;

  return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
         map_space_->CommittedMemory() + lo_space_->Size();
}

size_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_->CommittedPhysicalMemory() +
         old_space_->CommittedPhysicalMemory() +
         code_space_->CommittedPhysicalMemory() +
         map_space_->CommittedPhysicalMemory() +
         lo_space_->CommittedPhysicalMemory();
}

size_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return static_cast<size_t>(memory_allocator()->SizeExecutable());
}


void Heap::UpdateMaximumCommitted() {
  if (!HasBeenSetUp()) return;

  const size_t current_committed_memory = CommittedMemory();
  if (current_committed_memory > maximum_committed_) {
    maximum_committed_ = current_committed_memory;
  }
}

size_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  size_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
    total += space->Available();
  }
  return total;
}


bool Heap::HasBeenSetUp() {
  return old_space_ != nullptr && code_space_ != nullptr &&
         map_space_ != nullptr && lo_space_ != nullptr;
}

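// Picks the collector for this GC: anything other than an allocation failure
// in new space forces a full mark-compact, as do the stress flags, pending
// incremental-marking finalization, and the risk that old space cannot absorb
// the objects a scavenge would promote. Otherwise a young-generation
// collection suffices.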
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  if (incremental_marking()->NeedsFinalization() &&
      AllocationLimitOvershotByLargeMargin()) {
    *reason = "Incremental marking needs finalization";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space. Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = nullptr;
  return YoungGenerationCollector();
}

void Heap::SetGCState(HeapState state) {
  gc_state_ = state;
}

// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set. The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_->CollectStatistics();
    new_space_->ReportStatistics();
    new_space_->ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintIsolate(isolate_, "Memory allocator,   used: %6" PRIuS
                         " KB,"
                         " available: %6" PRIuS " KB\n",
               memory_allocator()->Size() / KB,
               memory_allocator()->Available() / KB);
  PrintIsolate(isolate_, "New space,          used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               new_space_->Size() / KB, new_space_->Available() / KB,
               new_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Old space,          used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Code space,         used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Map space,          used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
               map_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "All spaces,         used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               this->SizeOfObjects() / KB, this->Available() / KB,
               this->CommittedMemory() / KB);
  PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
               external_memory_ / KB);
  PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
               total_gc_time_ms_);
}

// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_->CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_->ReportStatistics();
#endif  // DEBUG
  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       ++i) {
    int count = deferred_counters_[i];
    deferred_counters_[i] = 0;
    while (count > 0) {
      count--;
      isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
    }
  }
}


void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
  deferred_counters_[feature]++;
}

bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }

void Heap::GarbageCollectionPrologue() {
  {
    AllowHeapAllocation for_the_first_part_of_prologue;
    gc_count_++;

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

  // Reset GC statistics.
  promoted_objects_size_ = 0;
  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
  semi_space_copied_object_size_ = 0;
  nodes_died_in_new_space_ = 0;
  nodes_copied_in_new_space_ = 0;
  nodes_promoted_ = 0;

  UpdateMaximumCommitted();

#ifdef DEBUG
  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  if (new_space_->IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
  CheckNewSpaceExpansionCriteria();
  UpdateNewSpaceAllocationCounter();
}

size_t Heap::SizeOfObjects() {
  size_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


const char* Heap::GetSpaceName(int idx) {
  switch (idx) {
    case NEW_SPACE:
      return "new_space";
    case OLD_SPACE:
      return "old_space";
    case MAP_SPACE:
      return "map_space";
    case CODE_SPACE:
      return "code_space";
    case LO_SPACE:
      return "large_object_space";
    default:
      UNREACHABLE();
  }
  return nullptr;
}


void Heap::RepairFreeListsAfterDeserialization() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    space->RepairFreeListsAfterDeserialization();
  }
}

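// Merges the pretenuring feedback a scavenger collected into the global
// feedback table. The map-word check handles allocation sites that were
// themselves moved by the scavenge: if a site has a forwarding address, the
// feedback is attributed to its new location.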
void Heap::MergeAllocationSitePretenuringFeedback(
    const base::HashMap& local_pretenuring_feedback) {
  AllocationSite* site = nullptr;
  for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
       local_entry != nullptr;
       local_entry = local_pretenuring_feedback.Next(local_entry)) {
    site = reinterpret_cast<AllocationSite*>(local_entry->key);
    MapWord map_word = site->map_word();
    if (map_word.IsForwardingAddress()) {
      site = AllocationSite::cast(map_word.ToForwardingAddress());
    }

    // We have not validated the allocation site yet, since we have not
    // dereferenced the site during collecting information.
    // This is an inlined check of AllocationMemento::IsValid.
    if (!site->IsAllocationSite() || site->IsZombie()) continue;

    int value =
        static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
    DCHECK_GT(value, 0);

    if (site->IncrementMementoFoundCount(value)) {
      global_pretenuring_feedback_->LookupOrInsert(
          site, ObjectHash(site->address()));
    }
  }
}

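// While this scope is active the store buffer is bypassed: any buffered
// entries are first flushed into the remembered set, and the buffer stays in
// IN_GC mode until the scope is left.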
class Heap::SkipStoreBufferScope {
 public:
  explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {
    store_buffer_->MoveAllEntriesToRememberedSet();
    store_buffer_->SetMode(StoreBuffer::IN_GC);
  }

  ~SkipStoreBufferScope() {
    DCHECK(store_buffer_->Empty());
    store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
  }

 private:
  StoreBuffer* store_buffer_;
};

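// Bounds the lifetime of the global pretenuring feedback table to a single
// garbage collection cycle.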
class Heap::PretenuringScope {
 public:
  explicit PretenuringScope(Heap* heap) : heap_(heap) {
    heap_->global_pretenuring_feedback_ =
        new base::HashMap(kInitialFeedbackCapacity);
  }

  ~PretenuringScope() {
    delete heap_->global_pretenuring_feedback_;
    heap_->global_pretenuring_feedback_ = nullptr;
  }

 private:
  Heap* heap_;
};


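// Digests the feedback gathered during this GC. Sites whose tenuring decision
// changed have their dependent code marked for deoptimization; the deopt
// itself is requested via the stack guard rather than performed here.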
void Heap::ProcessPretenuringFeedback() {
  bool trigger_deoptimization = false;
  if (FLAG_allocation_site_pretenuring) {
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    AllocationSite* site = nullptr;

    // Step 1: Digest feedback for recorded allocation sites.
    bool maximum_size_scavenge = MaximumSizeScavenge();
    for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
         e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
      allocation_sites++;
      site = reinterpret_cast<AllocationSite*>(e->key);
      int found_count = site->memento_found_count();
      // An entry in the storage does not imply that the count is > 0 because
      // allocation sites might have been reset due to too many objects dying
      // in old space.
      if (found_count > 0) {
        DCHECK(site->IsAllocationSite());
        active_allocation_sites++;
        allocation_mementos_found += found_count;
        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
          trigger_deoptimization = true;
        }
        if (site->GetPretenureMode() == TENURED) {
          tenure_decisions++;
        } else {
          dont_tenure_decisions++;
        }
      }
    }

    // Step 2: Deopt maybe tenured allocation sites if necessary.
    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
    if (deopt_maybe_tenured) {
      Object* list_element = allocation_sites_list();
      while (list_element->IsAllocationSite()) {
        site = AllocationSite::cast(list_element);
        DCHECK(site->IsAllocationSite());
        allocation_sites++;
        if (site->IsMaybeTenure()) {
          site->set_deopt_dependent_code(true);
          trigger_deoptimization = true;
        }
        list_element = site->weak_next();
      }
    }

    if (trigger_deoptimization) {
      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    }

    if (FLAG_trace_pretenuring_statistics &&
        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
         dont_tenure_decisions > 0)) {
      PrintIsolate(isolate(),
                   "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
                   "active_sites=%d "
                   "mementos=%d tenured=%d not_tenured=%d\n",
                   deopt_maybe_tenured ? 1 : 0, allocation_sites,
                   active_allocation_sites, allocation_mementos_found,
                   tenure_decisions, dont_tenure_decisions);
    }
  }
}


void Heap::DeoptMarkedAllocationSites() {
  // TODO(hpayer): If iterating over the allocation sites list becomes a
  // performance issue, use a cache data structure in heap instead.
  Object* list_element = allocation_sites_list();
  while (list_element->IsAllocationSite()) {
    AllocationSite* site = AllocationSite::cast(list_element);
    if (site->deopt_dependent_code()) {
      site->dependent_code()->MarkCodeForDeoptimization(
          isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
      site->set_deopt_dependent_code(false);
    }
    list_element = site->weak_next();
  }
  Deoptimizer::DeoptimizeMarkedCode(isolate_);
}


void Heap::GarbageCollectionEpilogue() {
  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  AllowHeapAllocation for_the_rest_of_the_epilogue;

#ifdef DEBUG
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
  if (FLAG_check_handle_count) CheckHandleCount();
#endif
  if (FLAG_deopt_every_n_garbage_collections > 0) {
    // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
    // the topmost optimized frame can be deoptimized safely, because it
    // might not have a lazy bailout point right after its current PC.
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }

  UpdateMaximumCommitted();

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
        (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_old_space()->AddSample(static_cast<int>(
        (old_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_code_space()->AddSample(
        static_cast<int>((code_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
        (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
        (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
        static_cast<int>(code_space()->CommittedMemory() / KB));

    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
        static_cast<int>(MaximumCommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space)                \
  isolate_->counters()->space##_bytes_available()->Set( \
      static_cast<int>(space()->Available()));          \
  isolate_->counters()->space##_bytes_committed()->Set( \
      static_cast<int>(space()->CommittedMemory()));    \
  isolate_->counters()->space##_bytes_used()->Set(      \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
  if (space()->CommittedMemory() > 0) {                                \
    isolate_->counters()->external_fragmentation_##space()->AddSample( \
        static_cast<int>(100 - (space()->SizeOfObjects() * 100.0) /    \
                                   space()->CommittedMemory()));       \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
  UPDATE_COUNTERS_FOR_SPACE(space)                         \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

#ifdef DEBUG
  ReportStatisticsAfterGC();
#endif  // DEBUG

  // Remember the last top pointer so that we can later find out
  // whether we allocated in new space since the last GC.
  new_space_top_after_last_gc_ = new_space()->top();
  last_gc_time_ = MonotonicallyIncreasingTimeInMs();

  ReduceNewSpaceSize();
}


void Heap::PreprocessStackTraces() {
  WeakFixedArray::Iterator iterator(weak_stack_trace_list());
  FixedArray* elements;
  while ((elements = iterator.Next<FixedArray>())) {
    for (int j = 1; j < elements->length(); j += 4) {
      Object* maybe_code = elements->get(j + 2);
      // If GC happens while adding a stack trace to the weak fixed array,
      // which has been copied into a larger backing store, we may run into
      // a stack trace that has already been preprocessed. Guard against this.
      if (!maybe_code->IsAbstractCode()) break;
      AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
      int offset = Smi::cast(elements->get(j + 3))->value();
      int pos = abstract_code->SourcePosition(offset);
      elements->set(j + 2, Smi::FromInt(pos));
    }
  }
  // We must not compact the weak fixed list here, as we may be in the middle
  // of writing to it, when the GC triggered. Instead, we reset the root value.
  set_weak_stack_trace_list(Smi::kZero);
}


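// Tracks the nesting depth of GC callback invocations so that embedder
// prologue/epilogue callbacks run only for the outermost GC and are not
// re-entered by GCs they trigger themselves.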
class GCCallbacksScope {
 public:
  explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
    heap_->gc_callbacks_depth_++;
  }
  ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }

  bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }

 private:
  Heap* heap_;
};


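// Services a GC interrupt raised through the stack guard: memory pressure is
// handled first, then a request to finish incremental marking with a full
// collection, then a request to merely finalize marking incrementally.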
void Heap::HandleGCRequest() {
  if (HighMemoryPressure()) {
    incremental_marking()->reset_request_type();
    CheckMemoryPressure();
  } else if (incremental_marking()->request_type() ==
             IncrementalMarking::COMPLETE_MARKING) {
    incremental_marking()->reset_request_type();
    CollectAllGarbage(current_gc_flags_,
                      GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
                      current_gc_callback_flags_);
  } else if (incremental_marking()->request_type() ==
                 IncrementalMarking::FINALIZATION &&
             incremental_marking()->IsMarking() &&
             !incremental_marking()->finalize_marking_completed()) {
    incremental_marking()->reset_request_type();
    FinalizeIncrementalMarking(
        GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
  }
}


void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
  scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}

void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] (%s).\n",
        Heap::GarbageCollectionReasonToString(gc_reason));
  }

  HistogramTimerScope incremental_marking_scope(
      isolate()->counters()->gc_incremental_marking_finalize());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);

  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
  incremental_marking()->FinalizeIncrementally();
  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
}


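// Picks the histogram timer matching this collection: scavenges, incremental
// mark-compact finalization (with or without memory reduction), or a full
// non-incremental compaction.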
HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
  if (IsYoungGenerationCollector(collector)) {
    return isolate_->counters()->gc_scavenger();
  } else {
    if (!incremental_marking()->IsStopped()) {
      if (ShouldReduceMemory()) {
        return isolate_->counters()->gc_finalize_reduce_memory();
      } else {
        return isolate_->counters()->gc_finalize();
      }
    } else {
      return isolate_->counters()->gc_compactor();
    }
  }
}

void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
                             const v8::GCCallbackFlags gc_callback_flags) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  set_current_gc_flags(flags);
  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
  set_current_gc_flags(kNoGCFlags);
}

void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC. Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  if (gc_reason == GarbageCollectionReason::kLastResort) {
    InvokeOutOfMemoryCallback();
  }
  // The timer scope must be named; an unnamed temporary would be destroyed
  // immediately and measure nothing.
  RuntimeCallTimerScope runtime_timer(
      isolate(), &RuntimeCallStats::GC_AllAvailableGarbage);
  if (isolate()->concurrent_recompilation_enabled()) {
    // The optimizing compiler may be unnecessarily holding on to memory.
    DisallowHeapAllocation no_recursive_gc;
    isolate()->optimizing_compile_dispatcher()->Flush(
        OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
  }
  isolate()->ClearSerializerData();
  set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, nullptr,
                        v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
  set_current_gc_flags(kNoGCFlags);
  new_space_->Shrink();
  UncommitFromSpace();
}

void Heap::ReportExternalMemoryPressure() {
  if (external_memory_ >
      (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
    CollectAllGarbage(
        kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
        GarbageCollectionReason::kExternalMemoryPressure,
        static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
                                     kGCCallbackFlagCollectAllExternalMemory));
    return;
  }
  if (incremental_marking()->IsStopped()) {
    if (incremental_marking()->CanBeActivated()) {
      StartIncrementalMarking(
          i::Heap::kNoGCFlags,
          GarbageCollectionReason::kExternalMemoryPressure,
          static_cast<GCCallbackFlags>(
              kGCCallbackFlagSynchronousPhantomCallbackProcessing |
              kGCCallbackFlagCollectAllExternalMemory));
    } else {
      CollectAllGarbage(i::Heap::kNoGCFlags,
                        GarbageCollectionReason::kExternalMemoryPressure,
                        kGCCallbackFlagSynchronousPhantomCallbackProcessing);
    }
  } else {
    // Incremental marking is turned on and has already been started.
    const double pressure =
        static_cast<double>(external_memory_ -
                            external_memory_at_last_mark_compact_ -
                            kExternalAllocationSoftLimit) /
        external_memory_hard_limit();
    DCHECK_GE(1, pressure);
    const double kMaxStepSizeOnExternalLimit = 25;
    const double deadline = MonotonicallyIncreasingTimeInMs() +
                            pressure * kMaxStepSizeOnExternalLimit;
    incremental_marking()->AdvanceIncrementalMarking(
        deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
        IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
  }
}


void Heap::EnsureFillerObjectAtTop() {
  // There may be an allocation memento behind objects in new space. Upon
  // evacuation of a non-full new space (or if we are on the last page) there
  // may be uninitialized memory behind top. We fill the remainder of the page
  // with a filler.
  Address to_top = new_space_->top();
  Page* page = Page::FromAddress(to_top - kPointerSize);
  if (page->Contains(to_top)) {
    int remaining_in_page = static_cast<int>(page->area_end() - to_top);
    CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
  }
}

bool Heap::CollectGarbage(GarbageCollector collector,
                          GarbageCollectionReason gc_reason,
                          const char* collector_reason,
                          const v8::GCCallbackFlags gc_callback_flags) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);
  // The timer scope must be named; an unnamed temporary would be destroyed
  // immediately and measure nothing.
  RuntimeCallTimerScope runtime_timer(isolate(), &RuntimeCallStats::GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  EnsureFillerObjectAtTop();

  if (IsYoungGenerationCollector(collector) &&
      !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  bool next_gc_likely_to_collect_more = false;
  size_t committed_memory_before = 0;

  if (collector == MARK_COMPACTOR) {
    committed_memory_before = CommittedOldGenerationMemory();
  }

  {
    tracer()->Start(collector, gc_reason, collector_reason);
    DCHECK(AllowHeapAllocation::IsAllowed());
    DisallowHeapAllocation no_allocation_during_gc;
    GarbageCollectionPrologue();

    {
      HistogramTimer* gc_type_timer = GCTypeTimer(collector);
      HistogramTimerScope histogram_timer_scope(gc_type_timer);
      TRACE_EVENT0("v8", gc_type_timer->name());

      next_gc_likely_to_collect_more =
          PerformGarbageCollection(collector, gc_callback_flags);
    }

    GarbageCollectionEpilogue();
    if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
      isolate()->CheckDetachedContextsAfterGC();
    }

    if (collector == MARK_COMPACTOR) {
      size_t committed_memory_after = CommittedOldGenerationMemory();
      size_t used_memory_after = PromotedSpaceSizeOfObjects();
      MemoryReducer::Event event;
      event.type = MemoryReducer::kMarkCompact;
      event.time_ms = MonotonicallyIncreasingTimeInMs();
      // Trigger one more GC if
      // - this GC decreased committed memory,
      // - there is high fragmentation,
      // - there are live detached contexts.
      event.next_gc_likely_to_collect_more =
          (committed_memory_before > committed_memory_after + MB) ||
          HasHighFragmentation(used_memory_after, committed_memory_after) ||
          (detached_contexts()->length() > 0);
      event.committed_memory = committed_memory_after;
      if (deserialization_complete_) {
        memory_reducer_->NotifyMarkCompact(event);
      }
      memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
    }

    tracer()->Stop(collector);
  }

  if (collector == MARK_COMPACTOR &&
      (gc_callback_flags & (kGCCallbackFlagForced |
                            kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
    isolate()->CountUsage(v8::Isolate::kForcedGC);
  }

  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  // We do this only for scavenger to avoid a loop where mark-compact
  // causes another mark-compact.
  if (IsYoungGenerationCollector(collector) &&
      !ShouldAbortIncrementalMarking()) {
    StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
                                                      kNoGCCallbackFlags);
  }

  return next_gc_likely_to_collect_more;
}


int Heap::NotifyContextDisposed(bool dependant_context) {
  if (!dependant_context) {
    tracer()->ResetSurvivalEvents();
    old_generation_size_configured_ = false;
    MemoryReducer::Event event;
    event.type = MemoryReducer::kPossibleGarbage;
    event.time_ms = MonotonicallyIncreasingTimeInMs();
    memory_reducer_->NotifyPossibleGarbage(event);
  }
  if (isolate()->concurrent_recompilation_enabled()) {
    // Flush the queued recompilation tasks.
    isolate()->optimizing_compile_dispatcher()->Flush(
        OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
  }
  AgeInlineCaches();
  number_of_disposed_maps_ = retained_maps()->Length();
  tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
  return ++contexts_disposed_;
}

void Heap::StartIncrementalMarking(int gc_flags,
                                   GarbageCollectionReason gc_reason,
                                   GCCallbackFlags gc_callback_flags) {
  DCHECK(incremental_marking()->IsStopped());
  set_current_gc_flags(gc_flags);
  current_gc_callback_flags_ = gc_callback_flags;
  incremental_marking()->Start(gc_reason);
}

void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
    int gc_flags, const GCCallbackFlags gc_callback_flags) {
  if (incremental_marking()->IsStopped()) {
    IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
    if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
      incremental_marking()->incremental_marking_job()->ScheduleTask(this);
    } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
      StartIncrementalMarking(gc_flags,
                              GarbageCollectionReason::kAllocationLimit,
                              gc_callback_flags);
    }
  }
}

void Heap::StartIdleIncrementalMarking(GarbageCollectionReason gc_reason) {
  gc_idle_time_handler_->ResetNoProgressCounter();
  StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
                          kNoGCCallbackFlags);
}


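// Moves len elements within a FixedArray. MemMove copes with overlapping
// source and destination ranges; the write barrier is applied to the
// destination range afterwards.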
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                        int len) {
  if (len == 0) return;

  DCHECK(array->map() != fixed_cow_array_map());
  Object** dst_objects = array->data_start() + dst_index;
  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
  FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}


#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*p);
        Isolate* isolate = object->GetIsolate();
        // Check that the string is actually internalized.
        CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
              object->IsInternalizedString());
      }
    }
  }
};


static void VerifyStringTable(Heap* heap) {
  StringTableVerifier verifier;
  heap->string_table()->IterateElements(&verifier);
}
#endif  // VERIFY_HEAP

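// Reserves the memory the deserializer needs, chunk by chunk and space by
// space. If any reservation fails, a GC for the affected generation is
// performed and the whole set of reservations is retried, up to kThreshold
// times.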
bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
         space++) {
      Reservation* reservation = &reservations[space];
      DCHECK_LE(1, reservation->length());
      if (reservation->at(0).size == 0) continue;
      bool perform_gc = false;
      if (space == MAP_SPACE) {
        // We allocate each map individually to avoid fragmentation.
        maps->Clear();
        DCHECK_EQ(1, reservation->length());
        int num_maps = reservation->at(0).size / Map::kSize;
        for (int i = 0; i < num_maps; i++) {
          // The deserializer will update the skip list.
          AllocationResult allocation = map_space()->AllocateRawUnaligned(
              Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
          HeapObject* free_space = nullptr;
          if (allocation.To(&free_space)) {
            // Mark with a free list node, in case we have a GC before
            // deserializing.
            Address free_space_address = free_space->address();
            CreateFillerObjectAt(free_space_address, Map::kSize,
                                 ClearRecordedSlots::kNo);
            maps->Add(free_space_address);
          } else {
            perform_gc = true;
            break;
          }
        }
      } else if (space == LO_SPACE) {
        // Just check that we can allocate during deserialization.
        DCHECK_EQ(1, reservation->length());
        perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
      } else {
        for (auto& chunk : *reservation) {
          AllocationResult allocation;
          int size = chunk.size;
          DCHECK_LE(static_cast<size_t>(size),
                    MemoryAllocator::PageAreaSize(
                        static_cast<AllocationSpace>(space)));
          if (space == NEW_SPACE) {
            allocation = new_space()->AllocateRawUnaligned(size);
          } else {
            // The deserializer will update the skip list.
            allocation = paged_space(space)->AllocateRawUnaligned(
                size, PagedSpace::IGNORE_SKIP_LIST);
          }
          HeapObject* free_space = nullptr;
          if (allocation.To(&free_space)) {
            // Mark with a free list node, in case we have a GC before
            // deserializing.
            Address free_space_address = free_space->address();
            CreateFillerObjectAt(free_space_address, size,
                                 ClearRecordedSlots::kNo);
            DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
            chunk.start = free_space_address;
            chunk.end = free_space_address + size;
          } else {
            perform_gc = true;
            break;
          }
        }
      }
      if (perform_gc) {
        if (space == NEW_SPACE) {
          CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
        } else {
          if (counter > 1) {
            CollectAllGarbage(
                kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
                GarbageCollectionReason::kDeserializer);
          } else {
            CollectAllGarbage(kAbortIncrementalMarkingMask,
                              GarbageCollectionReason::kDeserializer);
          }
        }
        gc_performed = true;
        break;  // Abort for-loop over spaces and retry.
      }
    }
  }

  return !gc_performed;
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_->CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list();
  while (!context->IsUndefined(isolate())) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined(isolate())) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->next_context_link();
  }
}


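// Survival statistics distinguish the promotion ratio (bytes promoted relative
// to the new-space size before this GC) from the promotion rate (bytes
// promoted relative to what survived the previous scavenge).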
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
  if (start_new_space_size == 0) return;

  promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
                      static_cast<double>(start_new_space_size) * 100);

  if (previous_semi_space_copied_object_size_ > 0) {
    promotion_rate_ =
        (static_cast<double>(promoted_objects_size_) /
         static_cast<double>(previous_semi_space_copied_object_size_) * 100);
  } else {
    promotion_rate_ = 0;
  }

  semi_space_copied_rate_ =
      (static_cast<double>(semi_space_copied_object_size_) /
       static_cast<double>(start_new_space_size) * 100);

  double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
  tracer()->AddSurvivalRatio(survival_rate);
}

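// Runs the selected collector between the embedder's GC prologue and epilogue
// callbacks, updates allocation limits and survival statistics, and returns
// whether processing weak global handles freed anything, in which case another
// GC is likely to collect more.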
bool Heap::PerformGarbageCollection(
    GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
  int freed_global_handles = 0;

  if (!IsYoungGenerationCollector(collector)) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_PROLOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = static_cast<int>(Heap::new_space()->Size());

  {
    Heap::PretenuringScope pretenuring_scope(this);
    Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);

    switch (collector) {
      case MARK_COMPACTOR:
        UpdateOldGenerationAllocationCounter();
        // Perform mark-sweep with optional compaction.
        MarkCompact();
        old_generation_size_configured_ = true;
        // This should be updated before PostGarbageCollectionProcessing, which
        // can cause another GC. Take into account the objects promoted during
        // GC.
        old_generation_allocation_counter_at_last_gc_ +=
            static_cast<size_t>(promoted_objects_size_);
        old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
        break;
      case MINOR_MARK_COMPACTOR:
        MinorMarkCompact();
        break;
      case SCAVENGER:
        if (fast_promotion_mode_ &&
            CanExpandOldGeneration(new_space()->Size())) {
          tracer()->NotifyYoungGenerationHandling(
              YoungGenerationHandling::kFastPromotionDuringScavenge);
          EvacuateYoungGeneration();
        } else {
          tracer()->NotifyYoungGenerationHandling(
              YoungGenerationHandling::kRegularScavenge);

          Scavenge();
        }
        break;
    }

    ProcessPretenuringFeedback();
  }

  UpdateSurvivalStatistics(start_new_space_size);
  ConfigureInitialOldGenerationSize();

  if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
    ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  gc_post_processing_depth_++;
  {
    AllowHeapAllocation allow_allocation;
    TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_WEAK_GLOBAL_HANDLES);
    freed_global_handles =
        isolate_->global_handles()->PostGarbageCollectionProcessing(
            collector, gc_callback_flags);
  }
  gc_post_processing_depth_--;

  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing(isolate_);

  double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
  double mutator_speed =
      tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
  size_t old_gen_size = PromotedSpaceSizeOfObjects();
  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    external_memory_at_last_mark_compact_ = external_memory_;
    external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
    SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
  } else if (HasLowYoungGenerationAllocationRate() &&
             old_generation_size_configured_) {
    DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
  }

  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_EPILOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
    }
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif

  return freed_global_handles > 0;
}


void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
  // The timer scope must be named; an unnamed temporary would be destroyed
  // immediately and measure nothing.
  RuntimeCallTimerScope runtime_timer(isolate(),
                                      &RuntimeCallStats::GCPrologueCallback);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      if (!gc_prologue_callbacks_[i].pass_isolate) {
        v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
            gc_prologue_callbacks_[i].callback);
        callback(gc_type, flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
      }
    }
  }
  if (FLAG_trace_object_groups && (gc_type == kGCTypeIncrementalMarking ||
                                   gc_type == kGCTypeMarkSweepCompact)) {
    isolate_->global_handles()->PrintObjectGroups();
  }
}


void Heap::CallGCEpilogueCallbacks(GCType gc_type,
                                   GCCallbackFlags gc_callback_flags) {
  RuntimeCallTimerScope runtime_timer(isolate(),
                                      &RuntimeCallStats::GCEpilogueCallback);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      if (!gc_epilogue_callbacks_[i].pass_isolate) {
        v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
            gc_epilogue_callbacks_[i].callback);
        callback(gc_type, gc_callback_flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
      }
    }
  }
}


void Heap::MarkCompact() {
  PauseAllocationObserversScope pause_observers(this);

  SetGCState(MARK_COMPACT);

  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  uint64_t size_of_objects_before_gc = SizeOfObjects();

  mark_compact_collector()->Prepare();

  ms_count_++;

  MarkCompactPrologue();

  mark_compact_collector()->CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  MarkCompactEpilogue();

  if (FLAG_allocation_site_pretenuring) {
    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
  }
}

void Heap::MinorMarkCompact() { UNREACHABLE(); }

void Heap::MarkCompactEpilogue() {
  TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
  SetGCState(NOT_IN_GC);

  isolate_->counters()->objs_since_last_full()->Set(0);

  incremental_marking()->Epilogue();

  PreprocessStackTraces();
  DCHECK(incremental_marking()->IsStopped());

  mark_compact_collector()->marking_deque()->StopUsing();
}


void Heap::MarkCompactPrologue() {
  TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  ClearNormalizedMapCaches();
}


void Heap::CheckNewSpaceExpansionCriteria() {
  if (FLAG_experimental_new_space_growth_heuristic) {
    if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
        survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
      // Grow the size of new space if there is room to grow, and more than 10%
      // have survived the last scavenge.
      new_space_->Grow();
      survived_since_last_expansion_ = 0;
    }
  } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
             survived_since_last_expansion_ > new_space_->TotalCapacity()) {
    // Grow the size of new space if there is room to grow, and enough data
    // has survived scavenge since the last expansion.
    new_space_->Grow();
    survived_since_last_expansion_ = 0;
  }
}


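// An object in new space still counts as unscavenged if its map word carries
// no forwarding address, i.e. the scavenger has not moved it yet.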
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}

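// The promotion queue lives in the last to-space page and grows downwards
// from ToSpaceEnd() towards the page's allocation area; when it would collide
// with allocation, entries overflow into the emergency stack (see
// RelocateQueueHead below).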
void PromotionQueue::Initialize() {
  // The last to-space page may be used for the promotion queue. On promotion
  // conflict, we use the emergency stack.
  DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
         0);
  front_ = rear_ =
      reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
  limit_ = reinterpret_cast<struct Entry*>(
      Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
          ->area_start());
  emergency_stack_ = NULL;
}
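
// Layout assumed by Initialize() above (descriptive comment added for
// orientation): the queue occupies the tail of the last to-space page and
// grows downward from ToSpaceEnd() toward that page's area_start(), so
// front_ == rear_ == ToSpaceEnd() means "empty" and limit_ marks the point
// at which entries have to be spilled onto the emergency stack via
// RelocateQueueHead().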

void PromotionQueue::Destroy() {
  DCHECK(is_empty());
  delete emergency_stack_;
  emergency_stack_ = NULL;
}

void PromotionQueue::RelocateQueueHead() {
  DCHECK(emergency_stack_ == NULL);

  Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
  struct Entry* head_start = rear_;
  struct Entry* head_end =
      Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));

  int entries_count =
      static_cast<int>(head_end - head_start) / sizeof(struct Entry);

  emergency_stack_ = new List<Entry>(2 * entries_count);

  while (head_start != head_end) {
    struct Entry* entry = head_start++;
    // New space allocation in SemiSpaceCopyObject marked the region
    // overlapping with the promotion queue as uninitialized.
    MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
    emergency_stack_->Add(*entry);
  }
  rear_ = head_end;
}


class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}

  virtual Object* RetainAs(Object* object) {
    if (!heap_->InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return NULL;
  }

 private:
  Heap* heap_;
};

void Heap::EvacuateYoungGeneration() {
  TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
  DCHECK(fast_promotion_mode_);
  DCHECK(CanExpandOldGeneration(new_space()->Size()));

  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();

  SetGCState(SCAVENGE);
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Move pages from new->old generation.
  PageRange range(new_space()->bottom(), new_space()->top());
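  // Note on the loop below (descriptive comment added): the iterator is
  // advanced before the page is touched ((*++it)->prev_page()), because
  // Unlink() and ConvertNewToOld() remove |p| from new space and would
  // otherwise invalidate the iterator.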
  for (auto it = range.begin(); it != range.end();) {
    Page* p = (*++it)->prev_page();
    p->Unlink();
    Page::ConvertNewToOld(p);
    if (incremental_marking()->IsMarking())
      mark_compact_collector()->RecordLiveSlotsOnPage(p);
  }

  // Reset new space.
  if (!new_space()->Rebalance()) {
    FatalProcessOutOfMemory("NewSpace::Rebalance");
  }
  new_space()->ResetAllocationInfo();
  new_space()->set_age_mark(new_space()->top());

  // Fix up special trackers.
  external_string_table_.PromoteAllNewSpaceStrings();
  // GlobalHandles are updated in PostGarbageCollectionProcessing.

  IncrementYoungSurvivorsCounter(new_space()->Size());
  IncrementPromotedObjectsSize(new_space()->Size());
  IncrementSemiSpaceCopiedObjectSize(0);

  LOG(isolate_, ResourceEvent("scavenge", "end"));
  SetGCState(NOT_IN_GC);
}

void Heap::Scavenge() {
  TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
  RelocationLock relocation_lock(this);
  // There are soft limits in the allocation code, designed to trigger a mark
  // sweep collection by failing allocations. There is no sense in trying to
  // trigger one during scavenge: scavenge allocations should always succeed.
  AlwaysAllocateScope scope(isolate());

  // Bump-pointer allocations done during scavenge are not real allocations.
  // Pause the inline allocation steps.
  PauseAllocationObserversScope pause_observers(this);

  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();

  SetGCState(SCAVENGE);

  // Implements Cheney's copying algorithm.
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Used for updating survived_since_last_expansion_ at function end.
  size_t survived_watermark = PromotedSpaceSizeOfObjects();

  scavenge_collector_->SelectScavengingVisitorsTable();

  // Flip the semispaces. After flipping, to space is empty and from space
  // holds the live objects.
  new_space_->Flip();
  new_space_->ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation. For to-space
  // objects, we treat the bottom of the to space as a queue. Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects. The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
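  //
  // Illustrative sketch of the to space during a scavenge (added for
  // orientation, not to scale):
  //
  //   [ swept objects | unswept copies | free space | promoted addresses ]
  //   ^ bottom        ^ new_space_front             ^ allocation top
  //
  // The scavenge is done once both queues have been drained.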
  Address new_space_front = new_space_->ToSpaceStart();
  promotion_queue_.Initialize();

  ScavengeVisitor scavenge_visitor(this);

  isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
      &IsUnmodifiedHeapObject);

  {
    // Copy roots.
    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
    IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
  }

  {
    // Copy objects reachable from the old generation.
    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
    RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
      return Scavenger::CheckAndScavengeObject(this, addr);
    });

    RememberedSet<OLD_TO_NEW>::IterateTyped(
        this, [this](SlotType type, Address host_addr, Address addr) {
          return UpdateTypedSlotHelper::UpdateTypedSlot(
              isolate(), type, addr, [this](Object** addr) {
                // We expect that objects referenced by code are long living.
                // If we do not force promotion, then we need to clear
                // old_to_new slots in dead code objects after mark-compact.
                return Scavenger::CheckAndScavengeObject(
                    this, reinterpret_cast<Address>(addr));
              });
        });
  }

  {
    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
    // Copy objects reachable from the encountered weak collections list.
    scavenge_visitor.VisitPointer(&encountered_weak_collections_);
  }

  {
    // Copy objects reachable from the code flushing candidates list.
    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
    MarkCompactCollector* collector = mark_compact_collector();
    if (collector->is_code_flushing_enabled()) {
      collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
    }
  }

  {
    TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  }

  isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
      &IsUnscavengedHeapObject);

  isolate()
      ->global_handles()
      ->IterateNewSpaceWeakUnmodifiedRoots<
          GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  promotion_queue_.Destroy();

  incremental_marking()->UpdateMarkingDequeAfterScavenge();

  ScavengeWeakObjectRetainer weak_object_retainer(this);
  ProcessYoungWeakReferences(&weak_object_retainer);

  DCHECK(new_space_front == new_space_->top());

  // Set age mark.
  new_space_->set_age_mark(new_space_->top());

  ArrayBufferTracker::FreeDeadInNewSpace(this);

  // Update how much has survived scavenge.
  DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
  IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
                                 new_space_->Size() - survived_watermark);

  // Scavenger may find new wrappers by iterating objects promoted onto a black
  // page.
  local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  SetGCState(NOT_IN_GC);
}

void Heap::ComputeFastPromotionMode(double survival_rate) {
  const size_t survived_in_new_space =
      survived_last_scavenge_ * 100 / new_space_->Capacity();
  fast_promotion_mode_ =
      !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
      !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
      survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
  if (FLAG_trace_gc_verbose) {
    PrintIsolate(
        isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
        fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
  }
}
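
// Worked example for the computation above, with illustrative numbers: if
// new space is at its maximum capacity of 16 MB and 12 MB survived the last
// scavenge, survived_in_new_space is 75. When that percentage reaches
// kMinPromotedPercentForFastPromotionMode (and the flag checks pass), the
// next young-generation GC takes the page-moving EvacuateYoungGeneration()
// path instead of copying surviving objects one by one.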

String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // An unreachable external string can be finalized.
    String* string = String::cast(*p);
    if (!string->IsExternalString()) {
      // The original external string has been internalized.
      DCHECK(string->IsThinString());
      return NULL;
    }
    heap->FinalizeExternalString(string);
    return NULL;
  }

  // String is still reachable.
  String* string = String::cast(first_word.ToForwardingAddress());
  if (string->IsThinString()) string = ThinString::cast(string)->actual();
  // Internalization can replace external strings with non-external strings.
  return string->IsExternalString() ? string : nullptr;
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    DCHECK(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space. Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted. Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  DCHECK(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}
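
// The loop above compacts new_space_strings_ in place: |last| trails |p|,
// keeping strings that are still in new space and diverting promoted ones to
// the old-space list, after which ShrinkNewStrings() drops the stale tail.
// (Descriptive comment added.)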


void Heap::UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  // Update old space string references.
  if (external_string_table_.old_space_strings_.length() > 0) {
    Object** start = &external_string_table_.old_space_strings_[0];
    Object** end = start + external_string_table_.old_space_strings_.length();
    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
  }

  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}


void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
  ProcessNativeContexts(retainer);
  ProcessAllocationSites(retainer);
}


void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
  ProcessNativeContexts(retainer);
}


void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
  Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
  // Update the head of the list of contexts.
  set_native_contexts_list(head);
}


void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
  Object* allocation_site_obj =
      VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
  set_allocation_sites_list(allocation_site_obj);
}

void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
  set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
  set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
}

void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
  DisallowHeapAllocation no_allocation_scope;
  Object* cur = allocation_sites_list();
  bool marked = false;
  while (cur->IsAllocationSite()) {
    AllocationSite* casted = AllocationSite::cast(cur);
    if (casted->GetPretenureMode() == flag) {
      casted->ResetPretenureDecision();
      casted->set_deopt_dependent_code(true);
      marked = true;
      RemoveAllocationSitePretenuringFeedback(casted);
    }
    cur = casted->weak_next();
  }
  if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}


void Heap::EvaluateOldSpaceLocalPretenuring(
    uint64_t size_of_objects_before_gc) {
  uint64_t size_of_objects_after_gc = SizeOfObjects();
  double old_generation_survival_rate =
      (static_cast<double>(size_of_objects_after_gc) * 100) /
      static_cast<double>(size_of_objects_before_gc);

  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
    // Too many objects died in the old generation; wrongly pretenured
    // allocation sites may be the cause. We have to deopt all dependent
    // code registered in the allocation sites to re-evaluate our
    // pretenuring decisions.
    ResetAllAllocationSitesDependentCode(TENURED);
    if (FLAG_trace_pretenuring) {
      PrintF(
          "Deopt all allocation sites dependent code due to low survival "
          "rate in the old generation %f\n",
          old_generation_survival_rate);
    }
  }
}
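
// Worked example of the rate above (illustrative numbers): with 100 MB of
// old-generation objects before the mark-compact and 60 MB after, the
// survival rate is 60%. Any rate below kOldSurvivalRateLowThreshold resets
// every TENURED allocation-site decision so pretenuring can be re-learned
// from fresh feedback.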


void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  DisallowHeapAllocation no_allocation;
  // All external strings are listed in the external string table.

  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit ExternalStringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor)
        : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        DCHECK((*p)->IsExternalString());
        visitor_->VisitExternalString(
            Utils::ToLocal(Handle<String>(String::cast(*p))));
      }
    }

   private:
    v8::ExternalResourceVisitor* visitor_;
  } external_string_table_visitor(visitor);

  external_string_table_.IterateAll(&external_string_table_visitor);
}

Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    SemiSpace::AssertValidRange(new_space_front, new_space_->top());
    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front != new_space_->top()) {
      if (!Page::IsAlignedToPageSize(new_space_front)) {
        HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
            StaticScavengeVisitor::IterateBody(object->map(), object);
      } else {
        new_space_front = Page::FromAllocationAreaAddress(new_space_front)
                              ->next_page()
                              ->area_start();
      }
    }

    // Promote and process all the to-be-promoted objects.
    {
      while (!promotion_queue()->is_empty()) {
        HeapObject* target;
        int32_t size;
        bool was_marked_black;
        promotion_queue()->remove(&target, &size, &was_marked_black);

        // A promoted object might already have been partially visited
        // during old space pointer iteration. Thus we search specifically
        // for pointers to the from semispace instead of looking for
        // pointers to new space.
        DCHECK(!target->IsMap());

        IterateAndScavengePromotedObject(target, static_cast<int>(size),
                                         was_marked_black);
      }
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front != new_space_->top());

  return new_space_front;
}
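
// The do/while above runs to a fixed point: draining the promotion queue can
// copy further objects into to-space, and sweeping those can promote again,
// so DoScavenge() only returns once both queues are empty at the same time.
// (Descriptive comment added.)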


STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
              0);  // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
              0);  // NOLINT
#ifdef V8_HOST_ARCH_32_BIT
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
              0);  // NOLINT
#endif


int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
  switch (alignment) {
    case kWordAligned:
      return 0;
    case kDoubleAligned:
    case kDoubleUnaligned:
      return kDoubleSize - kPointerSize;
    default:
      UNREACHABLE();
  }
  return 0;
}


int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
  intptr_t offset = OffsetFrom(address);
  if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
    return kPointerSize;
  if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
    return kDoubleSize - kPointerSize;  // No fill if double is always aligned.
  return 0;
}
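
// Worked example for GetFillToAlign() on a 32-bit host (kPointerSize == 4,
// kDoubleSize == 8, so kDoubleAlignmentMask == 7): for kDoubleAligned at an
// address ending in 0x4 the offset is misaligned and one 4-byte filler word
// is needed; at an address ending in 0x0 no fill is needed. For
// kDoubleUnaligned the two cases are reversed.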


HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
  CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
  return HeapObject::FromAddress(object->address() + filler_size);
}


HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
                                  int allocation_size,
                                  AllocationAlignment alignment) {
  int filler_size = allocation_size - object_size;
  DCHECK(filler_size > 0);
  int pre_filler = GetFillToAlign(object->address(), alignment);
  if (pre_filler) {
    object = PrecedeWithFiller(object, pre_filler);
    filler_size -= pre_filler;
  }
  if (filler_size)
    CreateFillerObjectAt(object->address() + object_size, filler_size,
                         ClearRecordedSlots::kNo);
  return object;
}


HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
  return AlignWithFiller(object, size - kPointerSize, size, kDoubleAligned);
}


void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
  ArrayBufferTracker::RegisterNew(this, buffer);
}


void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
  ArrayBufferTracker::Unregister(this, buffer);
}

void Heap::ConfigureInitialOldGenerationSize() {
  if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
    old_generation_allocation_limit_ =
        Max(MinimumAllocationLimitGrowingStep(),
            static_cast<size_t>(
                static_cast<double>(old_generation_allocation_limit_) *
                (tracer()->AverageSurvivalRatio() / 100)));
  }
}
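
// Worked example (illustrative): with a 16 MB old_generation_allocation_limit_
// and an average survival ratio of 25%, the new limit is 4 MB (clamped from
// below by MinimumAllocationLimitGrowingStep()), tightening the budget once
// real survival data has been recorded.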

AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                          int instance_size) {
  Object* result = nullptr;
  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
  if (!allocation.To(&result)) return allocation;

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(
      reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  // Initialize to only containing tagged fields.
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
  if (FLAG_unbox_double_fields) {
    reinterpret_cast<Map*>(result)
        ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
  }
  reinterpret_cast<Map*>(result)->clear_unused();
  reinterpret_cast<Map*>(result)
      ->set_inobject_properties_or_constructor_function_index(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                   Map::OwnsDescriptors::encode(true) |
                   Map::ConstructionCounter::encode(Map::kNoSlackTracking);
  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
  reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::kZero);
  return result;
}


AllocationResult Heap::AllocateMap(InstanceType instance_type,
                                   int instance_size,
                                   ElementsKind elements_kind) {
  HeapObject* result = nullptr;
  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
  if (!allocation.To(&result)) return allocation;

  isolate()->counters()->maps_created()->Increment();
  result->set_map_no_write_barrier(meta_map());
  Map* map = Map::cast(result);
  map->set_instance_type(instance_type);
  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
  map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
  map->set_instance_size(instance_size);
  map->clear_unused();
  map->set_inobject_properties_or_constructor_function_index(0);
  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                          SKIP_WRITE_BARRIER);
  map->set_weak_cell_cache(Smi::kZero);
  map->set_raw_transitions(Smi::kZero);
  map->set_unused_property_fields(0);
  map->set_instance_descriptors(empty_descriptor_array());
  if (FLAG_unbox_double_fields) {
    map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
  }
  // Must be called only after |instance_type|, |instance_size| and
  // |layout_descriptor| are set.
  map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                   Map::OwnsDescriptors::encode(true) |
                   Map::ConstructionCounter::encode(Map::kNoSlackTracking);
  map->set_bit_field3(bit_field3);
  map->set_elements_kind(elements_kind);
  map->set_new_target_is_base(true);

  return map;
}


AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
                                            AllocationSpace space) {
  HeapObject* obj = nullptr;
  {
    AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
    AllocationResult allocation = AllocateRaw(size, space, align);
    if (!allocation.To(&obj)) return allocation;
  }
#ifdef DEBUG
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  DCHECK(chunk->owner()->identity() == space);
#endif
  CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
  return obj;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
  { type, size, k##camel_name##MapRootIndex }             \
  ,
    STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantStringTable Heap::constant_string_table[] = {
    {"", kempty_stringRootIndex},
#define CONSTANT_STRING_ELEMENT(name, contents) \
  { contents, k##name##RootIndex }              \
  ,
    INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
#undef CONSTANT_STRING_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)        \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
  ,
    STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};

namespace {

void FinalizePartialMap(Heap* heap, Map* map) {
  map->set_code_cache(heap->empty_fixed_array());
  map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
  map->set_raw_transitions(Smi::kZero);
  map->set_instance_descriptors(heap->empty_descriptor_array());
  if (FLAG_unbox_double_fields) {
    map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
  }
  map->set_prototype(heap->null_value());
  map->set_constructor_or_backpointer(heap->null_value());
}
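
// FinalizePartialMap() backfills the fields that AllocatePartialMap() had to
// leave unset because the objects they reference (the empty fixed array, the
// empty descriptor array, null_value) do not exist yet when the very first
// maps are created. The call order in CreateInitialMaps() below is what makes
// this two-phase bootstrap sound. (Descriptive comment added.)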

}  // namespace

bool Heap::CreateInitialMaps() {
  HeapObject* obj = nullptr;
  {
    AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!allocation.To(&obj)) return false;
  }
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  {  // Partial map allocation
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                \
  {                                                                          \
    Map* map;                                                                \
    if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
    set_##field_name##_map(map);                                             \
  }

    ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
    fixed_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);

#undef ALLOCATE_PARTIAL_MAP
  }

  // Allocate the empty array.
  {
    AllocationResult allocation = AllocateEmptyFixedArray();
    if (!allocation.To(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  {
    AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
    if (!allocation.To(&obj)) return false;
  }
  set_null_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  {
    AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
    if (!allocation.To(&obj)) return false;
  }
  set_undefined_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  DCHECK(!InNewSpace(undefined_value()));
  {
    AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
    if (!allocation.To(&obj)) return false;
  }
  set_the_hole_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kTheHole);

  // Set preliminary exception sentinel value before actually initializing it.
  set_exception(null_value());

  // Allocate the empty descriptor array.
  {
    AllocationResult allocation = AllocateEmptyFixedArray();
    if (!allocation.To(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  FinalizePartialMap(this, meta_map());
  FinalizePartialMap(this, fixed_array_map());
  FinalizePartialMap(this, undefined_map());
  undefined_map()->set_is_undetectable();
  FinalizePartialMap(this, null_map());
  null_map()->set_is_undetectable();
  FinalizePartialMap(this, the_hole_map());

  {  // Map allocation
#define ALLOCATE_MAP(instance_type, size, field_name)               \
  {                                                                 \
    Map* map;                                                       \
    if (!AllocateMap((instance_type), size).To(&map)) return false; \
    set_##field_name##_map(map);                                    \
  }

#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
  ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)

#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
                               constructor_function_index)      \
  {                                                             \
    ALLOCATE_MAP((instance_type), (size), field_name);          \
    field_name##_map()->SetConstructorFunctionIndex(            \
        (constructor_function_index));                          \
  }

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
    fixed_cow_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
    DCHECK_NE(fixed_array_map(), fixed_cow_array_map());

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, feedback_vector)
    ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
                           Context::NUMBER_FUNCTION_INDEX)
    ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
                 mutable_heap_number)
    ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
                           Context::SYMBOL_FUNCTION_INDEX)
    ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)

    ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
                           Context::BOOLEAN_FUNCTION_INDEX);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);

    ALLOCATE_MAP(JS_PROMISE_CAPABILITY_TYPE, JSPromiseCapability::kSize,
                 js_promise_capability);

    for (unsigned i = 0; i < arraysize(string_type_table); i++) {
      const StringTypeTable& entry = string_type_table[i];
      {
        AllocationResult allocation = AllocateMap(entry.type, entry.size);
        if (!allocation.To(&obj)) return false;
      }
      Map* map = Map::cast(obj);
      map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
      // Mark cons string maps as unstable, because their objects can change
      // maps during GC.
      if (StringShape(entry.type).IsCons()) map->mark_unstable();
      roots_[entry.index] = map;
    }

    {  // Create a separate external one byte string map for native sources.
      AllocationResult allocation =
          AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
                      ExternalOneByteString::kShortSize);
      if (!allocation.To(&obj)) return false;
      Map* map = Map::cast(obj);
      map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
      set_native_source_string_map(map);
    }

    ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
    fixed_double_array_map()->set_elements_kind(FAST_HOLEY_DOUBLE_ELEMENTS);
    ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
    ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
    ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)

#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
  ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)

    TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)

    ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)

    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
    ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
    ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
    ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
    ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)

    ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)

    for (unsigned i = 0; i < arraysize(struct_table); i++) {
      const StructTable& entry = struct_table[i];
      Map* map;
      if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
      roots_[entry.index] = map;
    }

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, unseeded_number_dictionary)

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
    native_context_map()->set_dictionary_map(true);
    native_context_map()->set_visitor_id(
        StaticVisitorBase::kVisitNativeContext);

    ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
                 shared_function_info)

    ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
    ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
    external_map()->set_is_extensible(false);
#undef ALLOCATE_PRIMITIVE_MAP
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
  }

  {
    AllocationResult allocation = AllocateEmptyScopeInfo();
    if (!allocation.To(&obj)) return false;
  }

  set_empty_scope_info(ScopeInfo::cast(obj));
  {
    AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
    if (!allocation.To(&obj)) return false;
  }
  set_true_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kTrue);

  {
    AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
    if (!allocation.To(&obj)) return false;
  }
  set_false_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kFalse);

  {  // Empty arrays
    {
      ByteArray* byte_array;
      if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
      set_empty_byte_array(byte_array);
    }

#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
  {                                                                     \
    FixedTypedArrayBase* obj;                                           \
    if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
      return false;                                                     \
    set_empty_fixed_##type##_array(obj);                                \
  }

    TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
  }
  DCHECK(!InNewSpace(empty_fixed_array()));
  return true;
}

AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
                                          PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  int size = HeapNumber::kSize;
  STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);

  AllocationSpace space = SelectSpace(pretenure);

  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
    if (!allocation.To(&result)) return allocation;
  }

  Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
  HeapObject::cast(result)->set_map_no_write_barrier(map);
  return result;
}

AllocationResult Heap::AllocateCell(Object* value) {
  int size = Cell::kSize;
  STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);

  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }
  result->set_map_no_write_barrier(cell_map());
  Cell::cast(result)->set_value(value);
  return result;
}

AllocationResult Heap::AllocatePropertyCell() {
  int size = PropertyCell::kSize;
  STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);

  HeapObject* result = nullptr;
  AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
  if (!allocation.To(&result)) return allocation;

  result->set_map_no_write_barrier(global_property_cell_map());
  PropertyCell* cell = PropertyCell::cast(result);
  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                           SKIP_WRITE_BARRIER);
  cell->set_property_details(PropertyDetails(Smi::kZero));
  cell->set_value(the_hole_value());
  return result;
}


AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
  int size = WeakCell::kSize;
  STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }
  result->set_map_no_write_barrier(weak_cell_map());
  WeakCell::cast(result)->initialize(value);
  WeakCell::cast(result)->clear_next(the_hole_value());
  return result;
}


AllocationResult Heap::AllocateTransitionArray(int capacity) {
  DCHECK(capacity > 0);
  HeapObject* raw_array = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
    if (!allocation.To(&raw_array)) return allocation;
  }
  raw_array->set_map_no_write_barrier(transition_array_map());
  TransitionArray* array = TransitionArray::cast(raw_array);
  array->set_length(capacity);
  MemsetPointer(array->data_start(), undefined_value(), capacity);
  // Transition arrays are tenured. When black allocation is on we have to
  // add the transition array to the list of encountered_transition_arrays.
  if (incremental_marking()->black_allocation()) {
    array->set_next_link(encountered_transition_arrays(),
                         UPDATE_WEAK_WRITE_BARRIER);
    set_encountered_transition_arrays(array);
  } else {
    array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
  }
  return array;
}

bool Heap::CreateApiObjects() {
  HandleScope scope(isolate());
  set_message_listeners(*TemplateList::New(isolate(), 2));
  HeapObject* obj = nullptr;
  {
    AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
    if (!allocation.To(&obj)) return false;
  }
  InterceptorInfo* info = InterceptorInfo::cast(obj);
  info->set_flags(0);
  set_noop_interceptor_info(info);
  return true;
}


void Heap::CreateJSEntryStub() {
  JSEntryStub stub(isolate(), StackFrame::ENTRY);
  set_js_entry_code(*stub.GetCode());
}


void Heap::CreateJSConstructEntryStub() {
  JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
  set_js_construct_entry_code(*stub.GetCode());
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope(isolate());

  // Create stubs that should be there, so we don't unexpectedly have to
  // create them if we need them during the creation of another stub.
  // Stub creation mixes raw pointers and handles in an unsafe manner, so
  // we cannot create stubs while we are creating stubs.
  CodeStub::GenerateStubsAheadOfTime(isolate());

  // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
  // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
  // is created.

  // gcc-4.4 has a problem generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();
}


void Heap::CreateInitialObjects() {
  HandleScope scope(isolate());
  Factory* factory = isolate()->factory();

  // The -0 value must be set before NewNumber works.
  set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
  DCHECK(std::signbit(minus_zero_value()->Number()) != 0);

  set_nan_value(*factory->NewHeapNumber(
      std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
  set_hole_nan_value(
      *factory->NewHeapNumberFromBits(kHoleNanInt64, IMMUTABLE, TENURED));
  set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
  set_minus_infinity_value(
      *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));

  // Allocate initial string table.
  set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
  // Finish initializing oddballs after creating the string table.
  Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
                      factory->nan_value(), "undefined", Oddball::kUndefined);

  // Initialize the null_value.
  Oddball::Initialize(isolate(), factory->null_value(), "null",
                      handle(Smi::kZero, isolate()), "object", Oddball::kNull);

  // Initialize the_hole_value.
  Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
                      factory->hole_nan_value(), "undefined",
                      Oddball::kTheHole);

  // Initialize the true_value.
  Oddball::Initialize(isolate(), factory->true_value(), "true",
                      handle(Smi::FromInt(1), isolate()), "boolean",
                      Oddball::kTrue);

  // Initialize the false_value.
  Oddball::Initialize(isolate(), factory->false_value(), "false",
                      handle(Smi::kZero, isolate()), "boolean",
                      Oddball::kFalse);

  set_uninitialized_value(
      *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
                           handle(Smi::FromInt(-1), isolate()), "undefined",
                           Oddball::kUninitialized));

  set_arguments_marker(
      *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
                           handle(Smi::FromInt(-4), isolate()), "undefined",
                           Oddball::kArgumentsMarker));

  set_no_interceptor_result_sentinel(*factory->NewOddball(
      factory->no_interceptor_result_sentinel_map(),
      "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
      "undefined", Oddball::kOther));

  set_termination_exception(*factory->NewOddball(
      factory->termination_exception_map(), "termination_exception",
      handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));

  set_exception(*factory->NewOddball(factory->exception_map(), "exception",
                                     handle(Smi::FromInt(-5), isolate()),
                                     "undefined", Oddball::kException));

  set_optimized_out(*factory->NewOddball(factory->optimized_out_map(),
                                         "optimized_out",
                                         handle(Smi::FromInt(-6), isolate()),
                                         "undefined", Oddball::kOptimizedOut));

  set_stale_register(
      *factory->NewOddball(factory->stale_register_map(), "stale_register",
                           handle(Smi::FromInt(-7), isolate()), "undefined",
                           Oddball::kStaleRegister));

  for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
    Handle<String> str =
        factory->InternalizeUtf8String(constant_string_table[i].contents);
    roots_[constant_string_table[i].index] = *str;
  }

  // Create the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));

  set_instanceof_cache_function(Smi::kZero);
  set_instanceof_cache_map(Smi::kZero);
  set_instanceof_cache_answer(Smi::kZero);

  {
    HandleScope scope(isolate());
#define SYMBOL_INIT(name)                                              \
  {                                                                    \
    Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
    Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol());   \
    symbol->set_name(*name##d);                                        \
    roots_[k##name##RootIndex] = *symbol;                              \
  }
    PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT
  }

  {
    HandleScope scope(isolate());
#define SYMBOL_INIT(name, description)                                      \
  Handle<Symbol> name = factory->NewSymbol();                               \
  Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
  name->set_name(*name##d);                                                 \
  roots_[k##name##RootIndex] = *name;
    PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT

#define SYMBOL_INIT(name, description)                                      \
  Handle<Symbol> name = factory->NewSymbol();                               \
  Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
  name->set_is_well_known_symbol(true);                                     \
  name->set_name(*name##d);                                                 \
  roots_[k##name##RootIndex] = *name;
    WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT
  }

  Handle<NameDictionary> empty_properties_dictionary =
      NameDictionary::NewEmpty(isolate(), TENURED);
  empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
  set_empty_properties_dictionary(*empty_properties_dictionary);

  set_public_symbol_table(*empty_properties_dictionary);
  set_api_symbol_table(*empty_properties_dictionary);
  set_api_private_symbol_table(*empty_properties_dictionary);

  set_number_string_cache(
      *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));

  // Allocate cache for single character one byte strings.
  set_single_character_string_cache(
      *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));

  // Allocate cache for string split and regexp-multiple.
  set_string_split_cache(*factory->NewFixedArray(
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
  set_regexp_multiple_cache(*factory->NewFixedArray(
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));

  // Allocate cache for external strings pointing to native source code.
  set_natives_source_cache(
      *factory->NewFixedArray(Natives::GetBuiltinsCount()));

  set_experimental_natives_source_cache(
      *factory->NewFixedArray(ExperimentalNatives::GetBuiltinsCount()));

  set_extra_natives_source_cache(
      *factory->NewFixedArray(ExtraNatives::GetBuiltinsCount()));

  set_experimental_extra_natives_source_cache(
      *factory->NewFixedArray(ExperimentalExtraNatives::GetBuiltinsCount()));

  set_undefined_cell(*factory->NewCell(factory->undefined_value()));

  // Microtask queue uses the empty fixed array as a sentinel for "empty".
  // Number of queued microtasks stored in Isolate::pending_microtask_count().
  set_microtask_queue(empty_fixed_array());

  {
    Handle<FixedArray> empty_sloppy_arguments_elements =
        factory->NewFixedArray(2, TENURED);
    empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
    set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
  }

  {
    Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
    set_empty_weak_cell(*cell);
    cell->clear();
  }

  set_detached_contexts(empty_fixed_array());
  set_retained_maps(ArrayList::cast(empty_fixed_array()));

  set_weak_object_to_code_table(
      *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
                          TENURED));

  set_weak_new_space_object_to_code_list(
      ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
  weak_new_space_object_to_code_list()->SetLength(0);

  set_code_coverage_list(undefined_value());

  set_script_list(Smi::kZero);

  Handle<SeededNumberDictionary> slow_element_dictionary =
      SeededNumberDictionary::NewEmpty(isolate(), TENURED);
  slow_element_dictionary->set_requires_slow_elements();
  set_empty_slow_element_dictionary(*slow_element_dictionary);

  set_materialized_objects(*factory->NewFixedArray(0, TENURED));

  // Handling of script id generation is in Heap::NextScriptId().
  set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
  set_next_template_serial_number(Smi::kZero);

  // Allocate the empty script.
  Handle<Script> script = factory->NewScript(factory->empty_string());
  script->set_type(Script::TYPE_NATIVE);
  set_empty_script(*script);

  Handle<PropertyCell> cell = factory->NewPropertyCell();
  cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
  set_array_protector(*cell);

  cell = factory->NewPropertyCell();
  cell->set_value(the_hole_value());
  set_empty_property_cell(*cell);

  cell = factory->NewPropertyCell();
  cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
  set_array_iterator_protector(*cell);

  Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
      handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
  set_is_concat_spreadable_protector(*is_concat_spreadable_cell);

  Handle<Cell> species_cell = factory->NewCell(
      handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
  set_species_protector(*species_cell);

  cell = factory->NewPropertyCell();
  cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
  set_string_length_protector(*cell);

  Handle<Cell> fast_array_iteration_cell = factory->NewCell(
      handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
  set_fast_array_iteration_protector(*fast_array_iteration_cell);

  cell = factory->NewPropertyCell();
  cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
  set_array_buffer_neutering_protector(*cell);
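
  // Note on the protector cells initialized above (descriptive comment
  // added): each cell starts out holding Isolate::kProtectorValid, and the
  // runtime flips a cell to Isolate::kProtectorInvalid the first time the
  // corresponding fast-path assumption is broken (for example, a modified
  // Array.prototype), deoptimizing code that embedded the assumption.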

  set_serialized_templates(empty_fixed_array());
  set_serialized_global_proxy_sizes(empty_fixed_array());

  set_weak_stack_trace_list(Smi::kZero);

  set_noscript_shared_function_infos(Smi::kZero);

  // Initialize context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize compilation cache.
  isolate_->compilation_cache()->Clear();

  // Finish creating JSPromiseCapabilityMap
  {
    // TODO(caitp): This initialization can be removed once PromiseCapability
    // object is no longer used by builtins implemented in javascript.
    Handle<Map> map = factory->js_promise_capability_map();
    map->set_inobject_properties_or_constructor_function_index(3);

    Map::EnsureDescriptorSlack(map, 3);

    PropertyAttributes attrs =
        static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
    {  // promise
      Descriptor d = Descriptor::DataField(factory->promise_string(),
                                           JSPromiseCapability::kPromiseIndex,
                                           attrs, Representation::Tagged());
      map->AppendDescriptor(&d);
    }

    {  // resolve
      Descriptor d = Descriptor::DataField(factory->resolve_string(),
                                           JSPromiseCapability::kResolveIndex,
                                           attrs, Representation::Tagged());
      map->AppendDescriptor(&d);
    }

    {  // reject
      Descriptor d = Descriptor::DataField(factory->reject_string(),
                                           JSPromiseCapability::kRejectIndex,
                                           attrs, Representation::Tagged());
      map->AppendDescriptor(&d);
    }

    map->set_is_extensible(false);
    set_js_promise_capability_map(*map);
  }
}

bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
  switch (root_index) {
    case kNumberStringCacheRootIndex:
    case kInstanceofCacheFunctionRootIndex:
    case kInstanceofCacheMapRootIndex:
    case kInstanceofCacheAnswerRootIndex:
    case kCodeStubsRootIndex:
    case kEmptyScriptRootIndex:
    case kScriptListRootIndex:
    case kMaterializedObjectsRootIndex:
    case kMicrotaskQueueRootIndex:
    case kDetachedContextsRootIndex:
    case kWeakObjectToCodeTableRootIndex:
    case kWeakNewSpaceObjectToCodeListRootIndex:
    case kRetainedMapsRootIndex:
    case kCodeCoverageListRootIndex:
    case kNoScriptSharedFunctionInfosRootIndex:
    case kWeakStackTraceListRootIndex:
    case kSerializedTemplatesRootIndex:
    case kSerializedGlobalProxySizesRootIndex:
    case kPublicSymbolTableRootIndex:
    case kApiSymbolTableRootIndex:
    case kApiPrivateSymbolTableRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
      SMI_ROOT_LIST(SMI_ENTRY)
#undef SMI_ENTRY
    // String table
    case kStringTableRootIndex:
      return true;

    default:
      return false;
  }
}

bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
  return !RootCanBeWrittenAfterInitialization(root_index) &&
         !InNewSpace(root(root_index));
}

bool Heap::IsUnmodifiedHeapObject(Object** p) {
  Object* object = *p;
  if (object->IsSmi()) return false;
  HeapObject* heap_object = HeapObject::cast(object);
  if (!object->IsJSObject()) return false;
  JSObject* js_object = JSObject::cast(object);
  if (!js_object->WasConstructedFromApiFunction()) return false;
  JSFunction* constructor =
      JSFunction::cast(js_object->map()->GetConstructor());

  return constructor->initial_map() == heap_object->map();
}

int Heap::FullSizeNumberStringCacheLength() {
  // Compute the size of the number string cache based on the max newspace
  // size. The number string cache has a minimum size based on twice the
  // initial cache size to ensure that it is bigger after being made 'full
  // size'.
  size_t number_string_cache_size = max_semi_space_size_ / 512;
  number_string_cache_size =
      Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
          Min<size_t>(0x4000u, number_string_cache_size));
  // There is a string and a number per entry so the length is twice the
  // number of entries.
  return static_cast<int>(number_string_cache_size * 2);
}
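
// Worked example (illustrative): with max_semi_space_size_ == 8 MB the raw
// cache size is 8 MB / 512 == 16384 entries, which the Min/Max clamp leaves
// exactly at the 0x4000 cap, so the returned length is 32768 slots (a number
// and its cached string per entry).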


void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(i);
  }
}


Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
}


Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
    ExternalArrayType array_type) {
  switch (array_type) {
#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                                  \
    return kFixed##Type##ArrayMapRootIndex;

    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
#undef ARRAY_TYPE_TO_ROOT_INDEX

    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}
3012
3013
3014 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3015 ElementsKind elementsKind) {
3016 switch (elementsKind) {
3017 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3018 case TYPE##_ELEMENTS: \
3019 return kEmptyFixed##Type##ArrayRootIndex;
3020
3021 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3022 #undef ELEMENT_KIND_TO_ROOT_INDEX
3023 default:
3024 UNREACHABLE();
3025 return kUndefinedValueRootIndex;
3026 }
3027 }
3028
3029
3030 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3031 return FixedTypedArrayBase::cast(
3032 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3033 }
3034
3035
3036 AllocationResult Heap::AllocateForeign(Address address,
3037 PretenureFlag pretenure) {
3038 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3039 STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
3040 AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
3041 Foreign* result = nullptr;
3042 AllocationResult allocation = Allocate(foreign_map(), space);
3043 if (!allocation.To(&result)) return allocation;
3044 result->set_foreign_address(address);
3045 return result;
3046 }
3047
3048
3049 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3050 if (length < 0 || length > ByteArray::kMaxLength) {
3051 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3052 }
3053 int size = ByteArray::SizeFor(length);
3054 AllocationSpace space = SelectSpace(pretenure);
3055 HeapObject* result = nullptr;
3056 {
3057 AllocationResult allocation = AllocateRaw(size, space);
3058 if (!allocation.To(&result)) return allocation;
3059 }
3060
3061 result->set_map_no_write_barrier(byte_array_map());
3062 ByteArray::cast(result)->set_length(length);
3063 return result;
3064 }
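
// A minimal sketch (not additional VM code) of the allocation idiom used
// above and throughout this file, with the names from this function:
//
//   HeapObject* result = nullptr;
//   AllocationResult allocation = AllocateRaw(size, space);
//   if (!allocation.To(&result)) return allocation;  // propagate retry
//   result->set_map_no_write_barrier(byte_array_map());
//   ByteArray::cast(result)->set_length(length);
//
// The map write may skip the write barrier because the object is brand new
// and maps are never allocated in new space.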
3065
3066
3067 AllocationResult Heap::AllocateBytecodeArray(int length,
3068 const byte* const raw_bytecodes,
3069 int frame_size,
3070 int parameter_count,
3071 FixedArray* constant_pool) {
3072 if (length < 0 || length > BytecodeArray::kMaxLength) {
3073 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3074 }
3075 // Bytecode array is pretenured, so the constant pool array should be too.
3076 DCHECK(!InNewSpace(constant_pool));
3077
3078 int size = BytecodeArray::SizeFor(length);
3079 HeapObject* result = nullptr;
3080 {
3081 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3082 if (!allocation.To(&result)) return allocation;
3083 }
3084
3085 result->set_map_no_write_barrier(bytecode_array_map());
3086 BytecodeArray* instance = BytecodeArray::cast(result);
3087 instance->set_length(length);
3088 instance->set_frame_size(frame_size);
3089 instance->set_parameter_count(parameter_count);
3090 instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
3091 instance->set_osr_loop_nesting_level(0);
3092 instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
3093 instance->set_constant_pool(constant_pool);
3094 instance->set_handler_table(empty_fixed_array());
3095 instance->set_source_position_table(empty_byte_array());
3096 CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
3097
3098 return result;
3099 }
3100
3101 HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
3102 ClearRecordedSlots mode) {
3103 if (size == 0) return nullptr;
3104 HeapObject* filler = HeapObject::FromAddress(addr);
3105 if (size == kPointerSize) {
3106 filler->set_map_no_write_barrier(
3107 reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)));
3108 } else if (size == 2 * kPointerSize) {
3109 filler->set_map_no_write_barrier(
3110 reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
3111 } else {
3112 DCHECK_GT(size, 2 * kPointerSize);
3113 filler->set_map_no_write_barrier(
3114 reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
3115 FreeSpace::cast(filler)->nobarrier_set_size(size);
3116 }
3117 if (mode == ClearRecordedSlots::kYes) {
3118 ClearRecordedSlotRange(addr, addr + size);
3119 }
3120
3121 // At this point, we may be deserializing the heap from a snapshot, and
3122 // none of the maps may have been created yet, in which case they are NULL.
3123 DCHECK((filler->map() == NULL && !deserialization_complete_) ||
3124 filler->map()->IsMap());
3125 return filler;
3126 }
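
// Worked example (illustrative, assuming a 64-bit build with
// kPointerSize == 8):
//
//   CreateFillerObjectAt(addr,  8, mode)  // one-pointer filler map
//   CreateFillerObjectAt(addr, 16, mode)  // two-pointer filler map
//   CreateFillerObjectAt(addr, 48, mode)  // free-space map + size field = 48
//
// Only the third form stores an explicit size; the one- and two-pointer
// filler maps encode their size implicitly.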
3127
3128
3129 bool Heap::CanMoveObjectStart(HeapObject* object) {
3130 if (!FLAG_move_object_start) return false;
3131
3132 // Sampling heap profiler may have a reference to the object.
3133 if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
3134
3135 Address address = object->address();
3136
3137 if (lo_space()->Contains(object)) return false;
3138
3139 // We can move the object start if the page was already swept.
3140 return Page::FromAddress(address)->SweepingDone();
3141 }
3142
3143 bool Heap::IsImmovable(HeapObject* object) {
3144 MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
3145 return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
3146 }
3147
3148 void Heap::AdjustLiveBytes(HeapObject* object, int by) {
3149 // As long as the inspected object is black and we are currently not iterating
3150 // the heap using HeapIterator, we can update the live byte count. We cannot
3151 // update while using HeapIterator because the iterator is temporarily
3152 // marking the whole object graph, without updating live bytes.
3153 if (lo_space()->Contains(object)) {
3154 lo_space()->AdjustLiveBytes(by);
3155 } else if (!in_heap_iterator() &&
3156 !mark_compact_collector()->sweeping_in_progress() &&
3157 ObjectMarking::IsBlack(object)) {
3158 DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
3159 MemoryChunk::IncrementLiveBytes(object, by);
3160 }
3161 }
3162
3163
3164 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3165 int elements_to_trim) {
3166 CHECK_NOT_NULL(object);
3167 DCHECK(CanMoveObjectStart(object));
3168 DCHECK(!object->IsFixedTypedArrayBase());
3169 DCHECK(!object->IsByteArray());
3170 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3171 const int bytes_to_trim = elements_to_trim * element_size;
3172 Map* map = object->map();
3173
3174 // For now this trick is only applied to objects in new and paged space.
3175 // In large object space the object's start must coincide with the chunk
3176 // start, and thus the trick is not applicable.
3177 DCHECK(!lo_space()->Contains(object));
3178 DCHECK(object->map() != fixed_cow_array_map());
3179
3180 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3181 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3182 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3183
3184 const int len = object->length();
3185 DCHECK(elements_to_trim <= len);
3186
3187 // Calculate location of new array start.
3188 Address old_start = object->address();
3189 Address new_start = old_start + bytes_to_trim;
3190
3191 // Transfer the mark bits to their new location if the object is not within
3192 // a black area.
3193 if (!incremental_marking()->black_allocation() ||
3194 !Marking::IsBlack(
3195 ObjectMarking::MarkBitFrom(HeapObject::FromAddress(new_start)))) {
3196 IncrementalMarking::TransferMark(this, object,
3197 HeapObject::FromAddress(new_start));
3198 }
3199
3200 // Technically in new space this write might be omitted (except in
3201 // debug mode, which iterates through the heap), but to be safe
3202 // we still do it.
3203 CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
3204
3205 // Clear the mark bits of the black area that belongs now to the filler.
3206 // This is an optimization. The sweeper will release black fillers anyway.
3207 if (incremental_marking()->black_allocation() &&
3208 Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object))) {
3209 Page* page = Page::FromAddress(old_start);
3210 page->markbits()->ClearRange(
3211 page->AddressToMarkbitIndex(old_start),
3212 page->AddressToMarkbitIndex(old_start + bytes_to_trim));
3213 }
3214
3215 // Initialize header of the trimmed array. Since left trimming is only
3216 // performed on pages which are not concurrently swept, creating a filler
3217 // object does not require synchronization.
3218 Object** former_start = HeapObject::RawField(object, 0);
3219 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3220 former_start[new_start_index] = map;
3221 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3222
3223 FixedArrayBase* new_object =
3224 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3225
3226 // Maintain consistency of live bytes during incremental marking
3227 AdjustLiveBytes(new_object, -bytes_to_trim);
3228
3229 // Remove recorded slots for the new map and length offset.
3230 ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
3231 ClearRecordedSlot(new_object, HeapObject::RawField(
3232 new_object, FixedArrayBase::kLengthOffset));
3233
3234 // Notify the heap profiler of change in object layout.
3235 OnMoveEvent(new_object, object, new_object->Size());
3236 return new_object;
3237 }
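
// Illustrative layout (a sketch, not extra VM logic) for the left trim above
// with elements_to_trim == 2 on a pointer-element FixedArray, whose header
// is a map word followed by a length word:
//
//   before:  old_start -> | map | len=N   | e0  | e1      | e2 | ... |
//   after:   old_start -> | 2-word filler | map | len=N-2 | e2 | ... |
//                                           ^ new_start = old_start + 16
//
// The surviving elements are never copied; only a filler is written at the
// old start and a fresh header is written at the new start.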
3238
3239 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3240 const int len = object->length();
3241 DCHECK_LE(elements_to_trim, len);
3242 DCHECK_GE(elements_to_trim, 0);
3243
3244 int bytes_to_trim;
3245 if (object->IsFixedTypedArrayBase()) {
3246 InstanceType type = object->map()->instance_type();
3247 bytes_to_trim =
3248 FixedTypedArrayBase::TypedArraySize(type, len) -
3249 FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
3250 } else if (object->IsByteArray()) {
3251 int new_size = ByteArray::SizeFor(len - elements_to_trim);
3252 bytes_to_trim = ByteArray::SizeFor(len) - new_size;
3253 DCHECK_GE(bytes_to_trim, 0);
3254 } else {
3255 const int element_size =
3256 object->IsFixedArray() ? kPointerSize : kDoubleSize;
3257 bytes_to_trim = elements_to_trim * element_size;
3258 }
3259
3260
3261 // For now this trick is only applied to objects in new and paged space.
3262 DCHECK(object->map() != fixed_cow_array_map());
3263
3264 if (bytes_to_trim == 0) {
3265 // No need to create filler and update live bytes counters, just initialize
3266 // header of the trimmed array.
3267 object->synchronized_set_length(len - elements_to_trim);
3268 return;
3269 }
3270
3271 // Calculate location of new array end.
3272 Address old_end = object->address() + object->Size();
3273 Address new_end = old_end - bytes_to_trim;
3274
3275 // Technically in new space this write might be omitted (except in
3276 // debug mode, which iterates through the heap), but to be safe
3277 // we still do it.
3278 // We do not create a filler for objects in large object space.
3279 // TODO(hpayer): We should shrink the large object page if the size
3280 // of the object changed significantly.
3281 if (!lo_space()->Contains(object)) {
3282 HeapObject* filler =
3283 CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
3284 DCHECK_NOT_NULL(filler);
3285 // Clear the mark bits of the black area that belongs now to the filler.
3286 // This is an optimization. The sweeper will release black fillers anyway.
3287 if (incremental_marking()->black_allocation() &&
3288 ObjectMarking::IsBlackOrGrey(filler)) {
3289 Page* page = Page::FromAddress(new_end);
3290 page->markbits()->ClearRange(
3291 page->AddressToMarkbitIndex(new_end),
3292 page->AddressToMarkbitIndex(new_end + bytes_to_trim));
3293 }
3294 }
3295
3296 // Initialize header of the trimmed array. We are storing the new length
3297 // using release store after creating a filler for the left-over space to
3298 // avoid races with the sweeper thread.
3299 object->synchronized_set_length(len - elements_to_trim);
3300
3301 // Maintain consistency of live bytes during incremental marking
3302 AdjustLiveBytes(object, -bytes_to_trim);
3303
3304 // Notify the heap profiler of the change in object layout. The array may
3305 // not move during GC, but its size still has to be adjusted.
3306 HeapProfiler* profiler = isolate()->heap_profiler();
3307 if (profiler->is_tracking_allocations()) {
3308 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3309 }
3310 }
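
// Worked example (illustrative): right-trimming a FixedArray from 8 to 5
// elements frees 3 * kPointerSize bytes at the tail. The filler is written
// first and the shrunken length is published afterwards with a release
// store, so a concurrently sweeping thread observes either the old length
// or the new length followed by a valid filler, never a torn state.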
3311
3312
3313 AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
3314 int length, ExternalArrayType array_type, void* external_pointer,
3315 PretenureFlag pretenure) {
3316 int size = FixedTypedArrayBase::kHeaderSize;
3317 AllocationSpace space = SelectSpace(pretenure);
3318 HeapObject* result = nullptr;
3319 {
3320 AllocationResult allocation = AllocateRaw(size, space);
3321 if (!allocation.To(&result)) return allocation;
3322 }
3323
3324 result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
3325 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
3326 elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
3327 elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
3328 elements->set_length(length);
3329 return elements;
3330 }
3331
3332 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3333 ElementsKind* element_kind) {
3334 switch (array_type) {
3335 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3336 case kExternal##Type##Array: \
3337 *element_size = size; \
3338 *element_kind = TYPE##_ELEMENTS; \
3339 return;
3340
3341 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3342 #undef TYPED_ARRAY_CASE
3343
3344 default:
3345 *element_size = 0; // Bogus
3346 *element_kind = UINT8_ELEMENTS; // Bogus
3347 UNREACHABLE();
3348 }
3349 }
3350
3351
3352 AllocationResult Heap::AllocateFixedTypedArray(int length,
3353 ExternalArrayType array_type,
3354 bool initialize,
3355 PretenureFlag pretenure) {
3356 int element_size;
3357 ElementsKind elements_kind;
3358 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3359 int size = OBJECT_POINTER_ALIGN(length * element_size +
3360 FixedTypedArrayBase::kDataOffset);
3361 AllocationSpace space = SelectSpace(pretenure);
3362
3363 HeapObject* object = nullptr;
3364 AllocationResult allocation = AllocateRaw(
3365 size, space,
3366 array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
3367 if (!allocation.To(&object)) return allocation;
3368
3369 object->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
3370 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3371 elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
3372 elements->set_external_pointer(
3373 ExternalReference::fixed_typed_array_base_data_offset().address(),
3374 SKIP_WRITE_BARRIER);
3375 elements->set_length(length);
3376 if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
3377 return elements;
3378 }
3379
3380
3381 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3382 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3383 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
3384
3385 HeapObject* result = nullptr;
3386 if (!allocation.To(&result)) return allocation;
3387 if (immovable) {
3388 Address address = result->address();
3389 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3390 // Code objects which should stay at a fixed address are allocated either
3391 // in the first page of code space (objects on the first page of each space
3392 // are never moved) or in large object space; during snapshot creation,
3393 // the containing page is instead marked as immovable.
3394 if (!Heap::IsImmovable(result) &&
3395 !code_space_->FirstPage()->Contains(address)) {
3396 if (isolate()->serializer_enabled()) {
3397 chunk->MarkNeverEvacuate();
3398 } else {
3399 // Discard the first code allocation, which was on a page where it could
3400 // be moved.
3401 CreateFillerObjectAt(result->address(), object_size,
3402 ClearRecordedSlots::kNo);
3403 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3404 if (!allocation.To(&result)) return allocation;
3405 OnAllocationEvent(result, object_size);
3406 }
3407 }
3408 }
3409
3410 result->set_map_no_write_barrier(code_map());
3411 Code* code = Code::cast(result);
3412 DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
3413 DCHECK(!memory_allocator()->code_range()->valid() ||
3414 memory_allocator()->code_range()->contains(code->address()) ||
3415 object_size <= code_space()->AreaSize());
3416 code->set_gc_metadata(Smi::kZero);
3417 code->set_ic_age(global_ic_age_);
3418 return code;
3419 }
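
// Summary of the immovable handling above (illustrative, not extra logic):
//
//   already immovable, or on the first code-space page -> keep the object
//   serializer enabled -> pin the page via MarkNeverEvacuate()
//   otherwise          -> overwrite the allocation with a filler and retry
//                         in large object space, which never moves objects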
3420
3421
3422 AllocationResult Heap::CopyCode(Code* code) {
3423 AllocationResult allocation;
3424
3425 HeapObject* result = nullptr;
3426 // Allocate an object the same size as the code object.
3427 int obj_size = code->Size();
3428 allocation = AllocateRaw(obj_size, CODE_SPACE);
3429 if (!allocation.To(&result)) return allocation;
3430
3431 // Copy code object.
3432 Address old_addr = code->address();
3433 Address new_addr = result->address();
3434 CopyBlock(new_addr, old_addr, obj_size);
3435 Code* new_code = Code::cast(result);
3436
3437 // Relocate the copy.
3438 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
3439 DCHECK(!memory_allocator()->code_range()->valid() ||
3440 memory_allocator()->code_range()->contains(code->address()) ||
3441 obj_size <= code_space()->AreaSize());
3442 new_code->Relocate(new_addr - old_addr);
3443 // We have to iterate over the object and process its pointers when black
3444 // allocation is on.
3445 incremental_marking()->IterateBlackObject(new_code);
3446 // Record all references to embedded objects in the new code object.
3447 RecordWritesIntoCode(new_code);
3448 return new_code;
3449 }
3450
3451 AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
3452 int size = BytecodeArray::SizeFor(bytecode_array->length());
3453 HeapObject* result = nullptr;
3454 {
3455 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3456 if (!allocation.To(&result)) return allocation;
3457 }
3458
3459 result->set_map_no_write_barrier(bytecode_array_map());
3460 BytecodeArray* copy = BytecodeArray::cast(result);
3461 copy->set_length(bytecode_array->length());
3462 copy->set_frame_size(bytecode_array->frame_size());
3463 copy->set_parameter_count(bytecode_array->parameter_count());
3464 copy->set_constant_pool(bytecode_array->constant_pool());
3465 copy->set_handler_table(bytecode_array->handler_table());
3466 copy->set_source_position_table(bytecode_array->source_position_table());
3467 copy->set_interrupt_budget(bytecode_array->interrupt_budget());
3468 copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
3469 copy->set_bytecode_age(bytecode_array->bytecode_age());
3470 bytecode_array->CopyBytecodesTo(copy);
3471 return copy;
3472 }
3473
3474 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3475 AllocationSite* allocation_site) {
3476 memento->set_map_no_write_barrier(allocation_memento_map());
3477 DCHECK(allocation_site->map() == allocation_site_map());
3478 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3479 if (FLAG_allocation_site_pretenuring) {
3480 allocation_site->IncrementMementoCreateCount();
3481 }
3482 }
3483
3484
3485 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3486 AllocationSite* allocation_site) {
3487 DCHECK(gc_state_ == NOT_IN_GC);
3488 DCHECK(map->instance_type() != MAP_TYPE);
3489 int size = map->instance_size();
3490 if (allocation_site != NULL) {
3491 size += AllocationMemento::kSize;
3492 }
3493 HeapObject* result = nullptr;
3494 AllocationResult allocation = AllocateRaw(size, space);
3495 if (!allocation.To(&result)) return allocation;
3496 // No need for write barrier since object is white and map is in old space.
3497 result->set_map_no_write_barrier(map);
3498 if (allocation_site != NULL) {
3499 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3500 reinterpret_cast<Address>(result) + map->instance_size());
3501 InitializeAllocationMemento(alloc_memento, allocation_site);
3502 }
3503 return result;
3504 }
3505
3506
3507 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3508 Map* map) {
3509 obj->set_properties(properties);
3510 obj->initialize_elements();
3511 // TODO(1240798): Initialize the object's body using valid initial values
3512 // according to the object's initial map. For example, if the map's
3513 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3514 // to a number (e.g. Smi::kZero) and the elements initialized to a
3515 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3516 // verification code has to cope with (temporarily) invalid objects. See
3517 // for example, JSArray::JSArrayVerify.
3518 InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
3519 }
3520
3521
3522 void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
3523 if (start_offset == map->instance_size()) return;
3524 DCHECK_LT(start_offset, map->instance_size());
3525
3526 // We cannot always fill with one_pointer_filler_map because objects
3527 // created from API functions expect their internal fields to be initialized
3528 // with undefined_value.
3529 // Pre-allocated fields need to be initialized with undefined_value as well
3530 // so that object accesses before the constructor completes (e.g. in the
3531 // debugger) will not cause a crash.
3532
3533 // In case of Array subclassing, the |map| may already have transitioned
3534 // to a different elements kind from the initial map on which we track slack.
3535 bool in_progress = map->IsInobjectSlackTrackingInProgress();
3536 Object* filler;
3537 if (in_progress) {
3538 filler = one_pointer_filler_map();
3539 } else {
3540 filler = undefined_value();
3541 }
3542 obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
3543 if (in_progress) {
3544 map->FindRootMap()->InobjectSlackTrackingStep();
3545 }
3546 }
3547
3548
3549 AllocationResult Heap::AllocateJSObjectFromMap(
3550 Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
3551 // JSFunctions should be allocated using AllocateFunction to be
3552 // properly initialized.
3553 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3554
3555 // Both types of global objects should be allocated using
3556 // AllocateGlobalObject to be properly initialized.
3557 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3558
3559 // Allocate the backing storage for the properties.
3560 FixedArray* properties = empty_fixed_array();
3561
3562 // Allocate the JSObject.
3563 AllocationSpace space = SelectSpace(pretenure);
3564 JSObject* js_obj = nullptr;
3565 AllocationResult allocation = Allocate(map, space, allocation_site);
3566 if (!allocation.To(&js_obj)) return allocation;
3567
3568 // Initialize the JSObject.
3569 InitializeJSObjectFromMap(js_obj, properties, map);
3570 DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
3571 js_obj->HasFastStringWrapperElements() ||
3572 js_obj->HasFastArgumentsElements());
3573 return js_obj;
3574 }
3575
3576
3577 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3578 PretenureFlag pretenure,
3579 AllocationSite* allocation_site) {
3580 DCHECK(constructor->has_initial_map());
3581
3582 // Allocate the object based on the constructor's initial map.
3583 AllocationResult allocation = AllocateJSObjectFromMap(
3584 constructor->initial_map(), pretenure, allocation_site);
3585 #ifdef DEBUG
3586 // Make sure result is NOT a global object if valid.
3587 HeapObject* obj = nullptr;
3588 DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
3589 #endif
3590 return allocation;
3591 }
3592
3593
3594 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3595 // Make the clone.
3596 Map* map = source->map();
3597
3598 // We can only clone regexps, normal objects, api objects, errors or arrays.
3599 // Copying anything else will break invariants.
3600 CHECK(map->instance_type() == JS_REGEXP_TYPE ||
3601 map->instance_type() == JS_OBJECT_TYPE ||
3602 map->instance_type() == JS_ERROR_TYPE ||
3603 map->instance_type() == JS_ARRAY_TYPE ||
3604 map->instance_type() == JS_API_OBJECT_TYPE ||
3605 map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
3606
3607 int object_size = map->instance_size();
3608 HeapObject* clone = nullptr;
3609
3610 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3611
3612 int adjusted_object_size =
3613 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3614 AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
3615 if (!allocation.To(&clone)) return allocation;
3616
3617 SLOW_DCHECK(InNewSpace(clone));
3618 // Since we know the clone is allocated in new space, we can copy
3619 // the contents without worrying about updating the write barrier.
3620 CopyBlock(clone->address(), source->address(), object_size);
3621
3622 if (site != NULL) {
3623 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3624 reinterpret_cast<Address>(clone) + object_size);
3625 InitializeAllocationMemento(alloc_memento, site);
3626 }
3627
3628 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3629 source->GetElementsKind());
3630 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3631 FixedArray* properties = FixedArray::cast(source->properties());
3632 // Update elements if necessary.
3633 if (elements->length() > 0) {
3634 FixedArrayBase* elem = nullptr;
3635 {
3636 AllocationResult allocation;
3637 if (elements->map() == fixed_cow_array_map()) {
3638 allocation = FixedArray::cast(elements);
3639 } else if (source->HasFastDoubleElements()) {
3640 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3641 } else {
3642 allocation = CopyFixedArray(FixedArray::cast(elements));
3643 }
3644 if (!allocation.To(&elem)) return allocation;
3645 }
3646 JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
3647 }
3648 // Update properties if necessary.
3649 if (properties->length() > 0) {
3650 FixedArray* prop = nullptr;
3651 {
3652 AllocationResult allocation = CopyFixedArray(properties);
3653 if (!allocation.To(&prop)) return allocation;
3654 }
3655 JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
3656 }
3657 // Return the new clone.
3658 return clone;
3659 }
3660
3661
3662 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3663 int len) {
3664 // Only works for one-byte strings.
3665 DCHECK(vector.length() == len);
3666 MemCopy(chars, vector.start(), len);
3667 }
3668
3669 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3670 int len) {
3671 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3672 size_t stream_length = vector.length();
3673 while (stream_length != 0) {
3674 size_t consumed = 0;
3675 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3676 DCHECK(c != unibrow::Utf8::kBadChar);
3677 DCHECK(consumed <= stream_length);
3678 stream_length -= consumed;
3679 stream += consumed;
3680 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3681 len -= 2;
3682 if (len < 0) break;
3683 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3684 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3685 } else {
3686 len -= 1;
3687 if (len < 0) break;
3688 *chars++ = c;
3689 }
3690 }
3691 DCHECK(stream_length == 0);
3692 DCHECK(len == 0);
3693 }
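
// Worked example (illustrative): decoding the UTF-8 bytes for U+1F600 yields
// c > kMaxNonSurrogateCharCode, so the loop emits the surrogate pair
// 0xD83D 0xDE00 and debits len by 2; a BMP character such as U+00E9 emits a
// single code unit and debits len by 1. The trailing DCHECKs verify that the
// caller-supplied len matched the decoded UTF-16 length exactly.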
3694
3695
3696 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3697 DCHECK(s->length() == len);
3698 String::WriteToFlat(s, chars, 0, len);
3699 }
3700
3701
3702 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3703 DCHECK(s->length() == len);
3704 String::WriteToFlat(s, chars, 0, len);
3705 }
3706
3707
3708 template <bool is_one_byte, typename T>
3709 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3710 uint32_t hash_field) {
3711 DCHECK(chars >= 0);
3712 // Compute map and object size.
3713 int size;
3714 Map* map;
3715
3716 DCHECK_LE(0, chars);
3717 DCHECK_GE(String::kMaxLength, chars);
3718 if (is_one_byte) {
3719 map = one_byte_internalized_string_map();
3720 size = SeqOneByteString::SizeFor(chars);
3721 } else {
3722 map = internalized_string_map();
3723 size = SeqTwoByteString::SizeFor(chars);
3724 }
3725
3726 // Allocate string.
3727 HeapObject* result = nullptr;
3728 {
3729 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3730 if (!allocation.To(&result)) return allocation;
3731 }
3732
3733 result->set_map_no_write_barrier(map);
3734 // Set length and hash fields of the allocated string.
3735 String* answer = String::cast(result);
3736 answer->set_length(chars);
3737 answer->set_hash_field(hash_field);
3738
3739 DCHECK_EQ(size, answer->Size());
3740
3741 if (is_one_byte) {
3742 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3743 } else {
3744 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3745 }
3746 return answer;
3747 }
3748
3749
3750 // Need explicit instantiations.
3751 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3752 int,
3753 uint32_t);
3754 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3755 int,
3756 uint32_t);
3757 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3758 Vector<const char>, int, uint32_t);
3759
3760
3761 AllocationResult Heap::AllocateRawOneByteString(int length,
3762 PretenureFlag pretenure) {
3763 DCHECK_LE(0, length);
3764 DCHECK_GE(String::kMaxLength, length);
3765 int size = SeqOneByteString::SizeFor(length);
3766 DCHECK(size <= SeqOneByteString::kMaxSize);
3767 AllocationSpace space = SelectSpace(pretenure);
3768
3769 HeapObject* result = nullptr;
3770 {
3771 AllocationResult allocation = AllocateRaw(size, space);
3772 if (!allocation.To(&result)) return allocation;
3773 }
3774
3775 // Partially initialize the object.
3776 result->set_map_no_write_barrier(one_byte_string_map());
3777 String::cast(result)->set_length(length);
3778 String::cast(result)->set_hash_field(String::kEmptyHashField);
3779 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3780
3781 return result;
3782 }
3783
3784
3785 AllocationResult Heap::AllocateRawTwoByteString(int length,
3786 PretenureFlag pretenure) {
3787 DCHECK_LE(0, length);
3788 DCHECK_GE(String::kMaxLength, length);
3789 int size = SeqTwoByteString::SizeFor(length);
3790 DCHECK(size <= SeqTwoByteString::kMaxSize);
3791 AllocationSpace space = SelectSpace(pretenure);
3792
3793 HeapObject* result = nullptr;
3794 {
3795 AllocationResult allocation = AllocateRaw(size, space);
3796 if (!allocation.To(&result)) return allocation;
3797 }
3798
3799 // Partially initialize the object.
3800 result->set_map_no_write_barrier(string_map());
3801 String::cast(result)->set_length(length);
3802 String::cast(result)->set_hash_field(String::kEmptyHashField);
3803 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3804 return result;
3805 }
3806
3807
3808 AllocationResult Heap::AllocateEmptyFixedArray() {
3809 int size = FixedArray::SizeFor(0);
3810 HeapObject* result = nullptr;
3811 {
3812 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3813 if (!allocation.To(&result)) return allocation;
3814 }
3815 // Initialize the object.
3816 result->set_map_no_write_barrier(fixed_array_map());
3817 FixedArray::cast(result)->set_length(0);
3818 return result;
3819 }
3820
3821 AllocationResult Heap::AllocateEmptyScopeInfo() {
3822 int size = FixedArray::SizeFor(0);
3823 HeapObject* result = nullptr;
3824 {
3825 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3826 if (!allocation.To(&result)) return allocation;
3827 }
3828 // Initialize the object.
3829 result->set_map_no_write_barrier(scope_info_map());
3830 FixedArray::cast(result)->set_length(0);
3831 return result;
3832 }
3833
3834 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3835 if (!InNewSpace(src)) {
3836 return src;
3837 }
3838
3839 int len = src->length();
3840 HeapObject* obj = nullptr;
3841 {
3842 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3843 if (!allocation.To(&obj)) return allocation;
3844 }
3845 obj->set_map_no_write_barrier(fixed_array_map());
3846 FixedArray* result = FixedArray::cast(obj);
3847 result->set_length(len);
3848
3849 // Copy the content.
3850 DisallowHeapAllocation no_gc;
3851 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3852 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3853
3854 // TODO(mvstanton): The map is set twice because of protection against calling
3855 // set() on a COW FixedArray. Issue v8:3221 was created to track this, and
3856 // once it is resolved we might be able to remove this whole method.
3857 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3858 return result;
3859 }
3860
3861
3862 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3863 ExternalArrayType array_type) {
3864 return AllocateFixedTypedArray(0, array_type, false, TENURED);
3865 }
3866
3867
3868 AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
3869 PretenureFlag pretenure) {
3870 int old_len = src->length();
3871 int new_len = old_len + grow_by;
3872 DCHECK(new_len >= old_len);
3873 HeapObject* obj = nullptr;
3874 {
3875 AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
3876 if (!allocation.To(&obj)) return allocation;
3877 }
3878
3879 obj->set_map_no_write_barrier(fixed_array_map());
3880 FixedArray* result = FixedArray::cast(obj);
3881 result->set_length(new_len);
3882
3883 // Copy the content.
3884 DisallowHeapAllocation no_gc;
3885 WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
3886 for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
3887 MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
3888 return result;
3889 }
3890
3891 AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
3892 PretenureFlag pretenure) {
3893 if (new_len == 0) return empty_fixed_array();
3894
3895 DCHECK_LE(new_len, src->length());
3896
3897 HeapObject* obj = nullptr;
3898 {
3899 AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
3900 if (!allocation.To(&obj)) return allocation;
3901 }
3902 obj->set_map_no_write_barrier(fixed_array_map());
3903
3904 FixedArray* result = FixedArray::cast(obj);
3905 result->set_length(new_len);
3906
3907 // Copy the content.
3908 DisallowHeapAllocation no_gc;
3909 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3910 for (int i = 0; i < new_len; i++) result->set(i, src->get(i), mode);
3911 return result;
3912 }
3913
3914 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3915 int len = src->length();
3916 HeapObject* obj = nullptr;
3917 {
3918 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
3919 if (!allocation.To(&obj)) return allocation;
3920 }
3921 obj->set_map_no_write_barrier(map);
3922
3923 FixedArray* result = FixedArray::cast(obj);
3924 DisallowHeapAllocation no_gc;
3925 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3926
3927 // Eliminate the write barrier if possible.
3928 if (mode == SKIP_WRITE_BARRIER) {
3929 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
3930 FixedArray::SizeFor(len) - kPointerSize);
3931 return obj;
3932 }
3933
3934 // Slow case: Just copy the content one-by-one.
3935 result->set_length(len);
3936 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3937 return result;
3938 }
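
// Note (illustrative): the fast path above copies everything after the map
// word in a single CopyBlock, including the length field at offset
// kPointerSize, which is why set_length() appears only on the
// element-by-element slow path.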
3939
3940
3941 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3942 Map* map) {
3943 int len = src->length();
3944 HeapObject* obj = nullptr;
3945 {
3946 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
3947 if (!allocation.To(&obj)) return allocation;
3948 }
3949 obj->set_map_no_write_barrier(map);
3950 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
3951 src->address() + FixedDoubleArray::kLengthOffset,
3952 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
3953 return obj;
3954 }
3955
3956
3957 AllocationResult Heap::AllocateRawFixedArray(int length,
3958 PretenureFlag pretenure) {
3959 if (length < 0 || length > FixedArray::kMaxLength) {
3960 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3961 }
3962 int size = FixedArray::SizeFor(length);
3963 AllocationSpace space = SelectSpace(pretenure);
3964
3965 AllocationResult result = AllocateRaw(size, space);
3966 if (!result.IsRetry() && size > kMaxRegularHeapObjectSize &&
3967 FLAG_use_marking_progress_bar) {
3968 MemoryChunk* chunk =
3969 MemoryChunk::FromAddress(result.ToObjectChecked()->address());
3970 chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
3971 }
3972 return result;
3973 }
3974
3975
3976 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
3977 PretenureFlag pretenure,
3978 Object* filler) {
3979 DCHECK(length >= 0);
3980 DCHECK(empty_fixed_array()->IsFixedArray());
3981 if (length == 0) return empty_fixed_array();
3982
3983 DCHECK(!InNewSpace(filler));
3984 HeapObject* result = nullptr;
3985 {
3986 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
3987 if (!allocation.To(&result)) return allocation;
3988 }
3989
3990 result->set_map_no_write_barrier(fixed_array_map());
3991 FixedArray* array = FixedArray::cast(result);
3992 array->set_length(length);
3993 MemsetPointer(array->data_start(), filler, length);
3994 return array;
3995 }
3996
3997
3998 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
3999 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4000 }
4001
4002
4003 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4004 if (length == 0) return empty_fixed_array();
4005
4006 HeapObject* obj = nullptr;
4007 {
4008 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4009 if (!allocation.To(&obj)) return allocation;
4010 }
4011
4012 obj->set_map_no_write_barrier(fixed_array_map());
4013 FixedArray::cast(obj)->set_length(length);
4014 return obj;
4015 }
4016
4017
4018 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4019 int length, PretenureFlag pretenure) {
4020 if (length == 0) return empty_fixed_array();
4021
4022 HeapObject* elements = nullptr;
4023 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4024 if (!allocation.To(&elements)) return allocation;
4025
4026 elements->set_map_no_write_barrier(fixed_double_array_map());
4027 FixedDoubleArray::cast(elements)->set_length(length);
4028 return elements;
4029 }
4030
4031
4032 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4033 PretenureFlag pretenure) {
4034 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4035 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4036 }
4037 int size = FixedDoubleArray::SizeFor(length);
4038 AllocationSpace space = SelectSpace(pretenure);
4039
4040 HeapObject* object = nullptr;
4041 {
4042 AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
4043 if (!allocation.To(&object)) return allocation;
4044 }
4045
4046 return object;
4047 }
4048
4049
4050 AllocationResult Heap::AllocateSymbol() {
4051 // Statically ensure that it is safe to allocate symbols in paged spaces.
4052 STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
4053
4054 HeapObject* result = nullptr;
4055 AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
4056 if (!allocation.To(&result)) return allocation;
4057
4058 result->set_map_no_write_barrier(symbol_map());
4059
4060 // Generate a random hash value.
4061 int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
4062
4063 Symbol::cast(result)
4064 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4065 Symbol::cast(result)->set_name(undefined_value());
4066 Symbol::cast(result)->set_flags(0);
4067
4068 DCHECK(!Symbol::cast(result)->is_private());
4069 return result;
4070 }
4071
4072
4073 AllocationResult Heap::AllocateStruct(InstanceType type) {
4074 Map* map;
4075 switch (type) {
4076 #define MAKE_CASE(NAME, Name, name) \
4077 case NAME##_TYPE: \
4078 map = name##_map(); \
4079 break;
4080 STRUCT_LIST(MAKE_CASE)
4081 #undef MAKE_CASE
4082 default:
4083 UNREACHABLE();
4084 return exception();
4085 }
4086 int size = map->instance_size();
4087 Struct* result = nullptr;
4088 {
4089 AllocationResult allocation = Allocate(map, OLD_SPACE);
4090 if (!allocation.To(&result)) return allocation;
4091 }
4092 result->InitializeBody(size);
4093 return result;
4094 }
4095
4096
4097 void Heap::MakeHeapIterable() {
4098 mark_compact_collector()->EnsureSweepingCompleted();
4099 }
4100
4101
4102 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
4103 const double kMinMutatorUtilization = 0.0;
4104 const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
4105 if (mutator_speed == 0) return kMinMutatorUtilization;
4106 if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
4107 // Derivation:
4108 // mutator_utilization = mutator_time / (mutator_time + gc_time)
4109 // mutator_time = 1 / mutator_speed
4110 // gc_time = 1 / gc_speed
4111 // mutator_utilization = (1 / mutator_speed) /
4112 // (1 / mutator_speed + 1 / gc_speed)
4113 // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
4114 return gc_speed / (mutator_speed + gc_speed);
4115 }
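
// Worked example (illustrative): with mutator_speed = 600000 bytes/ms and an
// unknown gc_speed of 0, the conservative 200000 bytes/ms default is
// substituted, giving
//
//   mutator_utilization = 200000 / (600000 + 200000) = 0.25
//
// i.e. only a quarter of the time would be left to the mutator at those
// speeds.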
4116
4117
4118 double Heap::YoungGenerationMutatorUtilization() {
4119 double mutator_speed = static_cast<double>(
4120 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4121 double gc_speed =
4122 tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
4123 double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4124 if (FLAG_trace_mutator_utilization) {
4125 isolate()->PrintWithTimestamp(
4126 "Young generation mutator utilization = %.3f ("
4127 "mutator_speed=%.f, gc_speed=%.f)\n",
4128 result, mutator_speed, gc_speed);
4129 }
4130 return result;
4131 }
4132
4133
4134 double Heap::OldGenerationMutatorUtilization() {
4135 double mutator_speed = static_cast<double>(
4136 tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
4137 double gc_speed = static_cast<double>(
4138 tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
4139 double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4140 if (FLAG_trace_mutator_utilization) {
4141 isolate()->PrintWithTimestamp(
4142 "Old generation mutator utilization = %.3f ("
4143 "mutator_speed=%.f, gc_speed=%.f)\n",
4144 result, mutator_speed, gc_speed);
4145 }
4146 return result;
4147 }
4148
4149
4150 bool Heap::HasLowYoungGenerationAllocationRate() {
4151 const double high_mutator_utilization = 0.993;
4152 return YoungGenerationMutatorUtilization() > high_mutator_utilization;
4153 }
4154
4155
4156 bool Heap::HasLowOldGenerationAllocationRate() {
4157 const double high_mutator_utilization = 0.993;
4158 return OldGenerationMutatorUtilization() > high_mutator_utilization;
4159 }
4160
4161
4162 bool Heap::HasLowAllocationRate() {
4163 return HasLowYoungGenerationAllocationRate() &&
4164 HasLowOldGenerationAllocationRate();
4165 }
4166
4167
4168 bool Heap::HasHighFragmentation() {
4169 size_t used = PromotedSpaceSizeOfObjects();
4170 size_t committed = CommittedOldGenerationMemory();
4171 return HasHighFragmentation(used, committed);
4172 }
4173
4174 bool Heap::HasHighFragmentation(size_t used, size_t committed) {
4175 const size_t kSlack = 16 * MB;
4176 // Fragmentation is high if committed > 2 * used + kSlack.
4177 // Rewrite the expression to avoid overflow.
4178 DCHECK_GE(committed, used);
4179 return committed - used > used + kSlack;
4180 }
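
// Worked example (illustrative): subtracting |used| from both sides of
// committed > 2 * used + kSlack yields the form above, which cannot
// underflow because committed >= used. For used = 100 MB and
// committed = 220 MB: 220 - 100 = 120 MB > 100 MB + 16 MB, so fragmentation
// is reported as high.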
4181
4182 bool Heap::ShouldOptimizeForMemoryUsage() {
4183 return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
4184 HighMemoryPressure() || IsLowMemoryDevice();
4185 }
4186
4187 void Heap::ActivateMemoryReducerIfNeeded() {
4188 // Activate the memory reducer when switching to the background if
4189 // - there was no mark-compact since the start, and
4190 // - the committed memory can potentially be reduced.
4191 // 2 pages each for the old, code, and map spaces, plus 1 page for new space.
4192 const int kMinCommittedMemory = 7 * Page::kPageSize;
4193 if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
4194 isolate()->IsIsolateInBackground()) {
4195 MemoryReducer::Event event;
4196 event.type = MemoryReducer::kPossibleGarbage;
4197 event.time_ms = MonotonicallyIncreasingTimeInMs();
4198 memory_reducer_->NotifyPossibleGarbage(event);
4199 }
4200 }
4201
4202 void Heap::ReduceNewSpaceSize() {
4203 // TODO(ulan): Unify this constant with the similar constant in
4204 // GCIdleTimeHandler once the change is merged to 4.5.
4205 static const size_t kLowAllocationThroughput = 1000;
4206 const double allocation_throughput =
4207 tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
4208
4209 if (FLAG_predictable) return;
4210
4211 if (ShouldReduceMemory() ||
4212 ((allocation_throughput != 0) &&
4213 (allocation_throughput < kLowAllocationThroughput))) {
4214 new_space_->Shrink();
4215 UncommitFromSpace();
4216 }
4217 }
4218
4219 void Heap::FinalizeIncrementalMarkingIfComplete(
4220 GarbageCollectionReason gc_reason) {
4221 if (incremental_marking()->IsMarking() &&
4222 (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4223 (!incremental_marking()->finalize_marking_completed() &&
4224 mark_compact_collector()->marking_deque()->IsEmpty() &&
4225 local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
4226 FinalizeIncrementalMarking(gc_reason);
4227 } else if (incremental_marking()->IsComplete() ||
4228 (mark_compact_collector()->marking_deque()->IsEmpty() &&
4229 local_embedder_heap_tracer()
4230 ->ShouldFinalizeIncrementalMarking())) {
4231 CollectAllGarbage(current_gc_flags_, gc_reason);
4232 }
4233 }
4234
4235 bool Heap::TryFinalizeIdleIncrementalMarking(
4236 double idle_time_in_ms, GarbageCollectionReason gc_reason) {
4237 size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
4238 double final_incremental_mark_compact_speed_in_bytes_per_ms =
4239 tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
4240 if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4241 (!incremental_marking()->finalize_marking_completed() &&
4242 mark_compact_collector()->marking_deque()->IsEmpty() &&
4243 local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking() &&
4244 gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
4245 idle_time_in_ms))) {
4246 FinalizeIncrementalMarking(gc_reason);
4247 return true;
4248 } else if (incremental_marking()->IsComplete() ||
4249 (mark_compact_collector()->marking_deque()->IsEmpty() &&
4250 local_embedder_heap_tracer()
4251 ->ShouldFinalizeIncrementalMarking() &&
4252 gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
4253 idle_time_in_ms, size_of_objects,
4254 final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4255 CollectAllGarbage(current_gc_flags_, gc_reason);
4256 return true;
4257 }
4258 return false;
4259 }
4260
4261 void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
4262 // TODO(hpayer): We do not have to iterate reservations on black objects
4263 // for marking. We just have to execute the special visiting side effect
4264 // code that adds objects to global data structures, e.g. for array buffers.
4265
4266 if (incremental_marking()->black_allocation()) {
4267 // Iterate black objects in old space, code space, map space, and large
4268 // object space for side effects.
4269 for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
4270 const Heap::Reservation& res = reservations[i];
4271 for (auto& chunk : res) {
4272 Address addr = chunk.start;
4273 while (addr < chunk.end) {
4274 HeapObject* obj = HeapObject::FromAddress(addr);
4275 // There might be grey objects due to black to grey transitions in
4276 // incremental marking. E.g. see VisitNativeContextIncremental.
4277 DCHECK(ObjectMarking::IsBlackOrGrey(obj));
4278 if (ObjectMarking::IsBlack(obj)) {
4279 incremental_marking()->IterateBlackObject(obj);
4280 }
4281 addr += obj->Size();
4282 }
4283 }
4284 }
4285 }
4286 }
4287
4288 void Heap::NotifyObjectLayoutChange(HeapObject* object,
4289 const DisallowHeapAllocation&) {
4290 if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
4291 incremental_marking()->MarkGrey(this, object);
4292 }
4293 #ifdef VERIFY_HEAP
4294 DCHECK(pending_layout_change_object_ == nullptr);
4295 pending_layout_change_object_ = object;
4296 #endif
4297 }
4298
4299 #ifdef VERIFY_HEAP
4300 void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
4301 if (pending_layout_change_object_ == nullptr) {
4302 DCHECK(!object->IsJSObject() ||
4303 !object->map()->TransitionRequiresSynchronizationWithGC(new_map));
4304 } else {
4305 DCHECK_EQ(pending_layout_change_object_, object);
4306 pending_layout_change_object_ = nullptr;
4307 }
4308 }
4309 #endif
4310
4311 GCIdleTimeHeapState Heap::ComputeHeapState() {
4312 GCIdleTimeHeapState heap_state;
4313 heap_state.contexts_disposed = contexts_disposed_;
4314 heap_state.contexts_disposal_rate =
4315 tracer()->ContextDisposalRateInMilliseconds();
4316 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4317 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4318 return heap_state;
4319 }
4320
4321
4322 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
4323 GCIdleTimeHeapState heap_state,
4324 double deadline_in_ms) {
4325 bool result = false;
4326 switch (action.type) {
4327 case DONE:
4328 result = true;
4329 break;
4330 case DO_INCREMENTAL_STEP: {
4331 const double remaining_idle_time_in_ms =
4332 incremental_marking()->AdvanceIncrementalMarking(
4333 deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
4334 IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
4335 if (remaining_idle_time_in_ms > 0.0) {
4336 TryFinalizeIdleIncrementalMarking(
4337 remaining_idle_time_in_ms,
4338 GarbageCollectionReason::kFinalizeMarkingViaTask);
4339 }
4340 result = incremental_marking()->IsStopped();
4341 break;
4342 }
4343 case DO_FULL_GC: {
4344 DCHECK(contexts_disposed_ > 0);
4345 HistogramTimerScope scope(isolate_->counters()->gc_context());
4346 TRACE_EVENT0("v8", "V8.GCContext");
4347 CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
4348 break;
4349 }
4350 case DO_NOTHING:
4351 break;
4352 }
4353
4354 return result;
4355 }
4356
4357
4358 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
4359 GCIdleTimeHeapState heap_state,
4360 double start_ms, double deadline_in_ms) {
4361 double idle_time_in_ms = deadline_in_ms - start_ms;
4362 double current_time = MonotonicallyIncreasingTimeInMs();
4363 last_idle_notification_time_ = current_time;
4364 double deadline_difference = deadline_in_ms - current_time;
4365
4366 contexts_disposed_ = 0;
4367
4368 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4369 static_cast<int>(idle_time_in_ms));
4370
4371 if (deadline_in_ms - start_ms >
4372 GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
4373 int committed_memory = static_cast<int>(CommittedMemory() / KB);
4374 int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
4375 isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
4376 start_ms, committed_memory);
4377 isolate()->counters()->aggregated_memory_heap_used()->AddSample(
4378 start_ms, used_memory);
4379 }
4380
4381 if (deadline_difference >= 0) {
4382 if (action.type != DONE && action.type != DO_NOTHING) {
4383 isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4384 static_cast<int>(deadline_difference));
4385 }
4386 } else {
4387 isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4388 static_cast<int>(-deadline_difference));
4389 }
4390
4391 if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
4392 FLAG_trace_idle_notification_verbose) {
4393 isolate_->PrintWithTimestamp(
4394 "Idle notification: requested idle time %.2f ms, used idle time %.2f "
4395 "ms, deadline usage %.2f ms [",
4396 idle_time_in_ms, idle_time_in_ms - deadline_difference,
4397 deadline_difference);
4398 action.Print();
4399 PrintF("]");
4400 if (FLAG_trace_idle_notification_verbose) {
4401 PrintF("[");
4402 heap_state.Print();
4403 PrintF("]");
4404 }
4405 PrintF("\n");
4406 }
4407 }
4408
4409
4410 double Heap::MonotonicallyIncreasingTimeInMs() {
4411 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4412 static_cast<double>(base::Time::kMillisecondsPerSecond);
4413 }
4414
4415
4416 bool Heap::IdleNotification(int idle_time_in_ms) {
4417 return IdleNotification(
4418 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4419 (static_cast<double>(idle_time_in_ms) /
4420 static_cast<double>(base::Time::kMillisecondsPerSecond)));
4421 }
4422
4423
4424 bool Heap::IdleNotification(double deadline_in_seconds) {
4425 CHECK(HasBeenSetUp());
4426 double deadline_in_ms =
4427 deadline_in_seconds *
4428 static_cast<double>(base::Time::kMillisecondsPerSecond);
4429 HistogramTimerScope idle_notification_scope(
4430 isolate_->counters()->gc_idle_notification());
4431 TRACE_EVENT0("v8", "V8.GCIdleNotification");
4432 double start_ms = MonotonicallyIncreasingTimeInMs();
4433 double idle_time_in_ms = deadline_in_ms - start_ms;
4434
4435 tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
4436 OldGenerationAllocationCounter());
4437
4438 GCIdleTimeHeapState heap_state = ComputeHeapState();
4439
4440 GCIdleTimeAction action =
4441 gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
4442
4443 bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
4444
4445 IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
4446 return result;
4447 }
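// A minimal sketch of how an embedder typically drives this idle-time
// machinery through the public v8::Isolate API; `platform` and `isolate`
// are the embedder's own objects, and the 16 ms frame budget is an assumed
// example value, not something this file prescribes:
//
//   double now_s = platform->MonotonicallyIncreasingTime();
//   isolate->IdleNotificationDeadline(now_s + 0.016);  // ~16 ms of idle
//
// The public deadline is given in seconds; the overloads above convert it
// to milliseconds before consulting the GCIdleTimeHandler.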
4448
4449
4450 bool Heap::RecentIdleNotificationHappened() {
4451 return (last_idle_notification_time_ +
4452 GCIdleTimeHandler::kMaxScheduledIdleTime) >
4453 MonotonicallyIncreasingTimeInMs();
4454 }
4455
4456 class MemoryPressureInterruptTask : public CancelableTask {
4457 public:
4458 explicit MemoryPressureInterruptTask(Heap* heap)
4459 : CancelableTask(heap->isolate()), heap_(heap) {}
4460
4461 virtual ~MemoryPressureInterruptTask() {}
4462
4463 private:
4464 // v8::internal::CancelableTask overrides.
4465 void RunInternal() override { heap_->CheckMemoryPressure(); }
4466
4467 Heap* heap_;
4468 DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
4469 };
4470
4471 void Heap::CheckMemoryPressure() {
4472 if (HighMemoryPressure()) {
4473 if (isolate()->concurrent_recompilation_enabled()) {
4474 // The optimizing compiler may be unnecessarily holding on to memory.
4475 DisallowHeapAllocation no_recursive_gc;
4476 isolate()->optimizing_compile_dispatcher()->Flush(
4477 OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
4478 }
4479 }
4480 if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
4481 CollectGarbageOnMemoryPressure();
4482 } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
4483 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4484 StartIncrementalMarking(kReduceMemoryFootprintMask,
4485 GarbageCollectionReason::kMemoryPressure);
4486 }
4487 }
4488 MemoryReducer::Event event;
4489 event.type = MemoryReducer::kPossibleGarbage;
4490 event.time_ms = MonotonicallyIncreasingTimeInMs();
4491 memory_reducer_->NotifyPossibleGarbage(event);
4492 }
4493
4494 void Heap::CollectGarbageOnMemoryPressure() {
4495 const int kGarbageThresholdInBytes = 8 * MB;
4496 const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
4497 // This constant is the maximum response time in the RAIL performance model.
4498 const double kMaxMemoryPressurePauseMs = 100;
4499
4500 double start = MonotonicallyIncreasingTimeInMs();
4501 CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
4502 GarbageCollectionReason::kMemoryPressure,
4503 kGCCallbackFlagCollectAllAvailableGarbage);
4504 double end = MonotonicallyIncreasingTimeInMs();
4505
4506 // Estimate how much memory we can free.
4507 int64_t potential_garbage =
4508 (CommittedMemory() - SizeOfObjects()) + external_memory_;
4509 // If we can potentially free a large amount of memory, then start the GC
4510 // right away instead of waiting for the memory reducer.
4511 if (potential_garbage >= kGarbageThresholdInBytes &&
4512 potential_garbage >=
4513 CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
4514 // If we spent less than half of the time budget, then perform a full GC.
4515 // Otherwise, start incremental marking.
4516 if (end - start < kMaxMemoryPressurePauseMs / 2) {
4517 CollectAllGarbage(
4518 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
4519 GarbageCollectionReason::kMemoryPressure,
4520 kGCCallbackFlagCollectAllAvailableGarbage);
4521 } else {
4522 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4523 StartIncrementalMarking(kReduceMemoryFootprintMask,
4524 GarbageCollectionReason::kMemoryPressure);
4525 }
4526 }
4527 }
4528 }
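// Worked example of the threshold check above, with illustrative numbers:
// if CommittedMemory() is 120 MB, SizeOfObjects() is 80 MB and
// external_memory_ is 10 MB, then potential_garbage is 50 MB. That exceeds
// both kGarbageThresholdInBytes (8 MB) and 10% of committed memory (12 MB),
// so a second round of collection starts right away: a full GC if the
// first pass used less than 50 ms of the 100 ms RAIL budget, incremental
// marking otherwise.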
4529
4530 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
4531 bool is_isolate_locked) {
4532 MemoryPressureLevel previous = memory_pressure_level_.Value();
4533 memory_pressure_level_.SetValue(level);
4534 if ((previous != MemoryPressureLevel::kCritical &&
4535 level == MemoryPressureLevel::kCritical) ||
4536 (previous == MemoryPressureLevel::kNone &&
4537 level == MemoryPressureLevel::kModerate)) {
4538 if (is_isolate_locked) {
4539 CheckMemoryPressure();
4540 } else {
4541 ExecutionAccess access(isolate());
4542 isolate()->stack_guard()->RequestGC();
4543 V8::GetCurrentPlatform()->CallOnForegroundThread(
4544 reinterpret_cast<v8::Isolate*>(isolate()),
4545 new MemoryPressureInterruptTask(this));
4546 }
4547 }
4548 }
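// A minimal sketch of the embedder side, via the public API (exact call
// sites vary by embedder; `isolate` is the embedder's own object):
//
//   isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
//
// When the calling thread does not hold the isolate lock, the code above
// requests a GC interrupt via the stack guard and posts a
// MemoryPressureInterruptTask to the foreground thread instead of
// collecting synchronously.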
4549
4550 void Heap::SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
4551 void* data) {
4552 out_of_memory_callback_ = callback;
4553 out_of_memory_callback_data_ = data;
4554 }
4555
4556 void Heap::InvokeOutOfMemoryCallback() {
4557 if (out_of_memory_callback_) {
4558 out_of_memory_callback_(out_of_memory_callback_data_);
4559 }
4560 }
4561
4562 void Heap::CollectCodeStatistics() {
4563 CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
4564 // We do not look for code in new space or map space. If code
4565 // somehow ends up in those spaces, we would miss it here.
4566 CodeStatistics::CollectCodeStatistics(code_space_, isolate());
4567 CodeStatistics::CollectCodeStatistics(old_space_, isolate());
4568 CodeStatistics::CollectCodeStatistics(lo_space_, isolate());
4569 }
4570
4571 #ifdef DEBUG
4572
4573 void Heap::Print() {
4574 if (!HasBeenSetUp()) return;
4575 isolate()->PrintStack(stdout);
4576 AllSpaces spaces(this);
4577 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4578 space->Print();
4579 }
4580 }
4581
4582
4583 void Heap::ReportCodeStatistics(const char* title) {
4584 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4585 CollectCodeStatistics();
4586 CodeStatistics::ReportCodeStatistics(isolate());
4587 }
4588
4589
4590 // This function expects that NewSpace's allocated objects histogram is
4591 // populated (via a call to CollectStatistics or else as a side effect of a
4592 // just-completed scavenge collection).
4593 void Heap::ReportHeapStatistics(const char* title) {
4594 USE(title);
4595 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4596 gc_count_);
4597 PrintF("old_generation_allocation_limit_ %" V8PRIdPTR "\n",
4598 old_generation_allocation_limit_);
4599
4600 PrintF("\n");
4601 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4602 isolate_->global_handles()->PrintStats();
4603 PrintF("\n");
4604
4605 PrintF("Heap statistics : ");
4606 memory_allocator()->ReportStatistics();
4607 PrintF("To space : ");
4608 new_space_->ReportStatistics();
4609 PrintF("Old space : ");
4610 old_space_->ReportStatistics();
4611 PrintF("Code space : ");
4612 code_space_->ReportStatistics();
4613 PrintF("Map space : ");
4614 map_space_->ReportStatistics();
4615 PrintF("Large object space : ");
4616 lo_space_->ReportStatistics();
4617 PrintF(">>>>>> ========================================= >>>>>>\n");
4618 }
4619
4620 #endif // DEBUG
4621
4622 const char* Heap::GarbageCollectionReasonToString(
4623 GarbageCollectionReason gc_reason) {
4624 switch (gc_reason) {
4625 case GarbageCollectionReason::kAllocationFailure:
4626 return "allocation failure";
4627 case GarbageCollectionReason::kAllocationLimit:
4628 return "allocation limit";
4629 case GarbageCollectionReason::kContextDisposal:
4630 return "context disposal";
4631 case GarbageCollectionReason::kCountersExtension:
4632 return "counters extension";
4633 case GarbageCollectionReason::kDebugger:
4634 return "debugger";
4635 case GarbageCollectionReason::kDeserializer:
4636 return "deserialize";
4637 case GarbageCollectionReason::kExternalMemoryPressure:
4638 return "external memory pressure";
4639 case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
4640 return "finalize incremental marking via stack guard";
4641 case GarbageCollectionReason::kFinalizeMarkingViaTask:
4642 return "finalize incremental marking via task";
4643 case GarbageCollectionReason::kFullHashtable:
4644 return "full hash-table";
4645 case GarbageCollectionReason::kHeapProfiler:
4646 return "heap profiler";
4647 case GarbageCollectionReason::kIdleTask:
4648 return "idle task";
4649 case GarbageCollectionReason::kLastResort:
4650 return "last resort";
4651 case GarbageCollectionReason::kLowMemoryNotification:
4652 return "low memory notification";
4653 case GarbageCollectionReason::kMakeHeapIterable:
4654 return "make heap iterable";
4655 case GarbageCollectionReason::kMemoryPressure:
4656 return "memory pressure";
4657 case GarbageCollectionReason::kMemoryReducer:
4658 return "memory reducer";
4659 case GarbageCollectionReason::kRuntime:
4660 return "runtime";
4661 case GarbageCollectionReason::kSamplingProfiler:
4662 return "sampling profiler";
4663 case GarbageCollectionReason::kSnapshotCreator:
4664 return "snapshot creator";
4665 case GarbageCollectionReason::kTesting:
4666 return "testing";
4667 case GarbageCollectionReason::kUnknown:
4668 return "unknown";
4669 }
4670 UNREACHABLE();
4671 return "";
4672 }
4673
4674 bool Heap::Contains(HeapObject* value) {
4675 if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
4676 return false;
4677 }
4678 return HasBeenSetUp() &&
4679 (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
4680 code_space_->Contains(value) || map_space_->Contains(value) ||
4681 lo_space_->Contains(value));
4682 }
4683
4684 bool Heap::ContainsSlow(Address addr) {
4685 if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
4686 return false;
4687 }
4688 return HasBeenSetUp() &&
4689 (new_space_->ToSpaceContainsSlow(addr) ||
4690 old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
4691 map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
4692 }
4693
4694 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4695 if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
4696 return false;
4697 }
4698 if (!HasBeenSetUp()) return false;
4699
4700 switch (space) {
4701 case NEW_SPACE:
4702 return new_space_->ToSpaceContains(value);
4703 case OLD_SPACE:
4704 return old_space_->Contains(value);
4705 case CODE_SPACE:
4706 return code_space_->Contains(value);
4707 case MAP_SPACE:
4708 return map_space_->Contains(value);
4709 case LO_SPACE:
4710 return lo_space_->Contains(value);
4711 }
4712 UNREACHABLE();
4713 return false;
4714 }
4715
4716 bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
4717 if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
4718 return false;
4719 }
4720 if (!HasBeenSetUp()) return false;
4721
4722 switch (space) {
4723 case NEW_SPACE:
4724 return new_space_->ToSpaceContainsSlow(addr);
4725 case OLD_SPACE:
4726 return old_space_->ContainsSlow(addr);
4727 case CODE_SPACE:
4728 return code_space_->ContainsSlow(addr);
4729 case MAP_SPACE:
4730 return map_space_->ContainsSlow(addr);
4731 case LO_SPACE:
4732 return lo_space_->ContainsSlow(addr);
4733 }
4734 UNREACHABLE();
4735 return false;
4736 }
4737
4738
4739 bool Heap::IsValidAllocationSpace(AllocationSpace space) {
4740 switch (space) {
4741 case NEW_SPACE:
4742 case OLD_SPACE:
4743 case CODE_SPACE:
4744 case MAP_SPACE:
4745 case LO_SPACE:
4746 return true;
4747 default:
4748 return false;
4749 }
4750 }
4751
4752
4753 bool Heap::RootIsImmortalImmovable(int root_index) {
4754 switch (root_index) {
4755 #define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
4756 IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
4757 #undef IMMORTAL_IMMOVABLE_ROOT
4758 #define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
4759 INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
4760 #undef INTERNALIZED_STRING
4761 #define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
4762 STRING_TYPE_LIST(STRING_TYPE)
4763 #undef STRING_TYPE
4764 return true;
4765 default:
4766 return false;
4767 }
4768 }
4769
4770
4771 #ifdef VERIFY_HEAP
4772 void Heap::Verify() {
4773 CHECK(HasBeenSetUp());
4774 HandleScope scope(isolate());
4775
4776 // We have to wait here for the sweeper threads to have an iterable heap.
4777 mark_compact_collector()->EnsureSweepingCompleted();
4778
4779 VerifyPointersVisitor visitor;
4780 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4781
4782 VerifySmisVisitor smis_visitor;
4783 IterateSmiRoots(&smis_visitor);
4784
4785 new_space_->Verify();
4786
4787 old_space_->Verify(&visitor);
4788 map_space_->Verify(&visitor);
4789
4790 VerifyPointersVisitor no_dirty_regions_visitor;
4791 code_space_->Verify(&no_dirty_regions_visitor);
4792
4793 lo_space_->Verify();
4794
4795 mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
4796 if (FLAG_omit_map_checks_for_leaf_maps) {
4797 mark_compact_collector()->VerifyOmittedMapChecks();
4798 }
4799 }
4800 #endif
4801
4802
4803 void Heap::ZapFromSpace() {
4804 if (!new_space_->IsFromSpaceCommitted()) return;
4805 for (Page* page :
4806 PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
4807 for (Address cursor = page->area_start(), limit = page->area_end();
4808 cursor < limit; cursor += kPointerSize) {
4809 Memory::Address_at(cursor) = kFromSpaceZapValue;
4810 }
4811 }
4812 }
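// Example: after a scavenge the vacated from-space is overwritten with
// kFromSpaceZapValue, so a stale pointer into from-space shows up as a
// recognizable zap pattern in a debugger or crash dump instead of
// silently aliasing whatever is allocated there next.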
4813
4814 class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
4815 public:
4816 IterateAndScavengePromotedObjectsVisitor(Heap* heap, HeapObject* target,
4817 bool record_slots)
4818 : heap_(heap), target_(target), record_slots_(record_slots) {}
4819
4820 inline void VisitPointers(Object** start, Object** end) override {
4821 Address slot_address = reinterpret_cast<Address>(start);
4822 Page* page = Page::FromAddress(slot_address);
4823
4824 while (slot_address < reinterpret_cast<Address>(end)) {
4825 Object** slot = reinterpret_cast<Object**>(slot_address);
4826 Object* target = *slot;
4827
4828 if (target->IsHeapObject()) {
4829 if (heap_->InFromSpace(target)) {
4830 Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(slot),
4831 HeapObject::cast(target));
4832 target = *slot;
4833 if (heap_->InNewSpace(target)) {
4834 SLOW_DCHECK(heap_->InToSpace(target));
4835 SLOW_DCHECK(target->IsHeapObject());
4836 RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
4837 }
4838 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
4839 HeapObject::cast(target)));
4840 } else if (record_slots_ &&
4841 MarkCompactCollector::IsOnEvacuationCandidate(
4842 HeapObject::cast(target))) {
4843 heap_->mark_compact_collector()->RecordSlot(target_, slot, target);
4844 }
4845 }
4846
4847 slot_address += kPointerSize;
4848 }
4849 }
4850
4851 inline void VisitCodeEntry(Address code_entry_slot) override {
4852 // Black allocation requires us to process objects referenced by
4853 // promoted objects.
4854 if (heap_->incremental_marking()->black_allocation()) {
4855 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
4856 IncrementalMarking::MarkGrey(heap_, code);
4857 }
4858 }
4859
4860 private:
4861 Heap* heap_;
4862 HeapObject* target_;
4863 bool record_slots_;
4864 };
4865
4866 void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
4867 bool was_marked_black) {
4868 // We do not collect slots on new space objects during mutation, so we
4869 // have to scan for pointers to evacuation candidates when we promote
4870 // objects. But we should not record any slots in non-black objects:
4871 // a grey object's slots would be rescanned anyway, and a white object
4872 // might not survive until the end of the collection, so recording its
4873 // slots would violate the invariant.
4874 bool record_slots = false;
4875 if (incremental_marking()->IsCompacting()) {
4876 record_slots = ObjectMarking::IsBlack(target);
4877 }
4878
4879 IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
4880 if (target->IsJSFunction()) {
4881 // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
4882 // these links are recorded during the processing of weak lists.
4883 JSFunction::BodyDescriptorWeakCode::IterateBody(target, size, &visitor);
4884 } else {
4885 target->IterateBody(target->map()->instance_type(), size, &visitor);
4886 }
4887
4888 // When black allocation is on, we have to visit objects promoted to black
4889 // pages that are not yet marked black (they were allocated in new space)
4890 // to keep their references alive.
4891 // TODO(hpayer): Implement a special promotion visitor that incorporates
4892 // regular visiting and IteratePromotedObjectPointers.
4893 if (!was_marked_black) {
4894 if (incremental_marking()->black_allocation()) {
4895 IncrementalMarking::MarkGrey(this, target->map());
4896 incremental_marking()->IterateBlackObject(target);
4897 }
4898 }
4899 }
4900
4901
4902 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4903 IterateStrongRoots(v, mode);
4904 IterateWeakRoots(v, mode);
4905 }
4906
4907
4908 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4909 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4910 v->Synchronize(VisitorSynchronization::kStringTable);
4911 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4912 // Scavenge collections have special processing for this.
4913 external_string_table_.IterateAll(v);
4914 }
4915 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4916 }
4917
4918
4919 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4920 // Acquire execution access since we are going to read stack limit values.
4921 ExecutionAccess access(isolate());
4922 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4923 v->Synchronize(VisitorSynchronization::kSmiRootList);
4924 }
4925
4926 // We cannot avoid stale handles to left-trimmed objects, but we can make
4927 // sure that all handles still needed are updated. Filter out a stale
4928 // pointer and clear the slot to allow post-processing of handles (needed
4929 // because the sweeper might actually free the underlying page).
4930 class FixStaleLeftTrimmedHandlesVisitor : public ObjectVisitor {
4931 public:
4932 explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
4933 USE(heap_);
4934 }
4935
4936 void VisitPointer(Object** p) override { FixHandle(p); }
4937
4938 void VisitPointers(Object** start, Object** end) override {
4939 for (Object** p = start; p < end; p++) FixHandle(p);
4940 }
4941
4942 private:
4943 inline void FixHandle(Object** p) {
4944 HeapObject* current = reinterpret_cast<HeapObject*>(*p);
4945 if (!current->IsHeapObject()) return;
4946 const MapWord map_word = current->map_word();
4947 if (!map_word.IsForwardingAddress() && current->IsFiller()) {
4948 #ifdef DEBUG
4949 // We need to find a FixedArrayBase map after walking the fillers.
4950 while (current->IsFiller()) {
4951 Address next = reinterpret_cast<Address>(current);
4952 if (current->map() == heap_->one_pointer_filler_map()) {
4953 next += kPointerSize;
4954 } else if (current->map() == heap_->two_pointer_filler_map()) {
4955 next += 2 * kPointerSize;
4956 } else {
4957 next += current->Size();
4958 }
4959 current = reinterpret_cast<HeapObject*>(next);
4960 }
4961 DCHECK(current->IsFixedArrayBase());
4962 #endif // DEBUG
4963 *p = nullptr;
4964 }
4965 }
4966
4967 Heap* heap_;
4968 };
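// Left-trimming example: when the front of an array backing store is
// dropped (e.g. by Array.prototype.shift), the heap moves the array start
// forward and turns the vacated words into one-pointer or two-pointer
// fillers. A handle that still points at the old start now points at a
// filler map, and the visitor above clears such handles to nullptr.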
4969
4970 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4971 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4972 v->Synchronize(VisitorSynchronization::kStrongRootList);
4973 // The serializer/deserializer iterates the root list twice, first to pick
4974 // off immortal immovable roots to make sure they end up on the first page,
4975 // and then again for the rest.
4976 if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;
4977
4978 isolate_->bootstrapper()->Iterate(v);
4979 v->Synchronize(VisitorSynchronization::kBootstrapper);
4980 isolate_->Iterate(v);
4981 v->Synchronize(VisitorSynchronization::kTop);
4982 Relocatable::Iterate(isolate_, v);
4983 v->Synchronize(VisitorSynchronization::kRelocatable);
4984 isolate_->debug()->Iterate(v);
4985 v->Synchronize(VisitorSynchronization::kDebug);
4986
4987 isolate_->compilation_cache()->Iterate(v);
4988 v->Synchronize(VisitorSynchronization::kCompilationCache);
4989
4990 // Iterate over local handles in handle scopes.
4991 FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
4992 isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
4993 isolate_->handle_scope_implementer()->Iterate(v);
4994 isolate_->IterateDeferredHandles(v);
4995 v->Synchronize(VisitorSynchronization::kHandleScope);
4996
4997 // Iterate over the builtin code objects and code stubs in the
4998 // heap. Note that it is not necessary to iterate over code objects
4999 // on scavenge collections.
5000 if (mode != VISIT_ALL_IN_SCAVENGE) {
5001 isolate_->builtins()->IterateBuiltins(v);
5002 v->Synchronize(VisitorSynchronization::kBuiltins);
5003 isolate_->interpreter()->IterateDispatchTable(v);
5004 v->Synchronize(VisitorSynchronization::kDispatchTable);
5005 }
5006
5007 // Iterate over global handles.
5008 switch (mode) {
5009 case VISIT_ONLY_STRONG_ROOT_LIST:
5010 UNREACHABLE();
5011 break;
5012 case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
5013 break;
5014 case VISIT_ONLY_STRONG:
5015 isolate_->global_handles()->IterateStrongRoots(v);
5016 break;
5017 case VISIT_ALL_IN_SCAVENGE:
5018 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5019 break;
5020 case VISIT_ALL_IN_SWEEP_NEWSPACE:
5021 case VISIT_ALL:
5022 isolate_->global_handles()->IterateAllRoots(v);
5023 break;
5024 }
5025 v->Synchronize(VisitorSynchronization::kGlobalHandles);
5026
5027 // Iterate over eternal handles.
5028 if (mode == VISIT_ALL_IN_SCAVENGE) {
5029 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
5030 } else {
5031 isolate_->eternal_handles()->IterateAllRoots(v);
5032 }
5033 v->Synchronize(VisitorSynchronization::kEternalHandles);
5034
5035 // Iterate over pointers being held by inactive threads.
5036 isolate_->thread_manager()->Iterate(v);
5037 v->Synchronize(VisitorSynchronization::kThreadManager);
5038
5039 // Iterate over other strong roots (currently only identity maps).
5040 for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
5041 v->VisitPointers(list->start, list->end);
5042 }
5043 v->Synchronize(VisitorSynchronization::kStrongRoots);
5044
5045 // Iterate over the partial snapshot cache unless serializing.
5046 if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
5047 SerializerDeserializer::Iterate(isolate_, v);
5048 }
5049 // We don't do a v->Synchronize call here, because in debug mode that will
5050 // output a flag to the snapshot. However at this point the serializer and
5051 // deserializer are deliberately a little unsynchronized (see above) so the
5052 // checking of the sync flag in the snapshot would fail.
5053 }
5054
5055
5056 // TODO(1236194): Since the heap size is configurable on the command line
5057 // and through the API, we should gracefully handle the case that the heap
5058 // size is not big enough to fit all the initial objects.
5059 bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
5060 size_t max_executable_size, size_t code_range_size) {
5061 if (HasBeenSetUp()) return false;
5062
5063 // Overwrite default configuration.
5064 if (max_semi_space_size != 0) {
5065 max_semi_space_size_ = max_semi_space_size * MB;
5066 }
5067 if (max_old_space_size != 0) {
5068 max_old_generation_size_ = max_old_space_size * MB;
5069 }
5070 if (max_executable_size != 0) {
5071 max_executable_size_ = max_executable_size * MB;
5072 }
5073
5074 // If max space size flags are specified, overwrite the configuration.
5075 if (FLAG_max_semi_space_size > 0) {
5076 max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
5077 }
5078 if (FLAG_max_old_space_size > 0) {
5079 max_old_generation_size_ =
5080 static_cast<size_t>(FLAG_max_old_space_size) * MB;
5081 }
5082 if (FLAG_max_executable_size > 0) {
5083 max_executable_size_ = static_cast<size_t>(FLAG_max_executable_size) * MB;
5084 }
5085
5086 if (Page::kPageSize > MB) {
5087 max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
5088 max_old_generation_size_ =
5089 ROUND_UP(max_old_generation_size_, Page::kPageSize);
5090 max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize);
5091 }
5092
5093 if (FLAG_stress_compaction) {
5094 // This will cause more frequent GCs when stressing.
5095 max_semi_space_size_ = MB;
5096 }
5097
5098 // The new space size must be a power of two to support single-bit testing
5099 // for containment.
5100 max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
5101 static_cast<uint32_t>(max_semi_space_size_));
5102
5103 if (FLAG_min_semi_space_size > 0) {
5104 size_t initial_semispace_size =
5105 static_cast<size_t>(FLAG_min_semi_space_size) * MB;
5106 if (initial_semispace_size > max_semi_space_size_) {
5107 initial_semispace_size_ = max_semi_space_size_;
5108 if (FLAG_trace_gc) {
5109 PrintIsolate(isolate_,
5110 "Min semi-space size cannot be more than the maximum "
5111 "semi-space size of %" PRIuS " MB\n",
5112 max_semi_space_size_ / MB);
5113 }
5114 } else {
5115 initial_semispace_size_ =
5116 ROUND_UP(initial_semispace_size, Page::kPageSize);
5117 }
5118 }
5119
5120 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
5121
5122 if (FLAG_semi_space_growth_factor < 2) {
5123 FLAG_semi_space_growth_factor = 2;
5124 }
5125
5126 // The old generation is paged and needs at least one page for each space.
5127 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5128 initial_max_old_generation_size_ = max_old_generation_size_ =
5129 Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
5130 max_old_generation_size_);
5131
5132 // The max executable size must be less than or equal to the max old
5133 // generation size.
5134 if (max_executable_size_ > max_old_generation_size_) {
5135 max_executable_size_ = max_old_generation_size_;
5136 }
5137
5138 if (FLAG_initial_old_space_size > 0) {
5139 initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
5140 } else {
5141 initial_old_generation_size_ =
5142 max_old_generation_size_ / kInitalOldGenerationLimitFactor;
5143 }
5144 old_generation_allocation_limit_ = initial_old_generation_size_;
5145
5146 // We rely on being able to allocate new arrays in paged spaces.
5147 DCHECK(kMaxRegularHeapObjectSize >=
5148 (JSArray::kSize +
5149 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
5150 AllocationMemento::kSize));
5151
5152 code_range_size_ = code_range_size * MB;
5153
5154 configured_ = true;
5155 return true;
5156 }
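// A minimal sketch of how an embedder typically feeds these sizes in, via
// the public API (the 16 MB / 256 MB values are assumed examples):
//
//   v8::Isolate::CreateParams params;
//   params.constraints.set_max_semi_space_size(16);  // MB
//   params.constraints.set_max_old_space_size(256);  // MB
//   v8::Isolate* isolate = v8::Isolate::New(params);
//
// which ends up in ConfigureHeap() above; note that command-line flags
// such as --max_old_space_size still override these values.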
5157
5158
5159 void Heap::AddToRingBuffer(const char* string) {
5160 size_t first_part =
5161 Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
5162 memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
5163 ring_buffer_end_ += first_part;
5164 if (first_part < strlen(string)) {
5165 ring_buffer_full_ = true;
5166 size_t second_part = strlen(string) - first_part;
5167 memcpy(trace_ring_buffer_, string + first_part, second_part);
5168 ring_buffer_end_ = second_part;
5169 }
5170 }
5171
5172
5173 void Heap::GetFromRingBuffer(char* buffer) {
5174 size_t copied = 0;
5175 if (ring_buffer_full_) {
5176 copied = kTraceRingBufferSize - ring_buffer_end_;
5177 memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
5178 }
5179 memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
5180 }
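// Wrap-around example (assuming kTraceRingBufferSize == 512 for
// illustration): with ring_buffer_end_ == 500, appending a 20-byte string
// writes 12 bytes at offsets 500..511 and the remaining 8 bytes at offsets
// 0..7, setting ring_buffer_full_ and leaving ring_buffer_end_ at 8.
// GetFromRingBuffer then copies offsets 8..511 first and 0..7 second,
// reproducing the most recent bytes in chronological order.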
5181
5182
5183 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
5184
5185
5186 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5187 *stats->start_marker = HeapStats::kStartMarker;
5188 *stats->end_marker = HeapStats::kEndMarker;
5189 *stats->new_space_size = new_space_->Size();
5190 *stats->new_space_capacity = new_space_->Capacity();
5191 *stats->old_space_size = old_space_->SizeOfObjects();
5192 *stats->old_space_capacity = old_space_->Capacity();
5193 *stats->code_space_size = code_space_->SizeOfObjects();
5194 *stats->code_space_capacity = code_space_->Capacity();
5195 *stats->map_space_size = map_space_->SizeOfObjects();
5196 *stats->map_space_capacity = map_space_->Capacity();
5197 *stats->lo_space_size = lo_space_->Size();
5198 isolate_->global_handles()->RecordStats(stats);
5199 *stats->memory_allocator_size = memory_allocator()->Size();
5200 *stats->memory_allocator_capacity =
5201 memory_allocator()->Size() + memory_allocator()->Available();
5202 *stats->os_error = base::OS::GetLastError();
5203 *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
5204 *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
5205 if (take_snapshot) {
5206 HeapIterator iterator(this);
5207 for (HeapObject* obj = iterator.next(); obj != NULL;
5208 obj = iterator.next()) {
5209 InstanceType type = obj->map()->instance_type();
5210 DCHECK(0 <= type && type <= LAST_TYPE);
5211 stats->objects_per_type[type]++;
5212 stats->size_per_type[type] += obj->Size();
5213 }
5214 }
5215 if (stats->last_few_messages != NULL)
5216 GetFromRingBuffer(stats->last_few_messages);
5217 if (stats->js_stacktrace != NULL) {
5218 FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
5219 StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
5220 if (gc_state() == Heap::NOT_IN_GC) {
5221 isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
5222 } else {
5223 accumulator.Add("Cannot get stack trace in GC.");
5224 }
5225 }
5226 }
5227
5228 size_t Heap::PromotedSpaceSizeOfObjects() {
5229 return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5230 map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5231 }
5232
5233 uint64_t Heap::PromotedExternalMemorySize() {
5234 if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
5235 return static_cast<uint64_t>(external_memory_ -
5236 external_memory_at_last_mark_compact_);
5237 }
5238
5239
5240 const double Heap::kMinHeapGrowingFactor = 1.1;
5241 const double Heap::kMaxHeapGrowingFactor = 4.0;
5242 const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
5243 const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
5244 const double Heap::kConservativeHeapGrowingFactor = 1.3;
5245 const double Heap::kTargetMutatorUtilization = 0.97;
5246
5247 // Given the GC speed in bytes per ms and the allocation throughput (mutator
5248 // speed) in bytes per ms, this function returns the heap growing factor that
5249 // will achieve kTargetMutatorUtilization if the GC speed and the mutator
5250 // speed remain the same until the next GC.
5251 //
5252 // For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
5253 // TM / (TM + TG), where TM is the time spent in the mutator and TG is the
5254 // time spent in the garbage collector.
5255 //
5256 // Let MU be kTargetMutatorUtilization, the desired mutator utilization for the
5257 // time-frame from the end of the current GC to the end of the next GC. Based
5258 // on the MU we can compute the heap growing factor F as
5259 //
5260 // F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
5261 //
5262 // This formula can be derived as follows.
5263 //
5264 // F = Limit / Live by definition, where the Limit is the allocation limit,
5265 // and the Live is size of live objects.
5266 // Let's assume that we already know the Limit. Then:
5267 // TG = Limit / gc_speed
5268 // TM = (TM + TG) * MU, by definition of MU.
5269 // TM = TG * MU / (1 - MU)
5270 // TM = Limit * MU / (gc_speed * (1 - MU))
5271 // On the other hand, if the allocation throughput remains constant:
5272 // Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
5273 // Solving it for TM, we get
5274 // TM = (Limit - Live) / mutator_speed
5275 // Combining the two equations for TM:
5276 // (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
5277 // (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
5278 // substitute R = gc_speed / mutator_speed
5279 // (Limit - Live) = Limit * MU / (R * (1 - MU))
5280 // substitute F = Limit / Live
5281 // F - 1 = F * MU / (R * (1 - MU))
5282 // F - F * MU / (R * (1 - MU)) = 1
5283 // F * (1 - MU / (R * (1 - MU))) = 1
5284 // F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
5285 // F = R * (1 - MU) / (R * (1 - MU) - MU)
5286 double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
5287 if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;
5288
5289 const double speed_ratio = gc_speed / mutator_speed;
5290 const double mu = kTargetMutatorUtilization;
5291
5292 const double a = speed_ratio * (1 - mu);
5293 const double b = speed_ratio * (1 - mu) - mu;
5294
5295 // The factor is a / b, but we need to check for small b first.
5296 double factor =
5297 (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
5298 factor = Min(factor, kMaxHeapGrowingFactor);
5299 factor = Max(factor, kMinHeapGrowingFactor);
5300 return factor;
5301 }
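// Worked example of the formula above, with illustrative numbers: for
// MU = 0.97 and R = gc_speed / mutator_speed = 64.7,
//   a = R * (1 - MU) = 64.7 * 0.03 ~= 1.94
//   b = a - MU       = 1.94 - 0.97 ~= 0.97
//   F = a / b        ~= 2.0
// i.e. if the collector reclaims memory about 65x faster than the mutator
// allocates it, the old generation may roughly double before the next GC
// while keeping the mutator busy 97% of the time. As R approaches
// MU / (1 - MU) ~= 32.3 from above, b tends to 0 and the result is
// clamped to kMaxHeapGrowingFactor.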
5302
5303 size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
5304 size_t old_gen_size) {
5305 CHECK(factor > 1.0);
5306 CHECK(old_gen_size > 0);
5307 uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
5308 limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
5309 MinimumAllocationLimitGrowingStep());
5310 limit += new_space_->Capacity();
5311 uint64_t halfway_to_the_max =
5312 (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
5313 return static_cast<size_t>(Min(limit, halfway_to_the_max));
5314 }
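// Worked example, with illustrative numbers: for old_gen_size = 100 MB,
// factor = 2.0, a 16 MB new-space capacity, an 8 MB minimum growing step
// and max_old_generation_size_ = 700 MB:
//   limit              = max(200 MB, 100 MB + 8 MB) + 16 MB = 216 MB
//   halfway_to_the_max = (100 MB + 700 MB) / 2              = 400 MB
//   result             = min(216 MB, 400 MB)                = 216 MB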
5315
5316 size_t Heap::MinimumAllocationLimitGrowingStep() {
5317 const size_t kRegularAllocationLimitGrowingStep = 8;
5318 const size_t kLowMemoryAllocationLimitGrowingStep = 2;
5319 size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
5320 return limit * (ShouldOptimizeForMemoryUsage()
5321 ? kLowMemoryAllocationLimitGrowingStep
5322 : kRegularAllocationLimitGrowingStep);
5323 }
5324
5325 void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
5326 double mutator_speed) {
5327 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5328
5329 if (FLAG_trace_gc_verbose) {
5330 isolate_->PrintWithTimestamp(
5331 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
5332 "(gc=%.f, mutator=%.f)\n",
5333 factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
5334 mutator_speed);
5335 }
5336
5337 if (IsMemoryConstrainedDevice()) {
5338 factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
5339 }
5340
5341 if (memory_reducer_->ShouldGrowHeapSlowly() ||
5342 ShouldOptimizeForMemoryUsage()) {
5343 factor = Min(factor, kConservativeHeapGrowingFactor);
5344 }
5345
5346 if (FLAG_stress_compaction || ShouldReduceMemory()) {
5347 factor = kMinHeapGrowingFactor;
5348 }
5349
5350 if (FLAG_heap_growing_percent > 0) {
5351 factor = 1.0 + FLAG_heap_growing_percent / 100.0;
5352 }
5353
5354 old_generation_allocation_limit_ =
5355 CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5356
5357 if (FLAG_trace_gc_verbose) {
5358 isolate_->PrintWithTimestamp(
5359 "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
5360 old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
5361 }
5362 }
5363
5364 void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
5365 double gc_speed,
5366 double mutator_speed) {
5367 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5368 size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5369 if (limit < old_generation_allocation_limit_) {
5370 if (FLAG_trace_gc_verbose) {
5371 isolate_->PrintWithTimestamp(
5372 "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
5373 " KB, "
5374 "new limit: %" PRIuS " KB (%.1f)\n",
5375 old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
5376 factor);
5377 }
5378 old_generation_allocation_limit_ = limit;
5379 }
5380 }
5381
5382 bool Heap::ShouldOptimizeForLoadTime() {
5383 return isolate()->rail_mode() == PERFORMANCE_LOAD &&
5384 !AllocationLimitOvershotByLargeMargin() &&
5385 MonotonicallyIncreasingTimeInMs() <
5386 isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
5387 }
5388
5389 // This predicate is called when an old generation space cannot allocate from
5390 // the free list and is about to add a new page. Returning false will cause a
5391 // major GC. It happens when the old generation allocation limit is reached and
5392 // - either we need to optimize for memory usage,
5393 // - or the incremental marking is not in progress and we cannot start it.
5394 bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
5395 if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
5396 // We reached the old generation allocation limit.
5397
5398 if (ShouldOptimizeForMemoryUsage()) return false;
5399
5400 if (ShouldOptimizeForLoadTime()) return true;
5401
5402 if (incremental_marking()->NeedsFinalization()) {
5403 return !AllocationLimitOvershotByLargeMargin();
5404 }
5405
5406 if (incremental_marking()->IsStopped() &&
5407 IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
5408 // We cannot start incremental marking.
5409 return false;
5410 }
5411 return true;
5412 }
5413
5414 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
5415 // The kNoLimit means that either incremental marking is disabled or it is too
5416 // early to start incremental marking.
5417 // The kSoftLimit means that incremental marking should be started soon.
5418 // The kHardLimit means that incremental marking should be started immediately.
5419 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
5420 if (!incremental_marking()->CanBeActivated() ||
5421 PromotedSpaceSizeOfObjects() <=
5422 IncrementalMarking::kActivationThreshold) {
5423 // Incremental marking is disabled or it is too early to start.
5424 return IncrementalMarkingLimit::kNoLimit;
5425 }
5426 if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
5427 HighMemoryPressure()) {
5428 // If there is high memory pressure or stress testing is enabled, then
5429 // start marking immediately.
5430 return IncrementalMarkingLimit::kHardLimit;
5431 }
5432 size_t old_generation_space_available = OldGenerationSpaceAvailable();
5433 if (old_generation_space_available > new_space_->Capacity()) {
5434 return IncrementalMarkingLimit::kNoLimit;
5435 }
5436 if (ShouldOptimizeForMemoryUsage()) {
5437 return IncrementalMarkingLimit::kHardLimit;
5438 }
5439 if (ShouldOptimizeForLoadTime()) {
5440 return IncrementalMarkingLimit::kNoLimit;
5441 }
5442 if (old_generation_space_available == 0) {
5443 return IncrementalMarkingLimit::kHardLimit;
5444 }
5445 return IncrementalMarkingLimit::kSoftLimit;
5446 }
5447
5448 void Heap::EnableInlineAllocation() {
5449 if (!inline_allocation_disabled_) return;
5450 inline_allocation_disabled_ = false;
5451
5452 // Update inline allocation limit for new space.
5453 new_space()->UpdateInlineAllocationLimit(0);
5454 }
5455
5456
5457 void Heap::DisableInlineAllocation() {
5458 if (inline_allocation_disabled_) return;
5459 inline_allocation_disabled_ = true;
5460
5461 // Update inline allocation limit for new space.
5462 new_space()->UpdateInlineAllocationLimit(0);
5463
5464 // Update inline allocation limit for old spaces.
5465 PagedSpaces spaces(this);
5466 for (PagedSpace* space = spaces.next(); space != NULL;
5467 space = spaces.next()) {
5468 space->EmptyAllocationInfo();
5469 }
5470 }
5471
5472
5473 V8_DECLARE_ONCE(initialize_gc_once);
5474
5475 static void InitializeGCOnce() {
5476 Scavenger::Initialize();
5477 StaticScavengeVisitor::Initialize();
5478 MarkCompactCollector::Initialize();
5479 }
5480
5481
5482 bool Heap::SetUp() {
5483 #ifdef DEBUG
5484 allocation_timeout_ = FLAG_gc_interval;
5485 #endif
5486
5487 // Initialize heap spaces and initial maps and objects. Whenever something
5488 // goes wrong, just return false. The caller should check the results and
5489 // call Heap::TearDown() to release allocated memory.
5490 //
5491 // If the heap is not yet configured (e.g. through the API), configure it.
5492 // Configuration is based on the flags new-space-size (really the semispace
5493 // size) and old-space-size if set, or on the initial values of
5494 // semispace_size_ and old_generation_size_ otherwise.
5495 if (!configured_) {
5496 if (!ConfigureHeapDefault()) return false;
5497 }
5498
5499 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5500
5501 // Set up memory allocator.
5502 memory_allocator_ = new MemoryAllocator(isolate_);
5503 if (!memory_allocator_->SetUp(MaxReserved(), MaxExecutableSize(),
5504 code_range_size_))
5505 return false;
5506
5507 // Initialize store buffer.
5508 store_buffer_ = new StoreBuffer(this);
5509
5510 // Initialize incremental marking.
5511 incremental_marking_ = new IncrementalMarking(this);
5512
5513 for (int i = 0; i <= LAST_SPACE; i++) {
5514 space_[i] = nullptr;
5515 }
5516
5517 space_[NEW_SPACE] = new_space_ = new NewSpace(this);
5518 if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
5519 return false;
5520 }
5521 new_space_top_after_last_gc_ = new_space()->top();
5522
5523 space_[OLD_SPACE] = old_space_ =
5524 new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
5525 if (!old_space_->SetUp()) return false;
5526
5527 space_[CODE_SPACE] = code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
5528 if (!code_space_->SetUp()) return false;
5529
5530 space_[MAP_SPACE] = map_space_ = new MapSpace(this, MAP_SPACE);
5531 if (!map_space_->SetUp()) return false;
5532
5533 // The large object code space may contain code or data. We set the memory
5534 // to be non-executable here for safety, but this means we need to enable it
5535 // explicitly when allocating large code objects.
5536 space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
5537 if (!lo_space_->SetUp()) return false;
5538
5539 // Set up the seed that is used to randomize the string hash function.
5540 DCHECK(hash_seed() == 0);
5541 if (FLAG_randomize_hashes) {
5542 if (FLAG_hash_seed == 0) {
5543 int rnd = isolate()->random_number_generator()->NextInt();
5544 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5545 } else {
5546 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5547 }
5548 }
5549
5550 for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
5551 i++) {
5552 deferred_counters_[i] = 0;
5553 }
5554
5555 tracer_ = new GCTracer(this);
5556 scavenge_collector_ = new Scavenger(this);
5557 mark_compact_collector_ = new MarkCompactCollector(this);
5558 gc_idle_time_handler_ = new GCIdleTimeHandler();
5559 memory_reducer_ = new MemoryReducer(this);
5560 if (V8_UNLIKELY(FLAG_gc_stats)) {
5561 live_object_stats_ = new ObjectStats(this);
5562 dead_object_stats_ = new ObjectStats(this);
5563 }
5564 scavenge_job_ = new ScavengeJob();
5565 local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer();
5566
5567 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5568 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5569
5570 store_buffer()->SetUp();
5571
5572 mark_compact_collector()->SetUp();
5573
5574 idle_scavenge_observer_ = new IdleScavengeObserver(
5575 *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
5576 new_space()->AddAllocationObserver(idle_scavenge_observer_);
5577
5578 return true;
5579 }
5580
5581
5582 bool Heap::CreateHeapObjects() {
5583 // Create initial maps.
5584 if (!CreateInitialMaps()) return false;
5585 if (!CreateApiObjects()) return false;
5586
5587 // Create initial objects
5588 CreateInitialObjects();
5589 CHECK_EQ(0u, gc_count_);
5590
5591 set_native_contexts_list(undefined_value());
5592 set_allocation_sites_list(undefined_value());
5593
5594 return true;
5595 }
5596
5597
5598 void Heap::SetStackLimits() {
5599 DCHECK(isolate_ != NULL);
5600 DCHECK(isolate_ == isolate());
5601 // On 64-bit machines, pointers are generally out of range of Smis. We write
5602 // something that looks like an out-of-range Smi to the GC.
5603
5604 // Set up the special root array entries containing the stack limits.
5605 // These are actually addresses, but the tag makes the GC ignore it.
5606 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5607 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5608 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5609 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5610 }
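// Example: with kSmiTag == 0 and kSmiTagMask == 1, a JS stack limit of
// 0x7ffc0a31d235 would be stored as 0x7ffc0a31d234. Clearing the low bit
// makes the address look like a Smi, so the GC treats the root entry as an
// immediate value and never follows it as a heap pointer.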
5611
5612 void Heap::ClearStackLimits() {
5613 roots_[kStackLimitRootIndex] = Smi::kZero;
5614 roots_[kRealStackLimitRootIndex] = Smi::kZero;
5615 }
5616
5617 void Heap::PrintAlloctionsHash() {
5618 uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
5619 PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
5620 }
5621
5622
5623 void Heap::NotifyDeserializationComplete() {
5624 DCHECK_EQ(0, gc_count());
5625 PagedSpaces spaces(this);
5626 for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
5627 if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
5628 #ifdef DEBUG
5629 // All pages right after bootstrapping must be marked as never-evacuate.
5630 for (Page* p : *s) {
5631 CHECK(p->NeverEvacuate());
5632 }
5633 #endif // DEBUG
5634 }
5635
5636 deserialization_complete_ = true;
5637 }
5638
5639 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
5640 DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
5641 local_embedder_heap_tracer()->SetRemoteTracer(tracer);
5642 }
5643
5644 void Heap::TracePossibleWrapper(JSObject* js_object) {
5645 DCHECK(js_object->WasConstructedFromApiFunction());
5646 if (js_object->GetInternalFieldCount() >= 2 &&
5647 js_object->GetInternalField(0) &&
5648 js_object->GetInternalField(0) != undefined_value() &&
5649 js_object->GetInternalField(1) != undefined_value()) {
5650 DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
5651 local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
5652 reinterpret_cast<void*>(js_object->GetInternalField(0)),
5653 reinterpret_cast<void*>(js_object->GetInternalField(1))));
5654 }
5655 }
5656
5657 void Heap::RegisterExternallyReferencedObject(Object** object) {
5658 HeapObject* heap_object = HeapObject::cast(*object);
5659 DCHECK(Contains(heap_object));
5660 if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
5661 IncrementalMarking::MarkGrey(this, heap_object);
5662 } else {
5663 DCHECK(mark_compact_collector()->in_use());
5664 mark_compact_collector()->MarkObject(heap_object);
5665 }
5666 }
5667
5668 void Heap::TearDown() {
5669 #ifdef VERIFY_HEAP
5670 if (FLAG_verify_heap) {
5671 Verify();
5672 }
5673 #endif
5674
5675 UpdateMaximumCommitted();
5676
5677 if (FLAG_verify_predictable) {
5678 PrintAlloctionsHash();
5679 }
5680
5681 new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
5682 delete idle_scavenge_observer_;
5683 idle_scavenge_observer_ = nullptr;
5684
5685 delete scavenge_collector_;
5686 scavenge_collector_ = nullptr;
5687
5688 if (mark_compact_collector_ != nullptr) {
5689 mark_compact_collector_->TearDown();
5690 delete mark_compact_collector_;
5691 mark_compact_collector_ = nullptr;
5692 }
5693
5694 delete incremental_marking_;
5695 incremental_marking_ = nullptr;
5696
5697 delete gc_idle_time_handler_;
5698 gc_idle_time_handler_ = nullptr;
5699
5700 if (memory_reducer_ != nullptr) {
5701 memory_reducer_->TearDown();
5702 delete memory_reducer_;
5703 memory_reducer_ = nullptr;
5704 }
5705
5706 if (live_object_stats_ != nullptr) {
5707 delete live_object_stats_;
5708 live_object_stats_ = nullptr;
5709 }
5710
5711 if (dead_object_stats_ != nullptr) {
5712 delete dead_object_stats_;
5713 dead_object_stats_ = nullptr;
5714 }
5715
5716 delete local_embedder_heap_tracer_;
5717 local_embedder_heap_tracer_ = nullptr;
5718
5719 delete scavenge_job_;
5720 scavenge_job_ = nullptr;
5721
5722 isolate_->global_handles()->TearDown();
5723
5724 external_string_table_.TearDown();
5725
5726 delete tracer_;
5727 tracer_ = nullptr;
5728
5729 new_space_->TearDown();
5730 delete new_space_;
5731 new_space_ = nullptr;
5732
5733 if (old_space_ != NULL) {
5734 delete old_space_;
5735 old_space_ = NULL;
5736 }
5737
5738 if (code_space_ != NULL) {
5739 delete code_space_;
5740 code_space_ = NULL;
5741 }
5742
5743 if (map_space_ != NULL) {
5744 delete map_space_;
5745 map_space_ = NULL;
5746 }
5747
5748 if (lo_space_ != NULL) {
5749 lo_space_->TearDown();
5750 delete lo_space_;
5751 lo_space_ = NULL;
5752 }
5753
5754 store_buffer()->TearDown();
5755
5756 memory_allocator()->TearDown();
5757
5758 StrongRootsList* next = NULL;
5759 for (StrongRootsList* list = strong_roots_list_; list; list = next) {
5760 next = list->next;
5761 delete list;
5762 }
5763 strong_roots_list_ = NULL;
5764
5765 delete store_buffer_;
5766 store_buffer_ = nullptr;
5767
5768 delete memory_allocator_;
5769 memory_allocator_ = nullptr;
5770 }
5771
5772
5773 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallback callback,
5774 GCType gc_type, bool pass_isolate) {
5775 DCHECK(callback != NULL);
5776 GCCallbackPair pair(callback, gc_type, pass_isolate);
5777 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5778 return gc_prologue_callbacks_.Add(pair);
5779 }
5780
5781
5782 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallback callback) {
5783 DCHECK(callback != NULL);
5784 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5785 if (gc_prologue_callbacks_[i].callback == callback) {
5786 gc_prologue_callbacks_.Remove(i);
5787 return;
5788 }
5789 }
5790 UNREACHABLE();
5791 }
5792
5793
5794 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
5795 GCType gc_type, bool pass_isolate) {
5796 DCHECK(callback != NULL);
5797 GCCallbackPair pair(callback, gc_type, pass_isolate);
5798 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5799 return gc_epilogue_callbacks_.Add(pair);
5800 }
5801
5802
5803 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback) {
5804 DCHECK(callback != NULL);
5805 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5806 if (gc_epilogue_callbacks_[i].callback == callback) {
5807 gc_epilogue_callbacks_.Remove(i);
5808 return;
5809 }
5810 }
5811 UNREACHABLE();
5812 }
5813
5814 // TODO(ishell): Find a better place for this.
5815 void Heap::AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
5816 Handle<WeakCell> code) {
5817 DCHECK(InNewSpace(*obj));
5818 DCHECK(!InNewSpace(*code));
5819 Handle<ArrayList> list(weak_new_space_object_to_code_list(), isolate());
5820 list = ArrayList::Add(list, isolate()->factory()->NewWeakCell(obj), code);
5821 if (*list != weak_new_space_object_to_code_list()) {
5822 set_weak_new_space_object_to_code_list(*list);
5823 }
5824 }
5825
5826 // TODO(ishell): Find a better place for this.
5827 void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
5828 Handle<DependentCode> dep) {
5829 DCHECK(!InNewSpace(*obj));
5830 DCHECK(!InNewSpace(*dep));
5831 Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
5832 table = WeakHashTable::Put(table, obj, dep);
5833 if (*table != weak_object_to_code_table())
5834 set_weak_object_to_code_table(*table);
5835 DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
5836 }
5837
5838
5839 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
5840 Object* dep = weak_object_to_code_table()->Lookup(obj);
5841 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5842 return DependentCode::cast(empty_fixed_array());
5843 }
5844
5845 namespace {
5846 void CompactWeakFixedArray(Object* object) {
5847 if (object->IsWeakFixedArray()) {
5848 WeakFixedArray* array = WeakFixedArray::cast(object);
5849 array->Compact<WeakFixedArray::NullCallback>();
5850 }
5851 }
5852 } // anonymous namespace
5853
5854 void Heap::CompactWeakFixedArrays() {
5855 // Find known WeakFixedArrays and compact them.
5856 HeapIterator iterator(this);
5857 for (HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
5858 if (o->IsPrototypeInfo()) {
5859 Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
5860 if (prototype_users->IsWeakFixedArray()) {
5861 WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
5862 array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
5863 }
5864 }
5865 }
5866 CompactWeakFixedArray(noscript_shared_function_infos());
5867 CompactWeakFixedArray(script_list());
5868 CompactWeakFixedArray(weak_stack_trace_list());
5869 }
5870
5871 void Heap::AddRetainedMap(Handle<Map> map) {
5872 Handle<WeakCell> cell = Map::WeakCellForMap(map);
5873 Handle<ArrayList> array(retained_maps(), isolate());
5874 if (array->IsFull()) {
5875 CompactRetainedMaps(*array);
5876 }
5877 array = ArrayList::Add(
5878 array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
5879 ArrayList::kReloadLengthAfterAllocation);
5880 if (*array != retained_maps()) {
5881 set_retained_maps(*array);
5882 }
5883 }
5884
5885
5886 void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
5887 DCHECK_EQ(retained_maps, this->retained_maps());
5888 int length = retained_maps->Length();
5889 int new_length = 0;
5890 int new_number_of_disposed_maps = 0;
5891 // This loop compacts the array by removing cleared weak cells.
5892 for (int i = 0; i < length; i += 2) {
5893 DCHECK(retained_maps->Get(i)->IsWeakCell());
5894 WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
5895 Object* age = retained_maps->Get(i + 1);
5896 if (cell->cleared()) continue;
5897 if (i != new_length) {
5898 retained_maps->Set(new_length, cell);
5899 retained_maps->Set(new_length + 1, age);
5900 }
5901 if (i < number_of_disposed_maps_) {
5902 new_number_of_disposed_maps += 2;
5903 }
5904 new_length += 2;
5905 }
5906 number_of_disposed_maps_ = new_number_of_disposed_maps;
5907 Object* undefined = undefined_value();
5908 for (int i = new_length; i < length; i++) {
5909 retained_maps->Clear(i, undefined);
5910 }
5911 if (new_length != length) retained_maps->SetLength(new_length);
5912 }
5913
FatalProcessOutOfMemory(const char * location,bool is_heap_oom)5914 void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
5915 v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
5916 }
5917
#ifdef DEBUG

class PrintHandleVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};


void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif

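// Counts handles while iterating and CHECKs in its destructor that no more
// than HandleScope::kCheckHandleThreshold handles are live.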
class CheckHandleCountVisitor : public ObjectVisitor {
 public:
  CheckHandleCountVisitor() : handle_count_(0) {}
  ~CheckHandleCountVisitor() override {
    CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
  }
  void VisitPointers(Object** start, Object** end) override {
    handle_count_ += end - start;
  }

 private:
  ptrdiff_t handle_count_;
};


void Heap::CheckHandleCount() {
  CheckHandleCountVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

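// Removes any recorded entries for |slot| from the store buffer and the
// OLD_TO_OLD remembered set; new-space objects have no recorded slots.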
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
  if (!InNewSpace(object)) {
    Address slot_addr = reinterpret_cast<Address>(slot);
    Page* page = Page::FromAddress(slot_addr);
    DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    store_buffer()->DeleteEntry(slot_addr);
    RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
  }
}

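// Flushes the store buffer into the remembered set first so that pending
// entries are visible to the containment checks below.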
bool Heap::HasRecordedSlot(HeapObject* object, Object** slot) {
  if (InNewSpace(object)) {
    return false;
  }
  Address slot_addr = reinterpret_cast<Address>(slot);
  Page* page = Page::FromAddress(slot_addr);
  DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
  store_buffer()->MoveAllEntriesToRememberedSet();
  return RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr) ||
         RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr);
}

void Heap::ClearRecordedSlotRange(Address start, Address end) {
  Page* page = Page::FromAddress(start);
  if (!page->InNewSpace()) {
    DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
    store_buffer()->DeleteEntry(start, end);
    RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end,
                                           SlotSet::FREE_EMPTY_BUCKETS);
  }
}

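// Records a typed OLD_TO_NEW slot for a reference from |host| code into new
// space; references that live in a constant pool are recorded at the pool
// entry address with CODE_ENTRY_SLOT or OBJECT_SLOT type as appropriate.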
void Heap::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                   Object* value) {
  DCHECK(InNewSpace(value));
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
  RelocInfo::Mode rmode = rinfo->rmode();
  Address addr = rinfo->pc();
  SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
  if (rinfo->IsInConstantPool()) {
    addr = rinfo->constant_pool_entry_address();
    if (RelocInfo::IsCodeTarget(rmode)) {
      slot_type = CODE_ENTRY_SLOT;
    } else {
      DCHECK(RelocInfo::IsEmbeddedObject(rmode));
      slot_type = OBJECT_SLOT;
    }
  }
  RememberedSet<OLD_TO_NEW>::InsertTyped(
      source_page, reinterpret_cast<Address>(host), slot_type, addr);
}

void Heap::RecordWritesIntoCode(Code* code) {
  for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
       !it.done(); it.next()) {
    RecordWriteIntoCode(code, it.rinfo(), it.rinfo()->target_object());
  }
}

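// The space cursors below walk a fixed enumeration order and return NULL once
// all spaces of the requested kind have been yielded.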
Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return heap_->new_space();
    case OLD_SPACE:
      return heap_->old_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case LO_SPACE:
      return heap_->lo_space();
    default:
      return NULL;
  }
}

PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_SPACE:
      return heap_->old_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_SPACE:
      return heap_->old_space();
    case CODE_SPACE:
      return heap_->code_space();
    default:
      return NULL;
  }
}

SpaceIterator::SpaceIterator(Heap* heap)
    : heap_(heap), current_space_(FIRST_SPACE - 1) {}

SpaceIterator::~SpaceIterator() {
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}

Space* SpaceIterator::next() {
  DCHECK(has_next());
  return heap_->space(++current_space_);
}


class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


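// Filters out objects that are unreachable from the roots: the constructor
// marks everything reachable, SkipObject() skips white (unmarked) objects,
// and the destructor clears all mark bits again.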
class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
    heap_->mark_compact_collector()->ClearMarkbits();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsFiller()) return true;
    return ObjectMarking::IsWhite(object);
  }

 private:
  class MarkingVisitor : public ObjectVisitor {
   public:
    MarkingVisitor() : marking_stack_(10) {}

    void VisitPointers(Object** start, Object** end) override {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        // Use Marking instead of ObjectMarking to avoid adjusting the live
        // bytes counter.
        MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
        if (Marking::IsWhite(mark_bit)) {
          Marking::WhiteToBlack(mark_bit);
          marking_stack_.Add(obj);
        }
      }
    }

    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
    }

   private:
    List<HeapObject*> marking_stack_;
  };

  void MarkReachableObjects() {
    MarkingVisitor visitor;
    heap_->IterateRoots(&visitor, VISIT_ALL);
    visitor.TransitiveClosure();
  }

  Heap* heap_;
  DisallowHeapAllocation no_allocation_;
};

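// HeapIterator visits every object in every space, optionally skipping
// unreachable objects; MakeHeapIterable() puts the heap into an iterable
// state before the walk starts.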
HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : no_heap_allocation_(),
      heap_(heap),
      filtering_(filtering),
      filter_(nullptr),
      space_iterator_(nullptr),
      object_iterator_(nullptr) {
  heap_->MakeHeapIterable();
  heap_->heap_iterator_start();
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter(heap_);
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next()->GetObjectIterator();
}


HeapIterator::~HeapIterator() {
  heap_->heap_iterator_end();
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, the heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    DCHECK(object_iterator_ == nullptr);
  }
#endif
  delete space_iterator_;
  delete filter_;
}


HeapObject* HeapIterator::next() {
  if (filter_ == nullptr) return NextObject();

  HeapObject* obj = NextObject();
  while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_.get() == nullptr) return nullptr;

  if (HeapObject* obj = object_iterator_.get()->Next()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next()->GetObjectIterator();
      if (HeapObject* obj = object_iterator_.get()->Next()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_.reset(nullptr);
  return nullptr;
}

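// Example usage (a sketch, not part of the original file): visit only the
// objects reachable from the roots.
//
//   HeapIterator it(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = it.next(); obj != nullptr; obj = it.next()) {
//     // ... inspect obj ...
//   }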

void Heap::UpdateTotalGCTime(double duration) {
  if (FLAG_trace_gc_verbose) {
    total_gc_time_ms_ += duration;
  }
}

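// Drops holes and non-external entries, keeps strings that are still in new
// space, and moves entries whose strings were promoted into
// old_space_strings_.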
void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
  int last = 0;
  Isolate* isolate = heap_->isolate();
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    Object* o = new_space_strings_[i];
    if (o->IsTheHole(isolate)) {
      continue;
    }
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    DCHECK(o->IsExternalString());
    if (heap_->InNewSpace(o)) {
      new_space_strings_[last++] = o;
    } else {
      old_space_strings_.Add(o);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();
}

void Heap::ExternalStringTable::CleanUpAll() {
  CleanUpNewSpaceStrings();
  int last = 0;
  Isolate* isolate = heap_->isolate();
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    Object* o = old_space_strings_[i];
    if (o->IsTheHole(isolate)) {
      continue;
    }
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    DCHECK(o->IsExternalString());
    DCHECK(!heap_->InNewSpace(o));
    old_space_strings_[last++] = o;
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}

void Heap::ExternalStringTable::TearDown() {
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    Object* o = new_space_strings_[i];
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    heap_->FinalizeExternalString(ExternalString::cast(o));
  }
  new_space_strings_.Free();
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    Object* o = old_space_strings_[i];
    if (o->IsThinString()) {
      o = ThinString::cast(o)->actual();
      if (!o->IsExternalString()) continue;
    }
    heap_->FinalizeExternalString(ExternalString::cast(o));
  }
  old_space_strings_.Free();
}


void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


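// Prepends the range [start, end) to the linked list of externally
// registered strong root ranges; callers are expected to pair this with
// UnregisterStrongRoots(start) before the memory goes away.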
void Heap::RegisterStrongRoots(Object** start, Object** end) {
  StrongRootsList* list = new StrongRootsList();
  list->next = strong_roots_list_;
  list->start = start;
  list->end = end;
  strong_roots_list_ = list;
}


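// Unlinks and deletes every registered range whose start pointer matches
// |start|.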
void Heap::UnregisterStrongRoots(Object** start) {
  StrongRootsList* prev = NULL;
  StrongRootsList* list = strong_roots_list_;
  while (list != nullptr) {
    StrongRootsList* next = list->next;
    if (list->start == start) {
      if (prev) {
        prev->next = next;
      } else {
        strong_roots_list_ = next;
      }
      delete list;
    } else {
      prev = list;
    }
    list = next;
  }
}


size_t Heap::NumberOfTrackedHeapObjectTypes() {
  return ObjectStats::OBJECT_STATS_COUNT;
}


size_t Heap::ObjectCountAtLastGC(size_t index) {
  if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    return 0;
  return live_object_stats_->object_count_last_gc(index);
}


size_t Heap::ObjectSizeAtLastGC(size_t index) {
  if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    return 0;
  return live_object_stats_->object_size_last_gc(index);
}


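// Translates a tracked-stats index into human-readable type and sub-type
// names; the index space covers instance types, code kinds, fixed-array
// sub-types, and code ages, in that order.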
bool Heap::GetObjectTypeName(size_t index, const char** object_type,
                             const char** object_sub_type) {
  if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;

  switch (static_cast<int>(index)) {
#define COMPARE_AND_RETURN_NAME(name) \
  case name:                          \
    *object_type = #name;             \
    *object_sub_type = "";            \
    return true;
    INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)                      \
  case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
    *object_type = "CODE_TYPE";                            \
    *object_sub_type = "CODE_KIND/" #name;                 \
    return true;
    CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)                  \
  case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
    *object_type = "FIXED_ARRAY_TYPE";                 \
    *object_sub_type = #name;                          \
    return true;
    FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)                                  \
  case ObjectStats::FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - \
      Code::kFirstCodeAge:                                             \
    *object_type = "CODE_TYPE";                                        \
    *object_sub_type = "CODE_AGE/" #name;                              \
    return true;
    CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
  }
  return false;
}


// static
int Heap::GetStaticVisitorIdForMap(Map* map) {
  return StaticVisitorBase::GetVisitorId(map);
}

}  // namespace internal
}  // namespace v8