1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/heap.h"
6
7 #include "src/accessors.h"
8 #include "src/api.h"
9 #include "src/ast/scopeinfo.h"
10 #include "src/base/bits.h"
11 #include "src/base/once.h"
12 #include "src/base/utils/random-number-generator.h"
13 #include "src/bootstrapper.h"
14 #include "src/codegen.h"
15 #include "src/compilation-cache.h"
16 #include "src/conversions.h"
17 #include "src/debug/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
20 #include "src/heap/array-buffer-tracker-inl.h"
21 #include "src/heap/gc-idle-time-handler.h"
22 #include "src/heap/gc-tracer.h"
23 #include "src/heap/incremental-marking.h"
24 #include "src/heap/mark-compact-inl.h"
25 #include "src/heap/mark-compact.h"
26 #include "src/heap/memory-reducer.h"
27 #include "src/heap/object-stats.h"
28 #include "src/heap/objects-visiting-inl.h"
29 #include "src/heap/objects-visiting.h"
30 #include "src/heap/remembered-set.h"
31 #include "src/heap/scavenge-job.h"
32 #include "src/heap/scavenger-inl.h"
33 #include "src/heap/store-buffer.h"
34 #include "src/interpreter/interpreter.h"
35 #include "src/regexp/jsregexp.h"
36 #include "src/runtime-profiler.h"
37 #include "src/snapshot/natives.h"
38 #include "src/snapshot/serializer-common.h"
39 #include "src/snapshot/snapshot.h"
40 #include "src/tracing/trace-event.h"
41 #include "src/type-feedback-vector.h"
42 #include "src/utils.h"
43 #include "src/v8.h"
44 #include "src/v8threads.h"
45 #include "src/vm-state-inl.h"
46
47 namespace v8 {
48 namespace internal {
49
50
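// Node in a singly linked list of externally registered strong-root ranges;
// each node covers the pointer range [start, end).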
51 struct Heap::StrongRootsList {
52 Object** start;
53 Object** end;
54 StrongRootsList* next;
55 };
56
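// Allocation observer that asks the heap to schedule an idle-time scavenge
// task after every |step_size| bytes of observed allocation.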
57 class IdleScavengeObserver : public AllocationObserver {
58 public:
  IdleScavengeObserver(Heap& heap, intptr_t step_size)
      : AllocationObserver(step_size), heap_(heap) {}
61
  void Step(int bytes_allocated, Address, size_t) override {
63 heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
64 }
65
66 private:
67 Heap& heap_;
68 };
69
Heap::Heap()
71 : external_memory_(0),
72 external_memory_limit_(kExternalAllocationLimit),
73 external_memory_at_last_mark_compact_(0),
74 isolate_(nullptr),
75 code_range_size_(0),
76 // semispace_size_ should be a power of 2 and old_generation_size_ should
77 // be a multiple of Page::kPageSize.
78 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
79 initial_semispace_size_(Page::kPageSize),
80 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
81 initial_old_generation_size_(max_old_generation_size_ /
82 kInitalOldGenerationLimitFactor),
83 old_generation_size_configured_(false),
84 max_executable_size_(256ul * (kPointerSize / 4) * MB),
85 // Variables set based on semispace_size_ and old_generation_size_ in
86 // ConfigureHeap.
87 // Will be 4 * reserved_semispace_size_ to ensure that young
88 // generation can be aligned to its size.
89 maximum_committed_(0),
90 survived_since_last_expansion_(0),
91 survived_last_scavenge_(0),
92 always_allocate_scope_count_(0),
93 memory_pressure_level_(MemoryPressureLevel::kNone),
94 contexts_disposed_(0),
95 number_of_disposed_maps_(0),
96 global_ic_age_(0),
97 new_space_(this),
98 old_space_(NULL),
99 code_space_(NULL),
100 map_space_(NULL),
101 lo_space_(NULL),
102 gc_state_(NOT_IN_GC),
103 gc_post_processing_depth_(0),
104 allocations_count_(0),
105 raw_allocations_hash_(0),
106 ms_count_(0),
107 gc_count_(0),
108 remembered_unmapped_pages_index_(0),
109 #ifdef DEBUG
110 allocation_timeout_(0),
111 #endif // DEBUG
112 old_generation_allocation_limit_(initial_old_generation_size_),
113 old_gen_exhausted_(false),
114 optimize_for_memory_usage_(false),
115 inline_allocation_disabled_(false),
116 total_regexp_code_generated_(0),
117 tracer_(nullptr),
118 high_survival_rate_period_length_(0),
119 promoted_objects_size_(0),
120 promotion_ratio_(0),
121 semi_space_copied_object_size_(0),
122 previous_semi_space_copied_object_size_(0),
123 semi_space_copied_rate_(0),
124 nodes_died_in_new_space_(0),
125 nodes_copied_in_new_space_(0),
126 nodes_promoted_(0),
127 maximum_size_scavenges_(0),
128 max_gc_pause_(0.0),
129 total_gc_time_ms_(0.0),
130 max_alive_after_gc_(0),
131 min_in_mutator_(kMaxInt),
132 marking_time_(0.0),
133 sweeping_time_(0.0),
134 last_idle_notification_time_(0.0),
135 last_gc_time_(0.0),
136 scavenge_collector_(nullptr),
137 mark_compact_collector_(nullptr),
138 memory_allocator_(nullptr),
139 store_buffer_(this),
140 incremental_marking_(nullptr),
141 gc_idle_time_handler_(nullptr),
142 memory_reducer_(nullptr),
143 object_stats_(nullptr),
144 scavenge_job_(nullptr),
145 idle_scavenge_observer_(nullptr),
146 full_codegen_bytes_generated_(0),
147 crankshaft_codegen_bytes_generated_(0),
148 new_space_allocation_counter_(0),
149 old_generation_allocation_counter_(0),
150 old_generation_size_at_last_gc_(0),
151 gcs_since_last_deopt_(0),
152 global_pretenuring_feedback_(nullptr),
153 ring_buffer_full_(false),
154 ring_buffer_end_(0),
155 promotion_queue_(this),
156 configured_(false),
157 current_gc_flags_(Heap::kNoGCFlags),
158 current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
159 external_string_table_(this),
160 gc_callbacks_depth_(0),
161 deserialization_complete_(false),
162 strong_roots_list_(NULL),
163 heap_iterator_depth_(0),
164 force_oom_(false) {
165 // Allow build-time customization of the max semispace size. Building
166 // V8 with snapshots and a non-default max semispace size is much
167 // easier if you can define it as part of the build environment.
168 #if defined(V8_MAX_SEMISPACE_SIZE)
169 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
170 #endif
171
172 // Ensure old_generation_size_ is a multiple of kPageSize.
173 DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
174
175 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
176 set_native_contexts_list(NULL);
177 set_allocation_sites_list(Smi::FromInt(0));
178 set_encountered_weak_collections(Smi::FromInt(0));
179 set_encountered_weak_cells(Smi::FromInt(0));
180 set_encountered_transition_arrays(Smi::FromInt(0));
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
183 RememberUnmappedPage(NULL, false);
184 }
185
186
intptr_t Heap::Capacity() {
188 if (!HasBeenSetUp()) return 0;
189
190 return new_space_.Capacity() + OldGenerationCapacity();
191 }
192
intptr_t Heap::OldGenerationCapacity() {
194 if (!HasBeenSetUp()) return 0;
195
196 return old_space_->Capacity() + code_space_->Capacity() +
197 map_space_->Capacity() + lo_space_->SizeOfObjects();
198 }
199
200
intptr_t Heap::CommittedOldGenerationMemory() {
202 if (!HasBeenSetUp()) return 0;
203
204 return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
205 map_space_->CommittedMemory() + lo_space_->Size();
206 }
207
208
intptr_t Heap::CommittedMemory() {
210 if (!HasBeenSetUp()) return 0;
211
212 return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
213 }
214
215
size_t Heap::CommittedPhysicalMemory() {
217 if (!HasBeenSetUp()) return 0;
218
219 return new_space_.CommittedPhysicalMemory() +
220 old_space_->CommittedPhysicalMemory() +
221 code_space_->CommittedPhysicalMemory() +
222 map_space_->CommittedPhysicalMemory() +
223 lo_space_->CommittedPhysicalMemory();
224 }
225
226
intptr_t Heap::CommittedMemoryExecutable() {
228 if (!HasBeenSetUp()) return 0;
229
230 return memory_allocator()->SizeExecutable();
231 }
232
233
void Heap::UpdateMaximumCommitted() {
235 if (!HasBeenSetUp()) return;
236
237 intptr_t current_committed_memory = CommittedMemory();
238 if (current_committed_memory > maximum_committed_) {
239 maximum_committed_ = current_committed_memory;
240 }
241 }
242
243
intptr_t Heap::Available() {
245 if (!HasBeenSetUp()) return 0;
246
247 intptr_t total = 0;
248 AllSpaces spaces(this);
249 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
250 total += space->Available();
251 }
252 return total;
253 }
254
255
bool Heap::HasBeenSetUp() {
257 return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
258 lo_space_ != NULL;
259 }
260
261
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
264 // Is global GC requested?
265 if (space != NEW_SPACE) {
266 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
267 *reason = "GC in old space requested";
268 return MARK_COMPACTOR;
269 }
270
271 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
272 *reason = "GC in old space forced by flags";
273 return MARK_COMPACTOR;
274 }
275
276 // Is enough data promoted to justify a global GC?
277 if (OldGenerationAllocationLimitReached()) {
278 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
279 *reason = "promotion limit reached";
280 return MARK_COMPACTOR;
281 }
282
283 // Have allocation in OLD and LO failed?
284 if (old_gen_exhausted_) {
285 isolate_->counters()
286 ->gc_compactor_caused_by_oldspace_exhaustion()
287 ->Increment();
288 *reason = "old generations exhausted";
289 return MARK_COMPACTOR;
290 }
291
292 // Is there enough space left in OLD to guarantee that a scavenge can
293 // succeed?
294 //
295 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
296 // for object promotion. It counts only the bytes that the memory
297 // allocator has not yet allocated from the OS and assigned to any space,
298 // and does not count available bytes already in the old space or code
299 // space. Undercounting is safe---we may get an unrequested full GC when
300 // a scavenge would have succeeded.
301 if (memory_allocator()->MaxAvailable() <= new_space_.Size()) {
302 isolate_->counters()
303 ->gc_compactor_caused_by_oldspace_exhaustion()
304 ->Increment();
305 *reason = "scavenge might not succeed";
306 return MARK_COMPACTOR;
307 }
308
309 // Default
310 *reason = NULL;
311 return SCAVENGER;
312 }
313
314
315 // TODO(1238405): Combine the infrastructure for --heap-stats and
316 // --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set. The following logic is used to avoid double logging.
321 #ifdef DEBUG
322 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
323 if (FLAG_heap_stats) {
324 ReportHeapStatistics("Before GC");
325 } else if (FLAG_log_gc) {
326 new_space_.ReportStatistics();
327 }
328 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
329 #else
330 if (FLAG_log_gc) {
331 new_space_.CollectStatistics();
332 new_space_.ReportStatistics();
333 new_space_.ClearHistograms();
334 }
335 #endif // DEBUG
336 }
337
338
void Heap::PrintShortHeapStatistics() {
340 if (!FLAG_trace_gc_verbose) return;
341 PrintIsolate(isolate_, "Memory allocator, used: %6" V8PRIdPTR
342 " KB, available: %6" V8PRIdPTR " KB\n",
343 memory_allocator()->Size() / KB,
344 memory_allocator()->Available() / KB);
345 PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR
346 " KB"
347 ", available: %6" V8PRIdPTR
348 " KB"
349 ", committed: %6" V8PRIdPTR " KB\n",
350 new_space_.Size() / KB, new_space_.Available() / KB,
351 new_space_.CommittedMemory() / KB);
352 PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR
353 " KB"
354 ", available: %6" V8PRIdPTR
355 " KB"
356 ", committed: %6" V8PRIdPTR " KB\n",
357 old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
358 old_space_->CommittedMemory() / KB);
359 PrintIsolate(isolate_, "Code space, used: %6" V8PRIdPTR
360 " KB"
361 ", available: %6" V8PRIdPTR
362 " KB"
363 ", committed: %6" V8PRIdPTR " KB\n",
364 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
365 code_space_->CommittedMemory() / KB);
366 PrintIsolate(isolate_, "Map space, used: %6" V8PRIdPTR
367 " KB"
368 ", available: %6" V8PRIdPTR
369 " KB"
370 ", committed: %6" V8PRIdPTR " KB\n",
371 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
372 map_space_->CommittedMemory() / KB);
373 PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
374 " KB"
375 ", available: %6" V8PRIdPTR
376 " KB"
377 ", committed: %6" V8PRIdPTR " KB\n",
378 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
379 lo_space_->CommittedMemory() / KB);
380 PrintIsolate(isolate_, "All spaces, used: %6" V8PRIdPTR
381 " KB"
382 ", available: %6" V8PRIdPTR
383 " KB"
384 ", committed: %6" V8PRIdPTR " KB\n",
385 this->SizeOfObjects() / KB, this->Available() / KB,
386 this->CommittedMemory() / KB);
387 PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
388 static_cast<intptr_t>(external_memory_ / KB));
389 PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
390 total_gc_time_ms_);
391 }
392
393 // TODO(1238405): Combine the infrastructure for --heap-stats and
394 // --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // As with the reporting before GC, we use some complicated logic to ensure
  // that NewSpace statistics are logged exactly once when --log-gc is turned
  // on.
398 #if defined(DEBUG)
399 if (FLAG_heap_stats) {
400 new_space_.CollectStatistics();
401 ReportHeapStatistics("After GC");
402 } else if (FLAG_log_gc) {
403 new_space_.ReportStatistics();
404 }
405 #else
406 if (FLAG_log_gc) new_space_.ReportStatistics();
407 #endif // DEBUG
408 for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
409 ++i) {
410 int count = deferred_counters_[i];
411 deferred_counters_[i] = 0;
412 while (count > 0) {
413 count--;
414 isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
415 }
416 }
417 }
418
419
void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
421 deferred_counters_[feature]++;
422 }
423
424
void Heap::GarbageCollectionPrologue() {
426 {
427 AllowHeapAllocation for_the_first_part_of_prologue;
428 gc_count_++;
429
430 #ifdef VERIFY_HEAP
431 if (FLAG_verify_heap) {
432 Verify();
433 }
434 #endif
435 }
436
437 // Reset GC statistics.
438 promoted_objects_size_ = 0;
439 previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
440 semi_space_copied_object_size_ = 0;
441 nodes_died_in_new_space_ = 0;
442 nodes_copied_in_new_space_ = 0;
443 nodes_promoted_ = 0;
444
445 UpdateMaximumCommitted();
446
447 #ifdef DEBUG
448 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
449
450 if (FLAG_gc_verbose) Print();
451
452 ReportStatisticsBeforeGC();
453 #endif // DEBUG
454
455 if (new_space_.IsAtMaximumCapacity()) {
456 maximum_size_scavenges_++;
457 } else {
458 maximum_size_scavenges_ = 0;
459 }
460 CheckNewSpaceExpansionCriteria();
461 UpdateNewSpaceAllocationCounter();
462 store_buffer()->MoveEntriesToRememberedSet();
463 }
464
465
intptr_t Heap::SizeOfObjects() {
467 intptr_t total = 0;
468 AllSpaces spaces(this);
469 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
470 total += space->SizeOfObjects();
471 }
472 return total;
473 }
474
475
const char* Heap::GetSpaceName(int idx) {
477 switch (idx) {
478 case NEW_SPACE:
479 return "new_space";
480 case OLD_SPACE:
481 return "old_space";
482 case MAP_SPACE:
483 return "map_space";
484 case CODE_SPACE:
485 return "code_space";
486 case LO_SPACE:
487 return "large_object_space";
488 default:
489 UNREACHABLE();
490 }
491 return nullptr;
492 }
493
494
void Heap::RepairFreeListsAfterDeserialization() {
496 PagedSpaces spaces(this);
497 for (PagedSpace* space = spaces.next(); space != NULL;
498 space = spaces.next()) {
499 space->RepairFreeListsAfterDeserialization();
500 }
501 }
502
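// Merges pretenuring feedback collected locally (e.g. by a scavenger) into
// the global feedback table. Allocation sites may themselves have been moved
// by the GC, so forwarding addresses are resolved before the counts are
// merged.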
void Heap::MergeAllocationSitePretenuringFeedback(
    const base::HashMap& local_pretenuring_feedback) {
505 AllocationSite* site = nullptr;
506 for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
507 local_entry != nullptr;
508 local_entry = local_pretenuring_feedback.Next(local_entry)) {
509 site = reinterpret_cast<AllocationSite*>(local_entry->key);
510 MapWord map_word = site->map_word();
511 if (map_word.IsForwardingAddress()) {
512 site = AllocationSite::cast(map_word.ToForwardingAddress());
513 }
514
    // We have not validated the allocation site yet, since we have not
    // dereferenced the site while collecting the information.
    // This is an inlined check of AllocationMemento::IsValid.
518 if (!site->IsAllocationSite() || site->IsZombie()) continue;
519
520 int value =
521 static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
522 DCHECK_GT(value, 0);
523
524 if (site->IncrementMementoFoundCount(value)) {
525 global_pretenuring_feedback_->LookupOrInsert(site,
526 ObjectHash(site->address()));
527 }
528 }
529 }
530
531
532 class Heap::PretenuringScope {
533 public:
  explicit PretenuringScope(Heap* heap) : heap_(heap) {
535 heap_->global_pretenuring_feedback_ = new base::HashMap(
536 base::HashMap::PointersMatch, kInitialFeedbackCapacity);
537 }
538
  ~PretenuringScope() {
540 delete heap_->global_pretenuring_feedback_;
541 heap_->global_pretenuring_feedback_ = nullptr;
542 }
543
544 private:
545 Heap* heap_;
546 };
547
548
void Heap::ProcessPretenuringFeedback() {
550 bool trigger_deoptimization = false;
551 if (FLAG_allocation_site_pretenuring) {
552 int tenure_decisions = 0;
553 int dont_tenure_decisions = 0;
554 int allocation_mementos_found = 0;
555 int allocation_sites = 0;
556 int active_allocation_sites = 0;
557
558 AllocationSite* site = nullptr;
559
560 // Step 1: Digest feedback for recorded allocation sites.
561 bool maximum_size_scavenge = MaximumSizeScavenge();
562 for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
563 e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
564 allocation_sites++;
565 site = reinterpret_cast<AllocationSite*>(e->key);
566 int found_count = site->memento_found_count();
567 // An entry in the storage does not imply that the count is > 0 because
568 // allocation sites might have been reset due to too many objects dying
569 // in old space.
570 if (found_count > 0) {
571 DCHECK(site->IsAllocationSite());
572 active_allocation_sites++;
573 allocation_mementos_found += found_count;
574 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
575 trigger_deoptimization = true;
576 }
577 if (site->GetPretenureMode() == TENURED) {
578 tenure_decisions++;
579 } else {
580 dont_tenure_decisions++;
581 }
582 }
583 }
584
585 // Step 2: Deopt maybe tenured allocation sites if necessary.
586 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
587 if (deopt_maybe_tenured) {
588 Object* list_element = allocation_sites_list();
589 while (list_element->IsAllocationSite()) {
590 site = AllocationSite::cast(list_element);
591 DCHECK(site->IsAllocationSite());
592 allocation_sites++;
593 if (site->IsMaybeTenure()) {
594 site->set_deopt_dependent_code(true);
595 trigger_deoptimization = true;
596 }
597 list_element = site->weak_next();
598 }
599 }
600
601 if (trigger_deoptimization) {
602 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
603 }
604
605 if (FLAG_trace_pretenuring_statistics &&
606 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
607 dont_tenure_decisions > 0)) {
608 PrintIsolate(isolate(),
609 "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
610 "active_sites=%d "
611 "mementos=%d tenured=%d not_tenured=%d\n",
612 deopt_maybe_tenured ? 1 : 0, allocation_sites,
613 active_allocation_sites, allocation_mementos_found,
614 tenure_decisions, dont_tenure_decisions);
615 }
616 }
617 }
618
619
void Heap::DeoptMarkedAllocationSites() {
621 // TODO(hpayer): If iterating over the allocation sites list becomes a
622 // performance issue, use a cache data structure in heap instead.
623 Object* list_element = allocation_sites_list();
624 while (list_element->IsAllocationSite()) {
625 AllocationSite* site = AllocationSite::cast(list_element);
626 if (site->deopt_dependent_code()) {
627 site->dependent_code()->MarkCodeForDeoptimization(
628 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
629 site->set_deopt_dependent_code(false);
630 }
631 list_element = site->weak_next();
632 }
633 Deoptimizer::DeoptimizeMarkedCode(isolate_);
634 }
635
636
void Heap::GarbageCollectionEpilogue() {
638 // In release mode, we only zap the from space under heap verification.
639 if (Heap::ShouldZapGarbage()) {
640 ZapFromSpace();
641 }
642
643 #ifdef VERIFY_HEAP
644 if (FLAG_verify_heap) {
645 Verify();
646 }
647 #endif
648
649 AllowHeapAllocation for_the_rest_of_the_epilogue;
650
651 #ifdef DEBUG
652 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
653 if (FLAG_print_handles) PrintHandles();
654 if (FLAG_gc_verbose) Print();
655 if (FLAG_code_stats) ReportCodeStatistics("After GC");
656 if (FLAG_check_handle_count) CheckHandleCount();
657 #endif
658 if (FLAG_deopt_every_n_garbage_collections > 0) {
659 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
660 // the topmost optimized frame can be deoptimized safely, because it
661 // might not have a lazy bailout point right after its current PC.
662 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
663 Deoptimizer::DeoptimizeAll(isolate());
664 gcs_since_last_deopt_ = 0;
665 }
666 }
667
668 UpdateMaximumCommitted();
669
670 isolate_->counters()->alive_after_last_gc()->Set(
671 static_cast<int>(SizeOfObjects()));
672
673 isolate_->counters()->string_table_capacity()->Set(
674 string_table()->Capacity());
675 isolate_->counters()->number_of_symbols()->Set(
676 string_table()->NumberOfElements());
677
678 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
679 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
680 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
681 (crankshaft_codegen_bytes_generated_ +
682 full_codegen_bytes_generated_)));
683 }
684
685 if (CommittedMemory() > 0) {
686 isolate_->counters()->external_fragmentation_total()->AddSample(
687 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
688
689 isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
690 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
691 isolate_->counters()->heap_fraction_old_space()->AddSample(static_cast<int>(
692 (old_space()->CommittedMemory() * 100.0) / CommittedMemory()));
693 isolate_->counters()->heap_fraction_code_space()->AddSample(
694 static_cast<int>((code_space()->CommittedMemory() * 100.0) /
695 CommittedMemory()));
696 isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
697 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
698 isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
699 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
700
701 isolate_->counters()->heap_sample_total_committed()->AddSample(
702 static_cast<int>(CommittedMemory() / KB));
703 isolate_->counters()->heap_sample_total_used()->AddSample(
704 static_cast<int>(SizeOfObjects() / KB));
705 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
706 static_cast<int>(map_space()->CommittedMemory() / KB));
707 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
708 static_cast<int>(code_space()->CommittedMemory() / KB));
709
710 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
711 static_cast<int>(MaximumCommittedMemory() / KB));
712 }
713
714 #define UPDATE_COUNTERS_FOR_SPACE(space) \
715 isolate_->counters()->space##_bytes_available()->Set( \
716 static_cast<int>(space()->Available())); \
717 isolate_->counters()->space##_bytes_committed()->Set( \
718 static_cast<int>(space()->CommittedMemory())); \
719 isolate_->counters()->space##_bytes_used()->Set( \
720 static_cast<int>(space()->SizeOfObjects()));
721 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
722 if (space()->CommittedMemory() > 0) { \
723 isolate_->counters()->external_fragmentation_##space()->AddSample( \
724 static_cast<int>(100 - \
725 (space()->SizeOfObjects() * 100.0) / \
726 space()->CommittedMemory())); \
727 }
728 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
729 UPDATE_COUNTERS_FOR_SPACE(space) \
730 UPDATE_FRAGMENTATION_FOR_SPACE(space)
731
732 UPDATE_COUNTERS_FOR_SPACE(new_space)
733 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
734 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
735 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
736 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
737 #undef UPDATE_COUNTERS_FOR_SPACE
738 #undef UPDATE_FRAGMENTATION_FOR_SPACE
739 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
740
741 #ifdef DEBUG
742 ReportStatisticsAfterGC();
743 #endif // DEBUG
744
745 // Remember the last top pointer so that we can later find out
746 // whether we allocated in new space since the last GC.
747 new_space_top_after_last_gc_ = new_space()->top();
748 last_gc_time_ = MonotonicallyIncreasingTimeInMs();
749
750 ReduceNewSpaceSize();
751 }
752
753
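// Each stack trace frame occupies four consecutive elements of the weak fixed
// array; element j + 2 holds the code object and j + 3 the code offset. This
// pass replaces the code object slot with the resolved source position.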
void Heap::PreprocessStackTraces() {
755 WeakFixedArray::Iterator iterator(weak_stack_trace_list());
756 FixedArray* elements;
757 while ((elements = iterator.Next<FixedArray>())) {
758 for (int j = 1; j < elements->length(); j += 4) {
759 Object* maybe_code = elements->get(j + 2);
760 // If GC happens while adding a stack trace to the weak fixed array,
761 // which has been copied into a larger backing store, we may run into
762 // a stack trace that has already been preprocessed. Guard against this.
763 if (!maybe_code->IsAbstractCode()) break;
764 AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
765 int offset = Smi::cast(elements->get(j + 3))->value();
766 int pos = abstract_code->SourcePosition(offset);
767 elements->set(j + 2, Smi::FromInt(pos));
768 }
769 }
  // We must not compact the weak fixed list here, as we may be in the middle
  // of writing to it when the GC was triggered. Instead, we reset the root
  // value.
772 set_weak_stack_trace_list(Smi::FromInt(0));
773 }
774
775
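// Tracks nesting of GC callback invocations. CheckReenter() is true only in
// the outermost scope, so embedder prologue/epilogue callbacks are not
// re-entered from within a callback-triggered GC.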
776 class GCCallbacksScope {
777 public:
  explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
779 heap_->gc_callbacks_depth_++;
780 }
  ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
782
  bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
784
785 private:
786 Heap* heap_;
787 };
788
789
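// Services a pending GC interrupt: memory pressure takes priority; otherwise
// the action depends on what the incremental marker requested (completing
// marking via a full GC, or just finalizing incremental marking).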
void Heap::HandleGCRequest() {
791 if (HighMemoryPressure()) {
792 incremental_marking()->reset_request_type();
793 CheckMemoryPressure();
794 } else if (incremental_marking()->request_type() ==
795 IncrementalMarking::COMPLETE_MARKING) {
796 incremental_marking()->reset_request_type();
797 CollectAllGarbage(current_gc_flags_, "GC interrupt",
798 current_gc_callback_flags_);
799 } else if (incremental_marking()->request_type() ==
800 IncrementalMarking::FINALIZATION &&
801 incremental_marking()->IsMarking() &&
802 !incremental_marking()->finalize_marking_completed()) {
803 incremental_marking()->reset_request_type();
804 FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
805 }
806 }
807
808
void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
810 scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
811 }
812
813
void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
815 if (FLAG_trace_incremental_marking) {
816 PrintF("[IncrementalMarking] (%s).\n", gc_reason);
817 }
818
819 HistogramTimerScope incremental_marking_scope(
820 isolate()->counters()->gc_incremental_marking_finalize());
821 TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
822 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
823
824 {
825 GCCallbacksScope scope(this);
826 if (scope.CheckReenter()) {
827 AllowHeapAllocation allow_allocation;
828 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
829 VMState<EXTERNAL> state(isolate_);
830 HandleScope handle_scope(isolate_);
831 CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
832 }
833 }
834 incremental_marking()->FinalizeIncrementally();
835 {
836 GCCallbacksScope scope(this);
837 if (scope.CheckReenter()) {
838 AllowHeapAllocation allow_allocation;
839 TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
840 VMState<EXTERNAL> state(isolate_);
841 HandleScope handle_scope(isolate_);
842 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
843 }
844 }
845 }
846
847
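// Picks the histogram timer the current GC should be attributed to: scavenges,
// full GCs that finalize incremental marking (with or without memory-reducing
// mode), and non-incremental full compactions are counted separately.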
HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
849 if (collector == SCAVENGER) {
850 return isolate_->counters()->gc_scavenger();
851 } else {
852 if (!incremental_marking()->IsStopped()) {
853 if (ShouldReduceMemory()) {
854 return isolate_->counters()->gc_finalize_reduce_memory();
855 } else {
856 return isolate_->counters()->gc_finalize();
857 }
858 } else {
859 return isolate_->counters()->gc_compactor();
860 }
861 }
862 }
863
void Heap::CollectAllGarbage(int flags, const char* gc_reason,
                             const v8::GCCallbackFlags gc_callback_flags) {
866 // Since we are ignoring the return value, the exact choice of space does
867 // not matter, so long as we do not specify NEW_SPACE, which would not
868 // cause a full GC.
869 set_current_gc_flags(flags);
870 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
871 set_current_gc_flags(kNoGCFlags);
872 }
873
874
void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
876 // Since we are ignoring the return value, the exact choice of space does
877 // not matter, so long as we do not specify NEW_SPACE, which would not
878 // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC. Therefore, if we collect aggressively and a weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
887 if (isolate()->concurrent_recompilation_enabled()) {
888 // The optimizing compiler may be unnecessarily holding on to memory.
889 DisallowHeapAllocation no_recursive_gc;
890 isolate()->optimizing_compile_dispatcher()->Flush();
891 }
892 isolate()->ClearSerializerData();
893 set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
894 isolate_->compilation_cache()->Clear();
895 const int kMaxNumberOfAttempts = 7;
896 const int kMinNumberOfAttempts = 2;
897 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
898 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
899 v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
900 attempt + 1 >= kMinNumberOfAttempts) {
901 break;
902 }
903 }
904 set_current_gc_flags(kNoGCFlags);
905 new_space_.Shrink();
906 UncommitFromSpace();
907 }
908
909
void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
911 if (incremental_marking()->IsStopped()) {
912 if (incremental_marking()->CanBeActivated()) {
913 StartIncrementalMarking(
914 i::Heap::kNoGCFlags,
915 kGCCallbackFlagSynchronousPhantomCallbackProcessing, gc_reason);
916 } else {
917 CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
918 kGCCallbackFlagSynchronousPhantomCallbackProcessing);
919 }
920 } else {
    // Incremental marking is turned on and has already been started.
922
923 // TODO(mlippautz): Compute the time slice for incremental marking based on
924 // memory pressure.
925 double deadline = MonotonicallyIncreasingTimeInMs() +
926 FLAG_external_allocation_limit_incremental_time;
927 incremental_marking()->AdvanceIncrementalMarking(
928 deadline,
929 IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
930 IncrementalMarking::FORCE_MARKING,
931 IncrementalMarking::FORCE_COMPLETION));
932 }
933 }
934
935
void Heap::EnsureFillerObjectAtTop() {
937 // There may be an allocation memento behind objects in new space. Upon
938 // evacuation of a non-full new space (or if we are on the last page) there
939 // may be uninitialized memory behind top. We fill the remainder of the page
940 // with a filler.
941 Address to_top = new_space_.top();
942 Page* page = Page::FromAddress(to_top - kPointerSize);
943 if (page->Contains(to_top)) {
944 int remaining_in_page = static_cast<int>(page->area_end() - to_top);
945 CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
946 }
947 }
948
949
bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
                          const char* collector_reason,
                          const v8::GCCallbackFlags gc_callback_flags) {
953 // The VM is in the GC state until exiting this function.
954 VMState<GC> state(isolate_);
955
956 #ifdef DEBUG
957 // Reset the allocation timeout to the GC interval, but make sure to
958 // allow at least a few allocations after a collection. The reason
959 // for this is that we have a lot of allocation sequences and we
960 // assume that a garbage collection will allow the subsequent
961 // allocation attempts to go through.
962 allocation_timeout_ = Max(6, FLAG_gc_interval);
963 #endif
964
965 EnsureFillerObjectAtTop();
966
967 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
968 if (FLAG_trace_incremental_marking) {
969 PrintF("[IncrementalMarking] Scavenge during marking.\n");
970 }
971 }
972
973 if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
974 !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
975 !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
976 OldGenerationAllocationLimitReached()) {
977 if (!incremental_marking()->IsComplete() &&
978 !mark_compact_collector()->marking_deque_.IsEmpty() &&
979 !FLAG_gc_global) {
980 if (FLAG_trace_incremental_marking) {
981 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
982 }
983 collector = SCAVENGER;
984 collector_reason = "incremental marking delaying mark-sweep";
985 }
986 }
987
988 bool next_gc_likely_to_collect_more = false;
989 intptr_t committed_memory_before = 0;
990
991 if (collector == MARK_COMPACTOR) {
992 committed_memory_before = CommittedOldGenerationMemory();
993 }
994
995 {
996 tracer()->Start(collector, gc_reason, collector_reason);
997 DCHECK(AllowHeapAllocation::IsAllowed());
998 DisallowHeapAllocation no_allocation_during_gc;
999 GarbageCollectionPrologue();
1000
1001 {
1002 HistogramTimer* gc_type_timer = GCTypeTimer(collector);
1003 HistogramTimerScope histogram_timer_scope(gc_type_timer);
1004 TRACE_EVENT0("v8", gc_type_timer->name());
1005
1006 next_gc_likely_to_collect_more =
1007 PerformGarbageCollection(collector, gc_callback_flags);
1008 }
1009
1010 GarbageCollectionEpilogue();
1011 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
1012 isolate()->CheckDetachedContextsAfterGC();
1013 }
1014
1015 if (collector == MARK_COMPACTOR) {
1016 intptr_t committed_memory_after = CommittedOldGenerationMemory();
1017 intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
1018 MemoryReducer::Event event;
1019 event.type = MemoryReducer::kMarkCompact;
1020 event.time_ms = MonotonicallyIncreasingTimeInMs();
1021 // Trigger one more GC if
1022 // - this GC decreased committed memory,
1023 // - there is high fragmentation,
1024 // - there are live detached contexts.
1025 event.next_gc_likely_to_collect_more =
1026 (committed_memory_before - committed_memory_after) > MB ||
1027 HasHighFragmentation(used_memory_after, committed_memory_after) ||
1028 (detached_contexts()->length() > 0);
1029 if (deserialization_complete_) {
1030 memory_reducer_->NotifyMarkCompact(event);
1031 }
1032 memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
1033 }
1034
1035 tracer()->Stop(collector);
1036 }
1037
1038 if (collector == MARK_COMPACTOR &&
1039 (gc_callback_flags & (kGCCallbackFlagForced |
1040 kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
1041 isolate()->CountUsage(v8::Isolate::kForcedGC);
1042 }
1043
1044 // Start incremental marking for the next cycle. The heap snapshot
1045 // generator needs incremental marking to stay off after it aborted.
1046 if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
1047 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
1048 StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
1049 }
1050
1051 return next_gc_likely_to_collect_more;
1052 }
1053
1054
int Heap::NotifyContextDisposed(bool dependant_context) {
1056 if (!dependant_context) {
1057 tracer()->ResetSurvivalEvents();
1058 old_generation_size_configured_ = false;
1059 MemoryReducer::Event event;
1060 event.type = MemoryReducer::kPossibleGarbage;
1061 event.time_ms = MonotonicallyIncreasingTimeInMs();
1062 memory_reducer_->NotifyPossibleGarbage(event);
1063 }
1064 if (isolate()->concurrent_recompilation_enabled()) {
1065 // Flush the queued recompilation tasks.
1066 isolate()->optimizing_compile_dispatcher()->Flush();
1067 }
1068 AgeInlineCaches();
1069 number_of_disposed_maps_ = retained_maps()->Length();
1070 tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
1071 return ++contexts_disposed_;
1072 }
1073
1074
void Heap::StartIncrementalMarking(int gc_flags,
                                   const GCCallbackFlags gc_callback_flags,
                                   const char* reason) {
1078 DCHECK(incremental_marking()->IsStopped());
1079 set_current_gc_flags(gc_flags);
1080 current_gc_callback_flags_ = gc_callback_flags;
1081 incremental_marking()->Start(reason);
1082 }
1083
1084
void Heap::StartIdleIncrementalMarking() {
1086 gc_idle_time_handler_->ResetNoProgressCounter();
1087 StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
1088 "idle");
1089 }
1090
1091
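// Moves |len| elements inside |array| from |src_index| to |dst_index| with a
// memmove, then records the destination range with the write barrier, since
// the moved slots may contain pointers the GC needs to track.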
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                        int len) {
1094 if (len == 0) return;
1095
1096 DCHECK(array->map() != fixed_cow_array_map());
1097 Object** dst_objects = array->data_start() + dst_index;
1098 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
1099 FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
1100 }
1101
1102
1103 #ifdef VERIFY_HEAP
1104 // Helper class for verifying the string table.
1105 class StringTableVerifier : public ObjectVisitor {
1106 public:
  void VisitPointers(Object** start, Object** end) override {
1108 // Visit all HeapObject pointers in [start, end).
1109 for (Object** p = start; p < end; p++) {
1110 if ((*p)->IsHeapObject()) {
1111 HeapObject* object = HeapObject::cast(*p);
1112 Isolate* isolate = object->GetIsolate();
1113 // Check that the string is actually internalized.
1114 CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
1115 object->IsInternalizedString());
1116 }
1117 }
1118 }
1119 };
1120
1121
static void VerifyStringTable(Heap* heap) {
1123 StringTableVerifier verifier;
1124 heap->string_table()->IterateElements(&verifier);
1125 }
1126 #endif // VERIFY_HEAP
1127
1128
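// Reserves the address ranges requested by the deserializer. If an allocation
// fails, a GC is performed and the reservation loop is retried, up to
// kThreshold attempts; returns false if the space could not be reserved.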
bool Heap::ReserveSpace(Reservation* reservations) {
1130 bool gc_performed = true;
1131 int counter = 0;
1132 static const int kThreshold = 20;
1133 while (gc_performed && counter++ < kThreshold) {
1134 gc_performed = false;
1135 for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
1136 space++) {
1137 Reservation* reservation = &reservations[space];
1138 DCHECK_LE(1, reservation->length());
1139 if (reservation->at(0).size == 0) continue;
1140 bool perform_gc = false;
1141 if (space == LO_SPACE) {
1142 DCHECK_EQ(1, reservation->length());
1143 perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
1144 } else {
1145 for (auto& chunk : *reservation) {
1146 AllocationResult allocation;
1147 int size = chunk.size;
1148 DCHECK_LE(size, MemoryAllocator::PageAreaSize(
1149 static_cast<AllocationSpace>(space)));
1150 if (space == NEW_SPACE) {
1151 allocation = new_space()->AllocateRawUnaligned(size);
1152 } else {
1153 // The deserializer will update the skip list.
1154 allocation = paged_space(space)->AllocateRawUnaligned(
1155 size, PagedSpace::IGNORE_SKIP_LIST);
1156 }
1157 HeapObject* free_space = nullptr;
1158 if (allocation.To(&free_space)) {
1159 // Mark with a free list node, in case we have a GC before
1160 // deserializing.
1161 Address free_space_address = free_space->address();
1162 CreateFillerObjectAt(free_space_address, size,
1163 ClearRecordedSlots::kNo);
1164 DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
1165 chunk.start = free_space_address;
1166 chunk.end = free_space_address + size;
1167 } else {
1168 perform_gc = true;
1169 break;
1170 }
1171 }
1172 }
1173 if (perform_gc) {
1174 if (space == NEW_SPACE) {
1175 CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
1176 } else {
1177 if (counter > 1) {
1178 CollectAllGarbage(
1179 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
1180 "failed to reserve space in paged or large "
1181 "object space, trying to reduce memory footprint");
1182 } else {
1183 CollectAllGarbage(
1184 kAbortIncrementalMarkingMask,
1185 "failed to reserve space in paged or large object space");
1186 }
1187 }
1188 gc_performed = true;
1189 break; // Abort for-loop over spaces and retry.
1190 }
1191 }
1192 }
1193
1194 return !gc_performed;
1195 }
1196
1197
void Heap::EnsureFromSpaceIsCommitted() {
1199 if (new_space_.CommitFromSpaceIfNeeded()) return;
1200
1201 // Committing memory to from space failed.
1202 // Memory is exhausted and we will die.
1203 V8::FatalProcessOutOfMemory("Committing semi space failed.");
1204 }
1205
1206
void Heap::ClearNormalizedMapCaches() {
1208 if (isolate_->bootstrapper()->IsActive() &&
1209 !incremental_marking()->IsMarking()) {
1210 return;
1211 }
1212
1213 Object* context = native_contexts_list();
1214 while (!context->IsUndefined(isolate())) {
1215 // GC can happen when the context is not fully initialized,
1216 // so the cache can be undefined.
1217 Object* cache =
1218 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1219 if (!cache->IsUndefined(isolate())) {
1220 NormalizedMapCache::cast(cache)->Clear();
1221 }
1222 context = Context::cast(context)->next_context_link();
1223 }
1224 }
1225
1226
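// Recomputes survival statistics after a scavenge. promotion_ratio_ and
// semi_space_copied_rate_ are percentages of the new-space size before the
// GC; promotion_rate_ is relative to the bytes copied by the previous
// scavenge.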
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1228 if (start_new_space_size == 0) return;
1229
1230 promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1231 static_cast<double>(start_new_space_size) * 100);
1232
1233 if (previous_semi_space_copied_object_size_ > 0) {
1234 promotion_rate_ =
1235 (static_cast<double>(promoted_objects_size_) /
1236 static_cast<double>(previous_semi_space_copied_object_size_) * 100);
1237 } else {
1238 promotion_rate_ = 0;
1239 }
1240
1241 semi_space_copied_rate_ =
1242 (static_cast<double>(semi_space_copied_object_size_) /
1243 static_cast<double>(start_new_space_size) * 100);
1244
1245 double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1246 tracer()->AddSurvivalRatio(survival_rate);
1247 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1248 high_survival_rate_period_length_++;
1249 } else {
1250 high_survival_rate_period_length_ = 0;
1251 }
1252 }
1253
bool Heap::PerformGarbageCollection(
    GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1256 int freed_global_handles = 0;
1257
1258 if (collector != SCAVENGER) {
1259 PROFILE(isolate_, CodeMovingGCEvent());
1260 }
1261
1262 #ifdef VERIFY_HEAP
1263 if (FLAG_verify_heap) {
1264 VerifyStringTable(this);
1265 }
1266 #endif
1267
1268 GCType gc_type =
1269 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1270
1271 {
1272 GCCallbacksScope scope(this);
1273 if (scope.CheckReenter()) {
1274 AllowHeapAllocation allow_allocation;
1275 TRACE_GC(tracer(), collector == MARK_COMPACTOR
1276 ? GCTracer::Scope::MC_EXTERNAL_PROLOGUE
1277 : GCTracer::Scope::SCAVENGER_EXTERNAL_PROLOGUE);
1278 VMState<EXTERNAL> state(isolate_);
1279 HandleScope handle_scope(isolate_);
1280 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1281 }
1282 }
1283
1284 EnsureFromSpaceIsCommitted();
1285
1286 int start_new_space_size = Heap::new_space()->SizeAsInt();
1287
1288 if (IsHighSurvivalRate()) {
1289 // We speed up the incremental marker if it is running so that it
1290 // does not fall behind the rate of promotion, which would cause a
1291 // constantly growing old space.
1292 incremental_marking()->NotifyOfHighPromotionRate();
1293 }
1294
1295 {
1296 Heap::PretenuringScope pretenuring_scope(this);
1297
1298 if (collector == MARK_COMPACTOR) {
1299 UpdateOldGenerationAllocationCounter();
1300 // Perform mark-sweep with optional compaction.
1301 MarkCompact();
1302 old_gen_exhausted_ = false;
1303 old_generation_size_configured_ = true;
1304 // This should be updated before PostGarbageCollectionProcessing, which
1305 // can cause another GC. Take into account the objects promoted during GC.
1306 old_generation_allocation_counter_ +=
1307 static_cast<size_t>(promoted_objects_size_);
1308 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
1309 } else {
1310 Scavenge();
1311 }
1312
1313 ProcessPretenuringFeedback();
1314 }
1315
1316 UpdateSurvivalStatistics(start_new_space_size);
1317 ConfigureInitialOldGenerationSize();
1318
1319 isolate_->counters()->objs_since_last_young()->Set(0);
1320
1321 gc_post_processing_depth_++;
1322 {
1323 AllowHeapAllocation allow_allocation;
1324 TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_WEAK_GLOBAL_HANDLES);
1325 freed_global_handles =
1326 isolate_->global_handles()->PostGarbageCollectionProcessing(
1327 collector, gc_callback_flags);
1328 }
1329 gc_post_processing_depth_--;
1330
1331 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1332
1333 // Update relocatables.
1334 Relocatable::PostGarbageCollectionProcessing(isolate_);
1335
1336 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
1337 double mutator_speed =
1338 tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
1339 intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
1340 if (collector == MARK_COMPACTOR) {
1341 // Register the amount of external allocated memory.
1342 external_memory_at_last_mark_compact_ = external_memory_;
1343 external_memory_limit_ = external_memory_ + kExternalAllocationLimit;
1344 SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1345 } else if (HasLowYoungGenerationAllocationRate() &&
1346 old_generation_size_configured_) {
1347 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1348 }
1349
1350 {
1351 GCCallbacksScope scope(this);
1352 if (scope.CheckReenter()) {
1353 AllowHeapAllocation allow_allocation;
1354 TRACE_GC(tracer(), collector == MARK_COMPACTOR
1355 ? GCTracer::Scope::MC_EXTERNAL_EPILOGUE
1356 : GCTracer::Scope::SCAVENGER_EXTERNAL_EPILOGUE);
1357 VMState<EXTERNAL> state(isolate_);
1358 HandleScope handle_scope(isolate_);
1359 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1360 }
1361 }
1362
1363 #ifdef VERIFY_HEAP
1364 if (FLAG_verify_heap) {
1365 VerifyStringTable(this);
1366 }
1367 #endif
1368
1369 return freed_global_handles > 0;
1370 }
1371
1372
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1374 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1375 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1376 if (!gc_prologue_callbacks_[i].pass_isolate) {
1377 v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
1378 gc_prologue_callbacks_[i].callback);
1379 callback(gc_type, flags);
1380 } else {
1381 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1382 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1383 }
1384 }
1385 }
1386 if (FLAG_trace_object_groups && (gc_type == kGCTypeIncrementalMarking ||
1387 gc_type == kGCTypeMarkSweepCompact)) {
1388 isolate_->global_handles()->PrintObjectGroups();
1389 }
1390 }
1391
1392
void Heap::CallGCEpilogueCallbacks(GCType gc_type,
                                   GCCallbackFlags gc_callback_flags) {
1395 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1396 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1397 if (!gc_epilogue_callbacks_[i].pass_isolate) {
1398 v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
1399 gc_epilogue_callbacks_[i].callback);
1400 callback(gc_type, gc_callback_flags);
1401 } else {
1402 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1403 gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1404 }
1405 }
1406 }
1407 }
1408
1409
void Heap::MarkCompact() {
1411 PauseAllocationObserversScope pause_observers(this);
1412
1413 gc_state_ = MARK_COMPACT;
1414 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1415
1416 uint64_t size_of_objects_before_gc = SizeOfObjects();
1417
1418 mark_compact_collector()->Prepare();
1419
1420 ms_count_++;
1421
1422 MarkCompactPrologue();
1423
1424 mark_compact_collector()->CollectGarbage();
1425
1426 LOG(isolate_, ResourceEvent("markcompact", "end"));
1427
1428 MarkCompactEpilogue();
1429
1430 if (FLAG_allocation_site_pretenuring) {
1431 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1432 }
1433 }
1434
1435
void Heap::MarkCompactEpilogue() {
1437 gc_state_ = NOT_IN_GC;
1438
1439 isolate_->counters()->objs_since_last_full()->Set(0);
1440
1441 incremental_marking()->Epilogue();
1442
1443 PreprocessStackTraces();
1444 DCHECK(incremental_marking()->IsStopped());
1445
1446 // We finished a marking cycle. We can uncommit the marking deque until
1447 // we start marking again.
1448 mark_compact_collector()->marking_deque()->Uninitialize();
1449 mark_compact_collector()->EnsureMarkingDequeIsCommitted(
1450 MarkCompactCollector::kMinMarkingDequeSize);
1451 }
1452
1453
void Heap::MarkCompactPrologue() {
1455 // At any old GC clear the keyed lookup cache to enable collection of unused
1456 // maps.
1457 isolate_->keyed_lookup_cache()->Clear();
1458 isolate_->context_slot_cache()->Clear();
1459 isolate_->descriptor_lookup_cache()->Clear();
1460 RegExpResultsCache::Clear(string_split_cache());
1461 RegExpResultsCache::Clear(regexp_multiple_cache());
1462
1463 isolate_->compilation_cache()->MarkCompactPrologue();
1464
1465 CompletelyClearInstanceofCache();
1466
1467 FlushNumberStringCache();
1468 ClearNormalizedMapCaches();
1469 }
1470
1471
1472 #ifdef VERIFY_HEAP
1473 // Visitor class to verify pointers in code or data space do not point into
1474 // new space.
1475 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1476 public:
  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1478
  void VisitPointers(Object** start, Object** end) override {
1480 for (Object** current = start; current < end; current++) {
1481 if ((*current)->IsHeapObject()) {
1482 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1483 }
1484 }
1485 }
1486
1487 private:
1488 Heap* heap_;
1489 };
1490
1491
static void VerifyNonPointerSpacePointers(Heap* heap) {
1493 // Verify that there are no pointers to new space in spaces where we
1494 // do not expect them.
1495 VerifyNonPointerSpacePointersVisitor v(heap);
1496 HeapObjectIterator code_it(heap->code_space());
1497 for (HeapObject* object = code_it.Next(); object != NULL;
1498 object = code_it.Next())
1499 object->Iterate(&v);
1500 }
1501 #endif // VERIFY_HEAP
1502
1503
void Heap::CheckNewSpaceExpansionCriteria() {
1505 if (FLAG_experimental_new_space_growth_heuristic) {
1506 if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1507 survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
1508 // Grow the size of new space if there is room to grow, and more than 10%
1509 // have survived the last scavenge.
1510 new_space_.Grow();
1511 survived_since_last_expansion_ = 0;
1512 }
1513 } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1514 survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1515 // Grow the size of new space if there is room to grow, and enough data
1516 // has survived scavenge since the last expansion.
1517 new_space_.Grow();
1518 survived_since_last_expansion_ = 0;
1519 }
1520 }
1521
1522
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1524 return heap->InNewSpace(*p) &&
1525 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1526 }
1527
1528
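// An object counts as unmodified if it is an API-constructed JSObject whose
// map is still the constructor's initial map, i.e. its shape has not changed
// since construction.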
static bool IsUnmodifiedHeapObject(Object** p) {
1530 Object* object = *p;
1531 if (object->IsSmi()) return false;
1532 HeapObject* heap_object = HeapObject::cast(object);
1533 if (!object->IsJSObject()) return false;
1534 JSObject* js_object = JSObject::cast(object);
1535 if (!js_object->WasConstructedFromApiFunction()) return false;
1536 JSFunction* constructor =
1537 JSFunction::cast(js_object->map()->GetConstructor());
1538
1539 return constructor->initial_map() == heap_object->map();
1540 }
1541
1542
void PromotionQueue::Initialize() {
  // The last to-space page may be used for the promotion queue. On promotion
  // conflict, we use the emergency stack.
1546 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1547 0);
1548 front_ = rear_ =
1549 reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
1550 limit_ = reinterpret_cast<struct Entry*>(
1551 Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
1552 ->area_start());
1553 emergency_stack_ = NULL;
1554 }
1555
1556
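// On promotion conflict (see Initialize above), the queue entries that live
// on the current to-space page are copied onto a newly allocated emergency
// stack.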
void PromotionQueue::RelocateQueueHead() {
1558 DCHECK(emergency_stack_ == NULL);
1559
1560 Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
1561 struct Entry* head_start = rear_;
1562 struct Entry* head_end =
1563 Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
1564
1565 int entries_count =
1566 static_cast<int>(head_end - head_start) / sizeof(struct Entry);
1567
1568 emergency_stack_ = new List<Entry>(2 * entries_count);
1569
1570 while (head_start != head_end) {
1571 struct Entry* entry = head_start++;
1572 // New space allocation in SemiSpaceCopyObject marked the region
1573 // overlapping with promotion queue as uninitialized.
1574 MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
1575 emergency_stack_->Add(*entry);
1576 }
1577 rear_ = head_end;
1578 }
1579
1580
1581 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1582 public:
1583 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1584
1585 virtual Object* RetainAs(Object* object) {
1586 if (!heap_->InFromSpace(object)) {
1587 return object;
1588 }
1589
1590 MapWord map_word = HeapObject::cast(object)->map_word();
1591 if (map_word.IsForwardingAddress()) {
1592 return map_word.ToForwardingAddress();
1593 }
1594 return NULL;
1595 }
1596
1597 private:
1598 Heap* heap_;
1599 };
1600
1601
1602 void Heap::Scavenge() {
1603 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
1604 RelocationLock relocation_lock(this);
1605 // There are soft limits in the allocation code, designed to trigger a mark
1606 // sweep collection by failing allocations. There is no sense in trying to
1607 // trigger one during scavenge: scavenge allocations should always succeed.
1608 AlwaysAllocateScope scope(isolate());
1609
1610 // Bump-pointer allocations done during scavenge are not real allocations.
1611 // Pause the inline allocation steps.
1612 PauseAllocationObserversScope pause_observers(this);
1613
1614 mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
1615
1616 #ifdef VERIFY_HEAP
1617 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1618 #endif
1619
1620 gc_state_ = SCAVENGE;
1621
1622 // Implements Cheney's copying algorithm
1623 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1624
1625 // Used for updating survived_since_last_expansion_ at function end.
1626 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1627
1628 scavenge_collector_->SelectScavengingVisitorsTable();
1629
1630 if (UsingEmbedderHeapTracer()) {
1631 // Register found wrappers with embedder so it can add them to its marking
1632 // deque and correctly manage the case when v8 scavenger collects the
1633 // wrappers by either keeping wrappables alive, or cleaning marking deque.
1634 mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
1635 }
1636
1637 // Flip the semispaces. After flipping, to space is empty, from space has
1638 // live objects.
1639 new_space_.Flip();
1640 new_space_.ResetAllocationInfo();
1641
1642 // We need to sweep newly copied objects which can be either in the
1643 // to space or promoted to the old generation. For to-space
1644 // objects, we treat the bottom of the to space as a queue. Newly
1645 // copied and unswept objects lie between a 'front' mark and the
1646 // allocation pointer.
1647 //
1648 // Promoted objects can go into various old-generation spaces, and
1649 // can be allocated internally in the spaces (from the free list).
1650 // We treat the top of the to space as a queue of addresses of
1651 // promoted objects. The addresses of newly promoted and unswept
1652 // objects lie between a 'front' mark and a 'rear' mark that is
1653 // updated as a side effect of promoting an object.
1654 //
1655 // There is guaranteed to be enough room at the top of the to space
1656 // for the addresses of promoted objects: every object promoted
1657 // frees up its size in bytes from the top of the new space, and
1658 // objects are at least one pointer in size.
1659 Address new_space_front = new_space_.ToSpaceStart();
1660 promotion_queue_.Initialize();
1661
1662 PromotionMode promotion_mode = CurrentPromotionMode();
1663 ScavengeVisitor scavenge_visitor(this);
1664
1665 if (FLAG_scavenge_reclaim_unmodified_objects) {
1666 isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
1667 &IsUnmodifiedHeapObject);
1668 }
1669
1670 {
1671 // Copy roots.
1672 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
1673 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1674 }
1675
1676 {
1677 // Copy objects reachable from the old generation.
1678 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
1679 RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
1680 return Scavenger::CheckAndScavengeObject(this, addr);
1681 });
1682
1683 RememberedSet<OLD_TO_NEW>::IterateTyped(
1684 this, [this](SlotType type, Address host_addr, Address addr) {
1685 return UpdateTypedSlotHelper::UpdateTypedSlot(
1686 isolate(), type, addr, [this](Object** addr) {
1687 // We expect that objects referenced by code are long living.
1688 // If we do not force promotion, then we need to clear
1689 // old_to_new slots in dead code objects after mark-compact.
1690 return Scavenger::CheckAndScavengeObject(
1691 this, reinterpret_cast<Address>(addr));
1692 });
1693 });
1694 }
1695
1696 {
1697 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
1698 // Copy objects reachable from the encountered weak collections list.
1699 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1700 // Copy objects reachable from the encountered weak cells.
1701 scavenge_visitor.VisitPointer(&encountered_weak_cells_);
1702 }
1703
1704 {
1705 // Copy objects reachable from the code flushing candidates list.
1706 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
1707 MarkCompactCollector* collector = mark_compact_collector();
1708 if (collector->is_code_flushing_enabled()) {
1709 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1710 }
1711 }
1712
1713 {
1714 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
1715 new_space_front =
1716 DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
1717 }
1718
1719 if (FLAG_scavenge_reclaim_unmodified_objects) {
1720 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
1721 &IsUnscavengedHeapObject);
1722
1723 isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
1724 &scavenge_visitor);
1725 new_space_front =
1726 DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
1727 } else {
1728 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
1729 while (isolate()->global_handles()->IterateObjectGroups(
1730 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1731 new_space_front =
1732 DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
1733 }
1734 isolate()->global_handles()->RemoveObjectGroups();
1735 isolate()->global_handles()->RemoveImplicitRefGroups();
1736
1737 isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1738 &IsUnscavengedHeapObject);
1739
1740 isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
1741 &scavenge_visitor);
1742 new_space_front =
1743 DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
1744 }
1745
1746 UpdateNewSpaceReferencesInExternalStringTable(
1747 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1748
1749 promotion_queue_.Destroy();
1750
1751 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1752
1753 ScavengeWeakObjectRetainer weak_object_retainer(this);
1754 ProcessYoungWeakReferences(&weak_object_retainer);
1755
1756 DCHECK(new_space_front == new_space_.top());
1757
1758 // Set age mark.
1759 new_space_.set_age_mark(new_space_.top());
1760
1761 ArrayBufferTracker::FreeDeadInNewSpace(this);
1762
1763 // Update how much has survived scavenge.
1764 IncrementYoungSurvivorsCounter(static_cast<int>(
1765 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1766
1767 LOG(isolate_, ResourceEvent("scavenge", "end"));
1768
1769 gc_state_ = NOT_IN_GC;
1770 }
1771
1772
1773 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1774 Object** p) {
1775 MapWord first_word = HeapObject::cast(*p)->map_word();
1776
1777 if (!first_word.IsForwardingAddress()) {
1778 // Unreachable external string can be finalized.
1779 heap->FinalizeExternalString(String::cast(*p));
1780 return NULL;
1781 }
1782
1783 // String is still reachable.
1784 return String::cast(first_word.ToForwardingAddress());
1785 }
1786
1787
1788 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1789 ExternalStringTableUpdaterCallback updater_func) {
1790 if (external_string_table_.new_space_strings_.is_empty()) return;
1791
1792 Object** start = &external_string_table_.new_space_strings_[0];
1793 Object** end = start + external_string_table_.new_space_strings_.length();
1794 Object** last = start;
1795
1796 for (Object** p = start; p < end; ++p) {
1797 String* target = updater_func(this, p);
1798
1799 if (target == NULL) continue;
1800
1801 DCHECK(target->IsExternalString());
1802
1803 if (InNewSpace(target)) {
1804 // String is still in new space. Update the table entry.
1805 *last = target;
1806 ++last;
1807 } else {
1808 // String got promoted. Move it to the old string list.
1809 external_string_table_.AddOldString(target);
1810 }
1811 }
1812
1813 DCHECK(last <= end);
1814 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1815 }
1816
1817
1818 void Heap::UpdateReferencesInExternalStringTable(
1819 ExternalStringTableUpdaterCallback updater_func) {
1820 // Update old space string references.
1821 if (external_string_table_.old_space_strings_.length() > 0) {
1822 Object** start = &external_string_table_.old_space_strings_[0];
1823 Object** end = start + external_string_table_.old_space_strings_.length();
1824 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1825 }
1826
1827 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1828 }
1829
1830
1831 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
1832 ProcessNativeContexts(retainer);
1833 ProcessAllocationSites(retainer);
1834 }
1835
1836
1837 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
1838 ProcessNativeContexts(retainer);
1839 }
1840
1841
1842 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1843 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1844 // Update the head of the list of contexts.
1845 set_native_contexts_list(head);
1846 }
1847
1848
1849 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1850 Object* allocation_site_obj =
1851 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1852 set_allocation_sites_list(allocation_site_obj);
1853 }
1854
1855 void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
1856 set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
1857 set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
1858 }
1859
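// Walks the weak list of allocation sites; every site whose pretenure mode
// matches |flag| has its pretenuring decision reset, its feedback removed and
// its dependent code scheduled for deoptimization.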
1860 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1861 DisallowHeapAllocation no_allocation_scope;
1862 Object* cur = allocation_sites_list();
1863 bool marked = false;
1864 while (cur->IsAllocationSite()) {
1865 AllocationSite* casted = AllocationSite::cast(cur);
1866 if (casted->GetPretenureMode() == flag) {
1867 casted->ResetPretenureDecision();
1868 casted->set_deopt_dependent_code(true);
1869 marked = true;
1870 RemoveAllocationSitePretenuringFeedback(casted);
1871 }
1872 cur = casted->weak_next();
1873 }
1874 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1875 }
1876
1877
1878 void Heap::EvaluateOldSpaceLocalPretenuring(
1879 uint64_t size_of_objects_before_gc) {
1880 uint64_t size_of_objects_after_gc = SizeOfObjects();
1881 double old_generation_survival_rate =
1882 (static_cast<double>(size_of_objects_after_gc) * 100) /
1883 static_cast<double>(size_of_objects_before_gc);
1884
1885 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1886 // Too many objects died in the old generation, pretenuring of wrong
1887 // allocation sites may be the cause for that. We have to deopt all
1888 // dependent code registered in the allocation sites to re-evaluate
1889 // our pretenuring decisions.
1890 ResetAllAllocationSitesDependentCode(TENURED);
1891 if (FLAG_trace_pretenuring) {
1892 PrintF(
1893 "Deopt all allocation sites dependent code due to low survival "
1894 "rate in the old generation %f\n",
1895 old_generation_survival_rate);
1896 }
1897 }
1898 }
1899
1900
1901 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1902 DisallowHeapAllocation no_allocation;
1903 // All external strings are listed in the external string table.
1904
1905 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1906 public:
1907 explicit ExternalStringTableVisitorAdapter(
1908 v8::ExternalResourceVisitor* visitor)
1909 : visitor_(visitor) {}
1910 virtual void VisitPointers(Object** start, Object** end) {
1911 for (Object** p = start; p < end; p++) {
1912 DCHECK((*p)->IsExternalString());
1913 visitor_->VisitExternalString(
1914 Utils::ToLocal(Handle<String>(String::cast(*p))));
1915 }
1916 }
1917
1918 private:
1919 v8::ExternalResourceVisitor* visitor_;
1920 } external_string_table_visitor(visitor);
1921
1922 external_string_table_.Iterate(&external_string_table_visitor);
1923 }
1924
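// Drains the scavenger's two work lists -- newly copied objects in to-space
// and addresses of promoted objects in the promotion queue -- until both are
// empty, and returns the updated to-space front.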
1925 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1926 Address new_space_front,
1927 PromotionMode promotion_mode) {
1928 do {
1929 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1930 // The addresses new_space_front and new_space_.top() define a
1931 // queue of unprocessed copied objects. Process them until the
1932 // queue is empty.
1933 while (new_space_front != new_space_.top()) {
1934 if (!Page::IsAlignedToPageSize(new_space_front)) {
1935 HeapObject* object = HeapObject::FromAddress(new_space_front);
1936 if (promotion_mode == PROMOTE_MARKED) {
1937 new_space_front += StaticScavengeVisitor<PROMOTE_MARKED>::IterateBody(
1938 object->map(), object);
1939 } else {
1940 new_space_front +=
1941 StaticScavengeVisitor<DEFAULT_PROMOTION>::IterateBody(
1942 object->map(), object);
1943 }
1944 } else {
1945 new_space_front = Page::FromAllocationAreaAddress(new_space_front)
1946 ->next_page()
1947 ->area_start();
1948 }
1949 }
1950
1951 // Promote and process all the to-be-promoted objects.
1952 {
1953 while (!promotion_queue()->is_empty()) {
1954 HeapObject* target;
1955 int32_t size;
1956 bool was_marked_black;
1957 promotion_queue()->remove(&target, &size, &was_marked_black);
1958
1959 // Promoted object might be already partially visited
1960 // during old space pointer iteration. Thus we search specifically
1961 // for pointers into the from-semispace instead of looking for pointers
1962 // to new space.
1963 DCHECK(!target->IsMap());
1964
1965 IteratePromotedObject(target, static_cast<int>(size), was_marked_black,
1966 &Scavenger::ScavengeObject);
1967 }
1968 }
1969
1970 // Take another spin if there are now unswept objects in new space
1971 // (there are currently no more unswept promoted objects).
1972 } while (new_space_front != new_space_.top());
1973
1974 return new_space_front;
1975 }
1976
1977
1978 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1979 0); // NOLINT
1980 STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
1981 0); // NOLINT
1982 #ifdef V8_HOST_ARCH_32_BIT
1983 STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
1984 0); // NOLINT
1985 #endif
1986
1987
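// Returns the maximum number of filler bytes that the given alignment can
// ever require, independent of the concrete allocation address.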
1988 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
1989 switch (alignment) {
1990 case kWordAligned:
1991 return 0;
1992 case kDoubleAligned:
1993 case kDoubleUnaligned:
1994 return kDoubleSize - kPointerSize;
1995 case kSimd128Unaligned:
1996 return kSimd128Size - kPointerSize;
1997 default:
1998 UNREACHABLE();
1999 }
2000 return 0;
2001 }
2002
2003
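// Returns the number of filler bytes needed in front of |address| so that an
// object placed after the filler satisfies |alignment|, or 0 if the address
// is already suitable. For example, with 32-bit pointers (kPointerSize == 4,
// kDoubleSize == 8), a kDoubleAligned request at an address ending in 0x4
// needs kPointerSize bytes of fill, while an address ending in 0x0 needs none.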
2004 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
2005 intptr_t offset = OffsetFrom(address);
2006 if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
2007 return kPointerSize;
2008 if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
2009 return kDoubleSize - kPointerSize; // No fill if double is always aligned.
2010 if (alignment == kSimd128Unaligned) {
2011 return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
2012 kSimd128AlignmentMask;
2013 }
2014 return 0;
2015 }
2016
2017
2018 HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
2019 CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
2020 return HeapObject::FromAddress(object->address() + filler_size);
2021 }
2022
2023
2024 HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
2025 int allocation_size,
2026 AllocationAlignment alignment) {
2027 int filler_size = allocation_size - object_size;
2028 DCHECK(filler_size > 0);
2029 int pre_filler = GetFillToAlign(object->address(), alignment);
2030 if (pre_filler) {
2031 object = PrecedeWithFiller(object, pre_filler);
2032 filler_size -= pre_filler;
2033 }
2034 if (filler_size)
2035 CreateFillerObjectAt(object->address() + object_size, filler_size,
2036 ClearRecordedSlots::kNo);
2037 return object;
2038 }
2039
2040
2041 HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
2042 return AlignWithFiller(object, size - kPointerSize, size, kDoubleAligned);
2043 }
2044
2045
2046 void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
2047 ArrayBufferTracker::RegisterNew(this, buffer);
2048 }
2049
2050
2051 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
2052 ArrayBufferTracker::Unregister(this, buffer);
2053 }
2054
2055
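// As long as the old generation size has not been configured explicitly and
// survival events have been recorded, scale the old generation allocation
// limit by the observed average survival ratio, but never below the minimum.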
2056 void Heap::ConfigureInitialOldGenerationSize() {
2057 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2058 old_generation_allocation_limit_ =
2059 Max(kMinimumOldGenerationAllocationLimit,
2060 static_cast<intptr_t>(
2061 static_cast<double>(old_generation_allocation_limit_) *
2062 (tracer()->AverageSurvivalRatio() / 100)));
2063 }
2064 }
2065
2066
2067 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2068 int instance_size) {
2069 Object* result = nullptr;
2070 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
2071 if (!allocation.To(&result)) return allocation;
2072
2073 // Map::cast cannot be used due to uninitialized map field.
2074 reinterpret_cast<Map*>(result)->set_map(
2075 reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
2076 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2077 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2078 // Initialize to only containing tagged fields.
2079 reinterpret_cast<Map*>(result)->set_visitor_id(
2080 StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
2081 if (FLAG_unbox_double_fields) {
2082 reinterpret_cast<Map*>(result)
2083 ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2084 }
2085 reinterpret_cast<Map*>(result)->clear_unused();
2086 reinterpret_cast<Map*>(result)
2087 ->set_inobject_properties_or_constructor_function_index(0);
2088 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2089 reinterpret_cast<Map*>(result)->set_bit_field(0);
2090 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2091 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2092 Map::OwnsDescriptors::encode(true) |
2093 Map::ConstructionCounter::encode(Map::kNoSlackTracking);
2094 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2095 reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
2096 return result;
2097 }
2098
2099
2100 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2101 int instance_size,
2102 ElementsKind elements_kind) {
2103 HeapObject* result = nullptr;
2104 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
2105 if (!allocation.To(&result)) return allocation;
2106
2107 isolate()->counters()->maps_created()->Increment();
2108 result->set_map_no_write_barrier(meta_map());
2109 Map* map = Map::cast(result);
2110 map->set_instance_type(instance_type);
2111 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2112 map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
2113 map->set_instance_size(instance_size);
2114 map->clear_unused();
2115 map->set_inobject_properties_or_constructor_function_index(0);
2116 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2117 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2118 SKIP_WRITE_BARRIER);
2119 map->set_weak_cell_cache(Smi::FromInt(0));
2120 map->set_raw_transitions(Smi::FromInt(0));
2121 map->set_unused_property_fields(0);
2122 map->set_instance_descriptors(empty_descriptor_array());
2123 if (FLAG_unbox_double_fields) {
2124 map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2125 }
2126 // Must be called only after |instance_type|, |instance_size| and
2127 // |layout_descriptor| are set.
2128 map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
2129 map->set_bit_field(0);
2130 map->set_bit_field2(1 << Map::kIsExtensible);
2131 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2132 Map::OwnsDescriptors::encode(true) |
2133 Map::ConstructionCounter::encode(Map::kNoSlackTracking);
2134 map->set_bit_field3(bit_field3);
2135 map->set_elements_kind(elements_kind);
2136 map->set_new_target_is_base(true);
2137
2138 return map;
2139 }
2140
2141
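// Allocates |size| bytes in |space| and immediately turns the memory into a
// filler object, which keeps the heap iterable.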
2142 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2143 AllocationSpace space) {
2144 HeapObject* obj = nullptr;
2145 {
2146 AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
2147 AllocationResult allocation = AllocateRaw(size, space, align);
2148 if (!allocation.To(&obj)) return allocation;
2149 }
2150 #ifdef DEBUG
2151 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2152 DCHECK(chunk->owner()->identity() == space);
2153 #endif
2154 CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
2155 return obj;
2156 }
2157
2158
2159 const Heap::StringTypeTable Heap::string_type_table[] = {
2160 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2161 { type, size, k##camel_name##MapRootIndex } \
2162 ,
2163 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2164 #undef STRING_TYPE_ELEMENT
2165 };
2166
2167
2168 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2169 {"", kempty_stringRootIndex},
2170 #define CONSTANT_STRING_ELEMENT(name, contents) \
2171 { contents, k##name##RootIndex } \
2172 ,
2173 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2174 #undef CONSTANT_STRING_ELEMENT
2175 };
2176
2177
2178 const Heap::StructTable Heap::struct_table[] = {
2179 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2180 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2181 ,
2182 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2183 #undef STRUCT_TABLE_ELEMENT
2184 };
2185
2186 namespace {
2187
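// Fills in the map fields that AllocatePartialMap could not initialize
// because the empty fixed array, the empty descriptor array and the null
// value did not exist yet at that point.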
2188 void FinalizePartialMap(Heap* heap, Map* map) {
2189 map->set_code_cache(heap->empty_fixed_array());
2190 map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
2191 map->set_raw_transitions(Smi::FromInt(0));
2192 map->set_instance_descriptors(heap->empty_descriptor_array());
2193 if (FLAG_unbox_double_fields) {
2194 map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2195 }
2196 map->set_prototype(heap->null_value());
2197 map->set_constructor_or_backpointer(heap->null_value());
2198 }
2199
2200 } // namespace
2201
2202 bool Heap::CreateInitialMaps() {
2203 HeapObject* obj = nullptr;
2204 {
2205 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2206 if (!allocation.To(&obj)) return false;
2207 }
2208 // Map::cast cannot be used due to uninitialized map field.
2209 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2210 set_meta_map(new_meta_map);
2211 new_meta_map->set_map(new_meta_map);
2212
2213 { // Partial map allocation
2214 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2215 { \
2216 Map* map; \
2217 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2218 set_##field_name##_map(map); \
2219 }
2220
2221 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2222 fixed_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
2223 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2224 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2225 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2226
2227 #undef ALLOCATE_PARTIAL_MAP
2228 }
2229
2230 // Allocate the empty array.
2231 {
2232 AllocationResult allocation = AllocateEmptyFixedArray();
2233 if (!allocation.To(&obj)) return false;
2234 }
2235 set_empty_fixed_array(FixedArray::cast(obj));
2236
2237 {
2238 AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
2239 if (!allocation.To(&obj)) return false;
2240 }
2241 set_null_value(Oddball::cast(obj));
2242 Oddball::cast(obj)->set_kind(Oddball::kNull);
2243
2244 {
2245 AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
2246 if (!allocation.To(&obj)) return false;
2247 }
2248 set_undefined_value(Oddball::cast(obj));
2249 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2250 DCHECK(!InNewSpace(undefined_value()));
2251 {
2252 AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
2253 if (!allocation.To(&obj)) return false;
2254 }
2255 set_the_hole_value(Oddball::cast(obj));
2256 Oddball::cast(obj)->set_kind(Oddball::kTheHole);
2257
2258 // Set preliminary exception sentinel value before actually initializing it.
2259 set_exception(null_value());
2260
2261 // Allocate the empty descriptor array.
2262 {
2263 AllocationResult allocation = AllocateEmptyFixedArray();
2264 if (!allocation.To(&obj)) return false;
2265 }
2266 set_empty_descriptor_array(DescriptorArray::cast(obj));
2267
2268 // Fix the instance_descriptors for the existing maps.
2269 FinalizePartialMap(this, meta_map());
2270 FinalizePartialMap(this, fixed_array_map());
2271 FinalizePartialMap(this, undefined_map());
2272 undefined_map()->set_is_undetectable();
2273 FinalizePartialMap(this, null_map());
2274 null_map()->set_is_undetectable();
2275 FinalizePartialMap(this, the_hole_map());
2276
2277 { // Map allocation
2278 #define ALLOCATE_MAP(instance_type, size, field_name) \
2279 { \
2280 Map* map; \
2281 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2282 set_##field_name##_map(map); \
2283 }
2284
2285 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2286 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2287
2288 #define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
2289 constructor_function_index) \
2290 { \
2291 ALLOCATE_MAP((instance_type), (size), field_name); \
2292 field_name##_map()->SetConstructorFunctionIndex( \
2293 (constructor_function_index)); \
2294 }
2295
2296 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2297 fixed_cow_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
2298 DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
2299
2300 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2301 ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
2302 Context::NUMBER_FUNCTION_INDEX)
2303 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2304 mutable_heap_number)
2305 ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
2306 Context::SYMBOL_FUNCTION_INDEX)
2307 #define ALLOCATE_SIMD128_MAP(TYPE, Type, type, lane_count, lane_type) \
2308 ALLOCATE_PRIMITIVE_MAP(SIMD128_VALUE_TYPE, Type::kSize, type, \
2309 Context::TYPE##_FUNCTION_INDEX)
2310 SIMD128_TYPES(ALLOCATE_SIMD128_MAP)
2311 #undef ALLOCATE_SIMD128_MAP
2312 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2313
2314 ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
2315 Context::BOOLEAN_FUNCTION_INDEX);
2316 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2317 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2318 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2319 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2320 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2321 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
2322 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
2323
2324 for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2325 const StringTypeTable& entry = string_type_table[i];
2326 {
2327 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2328 if (!allocation.To(&obj)) return false;
2329 }
2330 Map* map = Map::cast(obj);
2331 map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
2332 // Mark cons string maps as unstable, because their objects can change
2333 // maps during GC.
2334 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2335 roots_[entry.index] = map;
2336 }
2337
2338 { // Create a separate external one byte string map for native sources.
2339 AllocationResult allocation =
2340 AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
2341 ExternalOneByteString::kShortSize);
2342 if (!allocation.To(&obj)) return false;
2343 Map* map = Map::cast(obj);
2344 map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
2345 set_native_source_string_map(map);
2346 }
2347
2348 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2349 fixed_double_array_map()->set_elements_kind(FAST_HOLEY_DOUBLE_ELEMENTS);
2350 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2351 ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
2352 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2353
2354 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2355 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2356
2357 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2358 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2359
2360 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2361
2362 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2363
2364 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2365 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2366 ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
2367 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2368 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2369
2370 ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
2371
2372 for (unsigned i = 0; i < arraysize(struct_table); i++) {
2373 const StructTable& entry = struct_table[i];
2374 Map* map;
2375 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2376 roots_[entry.index] = map;
2377 }
2378
2379 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2380 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2381
2382 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2383 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2384 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2385 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
2386 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2387 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2388 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
2389 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
2390
2391 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2392 native_context_map()->set_dictionary_map(true);
2393 native_context_map()->set_visitor_id(
2394 StaticVisitorBase::kVisitNativeContext);
2395
2396 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2397 shared_function_info)
2398
2399 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2400 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2401 external_map()->set_is_extensible(false);
2402 #undef ALLOCATE_PRIMITIVE_MAP
2403 #undef ALLOCATE_VARSIZE_MAP
2404 #undef ALLOCATE_MAP
2405 }
2406
2407 {
2408 AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
2409 if (!allocation.To(&obj)) return false;
2410 }
2411 set_true_value(Oddball::cast(obj));
2412 Oddball::cast(obj)->set_kind(Oddball::kTrue);
2413
2414 {
2415 AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
2416 if (!allocation.To(&obj)) return false;
2417 }
2418 set_false_value(Oddball::cast(obj));
2419 Oddball::cast(obj)->set_kind(Oddball::kFalse);
2420
2421 { // Empty arrays
2422 {
2423 ByteArray* byte_array;
2424 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2425 set_empty_byte_array(byte_array);
2426 }
2427
2428 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2429 { \
2430 FixedTypedArrayBase* obj; \
2431 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2432 return false; \
2433 set_empty_fixed_##type##_array(obj); \
2434 }
2435
2436 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2437 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2438 }
2439 DCHECK(!InNewSpace(empty_fixed_array()));
2440 return true;
2441 }
2442
2443
2444 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2445 PretenureFlag pretenure) {
2446 // Statically ensure that it is safe to allocate heap numbers in paged
2447 // spaces.
2448 int size = HeapNumber::kSize;
2449 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2450
2451 AllocationSpace space = SelectSpace(pretenure);
2452
2453 HeapObject* result = nullptr;
2454 {
2455 AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
2456 if (!allocation.To(&result)) return allocation;
2457 }
2458
2459 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2460 HeapObject::cast(result)->set_map_no_write_barrier(map);
2461 HeapNumber::cast(result)->set_value(value);
2462 return result;
2463 }
2464
2465 #define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
2466 AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count], \
2467 PretenureFlag pretenure) { \
2468 int size = Type::kSize; \
2469 STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize); \
2470 \
2471 AllocationSpace space = SelectSpace(pretenure); \
2472 \
2473 HeapObject* result = nullptr; \
2474 { \
2475 AllocationResult allocation = \
2476 AllocateRaw(size, space, kSimd128Unaligned); \
2477 if (!allocation.To(&result)) return allocation; \
2478 } \
2479 \
2480 result->set_map_no_write_barrier(type##_map()); \
2481 Type* instance = Type::cast(result); \
2482 for (int i = 0; i < lane_count; i++) { \
2483 instance->set_lane(i, lanes[i]); \
2484 } \
2485 return result; \
2486 }
2487 SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
2488 #undef SIMD_ALLOCATE_DEFINITION
2489
2490
2491 AllocationResult Heap::AllocateCell(Object* value) {
2492 int size = Cell::kSize;
2493 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2494
2495 HeapObject* result = nullptr;
2496 {
2497 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2498 if (!allocation.To(&result)) return allocation;
2499 }
2500 result->set_map_no_write_barrier(cell_map());
2501 Cell::cast(result)->set_value(value);
2502 return result;
2503 }
2504
2505
2506 AllocationResult Heap::AllocatePropertyCell() {
2507 int size = PropertyCell::kSize;
2508 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2509
2510 HeapObject* result = nullptr;
2511 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2512 if (!allocation.To(&result)) return allocation;
2513
2514 result->set_map_no_write_barrier(global_property_cell_map());
2515 PropertyCell* cell = PropertyCell::cast(result);
2516 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2517 SKIP_WRITE_BARRIER);
2518 cell->set_property_details(PropertyDetails(Smi::FromInt(0)));
2519 cell->set_value(the_hole_value());
2520 return result;
2521 }
2522
2523
2524 AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
2525 int size = WeakCell::kSize;
2526 STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
2527 HeapObject* result = nullptr;
2528 {
2529 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2530 if (!allocation.To(&result)) return allocation;
2531 }
2532 result->set_map_no_write_barrier(weak_cell_map());
2533 WeakCell::cast(result)->initialize(value);
2534 WeakCell::cast(result)->clear_next(the_hole_value());
2535 return result;
2536 }
2537
2538
2539 AllocationResult Heap::AllocateTransitionArray(int capacity) {
2540 DCHECK(capacity > 0);
2541 HeapObject* raw_array = nullptr;
2542 {
2543 AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
2544 if (!allocation.To(&raw_array)) return allocation;
2545 }
2546 raw_array->set_map_no_write_barrier(transition_array_map());
2547 TransitionArray* array = TransitionArray::cast(raw_array);
2548 array->set_length(capacity);
2549 MemsetPointer(array->data_start(), undefined_value(), capacity);
2550 // Transition arrays are tenured. When black allocation is on we have to
2551 // add the transition array to the list of encountered_transition_arrays.
2552 if (incremental_marking()->black_allocation()) {
2553 array->set_next_link(encountered_transition_arrays(),
2554 UPDATE_WEAK_WRITE_BARRIER);
2555 set_encountered_transition_arrays(array);
2556 } else {
2557 array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
2558 }
2559 return array;
2560 }
2561
2562
2563 void Heap::CreateApiObjects() {
2564 HandleScope scope(isolate());
2565 Factory* factory = isolate()->factory();
2566 Handle<Map> new_neander_map =
2567 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2568
2569 // Don't use Smi-only elements optimizations for objects with the neander
2570 // map. There are too many cases where element values are set directly with a
2571 // bottleneck to trap the Smi-only -> fast elements transition, and there
2572 // appears to be no benefit in optimizing this case.
2573 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2574 set_neander_map(*new_neander_map);
2575
2576 Handle<JSObject> listeners = factory->NewNeanderObject();
2577 Handle<FixedArray> elements = factory->NewFixedArray(2);
2578 elements->set(0, Smi::FromInt(0));
2579 listeners->set_elements(*elements);
2580 set_message_listeners(*listeners);
2581 }
2582
2583
2584 void Heap::CreateJSEntryStub() {
2585 JSEntryStub stub(isolate(), StackFrame::ENTRY);
2586 set_js_entry_code(*stub.GetCode());
2587 }
2588
2589
2590 void Heap::CreateJSConstructEntryStub() {
2591 JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2592 set_js_construct_entry_code(*stub.GetCode());
2593 }
2594
2595
2596 void Heap::CreateFixedStubs() {
2597 // Here we create roots for fixed stubs. They are needed at GC
2598 // for cooking and uncooking (check out frames.cc).
2599 // This eliminates the need for doing a dictionary lookup in the
2600 // stub cache for these stubs.
2601 HandleScope scope(isolate());
2602
2603 // Create stubs that should be there, so we don't unexpectedly have to
2604 // create them if we need them during the creation of another stub.
2605 // Stub creation mixes raw pointers and handles in an unsafe manner so
2606 // we cannot create stubs while we are creating stubs.
2607 CodeStub::GenerateStubsAheadOfTime(isolate());
2608
2609 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2610 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2611 // is created.
2612
2613 // gcc-4.4 has problem generating correct code of following snippet:
2614 // { JSEntryStub stub;
2615 // js_entry_code_ = *stub.GetCode();
2616 // }
2617 // { JSConstructEntryStub stub;
2618 // js_construct_entry_code_ = *stub.GetCode();
2619 // }
2620 // To workaround the problem, make separate functions without inlining.
2621 Heap::CreateJSEntryStub();
2622 Heap::CreateJSConstructEntryStub();
2623 }
2624
2625
2626 void Heap::CreateInitialObjects() {
2627 HandleScope scope(isolate());
2628 Factory* factory = isolate()->factory();
2629
2630 // The -0 value must be set before NewNumber works.
2631 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2632 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2633
2634 set_nan_value(*factory->NewHeapNumber(
2635 std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
2636 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2637 set_minus_infinity_value(
2638 *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
2639
2640 // Allocate initial string table.
2641 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2642
2645 // Finish initializing oddballs after creating the string table.
2646 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2647 factory->nan_value(), false, "undefined",
2648 Oddball::kUndefined);
2649
2650 // Initialize the null_value.
2651 Oddball::Initialize(isolate(), factory->null_value(), "null",
2652 handle(Smi::FromInt(0), isolate()), false, "object",
2653 Oddball::kNull);
2654
2655 // Initialize the_hole_value.
2656 Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
2657 handle(Smi::FromInt(-1), isolate()), false, "undefined",
2658 Oddball::kTheHole);
2659
2660 // Initialize the true_value.
2661 Oddball::Initialize(isolate(), factory->true_value(), "true",
2662 handle(Smi::FromInt(1), isolate()), true, "boolean",
2663 Oddball::kTrue);
2664
2665 // Initialize the false_value.
2666 Oddball::Initialize(isolate(), factory->false_value(), "false",
2667 handle(Smi::FromInt(0), isolate()), false, "boolean",
2668 Oddball::kFalse);
2669
2670 set_uninitialized_value(
2671 *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
2672 handle(Smi::FromInt(-1), isolate()), false,
2673 "undefined", Oddball::kUninitialized));
2674
2675 set_arguments_marker(
2676 *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
2677 handle(Smi::FromInt(-4), isolate()), false,
2678 "undefined", Oddball::kArgumentsMarker));
2679
2680 set_no_interceptor_result_sentinel(*factory->NewOddball(
2681 factory->no_interceptor_result_sentinel_map(),
2682 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2683 false, "undefined", Oddball::kOther));
2684
2685 set_termination_exception(*factory->NewOddball(
2686 factory->termination_exception_map(), "termination_exception",
2687 handle(Smi::FromInt(-3), isolate()), false, "undefined",
2688 Oddball::kOther));
2689
2690 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2691 handle(Smi::FromInt(-5), isolate()), false,
2692 "undefined", Oddball::kException));
2693
2694 set_optimized_out(
2695 *factory->NewOddball(factory->optimized_out_map(), "optimized_out",
2696 handle(Smi::FromInt(-6), isolate()), false,
2697 "undefined", Oddball::kOptimizedOut));
2698
2699 set_stale_register(
2700 *factory->NewOddball(factory->stale_register_map(), "stale_register",
2701 handle(Smi::FromInt(-7), isolate()), false,
2702 "undefined", Oddball::kStaleRegister));
2703
2704 for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2705 Handle<String> str =
2706 factory->InternalizeUtf8String(constant_string_table[i].contents);
2707 roots_[constant_string_table[i].index] = *str;
2708 }
2709
2710 // Create the code_stubs dictionary. The initial size is set to avoid
2711 // expanding the dictionary during bootstrapping.
2712 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2713
2714 set_instanceof_cache_function(Smi::FromInt(0));
2715 set_instanceof_cache_map(Smi::FromInt(0));
2716 set_instanceof_cache_answer(Smi::FromInt(0));
2717
2718 {
2719 HandleScope scope(isolate());
2720 #define SYMBOL_INIT(name) \
2721 { \
2722 Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
2723 Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol()); \
2724 symbol->set_name(*name##d); \
2725 roots_[k##name##RootIndex] = *symbol; \
2726 }
2727 PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
2728 #undef SYMBOL_INIT
2729 }
2730
2731 {
2732 HandleScope scope(isolate());
2733 #define SYMBOL_INIT(name, description) \
2734 Handle<Symbol> name = factory->NewSymbol(); \
2735 Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
2736 name->set_name(*name##d); \
2737 roots_[k##name##RootIndex] = *name;
2738 PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
2739 #undef SYMBOL_INIT
2740
2741 #define SYMBOL_INIT(name, description) \
2742 Handle<Symbol> name = factory->NewSymbol(); \
2743 Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
2744 name->set_is_well_known_symbol(true); \
2745 name->set_name(*name##d); \
2746 roots_[k##name##RootIndex] = *name;
2747 WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
2748 #undef SYMBOL_INIT
2749 }
2750
2751 // Allocate the dictionary of intrinsic function names.
2752 Handle<NameDictionary> intrinsic_names =
2753 NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
2754 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2755 set_intrinsic_function_names(*intrinsic_names);
2756
2757 Handle<NameDictionary> empty_properties_dictionary =
2758 NameDictionary::New(isolate(), 0, TENURED);
2759 empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
2760 set_empty_properties_dictionary(*empty_properties_dictionary);
2761
2762 set_number_string_cache(
2763 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2764
2765 // Allocate cache for single character one byte strings.
2766 set_single_character_string_cache(
2767 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2768
2769 // Allocate cache for string split and regexp-multiple.
2770 set_string_split_cache(*factory->NewFixedArray(
2771 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2772 set_regexp_multiple_cache(*factory->NewFixedArray(
2773 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2774
2775 // Allocate cache for external strings pointing to native source code.
2776 set_natives_source_cache(
2777 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2778
2779 set_experimental_natives_source_cache(
2780 *factory->NewFixedArray(ExperimentalNatives::GetBuiltinsCount()));
2781
2782 set_extra_natives_source_cache(
2783 *factory->NewFixedArray(ExtraNatives::GetBuiltinsCount()));
2784
2785 set_experimental_extra_natives_source_cache(
2786 *factory->NewFixedArray(ExperimentalExtraNatives::GetBuiltinsCount()));
2787
2788 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2789
2790 // The symbol registry is initialized lazily.
2791 set_symbol_registry(Smi::FromInt(0));
2792
2793 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2794 // Number of queued microtasks stored in Isolate::pending_microtask_count().
2795 set_microtask_queue(empty_fixed_array());
2796
2797 {
2798 StaticFeedbackVectorSpec spec;
2799 FeedbackVectorSlot load_ic_slot = spec.AddLoadICSlot();
2800 FeedbackVectorSlot keyed_load_ic_slot = spec.AddKeyedLoadICSlot();
2801 FeedbackVectorSlot store_ic_slot = spec.AddStoreICSlot();
2802 FeedbackVectorSlot keyed_store_ic_slot = spec.AddKeyedStoreICSlot();
2803
2804 DCHECK_EQ(load_ic_slot,
2805 FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
2806 DCHECK_EQ(keyed_load_ic_slot,
2807 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
2808 DCHECK_EQ(store_ic_slot,
2809 FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
2810 DCHECK_EQ(keyed_store_ic_slot,
2811 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
2812
2813 Handle<TypeFeedbackMetadata> dummy_metadata =
2814 TypeFeedbackMetadata::New(isolate(), &spec);
2815 Handle<TypeFeedbackVector> dummy_vector =
2816 TypeFeedbackVector::New(isolate(), dummy_metadata);
2817
2818 Object* megamorphic = *TypeFeedbackVector::MegamorphicSentinel(isolate());
2819 dummy_vector->Set(load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2820 dummy_vector->Set(keyed_load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2821 dummy_vector->Set(store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2822 dummy_vector->Set(keyed_store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2823
2824 set_dummy_vector(*dummy_vector);
2825 }
2826
2827 {
2828 Handle<FixedArray> empty_literals_array =
2829 factory->NewFixedArray(1, TENURED);
2830 empty_literals_array->set(0, *factory->empty_fixed_array());
2831 set_empty_literals_array(*empty_literals_array);
2832 }
2833
2834 {
2835 Handle<FixedArray> empty_sloppy_arguments_elements =
2836 factory->NewFixedArray(2, TENURED);
2837 empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
2838 set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
2839 }
2840
2841 {
2842 Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
2843 set_empty_weak_cell(*cell);
2844 cell->clear();
2845
2846 Handle<FixedArray> cleared_optimized_code_map =
2847 factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
2848 cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
2849 *cell);
2850 STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
2851 SharedFunctionInfo::kSharedCodeIndex == 0);
2852 set_cleared_optimized_code_map(*cleared_optimized_code_map);
2853 }
2854
2855 set_detached_contexts(empty_fixed_array());
2856 set_retained_maps(ArrayList::cast(empty_fixed_array()));
2857
2858 set_weak_object_to_code_table(
2859 *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
2860 TENURED));
2861
2862 set_script_list(Smi::FromInt(0));
2863
2864 Handle<SeededNumberDictionary> slow_element_dictionary =
2865 SeededNumberDictionary::New(isolate(), 0, TENURED);
2866 slow_element_dictionary->set_requires_slow_elements();
2867 set_empty_slow_element_dictionary(*slow_element_dictionary);
2868
2869 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2870
2871 // Handling of script id generation is in Heap::NextScriptId().
2872 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2873 set_next_template_serial_number(Smi::FromInt(0));
2874
2875 // Allocate the empty script.
2876 Handle<Script> script = factory->NewScript(factory->empty_string());
2877 script->set_type(Script::TYPE_NATIVE);
2878 set_empty_script(*script);
2879
2880 Handle<PropertyCell> cell = factory->NewPropertyCell();
2881 cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
2882 set_array_protector(*cell);
2883
2884 cell = factory->NewPropertyCell();
2885 cell->set_value(the_hole_value());
2886 set_empty_property_cell(*cell);
2887
2888 cell = factory->NewPropertyCell();
2889 cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
2890 set_has_instance_protector(*cell);
2891
2892 Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
2893 handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
2894 set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
2895
2896 Handle<Cell> species_cell = factory->NewCell(
2897 handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
2898 set_species_protector(*species_cell);
2899
2900 set_serialized_templates(empty_fixed_array());
2901
2902 set_weak_stack_trace_list(Smi::FromInt(0));
2903
2904 set_noscript_shared_function_infos(Smi::FromInt(0));
2905
2906 // Initialize keyed lookup cache.
2907 isolate_->keyed_lookup_cache()->Clear();
2908
2909 // Initialize context slot cache.
2910 isolate_->context_slot_cache()->Clear();
2911
2912 // Initialize descriptor cache.
2913 isolate_->descriptor_lookup_cache()->Clear();
2914
2915 // Initialize compilation cache.
2916 isolate_->compilation_cache()->Clear();
2917
2918 CreateFixedStubs();
2919 }
2920
2921
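// The roots listed below are caches, lists and tables that keep changing
// after the heap has been set up, so generated code must not embed them as
// constants.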
2922 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2923 switch (root_index) {
2924 case kNumberStringCacheRootIndex:
2925 case kInstanceofCacheFunctionRootIndex:
2926 case kInstanceofCacheMapRootIndex:
2927 case kInstanceofCacheAnswerRootIndex:
2928 case kCodeStubsRootIndex:
2929 case kEmptyScriptRootIndex:
2930 case kSymbolRegistryRootIndex:
2931 case kScriptListRootIndex:
2932 case kMaterializedObjectsRootIndex:
2933 case kMicrotaskQueueRootIndex:
2934 case kDetachedContextsRootIndex:
2935 case kWeakObjectToCodeTableRootIndex:
2936 case kRetainedMapsRootIndex:
2937 case kNoScriptSharedFunctionInfosRootIndex:
2938 case kWeakStackTraceListRootIndex:
2939 case kSerializedTemplatesRootIndex:
2940 // Smi values
2941 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
2942 SMI_ROOT_LIST(SMI_ENTRY)
2943 #undef SMI_ENTRY
2944 // String table
2945 case kStringTableRootIndex:
2946 return true;
2947
2948 default:
2949 return false;
2950 }
2951 }
2952
2953
2954 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2955 return !RootCanBeWrittenAfterInitialization(root_index) &&
2956 !InNewSpace(root(root_index));
2957 }
2958
2959
2960 int Heap::FullSizeNumberStringCacheLength() {
2961 // Compute the size of the number string cache based on the max newspace size.
2962 // The number string cache has a minimum size based on twice the initial cache
2963 // size to ensure that it is bigger after being made 'full size'.
2964 int number_string_cache_size = max_semi_space_size_ / 512;
2965 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
2966 Min(0x4000, number_string_cache_size));
2967 // There is a string and a number per entry so the length is twice the number
2968 // of entries.
2969 return number_string_cache_size * 2;
2970 }
2971
2972
2973 void Heap::FlushNumberStringCache() {
2974 // Flush the number to string cache.
2975 int len = number_string_cache()->length();
2976 for (int i = 0; i < len; i++) {
2977 number_string_cache()->set_undefined(i);
2978 }
2979 }
2980
2981
2982 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
2983 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
2984 }
2985
2986
2987 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
2988 ExternalArrayType array_type) {
2989 switch (array_type) {
2990 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
2991 case kExternal##Type##Array: \
2992 return kFixed##Type##ArrayMapRootIndex;
2993
2994 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
2995 #undef ARRAY_TYPE_TO_ROOT_INDEX
2996
2997 default:
2998 UNREACHABLE();
2999 return kUndefinedValueRootIndex;
3000 }
3001 }
3002
3003
3004 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3005 ElementsKind elementsKind) {
3006 switch (elementsKind) {
3007 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3008 case TYPE##_ELEMENTS: \
3009 return kEmptyFixed##Type##ArrayRootIndex;
3010
3011 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3012 #undef ELEMENT_KIND_TO_ROOT_INDEX
3013 default:
3014 UNREACHABLE();
3015 return kUndefinedValueRootIndex;
3016 }
3017 }
3018
3019
3020 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3021 return FixedTypedArrayBase::cast(
3022 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3023 }
3024
3025
3026 AllocationResult Heap::AllocateForeign(Address address,
3027 PretenureFlag pretenure) {
3028 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3029 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3030 AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
3031 Foreign* result = nullptr;
3032 AllocationResult allocation = Allocate(foreign_map(), space);
3033 if (!allocation.To(&result)) return allocation;
3034 result->set_foreign_address(address);
3035 return result;
3036 }
3037
3038
3039 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3040 if (length < 0 || length > ByteArray::kMaxLength) {
3041 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3042 }
3043 int size = ByteArray::SizeFor(length);
3044 AllocationSpace space = SelectSpace(pretenure);
3045 HeapObject* result = nullptr;
3046 {
3047 AllocationResult allocation = AllocateRaw(size, space);
3048 if (!allocation.To(&result)) return allocation;
3049 }
3050
3051 result->set_map_no_write_barrier(byte_array_map());
3052 ByteArray::cast(result)->set_length(length);
3053 return result;
3054 }
3055
3056
3057 AllocationResult Heap::AllocateBytecodeArray(int length,
3058 const byte* const raw_bytecodes,
3059 int frame_size,
3060 int parameter_count,
3061 FixedArray* constant_pool) {
3062 if (length < 0 || length > BytecodeArray::kMaxLength) {
3063 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3064 }
3065   // Bytecode array is pretenured, so the constant pool array should be too.
3066 DCHECK(!InNewSpace(constant_pool));
3067
3068 int size = BytecodeArray::SizeFor(length);
3069 HeapObject* result = nullptr;
3070 {
3071 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3072 if (!allocation.To(&result)) return allocation;
3073 }
3074
3075 result->set_map_no_write_barrier(bytecode_array_map());
3076 BytecodeArray* instance = BytecodeArray::cast(result);
3077 instance->set_length(length);
3078 instance->set_frame_size(frame_size);
3079 instance->set_parameter_count(parameter_count);
3080 instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
3081 instance->set_constant_pool(constant_pool);
3082 instance->set_handler_table(empty_fixed_array());
3083 instance->set_source_position_table(empty_byte_array());
3084 CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
3085
3086 return result;
3087 }
3088
3089 void Heap::CreateFillerObjectAt(Address addr, int size,
3090 ClearRecordedSlots mode) {
3091 if (size == 0) return;
3092 HeapObject* filler = HeapObject::FromAddress(addr);
3093 if (size == kPointerSize) {
3094 filler->set_map_no_write_barrier(
3095 reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)));
3096 } else if (size == 2 * kPointerSize) {
3097 filler->set_map_no_write_barrier(
3098 reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
3099 } else {
3100 DCHECK_GT(size, 2 * kPointerSize);
3101 filler->set_map_no_write_barrier(
3102 reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
3103 FreeSpace::cast(filler)->nobarrier_set_size(size);
3104 }
3105 if (mode == ClearRecordedSlots::kYes) {
3106 ClearRecordedSlotRange(addr, addr + size);
3107 }
3108   // At this point, we may be deserializing the heap from a snapshot, in which
3109   // case none of the maps have been created yet and the filler's map is NULL.
3110 DCHECK((filler->map() == NULL && !deserialization_complete_) ||
3111 filler->map()->IsMap());
3112 }
3113
3114
3115 bool Heap::CanMoveObjectStart(HeapObject* object) {
3116 if (!FLAG_move_object_start) return false;
3117
3118 // Sampling heap profiler may have a reference to the object.
3119 if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
3120
3121 Address address = object->address();
3122
3123 if (lo_space()->Contains(object)) return false;
3124
3125 // We can move the object start if the page was already swept.
3126 return Page::FromAddress(address)->SweepingDone();
3127 }
3128
3129
3130 void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
3131 // As long as the inspected object is black and we are currently not iterating
3132 // the heap using HeapIterator, we can update the live byte count. We cannot
3133 // update while using HeapIterator because the iterator is temporarily
3134 // marking the whole object graph, without updating live bytes.
3135 if (lo_space()->Contains(object)) {
3136 lo_space()->AdjustLiveBytes(by);
3137 } else if (!in_heap_iterator() &&
3138 !mark_compact_collector()->sweeping_in_progress() &&
3139 Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
3140 if (mode == SEQUENTIAL_TO_SWEEPER) {
3141 MemoryChunk::IncrementLiveBytesFromGC(object, by);
3142 } else {
3143 MemoryChunk::IncrementLiveBytesFromMutator(object, by);
3144 }
3145 }
3146 }
3147
3148
3149 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3150 int elements_to_trim) {
3151 CHECK_NOT_NULL(object);
3152 DCHECK(!object->IsFixedTypedArrayBase());
3153 DCHECK(!object->IsByteArray());
3154 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3155 const int bytes_to_trim = elements_to_trim * element_size;
3156 Map* map = object->map();
3157
3158   // For now this trick is only applied to objects in new space and paged space.
3159   // In large object space the object's start must coincide with the chunk
3160   // start, and thus the trick is not applicable there.
3161 DCHECK(!lo_space()->Contains(object));
3162 DCHECK(object->map() != fixed_cow_array_map());
3163
3164 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3165 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3166 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3167
3168 const int len = object->length();
3169 DCHECK(elements_to_trim <= len);
3170
3171 // Calculate location of new array start.
3172 Address new_start = object->address() + bytes_to_trim;
3173
3174   // Technically in new space this write might be omitted (except for
3175   // debug mode, which iterates through the heap), but to be on the safe
3176   // side we still do it.
3177 CreateFillerObjectAt(object->address(), bytes_to_trim,
3178 ClearRecordedSlots::kYes);
3179   // Initialize header of the trimmed array. Since left trimming is only
3180   // performed on pages which are not concurrently swept, creating a filler
3181   // object does not require synchronization.
3182 DCHECK(CanMoveObjectStart(object));
3183 Object** former_start = HeapObject::RawField(object, 0);
3184 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3185 former_start[new_start_index] = map;
3186 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3187 FixedArrayBase* new_object =
3188 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3189
3190 // Remove recorded slots for the new map and length offset.
3191 ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
3192 ClearRecordedSlot(new_object, HeapObject::RawField(
3193 new_object, FixedArrayBase::kLengthOffset));
3194
3195 // Maintain consistency of live bytes during incremental marking
3196 Marking::TransferMark(this, object->address(), new_start);
3197 AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
3198
3199 // Notify the heap profiler of change in object layout.
3200 OnMoveEvent(new_object, object, new_object->Size());
3201 return new_object;
3202 }
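// A sketch of the left trim above for a FixedArray (element_size == kPointerSize)
// with elements_to_trim == 2; addresses grow to the right:
//
//   before: | map | length=N   | e0  | e1         | e2 | ... | eN-1 |
//   after:  | filler (2 words) | map | length=N-2 | e2 | ... | eN-1 |
//
// The filler covers bytes_to_trim bytes at the old object start, and the new
// header is written into the slots formerly occupied by e0 and e1.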
3203
3204
3205 // Force instantiation of templatized method.
3206 template void Heap::RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
3207 FixedArrayBase*, int);
3208 template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
3209 FixedArrayBase*, int);
3210
3211
3212 template<Heap::InvocationMode mode>
3213 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3214 const int len = object->length();
3215 DCHECK_LE(elements_to_trim, len);
3216 DCHECK_GE(elements_to_trim, 0);
3217
3218 int bytes_to_trim;
3219 if (object->IsFixedTypedArrayBase()) {
3220 InstanceType type = object->map()->instance_type();
3221 bytes_to_trim =
3222 FixedTypedArrayBase::TypedArraySize(type, len) -
3223 FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
3224 } else if (object->IsByteArray()) {
3225 int new_size = ByteArray::SizeFor(len - elements_to_trim);
3226 bytes_to_trim = ByteArray::SizeFor(len) - new_size;
3227 DCHECK_GE(bytes_to_trim, 0);
3228 } else {
3229 const int element_size =
3230 object->IsFixedArray() ? kPointerSize : kDoubleSize;
3231 bytes_to_trim = elements_to_trim * element_size;
3232 }
3233
3234
3235 // For now this trick is only applied to objects in new and paged space.
3236 DCHECK(object->map() != fixed_cow_array_map());
3237
3238 if (bytes_to_trim == 0) {
3239 // No need to create filler and update live bytes counters, just initialize
3240 // header of the trimmed array.
3241 object->synchronized_set_length(len - elements_to_trim);
3242 return;
3243 }
3244
3245 // Calculate location of new array end.
3246 Address old_end = object->address() + object->Size();
3247 Address new_end = old_end - bytes_to_trim;
3248
3249   // Technically in new space this write might be omitted (except for
3250   // debug mode, which iterates through the heap), but to be on the safe
3251   // side we still do it.
3252 // We do not create a filler for objects in large object space.
3253 // TODO(hpayer): We should shrink the large object page if the size
3254 // of the object changed significantly.
3255 if (!lo_space()->Contains(object)) {
3256 CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
3257 }
3258
3259   // Initialize header of the trimmed array. We are storing the new length
3260   // using a release store after creating a filler for the left-over space to
3261   // avoid races with the sweeper thread.
3262 object->synchronized_set_length(len - elements_to_trim);
3263
3264 // Maintain consistency of live bytes during incremental marking
3265 AdjustLiveBytes(object, -bytes_to_trim, mode);
3266
3267 // Notify the heap profiler of change in object layout. The array may not be
3268 // moved during GC, and size has to be adjusted nevertheless.
3269 HeapProfiler* profiler = isolate()->heap_profiler();
3270 if (profiler->is_tracking_allocations()) {
3271 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3272 }
3273 }
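// For example, right-trimming two elements off a plain FixedArray of length 5
// writes a 2 * kPointerSize filler over the tail, release-stores the new length
// 3, and (outside large object space) adjusts live bytes by -2 * kPointerSize.
// A call site might look like:
//   heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(array, 2);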
3274
3275
3276 AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
3277 int length, ExternalArrayType array_type, void* external_pointer,
3278 PretenureFlag pretenure) {
3279 int size = FixedTypedArrayBase::kHeaderSize;
3280 AllocationSpace space = SelectSpace(pretenure);
3281 HeapObject* result = nullptr;
3282 {
3283 AllocationResult allocation = AllocateRaw(size, space);
3284 if (!allocation.To(&result)) return allocation;
3285 }
3286
3287 result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
3288 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
3289 elements->set_base_pointer(Smi::FromInt(0), SKIP_WRITE_BARRIER);
3290 elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
3291 elements->set_length(length);
3292 return elements;
3293 }
3294
3295 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3296 ElementsKind* element_kind) {
3297 switch (array_type) {
3298 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3299 case kExternal##Type##Array: \
3300 *element_size = size; \
3301 *element_kind = TYPE##_ELEMENTS; \
3302 return;
3303
3304 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3305 #undef TYPED_ARRAY_CASE
3306
3307 default:
3308 *element_size = 0; // Bogus
3309 *element_kind = UINT8_ELEMENTS; // Bogus
3310 UNREACHABLE();
3311 }
3312 }
3313
3314
3315 AllocationResult Heap::AllocateFixedTypedArray(int length,
3316 ExternalArrayType array_type,
3317 bool initialize,
3318 PretenureFlag pretenure) {
3319 int element_size;
3320 ElementsKind elements_kind;
3321 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3322 int size = OBJECT_POINTER_ALIGN(length * element_size +
3323 FixedTypedArrayBase::kDataOffset);
3324 AllocationSpace space = SelectSpace(pretenure);
3325
3326 HeapObject* object = nullptr;
3327 AllocationResult allocation = AllocateRaw(
3328 size, space,
3329 array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
3330 if (!allocation.To(&object)) return allocation;
3331
3332 object->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
3333 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3334 elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
3335 elements->set_external_pointer(
3336 ExternalReference::fixed_typed_array_base_data_offset().address(),
3337 SKIP_WRITE_BARRIER);
3338 elements->set_length(length);
3339 if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
3340 return elements;
3341 }
3342
3343
3344 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3345 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3346 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
3347
3348 HeapObject* result = nullptr;
3349 if (!allocation.To(&result)) return allocation;
3350 if (immovable) {
3351 Address address = result->address();
3352 // Code objects which should stay at a fixed address are allocated either
3353 // in the first page of code space (objects on the first page of each space
3354 // are never moved) or in large object space.
3355 if (!code_space_->FirstPage()->Contains(address) &&
3356 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3357 // Discard the first code allocation, which was on a page where it could
3358 // be moved.
3359 CreateFillerObjectAt(result->address(), object_size,
3360 ClearRecordedSlots::kNo);
3361 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3362 if (!allocation.To(&result)) return allocation;
3363 OnAllocationEvent(result, object_size);
3364 }
3365 }
3366
3367 result->set_map_no_write_barrier(code_map());
3368 Code* code = Code::cast(result);
3369 DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
3370 DCHECK(!memory_allocator()->code_range()->valid() ||
3371 memory_allocator()->code_range()->contains(code->address()) ||
3372 object_size <= code_space()->AreaSize());
3373 code->set_gc_metadata(Smi::FromInt(0));
3374 code->set_ic_age(global_ic_age_);
3375 return code;
3376 }
3377
3378
3379 AllocationResult Heap::CopyCode(Code* code) {
3380 AllocationResult allocation;
3381
3382 HeapObject* result = nullptr;
3383 // Allocate an object the same size as the code object.
3384 int obj_size = code->Size();
3385 allocation = AllocateRaw(obj_size, CODE_SPACE);
3386 if (!allocation.To(&result)) return allocation;
3387
3388 // Copy code object.
3389 Address old_addr = code->address();
3390 Address new_addr = result->address();
3391 CopyBlock(new_addr, old_addr, obj_size);
3392 Code* new_code = Code::cast(result);
3393
3394 // Relocate the copy.
3395 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
3396 DCHECK(!memory_allocator()->code_range()->valid() ||
3397 memory_allocator()->code_range()->contains(code->address()) ||
3398 obj_size <= code_space()->AreaSize());
3399 new_code->Relocate(new_addr - old_addr);
3400 // We have to iterate over the object and process its pointers when black
3401 // allocation is on.
3402 incremental_marking()->IterateBlackObject(new_code);
3403 return new_code;
3404 }
3405
3406 AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
3407 int size = BytecodeArray::SizeFor(bytecode_array->length());
3408 HeapObject* result = nullptr;
3409 {
3410 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3411 if (!allocation.To(&result)) return allocation;
3412 }
3413
3414 result->set_map_no_write_barrier(bytecode_array_map());
3415 BytecodeArray* copy = BytecodeArray::cast(result);
3416 copy->set_length(bytecode_array->length());
3417 copy->set_frame_size(bytecode_array->frame_size());
3418 copy->set_parameter_count(bytecode_array->parameter_count());
3419 copy->set_constant_pool(bytecode_array->constant_pool());
3420 copy->set_handler_table(bytecode_array->handler_table());
3421 copy->set_source_position_table(bytecode_array->source_position_table());
3422 copy->set_interrupt_budget(bytecode_array->interrupt_budget());
3423 bytecode_array->CopyBytecodesTo(copy);
3424 return copy;
3425 }
3426
3427 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3428 AllocationSite* allocation_site) {
3429 memento->set_map_no_write_barrier(allocation_memento_map());
3430 DCHECK(allocation_site->map() == allocation_site_map());
3431 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3432 if (FLAG_allocation_site_pretenuring) {
3433 allocation_site->IncrementMementoCreateCount();
3434 }
3435 }
3436
3437
3438 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3439 AllocationSite* allocation_site) {
3440 DCHECK(gc_state_ == NOT_IN_GC);
3441 DCHECK(map->instance_type() != MAP_TYPE);
3442 int size = map->instance_size();
3443 if (allocation_site != NULL) {
3444 size += AllocationMemento::kSize;
3445 }
3446 HeapObject* result = nullptr;
3447 AllocationResult allocation = AllocateRaw(size, space);
3448 if (!allocation.To(&result)) return allocation;
3449 // No need for write barrier since object is white and map is in old space.
3450 result->set_map_no_write_barrier(map);
3451 if (allocation_site != NULL) {
3452 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3453 reinterpret_cast<Address>(result) + map->instance_size());
3454 InitializeAllocationMemento(alloc_memento, allocation_site);
3455 }
3456 return result;
3457 }
3458
3459
3460 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3461 Map* map) {
3462 obj->set_properties(properties);
3463 obj->initialize_elements();
3464 // TODO(1240798): Initialize the object's body using valid initial values
3465 // according to the object's initial map. For example, if the map's
3466 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3467 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3468 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3469   // verification code has to cope with (temporarily) invalid objects. See,
3470   // for example, JSArray::JSArrayVerify.
3471 InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
3472 }
3473
3474
3475 void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
3476 if (start_offset == map->instance_size()) return;
3477 DCHECK_LT(start_offset, map->instance_size());
3478
3479 // We cannot always fill with one_pointer_filler_map because objects
3480 // created from API functions expect their internal fields to be initialized
3481 // with undefined_value.
3482 // Pre-allocated fields need to be initialized with undefined_value as well
3483 // so that object accesses before the constructor completes (e.g. in the
3484 // debugger) will not cause a crash.
3485
3486   // In case of Array subclassing the |map| could already have been transitioned
3487   // to a different elements kind from the initial map on which we track slack.
3488 bool in_progress = map->IsInobjectSlackTrackingInProgress();
3489 Object* filler;
3490 if (in_progress) {
3491 filler = one_pointer_filler_map();
3492 } else {
3493 filler = undefined_value();
3494 }
3495 obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
3496 if (in_progress) {
3497 map->FindRootMap()->InobjectSlackTrackingStep();
3498 }
3499 }
3500
3501
3502 AllocationResult Heap::AllocateJSObjectFromMap(
3503 Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
3504 // JSFunctions should be allocated using AllocateFunction to be
3505 // properly initialized.
3506 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3507
3508 // Both types of global objects should be allocated using
3509 // AllocateGlobalObject to be properly initialized.
3510 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3511
3512 // Allocate the backing storage for the properties.
3513 FixedArray* properties = empty_fixed_array();
3514
3515 // Allocate the JSObject.
3516 AllocationSpace space = SelectSpace(pretenure);
3517 JSObject* js_obj = nullptr;
3518 AllocationResult allocation = Allocate(map, space, allocation_site);
3519 if (!allocation.To(&js_obj)) return allocation;
3520
3521 // Initialize the JSObject.
3522 InitializeJSObjectFromMap(js_obj, properties, map);
3523 DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
3524 js_obj->HasFastStringWrapperElements() ||
3525 js_obj->HasFastArgumentsElements());
3526 return js_obj;
3527 }
3528
3529
3530 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3531 PretenureFlag pretenure,
3532 AllocationSite* allocation_site) {
3533 DCHECK(constructor->has_initial_map());
3534
3535   // Allocate the object based on the constructor's initial map.
3536 AllocationResult allocation = AllocateJSObjectFromMap(
3537 constructor->initial_map(), pretenure, allocation_site);
3538 #ifdef DEBUG
3539 // Make sure result is NOT a global object if valid.
3540 HeapObject* obj = nullptr;
3541 DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
3542 #endif
3543 return allocation;
3544 }
3545
3546
3547 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3548 // Make the clone.
3549 Map* map = source->map();
3550
3551   // We can only clone regexps, normal objects, API objects, errors, or arrays.
3552 // Copying anything else will break invariants.
3553 CHECK(map->instance_type() == JS_REGEXP_TYPE ||
3554 map->instance_type() == JS_OBJECT_TYPE ||
3555 map->instance_type() == JS_ERROR_TYPE ||
3556 map->instance_type() == JS_ARRAY_TYPE ||
3557 map->instance_type() == JS_API_OBJECT_TYPE ||
3558 map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
3559
3560 int object_size = map->instance_size();
3561 HeapObject* clone = nullptr;
3562
3563 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3564
3565 int adjusted_object_size =
3566 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3567 AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
3568 if (!allocation.To(&clone)) return allocation;
3569
3570 SLOW_DCHECK(InNewSpace(clone));
3571 // Since we know the clone is allocated in new space, we can copy
3572 // the contents without worrying about updating the write barrier.
3573 CopyBlock(clone->address(), source->address(), object_size);
3574
3575 if (site != NULL) {
3576 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3577 reinterpret_cast<Address>(clone) + object_size);
3578 InitializeAllocationMemento(alloc_memento, site);
3579 }
3580
3581 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3582 source->GetElementsKind());
3583 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3584 FixedArray* properties = FixedArray::cast(source->properties());
3585 // Update elements if necessary.
3586 if (elements->length() > 0) {
3587 FixedArrayBase* elem = nullptr;
3588 {
3589 AllocationResult allocation;
3590 if (elements->map() == fixed_cow_array_map()) {
3591 allocation = FixedArray::cast(elements);
3592 } else if (source->HasFastDoubleElements()) {
3593 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3594 } else {
3595 allocation = CopyFixedArray(FixedArray::cast(elements));
3596 }
3597 if (!allocation.To(&elem)) return allocation;
3598 }
3599 JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
3600 }
3601 // Update properties if necessary.
3602 if (properties->length() > 0) {
3603 FixedArray* prop = nullptr;
3604 {
3605 AllocationResult allocation = CopyFixedArray(properties);
3606 if (!allocation.To(&prop)) return allocation;
3607 }
3608 JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
3609 }
3610 // Return the new clone.
3611 return clone;
3612 }
3613
3614
3615 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3616 int len) {
3617   // Only works for one-byte strings.
3618 DCHECK(vector.length() == len);
3619 MemCopy(chars, vector.start(), len);
3620 }
3621
3622 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3623 int len) {
3624 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3625 size_t stream_length = vector.length();
3626 while (stream_length != 0) {
3627 size_t consumed = 0;
3628 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3629 DCHECK(c != unibrow::Utf8::kBadChar);
3630 DCHECK(consumed <= stream_length);
3631 stream_length -= consumed;
3632 stream += consumed;
3633 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3634 len -= 2;
3635 if (len < 0) break;
3636 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3637 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3638 } else {
3639 len -= 1;
3640 if (len < 0) break;
3641 *chars++ = c;
3642 }
3643 }
3644 DCHECK(stream_length == 0);
3645 DCHECK(len == 0);
3646 }
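// For example, the four-byte UTF-8 sequence for U+1F600 decodes to a value above
// kMaxNonSurrogateCharCode and is therefore written as a lead/trail surrogate
// pair, consuming two units of the remaining length, whereas an ASCII byte
// consumes only one.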
3647
3648
3649 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3650 DCHECK(s->length() == len);
3651 String::WriteToFlat(s, chars, 0, len);
3652 }
3653
3654
3655 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3656 DCHECK(s->length() == len);
3657 String::WriteToFlat(s, chars, 0, len);
3658 }
3659
3660
3661 template <bool is_one_byte, typename T>
3662 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3663 uint32_t hash_field) {
3664 DCHECK(chars >= 0);
3665 // Compute map and object size.
3666 int size;
3667 Map* map;
3668
3669 DCHECK_LE(0, chars);
3670 DCHECK_GE(String::kMaxLength, chars);
3671 if (is_one_byte) {
3672 map = one_byte_internalized_string_map();
3673 size = SeqOneByteString::SizeFor(chars);
3674 } else {
3675 map = internalized_string_map();
3676 size = SeqTwoByteString::SizeFor(chars);
3677 }
3678
3679 // Allocate string.
3680 HeapObject* result = nullptr;
3681 {
3682 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3683 if (!allocation.To(&result)) return allocation;
3684 }
3685
3686 result->set_map_no_write_barrier(map);
3687 // Set length and hash fields of the allocated string.
3688 String* answer = String::cast(result);
3689 answer->set_length(chars);
3690 answer->set_hash_field(hash_field);
3691
3692 DCHECK_EQ(size, answer->Size());
3693
3694 if (is_one_byte) {
3695 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3696 } else {
3697 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3698 }
3699 return answer;
3700 }
3701
3702
3703 // Need explicit instantiations.
3704 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3705 int,
3706 uint32_t);
3707 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3708 int,
3709 uint32_t);
3710 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3711 Vector<const char>, int, uint32_t);
3712
3713
3714 AllocationResult Heap::AllocateRawOneByteString(int length,
3715 PretenureFlag pretenure) {
3716 DCHECK_LE(0, length);
3717 DCHECK_GE(String::kMaxLength, length);
3718 int size = SeqOneByteString::SizeFor(length);
3719 DCHECK(size <= SeqOneByteString::kMaxSize);
3720 AllocationSpace space = SelectSpace(pretenure);
3721
3722 HeapObject* result = nullptr;
3723 {
3724 AllocationResult allocation = AllocateRaw(size, space);
3725 if (!allocation.To(&result)) return allocation;
3726 }
3727
3728 // Partially initialize the object.
3729 result->set_map_no_write_barrier(one_byte_string_map());
3730 String::cast(result)->set_length(length);
3731 String::cast(result)->set_hash_field(String::kEmptyHashField);
3732 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3733
3734 return result;
3735 }
3736
3737
3738 AllocationResult Heap::AllocateRawTwoByteString(int length,
3739 PretenureFlag pretenure) {
3740 DCHECK_LE(0, length);
3741 DCHECK_GE(String::kMaxLength, length);
3742 int size = SeqTwoByteString::SizeFor(length);
3743 DCHECK(size <= SeqTwoByteString::kMaxSize);
3744 AllocationSpace space = SelectSpace(pretenure);
3745
3746 HeapObject* result = nullptr;
3747 {
3748 AllocationResult allocation = AllocateRaw(size, space);
3749 if (!allocation.To(&result)) return allocation;
3750 }
3751
3752 // Partially initialize the object.
3753 result->set_map_no_write_barrier(string_map());
3754 String::cast(result)->set_length(length);
3755 String::cast(result)->set_hash_field(String::kEmptyHashField);
3756 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3757 return result;
3758 }
3759
3760
3761 AllocationResult Heap::AllocateEmptyFixedArray() {
3762 int size = FixedArray::SizeFor(0);
3763 HeapObject* result = nullptr;
3764 {
3765 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3766 if (!allocation.To(&result)) return allocation;
3767 }
3768 // Initialize the object.
3769 result->set_map_no_write_barrier(fixed_array_map());
3770 FixedArray::cast(result)->set_length(0);
3771 return result;
3772 }
3773
3774
3775 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3776 if (!InNewSpace(src)) {
3777 return src;
3778 }
3779
3780 int len = src->length();
3781 HeapObject* obj = nullptr;
3782 {
3783 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3784 if (!allocation.To(&obj)) return allocation;
3785 }
3786 obj->set_map_no_write_barrier(fixed_array_map());
3787 FixedArray* result = FixedArray::cast(obj);
3788 result->set_length(len);
3789
3790 // Copy the content.
3791 DisallowHeapAllocation no_gc;
3792 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3793 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3794
3795   // TODO(mvstanton): The map is set twice because of protection against calling
3796   // set() on a COW FixedArray. Issue v8:3221 was created to track this; once it
3797   // is resolved we might be able to remove this whole method.
3798 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3799 return result;
3800 }
3801
3802
3803 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3804 ExternalArrayType array_type) {
3805 return AllocateFixedTypedArray(0, array_type, false, TENURED);
3806 }
3807
3808
3809 AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
3810 PretenureFlag pretenure) {
3811 int old_len = src->length();
3812 int new_len = old_len + grow_by;
3813 DCHECK(new_len >= old_len);
3814 HeapObject* obj = nullptr;
3815 {
3816 AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
3817 if (!allocation.To(&obj)) return allocation;
3818 }
3819
3820 obj->set_map_no_write_barrier(fixed_array_map());
3821 FixedArray* result = FixedArray::cast(obj);
3822 result->set_length(new_len);
3823
3824 // Copy the content.
3825 DisallowHeapAllocation no_gc;
3826 WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
3827 for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
3828 MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
3829 return result;
3830 }
3831
3832 AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
3833 PretenureFlag pretenure) {
3834 if (new_len == 0) return empty_fixed_array();
3835
3836 DCHECK_LE(new_len, src->length());
3837
3838 HeapObject* obj = nullptr;
3839 {
3840 AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
3841 if (!allocation.To(&obj)) return allocation;
3842 }
3843 obj->set_map_no_write_barrier(fixed_array_map());
3844
3845 FixedArray* result = FixedArray::cast(obj);
3846 result->set_length(new_len);
3847
3848 // Copy the content.
3849 DisallowHeapAllocation no_gc;
3850 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3851 for (int i = 0; i < new_len; i++) result->set(i, src->get(i), mode);
3852 return result;
3853 }
3854
3855 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3856 int len = src->length();
3857 HeapObject* obj = nullptr;
3858 {
3859 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
3860 if (!allocation.To(&obj)) return allocation;
3861 }
3862 obj->set_map_no_write_barrier(map);
3863 if (InNewSpace(obj)) {
3864 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
3865 FixedArray::SizeFor(len) - kPointerSize);
3866 return obj;
3867 }
3868 FixedArray* result = FixedArray::cast(obj);
3869 result->set_length(len);
3870
3871 // Copy the content.
3872 DisallowHeapAllocation no_gc;
3873 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3874 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3875 return result;
3876 }
3877
3878
3879 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3880 Map* map) {
3881 int len = src->length();
3882 HeapObject* obj = nullptr;
3883 {
3884 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
3885 if (!allocation.To(&obj)) return allocation;
3886 }
3887 obj->set_map_no_write_barrier(map);
3888 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
3889 src->address() + FixedDoubleArray::kLengthOffset,
3890 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
3891 return obj;
3892 }
3893
3894
3895 AllocationResult Heap::AllocateRawFixedArray(int length,
3896 PretenureFlag pretenure) {
3897 if (length < 0 || length > FixedArray::kMaxLength) {
3898 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3899 }
3900 int size = FixedArray::SizeFor(length);
3901 AllocationSpace space = SelectSpace(pretenure);
3902
3903 return AllocateRaw(size, space);
3904 }
3905
3906
3907 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
3908 PretenureFlag pretenure,
3909 Object* filler) {
3910 DCHECK(length >= 0);
3911 DCHECK(empty_fixed_array()->IsFixedArray());
3912 if (length == 0) return empty_fixed_array();
3913
3914 DCHECK(!InNewSpace(filler));
3915 HeapObject* result = nullptr;
3916 {
3917 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
3918 if (!allocation.To(&result)) return allocation;
3919 }
3920
3921 result->set_map_no_write_barrier(fixed_array_map());
3922 FixedArray* array = FixedArray::cast(result);
3923 array->set_length(length);
3924 MemsetPointer(array->data_start(), filler, length);
3925 return array;
3926 }
3927
3928
3929 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
3930 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3931 }
3932
3933
3934 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
3935 if (length == 0) return empty_fixed_array();
3936
3937 HeapObject* obj = nullptr;
3938 {
3939 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
3940 if (!allocation.To(&obj)) return allocation;
3941 }
3942
3943 obj->set_map_no_write_barrier(fixed_array_map());
3944 FixedArray::cast(obj)->set_length(length);
3945 return obj;
3946 }
3947
3948
3949 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
3950 int length, PretenureFlag pretenure) {
3951 if (length == 0) return empty_fixed_array();
3952
3953 HeapObject* elements = nullptr;
3954 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
3955 if (!allocation.To(&elements)) return allocation;
3956
3957 elements->set_map_no_write_barrier(fixed_double_array_map());
3958 FixedDoubleArray::cast(elements)->set_length(length);
3959 return elements;
3960 }
3961
3962
3963 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
3964 PretenureFlag pretenure) {
3965 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
3966 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3967 }
3968 int size = FixedDoubleArray::SizeFor(length);
3969 AllocationSpace space = SelectSpace(pretenure);
3970
3971 HeapObject* object = nullptr;
3972 {
3973 AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
3974 if (!allocation.To(&object)) return allocation;
3975 }
3976
3977 return object;
3978 }
3979
3980
3981 AllocationResult Heap::AllocateSymbol() {
3982 // Statically ensure that it is safe to allocate symbols in paged spaces.
3983 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
3984
3985 HeapObject* result = nullptr;
3986 AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
3987 if (!allocation.To(&result)) return allocation;
3988
3989 result->set_map_no_write_barrier(symbol_map());
3990
3991 // Generate a random hash value.
3992 int hash;
3993 int attempts = 0;
3994 do {
3995 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
3996 attempts++;
3997 } while (hash == 0 && attempts < 30);
3998 if (hash == 0) hash = 1; // never return 0
3999
4000 Symbol::cast(result)
4001 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4002 Symbol::cast(result)->set_name(undefined_value());
4003 Symbol::cast(result)->set_flags(0);
4004
4005 DCHECK(!Symbol::cast(result)->is_private());
4006 return result;
4007 }
4008
4009
4010 AllocationResult Heap::AllocateStruct(InstanceType type) {
4011 Map* map;
4012 switch (type) {
4013 #define MAKE_CASE(NAME, Name, name) \
4014 case NAME##_TYPE: \
4015 map = name##_map(); \
4016 break;
4017 STRUCT_LIST(MAKE_CASE)
4018 #undef MAKE_CASE
4019 default:
4020 UNREACHABLE();
4021 return exception();
4022 }
4023 int size = map->instance_size();
4024 Struct* result = nullptr;
4025 {
4026 AllocationResult allocation = Allocate(map, OLD_SPACE);
4027 if (!allocation.To(&result)) return allocation;
4028 }
4029 result->InitializeBody(size);
4030 return result;
4031 }
4032
4033
4034 bool Heap::IsHeapIterable() {
4035 // TODO(hpayer): This function is not correct. Allocation folding in old
4036 // space breaks the iterability.
4037 return new_space_top_after_last_gc_ == new_space()->top();
4038 }
4039
4040
4041 void Heap::MakeHeapIterable() {
4042 DCHECK(AllowHeapAllocation::IsAllowed());
4043 if (!IsHeapIterable()) {
4044 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4045 }
4046 if (mark_compact_collector()->sweeping_in_progress()) {
4047 mark_compact_collector()->EnsureSweepingCompleted();
4048 }
4049 DCHECK(IsHeapIterable());
4050 }
4051
4052
4053 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
4054 const double kMinMutatorUtilization = 0.0;
4055 const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
4056 if (mutator_speed == 0) return kMinMutatorUtilization;
4057 if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
4058 // Derivation:
4059 // mutator_utilization = mutator_time / (mutator_time + gc_time)
4060 // mutator_time = 1 / mutator_speed
4061 // gc_time = 1 / gc_speed
4062 // mutator_utilization = (1 / mutator_speed) /
4063 // (1 / mutator_speed + 1 / gc_speed)
4064 // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
4065 return gc_speed / (mutator_speed + gc_speed);
4066 }
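// A quick numeric check of the formula: with mutator_speed = 1000 bytes/ms and
// gc_speed = 9000 bytes/ms, the utilization is 9000 / (1000 + 9000) = 0.9, i.e.
// the mutator gets roughly 90% of the time. If gc_speed is unknown (zero), the
// conservative 200000 bytes/ms estimate is substituted instead.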
4067
4068
4069 double Heap::YoungGenerationMutatorUtilization() {
4070 double mutator_speed = static_cast<double>(
4071 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4072 double gc_speed =
4073 tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
4074 double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4075 if (FLAG_trace_mutator_utilization) {
4076 PrintIsolate(isolate(),
4077 "Young generation mutator utilization = %.3f ("
4078 "mutator_speed=%.f, gc_speed=%.f)\n",
4079 result, mutator_speed, gc_speed);
4080 }
4081 return result;
4082 }
4083
4084
4085 double Heap::OldGenerationMutatorUtilization() {
4086 double mutator_speed = static_cast<double>(
4087 tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
4088 double gc_speed = static_cast<double>(
4089 tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
4090 double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4091 if (FLAG_trace_mutator_utilization) {
4092 PrintIsolate(isolate(),
4093 "Old generation mutator utilization = %.3f ("
4094 "mutator_speed=%.f, gc_speed=%.f)\n",
4095 result, mutator_speed, gc_speed);
4096 }
4097 return result;
4098 }
4099
4100
4101 bool Heap::HasLowYoungGenerationAllocationRate() {
4102 const double high_mutator_utilization = 0.993;
4103 return YoungGenerationMutatorUtilization() > high_mutator_utilization;
4104 }
4105
4106
4107 bool Heap::HasLowOldGenerationAllocationRate() {
4108 const double high_mutator_utilization = 0.993;
4109 return OldGenerationMutatorUtilization() > high_mutator_utilization;
4110 }
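// In both predicates above, utilization = gc_speed / (mutator_speed + gc_speed)
// exceeds 0.993 only when gc_speed is at least about 142x the mutator speed
// (0.993 / 0.007 ~= 141.9), i.e. the mutator is allocating very slowly relative
// to how fast the collector can process memory.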
4111
4112
4113 bool Heap::HasLowAllocationRate() {
4114 return HasLowYoungGenerationAllocationRate() &&
4115 HasLowOldGenerationAllocationRate();
4116 }
4117
4118
4119 bool Heap::HasHighFragmentation() {
4120 intptr_t used = PromotedSpaceSizeOfObjects();
4121 intptr_t committed = CommittedOldGenerationMemory();
4122 return HasHighFragmentation(used, committed);
4123 }
4124
4125
4126 bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
4127 const intptr_t kSlack = 16 * MB;
4128 // Fragmentation is high if committed > 2 * used + kSlack.
4129   // Rewrite the expression to avoid overflow.
4130 return committed - used > used + kSlack;
4131 }
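// Equivalently, without the overflow-avoiding rewrite: committed > 2 * used +
// kSlack. For instance, with used = 100 MB and committed = 220 MB the check is
// 220 - 100 = 120 MB > 100 + 16 = 116 MB, so the heap counts as highly
// fragmented.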
4132
4133 void Heap::SetOptimizeForMemoryUsage() {
4134   // Activate the memory reducer when switching to the background if:
4135   // - there was no mark-compact since the start, and
4136   // - the committed memory can potentially be reduced.
4137   // 2 pages each for the old, code, and map space + 1 page for new space.
4138 const int kMinCommittedMemory = 7 * Page::kPageSize;
4139 if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory) {
4140 MemoryReducer::Event event;
4141 event.type = MemoryReducer::kPossibleGarbage;
4142 event.time_ms = MonotonicallyIncreasingTimeInMs();
4143 memory_reducer_->NotifyPossibleGarbage(event);
4144 }
4145 optimize_for_memory_usage_ = true;
4146 }
4147
4148 void Heap::ReduceNewSpaceSize() {
4149 // TODO(ulan): Unify this constant with the similar constant in
4150 // GCIdleTimeHandler once the change is merged to 4.5.
4151 static const size_t kLowAllocationThroughput = 1000;
4152 const double allocation_throughput =
4153 tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
4154
4155 if (FLAG_predictable) return;
4156
4157 if (ShouldReduceMemory() ||
4158 ((allocation_throughput != 0) &&
4159 (allocation_throughput < kLowAllocationThroughput))) {
4160 new_space_.Shrink();
4161 UncommitFromSpace();
4162 }
4163 }
4164
4165
4166 void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
4167 if (incremental_marking()->IsMarking() &&
4168 (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4169 (!incremental_marking()->finalize_marking_completed() &&
4170 mark_compact_collector()->marking_deque()->IsEmpty()))) {
4171 FinalizeIncrementalMarking(comment);
4172 } else if (incremental_marking()->IsComplete() ||
4173 (mark_compact_collector()->marking_deque()->IsEmpty())) {
4174 CollectAllGarbage(current_gc_flags_, comment);
4175 }
4176 }
4177
4178
4179 bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
4180 size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
4181 double final_incremental_mark_compact_speed_in_bytes_per_ms =
4182 tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
4183 if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4184 (!incremental_marking()->finalize_marking_completed() &&
4185 mark_compact_collector()->marking_deque()->IsEmpty() &&
4186 gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
4187 idle_time_in_ms))) {
4188 FinalizeIncrementalMarking(
4189 "Idle notification: finalize incremental marking");
4190 return true;
4191 } else if (incremental_marking()->IsComplete() ||
4192 (mark_compact_collector()->marking_deque()->IsEmpty() &&
4193 gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
4194 idle_time_in_ms, size_of_objects,
4195 final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4196 CollectAllGarbage(current_gc_flags_,
4197 "idle notification: finalize incremental marking");
4198 return true;
4199 }
4200 return false;
4201 }
4202
4203 void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
4204 // TODO(hpayer): We do not have to iterate reservations on black objects
4205 // for marking. We just have to execute the special visiting side effect
4206 // code that adds objects to global data structures, e.g. for array buffers.
4207
4208 // Code space, map space, and large object space do not use black pages.
4209   // Hence we have to color all objects of the reservation black first to avoid
4210   // unnecessarily loading the marking deque.
4211 if (incremental_marking()->black_allocation()) {
4212 for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) {
4213 const Heap::Reservation& res = reservations[i];
4214 for (auto& chunk : res) {
4215 Address addr = chunk.start;
4216 while (addr < chunk.end) {
4217 HeapObject* obj = HeapObject::FromAddress(addr);
4218 Marking::MarkBlack(Marking::MarkBitFrom(obj));
4219 MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
4220 addr += obj->Size();
4221 }
4222 }
4223 }
4224 for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
4225 const Heap::Reservation& res = reservations[i];
4226 for (auto& chunk : res) {
4227 Address addr = chunk.start;
4228 while (addr < chunk.end) {
4229 HeapObject* obj = HeapObject::FromAddress(addr);
4230 incremental_marking()->IterateBlackObject(obj);
4231 addr += obj->Size();
4232 }
4233 }
4234 }
4235 }
4236 }
4237
4238 GCIdleTimeHeapState Heap::ComputeHeapState() {
4239 GCIdleTimeHeapState heap_state;
4240 heap_state.contexts_disposed = contexts_disposed_;
4241 heap_state.contexts_disposal_rate =
4242 tracer()->ContextDisposalRateInMilliseconds();
4243 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4244 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4245 return heap_state;
4246 }
4247
4248
4249 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
4250 GCIdleTimeHeapState heap_state,
4251 double deadline_in_ms) {
4252 bool result = false;
4253 switch (action.type) {
4254 case DONE:
4255 result = true;
4256 break;
4257 case DO_INCREMENTAL_STEP: {
4258 if (incremental_marking()->incremental_marking_job()->IdleTaskPending()) {
4259 result = true;
4260 } else {
4261 incremental_marking()
4262 ->incremental_marking_job()
4263 ->NotifyIdleTaskProgress();
4264 result = IncrementalMarkingJob::IdleTask::Step(this, deadline_in_ms) ==
4265 IncrementalMarkingJob::IdleTask::kDone;
4266 }
4267 break;
4268 }
4269 case DO_FULL_GC: {
4270 DCHECK(contexts_disposed_ > 0);
4271 HistogramTimerScope scope(isolate_->counters()->gc_context());
4272 TRACE_EVENT0("v8", "V8.GCContext");
4273 CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
4274 break;
4275 }
4276 case DO_NOTHING:
4277 break;
4278 }
4279
4280 return result;
4281 }
4282
4283
4284 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
4285 GCIdleTimeHeapState heap_state,
4286 double start_ms, double deadline_in_ms) {
4287 double idle_time_in_ms = deadline_in_ms - start_ms;
4288 double current_time = MonotonicallyIncreasingTimeInMs();
4289 last_idle_notification_time_ = current_time;
4290 double deadline_difference = deadline_in_ms - current_time;
4291
4292 contexts_disposed_ = 0;
4293
4294 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4295 static_cast<int>(idle_time_in_ms));
4296
4297 if (deadline_in_ms - start_ms >
4298 GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
4299 int committed_memory = static_cast<int>(CommittedMemory() / KB);
4300 int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
4301 isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
4302 start_ms, committed_memory);
4303 isolate()->counters()->aggregated_memory_heap_used()->AddSample(
4304 start_ms, used_memory);
4305 }
4306
4307 if (deadline_difference >= 0) {
4308 if (action.type != DONE && action.type != DO_NOTHING) {
4309 isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4310 static_cast<int>(deadline_difference));
4311 }
4312 } else {
4313 isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4314 static_cast<int>(-deadline_difference));
4315 }
4316
4317 if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
4318 FLAG_trace_idle_notification_verbose) {
4319 PrintIsolate(isolate_, "%8.0f ms: ", isolate()->time_millis_since_init());
4320 PrintF(
4321 "Idle notification: requested idle time %.2f ms, used idle time %.2f "
4322 "ms, deadline usage %.2f ms [",
4323 idle_time_in_ms, idle_time_in_ms - deadline_difference,
4324 deadline_difference);
4325 action.Print();
4326 PrintF("]");
4327 if (FLAG_trace_idle_notification_verbose) {
4328 PrintF("[");
4329 heap_state.Print();
4330 PrintF("]");
4331 }
4332 PrintF("\n");
4333 }
4334 }
4335
4336
4337 double Heap::MonotonicallyIncreasingTimeInMs() {
4338 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4339 static_cast<double>(base::Time::kMillisecondsPerSecond);
4340 }
4341
4342
4343 bool Heap::IdleNotification(int idle_time_in_ms) {
4344 return IdleNotification(
4345 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4346 (static_cast<double>(idle_time_in_ms) /
4347 static_cast<double>(base::Time::kMillisecondsPerSecond)));
4348 }
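// For example, IdleNotification(16) turns a 16 ms idle budget into an absolute
// deadline of "now + 0.016 s" and forwards it to the deadline-based overload
// below.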
4349
4350
4351 bool Heap::IdleNotification(double deadline_in_seconds) {
4352 CHECK(HasBeenSetUp());
4353 double deadline_in_ms =
4354 deadline_in_seconds *
4355 static_cast<double>(base::Time::kMillisecondsPerSecond);
4356 HistogramTimerScope idle_notification_scope(
4357 isolate_->counters()->gc_idle_notification());
4358 TRACE_EVENT0("v8", "V8.GCIdleNotification");
4359 double start_ms = MonotonicallyIncreasingTimeInMs();
4360 double idle_time_in_ms = deadline_in_ms - start_ms;
4361
4362 tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
4363 OldGenerationAllocationCounter());
4364
4365 GCIdleTimeHeapState heap_state = ComputeHeapState();
4366
4367 GCIdleTimeAction action =
4368 gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
4369
4370 bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
4371
4372 IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
4373 return result;
4374 }
4375
4376
4377 bool Heap::RecentIdleNotificationHappened() {
4378 return (last_idle_notification_time_ +
4379 GCIdleTimeHandler::kMaxScheduledIdleTime) >
4380 MonotonicallyIncreasingTimeInMs();
4381 }
4382
4383 class MemoryPressureInterruptTask : public CancelableTask {
4384 public:
4385   explicit MemoryPressureInterruptTask(Heap* heap)
4386 : CancelableTask(heap->isolate()), heap_(heap) {}
4387
4388   virtual ~MemoryPressureInterruptTask() {}
4389
4390 private:
4391 // v8::internal::CancelableTask overrides.
4392   void RunInternal() override { heap_->CheckMemoryPressure(); }
4393
4394 Heap* heap_;
4395 DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
4396 };
4397
4398 void Heap::CheckMemoryPressure() {
4399 if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
4400 CollectGarbageOnMemoryPressure("memory pressure");
4401 } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
4402 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4403 StartIdleIncrementalMarking();
4404 }
4405 }
4406 MemoryReducer::Event event;
4407 event.type = MemoryReducer::kPossibleGarbage;
4408 event.time_ms = MonotonicallyIncreasingTimeInMs();
4409 memory_reducer_->NotifyPossibleGarbage(event);
4410 }
4411
4412 void Heap::CollectGarbageOnMemoryPressure(const char* source) {
4413 const int kGarbageThresholdInBytes = 8 * MB;
4414 const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
4415   // This constant is the maximum response time in the RAIL performance model.
4416 const double kMaxMemoryPressurePauseMs = 100;
4417
4418 double start = MonotonicallyIncreasingTimeInMs();
4419 CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
4420 source, kGCCallbackFlagCollectAllAvailableGarbage);
4421 double end = MonotonicallyIncreasingTimeInMs();
4422
4423 // Estimate how much memory we can free.
4424 int64_t potential_garbage =
4425 (CommittedMemory() - SizeOfObjects()) + external_memory_;
4426   // If we can potentially free a large amount of memory, then start a GC right
4427   // away instead of waiting for the memory reducer.
4428 if (potential_garbage >= kGarbageThresholdInBytes &&
4429 potential_garbage >=
4430 CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
4431     // If we spent less than half of the time budget, then perform a full GC.
4432     // Otherwise, start incremental marking.
4433 if (end - start < kMaxMemoryPressurePauseMs / 2) {
4434 CollectAllGarbage(
4435 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, source,
4436 kGCCallbackFlagCollectAllAvailableGarbage);
4437 } else {
4438 if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4439 StartIdleIncrementalMarking();
4440 }
4441 }
4442 }
4443 }
4444
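// MemoryPressureNotification below reacts only to transitions into kCritical
// (from a non-critical level) or from kNone to kModerate. If the isolate is
// already locked by the caller, the pressure is handled synchronously via
// CheckMemoryPressure(); otherwise a GC interrupt is requested through the
// stack guard and a MemoryPressureInterruptTask is posted to the foreground
// thread to do the same work there.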
4445 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
4446 bool is_isolate_locked) {
4447 MemoryPressureLevel previous = memory_pressure_level_.Value();
4448 memory_pressure_level_.SetValue(level);
4449 if ((previous != MemoryPressureLevel::kCritical &&
4450 level == MemoryPressureLevel::kCritical) ||
4451 (previous == MemoryPressureLevel::kNone &&
4452 level == MemoryPressureLevel::kModerate)) {
4453 if (is_isolate_locked) {
4454 CheckMemoryPressure();
4455 } else {
4456 ExecutionAccess access(isolate());
4457 isolate()->stack_guard()->RequestGC();
4458 V8::GetCurrentPlatform()->CallOnForegroundThread(
4459 reinterpret_cast<v8::Isolate*>(isolate()),
4460 new MemoryPressureInterruptTask(this));
4461 }
4462 }
4463 }
4464
4465 void Heap::CollectCodeStatistics() {
4466 PagedSpace::ResetCodeAndMetadataStatistics(isolate());
4467 // We do not look for code in new space or map space. If code somehow
4468 // ends up in those spaces, we would miss it here.
4469 code_space_->CollectCodeStatistics();
4470 old_space_->CollectCodeStatistics();
4471 lo_space_->CollectCodeStatistics();
4472 }
4473
4474 #ifdef DEBUG
4475
4476 void Heap::Print() {
4477 if (!HasBeenSetUp()) return;
4478 isolate()->PrintStack(stdout);
4479 AllSpaces spaces(this);
4480 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4481 space->Print();
4482 }
4483 }
4484
4485
4486 void Heap::ReportCodeStatistics(const char* title) {
4487 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4488 CollectCodeStatistics();
4489 PagedSpace::ReportCodeStatistics(isolate());
4490 }
4491
4492
4493 // This function expects that NewSpace's allocated objects histogram is
4494 // populated (via a call to CollectStatistics or else as a side effect of a
4495 // just-completed scavenge collection).
4496 void Heap::ReportHeapStatistics(const char* title) {
4497 USE(title);
4498 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4499 gc_count_);
4500 PrintF("old_generation_allocation_limit_ %" V8PRIdPTR "\n",
4501 old_generation_allocation_limit_);
4502
4503 PrintF("\n");
4504 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4505 isolate_->global_handles()->PrintStats();
4506 PrintF("\n");
4507
4508 PrintF("Heap statistics : ");
4509 memory_allocator()->ReportStatistics();
4510 PrintF("To space : ");
4511 new_space_.ReportStatistics();
4512 PrintF("Old space : ");
4513 old_space_->ReportStatistics();
4514 PrintF("Code space : ");
4515 code_space_->ReportStatistics();
4516 PrintF("Map space : ");
4517 map_space_->ReportStatistics();
4518 PrintF("Large object space : ");
4519 lo_space_->ReportStatistics();
4520 PrintF(">>>>>> ========================================= >>>>>>\n");
4521 }
4522
4523 #endif // DEBUG
4524
4525 bool Heap::Contains(HeapObject* value) {
4526 if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
4527 return false;
4528 }
4529 return HasBeenSetUp() &&
4530 (new_space_.ToSpaceContains(value) || old_space_->Contains(value) ||
4531 code_space_->Contains(value) || map_space_->Contains(value) ||
4532 lo_space_->Contains(value));
4533 }
4534
4535 bool Heap::ContainsSlow(Address addr) {
4536 if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
4537 return false;
4538 }
4539 return HasBeenSetUp() &&
4540 (new_space_.ToSpaceContainsSlow(addr) ||
4541 old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
4542 map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
4543 }
4544
4545 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4546 if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
4547 return false;
4548 }
4549 if (!HasBeenSetUp()) return false;
4550
4551 switch (space) {
4552 case NEW_SPACE:
4553 return new_space_.ToSpaceContains(value);
4554 case OLD_SPACE:
4555 return old_space_->Contains(value);
4556 case CODE_SPACE:
4557 return code_space_->Contains(value);
4558 case MAP_SPACE:
4559 return map_space_->Contains(value);
4560 case LO_SPACE:
4561 return lo_space_->Contains(value);
4562 }
4563 UNREACHABLE();
4564 return false;
4565 }
4566
4567 bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
4568 if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
4569 return false;
4570 }
4571 if (!HasBeenSetUp()) return false;
4572
4573 switch (space) {
4574 case NEW_SPACE:
4575 return new_space_.ToSpaceContainsSlow(addr);
4576 case OLD_SPACE:
4577 return old_space_->ContainsSlow(addr);
4578 case CODE_SPACE:
4579 return code_space_->ContainsSlow(addr);
4580 case MAP_SPACE:
4581 return map_space_->ContainsSlow(addr);
4582 case LO_SPACE:
4583 return lo_space_->ContainsSlow(addr);
4584 }
4585 UNREACHABLE();
4586 return false;
4587 }
4588
4589
4590 bool Heap::IsValidAllocationSpace(AllocationSpace space) {
4591 switch (space) {
4592 case NEW_SPACE:
4593 case OLD_SPACE:
4594 case CODE_SPACE:
4595 case MAP_SPACE:
4596 case LO_SPACE:
4597 return true;
4598 default:
4599 return false;
4600 }
4601 }
4602
4603
4604 bool Heap::RootIsImmortalImmovable(int root_index) {
4605 switch (root_index) {
4606 #define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
4607 IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
4608 #undef IMMORTAL_IMMOVABLE_ROOT
4609 #define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
4610 INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
4611 #undef INTERNALIZED_STRING
4612 #define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
4613 STRING_TYPE_LIST(STRING_TYPE)
4614 #undef STRING_TYPE
4615 return true;
4616 default:
4617 return false;
4618 }
4619 }
4620
4621
4622 #ifdef VERIFY_HEAP
4623 void Heap::Verify() {
4624 CHECK(HasBeenSetUp());
4625 HandleScope scope(isolate());
4626
4627 if (mark_compact_collector()->sweeping_in_progress()) {
4628 // We have to wait here for the sweeper threads to have an iterable heap.
4629 mark_compact_collector()->EnsureSweepingCompleted();
4630 }
4631
4632 VerifyPointersVisitor visitor;
4633 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4634
4635 VerifySmisVisitor smis_visitor;
4636 IterateSmiRoots(&smis_visitor);
4637
4638 new_space_.Verify();
4639
4640 old_space_->Verify(&visitor);
4641 map_space_->Verify(&visitor);
4642
4643 VerifyPointersVisitor no_dirty_regions_visitor;
4644 code_space_->Verify(&no_dirty_regions_visitor);
4645
4646 lo_space_->Verify();
4647
4648 mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
4649 if (FLAG_omit_map_checks_for_leaf_maps) {
4650 mark_compact_collector()->VerifyOmittedMapChecks();
4651 }
4652 }
4653 #endif
4654
4655
4656 void Heap::ZapFromSpace() {
4657 if (!new_space_.IsFromSpaceCommitted()) return;
4658 for (Page* page : NewSpacePageRange(new_space_.FromSpaceStart(),
4659 new_space_.FromSpaceEnd())) {
4660 for (Address cursor = page->area_start(), limit = page->area_end();
4661 cursor < limit; cursor += kPointerSize) {
4662 Memory::Address_at(cursor) = kFromSpaceZapValue;
4663 }
4664 }
4665 }
4666
4667 void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
4668 Address end, bool record_slots,
4669 ObjectSlotCallback callback) {
4670 Address slot_address = start;
4671 Page* page = Page::FromAddress(start);
4672
4673 while (slot_address < end) {
4674 Object** slot = reinterpret_cast<Object**>(slot_address);
4675 Object* target = *slot;
4676 if (target->IsHeapObject()) {
4677 if (Heap::InFromSpace(target)) {
4678 callback(reinterpret_cast<HeapObject**>(slot),
4679 HeapObject::cast(target));
4680 Object* new_target = *slot;
4681 if (InNewSpace(new_target)) {
4682 SLOW_DCHECK(Heap::InToSpace(new_target));
4683 SLOW_DCHECK(new_target->IsHeapObject());
4684 RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
4685 }
4686 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
4687 } else if (record_slots &&
4688 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
4689 mark_compact_collector()->RecordSlot(object, slot, target);
4690 }
4691 }
4692 slot_address += kPointerSize;
4693 }
4694 }
4695
4696 class IteratePromotedObjectsVisitor final : public ObjectVisitor {
4697 public:
4698 IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
4699 bool record_slots, ObjectSlotCallback callback)
4700 : heap_(heap),
4701 target_(target),
4702 record_slots_(record_slots),
4703 callback_(callback) {}
4704
4705 V8_INLINE void VisitPointers(Object** start, Object** end) override {
4706 heap_->IteratePromotedObjectPointers(
4707 target_, reinterpret_cast<Address>(start),
4708 reinterpret_cast<Address>(end), record_slots_, callback_);
4709 }
4710
4711 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
4712 // Black allocation requires us to process objects referenced by
4713 // promoted objects.
4714 if (heap_->incremental_marking()->black_allocation()) {
4715 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
4716 IncrementalMarking::MarkObject(heap_, code);
4717 }
4718 }
4719
4720 private:
4721 Heap* heap_;
4722 HeapObject* target_;
4723 bool record_slots_;
4724 ObjectSlotCallback callback_;
4725 };
4726
4727 void Heap::IteratePromotedObject(HeapObject* target, int size,
4728 bool was_marked_black,
4729 ObjectSlotCallback callback) {
4730 // We are not collecting slots on new space objects during mutation, thus we
4731 // have to scan for pointers to evacuation candidates when we promote
4732 // objects. However, we should not record any slots in non-black objects:
4733 // grey objects' slots would be rescanned anyway, and a white object might
4734 // not survive until the end of the collection, so recording its slots
4735 // would violate the invariant.
4736 bool record_slots = false;
4737 if (incremental_marking()->IsCompacting()) {
4738 MarkBit mark_bit = Marking::MarkBitFrom(target);
4739 record_slots = Marking::IsBlack(mark_bit);
4740 }
4741
4742 IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
4743 target->IterateBody(target->map()->instance_type(), size, &visitor);
4744
4745 // When black allocation is on, we have to visit objects (in new space) that
4746 // are not already marked black but were promoted to black pages, in order
4747 // to keep their references alive.
4748 // TODO(hpayer): Implement a special promotion visitor that incorporates
4749 // regular visiting and IteratePromotedObjectPointers.
4750 if (!was_marked_black) {
4751 if (incremental_marking()->black_allocation()) {
4752 IncrementalMarking::MarkObject(this, target->map());
4753 incremental_marking()->IterateBlackObject(target);
4754 }
4755 }
4756 }
4757
4758
4759 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4760 IterateStrongRoots(v, mode);
4761 IterateWeakRoots(v, mode);
4762 }
4763
4764
4765 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4766 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4767 v->Synchronize(VisitorSynchronization::kStringTable);
4768 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4769 // Scavenge collections have special processing for this.
4770 external_string_table_.Iterate(v);
4771 }
4772 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4773 }
4774
4775
4776 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4777 // Acquire execution access since we are going to read stack limit values.
4778 ExecutionAccess access(isolate());
4779 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4780 v->Synchronize(VisitorSynchronization::kSmiRootList);
4781 }
4782
4783
4784 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4785 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4786 v->Synchronize(VisitorSynchronization::kStrongRootList);
4787 // The serializer/deserializer iterates the root list twice, first to pick
4788 // off immortal immovable roots to make sure they end up on the first page,
4789 // and then again for the rest.
4790 if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;
4791
4792 isolate_->bootstrapper()->Iterate(v);
4793 v->Synchronize(VisitorSynchronization::kBootstrapper);
4794 isolate_->Iterate(v);
4795 v->Synchronize(VisitorSynchronization::kTop);
4796 Relocatable::Iterate(isolate_, v);
4797 v->Synchronize(VisitorSynchronization::kRelocatable);
4798 isolate_->debug()->Iterate(v);
4799 v->Synchronize(VisitorSynchronization::kDebug);
4800
4801 isolate_->compilation_cache()->Iterate(v);
4802 v->Synchronize(VisitorSynchronization::kCompilationCache);
4803
4804 // Iterate over local handles in handle scopes.
4805 isolate_->handle_scope_implementer()->Iterate(v);
4806 isolate_->IterateDeferredHandles(v);
4807 v->Synchronize(VisitorSynchronization::kHandleScope);
4808
4809 // Iterate over the builtin code objects and code stubs in the
4810 // heap. Note that it is not necessary to iterate over code objects
4811 // on scavenge collections.
4812 if (mode != VISIT_ALL_IN_SCAVENGE) {
4813 isolate_->builtins()->IterateBuiltins(v);
4814 v->Synchronize(VisitorSynchronization::kBuiltins);
4815 isolate_->interpreter()->IterateDispatchTable(v);
4816 v->Synchronize(VisitorSynchronization::kDispatchTable);
4817 }
4818
4819 // Iterate over global handles.
4820 switch (mode) {
4821 case VISIT_ONLY_STRONG_ROOT_LIST:
4822 UNREACHABLE();
4823 break;
4824 case VISIT_ONLY_STRONG:
4825 case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
4826 isolate_->global_handles()->IterateStrongRoots(v);
4827 break;
4828 case VISIT_ALL_IN_SCAVENGE:
4829 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4830 break;
4831 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4832 case VISIT_ALL:
4833 isolate_->global_handles()->IterateAllRoots(v);
4834 break;
4835 }
4836 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4837
4838 // Iterate over eternal handles.
4839 if (mode == VISIT_ALL_IN_SCAVENGE) {
4840 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4841 } else {
4842 isolate_->eternal_handles()->IterateAllRoots(v);
4843 }
4844 v->Synchronize(VisitorSynchronization::kEternalHandles);
4845
4846 // Iterate over pointers being held by inactive threads.
4847 isolate_->thread_manager()->Iterate(v);
4848 v->Synchronize(VisitorSynchronization::kThreadManager);
4849
4850 // Iterate over other strong roots (currently only identity maps).
4851 for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
4852 v->VisitPointers(list->start, list->end);
4853 }
4854 v->Synchronize(VisitorSynchronization::kStrongRoots);
4855
4856 // Iterate over the partial snapshot cache unless serializing.
4857 if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
4858 SerializerDeserializer::Iterate(isolate_, v);
4859 }
4860 // We don't do a v->Synchronize call here, because in debug mode that will
4861 // output a flag to the snapshot. However at this point the serializer and
4862 // deserializer are deliberately a little unsynchronized (see above) so the
4863 // checking of the sync flag in the snapshot would fail.
4864 }
4865
4866
4867 // TODO(1236194): Since the heap size is configurable on the command line
4868 // and through the API, we should gracefully handle the case that the heap
4869 // size is not big enough to fit all the initial objects.
4870 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4871 int max_executable_size, size_t code_range_size) {
4872 if (HasBeenSetUp()) return false;
4873
4874 // Overwrite default configuration.
4875 if (max_semi_space_size > 0) {
4876 max_semi_space_size_ = max_semi_space_size * MB;
4877 }
4878 if (max_old_space_size > 0) {
4879 max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
4880 }
4881 if (max_executable_size > 0) {
4882 max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
4883 }
4884
4885 // If max space size flags are specified, overwrite the configuration.
4886 if (FLAG_max_semi_space_size > 0) {
4887 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4888 }
4889 if (FLAG_max_old_space_size > 0) {
4890 max_old_generation_size_ =
4891 static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
4892 }
4893 if (FLAG_max_executable_size > 0) {
4894 max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
4895 }
4896
4897 if (Page::kPageSize > MB) {
4898 max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
4899 max_old_generation_size_ =
4900 ROUND_UP(max_old_generation_size_, Page::kPageSize);
4901 max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize);
4902 }
4903
4904 if (FLAG_stress_compaction) {
4905 // This will cause more frequent GCs when stressing.
4906 max_semi_space_size_ = Page::kPageSize;
4907 }
4908
4909 // The new space size must be a power of two to support single-bit testing
4910 // for containment.
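// (Sketch of the idea, not the literal implementation: with a power-of-two
// size and matching alignment, a containment check can reduce to a single
// mask-and-compare on the address, e.g. (addr & ~(size - 1)) == start. The
// actual checks live in the new space / semispace implementation.)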
4911 max_semi_space_size_ =
4912 base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
4913
4914 if (FLAG_min_semi_space_size > 0) {
4915 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4916 if (initial_semispace_size > max_semi_space_size_) {
4917 initial_semispace_size_ = max_semi_space_size_;
4918 if (FLAG_trace_gc) {
4919 PrintIsolate(isolate_,
4920 "Min semi-space size cannot be more than the maximum "
4921 "semi-space size of %d MB\n",
4922 max_semi_space_size_ / MB);
4923 }
4924 } else {
4925 initial_semispace_size_ =
4926 ROUND_UP(initial_semispace_size, Page::kPageSize);
4927 }
4928 }
4929
4930 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4931
4932 if (FLAG_semi_space_growth_factor < 2) {
4933 FLAG_semi_space_growth_factor = 2;
4934 }
4935
4936 // The old generation is paged and needs at least one page for each space.
4937 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4938 max_old_generation_size_ =
4939 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4940 max_old_generation_size_);
4941
4942 // The max executable size must be less than or equal to the max old
4943 // generation size.
4944 if (max_executable_size_ > max_old_generation_size_) {
4945 max_executable_size_ = max_old_generation_size_;
4946 }
4947
4948 if (FLAG_initial_old_space_size > 0) {
4949 initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
4950 } else {
4951 initial_old_generation_size_ =
4952 max_old_generation_size_ / kInitalOldGenerationLimitFactor;
4953 }
4954 old_generation_allocation_limit_ = initial_old_generation_size_;
4955
4956 // We rely on being able to allocate new arrays in paged spaces.
4957 DCHECK(Page::kMaxRegularHeapObjectSize >=
4958 (JSArray::kSize +
4959 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
4960 AllocationMemento::kSize));
4961
4962 code_range_size_ = code_range_size * MB;
4963
4964 configured_ = true;
4965 return true;
4966 }
4967
4968
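// The trace ring buffer holds the most recent kTraceRingBufferSize characters
// of trace output. AddToRingBuffer appends at ring_buffer_end_ and wraps to
// the beginning when the string does not fit, setting ring_buffer_full_;
// GetFromRingBuffer reconstructs the chronological order by copying the tail
// of the buffer before the head.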
4969 void Heap::AddToRingBuffer(const char* string) {
4970 size_t first_part =
4971 Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
4972 memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
4973 ring_buffer_end_ += first_part;
4974 if (first_part < strlen(string)) {
4975 ring_buffer_full_ = true;
4976 size_t second_part = strlen(string) - first_part;
4977 memcpy(trace_ring_buffer_, string + first_part, second_part);
4978 ring_buffer_end_ = second_part;
4979 }
4980 }
4981
4982
4983 void Heap::GetFromRingBuffer(char* buffer) {
4984 size_t copied = 0;
4985 if (ring_buffer_full_) {
4986 copied = kTraceRingBufferSize - ring_buffer_end_;
4987 memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
4988 }
4989 memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
4990 }
4991
4992
4993 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
4994
4995
4996 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4997 *stats->start_marker = HeapStats::kStartMarker;
4998 *stats->end_marker = HeapStats::kEndMarker;
4999 *stats->new_space_size = new_space_.SizeAsInt();
5000 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5001 *stats->old_space_size = old_space_->SizeOfObjects();
5002 *stats->old_space_capacity = old_space_->Capacity();
5003 *stats->code_space_size = code_space_->SizeOfObjects();
5004 *stats->code_space_capacity = code_space_->Capacity();
5005 *stats->map_space_size = map_space_->SizeOfObjects();
5006 *stats->map_space_capacity = map_space_->Capacity();
5007 *stats->lo_space_size = lo_space_->Size();
5008 isolate_->global_handles()->RecordStats(stats);
5009 *stats->memory_allocator_size = memory_allocator()->Size();
5010 *stats->memory_allocator_capacity =
5011 memory_allocator()->Size() + memory_allocator()->Available();
5012 *stats->os_error = base::OS::GetLastError();
5013 memory_allocator()->Available();
5014 if (take_snapshot) {
5015 HeapIterator iterator(this);
5016 for (HeapObject* obj = iterator.next(); obj != NULL;
5017 obj = iterator.next()) {
5018 InstanceType type = obj->map()->instance_type();
5019 DCHECK(0 <= type && type <= LAST_TYPE);
5020 stats->objects_per_type[type]++;
5021 stats->size_per_type[type] += obj->Size();
5022 }
5023 }
5024 if (stats->last_few_messages != NULL)
5025 GetFromRingBuffer(stats->last_few_messages);
5026 if (stats->js_stacktrace != NULL) {
5027 FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
5028 StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
5029 if (gc_state() == Heap::NOT_IN_GC) {
5030 isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
5031 } else {
5032 accumulator.Add("Cannot get stack trace in GC.");
5033 }
5034 }
5035 }
5036
5037
5038 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5039 return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5040 map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5041 }
5042
5043
5044 int64_t Heap::PromotedExternalMemorySize() {
5045 if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
5046 return external_memory_ - external_memory_at_last_mark_compact_;
5047 }
5048
5049
5050 const double Heap::kMinHeapGrowingFactor = 1.1;
5051 const double Heap::kMaxHeapGrowingFactor = 4.0;
5052 const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
5053 const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
5054 const double Heap::kTargetMutatorUtilization = 0.97;
5055
5056
5057 // Given the GC speed in bytes per ms and the allocation throughput in bytes
5058 // per ms (mutator speed), this function returns the heap growing factor that
5059 // will achieve kTargetMutatorUtilization if the GC speed and the mutator
5060 // speed remain the same until the next GC.
5061 //
5062 // For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
5063 // TM / (TM + TG), where TM is the time spent in the mutator and TG is the
5064 // time spent in the garbage collector.
5065 //
5066 // Let MU be kTargetMutatorUtilization, the desired mutator utilization for the
5067 // time-frame from the end of the current GC to the end of the next GC. Based
5068 // on the MU we can compute the heap growing factor F as
5069 //
5070 // F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
5071 //
5072 // This formula can be derived as follows.
5073 //
5074 // F = Limit / Live by definition, where Limit is the allocation limit
5075 // and Live is the size of live objects.
5076 // Let’s assume that we already know the Limit. Then:
5077 // TG = Limit / gc_speed
5078 // TM = (TM + TG) * MU, by definition of MU.
5079 // TM = TG * MU / (1 - MU)
5080 // TM = Limit * MU / (gc_speed * (1 - MU))
5081 // On the other hand, if the allocation throughput remains constant:
5082 // Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
5083 // Solving it for TM, we get
5084 // TM = (Limit - Live) / mutator_speed
5085 // Combining the two equations for TM:
5086 // (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
5087 // (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
5088 // substitute R = gc_speed / mutator_speed
5089 // (Limit - Live) = Limit * MU / (R * (1 - MU))
5090 // substitute F = Limit / Live
5091 // F - 1 = F * MU / (R * (1 - MU))
5092 // F - F * MU / (R * (1 - MU)) = 1
5093 // F * (1 - MU / (R * (1 - MU))) = 1
5094 // F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
5095 // F = R * (1 - MU) / (R * (1 - MU) - MU)
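// For intuition, plugging numbers into the formula above (with MU = 0.97):
//   R = 100:  F = 100 * 0.03 / (100 * 0.03 - 0.97) = 3 / 2.03   ~ 1.48
//   R =  40:  F =  40 * 0.03 / ( 40 * 0.03 - 0.97) = 1.2 / 0.23 ~ 5.2,
//             which the code below clamps to kMaxHeapGrowingFactor (4.0).
// For R <= MU / (1 - MU) ~ 32.3 the denominator is non-positive, i.e. the GC
// is too slow for the target utilization to be reachable, and the factor
// saturates at kMaxHeapGrowingFactor as well.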
5096 double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
5097 if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;
5098
5099 const double speed_ratio = gc_speed / mutator_speed;
5100 const double mu = kTargetMutatorUtilization;
5101
5102 const double a = speed_ratio * (1 - mu);
5103 const double b = speed_ratio * (1 - mu) - mu;
5104
5105 // The factor is a / b, but b may be tiny or non-positive (when R * (1 - MU) <= MU), so clamp to kMaxHeapGrowingFactor instead of dividing in that case.
5106 double factor =
5107 (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
5108 factor = Min(factor, kMaxHeapGrowingFactor);
5109 factor = Max(factor, kMinHeapGrowingFactor);
5110 return factor;
5111 }
5112
5113
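// CalculateOldGenerationAllocationLimit scales old_gen_size by the growing
// factor (but grows by at least kMinimumOldGenerationAllocationLimit), adds
// the new space capacity, and caps the result at the halfway point between
// old_gen_size and max_old_generation_size_. Purely illustrative numbers:
// with old_gen_size = 100 MB, factor = 2.0 and a 256 MB max old generation,
// the 200 MB raw limit is capped at (100 + 256) / 2 = 178 MB.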
5114 intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
5115 intptr_t old_gen_size) {
5116 CHECK(factor > 1.0);
5117 CHECK(old_gen_size > 0);
5118 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5119 limit = Max(limit, old_gen_size + kMinimumOldGenerationAllocationLimit);
5120 limit += new_space_.Capacity();
5121 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5122 return Min(limit, halfway_to_the_max);
5123 }
5124
5125
5126 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
5127 double gc_speed,
5128 double mutator_speed) {
5129 const double kConservativeHeapGrowingFactor = 1.3;
5130
5131 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5132
5133 if (FLAG_trace_gc_verbose) {
5134 PrintIsolate(isolate_,
5135 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
5136 "(gc=%.f, mutator=%.f)\n",
5137 factor, kTargetMutatorUtilization, gc_speed / mutator_speed,
5138 gc_speed, mutator_speed);
5139 }
5140
5141 // We set the old generation growing factor to 2 to grow the heap more
5142 // slowly on memory-constrained devices.
5143 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice ||
5144 FLAG_optimize_for_size) {
5145 factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
5146 }
5147
5148 if (memory_reducer_->ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
5149 factor = Min(factor, kConservativeHeapGrowingFactor);
5150 }
5151
5152 if (FLAG_stress_compaction || ShouldReduceMemory()) {
5153 factor = kMinHeapGrowingFactor;
5154 }
5155
5156 if (FLAG_heap_growing_percent > 0) {
5157 factor = 1.0 + FLAG_heap_growing_percent / 100.0;
5158 }
5159
5160 old_generation_allocation_limit_ =
5161 CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5162
5163 if (FLAG_trace_gc_verbose) {
5164 PrintIsolate(isolate_, "Grow: old size: %" V8PRIdPTR
5165 " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
5166 old_gen_size / KB, old_generation_allocation_limit_ / KB,
5167 factor);
5168 }
5169 }
5170
5171
5172 void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
5173 double gc_speed,
5174 double mutator_speed) {
5175 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5176 intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5177 if (limit < old_generation_allocation_limit_) {
5178 if (FLAG_trace_gc_verbose) {
5179 PrintIsolate(isolate_,
5180 "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
5181 " KB, "
5182 "new limit: %" V8PRIdPTR " KB (%.1f)\n",
5183 old_gen_size / KB, old_generation_allocation_limit_ / KB,
5184 limit / KB, factor);
5185 }
5186 old_generation_allocation_limit_ = limit;
5187 }
5188 }
5189
5190
5191 void Heap::EnableInlineAllocation() {
5192 if (!inline_allocation_disabled_) return;
5193 inline_allocation_disabled_ = false;
5194
5195 // Update inline allocation limit for new space.
5196 new_space()->UpdateInlineAllocationLimit(0);
5197 }
5198
5199
5200 void Heap::DisableInlineAllocation() {
5201 if (inline_allocation_disabled_) return;
5202 inline_allocation_disabled_ = true;
5203
5204 // Update inline allocation limit for new space.
5205 new_space()->UpdateInlineAllocationLimit(0);
5206
5207 // Update inline allocation limit for old spaces.
5208 PagedSpaces spaces(this);
5209 for (PagedSpace* space = spaces.next(); space != NULL;
5210 space = spaces.next()) {
5211 space->EmptyAllocationInfo();
5212 }
5213 }
5214
5215
5216 V8_DECLARE_ONCE(initialize_gc_once);
5217
5218 static void InitializeGCOnce() {
5219 Scavenger::Initialize();
5220 StaticScavengeVisitor<DEFAULT_PROMOTION>::Initialize();
5221 StaticScavengeVisitor<PROMOTE_MARKED>::Initialize();
5222 MarkCompactCollector::Initialize();
5223 }
5224
5225
5226 bool Heap::SetUp() {
5227 #ifdef DEBUG
5228 allocation_timeout_ = FLAG_gc_interval;
5229 #endif
5230
5231 // Initialize heap spaces and initial maps and objects. Whenever something
5232 // goes wrong, just return false. The caller should check the results and
5233 // call Heap::TearDown() to release allocated memory.
5234 //
5235 // If the heap is not yet configured (e.g. through the API), configure it.
5236 // Configuration is based on the flags new-space-size (really the semispace
5237 // size) and old-space-size if set or the initial values of semispace_size_
5238 // and old_generation_size_ otherwise.
5239 if (!configured_) {
5240 if (!ConfigureHeapDefault()) return false;
5241 }
5242
5243 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5244
5245 // Set up memory allocator.
5246 memory_allocator_ = new MemoryAllocator(isolate_);
5247 if (!memory_allocator_->SetUp(MaxReserved(), MaxExecutableSize(),
5248 code_range_size_))
5249 return false;
5250
5251 // Initialize incremental marking.
5252 incremental_marking_ = new IncrementalMarking(this);
5253
5254 // Set up new space.
5255 if (!new_space_.SetUp(initial_semispace_size_, max_semi_space_size_)) {
5256 return false;
5257 }
5258 new_space_top_after_last_gc_ = new_space()->top();
5259
5260 // Initialize old space.
5261 old_space_ = new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
5262 if (old_space_ == NULL) return false;
5263 if (!old_space_->SetUp()) return false;
5264
5265 // Initialize the code space, set its maximum capacity to the old
5266 // generation size. It needs executable memory.
5267 code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
5268 if (code_space_ == NULL) return false;
5269 if (!code_space_->SetUp()) return false;
5270
5271 // Initialize map space.
5272 map_space_ = new MapSpace(this, MAP_SPACE);
5273 if (map_space_ == NULL) return false;
5274 if (!map_space_->SetUp()) return false;
5275
5276 // The large object code space may contain code or data. We set the memory
5277 // to be non-executable here for safety, but this means we need to enable it
5278 // explicitly when allocating large code objects.
5279 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
5280 if (lo_space_ == NULL) return false;
5281 if (!lo_space_->SetUp()) return false;
5282
5283 // Set up the seed that is used to randomize the string hash function.
5284 DCHECK(hash_seed() == 0);
5285 if (FLAG_randomize_hashes) {
5286 if (FLAG_hash_seed == 0) {
5287 int rnd = isolate()->random_number_generator()->NextInt();
5288 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5289 } else {
5290 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5291 }
5292 }
5293
5294 for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
5295 i++) {
5296 deferred_counters_[i] = 0;
5297 }
5298
5299 tracer_ = new GCTracer(this);
5300
5301 scavenge_collector_ = new Scavenger(this);
5302
5303 mark_compact_collector_ = new MarkCompactCollector(this);
5304
5305 gc_idle_time_handler_ = new GCIdleTimeHandler();
5306
5307 memory_reducer_ = new MemoryReducer(this);
5308
5309 object_stats_ = new ObjectStats(this);
5310 object_stats_->ClearObjectStats(true);
5311
5312 scavenge_job_ = new ScavengeJob();
5313
5314 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5315 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5316
5317 store_buffer()->SetUp();
5318
5319 mark_compact_collector()->SetUp();
5320
5321 idle_scavenge_observer_ = new IdleScavengeObserver(
5322 *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
5323 new_space()->AddAllocationObserver(idle_scavenge_observer_);
5324
5325 return true;
5326 }
5327
5328
5329 bool Heap::CreateHeapObjects() {
5330 // Create initial maps.
5331 if (!CreateInitialMaps()) return false;
5332 CreateApiObjects();
5333
5334 // Create initial objects
5335 CreateInitialObjects();
5336 CHECK_EQ(0u, gc_count_);
5337
5338 set_native_contexts_list(undefined_value());
5339 set_allocation_sites_list(undefined_value());
5340
5341 return true;
5342 }
5343
5344
5345 void Heap::SetStackLimits() {
5346 DCHECK(isolate_ != NULL);
5347 DCHECK(isolate_ == isolate());
5348 // On 64-bit machines, pointers are generally out of range of Smis. We write
5349 // something that looks like an out-of-range Smi to the GC.
5350
5351 // Set up the special root array entries containing the stack limits.
5352 // These are actually addresses, but the tag makes the GC ignore it.
5353 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5354 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5355 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5356 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5357 }
5358
5359 void Heap::ClearStackLimits() {
5360 roots_[kStackLimitRootIndex] = Smi::FromInt(0);
5361 roots_[kRealStackLimitRootIndex] = Smi::FromInt(0);
5362 }
5363
5364 void Heap::PrintAlloctionsHash() {
5365 uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
5366 PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
5367 }
5368
5369
5370 void Heap::NotifyDeserializationComplete() {
5371 deserialization_complete_ = true;
5372 #ifdef DEBUG
5373 // All pages right after bootstrapping must be marked as never-evacuate.
5374 PagedSpaces spaces(this);
5375 for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
5376 for (Page* p : *s) {
5377 CHECK(p->NeverEvacuate());
5378 }
5379 }
5380 #endif // DEBUG
5381 }
5382
5383 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
5384 mark_compact_collector()->SetEmbedderHeapTracer(tracer);
5385 }
5386
5387 bool Heap::UsingEmbedderHeapTracer() {
5388 return mark_compact_collector()->UsingEmbedderHeapTracer();
5389 }
5390
5391 void Heap::TracePossibleWrapper(JSObject* js_object) {
5392 mark_compact_collector()->TracePossibleWrapper(js_object);
5393 }
5394
5395 void Heap::RegisterExternallyReferencedObject(Object** object) {
5396 mark_compact_collector()->RegisterExternallyReferencedObject(object);
5397 }
5398
5399 void Heap::TearDown() {
5400 #ifdef VERIFY_HEAP
5401 if (FLAG_verify_heap) {
5402 Verify();
5403 }
5404 #endif
5405
5406 UpdateMaximumCommitted();
5407
5408 if (FLAG_print_cumulative_gc_stat) {
5409 PrintF("\n");
5410 PrintF("gc_count=%d ", gc_count_);
5411 PrintF("mark_sweep_count=%d ", ms_count_);
5412 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5413 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5414 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5415 PrintF("max_alive_after_gc=%" V8PRIdPTR " ", get_max_alive_after_gc());
5416 PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
5417 PrintF("total_sweeping_time=%.1f ",
5418 tracer()->cumulative_sweeping_duration());
5419 PrintF("\n\n");
5420 }
5421
5422 if (FLAG_print_max_heap_committed) {
5423 PrintF("\n");
5424 PrintF("maximum_committed_by_heap=%" V8PRIdPTR " ",
5425 MaximumCommittedMemory());
5426 PrintF("maximum_committed_by_new_space=%" V8PRIdPTR " ",
5427 new_space_.MaximumCommittedMemory());
5428 PrintF("maximum_committed_by_old_space=%" V8PRIdPTR " ",
5429 old_space_->MaximumCommittedMemory());
5430 PrintF("maximum_committed_by_code_space=%" V8PRIdPTR " ",
5431 code_space_->MaximumCommittedMemory());
5432 PrintF("maximum_committed_by_map_space=%" V8PRIdPTR " ",
5433 map_space_->MaximumCommittedMemory());
5434 PrintF("maximum_committed_by_lo_space=%" V8PRIdPTR " ",
5435 lo_space_->MaximumCommittedMemory());
5436 PrintF("\n\n");
5437 }
5438
5439 if (FLAG_verify_predictable) {
5440 PrintAlloctionsHash();
5441 }
5442
5443 new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
5444 delete idle_scavenge_observer_;
5445 idle_scavenge_observer_ = nullptr;
5446
5447 delete scavenge_collector_;
5448 scavenge_collector_ = nullptr;
5449
5450 if (mark_compact_collector_ != nullptr) {
5451 mark_compact_collector_->TearDown();
5452 delete mark_compact_collector_;
5453 mark_compact_collector_ = nullptr;
5454 }
5455
5456 delete incremental_marking_;
5457 incremental_marking_ = nullptr;
5458
5459 delete gc_idle_time_handler_;
5460 gc_idle_time_handler_ = nullptr;
5461
5462 if (memory_reducer_ != nullptr) {
5463 memory_reducer_->TearDown();
5464 delete memory_reducer_;
5465 memory_reducer_ = nullptr;
5466 }
5467
5468 delete object_stats_;
5469 object_stats_ = nullptr;
5470
5471 delete scavenge_job_;
5472 scavenge_job_ = nullptr;
5473
5474 isolate_->global_handles()->TearDown();
5475
5476 external_string_table_.TearDown();
5477
5478 delete tracer_;
5479 tracer_ = nullptr;
5480
5481 new_space_.TearDown();
5482
5483 if (old_space_ != NULL) {
5484 delete old_space_;
5485 old_space_ = NULL;
5486 }
5487
5488 if (code_space_ != NULL) {
5489 delete code_space_;
5490 code_space_ = NULL;
5491 }
5492
5493 if (map_space_ != NULL) {
5494 delete map_space_;
5495 map_space_ = NULL;
5496 }
5497
5498 if (lo_space_ != NULL) {
5499 lo_space_->TearDown();
5500 delete lo_space_;
5501 lo_space_ = NULL;
5502 }
5503
5504 store_buffer()->TearDown();
5505
5506 memory_allocator()->TearDown();
5507
5508 StrongRootsList* next = NULL;
5509 for (StrongRootsList* list = strong_roots_list_; list; list = next) {
5510 next = list->next;
5511 delete list;
5512 }
5513 strong_roots_list_ = NULL;
5514
5515 delete memory_allocator_;
5516 memory_allocator_ = nullptr;
5517 }
5518
5519
5520 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallback callback,
5521 GCType gc_type, bool pass_isolate) {
5522 DCHECK(callback != NULL);
5523 GCCallbackPair pair(callback, gc_type, pass_isolate);
5524 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5525 return gc_prologue_callbacks_.Add(pair);
5526 }
5527
5528
5529 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallback callback) {
5530 DCHECK(callback != NULL);
5531 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5532 if (gc_prologue_callbacks_[i].callback == callback) {
5533 gc_prologue_callbacks_.Remove(i);
5534 return;
5535 }
5536 }
5537 UNREACHABLE();
5538 }
5539
5540
5541 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
5542 GCType gc_type, bool pass_isolate) {
5543 DCHECK(callback != NULL);
5544 GCCallbackPair pair(callback, gc_type, pass_isolate);
5545 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5546 return gc_epilogue_callbacks_.Add(pair);
5547 }
5548
5549
5550 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback) {
5551 DCHECK(callback != NULL);
5552 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5553 if (gc_epilogue_callbacks_[i].callback == callback) {
5554 gc_epilogue_callbacks_.Remove(i);
5555 return;
5556 }
5557 }
5558 UNREACHABLE();
5559 }
5560
5561 // TODO(ishell): Find a better place for this.
5562 void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
5563 Handle<DependentCode> dep) {
5564 DCHECK(!InNewSpace(*obj));
5565 DCHECK(!InNewSpace(*dep));
5566 Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
5567 table = WeakHashTable::Put(table, obj, dep);
5568 if (*table != weak_object_to_code_table())
5569 set_weak_object_to_code_table(*table);
5570 DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
5571 }
5572
5573
5574 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
5575 Object* dep = weak_object_to_code_table()->Lookup(obj);
5576 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5577 return DependentCode::cast(empty_fixed_array());
5578 }
5579
5580 namespace {
5581 void CompactWeakFixedArray(Object* object) {
5582 if (object->IsWeakFixedArray()) {
5583 WeakFixedArray* array = WeakFixedArray::cast(object);
5584 array->Compact<WeakFixedArray::NullCallback>();
5585 }
5586 }
5587 } // anonymous namespace
5588
5589 void Heap::CompactWeakFixedArrays() {
5590 // Find known WeakFixedArrays and compact them.
5591 HeapIterator iterator(this);
5592 for (HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
5593 if (o->IsPrototypeInfo()) {
5594 Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
5595 if (prototype_users->IsWeakFixedArray()) {
5596 WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
5597 array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
5598 }
5599 } else if (o->IsScript()) {
5600 CompactWeakFixedArray(Script::cast(o)->shared_function_infos());
5601 }
5602 }
5603 CompactWeakFixedArray(noscript_shared_function_infos());
5604 CompactWeakFixedArray(script_list());
5605 CompactWeakFixedArray(weak_stack_trace_list());
5606 }
5607
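// retained_maps() is an ArrayList of (WeakCell-of-Map, Smi age) pairs, which
// is why CompactRetainedMaps below walks it with a stride of two. Newly added
// entries start with an age of FLAG_retain_maps_for_n_gc.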
5608 void Heap::AddRetainedMap(Handle<Map> map) {
5609 Handle<WeakCell> cell = Map::WeakCellForMap(map);
5610 Handle<ArrayList> array(retained_maps(), isolate());
5611 if (array->IsFull()) {
5612 CompactRetainedMaps(*array);
5613 }
5614 array = ArrayList::Add(
5615 array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
5616 ArrayList::kReloadLengthAfterAllocation);
5617 if (*array != retained_maps()) {
5618 set_retained_maps(*array);
5619 }
5620 }
5621
5622
5623 void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
5624 DCHECK_EQ(retained_maps, this->retained_maps());
5625 int length = retained_maps->Length();
5626 int new_length = 0;
5627 int new_number_of_disposed_maps = 0;
5628 // This loop compacts the array by removing cleared weak cells.
5629 for (int i = 0; i < length; i += 2) {
5630 DCHECK(retained_maps->Get(i)->IsWeakCell());
5631 WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
5632 Object* age = retained_maps->Get(i + 1);
5633 if (cell->cleared()) continue;
5634 if (i != new_length) {
5635 retained_maps->Set(new_length, cell);
5636 retained_maps->Set(new_length + 1, age);
5637 }
5638 if (i < number_of_disposed_maps_) {
5639 new_number_of_disposed_maps += 2;
5640 }
5641 new_length += 2;
5642 }
5643 number_of_disposed_maps_ = new_number_of_disposed_maps;
5644 Object* undefined = undefined_value();
5645 for (int i = new_length; i < length; i++) {
5646 retained_maps->Clear(i, undefined);
5647 }
5648 if (new_length != length) retained_maps->SetLength(new_length);
5649 }
5650
5651 void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
5652 v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
5653 }
5654
5655 #ifdef DEBUG
5656
5657 class PrintHandleVisitor : public ObjectVisitor {
5658 public:
5659 void VisitPointers(Object** start, Object** end) override {
5660 for (Object** p = start; p < end; p++)
5661 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5662 reinterpret_cast<void*>(*p));
5663 }
5664 };
5665
5666
5667 void Heap::PrintHandles() {
5668 PrintF("Handles:\n");
5669 PrintHandleVisitor v;
5670 isolate_->handle_scope_implementer()->Iterate(&v);
5671 }
5672
5673 #endif
5674
5675 class CheckHandleCountVisitor : public ObjectVisitor {
5676 public:
5677 CheckHandleCountVisitor() : handle_count_(0) {}
5678 ~CheckHandleCountVisitor() override {
5679 CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
5680 }
5681 void VisitPointers(Object** start, Object** end) override {
5682 handle_count_ += end - start;
5683 }
5684
5685 private:
5686 ptrdiff_t handle_count_;
5687 };
5688
5689
5690 void Heap::CheckHandleCount() {
5691 CheckHandleCountVisitor v;
5692 isolate_->handle_scope_implementer()->Iterate(&v);
5693 }
5694
5695 void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
5696 if (!InNewSpace(object)) {
5697 store_buffer()->MoveEntriesToRememberedSet();
5698 Address slot_addr = reinterpret_cast<Address>(slot);
5699 Page* page = Page::FromAddress(slot_addr);
5700 DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5701 RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
5702 RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
5703 }
5704 }
5705
5706 void Heap::ClearRecordedSlotRange(Address start, Address end) {
5707 Page* page = Page::FromAddress(start);
5708 if (!page->InNewSpace()) {
5709 store_buffer()->MoveEntriesToRememberedSet();
5710 DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5711 RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end);
5712 RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end);
5713 }
5714 }
5715
5716 Space* AllSpaces::next() {
5717 switch (counter_++) {
5718 case NEW_SPACE:
5719 return heap_->new_space();
5720 case OLD_SPACE:
5721 return heap_->old_space();
5722 case CODE_SPACE:
5723 return heap_->code_space();
5724 case MAP_SPACE:
5725 return heap_->map_space();
5726 case LO_SPACE:
5727 return heap_->lo_space();
5728 default:
5729 return NULL;
5730 }
5731 }
5732
5733
5734 PagedSpace* PagedSpaces::next() {
5735 switch (counter_++) {
5736 case OLD_SPACE:
5737 return heap_->old_space();
5738 case CODE_SPACE:
5739 return heap_->code_space();
5740 case MAP_SPACE:
5741 return heap_->map_space();
5742 default:
5743 return NULL;
5744 }
5745 }
5746
5747
5748 OldSpace* OldSpaces::next() {
5749 switch (counter_++) {
5750 case OLD_SPACE:
5751 return heap_->old_space();
5752 case CODE_SPACE:
5753 return heap_->code_space();
5754 default:
5755 return NULL;
5756 }
5757 }
5758
5759
5760 SpaceIterator::SpaceIterator(Heap* heap)
5761 : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL) {}
5762
5763
5764 SpaceIterator::~SpaceIterator() {
5765 // Delete active iterator if any.
5766 delete iterator_;
5767 }
5768
5769
5770 bool SpaceIterator::has_next() {
5771 // Iterate until no more spaces.
5772 return current_space_ != LAST_SPACE;
5773 }
5774
5775
5776 ObjectIterator* SpaceIterator::next() {
5777 if (iterator_ != NULL) {
5778 delete iterator_;
5779 iterator_ = NULL;
5780 // Move to the next space
5781 current_space_++;
5782 if (current_space_ > LAST_SPACE) {
5783 return NULL;
5784 }
5785 }
5786
5787 // Return iterator for the new current space.
5788 return CreateIterator();
5789 }
5790
5791
5792 // Create an iterator for the space to iterate.
5793 ObjectIterator* SpaceIterator::CreateIterator() {
5794 DCHECK(iterator_ == NULL);
5795
5796 switch (current_space_) {
5797 case NEW_SPACE:
5798 iterator_ = new SemiSpaceIterator(heap_->new_space());
5799 break;
5800 case OLD_SPACE:
5801 iterator_ = new HeapObjectIterator(heap_->old_space());
5802 break;
5803 case CODE_SPACE:
5804 iterator_ = new HeapObjectIterator(heap_->code_space());
5805 break;
5806 case MAP_SPACE:
5807 iterator_ = new HeapObjectIterator(heap_->map_space());
5808 break;
5809 case LO_SPACE:
5810 iterator_ = new LargeObjectIterator(heap_->lo_space());
5811 break;
5812 }
5813
5814 // Return the newly allocated iterator.
5815 DCHECK(iterator_ != NULL);
5816 return iterator_;
5817 }
5818
5819
5820 class HeapObjectsFilter {
5821 public:
5822 virtual ~HeapObjectsFilter() {}
5823 virtual bool SkipObject(HeapObject* object) = 0;
5824 };
5825
5826
5827 class UnreachableObjectsFilter : public HeapObjectsFilter {
5828 public:
5829 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5830 MarkReachableObjects();
5831 }
5832
5833 ~UnreachableObjectsFilter() {
5834 heap_->mark_compact_collector()->ClearMarkbits();
5835 }
5836
5837 bool SkipObject(HeapObject* object) {
5838 if (object->IsFiller()) return true;
5839 MarkBit mark_bit = Marking::MarkBitFrom(object);
5840 return Marking::IsWhite(mark_bit);
5841 }
5842
5843 private:
5844 class MarkingVisitor : public ObjectVisitor {
5845 public:
5846 MarkingVisitor() : marking_stack_(10) {}
5847
5848 void VisitPointers(Object** start, Object** end) override {
5849 for (Object** p = start; p < end; p++) {
5850 if (!(*p)->IsHeapObject()) continue;
5851 HeapObject* obj = HeapObject::cast(*p);
5852 MarkBit mark_bit = Marking::MarkBitFrom(obj);
5853 if (Marking::IsWhite(mark_bit)) {
5854 Marking::WhiteToBlack(mark_bit);
5855 marking_stack_.Add(obj);
5856 }
5857 }
5858 }
5859
5860 void TransitiveClosure() {
5861 while (!marking_stack_.is_empty()) {
5862 HeapObject* obj = marking_stack_.RemoveLast();
5863 obj->Iterate(this);
5864 }
5865 }
5866
5867 private:
5868 List<HeapObject*> marking_stack_;
5869 };
5870
5871 void MarkReachableObjects() {
5872 MarkingVisitor visitor;
5873 heap_->IterateRoots(&visitor, VISIT_ALL);
5874 visitor.TransitiveClosure();
5875 }
5876
5877 Heap* heap_;
5878 DisallowHeapAllocation no_allocation_;
5879 };
5880
5881
5882 HeapIterator::HeapIterator(Heap* heap,
5883 HeapIterator::HeapObjectsFiltering filtering)
5884 : make_heap_iterable_helper_(heap),
5885 no_heap_allocation_(),
5886 heap_(heap),
5887 filtering_(filtering),
5888 filter_(nullptr),
5889 space_iterator_(nullptr),
5890 object_iterator_(nullptr) {
5891 heap_->heap_iterator_start();
5892 // Start the iteration.
5893 space_iterator_ = new SpaceIterator(heap_);
5894 switch (filtering_) {
5895 case kFilterUnreachable:
5896 filter_ = new UnreachableObjectsFilter(heap_);
5897 break;
5898 default:
5899 break;
5900 }
5901 object_iterator_ = space_iterator_->next();
5902 }
5903
5904
5905 HeapIterator::~HeapIterator() {
5906 heap_->heap_iterator_end();
5907 #ifdef DEBUG
5908 // Assert that in filtering mode we have iterated through all
5909 // objects. Otherwise, heap will be left in an inconsistent state.
5910 if (filtering_ != kNoFiltering) {
5911 DCHECK(object_iterator_ == nullptr);
5912 }
5913 #endif
5914 // Make sure the last iterator is deallocated.
5915 delete object_iterator_;
5916 delete space_iterator_;
5917 delete filter_;
5918 }
5919
5920
5921 HeapObject* HeapIterator::next() {
5922 if (filter_ == nullptr) return NextObject();
5923
5924 HeapObject* obj = NextObject();
5925 while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
5926 return obj;
5927 }
5928
5929
5930 HeapObject* HeapIterator::NextObject() {
5931 // No iterator means we are done.
5932 if (object_iterator_ == nullptr) return nullptr;
5933
5934 if (HeapObject* obj = object_iterator_->Next()) {
5935 // If the current iterator has more objects we are fine.
5936 return obj;
5937 } else {
5938 // Go through the spaces looking for one that has objects.
5939 while (space_iterator_->has_next()) {
5940 object_iterator_ = space_iterator_->next();
5941 if (HeapObject* obj = object_iterator_->Next()) {
5942 return obj;
5943 }
5944 }
5945 }
5946 // Done with the last space.
5947 object_iterator_ = nullptr;
5948 return nullptr;
5949 }
5950
5951
5952 #ifdef DEBUG
5953
5954 Object* const PathTracer::kAnyGlobalObject = NULL;
5955
5956 class PathTracer::MarkVisitor : public ObjectVisitor {
5957 public:
5958 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5959
5960 void VisitPointers(Object** start, Object** end) override {
5961 // Scan all HeapObject pointers in [start, end)
5962 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5963 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5964 }
5965 }
5966
5967 private:
5968 PathTracer* tracer_;
5969 };
5970
5971
5972 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5973 public:
5974 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5975
5976 void VisitPointers(Object** start, Object** end) override {
5977 // Scan all HeapObject pointers in [start, end)
5978 for (Object** p = start; p < end; p++) {
5979 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5980 }
5981 }
5982
5983 private:
5984 PathTracer* tracer_;
5985 };
5986
5987
5988 void PathTracer::VisitPointers(Object** start, Object** end) {
5989 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5990 // Visit all HeapObject pointers in [start, end)
5991 for (Object** p = start; !done && (p < end); p++) {
5992 if ((*p)->IsHeapObject()) {
5993 TracePathFrom(p);
5994 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5995 }
5996 }
5997 }
5998
5999
6000 void PathTracer::Reset() {
6001 found_target_ = false;
6002 object_stack_.Clear();
6003 }
6004
6005
6006 void PathTracer::TracePathFrom(Object** root) {
6007 DCHECK((search_target_ == kAnyGlobalObject) ||
6008 search_target_->IsHeapObject());
6009 found_target_in_trace_ = false;
6010 Reset();
6011
6012 MarkVisitor mark_visitor(this);
6013 MarkRecursively(root, &mark_visitor);
6014
6015 UnmarkVisitor unmark_visitor(this);
6016 UnmarkRecursively(root, &unmark_visitor);
6017
6018 ProcessResults();
6019 }
6020
6021
6022 static bool SafeIsNativeContext(HeapObject* obj) {
6023 return obj->map() == obj->GetHeap()->root(Heap::kNativeContextMapRootIndex);
6024 }
6025
6026
6027 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6028 if (!(*p)->IsHeapObject()) return;
6029
6030 HeapObject* obj = HeapObject::cast(*p);
6031
6032 MapWord map_word = obj->map_word();
6033 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
6034
6035 if (found_target_in_trace_) return; // stop if target found
6036 object_stack_.Add(obj);
6037 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6038 (obj == search_target_)) {
6039 found_target_in_trace_ = true;
6040 found_target_ = true;
6041 return;
6042 }
6043
6044 bool is_native_context = SafeIsNativeContext(obj);
6045
6046 // not visited yet
6047 Map* map = Map::cast(map_word.ToMap());
6048
6049 MapWord marked_map_word =
6050 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
6051 obj->set_map_word(marked_map_word);
6052
6053 // Scan the object body.
6054 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6055 // This is specialized to scan Contexts properly.
6056 Object** start =
6057 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
6058 Object** end =
6059 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
6060 Context::FIRST_WEAK_SLOT * kPointerSize);
6061 mark_visitor->VisitPointers(start, end);
6062 } else {
6063 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
6064 }
6065
6066 // Scan the map after the body because the body is a lot more interesting
6067 // when doing leak detection.
6068 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
6069
6070 if (!found_target_in_trace_) { // don't pop if found the target
6071 object_stack_.RemoveLast();
6072 }
6073 }
6074
6075
6076 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6077 if (!(*p)->IsHeapObject()) return;
6078
6079 HeapObject* obj = HeapObject::cast(*p);
6080
6081 MapWord map_word = obj->map_word();
6082 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
6083
6084 MapWord unmarked_map_word =
6085 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
6086 obj->set_map_word(unmarked_map_word);
6087
6088 Map* map = Map::cast(unmarked_map_word.ToMap());
6089
6090 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
6091
6092 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
6093 }
6094
6095
6096 void PathTracer::ProcessResults() {
6097 if (found_target_) {
6098 OFStream os(stdout);
6099 os << "=====================================\n"
6100 << "====        Path to object       ====\n"
6101 << "=====================================\n\n";
6102
6103 DCHECK(!object_stack_.is_empty());
6104 for (int i = 0; i < object_stack_.length(); i++) {
6105 if (i > 0) os << "\n |\n |\n V\n\n";
6106 object_stack_[i]->Print(os);
6107 }
6108 os << "=====================================\n";
6109 }
6110 }
6111
6112
6113 // Triggers a depth-first traversal of reachable objects from one
6114 // given root object and finds a path to a specific heap object and
6115 // prints it.
6116 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6117 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6118 tracer.VisitPointer(&root);
6119 }
6120
6121
6122 // Triggers a depth-first traversal of reachable objects from roots
6123 // and finds a path to a specific heap object and prints it.
6124 void Heap::TracePathToObject(Object* target) {
6125 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6126 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6127 }
6128
6129
6130 // Triggers a depth-first traversal of reachable objects from roots
6131 // and finds a path to any global object and prints it. Useful for
6132 // determining the source for leaks of global objects.
6133 void Heap::TracePathToGlobal() {
6134 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
6135 VISIT_ALL);
6136 IterateRoots(&tracer, VISIT_ONLY_STRONG);
6137 }
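// Example (illustrative): these helpers are DEBUG-only and are typically
// invoked by hand from a debugger or from temporary diagnostic code, with
// |suspected_leak| standing for whatever Object* is under investigation.
//
//   Heap* heap = isolate->heap();
//   heap->TracePathToGlobal();                  // path to any global object
//   heap->TracePathToObject(suspected_leak);    // path to a specific object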
6138 #endif
6139
6140
6141 void Heap::UpdateCumulativeGCStatistics(double duration,
6142 double spent_in_mutator,
6143 double marking_time) {
6144 if (FLAG_print_cumulative_gc_stat) {
6145 total_gc_time_ms_ += duration;
6146 max_gc_pause_ = Max(max_gc_pause_, duration);
6147 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
6148 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
6149 } else if (FLAG_trace_gc_verbose) {
6150 total_gc_time_ms_ += duration;
6151 }
6152
6153 marking_time_ += marking_time;
6154 }
6155
6156
6157 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
6158 DisallowHeapAllocation no_gc;
6159 // Uses only lower 32 bits if pointers are larger.
6160 uintptr_t addr_hash =
6161 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
6162 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
6163 }
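// Sketch of the hash above (same names as the code, purely conceptual): map
// pointers are word-aligned heap addresses, so their low bits carry almost no
// information and are shifted off before being mixed with the name's hash.
//
//   uintptr_t addr = reinterpret_cast<uintptr_t>(*map);
//   uint32_t hash = (static_cast<uint32_t>(addr) >> kMapHashShift) ^
//                   name->Hash();
//   int index = hash & kCapacityMask;  // wrap to the cache capacity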
6164
6165
6166 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
6167 DisallowHeapAllocation no_gc;
6168 int index = (Hash(map, name) & kHashMask);
6169 for (int i = 0; i < kEntriesPerBucket; i++) {
6170 Key& key = keys_[index + i];
6171 if ((key.map == *map) && key.name->Equals(*name)) {
6172 return field_offsets_[index + i];
6173 }
6174 }
6175 return kNotFound;
6176 }
6177
6178
6179 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
6180 int field_offset) {
6181 DisallowHeapAllocation no_gc;
6182 if (!name->IsUniqueName()) {
6183 if (!StringTable::InternalizeStringIfExists(
6184 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
6185 return;
6186 }
6187 }
6188 // This cache is cleared only between mark compact passes, so we expect the
6189 // cache to only contain old space names.
6190 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
6191
6192 int index = (Hash(map, name) & kHashMask);
6193 // After a GC there will be free slots, so we use them in order (this may
6194 // help to get the most frequently used one in position 0).
6195 for (int i = 0; i < kEntriesPerBucket; i++) {
6196 Key& key = keys_[index + i];
6197 Object* free_entry_indicator = NULL;
6198 if (key.map == free_entry_indicator) {
6199 key.map = *map;
6200 key.name = *name;
6201 field_offsets_[index + i] = field_offset;
6202 return;
6203 }
6204 }
6205 // No free entry found in this bucket, so we move them all down one and
6206 // put the new entry at position zero.
6207 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
6208 Key& key = keys_[index + i];
6209 Key& key2 = keys_[index + i - 1];
6210 key = key2;
6211 field_offsets_[index + i] = field_offsets_[index + i - 1];
6212 }
6213
6214 // Write the new first entry.
6215 Key& key = keys_[index];
6216 key.map = *map;
6217 key.name = *name;
6218 field_offsets_[index] = field_offset;
6219 }
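// Update() treats keys_[index .. index + kEntriesPerBucket - 1] as one
// bucket: a free slot is used if one is available, otherwise the bucket is
// shifted down by one and the new entry installed at position zero, so the
// most recently inserted mapping is checked first by Lookup(). A minimal
// standalone sketch of that replacement policy (Entry/Insert are illustrative
// names, not the real cache types):
//
//   struct Entry { void* map; void* name; int offset; };
//   void Insert(Entry* bucket, int entries_per_bucket, const Entry& e) {
//     for (int i = 0; i < entries_per_bucket; i++) {
//       if (bucket[i].map == nullptr) {  // free slot: use it in order
//         bucket[i] = e;
//         return;
//       }
//     }
//     for (int i = entries_per_bucket - 1; i > 0; i--) {  // evict the oldest
//       bucket[i] = bucket[i - 1];
//     }
//     bucket[0] = e;  // newest entry goes to position zero
//   }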
6220
6221
6222 void KeyedLookupCache::Clear() {
6223 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6224 }
6225
6226
6227 void DescriptorLookupCache::Clear() {
6228 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6229 }
6230
6231 void Heap::ExternalStringTable::CleanUp() {
6232 int last = 0;
6233 Isolate* isolate = heap_->isolate();
6234 for (int i = 0; i < new_space_strings_.length(); ++i) {
6235 if (new_space_strings_[i]->IsTheHole(isolate)) {
6236 continue;
6237 }
6238 DCHECK(new_space_strings_[i]->IsExternalString());
6239 if (heap_->InNewSpace(new_space_strings_[i])) {
6240 new_space_strings_[last++] = new_space_strings_[i];
6241 } else {
6242 old_space_strings_.Add(new_space_strings_[i]);
6243 }
6244 }
6245 new_space_strings_.Rewind(last);
6246 new_space_strings_.Trim();
6247
6248 last = 0;
6249 for (int i = 0; i < old_space_strings_.length(); ++i) {
6250 if (old_space_strings_[i]->IsTheHole(isolate)) {
6251 continue;
6252 }
6253 DCHECK(old_space_strings_[i]->IsExternalString());
6254 DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6255 old_space_strings_[last++] = old_space_strings_[i];
6256 }
6257 old_space_strings_.Rewind(last);
6258 old_space_strings_.Trim();
6259 #ifdef VERIFY_HEAP
6260 if (FLAG_verify_heap) {
6261 Verify();
6262 }
6263 #endif
6264 }
6265
6266 void Heap::ExternalStringTable::TearDown() {
6267 for (int i = 0; i < new_space_strings_.length(); ++i) {
6268 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6269 }
6270 new_space_strings_.Free();
6271 for (int i = 0; i < old_space_strings_.length(); ++i) {
6272 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6273 }
6274 old_space_strings_.Free();
6275 }
6276
6277
6278 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6279 uintptr_t p = reinterpret_cast<uintptr_t>(page);
6280 // Tag the page pointer to make it findable in the dump file.
6281 if (compacted) {
6282 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
6283 } else {
6284 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
6285 }
6286 remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6287 reinterpret_cast<Address>(p);
6288 remembered_unmapped_pages_index_++;
6289 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6290 }
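// The tag only flips bits inside the page (the mask keeps it below
// Page::kPageSize), so the page-aligned part of the address is preserved and
// the value stays easy to spot in a crash dump. Worked example with a 1 MB
// page size and an illustrative page start of 0x3f8e00000:
//
//   compacted:   0x3f8e00000 ^ 0xc1ead == 0x3f8ec1ead   // "cleared"
//   uncompacted: 0x3f8e00000 ^ 0x1d1ed == 0x3f8e1d1ed   // "i died"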
6291
6292
6293 void Heap::RegisterStrongRoots(Object** start, Object** end) {
6294 StrongRootsList* list = new StrongRootsList();
6295 list->next = strong_roots_list_;
6296 list->start = start;
6297 list->end = end;
6298 strong_roots_list_ = list;
6299 }
6300
6301
6302 void Heap::UnregisterStrongRoots(Object** start) {
6303 StrongRootsList* prev = NULL;
6304 StrongRootsList* list = strong_roots_list_;
6305 while (list != nullptr) {
6306 StrongRootsList* next = list->next;
6307 if (list->start == start) {
6308 if (prev) {
6309 prev->next = next;
6310 } else {
6311 strong_roots_list_ = next;
6312 }
6313 delete list;
6314 } else {
6315 prev = list;
6316 }
6317 list = next;
6318 }
6319 }
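// Example (illustrative): a client that keeps raw Object** slots alive across
// GCs registers the range as strong roots and unregisters it again before
// releasing the storage (|backing| is a hypothetical buffer, not a real
// field).
//
//   Object** backing = new Object*[16]();
//   heap->RegisterStrongRoots(backing, backing + 16);
//   // ... slots in [backing, backing + 16) are now treated as strong roots
//   heap->UnregisterStrongRoots(backing);
//   delete[] backing;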
6320
6321
6322 size_t Heap::NumberOfTrackedHeapObjectTypes() {
6323 return ObjectStats::OBJECT_STATS_COUNT;
6324 }
6325
6326
6327 size_t Heap::ObjectCountAtLastGC(size_t index) {
6328 if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
6329 return object_stats_->object_count_last_gc(index);
6330 }
6331
6332
6333 size_t Heap::ObjectSizeAtLastGC(size_t index) {
6334 if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
6335 return object_stats_->object_size_last_gc(index);
6336 }
6337
6338
6339 bool Heap::GetObjectTypeName(size_t index, const char** object_type,
6340 const char** object_sub_type) {
6341 if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
6342
6343 switch (static_cast<int>(index)) {
6344 #define COMPARE_AND_RETURN_NAME(name) \
6345 case name: \
6346 *object_type = #name; \
6347 *object_sub_type = ""; \
6348 return true;
6349 INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6350 #undef COMPARE_AND_RETURN_NAME
6351 #define COMPARE_AND_RETURN_NAME(name) \
6352 case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
6353 *object_type = "CODE_TYPE"; \
6354 *object_sub_type = "CODE_KIND/" #name; \
6355 return true;
6356 CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
6357 #undef COMPARE_AND_RETURN_NAME
6358 #define COMPARE_AND_RETURN_NAME(name) \
6359 case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
6360 *object_type = "FIXED_ARRAY_TYPE"; \
6361 *object_sub_type = #name; \
6362 return true;
6363 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6364 #undef COMPARE_AND_RETURN_NAME
6365 #define COMPARE_AND_RETURN_NAME(name) \
6366 case ObjectStats::FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - \
6367 Code::kFirstCodeAge: \
6368 *object_type = "CODE_TYPE"; \
6369 *object_sub_type = "CODE_AGE/" #name; \
6370 return true;
6371 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
6372 #undef COMPARE_AND_RETURN_NAME
6373 }
6374 return false;
6375 }
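// These statistics surface through the embedder API. A hedged sketch of how
// an embedder might read them, assuming the v8::Isolate object-statistics
// methods of this V8 version (non-zero numbers also require the
// --track-gc-object-stats flag):
//
//   v8::HeapObjectStatistics stats;
//   for (size_t i = 0; i < isolate->NumberOfTrackedHeapObjectTypes(); ++i) {
//     if (!isolate->GetHeapObjectStatisticsAtLastGC(&stats, i)) continue;
//     printf("%s %s: %zu objects, %zu bytes\n", stats.object_type(),
//            stats.object_sub_type(), stats.object_count(),
//            stats.object_size());
//   }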
6376
6377
6378 // static
6379 int Heap::GetStaticVisitorIdForMap(Map* map) {
6380 return StaticVisitorBase::GetVisitorId(map);
6381 }
6382
6383 } // namespace internal
6384 } // namespace v8
6385