1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/accessors.h"
8 #include "src/api.h"
9 #include "src/base/once.h"
10 #include "src/bootstrapper.h"
11 #include "src/codegen.h"
12 #include "src/compilation-cache.h"
13 #include "src/conversions.h"
14 #include "src/cpu-profiler.h"
15 #include "src/debug.h"
16 #include "src/deoptimizer.h"
17 #include "src/global-handles.h"
18 #include "src/heap-profiler.h"
19 #include "src/incremental-marking.h"
20 #include "src/isolate-inl.h"
21 #include "src/mark-compact.h"
22 #include "src/natives.h"
23 #include "src/objects-visiting.h"
24 #include "src/objects-visiting-inl.h"
25 #include "src/runtime-profiler.h"
26 #include "src/scopeinfo.h"
27 #include "src/snapshot.h"
28 #include "src/store-buffer.h"
29 #include "src/utils/random-number-generator.h"
30 #include "src/utils.h"
31 #include "src/v8threads.h"
32 #include "src/vm-state-inl.h"
33 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
34 #include "src/regexp-macro-assembler.h"
35 #include "src/arm/regexp-macro-assembler-arm.h"
36 #endif
37 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
38 #include "src/regexp-macro-assembler.h"
39 #include "src/mips/regexp-macro-assembler-mips.h"
40 #endif
41
42 namespace v8 {
43 namespace internal {
44
45
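// Note: the default sizes below scale with the pointer width via the
// (kPointerSize / 4) factor, so a 64-bit build gets 16 MB semispaces and a
// 1400 MB old generation by default, while a 32-bit build gets half of that.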
46 Heap::Heap()
47 : amount_of_external_allocated_memory_(0),
48 amount_of_external_allocated_memory_at_last_global_gc_(0),
49 isolate_(NULL),
50 code_range_size_(0),
51 // semispace_size_ should be a power of 2 and old_generation_size_ should be
52 // a multiple of Page::kPageSize.
53 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
54 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
55 initial_semispace_size_(Page::kPageSize),
56 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
57 max_executable_size_(256ul * (kPointerSize / 4) * MB),
58 // Variables set based on semispace_size_ and old_generation_size_ in
59 // ConfigureHeap.
60 // Will be 4 * reserved_semispace_size_ to ensure that the young
61 // generation can be aligned to its size.
62 maximum_committed_(0),
63 survived_since_last_expansion_(0),
64 sweep_generation_(0),
65 always_allocate_scope_depth_(0),
66 linear_allocation_scope_depth_(0),
67 contexts_disposed_(0),
68 global_ic_age_(0),
69 flush_monomorphic_ics_(false),
70 scan_on_scavenge_pages_(0),
71 new_space_(this),
72 old_pointer_space_(NULL),
73 old_data_space_(NULL),
74 code_space_(NULL),
75 map_space_(NULL),
76 cell_space_(NULL),
77 property_cell_space_(NULL),
78 lo_space_(NULL),
79 gc_state_(NOT_IN_GC),
80 gc_post_processing_depth_(0),
81 ms_count_(0),
82 gc_count_(0),
83 remembered_unmapped_pages_index_(0),
84 unflattened_strings_length_(0),
85 #ifdef DEBUG
86 allocation_timeout_(0),
87 #endif // DEBUG
88 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
89 old_gen_exhausted_(false),
90 inline_allocation_disabled_(false),
91 store_buffer_rebuilder_(store_buffer()),
92 hidden_string_(NULL),
93 gc_safe_size_of_old_object_(NULL),
94 total_regexp_code_generated_(0),
95 tracer_(NULL),
96 high_survival_rate_period_length_(0),
97 promoted_objects_size_(0),
98 promotion_rate_(0),
99 semi_space_copied_object_size_(0),
100 semi_space_copied_rate_(0),
101 maximum_size_scavenges_(0),
102 max_gc_pause_(0.0),
103 total_gc_time_ms_(0.0),
104 max_alive_after_gc_(0),
105 min_in_mutator_(kMaxInt),
106 alive_after_last_gc_(0),
107 last_gc_end_timestamp_(0.0),
108 marking_time_(0.0),
109 sweeping_time_(0.0),
110 mark_compact_collector_(this),
111 store_buffer_(this),
112 marking_(this),
113 incremental_marking_(this),
114 number_idle_notifications_(0),
115 last_idle_notification_gc_count_(0),
116 last_idle_notification_gc_count_init_(false),
117 mark_sweeps_since_idle_round_started_(0),
118 gc_count_at_last_idle_gc_(0),
119 scavenges_since_last_idle_round_(kIdleScavengeThreshold),
120 full_codegen_bytes_generated_(0),
121 crankshaft_codegen_bytes_generated_(0),
122 gcs_since_last_deopt_(0),
123 #ifdef VERIFY_HEAP
124 no_weak_object_verification_scope_depth_(0),
125 #endif
126 allocation_sites_scratchpad_length_(0),
127 promotion_queue_(this),
128 configured_(false),
129 external_string_table_(this),
130 chunks_queued_for_free_(NULL),
131 gc_callbacks_depth_(0) {
132 // Allow build-time customization of the max semispace size. Building
133 // V8 with snapshots and a non-default max semispace size is much
134 // easier if you can define it as part of the build environment.
135 #if defined(V8_MAX_SEMISPACE_SIZE)
136 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
137 #endif
138
139 // Ensure old_generation_size_ is a multiple of kPageSize.
140 ASSERT(MB >= Page::kPageSize);
141
142 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
143 set_native_contexts_list(NULL);
144 set_array_buffers_list(Smi::FromInt(0));
145 set_allocation_sites_list(Smi::FromInt(0));
146 set_encountered_weak_collections(Smi::FromInt(0));
147 // Put a dummy entry in the remembered pages so we can find the list in
148 // the minidump even if there are no real unmapped pages.
149 RememberUnmappedPage(NULL, false);
150
151 ClearObjectStats(true);
152 }
153
154
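// The accessors below aggregate sizes over the individual spaces; most of
// them return 0 until the heap has been set up.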
155 intptr_t Heap::Capacity() {
156 if (!HasBeenSetUp()) return 0;
157
158 return new_space_.Capacity() +
159 old_pointer_space_->Capacity() +
160 old_data_space_->Capacity() +
161 code_space_->Capacity() +
162 map_space_->Capacity() +
163 cell_space_->Capacity() +
164 property_cell_space_->Capacity();
165 }
166
167
168 intptr_t Heap::CommittedMemory() {
169 if (!HasBeenSetUp()) return 0;
170
171 return new_space_.CommittedMemory() +
172 old_pointer_space_->CommittedMemory() +
173 old_data_space_->CommittedMemory() +
174 code_space_->CommittedMemory() +
175 map_space_->CommittedMemory() +
176 cell_space_->CommittedMemory() +
177 property_cell_space_->CommittedMemory() +
178 lo_space_->Size();
179 }
180
181
182 size_t Heap::CommittedPhysicalMemory() {
183 if (!HasBeenSetUp()) return 0;
184
185 return new_space_.CommittedPhysicalMemory() +
186 old_pointer_space_->CommittedPhysicalMemory() +
187 old_data_space_->CommittedPhysicalMemory() +
188 code_space_->CommittedPhysicalMemory() +
189 map_space_->CommittedPhysicalMemory() +
190 cell_space_->CommittedPhysicalMemory() +
191 property_cell_space_->CommittedPhysicalMemory() +
192 lo_space_->CommittedPhysicalMemory();
193 }
194
195
196 intptr_t Heap::CommittedMemoryExecutable() {
197 if (!HasBeenSetUp()) return 0;
198
199 return isolate()->memory_allocator()->SizeExecutable();
200 }
201
202
203 void Heap::UpdateMaximumCommitted() {
204 if (!HasBeenSetUp()) return;
205
206 intptr_t current_committed_memory = CommittedMemory();
207 if (current_committed_memory > maximum_committed_) {
208 maximum_committed_ = current_committed_memory;
209 }
210 }
211
212
213 intptr_t Heap::Available() {
214 if (!HasBeenSetUp()) return 0;
215
216 return new_space_.Available() +
217 old_pointer_space_->Available() +
218 old_data_space_->Available() +
219 code_space_->Available() +
220 map_space_->Available() +
221 cell_space_->Available() +
222 property_cell_space_->Available();
223 }
224
225
226 bool Heap::HasBeenSetUp() {
227 return old_pointer_space_ != NULL &&
228 old_data_space_ != NULL &&
229 code_space_ != NULL &&
230 map_space_ != NULL &&
231 cell_space_ != NULL &&
232 property_cell_space_ != NULL &&
233 lo_space_ != NULL;
234 }
235
236
237 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
238 if (IntrusiveMarking::IsMarked(object)) {
239 return IntrusiveMarking::SizeOfMarkedObject(object);
240 }
241 return object->SizeFromMap(object->map());
242 }
243
244
245 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
246 const char** reason) {
247 // Is global GC requested?
248 if (space != NEW_SPACE) {
249 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
250 *reason = "GC in old space requested";
251 return MARK_COMPACTOR;
252 }
253
254 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
255 *reason = "GC in old space forced by flags";
256 return MARK_COMPACTOR;
257 }
258
259 // Is enough data promoted to justify a global GC?
260 if (OldGenerationAllocationLimitReached()) {
261 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
262 *reason = "promotion limit reached";
263 return MARK_COMPACTOR;
264 }
265
266 // Have allocation in OLD and LO failed?
267 if (old_gen_exhausted_) {
268 isolate_->counters()->
269 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
270 *reason = "old generations exhausted";
271 return MARK_COMPACTOR;
272 }
273
274 // Is there enough space left in OLD to guarantee that a scavenge can
275 // succeed?
276 //
277 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
278 // for object promotion. It counts only the bytes that the memory
279 // allocator has not yet allocated from the OS and assigned to any space,
280 // and does not count available bytes already in the old space or code
281 // space. Undercounting is safe---we may get an unrequested full GC when
282 // a scavenge would have succeeded.
283 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
284 isolate_->counters()->
285 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
286 *reason = "scavenge might not succeed";
287 return MARK_COMPACTOR;
288 }
289
290 // Default
291 *reason = NULL;
292 return SCAVENGER;
293 }
294
295
296 // TODO(1238405): Combine the infrastructure for --heap-stats and
297 // --log-gc to avoid the complicated preprocessor and flag testing.
298 void Heap::ReportStatisticsBeforeGC() {
299 // Heap::ReportHeapStatistics will also log NewSpace statistics when
300 // --log-gc is set. The following logic is used to avoid double
301 // logging.
302 #ifdef DEBUG
303 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
304 if (FLAG_heap_stats) {
305 ReportHeapStatistics("Before GC");
306 } else if (FLAG_log_gc) {
307 new_space_.ReportStatistics();
308 }
309 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
310 #else
311 if (FLAG_log_gc) {
312 new_space_.CollectStatistics();
313 new_space_.ReportStatistics();
314 new_space_.ClearHistograms();
315 }
316 #endif // DEBUG
317 }
318
319
320 void Heap::PrintShortHeapStatistics() {
321 if (!FLAG_trace_gc_verbose) return;
322 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB"
323 ", available: %6" V8_PTR_PREFIX "d KB\n",
324 isolate_->memory_allocator()->Size() / KB,
325 isolate_->memory_allocator()->Available() / KB);
326 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB"
327 ", available: %6" V8_PTR_PREFIX "d KB"
328 ", committed: %6" V8_PTR_PREFIX "d KB\n",
329 new_space_.Size() / KB,
330 new_space_.Available() / KB,
331 new_space_.CommittedMemory() / KB);
332 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB"
333 ", available: %6" V8_PTR_PREFIX "d KB"
334 ", committed: %6" V8_PTR_PREFIX "d KB\n",
335 old_pointer_space_->SizeOfObjects() / KB,
336 old_pointer_space_->Available() / KB,
337 old_pointer_space_->CommittedMemory() / KB);
338 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB"
339 ", available: %6" V8_PTR_PREFIX "d KB"
340 ", committed: %6" V8_PTR_PREFIX "d KB\n",
341 old_data_space_->SizeOfObjects() / KB,
342 old_data_space_->Available() / KB,
343 old_data_space_->CommittedMemory() / KB);
344 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB"
345 ", available: %6" V8_PTR_PREFIX "d KB"
346 ", committed: %6" V8_PTR_PREFIX "d KB\n",
347 code_space_->SizeOfObjects() / KB,
348 code_space_->Available() / KB,
349 code_space_->CommittedMemory() / KB);
350 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB"
351 ", available: %6" V8_PTR_PREFIX "d KB"
352 ", committed: %6" V8_PTR_PREFIX "d KB\n",
353 map_space_->SizeOfObjects() / KB,
354 map_space_->Available() / KB,
355 map_space_->CommittedMemory() / KB);
356 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB"
357 ", available: %6" V8_PTR_PREFIX "d KB"
358 ", committed: %6" V8_PTR_PREFIX "d KB\n",
359 cell_space_->SizeOfObjects() / KB,
360 cell_space_->Available() / KB,
361 cell_space_->CommittedMemory() / KB);
362 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
363 ", available: %6" V8_PTR_PREFIX "d KB"
364 ", committed: %6" V8_PTR_PREFIX "d KB\n",
365 property_cell_space_->SizeOfObjects() / KB,
366 property_cell_space_->Available() / KB,
367 property_cell_space_->CommittedMemory() / KB);
368 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
369 ", available: %6" V8_PTR_PREFIX "d KB"
370 ", committed: %6" V8_PTR_PREFIX "d KB\n",
371 lo_space_->SizeOfObjects() / KB,
372 lo_space_->Available() / KB,
373 lo_space_->CommittedMemory() / KB);
374 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB"
375 ", available: %6" V8_PTR_PREFIX "d KB"
376 ", committed: %6" V8_PTR_PREFIX "d KB\n",
377 this->SizeOfObjects() / KB,
378 this->Available() / KB,
379 this->CommittedMemory() / KB);
380 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
381 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
382 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
383 }
384
385
386 // TODO(1238405): Combine the infrastructure for --heap-stats and
387 // --log-gc to avoid the complicated preprocessor and flag testing.
388 void Heap::ReportStatisticsAfterGC() {
389 // As with the statistics reported before GC, some care is needed to ensure
390 // that NewSpace statistics are logged exactly once when --log-gc is turned on.
391 #if defined(DEBUG)
392 if (FLAG_heap_stats) {
393 new_space_.CollectStatistics();
394 ReportHeapStatistics("After GC");
395 } else if (FLAG_log_gc) {
396 new_space_.ReportStatistics();
397 }
398 #else
399 if (FLAG_log_gc) new_space_.ReportStatistics();
400 #endif // DEBUG
401 }
402
403
404 void Heap::GarbageCollectionPrologue() {
405 { AllowHeapAllocation for_the_first_part_of_prologue;
406 ClearJSFunctionResultCaches();
407 gc_count_++;
408 unflattened_strings_length_ = 0;
409
410 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
411 mark_compact_collector()->EnableCodeFlushing(true);
412 }
413
414 #ifdef VERIFY_HEAP
415 if (FLAG_verify_heap) {
416 Verify();
417 }
418 #endif
419 }
420
421 // Reset GC statistics.
422 promoted_objects_size_ = 0;
423 semi_space_copied_object_size_ = 0;
424
425 UpdateMaximumCommitted();
426
427 #ifdef DEBUG
428 ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
429
430 if (FLAG_gc_verbose) Print();
431
432 ReportStatisticsBeforeGC();
433 #endif // DEBUG
434
435 store_buffer()->GCPrologue();
436
437 if (isolate()->concurrent_osr_enabled()) {
438 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
439 }
440
441 if (new_space_.IsAtMaximumCapacity()) {
442 maximum_size_scavenges_++;
443 } else {
444 maximum_size_scavenges_ = 0;
445 }
446 CheckNewSpaceExpansionCriteria();
447 }
448
449
450 intptr_t Heap::SizeOfObjects() {
451 intptr_t total = 0;
452 AllSpaces spaces(this);
453 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
454 total += space->SizeOfObjects();
455 }
456 return total;
457 }
458
459
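// Clears all inline caches of the given kind by walking every code object in
// the code space and resetting the ICs embedded in full-codegen and
// optimized code.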
460 void Heap::ClearAllICsByKind(Code::Kind kind) {
461 HeapObjectIterator it(code_space());
462
463 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
464 Code* code = Code::cast(object);
465 Code::Kind current_kind = code->kind();
466 if (current_kind == Code::FUNCTION ||
467 current_kind == Code::OPTIMIZED_FUNCTION) {
468 code->ClearInlineCaches(kind);
469 }
470 }
471 }
472
473
474 void Heap::RepairFreeListsAfterBoot() {
475 PagedSpaces spaces(this);
476 for (PagedSpace* space = spaces.next();
477 space != NULL;
478 space = spaces.next()) {
479 space->RepairFreeListsAfterBoot();
480 }
481 }
482
483
484 void Heap::ProcessPretenuringFeedback() {
485 if (FLAG_allocation_site_pretenuring) {
486 int tenure_decisions = 0;
487 int dont_tenure_decisions = 0;
488 int allocation_mementos_found = 0;
489 int allocation_sites = 0;
490 int active_allocation_sites = 0;
491
492 // If the scratchpad overflowed, we have to iterate over the allocation
493 // sites list.
494 // TODO(hpayer): We iterate over the whole list of allocation sites when
495 // we have grown to the maximum semi-space size, in order to deopt
496 // maybe-tenured allocation sites. We could hold the maybe-tenured
497 // allocation sites in a separate data structure if this is a performance problem.
498 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
499 bool use_scratchpad =
500 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
501 !deopt_maybe_tenured;
502
503 int i = 0;
504 Object* list_element = allocation_sites_list();
505 bool trigger_deoptimization = false;
506 bool maximum_size_scavenge = MaximumSizeScavenge();
507 while (use_scratchpad ?
508 i < allocation_sites_scratchpad_length_ :
509 list_element->IsAllocationSite()) {
510 AllocationSite* site = use_scratchpad ?
511 AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
512 AllocationSite::cast(list_element);
513 allocation_mementos_found += site->memento_found_count();
514 if (site->memento_found_count() > 0) {
515 active_allocation_sites++;
516 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
517 trigger_deoptimization = true;
518 }
519 if (site->GetPretenureMode() == TENURED) {
520 tenure_decisions++;
521 } else {
522 dont_tenure_decisions++;
523 }
524 allocation_sites++;
525 }
526
527 if (deopt_maybe_tenured && site->IsMaybeTenure()) {
528 site->set_deopt_dependent_code(true);
529 trigger_deoptimization = true;
530 }
531
532 if (use_scratchpad) {
533 i++;
534 } else {
535 list_element = site->weak_next();
536 }
537 }
538
539 if (trigger_deoptimization) {
540 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
541 }
542
543 FlushAllocationSitesScratchpad();
544
545 if (FLAG_trace_pretenuring_statistics &&
546 (allocation_mementos_found > 0 ||
547 tenure_decisions > 0 ||
548 dont_tenure_decisions > 0)) {
549 PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
550 "#mementos, #tenure decisions, #donttenure decisions) "
551 "(%s, %d, %d, %d, %d, %d)\n",
552 use_scratchpad ? "use scratchpad" : "use list",
553 allocation_sites,
554 active_allocation_sites,
555 allocation_mementos_found,
556 tenure_decisions,
557 dont_tenure_decisions);
558 }
559 }
560 }
561
562
563 void Heap::DeoptMarkedAllocationSites() {
564 // TODO(hpayer): If iterating over the allocation sites list becomes a
565 // performance issue, use a cache heap data structure instead (similar to the
566 // allocation sites scratchpad).
567 Object* list_element = allocation_sites_list();
568 while (list_element->IsAllocationSite()) {
569 AllocationSite* site = AllocationSite::cast(list_element);
570 if (site->deopt_dependent_code()) {
571 site->dependent_code()->MarkCodeForDeoptimization(
572 isolate_,
573 DependentCode::kAllocationSiteTenuringChangedGroup);
574 site->set_deopt_dependent_code(false);
575 }
576 list_element = site->weak_next();
577 }
578 Deoptimizer::DeoptimizeMarkedCode(isolate_);
579 }
580
581
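// Post-GC bookkeeping: zaps from-space when requested, digests pretenuring
// feedback, and publishes a large set of usage and fragmentation counters
// for every space.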
582 void Heap::GarbageCollectionEpilogue() {
583 store_buffer()->GCEpilogue();
584
585 // In release mode, we only zap the from space under heap verification.
586 if (Heap::ShouldZapGarbage()) {
587 ZapFromSpace();
588 }
589
590 // Process pretenuring feedback and update allocation sites.
591 ProcessPretenuringFeedback();
592
593 #ifdef VERIFY_HEAP
594 if (FLAG_verify_heap) {
595 Verify();
596 }
597 #endif
598
599 AllowHeapAllocation for_the_rest_of_the_epilogue;
600
601 #ifdef DEBUG
602 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
603 if (FLAG_print_handles) PrintHandles();
604 if (FLAG_gc_verbose) Print();
605 if (FLAG_code_stats) ReportCodeStatistics("After GC");
606 #endif
607 if (FLAG_deopt_every_n_garbage_collections > 0) {
608 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
609 // the topmost optimized frame can be deoptimized safely, because it
610 // might not have a lazy bailout point right after its current PC.
611 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
612 Deoptimizer::DeoptimizeAll(isolate());
613 gcs_since_last_deopt_ = 0;
614 }
615 }
616
617 UpdateMaximumCommitted();
618
619 isolate_->counters()->alive_after_last_gc()->Set(
620 static_cast<int>(SizeOfObjects()));
621
622 isolate_->counters()->string_table_capacity()->Set(
623 string_table()->Capacity());
624 isolate_->counters()->number_of_symbols()->Set(
625 string_table()->NumberOfElements());
626
627 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
628 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
629 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
630 (crankshaft_codegen_bytes_generated_
631 + full_codegen_bytes_generated_)));
632 }
633
634 if (CommittedMemory() > 0) {
635 isolate_->counters()->external_fragmentation_total()->AddSample(
636 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
637
638 isolate_->counters()->heap_fraction_new_space()->
639 AddSample(static_cast<int>(
640 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
641 isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
642 static_cast<int>(
643 (old_pointer_space()->CommittedMemory() * 100.0) /
644 CommittedMemory()));
645 isolate_->counters()->heap_fraction_old_data_space()->AddSample(
646 static_cast<int>(
647 (old_data_space()->CommittedMemory() * 100.0) /
648 CommittedMemory()));
649 isolate_->counters()->heap_fraction_code_space()->
650 AddSample(static_cast<int>(
651 (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
652 isolate_->counters()->heap_fraction_map_space()->AddSample(
653 static_cast<int>(
654 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
655 isolate_->counters()->heap_fraction_cell_space()->AddSample(
656 static_cast<int>(
657 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
658 isolate_->counters()->heap_fraction_property_cell_space()->
659 AddSample(static_cast<int>(
660 (property_cell_space()->CommittedMemory() * 100.0) /
661 CommittedMemory()));
662 isolate_->counters()->heap_fraction_lo_space()->
663 AddSample(static_cast<int>(
664 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
665
666 isolate_->counters()->heap_sample_total_committed()->AddSample(
667 static_cast<int>(CommittedMemory() / KB));
668 isolate_->counters()->heap_sample_total_used()->AddSample(
669 static_cast<int>(SizeOfObjects() / KB));
670 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
671 static_cast<int>(map_space()->CommittedMemory() / KB));
672 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
673 static_cast<int>(cell_space()->CommittedMemory() / KB));
674 isolate_->counters()->
675 heap_sample_property_cell_space_committed()->
676 AddSample(static_cast<int>(
677 property_cell_space()->CommittedMemory() / KB));
678 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
679 static_cast<int>(code_space()->CommittedMemory() / KB));
680
681 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
682 static_cast<int>(MaximumCommittedMemory() / KB));
683 }
684
685 #define UPDATE_COUNTERS_FOR_SPACE(space) \
686 isolate_->counters()->space##_bytes_available()->Set( \
687 static_cast<int>(space()->Available())); \
688 isolate_->counters()->space##_bytes_committed()->Set( \
689 static_cast<int>(space()->CommittedMemory())); \
690 isolate_->counters()->space##_bytes_used()->Set( \
691 static_cast<int>(space()->SizeOfObjects()));
692 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
693 if (space()->CommittedMemory() > 0) { \
694 isolate_->counters()->external_fragmentation_##space()->AddSample( \
695 static_cast<int>(100 - \
696 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
697 }
698 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
699 UPDATE_COUNTERS_FOR_SPACE(space) \
700 UPDATE_FRAGMENTATION_FOR_SPACE(space)
701
702 UPDATE_COUNTERS_FOR_SPACE(new_space)
703 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
704 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
705 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
706 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
707 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
708 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
709 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
710 #undef UPDATE_COUNTERS_FOR_SPACE
711 #undef UPDATE_FRAGMENTATION_FOR_SPACE
712 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
713
714 #ifdef DEBUG
715 ReportStatisticsAfterGC();
716 #endif // DEBUG
717 isolate_->debug()->AfterGarbageCollection();
718
719 // Remember the last top pointer so that we can later find out
720 // whether we allocated in new space since the last GC.
721 new_space_top_after_last_gc_ = new_space()->top();
722 }
723
724
725 void Heap::CollectAllGarbage(int flags,
726 const char* gc_reason,
727 const v8::GCCallbackFlags gc_callback_flags) {
728 // Since we are ignoring the return value, the exact choice of space does
729 // not matter, so long as we do not specify NEW_SPACE, which would not
730 // cause a full GC.
731 mark_compact_collector_.SetFlags(flags);
732 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
733 mark_compact_collector_.SetFlags(kNoGCFlags);
734 }
735
736
737 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
738 // Since we are ignoring the return value, the exact choice of space does
739 // not matter, so long as we do not specify NEW_SPACE, which would not
740 // cause a full GC.
741 // Major GC would invoke weak handle callbacks on weakly reachable
742 // handles, but won't collect weakly reachable objects until next
743 // major GC. Therefore if we collect aggressively and weak handle callback
744 // has been invoked, we rerun major GC to release objects which become
745 // garbage.
746 // Note: as weak callbacks can execute arbitrary code, we cannot
747 // hope that eventually there will be no weak callbacks invocations.
748 // Therefore stop recollecting after several attempts.
749 if (isolate()->concurrent_recompilation_enabled()) {
750 // The optimizing compiler may be unnecessarily holding on to memory.
751 DisallowHeapAllocation no_recursive_gc;
752 isolate()->optimizing_compiler_thread()->Flush();
753 }
754 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
755 kReduceMemoryFootprintMask);
756 isolate_->compilation_cache()->Clear();
757 const int kMaxNumberOfAttempts = 7;
758 const int kMinNumberOfAttempts = 2;
759 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
760 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
761 attempt + 1 >= kMinNumberOfAttempts) {
762 break;
763 }
764 }
765 mark_compact_collector()->SetFlags(kNoGCFlags);
766 new_space_.Shrink();
767 UncommitFromSpace();
768 incremental_marking()->UncommitMarkingDeque();
769 }
770
771
772 void Heap::EnsureFillerObjectAtTop() {
773 // There may be an allocation memento behind every object in new space.
774 // If we evacuate a new space that is not full, or if we are on the last page of
775 // the new space, then there may be uninitialized memory behind the top
776 // pointer of the new space page. We store a filler object there to
777 // identify the unused space.
778 Address from_top = new_space_.top();
779 Address from_limit = new_space_.limit();
780 if (from_top < from_limit) {
781 int remaining_in_page = static_cast<int>(from_limit - from_top);
782 CreateFillerObjectAt(from_top, remaining_in_page);
783 }
784 }
785
786
787 bool Heap::CollectGarbage(GarbageCollector collector,
788 const char* gc_reason,
789 const char* collector_reason,
790 const v8::GCCallbackFlags gc_callback_flags) {
791 // The VM is in the GC state until exiting this function.
792 VMState<GC> state(isolate_);
793
794 #ifdef DEBUG
795 // Reset the allocation timeout to the GC interval, but make sure to
796 // allow at least a few allocations after a collection. The reason
797 // for this is that we have a lot of allocation sequences and we
798 // assume that a garbage collection will allow the subsequent
799 // allocation attempts to go through.
800 allocation_timeout_ = Max(6, FLAG_gc_interval);
801 #endif
802
803 EnsureFillerObjectAtTop();
804
805 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
806 if (FLAG_trace_incremental_marking) {
807 PrintF("[IncrementalMarking] Scavenge during marking.\n");
808 }
809 }
810
811 if (collector == MARK_COMPACTOR &&
812 !mark_compact_collector()->abort_incremental_marking() &&
813 !incremental_marking()->IsStopped() &&
814 !incremental_marking()->should_hurry() &&
815 FLAG_incremental_marking_steps) {
816 // Make progress in incremental marking.
817 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
818 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
819 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
820 if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
821 if (FLAG_trace_incremental_marking) {
822 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
823 }
824 collector = SCAVENGER;
825 collector_reason = "incremental marking delaying mark-sweep";
826 }
827 }
828
829 bool next_gc_likely_to_collect_more = false;
830
831 { GCTracer tracer(this, gc_reason, collector_reason);
832 ASSERT(AllowHeapAllocation::IsAllowed());
833 DisallowHeapAllocation no_allocation_during_gc;
834 GarbageCollectionPrologue();
835 // The GC count was incremented in the prologue. Tell the tracer about
836 // it.
837 tracer.set_gc_count(gc_count_);
838
839 // Tell the tracer which collector we've selected.
840 tracer.set_collector(collector);
841
842 {
843 HistogramTimerScope histogram_timer_scope(
844 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
845 : isolate_->counters()->gc_compactor());
846 next_gc_likely_to_collect_more =
847 PerformGarbageCollection(collector, &tracer, gc_callback_flags);
848 }
849
850 GarbageCollectionEpilogue();
851 }
852
853 // Start incremental marking for the next cycle. The heap snapshot
854 // generator needs incremental marking to stay off after it aborted.
855 if (!mark_compact_collector()->abort_incremental_marking() &&
856 incremental_marking()->IsStopped() &&
857 incremental_marking()->WorthActivating() &&
858 NextGCIsLikelyToBeFull()) {
859 incremental_marking()->Start();
860 }
861
862 return next_gc_likely_to_collect_more;
863 }
864
865
866 int Heap::NotifyContextDisposed() {
867 if (isolate()->concurrent_recompilation_enabled()) {
868 // Flush the queued recompilation tasks.
869 isolate()->optimizing_compiler_thread()->Flush();
870 }
871 flush_monomorphic_ics_ = true;
872 AgeInlineCaches();
873 return ++contexts_disposed_;
874 }
875
876
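// Moves |len| elements of |array| from |src_index| to |dst_index|. The raw
// MemMove bypasses the write barrier, so slots that now hold new-space
// pointers are recorded in the store buffer explicitly and the incremental
// marker is notified of the rewritten range.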
877 void Heap::MoveElements(FixedArray* array,
878 int dst_index,
879 int src_index,
880 int len) {
881 if (len == 0) return;
882
883 ASSERT(array->map() != fixed_cow_array_map());
884 Object** dst_objects = array->data_start() + dst_index;
885 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
886 if (!InNewSpace(array)) {
887 for (int i = 0; i < len; i++) {
888 // TODO(hpayer): check store buffer for entries
889 if (InNewSpace(dst_objects[i])) {
890 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
891 }
892 }
893 }
894 incremental_marking()->RecordWrites(array);
895 }
896
897
898 #ifdef VERIFY_HEAP
899 // Helper class for verifying the string table.
900 class StringTableVerifier : public ObjectVisitor {
901 public:
902 void VisitPointers(Object** start, Object** end) {
903 // Visit all HeapObject pointers in [start, end).
904 for (Object** p = start; p < end; p++) {
905 if ((*p)->IsHeapObject()) {
906 // Check that the string is actually internalized.
907 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
908 (*p)->IsInternalizedString());
909 }
910 }
911 }
912 };
913
914
915 static void VerifyStringTable(Heap* heap) {
916 StringTableVerifier verifier;
917 heap->string_table()->IterateElements(&verifier);
918 }
919 #endif // VERIFY_HEAP
920
921
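// Forces a full collection that also aborts any incremental marking in
// progress; used below by ReserveSpace when a paged-space reservation fails.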
922 static bool AbortIncrementalMarkingAndCollectGarbage(
923 Heap* heap,
924 AllocationSpace space,
925 const char* gc_reason = NULL) {
926 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
927 bool result = heap->CollectGarbage(space, gc_reason);
928 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
929 return result;
930 }
931
932
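// Reserves sizes[space] bytes in each space for the deserializer, retrying
// after a garbage collection in the failing space and aborting the process
// after kThreshold failed rounds.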
933 void Heap::ReserveSpace(int *sizes, Address *locations_out) {
934 bool gc_performed = true;
935 int counter = 0;
936 static const int kThreshold = 20;
937 while (gc_performed && counter++ < kThreshold) {
938 gc_performed = false;
939 ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
940 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
941 if (sizes[space] != 0) {
942 AllocationResult allocation;
943 if (space == NEW_SPACE) {
944 allocation = new_space()->AllocateRaw(sizes[space]);
945 } else {
946 allocation = paged_space(space)->AllocateRaw(sizes[space]);
947 }
948 FreeListNode* node;
949 if (!allocation.To(&node)) {
950 if (space == NEW_SPACE) {
951 Heap::CollectGarbage(NEW_SPACE,
952 "failed to reserve space in the new space");
953 } else {
954 AbortIncrementalMarkingAndCollectGarbage(
955 this,
956 static_cast<AllocationSpace>(space),
957 "failed to reserve space in paged space");
958 }
959 gc_performed = true;
960 break;
961 } else {
962 // Mark with a free list node, in case we have a GC before
963 // deserializing.
964 node->set_size(this, sizes[space]);
965 locations_out[space] = node->address();
966 }
967 }
968 }
969 }
970
971 if (gc_performed) {
972 // Failed to reserve the space after several attempts.
973 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
974 }
975 }
976
977
978 void Heap::EnsureFromSpaceIsCommitted() {
979 if (new_space_.CommitFromSpaceIfNeeded()) return;
980
981 // Committing memory to from space failed.
982 // Memory is exhausted and we will die.
983 V8::FatalProcessOutOfMemory("Committing semi space failed.");
984 }
985
986
987 void Heap::ClearJSFunctionResultCaches() {
988 if (isolate_->bootstrapper()->IsActive()) return;
989
990 Object* context = native_contexts_list();
991 while (!context->IsUndefined()) {
992 // Get the caches for this context. GC can happen when the context
993 // is not fully initialized, so the caches can be undefined.
994 Object* caches_or_undefined =
995 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
996 if (!caches_or_undefined->IsUndefined()) {
997 FixedArray* caches = FixedArray::cast(caches_or_undefined);
998 // Clear the caches:
999 int length = caches->length();
1000 for (int i = 0; i < length; i++) {
1001 JSFunctionResultCache::cast(caches->get(i))->Clear();
1002 }
1003 }
1004 // Get the next context:
1005 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1006 }
1007 }
1008
1009
1010 void Heap::ClearNormalizedMapCaches() {
1011 if (isolate_->bootstrapper()->IsActive() &&
1012 !incremental_marking()->IsMarking()) {
1013 return;
1014 }
1015
1016 Object* context = native_contexts_list();
1017 while (!context->IsUndefined()) {
1018 // GC can happen when the context is not fully initialized,
1019 // so the cache can be undefined.
1020 Object* cache =
1021 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1022 if (!cache->IsUndefined()) {
1023 NormalizedMapCache::cast(cache)->Clear();
1024 }
1025 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1026 }
1027 }
1028
1029
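// Computes the promotion and semi-space-copy rates as percentages of the new
// space size at the start of the last scavenge; a high combined survival
// rate extends high_survival_rate_period_length_.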
1030 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1031 if (start_new_space_size == 0) return;
1032
1033 promotion_rate_ =
1034 (static_cast<double>(promoted_objects_size_) /
1035 static_cast<double>(start_new_space_size) * 100);
1036
1037 semi_space_copied_rate_ =
1038 (static_cast<double>(semi_space_copied_object_size_) /
1039 static_cast<double>(start_new_space_size) * 100);
1040
1041 double survival_rate = promotion_rate_ + semi_space_copied_rate_;
1042
1043 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1044 high_survival_rate_period_length_++;
1045 } else {
1046 high_survival_rate_period_length_ = 0;
1047 }
1048 }
1049
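// Runs a single collection of the selected kind: fires the GC prologue
// callbacks, performs the mark-compact or scavenge, updates survival
// statistics and the old-generation allocation limit, runs weak-handle
// post-processing, and finally fires the epilogue callbacks. Returns true if
// global handles were freed, i.e. the next GC is likely to collect more.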
1050 bool Heap::PerformGarbageCollection(
1051 GarbageCollector collector,
1052 GCTracer* tracer,
1053 const v8::GCCallbackFlags gc_callback_flags) {
1054 int freed_global_handles = 0;
1055
1056 if (collector != SCAVENGER) {
1057 PROFILE(isolate_, CodeMovingGCEvent());
1058 }
1059
1060 #ifdef VERIFY_HEAP
1061 if (FLAG_verify_heap) {
1062 VerifyStringTable(this);
1063 }
1064 #endif
1065
1066 GCType gc_type =
1067 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1068
1069 { GCCallbacksScope scope(this);
1070 if (scope.CheckReenter()) {
1071 AllowHeapAllocation allow_allocation;
1072 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1073 VMState<EXTERNAL> state(isolate_);
1074 HandleScope handle_scope(isolate_);
1075 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1076 }
1077 }
1078
1079 EnsureFromSpaceIsCommitted();
1080
1081 int start_new_space_size = Heap::new_space()->SizeAsInt();
1082
1083 if (IsHighSurvivalRate()) {
1084 // We speed up the incremental marker if it is running so that it
1085 // does not fall behind the rate of promotion, which would cause a
1086 // constantly growing old space.
1087 incremental_marking()->NotifyOfHighPromotionRate();
1088 }
1089
1090 if (collector == MARK_COMPACTOR) {
1091 // Perform mark-sweep with optional compaction.
1092 MarkCompact(tracer);
1093 sweep_generation_++;
1094 // Temporarily set the limit for the case when PostGarbageCollectionProcessing
1095 // allocates and triggers GC. The real limit is set after
1096 // PostGarbageCollectionProcessing.
1097 old_generation_allocation_limit_ =
1098 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1099 old_gen_exhausted_ = false;
1100 } else {
1101 tracer_ = tracer;
1102 Scavenge();
1103 tracer_ = NULL;
1104 }
1105
1106 UpdateSurvivalStatistics(start_new_space_size);
1107
1108 isolate_->counters()->objs_since_last_young()->Set(0);
1109
1110 // Callbacks that fire after this point might trigger nested GCs and
1111 // restart incremental marking, so the assertion can't be moved down.
1112 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1113
1114 gc_post_processing_depth_++;
1115 { AllowHeapAllocation allow_allocation;
1116 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1117 freed_global_handles =
1118 isolate_->global_handles()->PostGarbageCollectionProcessing(
1119 collector, tracer);
1120 }
1121 gc_post_processing_depth_--;
1122
1123 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1124
1125 // Update relocatables.
1126 Relocatable::PostGarbageCollectionProcessing(isolate_);
1127
1128 if (collector == MARK_COMPACTOR) {
1129 // Register the amount of external allocated memory.
1130 amount_of_external_allocated_memory_at_last_global_gc_ =
1131 amount_of_external_allocated_memory_;
1132 old_generation_allocation_limit_ =
1133 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
1134 freed_global_handles);
1135 }
1136
1137 { GCCallbacksScope scope(this);
1138 if (scope.CheckReenter()) {
1139 AllowHeapAllocation allow_allocation;
1140 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1141 VMState<EXTERNAL> state(isolate_);
1142 HandleScope handle_scope(isolate_);
1143 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1144 }
1145 }
1146
1147 #ifdef VERIFY_HEAP
1148 if (FLAG_verify_heap) {
1149 VerifyStringTable(this);
1150 }
1151 #endif
1152
1153 return freed_global_handles > 0;
1154 }
1155
1156
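// Invokes the embedder-registered prologue callbacks whose GC type mask
// matches |gc_type|; callbacks registered without an isolate get the legacy
// two-argument signature, the others also receive the isolate.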
1157 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1158 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1159 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1160 if (!gc_prologue_callbacks_[i].pass_isolate_) {
1161 v8::GCPrologueCallback callback =
1162 reinterpret_cast<v8::GCPrologueCallback>(
1163 gc_prologue_callbacks_[i].callback);
1164 callback(gc_type, flags);
1165 } else {
1166 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1167 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1168 }
1169 }
1170 }
1171 }
1172
1173
1174 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1175 GCCallbackFlags gc_callback_flags) {
1176 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1177 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1178 if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1179 v8::GCPrologueCallback callback =
1180 reinterpret_cast<v8::GCPrologueCallback>(
1181 gc_epilogue_callbacks_[i].callback);
1182 callback(gc_type, gc_callback_flags);
1183 } else {
1184 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1185 gc_epilogue_callbacks_[i].callback(
1186 isolate, gc_type, gc_callback_flags);
1187 }
1188 }
1189 }
1190 }
1191
1192
1193 void Heap::MarkCompact(GCTracer* tracer) {
1194 gc_state_ = MARK_COMPACT;
1195 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1196
1197 uint64_t size_of_objects_before_gc = SizeOfObjects();
1198
1199 mark_compact_collector_.Prepare(tracer);
1200
1201 ms_count_++;
1202 tracer->set_full_gc_count(ms_count_);
1203
1204 MarkCompactPrologue();
1205
1206 mark_compact_collector_.CollectGarbage();
1207
1208 LOG(isolate_, ResourceEvent("markcompact", "end"));
1209
1210 gc_state_ = NOT_IN_GC;
1211
1212 isolate_->counters()->objs_since_last_full()->Set(0);
1213
1214 flush_monomorphic_ics_ = false;
1215
1216 if (FLAG_allocation_site_pretenuring) {
1217 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1218 }
1219 }
1220
1221
1222 void Heap::MarkCompactPrologue() {
1223 // At any old GC clear the keyed lookup cache to enable collection of unused
1224 // maps.
1225 isolate_->keyed_lookup_cache()->Clear();
1226 isolate_->context_slot_cache()->Clear();
1227 isolate_->descriptor_lookup_cache()->Clear();
1228 RegExpResultsCache::Clear(string_split_cache());
1229 RegExpResultsCache::Clear(regexp_multiple_cache());
1230
1231 isolate_->compilation_cache()->MarkCompactPrologue();
1232
1233 CompletelyClearInstanceofCache();
1234
1235 FlushNumberStringCache();
1236 if (FLAG_cleanup_code_caches_at_gc) {
1237 polymorphic_code_cache()->set_cache(undefined_value());
1238 }
1239
1240 ClearNormalizedMapCaches();
1241 }
1242
1243
1244 // Helper class for copying HeapObjects
1245 class ScavengeVisitor: public ObjectVisitor {
1246 public:
1247 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1248
1249 void VisitPointer(Object** p) { ScavengePointer(p); }
1250
1251 void VisitPointers(Object** start, Object** end) {
1252 // Copy all HeapObject pointers in [start, end)
1253 for (Object** p = start; p < end; p++) ScavengePointer(p);
1254 }
1255
1256 private:
1257 void ScavengePointer(Object** p) {
1258 Object* object = *p;
1259 if (!heap_->InNewSpace(object)) return;
1260 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1261 reinterpret_cast<HeapObject*>(object));
1262 }
1263
1264 Heap* heap_;
1265 };
1266
1267
1268 #ifdef VERIFY_HEAP
1269 // Visitor class to verify pointers in code or data space do not point into
1270 // new space.
1271 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1272 public:
1273 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1274 void VisitPointers(Object** start, Object** end) {
1275 for (Object** current = start; current < end; current++) {
1276 if ((*current)->IsHeapObject()) {
1277 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1278 }
1279 }
1280 }
1281
1282 private:
1283 Heap* heap_;
1284 };
1285
1286
1287 static void VerifyNonPointerSpacePointers(Heap* heap) {
1288 // Verify that there are no pointers to new space in spaces where we
1289 // do not expect them.
1290 VerifyNonPointerSpacePointersVisitor v(heap);
1291 HeapObjectIterator code_it(heap->code_space());
1292 for (HeapObject* object = code_it.Next();
1293 object != NULL; object = code_it.Next())
1294 object->Iterate(&v);
1295
1296 // The old data space is normally swept conservatively, in which case the
1297 // iterator does not work over it, so we normally skip the next bit.
1298 if (!heap->old_data_space()->was_swept_conservatively()) {
1299 HeapObjectIterator data_it(heap->old_data_space());
1300 for (HeapObject* object = data_it.Next();
1301 object != NULL; object = data_it.Next())
1302 object->Iterate(&v);
1303 }
1304 }
1305 #endif // VERIFY_HEAP
1306
1307
1308 void Heap::CheckNewSpaceExpansionCriteria() {
1309 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1310 survived_since_last_expansion_ > new_space_.Capacity()) {
1311 // Grow the size of new space if there is room to grow, enough data
1312 // has survived scavenge since the last expansion and we are not in
1313 // high promotion mode.
1314 new_space_.Grow();
1315 survived_since_last_expansion_ = 0;
1316 }
1317 }
1318
1319
1320 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1321 return heap->InNewSpace(*p) &&
1322 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1323 }
1324
1325
1326 void Heap::ScavengeStoreBufferCallback(
1327 Heap* heap,
1328 MemoryChunk* page,
1329 StoreBufferEvent event) {
1330 heap->store_buffer_rebuilder_.Callback(page, event);
1331 }
1332
1333
1334 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1335 if (event == kStoreBufferStartScanningPagesEvent) {
1336 start_of_current_page_ = NULL;
1337 current_page_ = NULL;
1338 } else if (event == kStoreBufferScanningPageEvent) {
1339 if (current_page_ != NULL) {
1340 // If this page already overflowed the store buffer during this iteration.
1341 if (current_page_->scan_on_scavenge()) {
1342 // Then we should wipe out the entries that have been added for it.
1343 store_buffer_->SetTop(start_of_current_page_);
1344 } else if (store_buffer_->Top() - start_of_current_page_ >=
1345 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1346 // Did we find too many pointers in the previous page? The heuristic is
1347 // that no page can take more than 1/5 of the remaining slots in the store
1348 // buffer.
1349 current_page_->set_scan_on_scavenge(true);
1350 store_buffer_->SetTop(start_of_current_page_);
1351 } else {
1352 // In this case the page we scanned took a reasonable number of slots in
1353 // the store buffer. It has now been rehabilitated and is no longer
1354 // marked scan_on_scavenge.
1355 ASSERT(!current_page_->scan_on_scavenge());
1356 }
1357 }
1358 start_of_current_page_ = store_buffer_->Top();
1359 current_page_ = page;
1360 } else if (event == kStoreBufferFullEvent) {
1361 // The current page overflowed the store buffer again. Wipe out its entries
1362 // in the store buffer and mark it scan-on-scavenge again. This may happen
1363 // several times while scanning.
1364 if (current_page_ == NULL) {
1365 // Store Buffer overflowed while scanning promoted objects. These are not
1366 // in any particular page, though they are likely to be clustered by the
1367 // allocation routines.
1368 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1369 } else {
1370 // Store Buffer overflowed while scanning a particular old space page for
1371 // pointers to new space.
1372 ASSERT(current_page_ == page);
1373 ASSERT(page != NULL);
1374 current_page_->set_scan_on_scavenge(true);
1375 ASSERT(start_of_current_page_ != store_buffer_->Top());
1376 store_buffer_->SetTop(start_of_current_page_);
1377 }
1378 } else {
1379 UNREACHABLE();
1380 }
1381 }
1382
1383
1384 void PromotionQueue::Initialize() {
1385 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1386 // entries (where each is a pair of intptr_t). This allows us to simplify
1387 // the test for when to switch pages.
1388 ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1389 == 0);
1390 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1391 front_ = rear_ =
1392 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1393 emergency_stack_ = NULL;
1394 guard_ = false;
1395 }
1396
1397
1398 void PromotionQueue::RelocateQueueHead() {
1399 ASSERT(emergency_stack_ == NULL);
1400
1401 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1402 intptr_t* head_start = rear_;
1403 intptr_t* head_end =
1404 Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1405
1406 int entries_count =
1407 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1408
1409 emergency_stack_ = new List<Entry>(2 * entries_count);
1410
1411 while (head_start != head_end) {
1412 int size = static_cast<int>(*(head_start++));
1413 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1414 emergency_stack_->Add(Entry(obj, size));
1415 }
1416 rear_ = head_end;
1417 }
1418
1419
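// Weak-list retainer used during scavenges: objects outside from-space are
// kept, evacuated objects are replaced by their forwarding address, and
// objects left behind in from-space are dropped (NULL).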
1420 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1421 public:
1422 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1423
1424 virtual Object* RetainAs(Object* object) {
1425 if (!heap_->InFromSpace(object)) {
1426 return object;
1427 }
1428
1429 MapWord map_word = HeapObject::cast(object)->map_word();
1430 if (map_word.IsForwardingAddress()) {
1431 return map_word.ToForwardingAddress();
1432 }
1433 return NULL;
1434 }
1435
1436 private:
1437 Heap* heap_;
1438 };
1439
1440
1441 void Heap::Scavenge() {
1442 RelocationLock relocation_lock(this);
1443
1444 #ifdef VERIFY_HEAP
1445 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1446 #endif
1447
1448 gc_state_ = SCAVENGE;
1449
1450 // Implements Cheney's copying algorithm
1451 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1452
1453 // Clear descriptor cache.
1454 isolate_->descriptor_lookup_cache()->Clear();
1455
1456 // Used for updating survived_since_last_expansion_ at function end.
1457 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1458
1459 SelectScavengingVisitorsTable();
1460
1461 incremental_marking()->PrepareForScavenge();
1462
1463 // Flip the semispaces. After flipping, to space is empty, from space has
1464 // live objects.
1465 new_space_.Flip();
1466 new_space_.ResetAllocationInfo();
1467
1468 // We need to sweep newly copied objects which can be either in the
1469 // to space or promoted to the old generation. For to-space
1470 // objects, we treat the bottom of the to space as a queue. Newly
1471 // copied and unswept objects lie between a 'front' mark and the
1472 // allocation pointer.
1473 //
1474 // Promoted objects can go into various old-generation spaces, and
1475 // can be allocated internally in the spaces (from the free list).
1476 // We treat the top of the to space as a queue of addresses of
1477 // promoted objects. The addresses of newly promoted and unswept
1478 // objects lie between a 'front' mark and a 'rear' mark that is
1479 // updated as a side effect of promoting an object.
1480 //
1481 // There is guaranteed to be enough room at the top of the to space
1482 // for the addresses of promoted objects: every object promoted
1483 // frees up its size in bytes from the top of the new space, and
1484 // objects are at least one pointer in size.
1485 Address new_space_front = new_space_.ToSpaceStart();
1486 promotion_queue_.Initialize();
1487
1488 #ifdef DEBUG
1489 store_buffer()->Clean();
1490 #endif
1491
1492 ScavengeVisitor scavenge_visitor(this);
1493 // Copy roots.
1494 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1495
1496 // Copy objects reachable from the old generation.
1497 {
1498 StoreBufferRebuildScope scope(this,
1499 store_buffer(),
1500 &ScavengeStoreBufferCallback);
1501 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1502 }
1503
1504 // Copy objects reachable from simple cells by scavenging cell values
1505 // directly.
1506 HeapObjectIterator cell_iterator(cell_space_);
1507 for (HeapObject* heap_object = cell_iterator.Next();
1508 heap_object != NULL;
1509 heap_object = cell_iterator.Next()) {
1510 if (heap_object->IsCell()) {
1511 Cell* cell = Cell::cast(heap_object);
1512 Address value_address = cell->ValueAddress();
1513 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1514 }
1515 }
1516
1517 // Copy objects reachable from global property cells by scavenging global
1518 // property cell values directly.
1519 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1520 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1521 heap_object != NULL;
1522 heap_object = js_global_property_cell_iterator.Next()) {
1523 if (heap_object->IsPropertyCell()) {
1524 PropertyCell* cell = PropertyCell::cast(heap_object);
1525 Address value_address = cell->ValueAddress();
1526 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1527 Address type_address = cell->TypeAddress();
1528 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1529 }
1530 }
1531
1532 // Copy objects reachable from the encountered weak collections list.
1533 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1534
1535 // Copy objects reachable from the code flushing candidates list.
1536 MarkCompactCollector* collector = mark_compact_collector();
1537 if (collector->is_code_flushing_enabled()) {
1538 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1539 }
1540
1541 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1542
1543 while (isolate()->global_handles()->IterateObjectGroups(
1544 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1545 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1546 }
1547 isolate()->global_handles()->RemoveObjectGroups();
1548 isolate()->global_handles()->RemoveImplicitRefGroups();
1549
1550 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1551 &IsUnscavengedHeapObject);
1552 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1553 &scavenge_visitor);
1554 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1555
1556 UpdateNewSpaceReferencesInExternalStringTable(
1557 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1558
1559 promotion_queue_.Destroy();
1560
1561 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1562
1563 ScavengeWeakObjectRetainer weak_object_retainer(this);
1564 ProcessWeakReferences(&weak_object_retainer);
1565
1566 ASSERT(new_space_front == new_space_.top());
1567
1568 // Set age mark.
1569 new_space_.set_age_mark(new_space_.top());
1570
1571 new_space_.LowerInlineAllocationLimit(
1572 new_space_.inline_allocation_limit_step());
1573
1574 // Update how much has survived scavenge.
1575 IncrementYoungSurvivorsCounter(static_cast<int>(
1576 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1577
1578 LOG(isolate_, ResourceEvent("scavenge", "end"));
1579
1580 gc_state_ = NOT_IN_GC;
1581
1582 scavenges_since_last_idle_round_++;
1583 }
1584
1585
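// Updater for a single external string table entry after scavenge: if the
// string has no forwarding address it was unreachable, so it is finalized and
// dropped (NULL is returned); otherwise the forwarding address is returned.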
1586 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1587 Object** p) {
1588 MapWord first_word = HeapObject::cast(*p)->map_word();
1589
1590 if (!first_word.IsForwardingAddress()) {
1591 // Unreachable external string can be finalized.
1592 heap->FinalizeExternalString(String::cast(*p));
1593 return NULL;
1594 }
1595
1596 // String is still reachable.
1597 return String::cast(first_word.ToForwardingAddress());
1598 }
1599
1600
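// Runs |updater_func| over every new-space entry of the external string
// table, compacting the list in place: surviving new-space strings are kept,
// promoted strings move to the old-space list, and dead entries are dropped.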
1601 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1602 ExternalStringTableUpdaterCallback updater_func) {
1603 #ifdef VERIFY_HEAP
1604 if (FLAG_verify_heap) {
1605 external_string_table_.Verify();
1606 }
1607 #endif
1608
1609 if (external_string_table_.new_space_strings_.is_empty()) return;
1610
1611 Object** start = &external_string_table_.new_space_strings_[0];
1612 Object** end = start + external_string_table_.new_space_strings_.length();
1613 Object** last = start;
1614
1615 for (Object** p = start; p < end; ++p) {
1616 ASSERT(InFromSpace(*p));
1617 String* target = updater_func(this, p);
1618
1619 if (target == NULL) continue;
1620
1621 ASSERT(target->IsExternalString());
1622
1623 if (InNewSpace(target)) {
1624 // String is still in new space. Update the table entry.
1625 *last = target;
1626 ++last;
1627 } else {
1628 // String got promoted. Move it to the old string list.
1629 external_string_table_.AddOldString(target);
1630 }
1631 }
1632
1633 ASSERT(last <= end);
1634 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1635 }
1636
1637
1638 void Heap::UpdateReferencesInExternalStringTable(
1639 ExternalStringTableUpdaterCallback updater_func) {
1640
1641 // Update old space string references.
1642 if (external_string_table_.old_space_strings_.length() > 0) {
1643 Object** start = &external_string_table_.old_space_strings_[0];
1644 Object** end = start + external_string_table_.old_space_strings_.length();
1645 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1646 }
1647
1648 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1649 }
1650
1651
1652 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1653 ProcessArrayBuffers(retainer);
1654 ProcessNativeContexts(retainer);
1655 // TODO(mvstanton): AllocationSites only need to be processed during
1656 // MARK_COMPACT, as they live in old space. Verify and address.
1657 ProcessAllocationSites(retainer);
1658 }
1659
1660
1661 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1662 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1663 // Update the head of the list of contexts.
1664 set_native_contexts_list(head);
1665 }
1666
1667
1668 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1669 Object* array_buffer_obj =
1670 VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1671 set_array_buffers_list(array_buffer_obj);
1672 }
1673
1674
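// Releases the backing store of every array buffer still on the weak list and
// resets the list to undefined (used when tearing down the heap).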
1675 void Heap::TearDownArrayBuffers() {
1676 Object* undefined = undefined_value();
1677 for (Object* o = array_buffers_list(); o != undefined;) {
1678 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1679 Runtime::FreeArrayBuffer(isolate(), buffer);
1680 o = buffer->weak_next();
1681 }
1682 set_array_buffers_list(undefined);
1683 }
1684
1685
1686 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1687 Object* allocation_site_obj =
1688 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1689 set_allocation_sites_list(allocation_site_obj);
1690 }
1691
1692
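// Walks the weak list of allocation sites; every site whose pretenure mode
// matches |flag| has its pretenuring decision reset and its dependent code
// flagged for deoptimization. If any site was touched, deoptimization of the
// marked allocation sites is requested via the stack guard.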
1693 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1694 DisallowHeapAllocation no_allocation_scope;
1695 Object* cur = allocation_sites_list();
1696 bool marked = false;
1697 while (cur->IsAllocationSite()) {
1698 AllocationSite* casted = AllocationSite::cast(cur);
1699 if (casted->GetPretenureMode() == flag) {
1700 casted->ResetPretenureDecision();
1701 casted->set_deopt_dependent_code(true);
1702 marked = true;
1703 }
1704 cur = casted->weak_next();
1705 }
1706 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1707 }
1708
1709
1710 void Heap::EvaluateOldSpaceLocalPretenuring(
1711 uint64_t size_of_objects_before_gc) {
1712 uint64_t size_of_objects_after_gc = SizeOfObjects();
1713 double old_generation_survival_rate =
1714 (static_cast<double>(size_of_objects_after_gc) * 100) /
1715 static_cast<double>(size_of_objects_before_gc);
1716
1717 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1718     // Too many objects died in the old generation; pretenuring of the
1719     // wrong allocation sites may be the cause. We have to deopt all
1720     // dependent code registered in the allocation sites to re-evaluate
1721     // our pretenuring decisions.
1722 ResetAllAllocationSitesDependentCode(TENURED);
1723 if (FLAG_trace_pretenuring) {
1724 PrintF("Deopt all allocation sites dependent code due to low survival "
1725 "rate in the old generation %f\n", old_generation_survival_rate);
1726 }
1727 }
1728 }
1729
1730
1731 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1732 DisallowHeapAllocation no_allocation;
1733 // All external strings are listed in the external string table.
1734
1735 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1736 public:
1737 explicit ExternalStringTableVisitorAdapter(
1738 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1739 virtual void VisitPointers(Object** start, Object** end) {
1740 for (Object** p = start; p < end; p++) {
1741 ASSERT((*p)->IsExternalString());
1742 visitor_->VisitExternalString(Utils::ToLocal(
1743 Handle<String>(String::cast(*p))));
1744 }
1745 }
1746 private:
1747 v8::ExternalResourceVisitor* visitor_;
1748 } external_string_table_visitor(visitor);
1749
1750 external_string_table_.Iterate(&external_string_table_visitor);
1751 }
1752
1753
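// Static visitor used while draining the scavenge work list: any pointer that
// still refers to new space is scavenged (copied or promoted).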
1754 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1755 public:
1756   static inline void VisitPointer(Heap* heap, Object** p) {
1757 Object* object = *p;
1758 if (!heap->InNewSpace(object)) return;
1759 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1760 reinterpret_cast<HeapObject*>(object));
1761 }
1762 };
1763
1764
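// Drains the scavenge work list. Objects copied into to-space between
// |new_space_front| and the allocation top are iterated Cheney-style, and
// promoted objects queued on the promotion queue are re-scanned for
// from-space pointers, until both sources of work are exhausted.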
1765 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1766 Address new_space_front) {
1767 do {
1768 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1769 // The addresses new_space_front and new_space_.top() define a
1770 // queue of unprocessed copied objects. Process them until the
1771 // queue is empty.
1772 while (new_space_front != new_space_.top()) {
1773 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1774 HeapObject* object = HeapObject::FromAddress(new_space_front);
1775 new_space_front +=
1776 NewSpaceScavenger::IterateBody(object->map(), object);
1777 } else {
1778 new_space_front =
1779 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1780 }
1781 }
1782
1783 // Promote and process all the to-be-promoted objects.
1784 {
1785 StoreBufferRebuildScope scope(this,
1786 store_buffer(),
1787 &ScavengeStoreBufferCallback);
1788 while (!promotion_queue()->is_empty()) {
1789 HeapObject* target;
1790 int size;
1791 promotion_queue()->remove(&target, &size);
1792
1793         // The promoted object might already be partially visited
1794         // during old space pointer iteration. Thus we search specifically
1795         // for pointers to the from semispace instead of looking for
1796         // pointers to new space.
1797 ASSERT(!target->IsMap());
1798 IterateAndMarkPointersToFromSpace(target->address(),
1799 target->address() + size,
1800 &ScavengeObject);
1801 }
1802 }
1803
1804 // Take another spin if there are now unswept objects in new space
1805 // (there are currently no more unswept promoted objects).
1806 } while (new_space_front != new_space_.top());
1807
1808 return new_space_front;
1809 }
1810
1811
1812 STATIC_ASSERT((FixedDoubleArray::kHeaderSize &
1813 kDoubleAlignmentMask) == 0); // NOLINT
1814 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset &
1815 kDoubleAlignmentMask) == 0); // NOLINT
1816 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1817 kDoubleAlignmentMask) == 0); // NOLINT
1818
1819
1820 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1821 HeapObject* object,
1822 int size));
1823
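// The caller over-allocates by one word; depending on the address, either the
// leading word becomes a filler and the object is shifted forward, or the
// trailing word becomes a filler and the object stays put.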
1824 static HeapObject* EnsureDoubleAligned(Heap* heap,
1825 HeapObject* object,
1826 int size) {
1827 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1828 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1829 return HeapObject::FromAddress(object->address() + kPointerSize);
1830 } else {
1831 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1832 kPointerSize);
1833 return object;
1834 }
1835 }
1836
1837
1838 enum LoggingAndProfiling {
1839 LOGGING_AND_PROFILING_ENABLED,
1840 LOGGING_AND_PROFILING_DISABLED
1841 };
1842
1843
1844 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1845
1846
1847 template<MarksHandling marks_handling,
1848 LoggingAndProfiling logging_and_profiling_mode>
1849 class ScavengingVisitor : public StaticVisitorBase {
1850 public:
1851   static void Initialize() {
1852 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1853 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1854 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1855 table_.Register(kVisitByteArray, &EvacuateByteArray);
1856 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1857 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1858 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1859 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1860
1861 table_.Register(kVisitNativeContext,
1862 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1863 template VisitSpecialized<Context::kSize>);
1864
1865 table_.Register(kVisitConsString,
1866 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1867 template VisitSpecialized<ConsString::kSize>);
1868
1869 table_.Register(kVisitSlicedString,
1870 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1871 template VisitSpecialized<SlicedString::kSize>);
1872
1873 table_.Register(kVisitSymbol,
1874 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1875 template VisitSpecialized<Symbol::kSize>);
1876
1877 table_.Register(kVisitSharedFunctionInfo,
1878 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1879 template VisitSpecialized<SharedFunctionInfo::kSize>);
1880
1881 table_.Register(kVisitJSWeakCollection,
1882 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1883 Visit);
1884
1885 table_.Register(kVisitJSArrayBuffer,
1886 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1887 Visit);
1888
1889 table_.Register(kVisitJSTypedArray,
1890 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1891 Visit);
1892
1893 table_.Register(kVisitJSDataView,
1894 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1895 Visit);
1896
1897 table_.Register(kVisitJSRegExp,
1898 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1899 Visit);
1900
1901 if (marks_handling == IGNORE_MARKS) {
1902 table_.Register(kVisitJSFunction,
1903 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1904 template VisitSpecialized<JSFunction::kSize>);
1905 } else {
1906 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1907 }
1908
1909 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1910 kVisitDataObject,
1911 kVisitDataObjectGeneric>();
1912
1913 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1914 kVisitJSObject,
1915 kVisitJSObjectGeneric>();
1916
1917 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1918 kVisitStruct,
1919 kVisitStructGeneric>();
1920 }
1921
1922   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1923 return &table_;
1924 }
1925
1926 private:
1927 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1928
1929   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1930 bool should_record = false;
1931 #ifdef DEBUG
1932 should_record = FLAG_heap_stats;
1933 #endif
1934 should_record = should_record || FLAG_log_gc;
1935 if (should_record) {
1936 if (heap->new_space()->Contains(obj)) {
1937 heap->new_space()->RecordAllocation(obj);
1938 } else {
1939 heap->new_space()->RecordPromotion(obj);
1940 }
1941 }
1942 }
1943
1944   // Helper function used by EvacuateObject to copy a source object to an
1945   // allocated target object and update the forwarding pointer in the
1946   // source object.
1947   INLINE(static void MigrateObject(Heap* heap,
1948 HeapObject* source,
1949 HeapObject* target,
1950 int size)) {
1951 // Copy the content of source to target.
1952 heap->CopyBlock(target->address(), source->address(), size);
1953
1954 // Set the forwarding address.
1955 source->set_map_word(MapWord::FromForwardingAddress(target));
1956
1957 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1958 // Update NewSpace stats if necessary.
1959 RecordCopiedObject(heap, target);
1960 Isolate* isolate = heap->isolate();
1961 HeapProfiler* heap_profiler = isolate->heap_profiler();
1962 if (heap_profiler->is_tracking_object_moves()) {
1963 heap_profiler->ObjectMoveEvent(source->address(), target->address(),
1964 size);
1965 }
1966 if (isolate->logger()->is_logging_code_events() ||
1967 isolate->cpu_profiler()->is_profiling()) {
1968 if (target->IsSharedFunctionInfo()) {
1969 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1970 source->address(), target->address()));
1971 }
1972 }
1973 }
1974
1975 if (marks_handling == TRANSFER_MARKS) {
1976 if (Marking::TransferColor(source, target)) {
1977 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1978 }
1979 }
1980 }
1981
1982
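  // Copies |object| out of from-space: objects that have survived long enough
  // are promoted into old data or old pointer space (depending on
  // |object_contents|); otherwise, or if the old-space allocation fails, the
  // object is copied into to-space. The slot and the forwarding map word are
  // updated in both cases.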
1983 template<ObjectContents object_contents, int alignment>
1984   static inline void EvacuateObject(Map* map,
1985 HeapObject** slot,
1986 HeapObject* object,
1987 int object_size) {
1988 SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
1989 SLOW_ASSERT(object->Size() == object_size);
1990
1991 int allocation_size = object_size;
1992 if (alignment != kObjectAlignment) {
1993 ASSERT(alignment == kDoubleAlignment);
1994 allocation_size += kPointerSize;
1995 }
1996
1997 Heap* heap = map->GetHeap();
1998 if (heap->ShouldBePromoted(object->address(), object_size)) {
1999 AllocationResult allocation;
2000
2001 if (object_contents == DATA_OBJECT) {
2002 ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2003 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2004 } else {
2005 ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2006 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2007 }
2008
2009 HeapObject* target = NULL; // Initialization to please compiler.
2010 if (allocation.To(&target)) {
2011 if (alignment != kObjectAlignment) {
2012 target = EnsureDoubleAligned(heap, target, allocation_size);
2013 }
2014
2015         // Order is important: the slot might be inside the target if the
2016         // target was allocated over a dead object and the slot comes from
2017         // the store buffer.
2018 *slot = target;
2019 MigrateObject(heap, object, target, object_size);
2020
2021 if (object_contents == POINTER_OBJECT) {
2022 if (map->instance_type() == JS_FUNCTION_TYPE) {
2023 heap->promotion_queue()->insert(
2024 target, JSFunction::kNonWeakFieldsEndOffset);
2025 } else {
2026 heap->promotion_queue()->insert(target, object_size);
2027 }
2028 }
2029
2030 heap->IncrementPromotedObjectsSize(object_size);
2031 return;
2032 }
2033 }
2034 ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
2035 AllocationResult allocation =
2036 heap->new_space()->AllocateRaw(allocation_size);
2037 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2038 HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());
2039
2040 if (alignment != kObjectAlignment) {
2041 target = EnsureDoubleAligned(heap, target, allocation_size);
2042 }
2043
2044     // Order is important: the slot might be inside the target if the target
2045     // was allocated over a dead object and the slot comes from the store
2046     // buffer.
2047 *slot = target;
2048 MigrateObject(heap, object, target, object_size);
2049 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
2050 return;
2051 }
2052
2053
2054   static inline void EvacuateJSFunction(Map* map,
2055 HeapObject** slot,
2056 HeapObject* object) {
2057 ObjectEvacuationStrategy<POINTER_OBJECT>::
2058 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2059
2060 HeapObject* target = *slot;
2061 MarkBit mark_bit = Marking::MarkBitFrom(target);
2062 if (Marking::IsBlack(mark_bit)) {
2063       // This object is black and it might not be rescanned by the marker.
2064       // We should explicitly record the code entry slot for compaction
2065       // because promotion queue processing (IterateAndMarkPointersToFromSpace)
2066       // will miss it, as it is not HeapObject-tagged.
2067 Address code_entry_slot =
2068 target->address() + JSFunction::kCodeEntryOffset;
2069 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2070 map->GetHeap()->mark_compact_collector()->
2071 RecordCodeEntrySlot(code_entry_slot, code);
2072 }
2073 }
2074
2075
2076   static inline void EvacuateFixedArray(Map* map,
2077 HeapObject** slot,
2078 HeapObject* object) {
2079 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2080 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2081 map, slot, object, object_size);
2082 }
2083
2084
2085   static inline void EvacuateFixedDoubleArray(Map* map,
2086 HeapObject** slot,
2087 HeapObject* object) {
2088 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2089 int object_size = FixedDoubleArray::SizeFor(length);
2090 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2091 map, slot, object, object_size);
2092 }
2093
2094
2095   static inline void EvacuateFixedTypedArray(Map* map,
2096 HeapObject** slot,
2097 HeapObject* object) {
2098 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2099 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2100 map, slot, object, object_size);
2101 }
2102
2103
2104   static inline void EvacuateFixedFloat64Array(Map* map,
2105 HeapObject** slot,
2106 HeapObject* object) {
2107 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2108 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2109 map, slot, object, object_size);
2110 }
2111
2112
2113   static inline void EvacuateByteArray(Map* map,
2114 HeapObject** slot,
2115 HeapObject* object) {
2116 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2117 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2118 map, slot, object, object_size);
2119 }
2120
2121
2122   static inline void EvacuateSeqOneByteString(Map* map,
2123 HeapObject** slot,
2124 HeapObject* object) {
2125 int object_size = SeqOneByteString::cast(object)->
2126 SeqOneByteStringSize(map->instance_type());
2127 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2128 map, slot, object, object_size);
2129 }
2130
2131
2132   static inline void EvacuateSeqTwoByteString(Map* map,
2133 HeapObject** slot,
2134 HeapObject* object) {
2135 int object_size = SeqTwoByteString::cast(object)->
2136 SeqTwoByteStringSize(map->instance_type());
2137 EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2138 map, slot, object, object_size);
2139 }
2140
2141
2142   static inline bool IsShortcutCandidate(int type) {
2143 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2144 }
2145
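  // Cons strings whose second part is the empty string are short-circuited:
  // instead of evacuating the wrapper, the slot is redirected to the first
  // part (unless marks must be transferred, in which case the cons string is
  // evacuated like any other pointer object).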
2146   static inline void EvacuateShortcutCandidate(Map* map,
2147 HeapObject** slot,
2148 HeapObject* object) {
2149 ASSERT(IsShortcutCandidate(map->instance_type()));
2150
2151 Heap* heap = map->GetHeap();
2152
2153 if (marks_handling == IGNORE_MARKS &&
2154 ConsString::cast(object)->unchecked_second() ==
2155 heap->empty_string()) {
2156 HeapObject* first =
2157 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2158
2159 *slot = first;
2160
2161 if (!heap->InNewSpace(first)) {
2162 object->set_map_word(MapWord::FromForwardingAddress(first));
2163 return;
2164 }
2165
2166 MapWord first_word = first->map_word();
2167 if (first_word.IsForwardingAddress()) {
2168 HeapObject* target = first_word.ToForwardingAddress();
2169
2170 *slot = target;
2171 object->set_map_word(MapWord::FromForwardingAddress(target));
2172 return;
2173 }
2174
2175 heap->DoScavengeObject(first->map(), slot, first);
2176 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2177 return;
2178 }
2179
2180 int object_size = ConsString::kSize;
2181 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2182 map, slot, object, object_size);
2183 }
2184
2185 template<ObjectContents object_contents>
2186 class ObjectEvacuationStrategy {
2187 public:
2188 template<int object_size>
2189     static inline void VisitSpecialized(Map* map,
2190 HeapObject** slot,
2191 HeapObject* object) {
2192 EvacuateObject<object_contents, kObjectAlignment>(
2193 map, slot, object, object_size);
2194 }
2195
2196     static inline void Visit(Map* map,
2197 HeapObject** slot,
2198 HeapObject* object) {
2199 int object_size = map->instance_size();
2200 EvacuateObject<object_contents, kObjectAlignment>(
2201 map, slot, object, object_size);
2202 }
2203 };
2204
2205 static VisitorDispatchTable<ScavengingCallback> table_;
2206 };
2207
2208
2209 template<MarksHandling marks_handling,
2210 LoggingAndProfiling logging_and_profiling_mode>
2211 VisitorDispatchTable<ScavengingCallback>
2212 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2213
2214
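// Builds the dispatch tables for all four combinations of marks handling and
// logging/profiling, so that selecting a table later is a simple copy.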
2215 static void InitializeScavengingVisitorsTables() {
2216 ScavengingVisitor<TRANSFER_MARKS,
2217 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2218 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2219 ScavengingVisitor<TRANSFER_MARKS,
2220 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2221 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2222 }
2223
2224
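// Picks the visitor table matching the current configuration: marks are
// transferred only while incremental marking is active, and the
// logging/profiling variant is used only when a logger or profiler is
// attached.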
2225 void Heap::SelectScavengingVisitorsTable() {
2226 bool logging_and_profiling =
2227 isolate()->logger()->is_logging() ||
2228 isolate()->cpu_profiler()->is_profiling() ||
2229 (isolate()->heap_profiler() != NULL &&
2230 isolate()->heap_profiler()->is_tracking_object_moves());
2231
2232 if (!incremental_marking()->IsMarking()) {
2233 if (!logging_and_profiling) {
2234 scavenging_visitors_table_.CopyFrom(
2235 ScavengingVisitor<IGNORE_MARKS,
2236 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2237 } else {
2238 scavenging_visitors_table_.CopyFrom(
2239 ScavengingVisitor<IGNORE_MARKS,
2240 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2241 }
2242 } else {
2243 if (!logging_and_profiling) {
2244 scavenging_visitors_table_.CopyFrom(
2245 ScavengingVisitor<TRANSFER_MARKS,
2246 LOGGING_AND_PROFILING_DISABLED>::GetTable());
2247 } else {
2248 scavenging_visitors_table_.CopyFrom(
2249 ScavengingVisitor<TRANSFER_MARKS,
2250 LOGGING_AND_PROFILING_ENABLED>::GetTable());
2251 }
2252
2253 if (incremental_marking()->IsCompacting()) {
2254       // When compacting, forbid short-circuiting of cons strings.
2255       // Scavenging code relies on the fact that a new space object
2256       // cannot be evacuated into an evacuation candidate, but
2257       // short-circuiting violates this assumption.
2258 scavenging_visitors_table_.Register(
2259 StaticVisitorBase::kVisitShortcutCandidate,
2260 scavenging_visitors_table_.GetVisitorById(
2261 StaticVisitorBase::kVisitConsString));
2262 }
2263 }
2264 }
2265
2266
2267 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2268 SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
2269 MapWord first_word = object->map_word();
2270 SLOW_ASSERT(!first_word.IsForwardingAddress());
2271 Map* map = first_word.ToMap();
2272 map->GetHeap()->DoScavengeObject(map, p, object);
2273 }
2274
2275
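// Allocates a map with just enough fields initialized to be usable during
// bootstrapping; the remaining fields (prototype, constructor, descriptors,
// code cache) are filled in later by CreateInitialMaps.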
2276 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2277 int instance_size) {
2278 Object* result;
2279 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2280 if (!allocation.To(&result)) return allocation;
2281
2282 // Map::cast cannot be used due to uninitialized map field.
2283 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2284 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2285 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2286 reinterpret_cast<Map*>(result)->set_visitor_id(
2287 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2288 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2289 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2290 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2291 reinterpret_cast<Map*>(result)->set_bit_field(0);
2292 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2293 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2294 Map::OwnsDescriptors::encode(true);
2295 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2296 return result;
2297 }
2298
2299
2300 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2301 int instance_size,
2302 ElementsKind elements_kind) {
2303 HeapObject* result;
2304 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2305 if (!allocation.To(&result)) return allocation;
2306
2307 result->set_map_no_write_barrier(meta_map());
2308 Map* map = Map::cast(result);
2309 map->set_instance_type(instance_type);
2310 map->set_visitor_id(
2311 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2312 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2313 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2314 map->set_instance_size(instance_size);
2315 map->set_inobject_properties(0);
2316 map->set_pre_allocated_property_fields(0);
2317 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2318 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2319 SKIP_WRITE_BARRIER);
2320 map->init_back_pointer(undefined_value());
2321 map->set_unused_property_fields(0);
2322 map->set_instance_descriptors(empty_descriptor_array());
2323 map->set_bit_field(0);
2324 map->set_bit_field2(1 << Map::kIsExtensible);
2325 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2326 Map::OwnsDescriptors::encode(true);
2327 map->set_bit_field3(bit_field3);
2328 map->set_elements_kind(elements_kind);
2329
2330 return map;
2331 }
2332
2333
2334 AllocationResult Heap::AllocateFillerObject(int size,
2335 bool double_align,
2336 AllocationSpace space) {
2337 HeapObject* obj;
2338 { AllocationResult allocation = AllocateRaw(size, space, space);
2339 if (!allocation.To(&obj)) return allocation;
2340 }
2341 #ifdef DEBUG
2342 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2343 ASSERT(chunk->owner()->identity() == space);
2344 #endif
2345 CreateFillerObjectAt(obj->address(), size);
2346 return obj;
2347 }
2348
2349
2350 const Heap::StringTypeTable Heap::string_type_table[] = {
2351 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2352 {type, size, k##camel_name##MapRootIndex},
2353 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2354 #undef STRING_TYPE_ELEMENT
2355 };
2356
2357
2358 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2359 #define CONSTANT_STRING_ELEMENT(name, contents) \
2360 {contents, k##name##RootIndex},
2361 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2362 #undef CONSTANT_STRING_ELEMENT
2363 };
2364
2365
2366 const Heap::StructTable Heap::struct_table[] = {
2367 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2368 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2369 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2370 #undef STRUCT_TABLE_ELEMENT
2371 };
2372
2373
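// Bootstraps the root map set: partial maps first (meta map, fixed array,
// oddball and constant pool maps), then the empty arrays and oddballs they
// depend on, after which the partial maps are completed and the remaining map
// roots and empty typed arrays are allocated.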
2374 bool Heap::CreateInitialMaps() {
2375 HeapObject* obj;
2376 { AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2377 if (!allocation.To(&obj)) return false;
2378 }
2379 // Map::cast cannot be used due to uninitialized map field.
2380 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2381 set_meta_map(new_meta_map);
2382 new_meta_map->set_map(new_meta_map);
2383
2384 { // Partial map allocation
2385 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2386 { Map* map; \
2387 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2388 set_##field_name##_map(map); \
2389 }
2390
2391 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2392 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2393 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2394 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2395 constant_pool_array);
2396
2397 #undef ALLOCATE_PARTIAL_MAP
2398 }
2399
2400 // Allocate the empty array.
2401 { AllocationResult allocation = AllocateEmptyFixedArray();
2402 if (!allocation.To(&obj)) return false;
2403 }
2404 set_empty_fixed_array(FixedArray::cast(obj));
2405
2406 { AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2407 if (!allocation.To(&obj)) return false;
2408 }
2409 set_null_value(Oddball::cast(obj));
2410 Oddball::cast(obj)->set_kind(Oddball::kNull);
2411
2412 { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2413 if (!allocation.To(&obj)) return false;
2414 }
2415 set_undefined_value(Oddball::cast(obj));
2416 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2417 ASSERT(!InNewSpace(undefined_value()));
2418
2419 // Set preliminary exception sentinel value before actually initializing it.
2420 set_exception(null_value());
2421
2422 // Allocate the empty descriptor array.
2423 { AllocationResult allocation = AllocateEmptyFixedArray();
2424 if (!allocation.To(&obj)) return false;
2425 }
2426 set_empty_descriptor_array(DescriptorArray::cast(obj));
2427
2428 // Allocate the constant pool array.
2429 { AllocationResult allocation = AllocateEmptyConstantPoolArray();
2430 if (!allocation.To(&obj)) return false;
2431 }
2432 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2433
2434 // Fix the instance_descriptors for the existing maps.
2435 meta_map()->set_code_cache(empty_fixed_array());
2436 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2437 meta_map()->init_back_pointer(undefined_value());
2438 meta_map()->set_instance_descriptors(empty_descriptor_array());
2439
2440 fixed_array_map()->set_code_cache(empty_fixed_array());
2441 fixed_array_map()->set_dependent_code(
2442 DependentCode::cast(empty_fixed_array()));
2443 fixed_array_map()->init_back_pointer(undefined_value());
2444 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2445
2446 undefined_map()->set_code_cache(empty_fixed_array());
2447 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2448 undefined_map()->init_back_pointer(undefined_value());
2449 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2450
2451 null_map()->set_code_cache(empty_fixed_array());
2452 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2453 null_map()->init_back_pointer(undefined_value());
2454 null_map()->set_instance_descriptors(empty_descriptor_array());
2455
2456 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2457 constant_pool_array_map()->set_dependent_code(
2458 DependentCode::cast(empty_fixed_array()));
2459 constant_pool_array_map()->init_back_pointer(undefined_value());
2460 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2461
2462 // Fix prototype object for existing maps.
2463 meta_map()->set_prototype(null_value());
2464 meta_map()->set_constructor(null_value());
2465
2466 fixed_array_map()->set_prototype(null_value());
2467 fixed_array_map()->set_constructor(null_value());
2468
2469 undefined_map()->set_prototype(null_value());
2470 undefined_map()->set_constructor(null_value());
2471
2472 null_map()->set_prototype(null_value());
2473 null_map()->set_constructor(null_value());
2474
2475 constant_pool_array_map()->set_prototype(null_value());
2476 constant_pool_array_map()->set_constructor(null_value());
2477
2478 { // Map allocation
2479 #define ALLOCATE_MAP(instance_type, size, field_name) \
2480 { Map* map; \
2481 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2482 set_##field_name##_map(map); \
2483 }
2484
2485 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2486 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2487
2488 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2489 ASSERT(fixed_array_map() != fixed_cow_array_map());
2490
2491 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2492 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2493 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2494 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2495
2496 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2497 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2498 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2499 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2500 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2501 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2502 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2503
2504 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2505 const StringTypeTable& entry = string_type_table[i];
2506 { AllocationResult allocation = AllocateMap(entry.type, entry.size);
2507 if (!allocation.To(&obj)) return false;
2508 }
2509 // Mark cons string maps as unstable, because their objects can change
2510 // maps during GC.
2511 Map* map = Map::cast(obj);
2512 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2513 roots_[entry.index] = map;
2514 }
2515
2516 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2517 undetectable_string_map()->set_is_undetectable();
2518
2519 ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
2520 undetectable_ascii_string_map()->set_is_undetectable();
2521
2522 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2523 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2524 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2525
2526 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2527 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2528 external_##type##_array)
2529
2530 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2531 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2532
2533 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2534 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, \
2535 fixed_##type##_array)
2536
2537 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2538 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2539
2540 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2541
2542 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2543
2544 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2545 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2546 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2547 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2548
2549
2550 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2551 const StructTable& entry = struct_table[i];
2552 Map* map;
2553 if (!AllocateMap(entry.type, entry.size).To(&map))
2554 return false;
2555 roots_[entry.index] = map;
2556 }
2557
2558 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2559 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2560
2561 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2562 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2563 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2564 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2565 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2566 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2567
2568 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2569 native_context_map()->set_dictionary_map(true);
2570 native_context_map()->set_visitor_id(
2571 StaticVisitorBase::kVisitNativeContext);
2572
2573 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2574 shared_function_info)
2575
2576 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
2577 message_object)
2578 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
2579 external)
2580 external_map()->set_is_extensible(false);
2581 #undef ALLOCATE_VARSIZE_MAP
2582 #undef ALLOCATE_MAP
2583 }
2584
2585 { // Empty arrays
2586 { ByteArray* byte_array;
2587 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2588 set_empty_byte_array(byte_array);
2589 }
2590
2591 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2592 { ExternalArray* obj; \
2593 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2594 return false; \
2595 set_empty_external_##type##_array(obj); \
2596 }
2597
2598 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2599 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2600
2601 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2602 { FixedTypedArrayBase* obj; \
2603 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2604 return false; \
2605 set_empty_fixed_##type##_array(obj); \
2606 }
2607
2608 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2609 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2610 }
2611 ASSERT(!InNewSpace(empty_fixed_array()));
2612 return true;
2613 }
2614
2615
2616 AllocationResult Heap::AllocateHeapNumber(double value,
2617 PretenureFlag pretenure) {
2618 // Statically ensure that it is safe to allocate heap numbers in paged
2619 // spaces.
2620 int size = HeapNumber::kSize;
2621 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2622
2623 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2624
2625 HeapObject* result;
2626 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2627 if (!allocation.To(&result)) return allocation;
2628 }
2629
2630 result->set_map_no_write_barrier(heap_number_map());
2631 HeapNumber::cast(result)->set_value(value);
2632 return result;
2633 }
2634
2635
2636 AllocationResult Heap::AllocateCell(Object* value) {
2637 int size = Cell::kSize;
2638 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2639
2640 HeapObject* result;
2641 { AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2642 if (!allocation.To(&result)) return allocation;
2643 }
2644 result->set_map_no_write_barrier(cell_map());
2645 Cell::cast(result)->set_value(value);
2646 return result;
2647 }
2648
2649
2650 AllocationResult Heap::AllocatePropertyCell() {
2651 int size = PropertyCell::kSize;
2652 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2653
2654 HeapObject* result;
2655 AllocationResult allocation =
2656 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2657 if (!allocation.To(&result)) return allocation;
2658
2659 result->set_map_no_write_barrier(global_property_cell_map());
2660 PropertyCell* cell = PropertyCell::cast(result);
2661 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2662 SKIP_WRITE_BARRIER);
2663 cell->set_value(the_hole_value());
2664 cell->set_type(HeapType::None());
2665 return result;
2666 }
2667
2668
2669 void Heap::CreateApiObjects() {
2670 HandleScope scope(isolate());
2671 Factory* factory = isolate()->factory();
2672 Handle<Map> new_neander_map =
2673 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2674
2675 // Don't use Smi-only elements optimizations for objects with the neander
2676 // map. There are too many cases where element values are set directly with a
2677 // bottleneck to trap the Smi-only -> fast elements transition, and there
2678   // appears to be no benefit in optimizing this case.
2679 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2680 set_neander_map(*new_neander_map);
2681
2682 Handle<JSObject> listeners = factory->NewNeanderObject();
2683 Handle<FixedArray> elements = factory->NewFixedArray(2);
2684 elements->set(0, Smi::FromInt(0));
2685 listeners->set_elements(*elements);
2686 set_message_listeners(*listeners);
2687 }
2688
2689
2690 void Heap::CreateJSEntryStub() {
2691 JSEntryStub stub(isolate());
2692 set_js_entry_code(*stub.GetCode());
2693 }
2694
2695
2696 void Heap::CreateJSConstructEntryStub() {
2697 JSConstructEntryStub stub(isolate());
2698 set_js_construct_entry_code(*stub.GetCode());
2699 }
2700
2701
2702 void Heap::CreateFixedStubs() {
2703 // Here we create roots for fixed stubs. They are needed at GC
2704 // for cooking and uncooking (check out frames.cc).
2705   // This eliminates the need for doing a dictionary lookup in the
2706 // stub cache for these stubs.
2707 HandleScope scope(isolate());
2708
2709 // Create stubs that should be there, so we don't unexpectedly have to
2710 // create them if we need them during the creation of another stub.
2711 // Stub creation mixes raw pointers and handles in an unsafe manner so
2712 // we cannot create stubs while we are creating stubs.
2713 CodeStub::GenerateStubsAheadOfTime(isolate());
2714
2715 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2716 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2717 // is created.
2718
2719   // gcc-4.4 has a problem generating correct code for the following snippet:
2720 // { JSEntryStub stub;
2721 // js_entry_code_ = *stub.GetCode();
2722 // }
2723 // { JSConstructEntryStub stub;
2724 // js_construct_entry_code_ = *stub.GetCode();
2725 // }
2726   // To work around the problem, use separate functions without inlining.
2727 Heap::CreateJSEntryStub();
2728 Heap::CreateJSConstructEntryStub();
2729 }
2730
2731
2732 void Heap::CreateInitialObjects() {
2733 HandleScope scope(isolate());
2734 Factory* factory = isolate()->factory();
2735
2736 // The -0 value must be set before NumberFromDouble works.
2737 set_minus_zero_value(*factory->NewHeapNumber(-0.0, TENURED));
2738 ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2739
2740 set_nan_value(*factory->NewHeapNumber(OS::nan_value(), TENURED));
2741 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, TENURED));
2742
2743 // The hole has not been created yet, but we want to put something
2744   // predictable in the gaps in the string table, so let's make that Smi zero.
2745 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2746
2747 // Allocate initial string table.
2748 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2749
2750 // Finish initializing oddballs after creating the string table.
2751 Oddball::Initialize(isolate(),
2752 factory->undefined_value(),
2753 "undefined",
2754 factory->nan_value(),
2755 Oddball::kUndefined);
2756
2757 // Initialize the null_value.
2758 Oddball::Initialize(isolate(),
2759 factory->null_value(),
2760 "null",
2761 handle(Smi::FromInt(0), isolate()),
2762 Oddball::kNull);
2763
2764 set_true_value(*factory->NewOddball(factory->boolean_map(),
2765 "true",
2766 handle(Smi::FromInt(1), isolate()),
2767 Oddball::kTrue));
2768
2769 set_false_value(*factory->NewOddball(factory->boolean_map(),
2770 "false",
2771 handle(Smi::FromInt(0), isolate()),
2772 Oddball::kFalse));
2773
2774 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(),
2775 "hole",
2776 handle(Smi::FromInt(-1), isolate()),
2777 Oddball::kTheHole));
2778
2779 set_uninitialized_value(
2780 *factory->NewOddball(factory->uninitialized_map(),
2781 "uninitialized",
2782 handle(Smi::FromInt(-1), isolate()),
2783 Oddball::kUninitialized));
2784
2785 set_arguments_marker(*factory->NewOddball(factory->arguments_marker_map(),
2786 "arguments_marker",
2787 handle(Smi::FromInt(-4), isolate()),
2788 Oddball::kArgumentMarker));
2789
2790 set_no_interceptor_result_sentinel(
2791 *factory->NewOddball(factory->no_interceptor_result_sentinel_map(),
2792 "no_interceptor_result_sentinel",
2793 handle(Smi::FromInt(-2), isolate()),
2794 Oddball::kOther));
2795
2796 set_termination_exception(
2797 *factory->NewOddball(factory->termination_exception_map(),
2798 "termination_exception",
2799 handle(Smi::FromInt(-3), isolate()),
2800 Oddball::kOther));
2801
2802 set_exception(
2803 *factory->NewOddball(factory->exception_map(),
2804 "exception",
2805 handle(Smi::FromInt(-5), isolate()),
2806 Oddball::kException));
2807
2808 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2809 Handle<String> str =
2810 factory->InternalizeUtf8String(constant_string_table[i].contents);
2811 roots_[constant_string_table[i].index] = *str;
2812 }
2813
2814 // Allocate the hidden string which is used to identify the hidden properties
2815 // in JSObjects. The hash code has a special value so that it will not match
2816 // the empty string when searching for the property. It cannot be part of the
2817 // loop above because it needs to be allocated manually with the special
2818 // hash code in place. The hash code for the hidden_string is zero to ensure
2819 // that it will always be at the first entry in property descriptors.
2820 hidden_string_ = *factory->NewOneByteInternalizedString(
2821 OneByteVector("", 0), String::kEmptyStringHash);
2822
2823 // Create the code_stubs dictionary. The initial size is set to avoid
2824 // expanding the dictionary during bootstrapping.
2825 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2826
2827 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2828 // is set to avoid expanding the dictionary during bootstrapping.
2829 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2830
2831 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2832 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2833
2834 set_instanceof_cache_function(Smi::FromInt(0));
2835 set_instanceof_cache_map(Smi::FromInt(0));
2836 set_instanceof_cache_answer(Smi::FromInt(0));
2837
2838 CreateFixedStubs();
2839
2840 // Allocate the dictionary of intrinsic function names.
2841 Handle<NameDictionary> intrinsic_names =
2842 NameDictionary::New(isolate(), Runtime::kNumFunctions);
2843 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2844 set_intrinsic_function_names(*intrinsic_names);
2845
2846 set_number_string_cache(*factory->NewFixedArray(
2847 kInitialNumberStringCacheSize * 2, TENURED));
2848
2849 // Allocate cache for single character one byte strings.
2850 set_single_character_string_cache(*factory->NewFixedArray(
2851 String::kMaxOneByteCharCode + 1, TENURED));
2852
2853 // Allocate cache for string split and regexp-multiple.
2854 set_string_split_cache(*factory->NewFixedArray(
2855 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2856 set_regexp_multiple_cache(*factory->NewFixedArray(
2857 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2858
2859 // Allocate cache for external strings pointing to native source code.
2860 set_natives_source_cache(*factory->NewFixedArray(
2861 Natives::GetBuiltinsCount()));
2862
2863 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2864
2865 // The symbol registry is initialized lazily.
2866 set_symbol_registry(undefined_value());
2867
2868 // Allocate object to hold object observation state.
2869 set_observation_state(*factory->NewJSObjectFromMap(
2870 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2871
2872 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2873 // Number of queued microtasks stored in Isolate::pending_microtask_count().
2874 set_microtask_queue(empty_fixed_array());
2875
2876 set_frozen_symbol(*factory->NewPrivateSymbol());
2877 set_nonexistent_symbol(*factory->NewPrivateSymbol());
2878 set_elements_transition_symbol(*factory->NewPrivateSymbol());
2879 set_uninitialized_symbol(*factory->NewPrivateSymbol());
2880 set_megamorphic_symbol(*factory->NewPrivateSymbol());
2881 set_observed_symbol(*factory->NewPrivateSymbol());
2882
2883 Handle<SeededNumberDictionary> slow_element_dictionary =
2884 SeededNumberDictionary::New(isolate(), 0, TENURED);
2885 slow_element_dictionary->set_requires_slow_elements();
2886 set_empty_slow_element_dictionary(*slow_element_dictionary);
2887
2888 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2889
2890 // Handling of script id generation is in Factory::NewScript.
2891 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2892
2893 set_allocation_sites_scratchpad(*factory->NewFixedArray(
2894 kAllocationSiteScratchpadSize, TENURED));
2895 InitializeAllocationSitesScratchpad();
2896
2897 // Initialize keyed lookup cache.
2898 isolate_->keyed_lookup_cache()->Clear();
2899
2900 // Initialize context slot cache.
2901 isolate_->context_slot_cache()->Clear();
2902
2903 // Initialize descriptor cache.
2904 isolate_->descriptor_lookup_cache()->Clear();
2905
2906 // Initialize compilation cache.
2907 isolate_->compilation_cache()->Clear();
2908 }
2909
2910
2911 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2912 RootListIndex writable_roots[] = {
2913 kStoreBufferTopRootIndex,
2914 kStackLimitRootIndex,
2915 kNumberStringCacheRootIndex,
2916 kInstanceofCacheFunctionRootIndex,
2917 kInstanceofCacheMapRootIndex,
2918 kInstanceofCacheAnswerRootIndex,
2919 kCodeStubsRootIndex,
2920 kNonMonomorphicCacheRootIndex,
2921 kPolymorphicCodeCacheRootIndex,
2922 kLastScriptIdRootIndex,
2923 kEmptyScriptRootIndex,
2924 kRealStackLimitRootIndex,
2925 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2926 kConstructStubDeoptPCOffsetRootIndex,
2927 kGetterStubDeoptPCOffsetRootIndex,
2928 kSetterStubDeoptPCOffsetRootIndex,
2929 kStringTableRootIndex,
2930 };
2931
2932 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2933 if (root_index == writable_roots[i])
2934 return true;
2935 }
2936 return false;
2937 }
2938
2939
2940 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2941 return !RootCanBeWrittenAfterInitialization(root_index) &&
2942 !InNewSpace(roots_array_start()[root_index]);
2943 }
2944
2945
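// Looks up a cached result for a string-split or regexp-multiple operation.
// Two consecutive cache entries derived from the key string's hash are
// probed; on a miss, Smi zero is returned.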
2946 Object* RegExpResultsCache::Lookup(Heap* heap,
2947 String* key_string,
2948 Object* key_pattern,
2949 ResultsCacheType type) {
2950 FixedArray* cache;
2951 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2952 if (type == STRING_SPLIT_SUBSTRINGS) {
2953 ASSERT(key_pattern->IsString());
2954 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2955 cache = heap->string_split_cache();
2956 } else {
2957 ASSERT(type == REGEXP_MULTIPLE_INDICES);
2958 ASSERT(key_pattern->IsFixedArray());
2959 cache = heap->regexp_multiple_cache();
2960 }
2961
2962 uint32_t hash = key_string->Hash();
2963 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2964 ~(kArrayEntriesPerCacheEntry - 1));
2965 if (cache->get(index + kStringOffset) == key_string &&
2966 cache->get(index + kPatternOffset) == key_pattern) {
2967 return cache->get(index + kArrayOffset);
2968 }
2969 index =
2970 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2971 if (cache->get(index + kStringOffset) == key_string &&
2972 cache->get(index + kPatternOffset) == key_pattern) {
2973 return cache->get(index + kArrayOffset);
2974 }
2975 return Smi::FromInt(0);
2976 }
2977
2978
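// Inserts a result into the cache: the primary entry is used if free, then
// the secondary entry; if both are occupied, the secondary entry is cleared
// and the primary one is overwritten. Short substring lists are internalized
// and the value array is made copy-on-write.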
2979 void RegExpResultsCache::Enter(Isolate* isolate,
2980 Handle<String> key_string,
2981 Handle<Object> key_pattern,
2982 Handle<FixedArray> value_array,
2983 ResultsCacheType type) {
2984 Factory* factory = isolate->factory();
2985 Handle<FixedArray> cache;
2986 if (!key_string->IsInternalizedString()) return;
2987 if (type == STRING_SPLIT_SUBSTRINGS) {
2988 ASSERT(key_pattern->IsString());
2989 if (!key_pattern->IsInternalizedString()) return;
2990 cache = factory->string_split_cache();
2991 } else {
2992 ASSERT(type == REGEXP_MULTIPLE_INDICES);
2993 ASSERT(key_pattern->IsFixedArray());
2994 cache = factory->regexp_multiple_cache();
2995 }
2996
2997 uint32_t hash = key_string->Hash();
2998 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2999 ~(kArrayEntriesPerCacheEntry - 1));
3000 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3001 cache->set(index + kStringOffset, *key_string);
3002 cache->set(index + kPatternOffset, *key_pattern);
3003 cache->set(index + kArrayOffset, *value_array);
3004 } else {
3005 uint32_t index2 =
3006 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3007 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3008 cache->set(index2 + kStringOffset, *key_string);
3009 cache->set(index2 + kPatternOffset, *key_pattern);
3010 cache->set(index2 + kArrayOffset, *value_array);
3011 } else {
3012 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3013 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3014 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3015 cache->set(index + kStringOffset, *key_string);
3016 cache->set(index + kPatternOffset, *key_pattern);
3017 cache->set(index + kArrayOffset, *value_array);
3018 }
3019 }
3020 // If the array is a reasonably short list of substrings, convert it into a
3021 // list of internalized strings.
3022 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3023 for (int i = 0; i < value_array->length(); i++) {
3024 Handle<String> str(String::cast(value_array->get(i)), isolate);
3025 Handle<String> internalized_str = factory->InternalizeString(str);
3026 value_array->set(i, *internalized_str);
3027 }
3028 }
3029 // Convert backing store to a copy-on-write array.
3030 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3031 }
3032
3033
3034 void RegExpResultsCache::Clear(FixedArray* cache) {
3035 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3036 cache->set(i, Smi::FromInt(0));
3037 }
3038 }
3039
3040
3041 int Heap::FullSizeNumberStringCacheLength() {
3042 // Compute the size of the number string cache based on the max new space size.
3043 // The number string cache has a minimum size based on twice the initial cache
3044 // size to ensure that it is bigger after being made 'full size'.
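// Worked example (assuming a 16 MB max semispace): 16 MB / 512 = 32768, which is
// clamped to 0x4000 = 16384 entries, so the returned length is 32768 slots.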
3045 int number_string_cache_size = max_semi_space_size_ / 512;
3046 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3047 Min(0x4000, number_string_cache_size));
3048 // There is a string and a number per entry so the length is twice the number
3049 // of entries.
3050 return number_string_cache_size * 2;
3051 }
3052
3053
3054 void Heap::FlushNumberStringCache() {
3055 // Flush the number to string cache.
3056 int len = number_string_cache()->length();
3057 for (int i = 0; i < len; i++) {
3058 number_string_cache()->set_undefined(i);
3059 }
3060 }
3061
3062
3063 void Heap::FlushAllocationSitesScratchpad() {
3064 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3065 allocation_sites_scratchpad()->set_undefined(i);
3066 }
3067 allocation_sites_scratchpad_length_ = 0;
3068 }
3069
3070
3071 void Heap::InitializeAllocationSitesScratchpad() {
3072 ASSERT(allocation_sites_scratchpad()->length() ==
3073 kAllocationSiteScratchpadSize);
3074 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3075 allocation_sites_scratchpad()->set_undefined(i);
3076 }
3077 }
3078
3079
3080 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3081 ScratchpadSlotMode mode) {
3082 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3083 // We cannot use the normal write-barrier because slots need to be
3084 // recorded with non-incremental marking as well. We have to explicitly
3085 // record the slot to take evacuation candidates into account.
3086 allocation_sites_scratchpad()->set(
3087 allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
3088 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3089 allocation_sites_scratchpad_length_);
3090
3091 if (mode == RECORD_SCRATCHPAD_SLOT) {
3092 // We need to allow slots buffer overflow here since the evacuation
3093 // candidates are not part of the global list of old space pages and
3094 // releasing an evacuation candidate due to a slots buffer overflow
3095 // results in lost pages.
3096 mark_compact_collector()->RecordSlot(
3097 slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
3098 }
3099 allocation_sites_scratchpad_length_++;
3100 }
3101 }
3102
3103
3104 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3105 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3106 }
3107
3108
3109 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3110 ExternalArrayType array_type) {
3111 switch (array_type) {
3112 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3113 case kExternal##Type##Array: \
3114 return kExternal##Type##ArrayMapRootIndex;
3115
3116 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3117 #undef ARRAY_TYPE_TO_ROOT_INDEX
3118
3119 default:
3120 UNREACHABLE();
3121 return kUndefinedValueRootIndex;
3122 }
3123 }
3124
3125
3126 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3127 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3128 }
3129
3130
3131 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3132 ExternalArrayType array_type) {
3133 switch (array_type) {
3134 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3135 case kExternal##Type##Array: \
3136 return kFixed##Type##ArrayMapRootIndex;
3137
3138 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3139 #undef ARRAY_TYPE_TO_ROOT_INDEX
3140
3141 default:
3142 UNREACHABLE();
3143 return kUndefinedValueRootIndex;
3144 }
3145 }
3146
3147
3148 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3149 ElementsKind elementsKind) {
3150 switch (elementsKind) {
3151 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3152 case EXTERNAL_##TYPE##_ELEMENTS: \
3153 return kEmptyExternal##Type##ArrayRootIndex;
3154
3155 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3156 #undef ELEMENT_KIND_TO_ROOT_INDEX
3157
3158 default:
3159 UNREACHABLE();
3160 return kUndefinedValueRootIndex;
3161 }
3162 }
3163
3164
3165 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3166 ElementsKind elementsKind) {
3167 switch (elementsKind) {
3168 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3169 case TYPE##_ELEMENTS: \
3170 return kEmptyFixed##Type##ArrayRootIndex;
3171
3172 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3173 #undef ELEMENT_KIND_TO_ROOT_INDEX
3174 default:
3175 UNREACHABLE();
3176 return kUndefinedValueRootIndex;
3177 }
3178 }
3179
3180
3181 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3182 return ExternalArray::cast(
3183 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3184 }
3185
3186
3187 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3188 return FixedTypedArrayBase::cast(
3189 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3190 }
3191
3192
3193 AllocationResult Heap::AllocateForeign(Address address,
3194 PretenureFlag pretenure) {
3195 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3196 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3197 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3198 Foreign* result;
3199 AllocationResult allocation = Allocate(foreign_map(), space);
3200 if (!allocation.To(&result)) return allocation;
3201 result->set_foreign_address(address);
3202 return result;
3203 }
3204
3205
3206 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3207 if (length < 0 || length > ByteArray::kMaxLength) {
3208 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3209 }
3210 int size = ByteArray::SizeFor(length);
3211 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3212 HeapObject* result;
3213 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3214 if (!allocation.To(&result)) return allocation;
3215 }
3216
3217 result->set_map_no_write_barrier(byte_array_map());
3218 ByteArray::cast(result)->set_length(length);
3219 return result;
3220 }
3221
3222
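// Added note (descriptive, based on the code below): fillers keep the heap
// iterable across freed gaps -- one- and two-pointer gaps use dedicated filler
// maps, while larger gaps become FreeSpace objects that record their size.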
3223 void Heap::CreateFillerObjectAt(Address addr, int size) {
3224 if (size == 0) return;
3225 HeapObject* filler = HeapObject::FromAddress(addr);
3226 if (size == kPointerSize) {
3227 filler->set_map_no_write_barrier(one_pointer_filler_map());
3228 } else if (size == 2 * kPointerSize) {
3229 filler->set_map_no_write_barrier(two_pointer_filler_map());
3230 } else {
3231 filler->set_map_no_write_barrier(free_space_map());
3232 FreeSpace::cast(filler)->set_size(size);
3233 }
3234 }
3235
3236
3237 bool Heap::CanMoveObjectStart(HeapObject* object) {
3238 Address address = object->address();
3239 bool is_in_old_pointer_space = InOldPointerSpace(address);
3240 bool is_in_old_data_space = InOldDataSpace(address);
3241
3242 if (lo_space()->Contains(object)) return false;
3243
3244 Page* page = Page::FromAddress(address);
3245 // We can move the object start if any of the following holds:
3246 // (1) the object is not in old pointer or old data space,
3247 // (2) the page of the object was already swept,
3248 // (3) the page was already concurrently swept. This case is an optimization
3249 // for concurrent sweeping. The WasSwept predicate for concurrently swept
3250 // pages is set after sweeping all pages.
3251 return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3252 page->WasSwept() ||
3253 (mark_compact_collector()->AreSweeperThreadsActivated() &&
3254 page->parallel_sweeping() <=
3255 MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
3256 }
3257
3258
3259 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3260 if (incremental_marking()->IsMarking() &&
3261 Marking::IsBlack(Marking::MarkBitFrom(address))) {
3262 if (mode == FROM_GC) {
3263 MemoryChunk::IncrementLiveBytesFromGC(address, by);
3264 } else {
3265 MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3266 }
3267 }
3268 }
3269
3270
3271 AllocationResult Heap::AllocateExternalArray(int length,
3272 ExternalArrayType array_type,
3273 void* external_pointer,
3274 PretenureFlag pretenure) {
3275 int size = ExternalArray::kAlignedSize;
3276 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3277 HeapObject* result;
3278 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3279 if (!allocation.To(&result)) return allocation;
3280 }
3281
3282 result->set_map_no_write_barrier(
3283 MapForExternalArrayType(array_type));
3284 ExternalArray::cast(result)->set_length(length);
3285 ExternalArray::cast(result)->set_external_pointer(external_pointer);
3286 return result;
3287 }
3288
3289 static void ForFixedTypedArray(ExternalArrayType array_type,
3290 int* element_size,
3291 ElementsKind* element_kind) {
3292 switch (array_type) {
3293 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3294 case kExternal##Type##Array: \
3295 *element_size = size; \
3296 *element_kind = TYPE##_ELEMENTS; \
3297 return;
3298
3299 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3300 #undef TYPED_ARRAY_CASE
3301
3302 default:
3303 *element_size = 0; // Bogus
3304 *element_kind = UINT8_ELEMENTS; // Bogus
3305 UNREACHABLE();
3306 }
3307 }
3308
3309
3310 AllocationResult Heap::AllocateFixedTypedArray(int length,
3311 ExternalArrayType array_type,
3312 PretenureFlag pretenure) {
3313 int element_size;
3314 ElementsKind elements_kind;
3315 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3316 int size = OBJECT_POINTER_ALIGN(
3317 length * element_size + FixedTypedArrayBase::kDataOffset);
3318 #ifndef V8_HOST_ARCH_64_BIT
3319 if (array_type == kExternalFloat64Array) {
3320 size += kPointerSize;
3321 }
3322 #endif
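// Added note (descriptive, based on the code above and below): the extra word
// reserved on 32-bit hosts gives EnsureDoubleAligned room to shift the object
// onto an 8-byte boundary for the Float64 data.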
3323 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3324
3325 HeapObject* object;
3326 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3327 if (!allocation.To(&object)) return allocation;
3328
3329 if (array_type == kExternalFloat64Array) {
3330 object = EnsureDoubleAligned(this, object, size);
3331 }
3332
3333 object->set_map(MapForFixedTypedArray(array_type));
3334 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3335 elements->set_length(length);
3336 memset(elements->DataPtr(), 0, elements->DataSize());
3337 return elements;
3338 }
3339
3340
3341 AllocationResult Heap::AllocateCode(int object_size,
3342 bool immovable) {
3343 ASSERT(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3344 AllocationResult allocation;
3345 // Large code objects and code objects which should stay at a fixed address
3346 // are allocated in large object space.
3347 HeapObject* result;
3348 bool force_lo_space = object_size > code_space()->AreaSize();
3349 if (force_lo_space) {
3350 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3351 } else {
3352 allocation = AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3353 }
3354 if (!allocation.To(&result)) return allocation;
3355
3356 if (immovable && !force_lo_space &&
3357 // Objects on the first page of each space are never moved.
3358 !code_space_->FirstPage()->Contains(result->address())) {
3359 // Discard the first code allocation, which was on a page where it could be
3360 // moved.
3361 CreateFillerObjectAt(result->address(), object_size);
3362 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3363 if (!allocation.To(&result)) return allocation;
3364 }
3365
3366 result->set_map_no_write_barrier(code_map());
3367 Code* code = Code::cast(result);
3368 ASSERT(isolate_->code_range() == NULL ||
3369 !isolate_->code_range()->valid() ||
3370 isolate_->code_range()->contains(code->address()));
3371 code->set_gc_metadata(Smi::FromInt(0));
3372 code->set_ic_age(global_ic_age_);
3373 return code;
3374 }
3375
3376
3377 AllocationResult Heap::CopyCode(Code* code) {
3378 AllocationResult allocation;
3379 HeapObject* new_constant_pool;
3380 if (FLAG_enable_ool_constant_pool &&
3381 code->constant_pool() != empty_constant_pool_array()) {
3382 // Copy the constant pool, since edits to the copied code may modify
3383 // the constant pool.
3384 allocation = CopyConstantPoolArray(code->constant_pool());
3385 if (!allocation.To(&new_constant_pool)) return allocation;
3386 } else {
3387 new_constant_pool = empty_constant_pool_array();
3388 }
3389
3390 // Allocate an object the same size as the code object.
3391 int obj_size = code->Size();
3392 if (obj_size > code_space()->AreaSize()) {
3393 allocation = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3394 } else {
3395 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3396 }
3397
3398 HeapObject* result;
3399 if (!allocation.To(&result)) return allocation;
3400
3401 // Copy code object.
3402 Address old_addr = code->address();
3403 Address new_addr = result->address();
3404 CopyBlock(new_addr, old_addr, obj_size);
3405 Code* new_code = Code::cast(result);
3406
3407 // Update the constant pool.
3408 new_code->set_constant_pool(new_constant_pool);
3409
3410 // Relocate the copy.
3411 ASSERT(isolate_->code_range() == NULL ||
3412 !isolate_->code_range()->valid() ||
3413 isolate_->code_range()->contains(code->address()));
3414 new_code->Relocate(new_addr - old_addr);
3415 return new_code;
3416 }
3417
3418
3419 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3420 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3421 // do not risk leaving an uninitialized Code object (and breaking the heap).
3422 ByteArray* reloc_info_array;
3423 { AllocationResult allocation =
3424 AllocateByteArray(reloc_info.length(), TENURED);
3425 if (!allocation.To(&reloc_info_array)) return allocation;
3426 }
3427 HeapObject* new_constant_pool;
3428 if (FLAG_enable_ool_constant_pool &&
3429 code->constant_pool() != empty_constant_pool_array()) {
3430 // Copy the constant pool, since edits to the copied code may modify
3431 // the constant pool.
3432 AllocationResult allocation =
3433 CopyConstantPoolArray(code->constant_pool());
3434 if (!allocation.To(&new_constant_pool)) return allocation;
3435 } else {
3436 new_constant_pool = empty_constant_pool_array();
3437 }
3438
3439 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3440
3441 int new_obj_size = Code::SizeFor(new_body_size);
3442
3443 Address old_addr = code->address();
3444
3445 size_t relocation_offset =
3446 static_cast<size_t>(code->instruction_end() - old_addr);
3447
3448 AllocationResult allocation;
3449 if (new_obj_size > code_space()->AreaSize()) {
3450 allocation = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3451 } else {
3452 allocation = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3453 }
3454
3455 HeapObject* result;
3456 if (!allocation.To(&result)) return allocation;
3457
3458 // Copy code object.
3459 Address new_addr = result->address();
3460
3461 // Copy header and instructions.
3462 CopyBytes(new_addr, old_addr, relocation_offset);
3463
3464 Code* new_code = Code::cast(result);
3465 new_code->set_relocation_info(reloc_info_array);
3466
3467 // Update constant pool.
3468 new_code->set_constant_pool(new_constant_pool);
3469
3470 // Copy patched rinfo.
3471 CopyBytes(new_code->relocation_start(),
3472 reloc_info.start(),
3473 static_cast<size_t>(reloc_info.length()));
3474
3475 // Relocate the copy.
3476 ASSERT(isolate_->code_range() == NULL ||
3477 !isolate_->code_range()->valid() ||
3478 isolate_->code_range()->contains(code->address()));
3479 new_code->Relocate(new_addr - old_addr);
3480
3481 #ifdef VERIFY_HEAP
3482 if (FLAG_verify_heap) code->ObjectVerify();
3483 #endif
3484 return new_code;
3485 }
3486
3487
3488 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3489 AllocationSite* allocation_site) {
3490 memento->set_map_no_write_barrier(allocation_memento_map());
3491 ASSERT(allocation_site->map() == allocation_site_map());
3492 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3493 if (FLAG_allocation_site_pretenuring) {
3494 allocation_site->IncrementMementoCreateCount();
3495 }
3496 }
3497
3498
3499 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3500 AllocationSite* allocation_site) {
3501 ASSERT(gc_state_ == NOT_IN_GC);
3502 ASSERT(map->instance_type() != MAP_TYPE);
3503 // If allocation failures are disallowed, we may allocate in a different
3504 // space when new space is full and the object is not a large object.
3505 AllocationSpace retry_space =
3506 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3507 int size = map->instance_size();
3508 if (allocation_site != NULL) {
3509 size += AllocationMemento::kSize;
3510 }
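// Added note (descriptive, based on the code below): when an allocation site is
// given, the extra space holds an AllocationMemento placed directly after the
// object; it is initialized once the object has been allocated.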
3511 HeapObject* result;
3512 AllocationResult allocation = AllocateRaw(size, space, retry_space);
3513 if (!allocation.To(&result)) return allocation;
3514 // No need for write barrier since object is white and map is in old space.
3515 result->set_map_no_write_barrier(map);
3516 if (allocation_site != NULL) {
3517 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3518 reinterpret_cast<Address>(result) + map->instance_size());
3519 InitializeAllocationMemento(alloc_memento, allocation_site);
3520 }
3521 return result;
3522 }
3523
3524
3525 AllocationResult Heap::AllocateArgumentsObject(Object* callee, int length) {
3526 // To get fast allocation and map sharing for arguments objects we
3527 // allocate them based on an arguments boilerplate.
3528
3529 JSObject* boilerplate;
3530 int arguments_object_size;
3531 bool strict_mode_callee = callee->IsJSFunction() &&
3532 JSFunction::cast(callee)->shared()->strict_mode() == STRICT;
3533 if (strict_mode_callee) {
3534 boilerplate =
3535 isolate()->context()->native_context()->strict_arguments_boilerplate();
3536 arguments_object_size = kStrictArgumentsObjectSize;
3537 } else {
3538 boilerplate =
3539 isolate()->context()->native_context()->sloppy_arguments_boilerplate();
3540 arguments_object_size = kSloppyArgumentsObjectSize;
3541 }
3542
3543 // Check that the size of the boilerplate matches our
3544 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3545 // on the size being a known constant.
3546 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3547
3548 // Do the allocation.
3549 HeapObject* result;
3550 { AllocationResult allocation =
3551 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3552 if (!allocation.To(&result)) return allocation;
3553 }
3554
3555 // Copy the content. The arguments boilerplate doesn't have any
3556 // fields that point to new space so it's safe to skip the write
3557 // barrier here.
3558 CopyBlock(result->address(), boilerplate->address(), JSObject::kHeaderSize);
3559
3560 // Set the length property.
3561 JSObject* js_obj = JSObject::cast(result);
3562 js_obj->InObjectPropertyAtPut(
3563 kArgumentsLengthIndex, Smi::FromInt(length), SKIP_WRITE_BARRIER);
3564 // Set the callee property for sloppy mode arguments object only.
3565 if (!strict_mode_callee) {
3566 js_obj->InObjectPropertyAtPut(kArgumentsCalleeIndex, callee);
3567 }
3568
3569 // Check the state of the object
3570 ASSERT(js_obj->HasFastProperties());
3571 ASSERT(js_obj->HasFastObjectElements());
3572
3573 return js_obj;
3574 }
3575
3576
3577 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3578 FixedArray* properties,
3579 Map* map) {
3580 obj->set_properties(properties);
3581 obj->initialize_elements();
3582 // TODO(1240798): Initialize the object's body using valid initial values
3583 // according to the object's initial map. For example, if the map's
3584 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3585 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3586 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3587 // verification code has to cope with (temporarily) invalid objects. See,
3588 // for example, JSArray::JSArrayVerify.
3589 Object* filler;
3590 // We cannot always fill with one_pointer_filler_map because objects
3591 // created from API functions expect their internal fields to be initialized
3592 // with undefined_value.
3593 // Pre-allocated fields need to be initialized with undefined_value as well
3594 // so that object accesses before the constructor completes (e.g. in the
3595 // debugger) will not cause a crash.
3596 if (map->constructor()->IsJSFunction() &&
3597 JSFunction::cast(map->constructor())->
3598 IsInobjectSlackTrackingInProgress()) {
3599 // We might want to shrink the object later.
3600 ASSERT(obj->GetInternalFieldCount() == 0);
3601 filler = Heap::one_pointer_filler_map();
3602 } else {
3603 filler = Heap::undefined_value();
3604 }
3605 obj->InitializeBody(map, Heap::undefined_value(), filler);
3606 }
3607
3608
3609 AllocationResult Heap::AllocateJSObjectFromMap(
3610 Map* map,
3611 PretenureFlag pretenure,
3612 bool allocate_properties,
3613 AllocationSite* allocation_site) {
3614 // JSFunctions should be allocated using AllocateFunction to be
3615 // properly initialized.
3616 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3617
3618 // Both types of global objects should be allocated using
3619 // AllocateGlobalObject to be properly initialized.
3620 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3621 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3622
3623 // Allocate the backing storage for the properties.
3624 FixedArray* properties;
3625 if (allocate_properties) {
3626 int prop_size = map->InitialPropertiesLength();
3627 ASSERT(prop_size >= 0);
3628 { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3629 if (!allocation.To(&properties)) return allocation;
3630 }
3631 } else {
3632 properties = empty_fixed_array();
3633 }
3634
3635 // Allocate the JSObject.
3636 int size = map->instance_size();
3637 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3638 JSObject* js_obj;
3639 AllocationResult allocation = Allocate(map, space, allocation_site);
3640 if (!allocation.To(&js_obj)) return allocation;
3641
3642 // Initialize the JSObject.
3643 InitializeJSObjectFromMap(js_obj, properties, map);
3644 ASSERT(js_obj->HasFastElements() ||
3645 js_obj->HasExternalArrayElements() ||
3646 js_obj->HasFixedTypedArrayElements());
3647 return js_obj;
3648 }
3649
3650
3651 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3652 PretenureFlag pretenure,
3653 AllocationSite* allocation_site) {
3654 ASSERT(constructor->has_initial_map());
3655
3656 // Allocate the object based on the constructors initial map.
3657 AllocationResult allocation = AllocateJSObjectFromMap(
3658 constructor->initial_map(), pretenure, true, allocation_site);
3659 #ifdef DEBUG
3660 // Make sure result is NOT a global object if valid.
3661 HeapObject* obj;
3662 ASSERT(!allocation.To(&obj) || !obj->IsGlobalObject());
3663 #endif
3664 return allocation;
3665 }
3666
3667
3668 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3669 // Never used to copy functions. If functions need to be copied we
3670 // have to be careful to clear the literals array.
3671 SLOW_ASSERT(!source->IsJSFunction());
3672
3673 // Make the clone.
3674 Map* map = source->map();
3675 int object_size = map->instance_size();
3676 HeapObject* clone;
3677
3678 ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3679
3680 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3681
3682 // If we're forced to always allocate, we use the general allocation
3683 // functions which may leave us with an object in old space.
3684 if (always_allocate()) {
3685 { AllocationResult allocation =
3686 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3687 if (!allocation.To(&clone)) return allocation;
3688 }
3689 Address clone_address = clone->address();
3690 CopyBlock(clone_address,
3691 source->address(),
3692 object_size);
3693 // Update write barrier for all fields that lie beyond the header.
3694 RecordWrites(clone_address,
3695 JSObject::kHeaderSize,
3696 (object_size - JSObject::kHeaderSize) / kPointerSize);
3697 } else {
3698 wb_mode = SKIP_WRITE_BARRIER;
3699
3700 { int adjusted_object_size = site != NULL
3701 ? object_size + AllocationMemento::kSize
3702 : object_size;
3703 AllocationResult allocation =
3704 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3705 if (!allocation.To(&clone)) return allocation;
3706 }
3707 SLOW_ASSERT(InNewSpace(clone));
3708 // Since we know the clone is allocated in new space, we can copy
3709 // the contents without worrying about updating the write barrier.
3710 CopyBlock(clone->address(),
3711 source->address(),
3712 object_size);
3713
3714 if (site != NULL) {
3715 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3716 reinterpret_cast<Address>(clone) + object_size);
3717 InitializeAllocationMemento(alloc_memento, site);
3718 }
3719 }
3720
3721 SLOW_ASSERT(
3722 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
3723 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3724 FixedArray* properties = FixedArray::cast(source->properties());
3725 // Update elements if necessary.
3726 if (elements->length() > 0) {
3727 FixedArrayBase* elem;
3728 { AllocationResult allocation;
3729 if (elements->map() == fixed_cow_array_map()) {
3730 allocation = FixedArray::cast(elements);
3731 } else if (source->HasFastDoubleElements()) {
3732 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3733 } else {
3734 allocation = CopyFixedArray(FixedArray::cast(elements));
3735 }
3736 if (!allocation.To(&elem)) return allocation;
3737 }
3738 JSObject::cast(clone)->set_elements(elem, wb_mode);
3739 }
3740 // Update properties if necessary.
3741 if (properties->length() > 0) {
3742 FixedArray* prop;
3743 { AllocationResult allocation = CopyFixedArray(properties);
3744 if (!allocation.To(&prop)) return allocation;
3745 }
3746 JSObject::cast(clone)->set_properties(prop, wb_mode);
3747 }
3748 // Return the new clone.
3749 return clone;
3750 }
3751
3752
3753 static inline void WriteOneByteData(Vector<const char> vector,
3754 uint8_t* chars,
3755 int len) {
3756 // Only works for ascii.
3757 ASSERT(vector.length() == len);
3758 MemCopy(chars, vector.start(), len);
3759 }
3760
3761 static inline void WriteTwoByteData(Vector<const char> vector,
3762 uint16_t* chars,
3763 int len) {
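// Added note (descriptive, based on the loop below): decodes the UTF-8 input and
// writes UTF-16 code units; characters above the BMP expand to a lead/trail
// surrogate pair, i.e. two code units.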
3764 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3765 unsigned stream_length = vector.length();
3766 while (stream_length != 0) {
3767 unsigned consumed = 0;
3768 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3769 ASSERT(c != unibrow::Utf8::kBadChar);
3770 ASSERT(consumed <= stream_length);
3771 stream_length -= consumed;
3772 stream += consumed;
3773 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3774 len -= 2;
3775 if (len < 0) break;
3776 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3777 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3778 } else {
3779 len -= 1;
3780 if (len < 0) break;
3781 *chars++ = c;
3782 }
3783 }
3784 ASSERT(stream_length == 0);
3785 ASSERT(len == 0);
3786 }
3787
3788
3789 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3790 ASSERT(s->length() == len);
3791 String::WriteToFlat(s, chars, 0, len);
3792 }
3793
3794
3795 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3796 ASSERT(s->length() == len);
3797 String::WriteToFlat(s, chars, 0, len);
3798 }
3799
3800
3801 template<bool is_one_byte, typename T>
3802 AllocationResult Heap::AllocateInternalizedStringImpl(
3803 T t, int chars, uint32_t hash_field) {
3804 ASSERT(chars >= 0);
3805 // Compute map and object size.
3806 int size;
3807 Map* map;
3808
3809 ASSERT_LE(0, chars);
3810 ASSERT_GE(String::kMaxLength, chars);
3811 if (is_one_byte) {
3812 map = ascii_internalized_string_map();
3813 size = SeqOneByteString::SizeFor(chars);
3814 } else {
3815 map = internalized_string_map();
3816 size = SeqTwoByteString::SizeFor(chars);
3817 }
3818 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3819
3820 // Allocate string.
3821 HeapObject* result;
3822 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3823 if (!allocation.To(&result)) return allocation;
3824 }
3825
3826 result->set_map_no_write_barrier(map);
3827 // Set length and hash fields of the allocated string.
3828 String* answer = String::cast(result);
3829 answer->set_length(chars);
3830 answer->set_hash_field(hash_field);
3831
3832 ASSERT_EQ(size, answer->Size());
3833
3834 if (is_one_byte) {
3835 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3836 } else {
3837 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3838 }
3839 return answer;
3840 }
3841
3842
3843 // Need explicit instantiations.
3844 template
3845 AllocationResult Heap::AllocateInternalizedStringImpl<true>(
3846 String*, int, uint32_t);
3847 template
3848 AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3849 String*, int, uint32_t);
3850 template
3851 AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3852 Vector<const char>, int, uint32_t);
3853
3854
3855 AllocationResult Heap::AllocateRawOneByteString(int length,
3856 PretenureFlag pretenure) {
3857 ASSERT_LE(0, length);
3858 ASSERT_GE(String::kMaxLength, length);
3859 int size = SeqOneByteString::SizeFor(length);
3860 ASSERT(size <= SeqOneByteString::kMaxSize);
3861 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3862
3863 HeapObject* result;
3864 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3865 if (!allocation.To(&result)) return allocation;
3866 }
3867
3868 // Partially initialize the object.
3869 result->set_map_no_write_barrier(ascii_string_map());
3870 String::cast(result)->set_length(length);
3871 String::cast(result)->set_hash_field(String::kEmptyHashField);
3872 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3873
3874 return result;
3875 }
3876
3877
3878 AllocationResult Heap::AllocateRawTwoByteString(int length,
3879 PretenureFlag pretenure) {
3880 ASSERT_LE(0, length);
3881 ASSERT_GE(String::kMaxLength, length);
3882 int size = SeqTwoByteString::SizeFor(length);
3883 ASSERT(size <= SeqTwoByteString::kMaxSize);
3884 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3885
3886 HeapObject* result;
3887 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3888 if (!allocation.To(&result)) return allocation;
3889 }
3890
3891 // Partially initialize the object.
3892 result->set_map_no_write_barrier(string_map());
3893 String::cast(result)->set_length(length);
3894 String::cast(result)->set_hash_field(String::kEmptyHashField);
3895 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3896 return result;
3897 }
3898
3899
3900 AllocationResult Heap::AllocateEmptyFixedArray() {
3901 int size = FixedArray::SizeFor(0);
3902 HeapObject* result;
3903 { AllocationResult allocation =
3904 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3905 if (!allocation.To(&result)) return allocation;
3906 }
3907 // Initialize the object.
3908 result->set_map_no_write_barrier(fixed_array_map());
3909 FixedArray::cast(result)->set_length(0);
3910 return result;
3911 }
3912
3913
3914 AllocationResult Heap::AllocateEmptyExternalArray(
3915 ExternalArrayType array_type) {
3916 return AllocateExternalArray(0, array_type, NULL, TENURED);
3917 }
3918
3919
3920 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3921 if (!InNewSpace(src)) {
3922 return src;
3923 }
3924
3925 int len = src->length();
3926 HeapObject* obj;
3927 { AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3928 if (!allocation.To(&obj)) return allocation;
3929 }
3930 obj->set_map_no_write_barrier(fixed_array_map());
3931 FixedArray* result = FixedArray::cast(obj);
3932 result->set_length(len);
3933
3934 // Copy the content
3935 DisallowHeapAllocation no_gc;
3936 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3937 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3938
3939 // TODO(mvstanton): The map is set twice because of protection against calling
3940 // set() on a COW FixedArray. Issue v8:3221 was created to track this; once
3941 // it is fixed, we might be able to remove this whole method.
3942 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3943 return result;
3944 }
3945
3946
3947 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3948 ExternalArrayType array_type) {
3949 return AllocateFixedTypedArray(0, array_type, TENURED);
3950 }
3951
3952
3953 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3954 int len = src->length();
3955 HeapObject* obj;
3956 { AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
3957 if (!allocation.To(&obj)) return allocation;
3958 }
3959 if (InNewSpace(obj)) {
3960 obj->set_map_no_write_barrier(map);
3961 CopyBlock(obj->address() + kPointerSize,
3962 src->address() + kPointerSize,
3963 FixedArray::SizeFor(len) - kPointerSize);
3964 return obj;
3965 }
3966 obj->set_map_no_write_barrier(map);
3967 FixedArray* result = FixedArray::cast(obj);
3968 result->set_length(len);
3969
3970 // Copy the content
3971 DisallowHeapAllocation no_gc;
3972 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3973 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3974 return result;
3975 }
3976
3977
3978 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3979 Map* map) {
3980 int len = src->length();
3981 HeapObject* obj;
3982 { AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
3983 if (!allocation.To(&obj)) return allocation;
3984 }
3985 obj->set_map_no_write_barrier(map);
3986 CopyBlock(
3987 obj->address() + FixedDoubleArray::kLengthOffset,
3988 src->address() + FixedDoubleArray::kLengthOffset,
3989 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
3990 return obj;
3991 }
3992
3993
3994 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
3995 Map* map) {
3996 HeapObject* obj;
3997 if (src->is_extended_layout()) {
3998 ConstantPoolArray::NumberOfEntries small(src,
3999 ConstantPoolArray::SMALL_SECTION);
4000 ConstantPoolArray::NumberOfEntries extended(src,
4001 ConstantPoolArray::EXTENDED_SECTION);
4002 AllocationResult allocation =
4003 AllocateExtendedConstantPoolArray(small, extended);
4004 if (!allocation.To(&obj)) return allocation;
4005 } else {
4006 ConstantPoolArray::NumberOfEntries small(src,
4007 ConstantPoolArray::SMALL_SECTION);
4008 AllocationResult allocation = AllocateConstantPoolArray(small);
4009 if (!allocation.To(&obj)) return allocation;
4010 }
4011 obj->set_map_no_write_barrier(map);
4012 CopyBlock(
4013 obj->address() + ConstantPoolArray::kFirstEntryOffset,
4014 src->address() + ConstantPoolArray::kFirstEntryOffset,
4015 src->size() - ConstantPoolArray::kFirstEntryOffset);
4016 return obj;
4017 }
4018
4019
4020 AllocationResult Heap::AllocateRawFixedArray(int length,
4021 PretenureFlag pretenure) {
4022 if (length < 0 || length > FixedArray::kMaxLength) {
4023 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4024 }
4025 int size = FixedArray::SizeFor(length);
4026 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4027
4028 return AllocateRaw(size, space, OLD_POINTER_SPACE);
4029 }
4030
4031
4032 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4033 PretenureFlag pretenure,
4034 Object* filler) {
4035 ASSERT(length >= 0);
4036 ASSERT(empty_fixed_array()->IsFixedArray());
4037 if (length == 0) return empty_fixed_array();
4038
4039 ASSERT(!InNewSpace(filler));
4040 HeapObject* result;
4041 { AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4042 if (!allocation.To(&result)) return allocation;
4043 }
4044
4045 result->set_map_no_write_barrier(fixed_array_map());
4046 FixedArray* array = FixedArray::cast(result);
4047 array->set_length(length);
4048 MemsetPointer(array->data_start(), filler, length);
4049 return array;
4050 }
4051
4052
4053 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4054 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4055 }
4056
4057
4058 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4059 if (length == 0) return empty_fixed_array();
4060
4061 HeapObject* obj;
4062 { AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4063 if (!allocation.To(&obj)) return allocation;
4064 }
4065
4066 obj->set_map_no_write_barrier(fixed_array_map());
4067 FixedArray::cast(obj)->set_length(length);
4068 return obj;
4069 }
4070
4071
4072 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4073 int length,
4074 PretenureFlag pretenure) {
4075 if (length == 0) return empty_fixed_array();
4076
4077 HeapObject* elements;
4078 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4079 if (!allocation.To(&elements)) return allocation;
4080
4081 elements->set_map_no_write_barrier(fixed_double_array_map());
4082 FixedDoubleArray::cast(elements)->set_length(length);
4083 return elements;
4084 }
4085
4086
4087 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4088 PretenureFlag pretenure) {
4089 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4090 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4091 }
4092 int size = FixedDoubleArray::SizeFor(length);
4093 #ifndef V8_HOST_ARCH_64_BIT
4094 size += kPointerSize;
4095 #endif
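// Added note (descriptive, based on the code above and below): the extra word
// reserved on 32-bit hosts lets EnsureDoubleAligned shift the array onto an
// 8-byte boundary for the double elements.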
4096 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4097
4098 HeapObject* object;
4099 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4100 if (!allocation.To(&object)) return allocation;
4101 }
4102
4103 return EnsureDoubleAligned(this, object, size);
4104 }
4105
4106
4107 AllocationResult Heap::AllocateConstantPoolArray(
4108 const ConstantPoolArray::NumberOfEntries& small) {
4109 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4110 int size = ConstantPoolArray::SizeFor(small);
4111 #ifndef V8_HOST_ARCH_64_BIT
4112 size += kPointerSize;
4113 #endif
4114 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4115
4116 HeapObject* object;
4117 { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4118 if (!allocation.To(&object)) return allocation;
4119 }
4120 object = EnsureDoubleAligned(this, object, size);
4121 object->set_map_no_write_barrier(constant_pool_array_map());
4122
4123 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4124 constant_pool->Init(small);
4125 constant_pool->ClearPtrEntries(isolate());
4126 return constant_pool;
4127 }
4128
4129
4130 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4131 const ConstantPoolArray::NumberOfEntries& small,
4132 const ConstantPoolArray::NumberOfEntries& extended) {
4133 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4134 CHECK(extended.are_in_range(0, kMaxInt));
4135 int size = ConstantPoolArray::SizeForExtended(small, extended);
4136 #ifndef V8_HOST_ARCH_64_BIT
4137 size += kPointerSize;
4138 #endif
4139 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4140
4141 HeapObject* object;
4142 { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4143 if (!allocation.To(&object)) return allocation;
4144 }
4145 object = EnsureDoubleAligned(this, object, size);
4146 object->set_map_no_write_barrier(constant_pool_array_map());
4147
4148 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4149 constant_pool->InitExtended(small, extended);
4150 constant_pool->ClearPtrEntries(isolate());
4151 return constant_pool;
4152 }
4153
4154
4155 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4156 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4157 int size = ConstantPoolArray::SizeFor(small);
4158 HeapObject* result;
4159 { AllocationResult allocation =
4160 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4161 if (!allocation.To(&result)) return allocation;
4162 }
4163 result->set_map_no_write_barrier(constant_pool_array_map());
4164 ConstantPoolArray::cast(result)->Init(small);
4165 return result;
4166 }
4167
4168
4169 AllocationResult Heap::AllocateSymbol() {
4170 // Statically ensure that it is safe to allocate symbols in paged spaces.
4171 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4172
4173 HeapObject* result;
4174 AllocationResult allocation =
4175 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4176 if (!allocation.To(&result)) return allocation;
4177
4178 result->set_map_no_write_barrier(symbol_map());
4179
4180 // Generate a random hash value.
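// Added note (descriptive, based on the loop below): retry a bounded number of
// times if the generator yields 0, since a zero hash is not allowed and the
// fallback below forces it to 1.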
4181 int hash;
4182 int attempts = 0;
4183 do {
4184 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4185 attempts++;
4186 } while (hash == 0 && attempts < 30);
4187 if (hash == 0) hash = 1; // never return 0
4188
4189 Symbol::cast(result)->set_hash_field(
4190 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4191 Symbol::cast(result)->set_name(undefined_value());
4192 Symbol::cast(result)->set_flags(Smi::FromInt(0));
4193
4194 ASSERT(!Symbol::cast(result)->is_private());
4195 return result;
4196 }
4197
4198
4199 AllocationResult Heap::AllocateStruct(InstanceType type) {
4200 Map* map;
4201 switch (type) {
4202 #define MAKE_CASE(NAME, Name, name) \
4203 case NAME##_TYPE: map = name##_map(); break;
4204 STRUCT_LIST(MAKE_CASE)
4205 #undef MAKE_CASE
4206 default:
4207 UNREACHABLE();
4208 return exception();
4209 }
4210 int size = map->instance_size();
4211 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4212 Struct* result;
4213 { AllocationResult allocation = Allocate(map, space);
4214 if (!allocation.To(&result)) return allocation;
4215 }
4216 result->InitializeBody(size);
4217 return result;
4218 }
4219
4220
4221 bool Heap::IsHeapIterable() {
4222 return (!old_pointer_space()->was_swept_conservatively() &&
4223 !old_data_space()->was_swept_conservatively() &&
4224 new_space_top_after_last_gc_ == new_space()->top());
4225 }
4226
4227
4228 void Heap::MakeHeapIterable() {
4229 ASSERT(AllowHeapAllocation::IsAllowed());
4230 if (!IsHeapIterable()) {
4231 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4232 }
4233 ASSERT(IsHeapIterable());
4234 }
4235
4236
4237 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4238 incremental_marking()->Step(step_size,
4239 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
4240
4241 if (incremental_marking()->IsComplete()) {
4242 bool uncommit = false;
4243 if (gc_count_at_last_idle_gc_ == gc_count_) {
4244 // No GC since the last full GC, the mutator is probably not active.
4245 isolate_->compilation_cache()->Clear();
4246 uncommit = true;
4247 }
4248 CollectAllGarbage(kReduceMemoryFootprintMask,
4249 "idle notification: finalize incremental");
4250 mark_sweeps_since_idle_round_started_++;
4251 gc_count_at_last_idle_gc_ = gc_count_;
4252 if (uncommit) {
4253 new_space_.Shrink();
4254 UncommitFromSpace();
4255 }
4256 }
4257 }
4258
4259
4260 bool Heap::IdleNotification(int hint) {
4261 // Hints greater than this value indicate that
4262 // the embedder is requesting a lot of GC work.
4263 const int kMaxHint = 1000;
4264 const int kMinHintForIncrementalMarking = 10;
4265 // Minimal hint that allows a full GC.
4266 const int kMinHintForFullGC = 100;
4267 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
4268 // The size factor is in range [5..250]. The numbers here are chosen from
4269 // experiments. If you change them, make sure to test with
4270 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
4271 intptr_t step_size =
4272 size_factor * IncrementalMarking::kAllocatedThreshold;
4273
4274 if (contexts_disposed_ > 0) {
4275 contexts_disposed_ = 0;
4276 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
4277 if (hint >= mark_sweep_time && !FLAG_expose_gc &&
4278 incremental_marking()->IsStopped()) {
4279 HistogramTimerScope scope(isolate_->counters()->gc_context());
4280 CollectAllGarbage(kReduceMemoryFootprintMask,
4281 "idle notification: contexts disposed");
4282 } else {
4283 AdvanceIdleIncrementalMarking(step_size);
4284 }
4285
4286 // After context disposal there is likely a lot of garbage remaining, so reset
4287 // the idle notification counters in order to trigger more incremental GCs
4288 // on subsequent idle notifications.
4289 StartIdleRound();
4290 return false;
4291 }
4292
4293 if (!FLAG_incremental_marking || isolate_->serializer_enabled()) {
4294 return IdleGlobalGC();
4295 }
4296
4297 // By doing small chunks of GC work in each IdleNotification, we perform
4298 // a round of incremental GCs and after that wait until the mutator
4299 // creates enough garbage to justify a new round.
4300 // An incremental GC progresses as follows:
4301 // 1. many incremental marking steps,
4302 // 2. one old space mark-sweep-compact.
4303 // Use mark-sweep-compact events to count incremental GCs in a round.
4304
4305 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4306 if (EnoughGarbageSinceLastIdleRound()) {
4307 StartIdleRound();
4308 } else {
4309 return true;
4310 }
4311 }
4312
4313 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
4314 mark_sweeps_since_idle_round_started_;
4315
4316 if (incremental_marking()->IsStopped()) {
4317 // If there are no more than two GCs left in this idle round and we are
4318 // allowed to do a full GC, then make those GCs full in order to compact
4319 // the code space.
4320 // TODO(ulan): Once we enable code compaction for incremental marking,
4321 // we can get rid of this special case and always start incremental marking.
4322 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
4323 CollectAllGarbage(kReduceMemoryFootprintMask,
4324 "idle notification: finalize idle round");
4325 mark_sweeps_since_idle_round_started_++;
4326 } else if (hint > kMinHintForIncrementalMarking) {
4327 incremental_marking()->Start();
4328 }
4329 }
4330 if (!incremental_marking()->IsStopped() &&
4331 hint > kMinHintForIncrementalMarking) {
4332 AdvanceIdleIncrementalMarking(step_size);
4333 }
4334
4335 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4336 FinishIdleRound();
4337 return true;
4338 }
4339
4340 // If the IdleNotification is called with a large hint we will wait for
4341 // the sweeper threads here.
4342 if (hint >= kMinHintForFullGC &&
4343 mark_compact_collector()->IsConcurrentSweepingInProgress()) {
4344 mark_compact_collector()->WaitUntilSweepingCompleted();
4345 }
4346
4347 return false;
4348 }
4349
4350
4351 bool Heap::IdleGlobalGC() {
4352 static const int kIdlesBeforeScavenge = 4;
4353 static const int kIdlesBeforeMarkSweep = 7;
4354 static const int kIdlesBeforeMarkCompact = 8;
4355 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
4356 static const unsigned int kGCsBetweenCleanup = 4;
4357
4358 if (!last_idle_notification_gc_count_init_) {
4359 last_idle_notification_gc_count_ = gc_count_;
4360 last_idle_notification_gc_count_init_ = true;
4361 }
4362
4363 bool uncommit = true;
4364 bool finished = false;
4365
4366 // Reset the number of idle notifications received when a number of
4367 // GCs have taken place. This allows another round of cleanup based
4368 // on idle notifications if enough work has been carried out to
4369 // provoke a number of garbage collections.
4370 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
4371 number_idle_notifications_ =
4372 Min(number_idle_notifications_ + 1, kMaxIdleCount);
4373 } else {
4374 number_idle_notifications_ = 0;
4375 last_idle_notification_gc_count_ = gc_count_;
4376 }
4377
4378 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
4379 CollectGarbage(NEW_SPACE, "idle notification");
4380 new_space_.Shrink();
4381 last_idle_notification_gc_count_ = gc_count_;
4382 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
4383 // Before doing the mark-sweep collections we clear the
4384 // compilation cache to avoid hanging on to source code and
4385 // generated code for cached functions.
4386 isolate_->compilation_cache()->Clear();
4387
4388 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
4389 new_space_.Shrink();
4390 last_idle_notification_gc_count_ = gc_count_;
4391
4392 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
4393 CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
4394 new_space_.Shrink();
4395 last_idle_notification_gc_count_ = gc_count_;
4396 number_idle_notifications_ = 0;
4397 finished = true;
4398 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
4399 // If we have received more than kIdlesBeforeMarkCompact idle
4400 // notifications we do not perform any cleanup because we don't
4401 // expect to gain much by doing so.
4402 finished = true;
4403 }
4404
4405 if (uncommit) UncommitFromSpace();
4406
4407 return finished;
4408 }
4409
4410
4411 #ifdef DEBUG
4412
4413 void Heap::Print() {
4414 if (!HasBeenSetUp()) return;
4415 isolate()->PrintStack(stdout);
4416 AllSpaces spaces(this);
4417 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4418 space->Print();
4419 }
4420 }
4421
4422
4423 void Heap::ReportCodeStatistics(const char* title) {
4424 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4425 PagedSpace::ResetCodeStatistics(isolate());
4426 // We do not look for code in new space, map space, or old space. If code
4427 // somehow ends up in those spaces, we would miss it here.
4428 code_space_->CollectCodeStatistics();
4429 lo_space_->CollectCodeStatistics();
4430 PagedSpace::ReportCodeStatistics(isolate());
4431 }
4432
4433
4434 // This function expects that NewSpace's allocated objects histogram is
4435 // populated (via a call to CollectStatistics or else as a side effect of a
4436 // just-completed scavenge collection).
4437 void Heap::ReportHeapStatistics(const char* title) {
4438 USE(title);
4439 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
4440 title, gc_count_);
4441 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4442 old_generation_allocation_limit_);
4443
4444 PrintF("\n");
4445 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4446 isolate_->global_handles()->PrintStats();
4447 PrintF("\n");
4448
4449 PrintF("Heap statistics : ");
4450 isolate_->memory_allocator()->ReportStatistics();
4451 PrintF("To space : ");
4452 new_space_.ReportStatistics();
4453 PrintF("Old pointer space : ");
4454 old_pointer_space_->ReportStatistics();
4455 PrintF("Old data space : ");
4456 old_data_space_->ReportStatistics();
4457 PrintF("Code space : ");
4458 code_space_->ReportStatistics();
4459 PrintF("Map space : ");
4460 map_space_->ReportStatistics();
4461 PrintF("Cell space : ");
4462 cell_space_->ReportStatistics();
4463 PrintF("PropertyCell space : ");
4464 property_cell_space_->ReportStatistics();
4465 PrintF("Large object space : ");
4466 lo_space_->ReportStatistics();
4467 PrintF(">>>>>> ========================================= >>>>>>\n");
4468 }
4469
4470 #endif // DEBUG
4471
4472 bool Heap::Contains(HeapObject* value) {
4473 return Contains(value->address());
4474 }
4475
4476
4477 bool Heap::Contains(Address addr) {
4478 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4479 return HasBeenSetUp() &&
4480 (new_space_.ToSpaceContains(addr) ||
4481 old_pointer_space_->Contains(addr) ||
4482 old_data_space_->Contains(addr) ||
4483 code_space_->Contains(addr) ||
4484 map_space_->Contains(addr) ||
4485 cell_space_->Contains(addr) ||
4486 property_cell_space_->Contains(addr) ||
4487 lo_space_->SlowContains(addr));
4488 }
4489
4490
4491 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4492 return InSpace(value->address(), space);
4493 }
4494
4495
4496 bool Heap::InSpace(Address addr, AllocationSpace space) {
4497 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4498 if (!HasBeenSetUp()) return false;
4499
4500 switch (space) {
4501 case NEW_SPACE:
4502 return new_space_.ToSpaceContains(addr);
4503 case OLD_POINTER_SPACE:
4504 return old_pointer_space_->Contains(addr);
4505 case OLD_DATA_SPACE:
4506 return old_data_space_->Contains(addr);
4507 case CODE_SPACE:
4508 return code_space_->Contains(addr);
4509 case MAP_SPACE:
4510 return map_space_->Contains(addr);
4511 case CELL_SPACE:
4512 return cell_space_->Contains(addr);
4513 case PROPERTY_CELL_SPACE:
4514 return property_cell_space_->Contains(addr);
4515 case LO_SPACE:
4516 return lo_space_->SlowContains(addr);
4517 case INVALID_SPACE:
4518 break;
4519 }
4520 UNREACHABLE();
4521 return false;
4522 }
4523
4524
4525 #ifdef VERIFY_HEAP
4526 void Heap::Verify() {
4527 CHECK(HasBeenSetUp());
4528 HandleScope scope(isolate());
4529
4530 store_buffer()->Verify();
4531
4532 VerifyPointersVisitor visitor;
4533 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4534
4535 VerifySmisVisitor smis_visitor;
4536 IterateSmiRoots(&smis_visitor);
4537
4538 new_space_.Verify();
4539
4540 old_pointer_space_->Verify(&visitor);
4541 map_space_->Verify(&visitor);
4542
4543 VerifyPointersVisitor no_dirty_regions_visitor;
4544 old_data_space_->Verify(&no_dirty_regions_visitor);
4545 code_space_->Verify(&no_dirty_regions_visitor);
4546 cell_space_->Verify(&no_dirty_regions_visitor);
4547 property_cell_space_->Verify(&no_dirty_regions_visitor);
4548
4549 lo_space_->Verify();
4550 }
4551 #endif
4552
4553
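// Descriptive note: ZapFromSpace overwrites every pointer-sized word of the
// from-space pages with kFromSpaceZapValue, so that any stale reference into
// from space stands out during verification and debugging.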
4554 void Heap::ZapFromSpace() {
4555 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4556 new_space_.FromSpaceEnd());
4557 while (it.has_next()) {
4558 NewSpacePage* page = it.next();
4559 for (Address cursor = page->area_start(), limit = page->area_end();
4560 cursor < limit;
4561 cursor += kPointerSize) {
4562 Memory::Address_at(cursor) = kFromSpaceZapValue;
4563 }
4564 }
4565 }
4566
4567
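// Descriptive note: visits every slot in [start, end). Slots that still point
// into from space are forwarded via |callback|; if the updated value remains
// in new space, the slot is re-entered into the store buffer. When incremental
// compaction is running and the holding object is already black, slots that
// point at evacuation candidates are additionally recorded for the
// mark-compact collector (see the record_slots comment below).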
4568 void Heap::IterateAndMarkPointersToFromSpace(Address start,
4569 Address end,
4570 ObjectSlotCallback callback) {
4571 Address slot_address = start;
4572
4573 // We are not collecting slots on new space objects during mutation,
4574 // thus we have to scan for pointers to evacuation candidates when we
4575 // promote objects. But we should not record any slots in non-black
4576 // objects. Grey objects' slots would be rescanned anyway.
4577 // A white object might not survive until the end of the collection, so
4578 // it would be a violation of the invariant to record its slots.
4579 bool record_slots = false;
4580 if (incremental_marking()->IsCompacting()) {
4581 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4582 record_slots = Marking::IsBlack(mark_bit);
4583 }
4584
4585 while (slot_address < end) {
4586 Object** slot = reinterpret_cast<Object**>(slot_address);
4587 Object* object = *slot;
4588 // If the store buffer becomes overfull we mark pages as being exempt from
4589 // the store buffer. These pages are scanned to find pointers that point
4590 // to the new space. In that case we may hit newly promoted objects and
4591 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4592 if (object->IsHeapObject()) {
4593 if (Heap::InFromSpace(object)) {
4594 callback(reinterpret_cast<HeapObject**>(slot),
4595 HeapObject::cast(object));
4596 Object* new_object = *slot;
4597 if (InNewSpace(new_object)) {
4598 SLOW_ASSERT(Heap::InToSpace(new_object));
4599 SLOW_ASSERT(new_object->IsHeapObject());
4600 store_buffer_.EnterDirectlyIntoStoreBuffer(
4601 reinterpret_cast<Address>(slot));
4602 }
4603 SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4604 } else if (record_slots &&
4605 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4606 mark_compact_collector()->RecordSlot(slot, slot, object);
4607 }
4608 }
4609 slot_address += kPointerSize;
4610 }
4611 }
4612
4613
4614 #ifdef DEBUG
4615 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4616
4617
4618 bool IsAMapPointerAddress(Object** addr) {
4619 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4620 int mod = a % Map::kSize;
4621 return mod >= Map::kPointerFieldsBeginOffset &&
4622 mod < Map::kPointerFieldsEndOffset;
4623 }
4624
4625
4626 bool EverythingsAPointer(Object** addr) {
4627 return true;
4628 }
4629
4630
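// Descriptive note: debug-only helper that walks the slots in
// [current, limit), skipping free-space fillers and the current linear
// allocation area, and checks that every slot which passes |filter| and points
// into new space has a matching entry in the (sorted) store buffer. Hitting
// UNREACHABLE below means an intergenerational pointer is missing from the
// store buffer.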
4631 static void CheckStoreBuffer(Heap* heap,
4632 Object** current,
4633 Object** limit,
4634 Object**** store_buffer_position,
4635 Object*** store_buffer_top,
4636 CheckStoreBufferFilter filter,
4637 Address special_garbage_start,
4638 Address special_garbage_end) {
4639 Map* free_space_map = heap->free_space_map();
4640 for ( ; current < limit; current++) {
4641 Object* o = *current;
4642 Address current_address = reinterpret_cast<Address>(current);
4643 // Skip free space.
4644 if (o == free_space_map) {
4646 FreeSpace* free_space =
4647 FreeSpace::cast(HeapObject::FromAddress(current_address));
4648 int skip = free_space->Size();
4649 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
4650 ASSERT(skip > 0);
4651 current_address += skip - kPointerSize;
4652 current = reinterpret_cast<Object**>(current_address);
4653 continue;
4654 }
4655 // Skip the current linear allocation space between top and limit which is
4656 // unmarked with the free space map, but can contain junk.
4657 if (current_address == special_garbage_start &&
4658 special_garbage_end != special_garbage_start) {
4659 current_address = special_garbage_end - kPointerSize;
4660 current = reinterpret_cast<Object**>(current_address);
4661 continue;
4662 }
4663 if (!(*filter)(current)) continue;
4664 ASSERT(current_address < special_garbage_start ||
4665 current_address >= special_garbage_end);
4666 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4667 // We have to check that the pointer does not point into new space
4668 // without trying to cast it to a heap object since the hash field of
4669 // a string can contain values like 1 and 3 which are tagged null
4670 // pointers.
4671 if (!heap->InNewSpace(o)) continue;
4672 while (**store_buffer_position < current &&
4673 *store_buffer_position < store_buffer_top) {
4674 (*store_buffer_position)++;
4675 }
4676 if (**store_buffer_position != current ||
4677 *store_buffer_position == store_buffer_top) {
4678 Object** obj_start = current;
4679 while (!(*obj_start)->IsMap()) obj_start--;
4680 UNREACHABLE();
4681 }
4682 }
4683 }
4684
4685
4686 // Check that the store buffer contains all intergenerational pointers by
4687 // scanning a page and ensuring that all pointers to young space are in the
4688 // store buffer.
4689 void Heap::OldPointerSpaceCheckStoreBuffer() {
4690 OldSpace* space = old_pointer_space();
4691 PageIterator pages(space);
4692
4693 store_buffer()->SortUniq();
4694
4695 while (pages.has_next()) {
4696 Page* page = pages.next();
4697 Object** current = reinterpret_cast<Object**>(page->area_start());
4698
4699 Address end = page->area_end();
4700
4701 Object*** store_buffer_position = store_buffer()->Start();
4702 Object*** store_buffer_top = store_buffer()->Top();
4703
4704 Object** limit = reinterpret_cast<Object**>(end);
4705 CheckStoreBuffer(this,
4706 current,
4707 limit,
4708 &store_buffer_position,
4709 store_buffer_top,
4710 &EverythingsAPointer,
4711 space->top(),
4712 space->limit());
4713 }
4714 }
4715
4716
4717 void Heap::MapSpaceCheckStoreBuffer() {
4718 MapSpace* space = map_space();
4719 PageIterator pages(space);
4720
4721 store_buffer()->SortUniq();
4722
4723 while (pages.has_next()) {
4724 Page* page = pages.next();
4725 Object** current = reinterpret_cast<Object**>(page->area_start());
4726
4727 Address end = page->area_end();
4728
4729 Object*** store_buffer_position = store_buffer()->Start();
4730 Object*** store_buffer_top = store_buffer()->Top();
4731
4732 Object** limit = reinterpret_cast<Object**>(end);
4733 CheckStoreBuffer(this,
4734 current,
4735 limit,
4736 &store_buffer_position,
4737 store_buffer_top,
4738 &IsAMapPointerAddress,
4739 space->top(),
4740 space->limit());
4741 }
4742 }
4743
4744
4745 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4746 LargeObjectIterator it(lo_space());
4747 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4748 // We only have code, sequential strings, or fixed arrays in large
4749 // object space, and only fixed arrays can possibly contain pointers to
4750 // the young generation.
4751 if (object->IsFixedArray()) {
4752 Object*** store_buffer_position = store_buffer()->Start();
4753 Object*** store_buffer_top = store_buffer()->Top();
4754 Object** current = reinterpret_cast<Object**>(object->address());
4755 Object** limit =
4756 reinterpret_cast<Object**>(object->address() + object->Size());
4757 CheckStoreBuffer(this,
4758 current,
4759 limit,
4760 &store_buffer_position,
4761 store_buffer_top,
4762 &EverythingsAPointer,
4763 NULL,
4764 NULL);
4765 }
4766 }
4767 }
4768 #endif
4769
4770
4771 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4772 IterateStrongRoots(v, mode);
4773 IterateWeakRoots(v, mode);
4774 }
4775
4776
4777 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4778 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4779 v->Synchronize(VisitorSynchronization::kStringTable);
4780 if (mode != VISIT_ALL_IN_SCAVENGE &&
4781 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4782 // Scavenge collections have special processing for this.
4783 external_string_table_.Iterate(v);
4784 }
4785 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4786 }
4787
4788
4789 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4790 // Acquire execution access since we are going to read stack limit values.
4791 ExecutionAccess access(isolate());
4792 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4793 v->Synchronize(VisitorSynchronization::kSmiRootList);
4794 }
4795
4796
4797 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4798 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4799 v->Synchronize(VisitorSynchronization::kStrongRootList);
4800
4801 v->VisitPointer(BitCast<Object**>(&hidden_string_));
4802 v->Synchronize(VisitorSynchronization::kInternalizedString);
4803
4804 isolate_->bootstrapper()->Iterate(v);
4805 v->Synchronize(VisitorSynchronization::kBootstrapper);
4806 isolate_->Iterate(v);
4807 v->Synchronize(VisitorSynchronization::kTop);
4808 Relocatable::Iterate(isolate_, v);
4809 v->Synchronize(VisitorSynchronization::kRelocatable);
4810
4811 if (isolate_->deoptimizer_data() != NULL) {
4812 isolate_->deoptimizer_data()->Iterate(v);
4813 }
4814 v->Synchronize(VisitorSynchronization::kDebug);
4815 isolate_->compilation_cache()->Iterate(v);
4816 v->Synchronize(VisitorSynchronization::kCompilationCache);
4817
4818 // Iterate over local handles in handle scopes.
4819 isolate_->handle_scope_implementer()->Iterate(v);
4820 isolate_->IterateDeferredHandles(v);
4821 v->Synchronize(VisitorSynchronization::kHandleScope);
4822
4823 // Iterate over the builtin code objects and code stubs in the
4824 // heap. Note that it is not necessary to iterate over code objects
4825 // on scavenge collections.
4826 if (mode != VISIT_ALL_IN_SCAVENGE) {
4827 isolate_->builtins()->IterateBuiltins(v);
4828 }
4829 v->Synchronize(VisitorSynchronization::kBuiltins);
4830
4831 // Iterate over global handles.
4832 switch (mode) {
4833 case VISIT_ONLY_STRONG:
4834 isolate_->global_handles()->IterateStrongRoots(v);
4835 break;
4836 case VISIT_ALL_IN_SCAVENGE:
4837 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4838 break;
4839 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4840 case VISIT_ALL:
4841 isolate_->global_handles()->IterateAllRoots(v);
4842 break;
4843 }
4844 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4845
4846 // Iterate over eternal handles.
4847 if (mode == VISIT_ALL_IN_SCAVENGE) {
4848 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4849 } else {
4850 isolate_->eternal_handles()->IterateAllRoots(v);
4851 }
4852 v->Synchronize(VisitorSynchronization::kEternalHandles);
4853
4854 // Iterate over pointers being held by inactive threads.
4855 isolate_->thread_manager()->Iterate(v);
4856 v->Synchronize(VisitorSynchronization::kThreadManager);
4857
4858 // Iterate over the pointers the Serialization/Deserialization code is
4859 // holding.
4860 // During garbage collection this keeps the partial snapshot cache alive.
4861 // During deserialization of the startup snapshot this creates the partial
4862 // snapshot cache and deserializes the objects it refers to. During
4863 // serialization this does nothing, since the partial snapshot cache is
4864 // empty. However the next thing we do is create the partial snapshot,
4865 // filling up the partial snapshot cache with objects it needs as we go.
4866 SerializerDeserializer::Iterate(isolate_, v);
4867 // We don't do a v->Synchronize call here, because in debug mode that will
4868 // output a flag to the snapshot. However at this point the serializer and
4869 // deserializer are deliberately a little unsynchronized (see above) so the
4870 // checking of the sync flag in the snapshot would fail.
4871 }
4872
4873
4874 // TODO(1236194): Since the heap size is configurable on the command line
4875 // and through the API, we should gracefully handle the case that the heap
4876 // size is not big enough to fit all the initial objects.
4877 bool Heap::ConfigureHeap(int max_semi_space_size,
4878 int max_old_space_size,
4879 int max_executable_size,
4880 size_t code_range_size) {
4881 if (HasBeenSetUp()) return false;
4882
4883 // Overwrite default configuration.
4884 if (max_semi_space_size > 0) {
4885 max_semi_space_size_ = max_semi_space_size * MB;
4886 }
4887 if (max_old_space_size > 0) {
4888 max_old_generation_size_ = max_old_space_size * MB;
4889 }
4890 if (max_executable_size > 0) {
4891 max_executable_size_ = max_executable_size * MB;
4892 }
4893
4894 // If max space size flags are specified overwrite the configuration.
4895 if (FLAG_max_semi_space_size > 0) {
4896 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4897 }
4898 if (FLAG_max_old_space_size > 0) {
4899 max_old_generation_size_ = FLAG_max_old_space_size * MB;
4900 }
4901 if (FLAG_max_executable_size > 0) {
4902 max_executable_size_ = FLAG_max_executable_size * MB;
4903 }
4904
4905 if (FLAG_stress_compaction) {
4906 // This will cause more frequent GCs when stressing.
4907 max_semi_space_size_ = Page::kPageSize;
4908 }
4909
4910 if (Snapshot::IsEnabled()) {
4911 // If we are using a snapshot we always reserve the default amount
4912 // of memory for each semispace because code in the snapshot has
4913 // write-barrier code that relies on the size and alignment of new
4914 // space. We therefore cannot use a larger max semispace size
4915 // than the default reserved semispace size.
4916 if (max_semi_space_size_ > reserved_semispace_size_) {
4917 max_semi_space_size_ = reserved_semispace_size_;
4918 if (FLAG_trace_gc) {
4919 PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4920 reserved_semispace_size_ >> 10);
4921 }
4922 }
4923 } else {
4924 // If we are not using snapshots we reserve space for the actual
4925 // max semispace size.
4926 reserved_semispace_size_ = max_semi_space_size_;
4927 }
4928
4929 // The max executable size must be less than or equal to the max old
4930 // generation size.
4931 if (max_executable_size_ > max_old_generation_size_) {
4932 max_executable_size_ = max_old_generation_size_;
4933 }
4934
4935 // The new space size must be a power of two to support single-bit testing
4936 // for containment.
4937 max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_);
4938 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4939
4940 if (FLAG_min_semi_space_size > 0) {
4941 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4942 if (initial_semispace_size > max_semi_space_size_) {
4943 initial_semispace_size_ = max_semi_space_size_;
4944 if (FLAG_trace_gc) {
4945 PrintPID("Min semi-space size cannot be more than the maximum"
4946 "semi-space size of %d MB\n", max_semi_space_size_);
4947 }
4948 } else {
4949 initial_semispace_size_ = initial_semispace_size;
4950 }
4951 }
4952
4953 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4954
4955 // The old generation is paged and needs at least one page for each space.
4956 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4957 max_old_generation_size_ =
4958 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4959 max_old_generation_size_);
4960
4961 // We rely on being able to allocate new arrays in paged spaces.
4962 ASSERT(Page::kMaxRegularHeapObjectSize >=
4963 (JSArray::kSize +
4964 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
4965 AllocationMemento::kSize));
4966
4967 code_range_size_ = code_range_size * MB;
4968
4969 configured_ = true;
4970 return true;
4971 }
4972
4973
4974 bool Heap::ConfigureHeapDefault() {
4975 return ConfigureHeap(0, 0, 0, 0);
4976 }
4977
4978
4979 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4980 *stats->start_marker = HeapStats::kStartMarker;
4981 *stats->end_marker = HeapStats::kEndMarker;
4982 *stats->new_space_size = new_space_.SizeAsInt();
4983 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4984 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
4985 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4986 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
4987 *stats->old_data_space_capacity = old_data_space_->Capacity();
4988 *stats->code_space_size = code_space_->SizeOfObjects();
4989 *stats->code_space_capacity = code_space_->Capacity();
4990 *stats->map_space_size = map_space_->SizeOfObjects();
4991 *stats->map_space_capacity = map_space_->Capacity();
4992 *stats->cell_space_size = cell_space_->SizeOfObjects();
4993 *stats->cell_space_capacity = cell_space_->Capacity();
4994 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
4995 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
4996 *stats->lo_space_size = lo_space_->Size();
4997 isolate_->global_handles()->RecordStats(stats);
4998 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4999 *stats->memory_allocator_capacity =
5000 isolate()->memory_allocator()->Size() +
5001 isolate()->memory_allocator()->Available();
5002 *stats->os_error = OS::GetLastError();
5004 if (take_snapshot) {
5005 HeapIterator iterator(this);
5006 for (HeapObject* obj = iterator.next();
5007 obj != NULL;
5008 obj = iterator.next()) {
5009 InstanceType type = obj->map()->instance_type();
5010 ASSERT(0 <= type && type <= LAST_TYPE);
5011 stats->objects_per_type[type]++;
5012 stats->size_per_type[type] += obj->Size();
5013 }
5014 }
5015 }
5016
5017
5018 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5019 return old_pointer_space_->SizeOfObjects()
5020 + old_data_space_->SizeOfObjects()
5021 + code_space_->SizeOfObjects()
5022 + map_space_->SizeOfObjects()
5023 + cell_space_->SizeOfObjects()
5024 + property_cell_space_->SizeOfObjects()
5025 + lo_space_->SizeOfObjects();
5026 }
5027
5028
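// Descriptive note: amount of external memory registered since the last
// global GC, clamped to zero so that freeing external memory never produces a
// negative value.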
5029 int64_t Heap::PromotedExternalMemorySize() {
5030 if (amount_of_external_allocated_memory_
5031 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
5032 return amount_of_external_allocated_memory_
5033 - amount_of_external_allocated_memory_at_last_global_gc_;
5034 }
5035
5036
5037 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
5038 int freed_global_handles) {
5039 const int kMaxHandles = 1000;
5040 const int kMinHandles = 100;
5041 double min_factor = 1.1;
5042 double max_factor = 4;
5043 // We set the old generation growing factor to 2 to grow the heap slower on
5044 // memory-constrained devices.
5045 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5046 max_factor = 2;
5047 }
5048 // If there are many freed global handles, then the next full GC will
5049 // likely collect a lot of garbage. Choose the heap growing factor
5050 // depending on freed global handles.
5051 // TODO(ulan, hpayer): Take into account mutator utilization.
5052 double factor;
5053 if (freed_global_handles <= kMinHandles) {
5054 factor = max_factor;
5055 } else if (freed_global_handles >= kMaxHandles) {
5056 factor = min_factor;
5057 } else {
5058 // Compute factor using linear interpolation between points
5059 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5060 factor = max_factor -
5061 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5062 (kMaxHandles - kMinHandles);
5063 }
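// Worked example: on a device that is not memory constrained (max_factor == 4)
// and with freed_global_handles == 550, the interpolation above gives
// factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100) = 2.55.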
5064
5065 if (FLAG_stress_compaction ||
5066 mark_compact_collector()->reduce_memory_footprint_) {
5067 factor = min_factor;
5068 }
5069
5070 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5071 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5072 limit += new_space_.Capacity();
5073 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5074 return Min(limit, halfway_to_the_max);
5075 }
5076
5077
5078 void Heap::EnableInlineAllocation() {
5079 if (!inline_allocation_disabled_) return;
5080 inline_allocation_disabled_ = false;
5081
5082 // Update inline allocation limit for new space.
5083 new_space()->UpdateInlineAllocationLimit(0);
5084 }
5085
5086
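// Descriptive note: disabling inline allocation forces allocations onto the
// slow path; the new-space inline allocation limit is reset and each paged
// space gives up its current linear allocation area via EmptyAllocationInfo().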
5087 void Heap::DisableInlineAllocation() {
5088 if (inline_allocation_disabled_) return;
5089 inline_allocation_disabled_ = true;
5090
5091 // Update inline allocation limit for new space.
5092 new_space()->UpdateInlineAllocationLimit(0);
5093
5094 // Update inline allocation limit for old spaces.
5095 PagedSpaces spaces(this);
5096 for (PagedSpace* space = spaces.next();
5097 space != NULL;
5098 space = spaces.next()) {
5099 space->EmptyAllocationInfo();
5100 }
5101 }
5102
5103
5104 V8_DECLARE_ONCE(initialize_gc_once);
5105
5106 static void InitializeGCOnce() {
5107 InitializeScavengingVisitorsTables();
5108 NewSpaceScavenger::Initialize();
5109 MarkCompactCollector::Initialize();
5110 }
5111
5112
5113 bool Heap::SetUp() {
5114 #ifdef DEBUG
5115 allocation_timeout_ = FLAG_gc_interval;
5116 #endif
5117
5118 // Initialize heap spaces and initial maps and objects. Whenever something
5119 // goes wrong, just return false. The caller should check the results and
5120 // call Heap::TearDown() to release allocated memory.
5121 //
5122 // If the heap is not yet configured (e.g. through the API), configure it.
5123 // Configuration is based on the flags new-space-size (really the semispace
5124 // size) and old-space-size if set or the initial values of semispace_size_
5125 // and old_generation_size_ otherwise.
5126 if (!configured_) {
5127 if (!ConfigureHeapDefault()) return false;
5128 }
5129
5130 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5131
5132 MarkMapPointersAsEncoded(false);
5133
5134 // Set up memory allocator.
5135 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5136 return false;
5137
5138 // Set up new space.
5139 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5140 return false;
5141 }
5142 new_space_top_after_last_gc_ = new_space()->top();
5143
5144 // Initialize old pointer space.
5145 old_pointer_space_ =
5146 new OldSpace(this,
5147 max_old_generation_size_,
5148 OLD_POINTER_SPACE,
5149 NOT_EXECUTABLE);
5150 if (old_pointer_space_ == NULL) return false;
5151 if (!old_pointer_space_->SetUp()) return false;
5152
5153 // Initialize old data space.
5154 old_data_space_ =
5155 new OldSpace(this,
5156 max_old_generation_size_,
5157 OLD_DATA_SPACE,
5158 NOT_EXECUTABLE);
5159 if (old_data_space_ == NULL) return false;
5160 if (!old_data_space_->SetUp()) return false;
5161
5162 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5163
5164 // Initialize the code space, set its maximum capacity to the old
5165 // generation size. It needs executable memory.
5166 code_space_ =
5167 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5168 if (code_space_ == NULL) return false;
5169 if (!code_space_->SetUp()) return false;
5170
5171 // Initialize map space.
5172 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5173 if (map_space_ == NULL) return false;
5174 if (!map_space_->SetUp()) return false;
5175
5176 // Initialize simple cell space.
5177 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5178 if (cell_space_ == NULL) return false;
5179 if (!cell_space_->SetUp()) return false;
5180
5181 // Initialize global property cell space.
5182 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5183 PROPERTY_CELL_SPACE);
5184 if (property_cell_space_ == NULL) return false;
5185 if (!property_cell_space_->SetUp()) return false;
5186
5187 // The large object code space may contain code or data. We set the memory
5188 // to be non-executable here for safety, but this means we need to enable it
5189 // explicitly when allocating large code objects.
5190 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5191 if (lo_space_ == NULL) return false;
5192 if (!lo_space_->SetUp()) return false;
5193
5194 // Set up the seed that is used to randomize the string hash function.
5195 ASSERT(hash_seed() == 0);
5196 if (FLAG_randomize_hashes) {
5197 if (FLAG_hash_seed == 0) {
5198 int rnd = isolate()->random_number_generator()->NextInt();
5199 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5200 } else {
5201 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5202 }
5203 }
5204
5205 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5206 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5207
5208 store_buffer()->SetUp();
5209
5210 mark_compact_collector()->SetUp();
5211
5212 return true;
5213 }
5214
5215
5216 bool Heap::CreateHeapObjects() {
5217 // Create initial maps.
5218 if (!CreateInitialMaps()) return false;
5219 CreateApiObjects();
5220
5221 // Create initial objects
5222 CreateInitialObjects();
5223 CHECK_EQ(0, gc_count_);
5224
5225 set_native_contexts_list(undefined_value());
5226 set_array_buffers_list(undefined_value());
5227 set_allocation_sites_list(undefined_value());
5228 weak_object_to_code_table_ = undefined_value();
5229 return true;
5230 }
5231
5232
5233 void Heap::SetStackLimits() {
5234 ASSERT(isolate_ != NULL);
5235 ASSERT(isolate_ == isolate());
5236 // On 64 bit machines, pointers are generally out of range of Smis. We write
5237 // something that looks like an out of range Smi to the GC.
5238
5239 // Set up the special root array entries containing the stack limits.
5240 // These are actually addresses, but the tag makes the GC ignore it.
5241 roots_[kStackLimitRootIndex] =
5242 reinterpret_cast<Object*>(
5243 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5244 roots_[kRealStackLimitRootIndex] =
5245 reinterpret_cast<Object*>(
5246 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5247 }
5248
5249
5250 void Heap::TearDown() {
5251 #ifdef VERIFY_HEAP
5252 if (FLAG_verify_heap) {
5253 Verify();
5254 }
5255 #endif
5256
5257 UpdateMaximumCommitted();
5258
5259 if (FLAG_print_cumulative_gc_stat) {
5260 PrintF("\n");
5261 PrintF("gc_count=%d ", gc_count_);
5262 PrintF("mark_sweep_count=%d ", ms_count_);
5263 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5264 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5265 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5266 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
5267 get_max_alive_after_gc());
5268 PrintF("total_marking_time=%.1f ", marking_time());
5269 PrintF("total_sweeping_time=%.1f ", sweeping_time());
5270 PrintF("\n\n");
5271 }
5272
5273 if (FLAG_print_max_heap_committed) {
5274 PrintF("\n");
5275 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5276 MaximumCommittedMemory());
5277 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5278 new_space_.MaximumCommittedMemory());
5279 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5280 old_data_space_->MaximumCommittedMemory());
5281 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5282 old_pointer_space_->MaximumCommittedMemory());
5283 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5284 old_pointer_space_->MaximumCommittedMemory());
5285 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5286 code_space_->MaximumCommittedMemory());
5287 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5288 map_space_->MaximumCommittedMemory());
5289 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5290 cell_space_->MaximumCommittedMemory());
5291 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5292 property_cell_space_->MaximumCommittedMemory());
5293 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5294 lo_space_->MaximumCommittedMemory());
5295 PrintF("\n\n");
5296 }
5297
5298 TearDownArrayBuffers();
5299
5300 isolate_->global_handles()->TearDown();
5301
5302 external_string_table_.TearDown();
5303
5304 mark_compact_collector()->TearDown();
5305
5306 new_space_.TearDown();
5307
5308 if (old_pointer_space_ != NULL) {
5309 old_pointer_space_->TearDown();
5310 delete old_pointer_space_;
5311 old_pointer_space_ = NULL;
5312 }
5313
5314 if (old_data_space_ != NULL) {
5315 old_data_space_->TearDown();
5316 delete old_data_space_;
5317 old_data_space_ = NULL;
5318 }
5319
5320 if (code_space_ != NULL) {
5321 code_space_->TearDown();
5322 delete code_space_;
5323 code_space_ = NULL;
5324 }
5325
5326 if (map_space_ != NULL) {
5327 map_space_->TearDown();
5328 delete map_space_;
5329 map_space_ = NULL;
5330 }
5331
5332 if (cell_space_ != NULL) {
5333 cell_space_->TearDown();
5334 delete cell_space_;
5335 cell_space_ = NULL;
5336 }
5337
5338 if (property_cell_space_ != NULL) {
5339 property_cell_space_->TearDown();
5340 delete property_cell_space_;
5341 property_cell_space_ = NULL;
5342 }
5343
5344 if (lo_space_ != NULL) {
5345 lo_space_->TearDown();
5346 delete lo_space_;
5347 lo_space_ = NULL;
5348 }
5349
5350 store_buffer()->TearDown();
5351 incremental_marking()->TearDown();
5352
5353 isolate_->memory_allocator()->TearDown();
5354 }
5355
5356
5357 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5358 GCType gc_type,
5359 bool pass_isolate) {
5360 ASSERT(callback != NULL);
5361 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5362 ASSERT(!gc_prologue_callbacks_.Contains(pair));
5363 return gc_prologue_callbacks_.Add(pair);
5364 }
5365
5366
5367 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5368 ASSERT(callback != NULL);
5369 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5370 if (gc_prologue_callbacks_[i].callback == callback) {
5371 gc_prologue_callbacks_.Remove(i);
5372 return;
5373 }
5374 }
5375 UNREACHABLE();
5376 }
5377
5378
5379 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5380 GCType gc_type,
5381 bool pass_isolate) {
5382 ASSERT(callback != NULL);
5383 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5384 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
5385 return gc_epilogue_callbacks_.Add(pair);
5386 }
5387
5388
5389 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5390 ASSERT(callback != NULL);
5391 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5392 if (gc_epilogue_callbacks_[i].callback == callback) {
5393 gc_epilogue_callbacks_.Remove(i);
5394 return;
5395 }
5396 }
5397 UNREACHABLE();
5398 }
5399
5400
5401 // TODO(ishell): Find a better place for this.
5402 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
5403 Handle<DependentCode> dep) {
5404 ASSERT(!InNewSpace(*obj));
5405 ASSERT(!InNewSpace(*dep));
5406 // This handle scope keeps the table handle local to this function, which
5407 // allows us to safely skip write barriers in table update operations.
5408 HandleScope scope(isolate());
5409 Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
5410 isolate());
5411 table = WeakHashTable::Put(table, obj, dep);
5412
5413 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5414 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5415 }
5416 set_weak_object_to_code_table(*table);
5417 ASSERT_EQ(*dep, table->Lookup(obj));
5418 }
5419
5420
5421 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
5422 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5423 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5424 return DependentCode::cast(empty_fixed_array());
5425 }
5426
5427
5428 void Heap::EnsureWeakObjectToCodeTable() {
5429 if (!weak_object_to_code_table()->IsHashTable()) {
5430 set_weak_object_to_code_table(*WeakHashTable::New(
5431 isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY, TENURED));
5432 }
5433 }
5434
5435
5436 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5437 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5438 }
5439
5440 #ifdef DEBUG
5441
5442 class PrintHandleVisitor: public ObjectVisitor {
5443 public:
5444 void VisitPointers(Object** start, Object** end) {
5445 for (Object** p = start; p < end; p++)
5446 PrintF(" handle %p to %p\n",
5447 reinterpret_cast<void*>(p),
5448 reinterpret_cast<void*>(*p));
5449 }
5450 };
5451
5452
5453 void Heap::PrintHandles() {
5454 PrintF("Handles:\n");
5455 PrintHandleVisitor v;
5456 isolate_->handle_scope_implementer()->Iterate(&v);
5457 }
5458
5459 #endif
5460
5461
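// Descriptive note: AllSpaces, PagedSpaces and OldSpaces below enumerate
// successively smaller subsets of the heap's spaces in AllocationSpace order;
// next() returns NULL once the enumeration is exhausted.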
5462 Space* AllSpaces::next() {
5463 switch (counter_++) {
5464 case NEW_SPACE:
5465 return heap_->new_space();
5466 case OLD_POINTER_SPACE:
5467 return heap_->old_pointer_space();
5468 case OLD_DATA_SPACE:
5469 return heap_->old_data_space();
5470 case CODE_SPACE:
5471 return heap_->code_space();
5472 case MAP_SPACE:
5473 return heap_->map_space();
5474 case CELL_SPACE:
5475 return heap_->cell_space();
5476 case PROPERTY_CELL_SPACE:
5477 return heap_->property_cell_space();
5478 case LO_SPACE:
5479 return heap_->lo_space();
5480 default:
5481 return NULL;
5482 }
5483 }
5484
5485
5486 PagedSpace* PagedSpaces::next() {
5487 switch (counter_++) {
5488 case OLD_POINTER_SPACE:
5489 return heap_->old_pointer_space();
5490 case OLD_DATA_SPACE:
5491 return heap_->old_data_space();
5492 case CODE_SPACE:
5493 return heap_->code_space();
5494 case MAP_SPACE:
5495 return heap_->map_space();
5496 case CELL_SPACE:
5497 return heap_->cell_space();
5498 case PROPERTY_CELL_SPACE:
5499 return heap_->property_cell_space();
5500 default:
5501 return NULL;
5502 }
5503 }
5504
5505
5506
5507 OldSpace* OldSpaces::next() {
5508 switch (counter_++) {
5509 case OLD_POINTER_SPACE:
5510 return heap_->old_pointer_space();
5511 case OLD_DATA_SPACE:
5512 return heap_->old_data_space();
5513 case CODE_SPACE:
5514 return heap_->code_space();
5515 default:
5516 return NULL;
5517 }
5518 }
5519
5520
5521 SpaceIterator::SpaceIterator(Heap* heap)
5522 : heap_(heap),
5523 current_space_(FIRST_SPACE),
5524 iterator_(NULL),
5525 size_func_(NULL) {
5526 }
5527
5528
5529 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5530 : heap_(heap),
5531 current_space_(FIRST_SPACE),
5532 iterator_(NULL),
5533 size_func_(size_func) {
5534 }
5535
5536
5537 SpaceIterator::~SpaceIterator() {
5538 // Delete active iterator if any.
5539 delete iterator_;
5540 }
5541
5542
5543 bool SpaceIterator::has_next() {
5544 // Iterate until no more spaces.
5545 return current_space_ != LAST_SPACE;
5546 }
5547
5548
5549 ObjectIterator* SpaceIterator::next() {
5550 if (iterator_ != NULL) {
5551 delete iterator_;
5552 iterator_ = NULL;
5553 // Move to the next space
5554 current_space_++;
5555 if (current_space_ > LAST_SPACE) {
5556 return NULL;
5557 }
5558 }
5559
5560 // Return iterator for the new current space.
5561 return CreateIterator();
5562 }
5563
5564
5565 // Create an iterator for the space to iterate.
5566 ObjectIterator* SpaceIterator::CreateIterator() {
5567 ASSERT(iterator_ == NULL);
5568
5569 switch (current_space_) {
5570 case NEW_SPACE:
5571 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5572 break;
5573 case OLD_POINTER_SPACE:
5574 iterator_ =
5575 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5576 break;
5577 case OLD_DATA_SPACE:
5578 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5579 break;
5580 case CODE_SPACE:
5581 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5582 break;
5583 case MAP_SPACE:
5584 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5585 break;
5586 case CELL_SPACE:
5587 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5588 break;
5589 case PROPERTY_CELL_SPACE:
5590 iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
5591 size_func_);
5592 break;
5593 case LO_SPACE:
5594 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
5595 break;
5596 }
5597
5598 // Return the newly allocated iterator.
5599 ASSERT(iterator_ != NULL);
5600 return iterator_;
5601 }
5602
5603
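// Descriptive note: HeapObjectsFilter and its only implementation,
// UnreachableObjectsFilter, let HeapIterator skip objects that are not
// reachable from the roots. The filter marks all reachable objects when it is
// constructed, SkipObject() reports unmarked ones, and the mark bits are
// cleared again in the destructor.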
5604 class HeapObjectsFilter {
5605 public:
5606 virtual ~HeapObjectsFilter() {}
5607 virtual bool SkipObject(HeapObject* object) = 0;
5608 };
5609
5610
5611 class UnreachableObjectsFilter : public HeapObjectsFilter {
5612 public:
5613 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5614 MarkReachableObjects();
5615 }
5616
5617 ~UnreachableObjectsFilter() {
5618 heap_->mark_compact_collector()->ClearMarkbits();
5619 }
5620
5621 bool SkipObject(HeapObject* object) {
5622 MarkBit mark_bit = Marking::MarkBitFrom(object);
5623 return !mark_bit.Get();
5624 }
5625
5626 private:
5627 class MarkingVisitor : public ObjectVisitor {
5628 public:
5629 MarkingVisitor() : marking_stack_(10) {}
5630
5631 void VisitPointers(Object** start, Object** end) {
5632 for (Object** p = start; p < end; p++) {
5633 if (!(*p)->IsHeapObject()) continue;
5634 HeapObject* obj = HeapObject::cast(*p);
5635 MarkBit mark_bit = Marking::MarkBitFrom(obj);
5636 if (!mark_bit.Get()) {
5637 mark_bit.Set();
5638 marking_stack_.Add(obj);
5639 }
5640 }
5641 }
5642
5643 void TransitiveClosure() {
5644 while (!marking_stack_.is_empty()) {
5645 HeapObject* obj = marking_stack_.RemoveLast();
5646 obj->Iterate(this);
5647 }
5648 }
5649
5650 private:
5651 List<HeapObject*> marking_stack_;
5652 };
5653
5654 void MarkReachableObjects() {
5655 MarkingVisitor visitor;
5656 heap_->IterateRoots(&visitor, VISIT_ALL);
5657 visitor.TransitiveClosure();
5658 }
5659
5660 Heap* heap_;
5661 DisallowHeapAllocation no_allocation_;
5662 };
5663
5664
5665 HeapIterator::HeapIterator(Heap* heap)
5666 : make_heap_iterable_helper_(heap),
5667 no_heap_allocation_(),
5668 heap_(heap),
5669 filtering_(HeapIterator::kNoFiltering),
5670 filter_(NULL) {
5671 Init();
5672 }
5673
5674
5675 HeapIterator::HeapIterator(Heap* heap,
5676 HeapIterator::HeapObjectsFiltering filtering)
5677 : make_heap_iterable_helper_(heap),
5678 no_heap_allocation_(),
5679 heap_(heap),
5680 filtering_(filtering),
5681 filter_(NULL) {
5682 Init();
5683 }
5684
5685
5686 HeapIterator::~HeapIterator() {
5687 Shutdown();
5688 }
5689
5690
5691 void HeapIterator::Init() {
5692 // Start the iteration.
5693 space_iterator_ = new SpaceIterator(heap_);
5694 switch (filtering_) {
5695 case kFilterUnreachable:
5696 filter_ = new UnreachableObjectsFilter(heap_);
5697 break;
5698 default:
5699 break;
5700 }
5701 object_iterator_ = space_iterator_->next();
5702 }
5703
5704
5705 void HeapIterator::Shutdown() {
5706 #ifdef DEBUG
5707 // Assert that in filtering mode we have iterated through all
5708 // objects. Otherwise, heap will be left in an inconsistent state.
5709 if (filtering_ != kNoFiltering) {
5710 ASSERT(object_iterator_ == NULL);
5711 }
5712 #endif
5713 // Make sure the last iterator is deallocated.
5714 delete space_iterator_;
5715 space_iterator_ = NULL;
5716 object_iterator_ = NULL;
5717 delete filter_;
5718 filter_ = NULL;
5719 }
5720
5721
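// Descriptive note: returns the next heap object, applying the optional
// reachability filter; NULL signals that the iteration is complete.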
5722 HeapObject* HeapIterator::next() {
5723 if (filter_ == NULL) return NextObject();
5724
5725 HeapObject* obj = NextObject();
5726 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5727 return obj;
5728 }
5729
5730
5731 HeapObject* HeapIterator::NextObject() {
5732 // No iterator means we are done.
5733 if (object_iterator_ == NULL) return NULL;
5734
5735 if (HeapObject* obj = object_iterator_->next_object()) {
5736 // If the current iterator has more objects we are fine.
5737 return obj;
5738 } else {
5739 // Go through the spaces looking for one that has objects.
5740 while (space_iterator_->has_next()) {
5741 object_iterator_ = space_iterator_->next();
5742 if (HeapObject* obj = object_iterator_->next_object()) {
5743 return obj;
5744 }
5745 }
5746 }
5747 // Done with the last space.
5748 object_iterator_ = NULL;
5749 return NULL;
5750 }
5751
5752
5753 void HeapIterator::reset() {
5754 // Restart the iterator.
5755 Shutdown();
5756 Init();
5757 }
5758
5759
5760 #ifdef DEBUG
5761
5762 Object* const PathTracer::kAnyGlobalObject = NULL;
5763
5764 class PathTracer::MarkVisitor: public ObjectVisitor {
5765 public:
5766 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5767 void VisitPointers(Object** start, Object** end) {
5768 // Scan all HeapObject pointers in [start, end)
5769 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5770 if ((*p)->IsHeapObject())
5771 tracer_->MarkRecursively(p, this);
5772 }
5773 }
5774
5775 private:
5776 PathTracer* tracer_;
5777 };
5778
5779
5780 class PathTracer::UnmarkVisitor: public ObjectVisitor {
5781 public:
5782 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5783 void VisitPointers(Object** start, Object** end) {
5784 // Scan all HeapObject pointers in [start, end)
5785 for (Object** p = start; p < end; p++) {
5786 if ((*p)->IsHeapObject())
5787 tracer_->UnmarkRecursively(p, this);
5788 }
5789 }
5790
5791 private:
5792 PathTracer* tracer_;
5793 };
5794
5795
5796 void PathTracer::VisitPointers(Object** start, Object** end) {
5797 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5798 // Visit all HeapObject pointers in [start, end)
5799 for (Object** p = start; !done && (p < end); p++) {
5800 if ((*p)->IsHeapObject()) {
5801 TracePathFrom(p);
5802 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5803 }
5804 }
5805 }
5806
5807
5808 void PathTracer::Reset() {
5809 found_target_ = false;
5810 object_stack_.Clear();
5811 }
5812
5813
5814 void PathTracer::TracePathFrom(Object** root) {
5815 ASSERT((search_target_ == kAnyGlobalObject) ||
5816 search_target_->IsHeapObject());
5817 found_target_in_trace_ = false;
5818 Reset();
5819
5820 MarkVisitor mark_visitor(this);
5821 MarkRecursively(root, &mark_visitor);
5822
5823 UnmarkVisitor unmark_visitor(this);
5824 UnmarkRecursively(root, &unmark_visitor);
5825
5826 ProcessResults();
5827 }
5828
5829
5830 static bool SafeIsNativeContext(HeapObject* obj) {
5831 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5832 }
5833
5834
5835 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5836 if (!(*p)->IsHeapObject()) return;
5837
5838 HeapObject* obj = HeapObject::cast(*p);
5839
5840 MapWord map_word = obj->map_word();
5841 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
5842
5843 if (found_target_in_trace_) return; // stop if target found
5844 object_stack_.Add(obj);
5845 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5846 (obj == search_target_)) {
5847 found_target_in_trace_ = true;
5848 found_target_ = true;
5849 return;
5850 }
5851
5852 bool is_native_context = SafeIsNativeContext(obj);
5853
5854 // not visited yet
5855 Map* map = Map::cast(map_word.ToMap());
5856
5857 MapWord marked_map_word =
5858 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5859 obj->set_map_word(marked_map_word);
5860
5861 // Scan the object body.
5862 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5863 // This is specialized to scan Context's properly.
5864 Object** start = reinterpret_cast<Object**>(obj->address() +
5865 Context::kHeaderSize);
5866 Object** end = reinterpret_cast<Object**>(obj->address() +
5867 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
5868 mark_visitor->VisitPointers(start, end);
5869 } else {
5870 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5871 }
5872
5873 // Scan the map after the body because the body is a lot more interesting
5874 // when doing leak detection.
5875 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5876
5877 if (!found_target_in_trace_) { // don't pop if found the target
5878 object_stack_.RemoveLast();
5879 }
5880 }
5881
5882
5883 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5884 if (!(*p)->IsHeapObject()) return;
5885
5886 HeapObject* obj = HeapObject::cast(*p);
5887
5888 MapWord map_word = obj->map_word();
5889 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
5890
5891 MapWord unmarked_map_word =
5892 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5893 obj->set_map_word(unmarked_map_word);
5894
5895 Map* map = Map::cast(unmarked_map_word.ToMap());
5896
5897 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5898
5899 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5900 }
5901
5902
5903 void PathTracer::ProcessResults() {
5904 if (found_target_) {
5905 PrintF("=====================================\n");
5906 PrintF("==== Path to object ====\n");
5907 PrintF("=====================================\n\n");
5908
5909 ASSERT(!object_stack_.is_empty());
5910 for (int i = 0; i < object_stack_.length(); i++) {
5911 if (i > 0) PrintF("\n |\n |\n V\n\n");
5912 Object* obj = object_stack_[i];
5913 obj->Print();
5914 }
5915 PrintF("=====================================\n");
5916 }
5917 }
5918
5919
5920 // Triggers a depth-first traversal of reachable objects from one
5921 // given root object and finds a path to a specific heap object and
5922 // prints it.
5923 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5924 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5925 tracer.VisitPointer(&root);
5926 }
5927
5928
5929 // Triggers a depth-first traversal of reachable objects from roots
5930 // and finds a path to a specific heap object and prints it.
5931 void Heap::TracePathToObject(Object* target) {
5932 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5933 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5934 }
5935
5936
5937 // Triggers a depth-first traversal of reachable objects from roots
5938 // and finds a path to any global object and prints it. Useful for
5939 // determining the source for leaks of global objects.
5940 void Heap::TracePathToGlobal() {
5941 PathTracer tracer(PathTracer::kAnyGlobalObject,
5942 PathTracer::FIND_ALL,
5943 VISIT_ALL);
5944 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5945 }
5946 #endif
5947
5948
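// Descriptive note: sums the wasted and free bytes of all old spaces; the GC
// tracer reports this as "holes_size" before and after a collection.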
5949 static intptr_t CountTotalHolesSize(Heap* heap) {
5950 intptr_t holes_size = 0;
5951 OldSpaces spaces(heap);
5952 for (OldSpace* space = spaces.next();
5953 space != NULL;
5954 space = spaces.next()) {
5955 holes_size += space->Waste() + space->Available();
5956 }
5957 return holes_size;
5958 }
5959
5960
5961 GCTracer::GCTracer(Heap* heap,
5962 const char* gc_reason,
5963 const char* collector_reason)
5964 : start_time_(0.0),
5965 start_object_size_(0),
5966 start_memory_size_(0),
5967 gc_count_(0),
5968 full_gc_count_(0),
5969 allocated_since_last_gc_(0),
5970 spent_in_mutator_(0),
5971 nodes_died_in_new_space_(0),
5972 nodes_copied_in_new_space_(0),
5973 nodes_promoted_(0),
5974 heap_(heap),
5975 gc_reason_(gc_reason),
5976 collector_reason_(collector_reason) {
5977 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5978 start_time_ = OS::TimeCurrentMillis();
5979 start_object_size_ = heap_->SizeOfObjects();
5980 start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
5981
5982 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
5983 scopes_[i] = 0;
5984 }
5985
5986 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
5987
5988 allocated_since_last_gc_ =
5989 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
5990
5991 if (heap_->last_gc_end_timestamp_ > 0) {
5992 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
5993 }
5994
5995 steps_count_ = heap_->incremental_marking()->steps_count();
5996 steps_took_ = heap_->incremental_marking()->steps_took();
5997 longest_step_ = heap_->incremental_marking()->longest_step();
5998 steps_count_since_last_gc_ =
5999 heap_->incremental_marking()->steps_count_since_last_gc();
6000 steps_took_since_last_gc_ =
6001 heap_->incremental_marking()->steps_took_since_last_gc();
6002 }
6003
6004
6005 GCTracer::~GCTracer() {
6006 // Printf ONE line iff flag is set.
6007 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6008
6009 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6010
6011 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
6012 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
6013
6014 double time = heap_->last_gc_end_timestamp_ - start_time_;
6015
6016 // Update cumulative GC statistics if required.
6017 if (FLAG_print_cumulative_gc_stat) {
6018 heap_->total_gc_time_ms_ += time;
6019 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
6020 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
6021 heap_->alive_after_last_gc_);
6022 if (!first_gc) {
6023 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
6024 spent_in_mutator_);
6025 }
6026 } else if (FLAG_trace_gc_verbose) {
6027 heap_->total_gc_time_ms_ += time;
6028 }
6029
6030 if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
6031
6032 heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
6033
6034 if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
6035 PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
6036
6037 if (!FLAG_trace_gc_nvp) {
6038 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6039
6040 double end_memory_size_mb =
6041 static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
6042
6043 PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
6044 CollectorString(),
6045 static_cast<double>(start_object_size_) / MB,
6046 static_cast<double>(start_memory_size_) / MB,
6047 SizeOfHeapObjects(),
6048 end_memory_size_mb);
6049
6050 if (external_time > 0) PrintF("%d / ", external_time);
6051 PrintF("%.1f ms", time);
6052 if (steps_count_ > 0) {
6053 if (collector_ == SCAVENGER) {
6054 PrintF(" (+ %.1f ms in %d steps since last GC)",
6055 steps_took_since_last_gc_,
6056 steps_count_since_last_gc_);
6057 } else {
6058 PrintF(" (+ %.1f ms in %d steps since start of marking, "
6059 "biggest step %.1f ms)",
6060 steps_took_,
6061 steps_count_,
6062 longest_step_);
6063 }
6064 }
6065
6066 if (gc_reason_ != NULL) {
6067 PrintF(" [%s]", gc_reason_);
6068 }
6069
6070 if (collector_reason_ != NULL) {
6071 PrintF(" [%s]", collector_reason_);
6072 }
6073
6074 PrintF(".\n");
6075 } else {
6076 PrintF("pause=%.1f ", time);
6077 PrintF("mutator=%.1f ", spent_in_mutator_);
6078 PrintF("gc=");
6079 switch (collector_) {
6080 case SCAVENGER:
6081 PrintF("s");
6082 break;
6083 case MARK_COMPACTOR:
6084 PrintF("ms");
6085 break;
6086 default:
6087 UNREACHABLE();
6088 }
6089 PrintF(" ");
6090
6091 PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
6092 PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
6093 PrintF("sweep=%.2f ", scopes_[Scope::MC_SWEEP]);
6094 PrintF("sweepns=%.2f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
6095 PrintF("sweepos=%.2f ", scopes_[Scope::MC_SWEEP_OLDSPACE]);
6096 PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
6097 PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
6098 PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
6099 PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
6100 PrintF("compaction_ptrs=%.1f ",
6101 scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
6102 PrintF("intracompaction_ptrs=%.1f ",
6103 scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
6104 PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
6105 PrintF("weakcollection_process=%.1f ",
6106 scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
6107 PrintF("weakcollection_clear=%.1f ",
6108 scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
6109
6110 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
6111 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
6112 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
6113 in_free_list_or_wasted_before_gc_);
6114 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
6115
6116 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
6117 PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_);
6118 PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
6119 heap_->semi_space_copied_object_size_);
6120 PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
6121 PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
6122 PrintF("nodes_promoted=%d ", nodes_promoted_);
6123 PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
6124 PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
6125
6126 if (collector_ == SCAVENGER) {
6127 PrintF("stepscount=%d ", steps_count_since_last_gc_);
6128 PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
6129 } else {
6130 PrintF("stepscount=%d ", steps_count_);
6131 PrintF("stepstook=%.1f ", steps_took_);
6132 PrintF("longeststep=%.1f ", longest_step_);
6133 }
6134
6135 PrintF("\n");
6136 }
6137
6138 heap_->PrintShortHeapStatistics();
6139 }
6140
6141
6142 const char* GCTracer::CollectorString() {
6143 switch (collector_) {
6144 case SCAVENGER:
6145 return "Scavenge";
6146 case MARK_COMPACTOR:
6147 return "Mark-sweep";
6148 }
6149 return "Unknown GC";
6150 }
6151
6152
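// Descriptive note: KeyedLookupCache maps (map, name) pairs to field offsets.
// Hash() mixes the map pointer with the name's hash; Lookup() and Update()
// then probe the kEntriesPerBucket entries of the selected bucket linearly.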
6153 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
6154 DisallowHeapAllocation no_gc;
6155 // Uses only lower 32 bits if pointers are larger.
6156 uintptr_t addr_hash =
6157 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
6158 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
6159 }
6160
6161
6162 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
6163 DisallowHeapAllocation no_gc;
6164 int index = (Hash(map, name) & kHashMask);
6165 for (int i = 0; i < kEntriesPerBucket; i++) {
6166 Key& key = keys_[index + i];
6167 if ((key.map == *map) && key.name->Equals(*name)) {
6168 return field_offsets_[index + i];
6169 }
6170 }
6171 return kNotFound;
6172 }
6173
6174
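// Descriptive note: inserts or refreshes a cache entry. Non-unique names are
// internalized first. A free slot in the bucket is reused if one exists;
// otherwise the bucket's entries are shifted down by one and the new entry is
// written at position zero, keeping recently used entries near the front.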
6175 void KeyedLookupCache::Update(Handle<Map> map,
6176 Handle<Name> name,
6177 int field_offset) {
6178 DisallowHeapAllocation no_gc;
6179 if (!name->IsUniqueName()) {
6180 if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
6181 Handle<String>::cast(name)).
6182 ToHandle(&name)) {
6183 return;
6184 }
6185 }
6186 // This cache is cleared only between mark compact passes, so we expect the
6187 // cache to only contain old space names.
6188 ASSERT(!map->GetIsolate()->heap()->InNewSpace(*name));
6189
6190 int index = (Hash(map, name) & kHashMask);
6191 // After a GC there will be free slots, so we use them in order (this may
6192 // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = *map;
      key.name = *name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }
  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = *map;
  key.name = *name;
  field_offsets_[index] = field_offset;
}
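// Illustrative sketch (editor's addition, not V8 code): the Update() policy
// above, applied to the hypothetical SmallCache from the previous sketch --
// take a free slot in the bucket if one exists, otherwise shift the existing
// entries down one position and insert the new entry at the front.
//
//   void Update(SmallCache* cache, uintptr_t key, int value) {
//     const uintptr_t kFree = 0;  // assumed "empty slot" marker
//     int index = SmallCache::Hash(key);
//     for (int i = 0; i < SmallCache::kWays; i++) {
//       if (cache->keys[index + i] == kFree) {
//         cache->keys[index + i] = key;
//         cache->values[index + i] = value;
//         return;
//       }
//     }
//     // Bucket full: age entries by one position and insert at the front.
//     for (int i = SmallCache::kWays - 1; i > 0; i--) {
//       cache->keys[index + i] = cache->keys[index + i - 1];
//       cache->values[index + i] = cache->values[index + i - 1];
//     }
//     cache->keys[index] = key;
//     cache->values[index] = value;
//   }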


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(new_space_strings_[i]->IsExternalString());
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();

  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(old_space_strings_[i]->IsExternalString());
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}
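// Illustrative sketch (editor's addition, not V8 code): the in-place
// compaction used by CleanUp() above -- keep a write index ("last"), copy
// survivors down over the dropped entries, then shrink the container to the
// surviving prefix. Shown here on std::vector; the predicate name is
// hypothetical.
//
//   #include <vector>
//
//   template <typename T, typename Pred>
//   void CompactInPlace(std::vector<T>* entries, Pred should_keep) {
//     size_t last = 0;
//     for (size_t i = 0; i < entries->size(); ++i) {
//       if (!should_keep((*entries)[i])) continue;  // drop this entry
//       (*entries)[last++] = (*entries)[i];         // keep: copy down
//     }
//     entries->resize(last);     // ~ Rewind(last)
//     entries->shrink_to_fit();  // ~ Trim()
//   }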


void ExternalStringTable::TearDown() {
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
  }
  new_space_strings_.Free();
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
  }
  old_space_strings_.Free();
}


void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion, it will fail to find the chunk because
      // it tries to search the list of pages owned by the large object
      // space, and queued chunks have been detached from that list.
      // To work around this we split the large chunk into normal,
      // kPageSize-aligned pieces and initialize the size, owner and flags
      // fields of every piece. If FromAnyPointerAddress encounters a slot
      // that belongs to one of these smaller pieces it will treat it as a
      // slot on a normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}
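// Illustrative sketch (editor's addition, not V8 code): the page-walk pattern
// used in the loop above -- step through a contiguous region in fixed,
// page-sized strides and clamp the last piece to the region's end. Plain
// integers stand in for Address and MemoryChunk; kPageSize here is a
// hypothetical local constant.
//
//   #include <algorithm>
//   #include <cstdint>
//
//   void WalkInPageSizedPieces(uintptr_t start, uintptr_t end,
//                              void (*visit)(uintptr_t piece_start,
//                                            uintptr_t piece_end)) {
//     const uintptr_t kPageSize = 1u << 20;  // 1 MB pages, for illustration
//     for (uintptr_t piece = start; piece < end; piece += kPageSize) {
//       uintptr_t piece_end = std::min(piece + kPageSize, end);
//       visit(piece, piece_end);  // e.g. stamp a fake header on this piece
//     }
//   }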


void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
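// Illustrative sketch (editor's addition, not V8 code): the bounded
// ring-buffer pattern used by RememberUnmappedPage() above -- the write index
// wraps around, so the newest entries overwrite the oldest. Names below are
// hypothetical.
//
//   #include <cstdint>
//
//   struct RecentPages {
//     static const int kCapacity = 128;
//     uintptr_t entries[kCapacity];
//     int index = 0;
//
//     void Remember(uintptr_t page, bool compacted) {
//       // XOR a low-bits marker into the pointer so entries stand out in a
//       // crash dump, mirroring the 0xc1ead / 0x1d1ed tags above (the page
//       // is assumed page-aligned, so its low bits are otherwise zero).
//       uintptr_t tag = compacted ? 0xc1ead : 0x1d1ed;
//       entries[index] = page ^ tag;
//       index = (index + 1) % kCapacity;
//     }
//   };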


void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


void Heap::CheckpointObjectStats() {
  LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
  counters->count_of_##name()->Increment( \
      static_cast<int>(object_counts_[name])); \
  counters->count_of_##name()->Decrement( \
      static_cast<int>(object_counts_last_time_[name])); \
  counters->size_of_##name()->Increment( \
      static_cast<int>(object_sizes_[name])); \
  counters->size_of_##name()->Decrement( \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
  counters->count_of_CODE_TYPE_##name()->Increment( \
      static_cast<int>(object_counts_[index])); \
  counters->count_of_CODE_TYPE_##name()->Decrement( \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment( \
      static_cast<int>(object_sizes_[index])); \
  counters->size_of_CODE_TYPE_##name()->Decrement( \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
  counters->count_of_FIXED_ARRAY_##name()->Increment( \
      static_cast<int>(object_counts_[index])); \
  counters->count_of_FIXED_ARRAY_##name()->Decrement( \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Increment( \
      static_cast<int>(object_sizes_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Decrement( \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
  index = \
      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
  counters->count_of_CODE_AGE_##name()->Increment( \
      static_cast<int>(object_counts_[index])); \
  counters->count_of_CODE_AGE_##name()->Decrement( \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_AGE_##name()->Increment( \
      static_cast<int>(object_sizes_[index])); \
  counters->size_of_CODE_AGE_##name()->Decrement( \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
  ClearObjectStats();
}
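// Illustrative sketch (editor's addition, not V8 code): the delta-reporting
// scheme the macros above expand to -- external counters are adjusted by the
// difference between the current per-GC tallies and the previously reported
// snapshot, after which the snapshot is refreshed. Counter and array names
// below are hypothetical.
//
//   #include <cstring>
//
//   const int kTypeCount = 8;
//   int reported[kTypeCount];       // what the external counters hold
//   int current[kTypeCount];        // tallied during the last GC
//   int last_reported[kTypeCount];  // snapshot from the previous checkpoint
//
//   void CheckpointStats() {
//     for (int i = 0; i < kTypeCount; i++) {
//       // Equivalent to Increment(current) followed by Decrement(last).
//       reported[i] += current[i] - last_reported[i];
//     }
//     std::memcpy(last_reported, current, sizeof(current));
//     std::memset(current, 0, sizeof(current));  // ~ ClearObjectStats()
//   }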

} }  // namespace v8::internal