1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/accessors.h"
8 #include "src/api.h"
9 #include "src/base/bits.h"
10 #include "src/base/once.h"
11 #include "src/base/utils/random-number-generator.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/compilation-cache.h"
15 #include "src/conversions.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
20 #include "src/heap/gc-idle-time-handler.h"
21 #include "src/heap/incremental-marking.h"
22 #include "src/heap/mark-compact.h"
23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/objects-visiting.h"
25 #include "src/heap/store-buffer.h"
26 #include "src/heap-profiler.h"
27 #include "src/isolate-inl.h"
28 #include "src/natives.h"
29 #include "src/runtime-profiler.h"
30 #include "src/scopeinfo.h"
31 #include "src/snapshot.h"
32 #include "src/utils.h"
33 #include "src/v8threads.h"
34 #include "src/vm-state-inl.h"
35
36 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
37 #include "src/regexp-macro-assembler.h" // NOLINT
38 #include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
39 #endif
40 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
41 #include "src/regexp-macro-assembler.h" // NOLINT
42 #include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
43 #endif
44 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
45 #include "src/regexp-macro-assembler.h"
46 #include "src/mips64/regexp-macro-assembler-mips64.h"
47 #endif
48
49 namespace v8 {
50 namespace internal {
51
52
53 Heap::Heap()
54 : amount_of_external_allocated_memory_(0),
55 amount_of_external_allocated_memory_at_last_global_gc_(0),
56 isolate_(NULL),
57 code_range_size_(0),
58 // semispace_size_ should be a power of 2 and old_generation_size_ should
59 // be a multiple of Page::kPageSize.
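// Note: the (kPointerSize / 4) factor below scales the defaults with the
// word size; it is 1 on 32-bit targets and 2 on 64-bit targets, so the
// default max semispace size, for example, is 8 MB or 16 MB respectively.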
60 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
61 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
62 initial_semispace_size_(Page::kPageSize),
63 max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
64 max_executable_size_(256ul * (kPointerSize / 4) * MB),
65 // Variables set based on semispace_size_ and old_generation_size_ in
66 // ConfigureHeap.
67 // Will be 4 * reserved_semispace_size_ to ensure that young
68 // generation can be aligned to its size.
69 maximum_committed_(0),
70 survived_since_last_expansion_(0),
71 sweep_generation_(0),
72 always_allocate_scope_depth_(0),
73 contexts_disposed_(0),
74 global_ic_age_(0),
75 flush_monomorphic_ics_(false),
76 scan_on_scavenge_pages_(0),
77 new_space_(this),
78 old_pointer_space_(NULL),
79 old_data_space_(NULL),
80 code_space_(NULL),
81 map_space_(NULL),
82 cell_space_(NULL),
83 property_cell_space_(NULL),
84 lo_space_(NULL),
85 gc_state_(NOT_IN_GC),
86 gc_post_processing_depth_(0),
87 allocations_count_(0),
88 raw_allocations_hash_(0),
89 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
90 ms_count_(0),
91 gc_count_(0),
92 remembered_unmapped_pages_index_(0),
93 unflattened_strings_length_(0),
94 #ifdef DEBUG
95 allocation_timeout_(0),
96 #endif // DEBUG
97 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
98 old_gen_exhausted_(false),
99 inline_allocation_disabled_(false),
100 store_buffer_rebuilder_(store_buffer()),
101 hidden_string_(NULL),
102 gc_safe_size_of_old_object_(NULL),
103 total_regexp_code_generated_(0),
104 tracer_(this),
105 high_survival_rate_period_length_(0),
106 promoted_objects_size_(0),
107 promotion_rate_(0),
108 semi_space_copied_object_size_(0),
109 semi_space_copied_rate_(0),
110 nodes_died_in_new_space_(0),
111 nodes_copied_in_new_space_(0),
112 nodes_promoted_(0),
113 maximum_size_scavenges_(0),
114 max_gc_pause_(0.0),
115 total_gc_time_ms_(0.0),
116 max_alive_after_gc_(0),
117 min_in_mutator_(kMaxInt),
118 marking_time_(0.0),
119 sweeping_time_(0.0),
120 mark_compact_collector_(this),
121 store_buffer_(this),
122 marking_(this),
123 incremental_marking_(this),
124 gc_count_at_last_idle_gc_(0),
125 full_codegen_bytes_generated_(0),
126 crankshaft_codegen_bytes_generated_(0),
127 gcs_since_last_deopt_(0),
128 #ifdef VERIFY_HEAP
129 no_weak_object_verification_scope_depth_(0),
130 #endif
131 allocation_sites_scratchpad_length_(0),
132 promotion_queue_(this),
133 configured_(false),
134 external_string_table_(this),
135 chunks_queued_for_free_(NULL),
136 gc_callbacks_depth_(0) {
137 // Allow build-time customization of the max semispace size. Building
138 // V8 with snapshots and a non-default max semispace size is much
139 // easier if you can define it as part of the build environment.
140 #if defined(V8_MAX_SEMISPACE_SIZE)
141 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
142 #endif
143
144 // Ensure old_generation_size_ is a multiple of kPageSize.
145 DCHECK(MB >= Page::kPageSize);
146
147 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
148 set_native_contexts_list(NULL);
149 set_array_buffers_list(Smi::FromInt(0));
150 set_allocation_sites_list(Smi::FromInt(0));
151 set_encountered_weak_collections(Smi::FromInt(0));
152 // Put a dummy entry in the remembered pages so we can find the list in
153 // the minidump even if there are no real unmapped pages.
154 RememberUnmappedPage(NULL, false);
155
156 ClearObjectStats(true);
157 }
158
159
160 intptr_t Heap::Capacity() {
161 if (!HasBeenSetUp()) return 0;
162
163 return new_space_.Capacity() + old_pointer_space_->Capacity() +
164 old_data_space_->Capacity() + code_space_->Capacity() +
165 map_space_->Capacity() + cell_space_->Capacity() +
166 property_cell_space_->Capacity();
167 }
168
169
170 intptr_t Heap::CommittedMemory() {
171 if (!HasBeenSetUp()) return 0;
172
173 return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
174 old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
175 map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
176 property_cell_space_->CommittedMemory() + lo_space_->Size();
177 }
178
179
180 size_t Heap::CommittedPhysicalMemory() {
181 if (!HasBeenSetUp()) return 0;
182
183 return new_space_.CommittedPhysicalMemory() +
184 old_pointer_space_->CommittedPhysicalMemory() +
185 old_data_space_->CommittedPhysicalMemory() +
186 code_space_->CommittedPhysicalMemory() +
187 map_space_->CommittedPhysicalMemory() +
188 cell_space_->CommittedPhysicalMemory() +
189 property_cell_space_->CommittedPhysicalMemory() +
190 lo_space_->CommittedPhysicalMemory();
191 }
192
193
194 intptr_t Heap::CommittedMemoryExecutable() {
195 if (!HasBeenSetUp()) return 0;
196
197 return isolate()->memory_allocator()->SizeExecutable();
198 }
199
200
201 void Heap::UpdateMaximumCommitted() {
202 if (!HasBeenSetUp()) return;
203
204 intptr_t current_committed_memory = CommittedMemory();
205 if (current_committed_memory > maximum_committed_) {
206 maximum_committed_ = current_committed_memory;
207 }
208 }
209
210
211 intptr_t Heap::Available() {
212 if (!HasBeenSetUp()) return 0;
213
214 return new_space_.Available() + old_pointer_space_->Available() +
215 old_data_space_->Available() + code_space_->Available() +
216 map_space_->Available() + cell_space_->Available() +
217 property_cell_space_->Available();
218 }
219
220
221 bool Heap::HasBeenSetUp() {
222 return old_pointer_space_ != NULL && old_data_space_ != NULL &&
223 code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
224 property_cell_space_ != NULL && lo_space_ != NULL;
225 }
226
227
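// Size lookup that stays correct while an object's map word is temporarily
// claimed by intrusive marking: in that case the size stored by the marker
// is returned instead of being computed from the map.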
228 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
229 if (IntrusiveMarking::IsMarked(object)) {
230 return IntrusiveMarking::SizeOfMarkedObject(object);
231 }
232 return object->SizeFromMap(object->map());
233 }
234
235
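// Chooses which collector to run for a GC requested in |space|, and reports
// the reason for that choice through |*reason| (used for tracing/counters).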
236 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
237 const char** reason) {
238 // Is global GC requested?
239 if (space != NEW_SPACE) {
240 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
241 *reason = "GC in old space requested";
242 return MARK_COMPACTOR;
243 }
244
245 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
246 *reason = "GC in old space forced by flags";
247 return MARK_COMPACTOR;
248 }
249
250 // Is enough data promoted to justify a global GC?
251 if (OldGenerationAllocationLimitReached()) {
252 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
253 *reason = "promotion limit reached";
254 return MARK_COMPACTOR;
255 }
256
257 // Have allocation in OLD and LO failed?
258 if (old_gen_exhausted_) {
259 isolate_->counters()
260 ->gc_compactor_caused_by_oldspace_exhaustion()
261 ->Increment();
262 *reason = "old generations exhausted";
263 return MARK_COMPACTOR;
264 }
265
266 // Is there enough space left in OLD to guarantee that a scavenge can
267 // succeed?
268 //
269 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
270 // for object promotion. It counts only the bytes that the memory
271 // allocator has not yet allocated from the OS and assigned to any space,
272 // and does not count available bytes already in the old space or code
273 // space. Undercounting is safe---we may get an unrequested full GC when
274 // a scavenge would have succeeded.
275 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
276 isolate_->counters()
277 ->gc_compactor_caused_by_oldspace_exhaustion()
278 ->Increment();
279 *reason = "scavenge might not succeed";
280 return MARK_COMPACTOR;
281 }
282
283 // Default
284 *reason = NULL;
285 return SCAVENGER;
286 }
287
288
289 // TODO(1238405): Combine the infrastructure for --heap-stats and
290 // --log-gc to avoid the complicated preprocessor and flag testing.
291 void Heap::ReportStatisticsBeforeGC() {
292 // Heap::ReportHeapStatistics will also log NewSpace statistics when
293 // --log-gc is set. The following logic is used to avoid
294 // double logging.
295 #ifdef DEBUG
296 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
297 if (FLAG_heap_stats) {
298 ReportHeapStatistics("Before GC");
299 } else if (FLAG_log_gc) {
300 new_space_.ReportStatistics();
301 }
302 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
303 #else
304 if (FLAG_log_gc) {
305 new_space_.CollectStatistics();
306 new_space_.ReportStatistics();
307 new_space_.ClearHistograms();
308 }
309 #endif // DEBUG
310 }
311
312
313 void Heap::PrintShortHeapStatistics() {
314 if (!FLAG_trace_gc_verbose) return;
315 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
316 "d KB"
317 ", available: %6" V8_PTR_PREFIX "d KB\n",
318 isolate_->memory_allocator()->Size() / KB,
319 isolate_->memory_allocator()->Available() / KB);
320 PrintPID("New space, used: %6" V8_PTR_PREFIX
321 "d KB"
322 ", available: %6" V8_PTR_PREFIX
323 "d KB"
324 ", committed: %6" V8_PTR_PREFIX "d KB\n",
325 new_space_.Size() / KB, new_space_.Available() / KB,
326 new_space_.CommittedMemory() / KB);
327 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
328 "d KB"
329 ", available: %6" V8_PTR_PREFIX
330 "d KB"
331 ", committed: %6" V8_PTR_PREFIX "d KB\n",
332 old_pointer_space_->SizeOfObjects() / KB,
333 old_pointer_space_->Available() / KB,
334 old_pointer_space_->CommittedMemory() / KB);
335 PrintPID("Old data space, used: %6" V8_PTR_PREFIX
336 "d KB"
337 ", available: %6" V8_PTR_PREFIX
338 "d KB"
339 ", committed: %6" V8_PTR_PREFIX "d KB\n",
340 old_data_space_->SizeOfObjects() / KB,
341 old_data_space_->Available() / KB,
342 old_data_space_->CommittedMemory() / KB);
343 PrintPID("Code space, used: %6" V8_PTR_PREFIX
344 "d KB"
345 ", available: %6" V8_PTR_PREFIX
346 "d KB"
347 ", committed: %6" V8_PTR_PREFIX "d KB\n",
348 code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
349 code_space_->CommittedMemory() / KB);
350 PrintPID("Map space, used: %6" V8_PTR_PREFIX
351 "d KB"
352 ", available: %6" V8_PTR_PREFIX
353 "d KB"
354 ", committed: %6" V8_PTR_PREFIX "d KB\n",
355 map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
356 map_space_->CommittedMemory() / KB);
357 PrintPID("Cell space, used: %6" V8_PTR_PREFIX
358 "d KB"
359 ", available: %6" V8_PTR_PREFIX
360 "d KB"
361 ", committed: %6" V8_PTR_PREFIX "d KB\n",
362 cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
363 cell_space_->CommittedMemory() / KB);
364 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
365 "d KB"
366 ", available: %6" V8_PTR_PREFIX
367 "d KB"
368 ", committed: %6" V8_PTR_PREFIX "d KB\n",
369 property_cell_space_->SizeOfObjects() / KB,
370 property_cell_space_->Available() / KB,
371 property_cell_space_->CommittedMemory() / KB);
372 PrintPID("Large object space, used: %6" V8_PTR_PREFIX
373 "d KB"
374 ", available: %6" V8_PTR_PREFIX
375 "d KB"
376 ", committed: %6" V8_PTR_PREFIX "d KB\n",
377 lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
378 lo_space_->CommittedMemory() / KB);
379 PrintPID("All spaces, used: %6" V8_PTR_PREFIX
380 "d KB"
381 ", available: %6" V8_PTR_PREFIX
382 "d KB"
383 ", committed: %6" V8_PTR_PREFIX "d KB\n",
384 this->SizeOfObjects() / KB, this->Available() / KB,
385 this->CommittedMemory() / KB);
386 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
387 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
388 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
389 }
390
391
392 // TODO(1238405): Combine the infrastructure for --heap-stats and
393 // --log-gc to avoid the complicated preprocessor and flag testing.
394 void Heap::ReportStatisticsAfterGC() {
395 // As in the before-GC case, we use some complicated logic to ensure that
396 // NewSpace statistics are logged exactly once when --log-gc is turned on.
397 #if defined(DEBUG)
398 if (FLAG_heap_stats) {
399 new_space_.CollectStatistics();
400 ReportHeapStatistics("After GC");
401 } else if (FLAG_log_gc) {
402 new_space_.ReportStatistics();
403 }
404 #else
405 if (FLAG_log_gc) new_space_.ReportStatistics();
406 #endif // DEBUG
407 }
408
409
410 void Heap::GarbageCollectionPrologue() {
411 {
412 AllowHeapAllocation for_the_first_part_of_prologue;
413 ClearJSFunctionResultCaches();
414 gc_count_++;
415 unflattened_strings_length_ = 0;
416
417 if (FLAG_flush_code && FLAG_flush_code_incrementally) {
418 mark_compact_collector()->EnableCodeFlushing(true);
419 }
420
421 #ifdef VERIFY_HEAP
422 if (FLAG_verify_heap) {
423 Verify();
424 }
425 #endif
426 }
427
428 // Reset GC statistics.
429 promoted_objects_size_ = 0;
430 semi_space_copied_object_size_ = 0;
431 nodes_died_in_new_space_ = 0;
432 nodes_copied_in_new_space_ = 0;
433 nodes_promoted_ = 0;
434
435 UpdateMaximumCommitted();
436
437 #ifdef DEBUG
438 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
439
440 if (FLAG_gc_verbose) Print();
441
442 ReportStatisticsBeforeGC();
443 #endif // DEBUG
444
445 store_buffer()->GCPrologue();
446
447 if (isolate()->concurrent_osr_enabled()) {
448 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
449 }
450
451 if (new_space_.IsAtMaximumCapacity()) {
452 maximum_size_scavenges_++;
453 } else {
454 maximum_size_scavenges_ = 0;
455 }
456 CheckNewSpaceExpansionCriteria();
457 }
458
459
460 intptr_t Heap::SizeOfObjects() {
461 intptr_t total = 0;
462 AllSpaces spaces(this);
463 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
464 total += space->SizeOfObjects();
465 }
466 return total;
467 }
468
469
470 void Heap::ClearAllICsByKind(Code::Kind kind) {
471 HeapObjectIterator it(code_space());
472
473 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
474 Code* code = Code::cast(object);
475 Code::Kind current_kind = code->kind();
476 if (current_kind == Code::FUNCTION ||
477 current_kind == Code::OPTIMIZED_FUNCTION) {
478 code->ClearInlineCaches(kind);
479 }
480 }
481 }
482
483
484 void Heap::RepairFreeListsAfterBoot() {
485 PagedSpaces spaces(this);
486 for (PagedSpace* space = spaces.next(); space != NULL;
487 space = spaces.next()) {
488 space->RepairFreeListsAfterBoot();
489 }
490 }
491
492
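// Digests the allocation-memento feedback gathered during the last GC: walks
// the allocation-site scratchpad (or the full allocation sites list if the
// scratchpad overflowed) and requests deoptimization of code that depends on
// sites whose tenuring decision changed.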
493 void Heap::ProcessPretenuringFeedback() {
494 if (FLAG_allocation_site_pretenuring) {
495 int tenure_decisions = 0;
496 int dont_tenure_decisions = 0;
497 int allocation_mementos_found = 0;
498 int allocation_sites = 0;
499 int active_allocation_sites = 0;
500
501 // If the scratchpad overflowed, we have to iterate over the allocation
502 // sites list.
503 // TODO(hpayer): We iterate over the whole list of allocation sites when
504 // we have grown to the maximum semi-space size, to deopt maybe-tenured
505 // allocation sites. We could hold the maybe-tenured allocation sites
506 // in a separate data structure if this is a performance problem.
507 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
508 bool use_scratchpad =
509 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
510 !deopt_maybe_tenured;
511
512 int i = 0;
513 Object* list_element = allocation_sites_list();
514 bool trigger_deoptimization = false;
515 bool maximum_size_scavenge = MaximumSizeScavenge();
516 while (use_scratchpad ? i < allocation_sites_scratchpad_length_
517 : list_element->IsAllocationSite()) {
518 AllocationSite* site =
519 use_scratchpad
520 ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
521 : AllocationSite::cast(list_element);
522 allocation_mementos_found += site->memento_found_count();
523 if (site->memento_found_count() > 0) {
524 active_allocation_sites++;
525 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
526 trigger_deoptimization = true;
527 }
528 if (site->GetPretenureMode() == TENURED) {
529 tenure_decisions++;
530 } else {
531 dont_tenure_decisions++;
532 }
533 allocation_sites++;
534 }
535
536 if (deopt_maybe_tenured && site->IsMaybeTenure()) {
537 site->set_deopt_dependent_code(true);
538 trigger_deoptimization = true;
539 }
540
541 if (use_scratchpad) {
542 i++;
543 } else {
544 list_element = site->weak_next();
545 }
546 }
547
548 if (trigger_deoptimization) {
549 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
550 }
551
552 FlushAllocationSitesScratchpad();
553
554 if (FLAG_trace_pretenuring_statistics &&
555 (allocation_mementos_found > 0 || tenure_decisions > 0 ||
556 dont_tenure_decisions > 0)) {
557 PrintF(
558 "GC: (mode, #visited allocation sites, #active allocation sites, "
559 "#mementos, #tenure decisions, #donttenure decisions) "
560 "(%s, %d, %d, %d, %d, %d)\n",
561 use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
562 active_allocation_sites, allocation_mementos_found, tenure_decisions,
563 dont_tenure_decisions);
564 }
565 }
566 }
567
568
569 void Heap::DeoptMarkedAllocationSites() {
570 // TODO(hpayer): If iterating over the allocation sites list becomes a
571 // performance issue, use a cache data structure in the heap instead (similar
572 // to the allocation sites scratchpad).
573 Object* list_element = allocation_sites_list();
574 while (list_element->IsAllocationSite()) {
575 AllocationSite* site = AllocationSite::cast(list_element);
576 if (site->deopt_dependent_code()) {
577 site->dependent_code()->MarkCodeForDeoptimization(
578 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
579 site->set_deopt_dependent_code(false);
580 }
581 list_element = site->weak_next();
582 }
583 Deoptimizer::DeoptimizeMarkedCode(isolate_);
584 }
585
586
587 void Heap::GarbageCollectionEpilogue() {
588 store_buffer()->GCEpilogue();
589
590 // In release mode, we only zap the from space under heap verification.
591 if (Heap::ShouldZapGarbage()) {
592 ZapFromSpace();
593 }
594
595 // Process pretenuring feedback and update allocation sites.
596 ProcessPretenuringFeedback();
597
598 #ifdef VERIFY_HEAP
599 if (FLAG_verify_heap) {
600 Verify();
601 }
602 #endif
603
604 AllowHeapAllocation for_the_rest_of_the_epilogue;
605
606 #ifdef DEBUG
607 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
608 if (FLAG_print_handles) PrintHandles();
609 if (FLAG_gc_verbose) Print();
610 if (FLAG_code_stats) ReportCodeStatistics("After GC");
611 #endif
612 if (FLAG_deopt_every_n_garbage_collections > 0) {
613 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
614 // the topmost optimized frame can be deoptimized safely, because it
615 // might not have a lazy bailout point right after its current PC.
616 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
617 Deoptimizer::DeoptimizeAll(isolate());
618 gcs_since_last_deopt_ = 0;
619 }
620 }
621
622 UpdateMaximumCommitted();
623
624 isolate_->counters()->alive_after_last_gc()->Set(
625 static_cast<int>(SizeOfObjects()));
626
627 isolate_->counters()->string_table_capacity()->Set(
628 string_table()->Capacity());
629 isolate_->counters()->number_of_symbols()->Set(
630 string_table()->NumberOfElements());
631
632 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
633 isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
634 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
635 (crankshaft_codegen_bytes_generated_ +
636 full_codegen_bytes_generated_)));
637 }
638
639 if (CommittedMemory() > 0) {
640 isolate_->counters()->external_fragmentation_total()->AddSample(
641 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
642
643 isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
644 (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
645 isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
646 static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
647 CommittedMemory()));
648 isolate_->counters()->heap_fraction_old_data_space()->AddSample(
649 static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
650 CommittedMemory()));
651 isolate_->counters()->heap_fraction_code_space()->AddSample(
652 static_cast<int>((code_space()->CommittedMemory() * 100.0) /
653 CommittedMemory()));
654 isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
655 (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
656 isolate_->counters()->heap_fraction_cell_space()->AddSample(
657 static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
658 CommittedMemory()));
659 isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
660 static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
661 CommittedMemory()));
662 isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
663 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
664
665 isolate_->counters()->heap_sample_total_committed()->AddSample(
666 static_cast<int>(CommittedMemory() / KB));
667 isolate_->counters()->heap_sample_total_used()->AddSample(
668 static_cast<int>(SizeOfObjects() / KB));
669 isolate_->counters()->heap_sample_map_space_committed()->AddSample(
670 static_cast<int>(map_space()->CommittedMemory() / KB));
671 isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
672 static_cast<int>(cell_space()->CommittedMemory() / KB));
673 isolate_->counters()
674 ->heap_sample_property_cell_space_committed()
675 ->AddSample(
676 static_cast<int>(property_cell_space()->CommittedMemory() / KB));
677 isolate_->counters()->heap_sample_code_space_committed()->AddSample(
678 static_cast<int>(code_space()->CommittedMemory() / KB));
679
680 isolate_->counters()->heap_sample_maximum_committed()->AddSample(
681 static_cast<int>(MaximumCommittedMemory() / KB));
682 }
683
684 #define UPDATE_COUNTERS_FOR_SPACE(space) \
685 isolate_->counters()->space##_bytes_available()->Set( \
686 static_cast<int>(space()->Available())); \
687 isolate_->counters()->space##_bytes_committed()->Set( \
688 static_cast<int>(space()->CommittedMemory())); \
689 isolate_->counters()->space##_bytes_used()->Set( \
690 static_cast<int>(space()->SizeOfObjects()));
691 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
692 if (space()->CommittedMemory() > 0) { \
693 isolate_->counters()->external_fragmentation_##space()->AddSample( \
694 static_cast<int>(100 - \
695 (space()->SizeOfObjects() * 100.0) / \
696 space()->CommittedMemory())); \
697 }
698 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
699 UPDATE_COUNTERS_FOR_SPACE(space) \
700 UPDATE_FRAGMENTATION_FOR_SPACE(space)
701
702 UPDATE_COUNTERS_FOR_SPACE(new_space)
703 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
704 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
705 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
706 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
707 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
708 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
709 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
710 #undef UPDATE_COUNTERS_FOR_SPACE
711 #undef UPDATE_FRAGMENTATION_FOR_SPACE
712 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
713
714 #ifdef DEBUG
715 ReportStatisticsAfterGC();
716 #endif // DEBUG
717
718 // Remember the last top pointer so that we can later find out
719 // whether we allocated in new space since the last GC.
720 new_space_top_after_last_gc_ = new_space()->top();
721 }
722
723
724 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
725 const v8::GCCallbackFlags gc_callback_flags) {
726 // Since we are ignoring the return value, the exact choice of space does
727 // not matter, so long as we do not specify NEW_SPACE, which would not
728 // cause a full GC.
729 mark_compact_collector_.SetFlags(flags);
730 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
731 mark_compact_collector_.SetFlags(kNoGCFlags);
732 }
733
734
735 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
736 // Since we are ignoring the return value, the exact choice of space does
737 // not matter, so long as we do not specify NEW_SPACE, which would not
738 // cause a full GC.
739 // Major GC would invoke weak handle callbacks on weakly reachable
740 // handles, but won't collect weakly reachable objects until next
741 // major GC. Therefore if we collect aggressively and weak handle callback
742 // has been invoked, we rerun major GC to release objects which become
743 // garbage.
744 // Note: as weak callbacks can execute arbitrary code, we cannot
745 // hope that eventually there will be no weak callbacks invocations.
746 // Therefore stop recollecting after several attempts.
747 if (isolate()->concurrent_recompilation_enabled()) {
748 // The optimizing compiler may be unnecessarily holding on to memory.
749 DisallowHeapAllocation no_recursive_gc;
750 isolate()->optimizing_compiler_thread()->Flush();
751 }
752 mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
753 kReduceMemoryFootprintMask);
754 isolate_->compilation_cache()->Clear();
755 const int kMaxNumberOfAttempts = 7;
756 const int kMinNumberOfAttempts = 2;
757 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
758 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
759 attempt + 1 >= kMinNumberOfAttempts) {
760 break;
761 }
762 }
763 mark_compact_collector()->SetFlags(kNoGCFlags);
764 new_space_.Shrink();
765 UncommitFromSpace();
766 incremental_marking()->UncommitMarkingDeque();
767 }
768
769
770 void Heap::EnsureFillerObjectAtTop() {
771 // There may be an allocation memento behind every object in new space.
772 // If we evacuate a new space that is not full, or if we are on the last
773 // page of the new space, then there may be uninitialized memory behind the
774 // top pointer of the new-space page. We store a filler object there to
775 // identify the unused space.
776 Address from_top = new_space_.top();
777 Address from_limit = new_space_.limit();
778 if (from_top < from_limit) {
779 int remaining_in_page = static_cast<int>(from_limit - from_top);
780 CreateFillerObjectAt(from_top, remaining_in_page);
781 }
782 }
783
784
785 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
786 const char* collector_reason,
787 const v8::GCCallbackFlags gc_callback_flags) {
788 // The VM is in the GC state until exiting this function.
789 VMState<GC> state(isolate_);
790
791 #ifdef DEBUG
792 // Reset the allocation timeout to the GC interval, but make sure to
793 // allow at least a few allocations after a collection. The reason
794 // for this is that we have a lot of allocation sequences and we
795 // assume that a garbage collection will allow the subsequent
796 // allocation attempts to go through.
797 allocation_timeout_ = Max(6, FLAG_gc_interval);
798 #endif
799
800 EnsureFillerObjectAtTop();
801
802 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
803 if (FLAG_trace_incremental_marking) {
804 PrintF("[IncrementalMarking] Scavenge during marking.\n");
805 }
806 }
807
808 if (collector == MARK_COMPACTOR &&
809 !mark_compact_collector()->abort_incremental_marking() &&
810 !incremental_marking()->IsStopped() &&
811 !incremental_marking()->should_hurry() &&
812 FLAG_incremental_marking_steps) {
813 // Make progress in incremental marking.
814 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
815 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
816 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
817 if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
818 if (FLAG_trace_incremental_marking) {
819 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
820 }
821 collector = SCAVENGER;
822 collector_reason = "incremental marking delaying mark-sweep";
823 }
824 }
825
826 bool next_gc_likely_to_collect_more = false;
827
828 {
829 tracer()->Start(collector, gc_reason, collector_reason);
830 DCHECK(AllowHeapAllocation::IsAllowed());
831 DisallowHeapAllocation no_allocation_during_gc;
832 GarbageCollectionPrologue();
833
834 {
835 HistogramTimerScope histogram_timer_scope(
836 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
837 : isolate_->counters()->gc_compactor());
838 next_gc_likely_to_collect_more =
839 PerformGarbageCollection(collector, gc_callback_flags);
840 }
841
842 GarbageCollectionEpilogue();
843 tracer()->Stop();
844 }
845
846 // Start incremental marking for the next cycle. The heap snapshot
847 // generator needs incremental marking to stay off after it aborted.
848 if (!mark_compact_collector()->abort_incremental_marking() &&
849 WorthActivatingIncrementalMarking()) {
850 incremental_marking()->Start();
851 }
852
853 return next_gc_likely_to_collect_more;
854 }
855
856
857 int Heap::NotifyContextDisposed() {
858 if (isolate()->concurrent_recompilation_enabled()) {
859 // Flush the queued recompilation tasks.
860 isolate()->optimizing_compiler_thread()->Flush();
861 }
862 flush_monomorphic_ics_ = true;
863 AgeInlineCaches();
864 return ++contexts_disposed_;
865 }
866
867
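// Moves |len| elements within |array| (memmove semantics). For arrays outside
// new space, moved slots that now hold new-space pointers are re-recorded so
// the store buffer and the incremental marker stay consistent.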
868 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
869 int len) {
870 if (len == 0) return;
871
872 DCHECK(array->map() != fixed_cow_array_map());
873 Object** dst_objects = array->data_start() + dst_index;
874 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
875 if (!InNewSpace(array)) {
876 for (int i = 0; i < len; i++) {
877 // TODO(hpayer): check store buffer for entries
878 if (InNewSpace(dst_objects[i])) {
879 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
880 }
881 }
882 }
883 incremental_marking()->RecordWrites(array);
884 }
885
886
887 #ifdef VERIFY_HEAP
888 // Helper class for verifying the string table.
889 class StringTableVerifier : public ObjectVisitor {
890 public:
891 void VisitPointers(Object** start, Object** end) {
892 // Visit all HeapObject pointers in [start, end).
893 for (Object** p = start; p < end; p++) {
894 if ((*p)->IsHeapObject()) {
895 // Check that the string is actually internalized.
896 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
897 (*p)->IsInternalizedString());
898 }
899 }
900 }
901 };
902
903
904 static void VerifyStringTable(Heap* heap) {
905 StringTableVerifier verifier;
906 heap->string_table()->IterateElements(&verifier);
907 }
908 #endif // VERIFY_HEAP
909
910
911 static bool AbortIncrementalMarkingAndCollectGarbage(
912 Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
913 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
914 bool result = heap->CollectGarbage(space, gc_reason);
915 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
916 return result;
917 }
918
919
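// Reserves |sizes[space]| bytes in each space for the deserializer, retrying
// with garbage collections (up to kThreshold attempts) until every
// reservation succeeds. The reserved regions are marked with free-list nodes
// so that a GC before deserialization can still iterate the spaces.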
920 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
921 bool gc_performed = true;
922 int counter = 0;
923 static const int kThreshold = 20;
924 while (gc_performed && counter++ < kThreshold) {
925 gc_performed = false;
926 DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
927 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
928 if (sizes[space] != 0) {
929 AllocationResult allocation;
930 if (space == NEW_SPACE) {
931 allocation = new_space()->AllocateRaw(sizes[space]);
932 } else {
933 allocation = paged_space(space)->AllocateRaw(sizes[space]);
934 }
935 FreeListNode* node;
936 if (!allocation.To(&node)) {
937 if (space == NEW_SPACE) {
938 Heap::CollectGarbage(NEW_SPACE,
939 "failed to reserve space in the new space");
940 } else {
941 AbortIncrementalMarkingAndCollectGarbage(
942 this, static_cast<AllocationSpace>(space),
943 "failed to reserve space in paged space");
944 }
945 gc_performed = true;
946 break;
947 } else {
948 // Mark with a free list node, in case we have a GC before
949 // deserializing.
950 node->set_size(this, sizes[space]);
951 locations_out[space] = node->address();
952 }
953 }
954 }
955 }
956
957 if (gc_performed) {
958 // Failed to reserve the space after several attempts.
959 V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
960 }
961 }
962
963
964 void Heap::EnsureFromSpaceIsCommitted() {
965 if (new_space_.CommitFromSpaceIfNeeded()) return;
966
967 // Committing memory to from space failed.
968 // Memory is exhausted and we will die.
969 V8::FatalProcessOutOfMemory("Committing semi space failed.");
970 }
971
972
973 void Heap::ClearJSFunctionResultCaches() {
974 if (isolate_->bootstrapper()->IsActive()) return;
975
976 Object* context = native_contexts_list();
977 while (!context->IsUndefined()) {
978 // Get the caches for this context. GC can happen when the context
979 // is not fully initialized, so the caches can be undefined.
980 Object* caches_or_undefined =
981 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
982 if (!caches_or_undefined->IsUndefined()) {
983 FixedArray* caches = FixedArray::cast(caches_or_undefined);
984 // Clear the caches:
985 int length = caches->length();
986 for (int i = 0; i < length; i++) {
987 JSFunctionResultCache::cast(caches->get(i))->Clear();
988 }
989 }
990 // Get the next context:
991 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
992 }
993 }
994
995
996 void Heap::ClearNormalizedMapCaches() {
997 if (isolate_->bootstrapper()->IsActive() &&
998 !incremental_marking()->IsMarking()) {
999 return;
1000 }
1001
1002 Object* context = native_contexts_list();
1003 while (!context->IsUndefined()) {
1004 // GC can happen when the context is not fully initialized,
1005 // so the cache can be undefined.
1006 Object* cache =
1007 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1008 if (!cache->IsUndefined()) {
1009 NormalizedMapCache::cast(cache)->Clear();
1010 }
1011 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1012 }
1013 }
1014
1015
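// Recomputes the promotion and semi-space-copy rates as percentages of the
// new-space size at the start of the GC, and tracks how long the combined
// survival rate has stayed above kYoungSurvivalRateHighThreshold.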
1016 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1017 if (start_new_space_size == 0) return;
1018
1019 promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
1020 static_cast<double>(start_new_space_size) * 100);
1021
1022 semi_space_copied_rate_ =
1023 (static_cast<double>(semi_space_copied_object_size_) /
1024 static_cast<double>(start_new_space_size) * 100);
1025
1026 double survival_rate = promotion_rate_ + semi_space_copied_rate_;
1027
1028 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1029 high_survival_rate_period_length_++;
1030 } else {
1031 high_survival_rate_period_length_ = 0;
1032 }
1033 }
1034
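// Runs one collection (scavenge or mark-compact), including the embedder's
// GC prologue/epilogue callbacks and weak-handle post-processing. Returns
// true if freed global handles indicate that another GC is likely to collect
// more garbage.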
1035 bool Heap::PerformGarbageCollection(
1036 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1037 int freed_global_handles = 0;
1038
1039 if (collector != SCAVENGER) {
1040 PROFILE(isolate_, CodeMovingGCEvent());
1041 }
1042
1043 #ifdef VERIFY_HEAP
1044 if (FLAG_verify_heap) {
1045 VerifyStringTable(this);
1046 }
1047 #endif
1048
1049 GCType gc_type =
1050 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1051
1052 {
1053 GCCallbacksScope scope(this);
1054 if (scope.CheckReenter()) {
1055 AllowHeapAllocation allow_allocation;
1056 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1057 VMState<EXTERNAL> state(isolate_);
1058 HandleScope handle_scope(isolate_);
1059 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1060 }
1061 }
1062
1063 EnsureFromSpaceIsCommitted();
1064
1065 int start_new_space_size = Heap::new_space()->SizeAsInt();
1066
1067 if (IsHighSurvivalRate()) {
1068 // We speed up the incremental marker if it is running so that it
1069 // does not fall behind the rate of promotion, which would cause a
1070 // constantly growing old space.
1071 incremental_marking()->NotifyOfHighPromotionRate();
1072 }
1073
1074 if (collector == MARK_COMPACTOR) {
1075 // Perform mark-sweep with optional compaction.
1076 MarkCompact();
1077 sweep_generation_++;
1078 // Temporarily set the limit for the case when PostGarbageCollectionProcessing
1079 // allocates and triggers GC. The real limit is set after
1080 // PostGarbageCollectionProcessing.
1081 old_generation_allocation_limit_ =
1082 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1083 old_gen_exhausted_ = false;
1084 } else {
1085 Scavenge();
1086 }
1087
1088 UpdateSurvivalStatistics(start_new_space_size);
1089
1090 isolate_->counters()->objs_since_last_young()->Set(0);
1091
1092 // Callbacks that fire after this point might trigger nested GCs and
1093 // restart incremental marking, so the assertion can't be moved down.
1094 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1095
1096 gc_post_processing_depth_++;
1097 {
1098 AllowHeapAllocation allow_allocation;
1099 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1100 freed_global_handles =
1101 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1102 }
1103 gc_post_processing_depth_--;
1104
1105 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1106
1107 // Update relocatables.
1108 Relocatable::PostGarbageCollectionProcessing(isolate_);
1109
1110 if (collector == MARK_COMPACTOR) {
1111 // Register the amount of external allocated memory.
1112 amount_of_external_allocated_memory_at_last_global_gc_ =
1113 amount_of_external_allocated_memory_;
1114 old_generation_allocation_limit_ = OldGenerationAllocationLimit(
1115 PromotedSpaceSizeOfObjects(), freed_global_handles);
1116 }
1117
1118 {
1119 GCCallbacksScope scope(this);
1120 if (scope.CheckReenter()) {
1121 AllowHeapAllocation allow_allocation;
1122 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1123 VMState<EXTERNAL> state(isolate_);
1124 HandleScope handle_scope(isolate_);
1125 CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1126 }
1127 }
1128
1129 #ifdef VERIFY_HEAP
1130 if (FLAG_verify_heap) {
1131 VerifyStringTable(this);
1132 }
1133 #endif
1134
1135 return freed_global_handles > 0;
1136 }
1137
1138
1139 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1140 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1141 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1142 if (!gc_prologue_callbacks_[i].pass_isolate_) {
1143 v8::GCPrologueCallback callback =
1144 reinterpret_cast<v8::GCPrologueCallback>(
1145 gc_prologue_callbacks_[i].callback);
1146 callback(gc_type, flags);
1147 } else {
1148 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1149 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1150 }
1151 }
1152 }
1153 }
1154
1155
1156 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1157 GCCallbackFlags gc_callback_flags) {
1158 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1159 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1160 if (!gc_epilogue_callbacks_[i].pass_isolate_) {
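// Prologue and epilogue callbacks have the same signature, so the
// prologue callback type is reused for this cast.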
1161 v8::GCPrologueCallback callback =
1162 reinterpret_cast<v8::GCPrologueCallback>(
1163 gc_epilogue_callbacks_[i].callback);
1164 callback(gc_type, gc_callback_flags);
1165 } else {
1166 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1167 gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1168 }
1169 }
1170 }
1171 }
1172
1173
1174 void Heap::MarkCompact() {
1175 gc_state_ = MARK_COMPACT;
1176 LOG(isolate_, ResourceEvent("markcompact", "begin"));
1177
1178 uint64_t size_of_objects_before_gc = SizeOfObjects();
1179
1180 mark_compact_collector_.Prepare();
1181
1182 ms_count_++;
1183
1184 MarkCompactPrologue();
1185
1186 mark_compact_collector_.CollectGarbage();
1187
1188 LOG(isolate_, ResourceEvent("markcompact", "end"));
1189
1190 gc_state_ = NOT_IN_GC;
1191
1192 isolate_->counters()->objs_since_last_full()->Set(0);
1193
1194 flush_monomorphic_ics_ = false;
1195
1196 if (FLAG_allocation_site_pretenuring) {
1197 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1198 }
1199 }
1200
1201
1202 void Heap::MarkCompactPrologue() {
1203 // At any full GC, clear the keyed lookup cache to enable collection of unused
1204 // maps.
1205 isolate_->keyed_lookup_cache()->Clear();
1206 isolate_->context_slot_cache()->Clear();
1207 isolate_->descriptor_lookup_cache()->Clear();
1208 RegExpResultsCache::Clear(string_split_cache());
1209 RegExpResultsCache::Clear(regexp_multiple_cache());
1210
1211 isolate_->compilation_cache()->MarkCompactPrologue();
1212
1213 CompletelyClearInstanceofCache();
1214
1215 FlushNumberStringCache();
1216 if (FLAG_cleanup_code_caches_at_gc) {
1217 polymorphic_code_cache()->set_cache(undefined_value());
1218 }
1219
1220 ClearNormalizedMapCaches();
1221 }
1222
1223
1224 // Helper class for copying HeapObjects
1225 class ScavengeVisitor : public ObjectVisitor {
1226 public:
1227 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1228
1229 void VisitPointer(Object** p) { ScavengePointer(p); }
1230
1231 void VisitPointers(Object** start, Object** end) {
1232 // Copy all HeapObject pointers in [start, end)
1233 for (Object** p = start; p < end; p++) ScavengePointer(p);
1234 }
1235
1236 private:
1237 void ScavengePointer(Object** p) {
1238 Object* object = *p;
1239 if (!heap_->InNewSpace(object)) return;
1240 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1241 reinterpret_cast<HeapObject*>(object));
1242 }
1243
1244 Heap* heap_;
1245 };
1246
1247
1248 #ifdef VERIFY_HEAP
1249 // Visitor class to verify pointers in code or data space do not point into
1250 // new space.
1251 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1252 public:
1253 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1254 void VisitPointers(Object** start, Object** end) {
1255 for (Object** current = start; current < end; current++) {
1256 if ((*current)->IsHeapObject()) {
1257 CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1258 }
1259 }
1260 }
1261
1262 private:
1263 Heap* heap_;
1264 };
1265
1266
1267 static void VerifyNonPointerSpacePointers(Heap* heap) {
1268 // Verify that there are no pointers to new space in spaces where we
1269 // do not expect them.
1270 VerifyNonPointerSpacePointersVisitor v(heap);
1271 HeapObjectIterator code_it(heap->code_space());
1272 for (HeapObject* object = code_it.Next(); object != NULL;
1273 object = code_it.Next())
1274 object->Iterate(&v);
1275
1276 HeapObjectIterator data_it(heap->old_data_space());
1277 for (HeapObject* object = data_it.Next(); object != NULL;
1278 object = data_it.Next())
1279 object->Iterate(&v);
1280 }
1281 #endif // VERIFY_HEAP
1282
1283
1284 void Heap::CheckNewSpaceExpansionCriteria() {
1285 if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1286 survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1287 // Grow the size of new space if there is room to grow, enough data
1288 // has survived scavenge since the last expansion and we are not in
1289 // high promotion mode.
1290 new_space_.Grow();
1291 survived_since_last_expansion_ = 0;
1292 }
1293 }
1294
1295
1296 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1297 return heap->InNewSpace(*p) &&
1298 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1299 }
1300
1301
1302 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1303 StoreBufferEvent event) {
1304 heap->store_buffer_rebuilder_.Callback(page, event);
1305 }
1306
1307
1308 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1309 if (event == kStoreBufferStartScanningPagesEvent) {
1310 start_of_current_page_ = NULL;
1311 current_page_ = NULL;
1312 } else if (event == kStoreBufferScanningPageEvent) {
1313 if (current_page_ != NULL) {
1314 // If this page already overflowed the store buffer during this iteration.
1315 if (current_page_->scan_on_scavenge()) {
1316 // Then we should wipe out the entries that have been added for it.
1317 store_buffer_->SetTop(start_of_current_page_);
1318 } else if (store_buffer_->Top() - start_of_current_page_ >=
1319 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1320 // Did we find too many pointers in the previous page? The heuristic is
1321 // that no page can take more than 1/5 of the remaining slots in the store
1322 // buffer.
1323 current_page_->set_scan_on_scavenge(true);
1324 store_buffer_->SetTop(start_of_current_page_);
1325 } else {
1326 // In this case the page we scanned took a reasonable number of slots in
1327 // the store buffer. It has now been rehabilitated and is no longer
1328 // marked scan_on_scavenge.
1329 DCHECK(!current_page_->scan_on_scavenge());
1330 }
1331 }
1332 start_of_current_page_ = store_buffer_->Top();
1333 current_page_ = page;
1334 } else if (event == kStoreBufferFullEvent) {
1335 // The current page overflowed the store buffer again. Wipe out its entries
1336 // in the store buffer and mark it scan-on-scavenge again. This may happen
1337 // several times while scanning.
1338 if (current_page_ == NULL) {
1339 // Store Buffer overflowed while scanning promoted objects. These are not
1340 // in any particular page, though they are likely to be clustered by the
1341 // allocation routines.
1342 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1343 } else {
1344 // Store Buffer overflowed while scanning a particular old space page for
1345 // pointers to new space.
1346 DCHECK(current_page_ == page);
1347 DCHECK(page != NULL);
1348 current_page_->set_scan_on_scavenge(true);
1349 DCHECK(start_of_current_page_ != store_buffer_->Top());
1350 store_buffer_->SetTop(start_of_current_page_);
1351 }
1352 } else {
1353 UNREACHABLE();
1354 }
1355 }
1356
1357
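// The promotion queue is kept in the otherwise unused top end of to-space and
// grows downward toward the allocation pointer; see the queue discussion in
// Heap::Scavenge().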
1358 void PromotionQueue::Initialize() {
1359 // Assumes that a NewSpacePage exactly fits a number of promotion queue
1360 // entries (where each is a pair of intptr_t). This allows us to simplify
1361 // the test for when to switch pages.
1362 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1363 0);
1364 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1365 front_ = rear_ =
1366 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1367 emergency_stack_ = NULL;
1368 }
1369
1370
1371 void PromotionQueue::RelocateQueueHead() {
1372 DCHECK(emergency_stack_ == NULL);
1373
1374 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1375 intptr_t* head_start = rear_;
1376 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1377
1378 int entries_count =
1379 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1380
1381 emergency_stack_ = new List<Entry>(2 * entries_count);
1382
1383 while (head_start != head_end) {
1384 int size = static_cast<int>(*(head_start++));
1385 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1386 emergency_stack_->Add(Entry(obj, size));
1387 }
1388 rear_ = head_end;
1389 }
1390
1391
1392 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1393 public:
1394 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1395
1396 virtual Object* RetainAs(Object* object) {
1397 if (!heap_->InFromSpace(object)) {
1398 return object;
1399 }
1400
1401 MapWord map_word = HeapObject::cast(object)->map_word();
1402 if (map_word.IsForwardingAddress()) {
1403 return map_word.ToForwardingAddress();
1404 }
1405 return NULL;
1406 }
1407
1408 private:
1409 Heap* heap_;
1410 };
1411
1412
1413 void Heap::Scavenge() {
1414 RelocationLock relocation_lock(this);
1415
1416 #ifdef VERIFY_HEAP
1417 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1418 #endif
1419
1420 gc_state_ = SCAVENGE;
1421
1422 // Implements Cheney's copying algorithm
1423 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1424
1425 // Clear descriptor cache.
1426 isolate_->descriptor_lookup_cache()->Clear();
1427
1428 // Used for updating survived_since_last_expansion_ at function end.
1429 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1430
1431 SelectScavengingVisitorsTable();
1432
1433 incremental_marking()->PrepareForScavenge();
1434
1435 // Flip the semispaces. After flipping, to space is empty, from space has
1436 // live objects.
1437 new_space_.Flip();
1438 new_space_.ResetAllocationInfo();
1439
1440 // We need to sweep newly copied objects which can be either in the
1441 // to space or promoted to the old generation. For to-space
1442 // objects, we treat the bottom of the to space as a queue. Newly
1443 // copied and unswept objects lie between a 'front' mark and the
1444 // allocation pointer.
1445 //
1446 // Promoted objects can go into various old-generation spaces, and
1447 // can be allocated internally in the spaces (from the free list).
1448 // We treat the top of the to space as a queue of addresses of
1449 // promoted objects. The addresses of newly promoted and unswept
1450 // objects lie between a 'front' mark and a 'rear' mark that is
1451 // updated as a side effect of promoting an object.
1452 //
1453 // There is guaranteed to be enough room at the top of the to space
1454 // for the addresses of promoted objects: every object promoted
1455 // frees up its size in bytes from the top of the new space, and
1456 // objects are at least one pointer in size.
1457 Address new_space_front = new_space_.ToSpaceStart();
1458 promotion_queue_.Initialize();
1459
1460 #ifdef DEBUG
1461 store_buffer()->Clean();
1462 #endif
1463
1464 ScavengeVisitor scavenge_visitor(this);
1465 // Copy roots.
1466 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1467
1468 // Copy objects reachable from the old generation.
1469 {
1470 StoreBufferRebuildScope scope(this, store_buffer(),
1471 &ScavengeStoreBufferCallback);
1472 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1473 }
1474
1475 // Copy objects reachable from simple cells by scavenging cell values
1476 // directly.
1477 HeapObjectIterator cell_iterator(cell_space_);
1478 for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1479 heap_object = cell_iterator.Next()) {
1480 if (heap_object->IsCell()) {
1481 Cell* cell = Cell::cast(heap_object);
1482 Address value_address = cell->ValueAddress();
1483 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1484 }
1485 }
1486
1487 // Copy objects reachable from global property cells by scavenging global
1488 // property cell values directly.
1489 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1490 for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1491 heap_object != NULL;
1492 heap_object = js_global_property_cell_iterator.Next()) {
1493 if (heap_object->IsPropertyCell()) {
1494 PropertyCell* cell = PropertyCell::cast(heap_object);
1495 Address value_address = cell->ValueAddress();
1496 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1497 Address type_address = cell->TypeAddress();
1498 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1499 }
1500 }
1501
1502 // Copy objects reachable from the encountered weak collections list.
1503 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1504
1505 // Copy objects reachable from the code flushing candidates list.
1506 MarkCompactCollector* collector = mark_compact_collector();
1507 if (collector->is_code_flushing_enabled()) {
1508 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1509 }
1510
1511 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1512
1513 while (isolate()->global_handles()->IterateObjectGroups(
1514 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1515 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1516 }
1517 isolate()->global_handles()->RemoveObjectGroups();
1518 isolate()->global_handles()->RemoveImplicitRefGroups();
1519
1520 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1521 &IsUnscavengedHeapObject);
1522 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1523 &scavenge_visitor);
1524 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1525
1526 UpdateNewSpaceReferencesInExternalStringTable(
1527 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1528
1529 promotion_queue_.Destroy();
1530
1531 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1532
1533 ScavengeWeakObjectRetainer weak_object_retainer(this);
1534 ProcessWeakReferences(&weak_object_retainer);
1535
1536 DCHECK(new_space_front == new_space_.top());
1537
1538 // Set age mark.
1539 new_space_.set_age_mark(new_space_.top());
1540
1541 new_space_.LowerInlineAllocationLimit(
1542 new_space_.inline_allocation_limit_step());
1543
1544 // Update how much has survived scavenge.
1545 IncrementYoungSurvivorsCounter(static_cast<int>(
1546 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1547
1548 LOG(isolate_, ResourceEvent("scavenge", "end"));
1549
1550 gc_state_ = NOT_IN_GC;
1551
1552 gc_idle_time_handler_.NotifyScavenge();
1553 }
1554
1555
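// Invoked for each external string that lived in new space before the
// scavenge: strings that did not survive are finalized and dropped (NULL is
// returned); surviving strings are returned at their forwarded location.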
1556 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1557 Object** p) {
1558 MapWord first_word = HeapObject::cast(*p)->map_word();
1559
1560 if (!first_word.IsForwardingAddress()) {
1561 // An unreachable external string can be finalized.
1562 heap->FinalizeExternalString(String::cast(*p));
1563 return NULL;
1564 }
1565
1566 // String is still reachable.
1567 return String::cast(first_word.ToForwardingAddress());
1568 }
1569
1570
1571 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1572 ExternalStringTableUpdaterCallback updater_func) {
1573 #ifdef VERIFY_HEAP
1574 if (FLAG_verify_heap) {
1575 external_string_table_.Verify();
1576 }
1577 #endif
1578
1579 if (external_string_table_.new_space_strings_.is_empty()) return;
1580
1581 Object** start = &external_string_table_.new_space_strings_[0];
1582 Object** end = start + external_string_table_.new_space_strings_.length();
1583 Object** last = start;
1584
1585 for (Object** p = start; p < end; ++p) {
1586 DCHECK(InFromSpace(*p));
1587 String* target = updater_func(this, p);
1588
1589 if (target == NULL) continue;
1590
1591 DCHECK(target->IsExternalString());
1592
1593 if (InNewSpace(target)) {
1594 // String is still in new space. Update the table entry.
1595 *last = target;
1596 ++last;
1597 } else {
1598 // String got promoted. Move it to the old string list.
1599 external_string_table_.AddOldString(target);
1600 }
1601 }
1602
1603 DCHECK(last <= end);
1604 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1605 }
1606
1607
1608 void Heap::UpdateReferencesInExternalStringTable(
1609 ExternalStringTableUpdaterCallback updater_func) {
1610 // Update old space string references.
1611 if (external_string_table_.old_space_strings_.length() > 0) {
1612 Object** start = &external_string_table_.old_space_strings_[0];
1613 Object** end = start + external_string_table_.old_space_strings_.length();
1614 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1615 }
1616
1617 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1618 }
1619
1620
1621 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1622 ProcessArrayBuffers(retainer);
1623 ProcessNativeContexts(retainer);
1624 // TODO(mvstanton): AllocationSites only need to be processed during
1625 // MARK_COMPACT, as they live in old space. Verify and address.
1626 ProcessAllocationSites(retainer);
1627 }
1628
1629
1630 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1631 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1632 // Update the head of the list of contexts.
1633 set_native_contexts_list(head);
1634 }
1635
1636
1637 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1638 Object* array_buffer_obj =
1639 VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1640 set_array_buffers_list(array_buffer_obj);
1641 }
1642
1643
1644 void Heap::TearDownArrayBuffers() {
1645 Object* undefined = undefined_value();
1646 for (Object* o = array_buffers_list(); o != undefined;) {
1647 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1648 Runtime::FreeArrayBuffer(isolate(), buffer);
1649 o = buffer->weak_next();
1650 }
1651 set_array_buffers_list(undefined);
1652 }
1653
1654
1655 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1656 Object* allocation_site_obj =
1657 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1658 set_allocation_sites_list(allocation_site_obj);
1659 }
1660
1661
1662 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1663 DisallowHeapAllocation no_allocation_scope;
1664 Object* cur = allocation_sites_list();
1665 bool marked = false;
1666 while (cur->IsAllocationSite()) {
1667 AllocationSite* casted = AllocationSite::cast(cur);
1668 if (casted->GetPretenureMode() == flag) {
1669 casted->ResetPretenureDecision();
1670 casted->set_deopt_dependent_code(true);
1671 marked = true;
1672 }
1673 cur = casted->weak_next();
1674 }
1675 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1676 }
1677
1678
1679 void Heap::EvaluateOldSpaceLocalPretenuring(
1680 uint64_t size_of_objects_before_gc) {
1681 uint64_t size_of_objects_after_gc = SizeOfObjects();
1682 double old_generation_survival_rate =
1683 (static_cast<double>(size_of_objects_after_gc) * 100) /
1684 static_cast<double>(size_of_objects_before_gc);
1685
1686 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1687 // Too many objects died in the old generation, pretenuring of wrong
1688 // allocation sites may be the cause for that. We have to deopt all
1689 // dependent code registered in the allocation sites to re-evaluate
1690 // our pretenuring decisions.
1691 ResetAllAllocationSitesDependentCode(TENURED);
1692 if (FLAG_trace_pretenuring) {
1693 PrintF(
1694 "Deopt all allocation sites dependent code due to low survival "
1695 "rate in the old generation %f\n",
1696 old_generation_survival_rate);
1697 }
1698 }
1699 }
1700
1701
1702 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1703 DisallowHeapAllocation no_allocation;
1704 // All external strings are listed in the external string table.
1705
1706 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1707 public:
1708 explicit ExternalStringTableVisitorAdapter(
1709 v8::ExternalResourceVisitor* visitor)
1710 : visitor_(visitor) {}
1711 virtual void VisitPointers(Object** start, Object** end) {
1712 for (Object** p = start; p < end; p++) {
1713 DCHECK((*p)->IsExternalString());
1714 visitor_->VisitExternalString(
1715 Utils::ToLocal(Handle<String>(String::cast(*p))));
1716 }
1717 }
1718
1719 private:
1720 v8::ExternalResourceVisitor* visitor_;
1721 } external_string_table_visitor(visitor);
1722
1723 external_string_table_.Iterate(&external_string_table_visitor);
1724 }
1725
1726
1727 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1728 public:
1729 static inline void VisitPointer(Heap* heap, Object** p) {
1730 Object* object = *p;
1731 if (!heap->InNewSpace(object)) return;
1732 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1733 reinterpret_cast<HeapObject*>(object));
1734 }
1735 };
1736
1737
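// Cheney-style breadth-first scan of to-space: the region between
// new_space_front and new_space_.top() acts as a queue of copied but not yet
// scanned objects. Scanning an object may copy more objects (advancing top)
// or promote them (filling the promotion queue), so the outer loop repeats
// until both the queue region and the promotion queue are drained.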
1738 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1739 Address new_space_front) {
1740 do {
1741 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1742 // The addresses new_space_front and new_space_.top() define a
1743 // queue of unprocessed copied objects. Process them until the
1744 // queue is empty.
1745 while (new_space_front != new_space_.top()) {
1746 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1747 HeapObject* object = HeapObject::FromAddress(new_space_front);
1748 new_space_front +=
1749 NewSpaceScavenger::IterateBody(object->map(), object);
1750 } else {
1751 new_space_front =
1752 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1753 }
1754 }
1755
1756 // Promote and process all the to-be-promoted objects.
1757 {
1758 StoreBufferRebuildScope scope(this, store_buffer(),
1759 &ScavengeStoreBufferCallback);
1760 while (!promotion_queue()->is_empty()) {
1761 HeapObject* target;
1762 int size;
1763 promotion_queue()->remove(&target, &size);
1764
1765 // The promoted object might already have been partially visited
1766 // during old space pointer iteration. Thus we specifically search
1767 // for pointers into the from-semispace instead of looking for
1768 // pointers into new space.
1769 DCHECK(!target->IsMap());
1770 IterateAndMarkPointersToFromSpace(
1771 target->address(), target->address() + size, &ScavengeObject);
1772 }
1773 }
1774
1775 // Take another spin if there are now unswept objects in new space
1776 // (there are currently no more unswept promoted objects).
1777 } while (new_space_front != new_space_.top());
1778
1779 return new_space_front;
1780 }
1781
1782
1783 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1784 0); // NOLINT
1785 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
1786 0); // NOLINT
1787 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1788 kDoubleAlignmentMask) == 0); // NOLINT
1789
1790
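// EnsureDoubleAligned assumes the caller over-allocated by one word
// (kPointerSize) and places the spare word either in front of or behind the
// object so that the payload lands on a kDoubleAlignment boundary. As an
// illustrative example (assuming a 32-bit build, kPointerSize == 4,
// kDoubleAlignment == 8): an object allocated at ...0x1004 gets a one-word
// filler at 0x1004 and is returned at 0x1008, while an object already at
// ...0x1008 keeps its address and the filler is placed after its last word.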
1791 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1792 int size));
1793
1794 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1795 int size) {
1796 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1797 heap->CreateFillerObjectAt(object->address(), kPointerSize);
1798 return HeapObject::FromAddress(object->address() + kPointerSize);
1799 } else {
1800 heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1801 kPointerSize);
1802 return object;
1803 }
1804 }
1805
1806
1807 enum LoggingAndProfiling {
1808 LOGGING_AND_PROFILING_ENABLED,
1809 LOGGING_AND_PROFILING_DISABLED
1810 };
1811
1812
1813 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1814
1815
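// ScavengingVisitor is instantiated four ways: marks_handling selects whether
// mark bits are transferred to the copied object (needed while incremental
// marking is active), and logging_and_profiling_mode selects whether object
// moves are reported to the logger and profilers. Initialize() fills a static
// dispatch table indexed by visitor id, so the hot path is a single indirect
// call per object.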
1816 template <MarksHandling marks_handling,
1817 LoggingAndProfiling logging_and_profiling_mode>
1818 class ScavengingVisitor : public StaticVisitorBase {
1819 public:
1820 static void Initialize() {
1821 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1822 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1823 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1824 table_.Register(kVisitByteArray, &EvacuateByteArray);
1825 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1826 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1827 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1828 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1829
1830 table_.Register(
1831 kVisitNativeContext,
1832 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1833 Context::kSize>);
1834
1835 table_.Register(
1836 kVisitConsString,
1837 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1838 ConsString::kSize>);
1839
1840 table_.Register(
1841 kVisitSlicedString,
1842 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1843 SlicedString::kSize>);
1844
1845 table_.Register(
1846 kVisitSymbol,
1847 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1848 Symbol::kSize>);
1849
1850 table_.Register(
1851 kVisitSharedFunctionInfo,
1852 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1853 SharedFunctionInfo::kSize>);
1854
1855 table_.Register(kVisitJSWeakCollection,
1856 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1857
1858 table_.Register(kVisitJSArrayBuffer,
1859 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1860
1861 table_.Register(kVisitJSTypedArray,
1862 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1863
1864 table_.Register(kVisitJSDataView,
1865 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1866
1867 table_.Register(kVisitJSRegExp,
1868 &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1869
1870 if (marks_handling == IGNORE_MARKS) {
1871 table_.Register(
1872 kVisitJSFunction,
1873 &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1874 JSFunction::kSize>);
1875 } else {
1876 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1877 }
1878
1879 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1880 kVisitDataObject, kVisitDataObjectGeneric>();
1881
1882 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1883 kVisitJSObject, kVisitJSObjectGeneric>();
1884
1885 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1886 kVisitStruct, kVisitStructGeneric>();
1887 }
1888
1889 static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1890 return &table_;
1891 }
1892
1893 private:
1894 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1895
1896 static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1897 bool should_record = false;
1898 #ifdef DEBUG
1899 should_record = FLAG_heap_stats;
1900 #endif
1901 should_record = should_record || FLAG_log_gc;
1902 if (should_record) {
1903 if (heap->new_space()->Contains(obj)) {
1904 heap->new_space()->RecordAllocation(obj);
1905 } else {
1906 heap->new_space()->RecordPromotion(obj);
1907 }
1908 }
1909 }
1910
1911 // Helper function used by the evacuation routines to copy a source object
1912 // to an already allocated target object and to update the forwarding
1913 // pointer in the source object.
1914 INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
1915 HeapObject* target, int size)) {
1916 // If we migrate into to-space, then the to-space top pointer should be
1917 // right after the target object. Incorporate double alignment
1918 // over-allocation.
1919 DCHECK(!heap->InToSpace(target) ||
1920 target->address() + size == heap->new_space()->top() ||
1921 target->address() + size + kPointerSize == heap->new_space()->top());
1922
1923 // Make sure that we do not overwrite the promotion queue which is at
1924 // the end of to-space.
1925 DCHECK(!heap->InToSpace(target) ||
1926 heap->promotion_queue()->IsBelowPromotionQueue(
1927 heap->new_space()->top()));
1928
1929 // Copy the content of source to target.
1930 heap->CopyBlock(target->address(), source->address(), size);
1931
1932 // Set the forwarding address.
1933 source->set_map_word(MapWord::FromForwardingAddress(target));
1934
1935 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1936 // Update NewSpace stats if necessary.
1937 RecordCopiedObject(heap, target);
1938 heap->OnMoveEvent(target, source, size);
1939 }
1940
1941 if (marks_handling == TRANSFER_MARKS) {
1942 if (Marking::TransferColor(source, target)) {
1943 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1944 }
1945 }
1946 }
1947
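// Copies 'object' into to-space. When double alignment is requested, the
// allocation is padded by one word so EnsureDoubleAligned can place a filler
// on either side of the payload; the promotion queue limit must be updated
// before that filler (or the copy itself) is written, see the comment below.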
1948 template <int alignment>
1949 static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
1950 HeapObject* object, int object_size) {
1951 Heap* heap = map->GetHeap();
1952
1953 int allocation_size = object_size;
1954 if (alignment != kObjectAlignment) {
1955 DCHECK(alignment == kDoubleAlignment);
1956 allocation_size += kPointerSize;
1957 }
1958
1959 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
1960 AllocationResult allocation =
1961 heap->new_space()->AllocateRaw(allocation_size);
1962
1963 HeapObject* target = NULL; // Initialization to please compiler.
1964 if (allocation.To(&target)) {
1965 // Order is important here: Set the promotion limit before storing a
1966 // filler for double alignment or migrating the object. Otherwise we
1967 // may end up overwriting promotion queue entries when we migrate the
1968 // object.
1969 heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1970
1971 if (alignment != kObjectAlignment) {
1972 target = EnsureDoubleAligned(heap, target, allocation_size);
1973 }
1974
1975 // Order is important: slot might be inside of the target if target
1976 // was allocated over a dead object and slot comes from the store
1977 // buffer.
1978 *slot = target;
1979 MigrateObject(heap, object, target, object_size);
1980
1981 heap->IncrementSemiSpaceCopiedObjectSize(object_size);
1982 return true;
1983 }
1984 return false;
1985 }
1986
1987
1988 template <ObjectContents object_contents, int alignment>
1989 static inline bool PromoteObject(Map* map, HeapObject** slot,
1990 HeapObject* object, int object_size) {
1991 Heap* heap = map->GetHeap();
1992
1993 int allocation_size = object_size;
1994 if (alignment != kObjectAlignment) {
1995 DCHECK(alignment == kDoubleAlignment);
1996 allocation_size += kPointerSize;
1997 }
1998
1999 AllocationResult allocation;
2000 if (object_contents == DATA_OBJECT) {
2001 DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2002 allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2003 } else {
2004 DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2005 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2006 }
2007
2008 HeapObject* target = NULL; // Initialization to please compiler.
2009 if (allocation.To(&target)) {
2010 if (alignment != kObjectAlignment) {
2011 target = EnsureDoubleAligned(heap, target, allocation_size);
2012 }
2013
2014 // Order is important: slot might be inside of the target if target
2015 // was allocated over a dead object and slot comes from the store
2016 // buffer.
2017 *slot = target;
2018 MigrateObject(heap, object, target, object_size);
2019
2020 if (object_contents == POINTER_OBJECT) {
2021 if (map->instance_type() == JS_FUNCTION_TYPE) {
2022 heap->promotion_queue()->insert(target,
2023 JSFunction::kNonWeakFieldsEndOffset);
2024 } else {
2025 heap->promotion_queue()->insert(target, object_size);
2026 }
2027 }
2028 heap->IncrementPromotedObjectsSize(object_size);
2029 return true;
2030 }
2031 return false;
2032 }
2033
2034
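// Evacuation policy: objects that should not yet be promoted are first copied
// within new space; objects past the age mark, or copies that fail due to
// to-space fragmentation, are promoted to old space. If promotion also fails,
// a semi-space copy is the final fallback; the scavenger assumes one of the
// two must succeed.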
2035 template <ObjectContents object_contents, int alignment>
2036 static inline void EvacuateObject(Map* map, HeapObject** slot,
2037 HeapObject* object, int object_size) {
2038 SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2039 SLOW_DCHECK(object->Size() == object_size);
2040 Heap* heap = map->GetHeap();
2041
2042 if (!heap->ShouldBePromoted(object->address(), object_size)) {
2043 // A semi-space copy may fail due to fragmentation. In that case, we
2044 // try to promote the object.
2045 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2046 return;
2047 }
2048 }
2049
2050 if (PromoteObject<object_contents, alignment>(map, slot, object,
2051 object_size)) {
2052 return;
2053 }
2054
2055 // If promotion failed, we try to copy the object to the other semi-space.
2056 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2057
2058 UNREACHABLE();
2059 }
2060
2061
2062 static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2063 HeapObject* object) {
2064 ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2065 JSFunction::kSize>(map, slot, object);
2066
2067 MapWord map_word = object->map_word();
2068 DCHECK(map_word.IsForwardingAddress());
2069 HeapObject* target = map_word.ToForwardingAddress();
2070
2071 MarkBit mark_bit = Marking::MarkBitFrom(target);
2072 if (Marking::IsBlack(mark_bit)) {
2073 // This object is black and it might not be rescanned by the marker.
2074 // We should explicitly record the code entry slot for compaction because
2075 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2076 // miss it as it is not HeapObject-tagged.
2077 Address code_entry_slot =
2078 target->address() + JSFunction::kCodeEntryOffset;
2079 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2080 map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2081 code_entry_slot, code);
2082 }
2083 }
2084
2085
2086 static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2087 HeapObject* object) {
2088 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2089 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2090 object_size);
2091 }
2092
2093
2094 static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2095 HeapObject* object) {
2096 int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2097 int object_size = FixedDoubleArray::SizeFor(length);
2098 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2099 object_size);
2100 }
2101
2102
2103 static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2104 HeapObject* object) {
2105 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2106 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2107 object_size);
2108 }
2109
2110
2111 static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2112 HeapObject* object) {
2113 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2114 EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2115 object_size);
2116 }
2117
2118
2119 static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2120 HeapObject* object) {
2121 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2122 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2123 object_size);
2124 }
2125
2126
2127 static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2128 HeapObject* object) {
2129 int object_size = SeqOneByteString::cast(object)
2130 ->SeqOneByteStringSize(map->instance_type());
2131 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2132 object_size);
2133 }
2134
2135
2136 static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2137 HeapObject* object) {
2138 int object_size = SeqTwoByteString::cast(object)
2139 ->SeqTwoByteStringSize(map->instance_type());
2140 EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2141 object_size);
2142 }
2143
2144
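// Short-circuits cons strings whose second part is the empty string: instead
// of evacuating the ConsString wrapper, the slot is redirected to the first
// part (following its forwarding address, or scavenging it if needed) and the
// wrapper's map word is set to forward to that result. This is only done when
// marks are ignored; see SelectScavengingVisitorsTable.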
2145 static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2146 HeapObject* object) {
2147 DCHECK(IsShortcutCandidate(map->instance_type()));
2148
2149 Heap* heap = map->GetHeap();
2150
2151 if (marks_handling == IGNORE_MARKS &&
2152 ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2153 HeapObject* first =
2154 HeapObject::cast(ConsString::cast(object)->unchecked_first());
2155
2156 *slot = first;
2157
2158 if (!heap->InNewSpace(first)) {
2159 object->set_map_word(MapWord::FromForwardingAddress(first));
2160 return;
2161 }
2162
2163 MapWord first_word = first->map_word();
2164 if (first_word.IsForwardingAddress()) {
2165 HeapObject* target = first_word.ToForwardingAddress();
2166
2167 *slot = target;
2168 object->set_map_word(MapWord::FromForwardingAddress(target));
2169 return;
2170 }
2171
2172 heap->DoScavengeObject(first->map(), slot, first);
2173 object->set_map_word(MapWord::FromForwardingAddress(*slot));
2174 return;
2175 }
2176
2177 int object_size = ConsString::kSize;
2178 EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2179 object_size);
2180 }
2181
2182 template <ObjectContents object_contents>
2183 class ObjectEvacuationStrategy {
2184 public:
2185 template <int object_size>
2186 static inline void VisitSpecialized(Map* map, HeapObject** slot,
2187 HeapObject* object) {
2188 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2189 object_size);
2190 }
2191
2192 static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2193 int object_size = map->instance_size();
2194 EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2195 object_size);
2196 }
2197 };
2198
2199 static VisitorDispatchTable<ScavengingCallback> table_;
2200 };
2201
2202
2203 template <MarksHandling marks_handling,
2204 LoggingAndProfiling logging_and_profiling_mode>
2205 VisitorDispatchTable<ScavengingCallback>
2206 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2207
2208
2209 static void InitializeScavengingVisitorsTables() {
2210 ScavengingVisitor<TRANSFER_MARKS,
2211 LOGGING_AND_PROFILING_DISABLED>::Initialize();
2212 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2213 ScavengingVisitor<TRANSFER_MARKS,
2214 LOGGING_AND_PROFILING_ENABLED>::Initialize();
2215 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2216 }
2217
2218
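// Picks one of the four ScavengingVisitor instantiations: TRANSFER_MARKS vs.
// IGNORE_MARKS depending on whether incremental marking is active, and the
// logging/profiling variant whenever a logger, CPU profiler, heap profiler,
// or --verify-predictable needs to observe object moves.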
2219 void Heap::SelectScavengingVisitorsTable() {
2220 bool logging_and_profiling =
2221 FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2222 isolate()->cpu_profiler()->is_profiling() ||
2223 (isolate()->heap_profiler() != NULL &&
2224 isolate()->heap_profiler()->is_tracking_object_moves());
2225
2226 if (!incremental_marking()->IsMarking()) {
2227 if (!logging_and_profiling) {
2228 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2229 IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2230 } else {
2231 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2232 IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2233 }
2234 } else {
2235 if (!logging_and_profiling) {
2236 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2237 TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2238 } else {
2239 scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2240 TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2241 }
2242
2243 if (incremental_marking()->IsCompacting()) {
2244 // When compacting, forbid short-circuiting of cons strings.
2245 // The scavenging code relies on the fact that a new space object
2246 // can't be evacuated into an evacuation candidate, but
2247 // short-circuiting violates this assumption.
2248 scavenging_visitors_table_.Register(
2249 StaticVisitorBase::kVisitShortcutCandidate,
2250 scavenging_visitors_table_.GetVisitorById(
2251 StaticVisitorBase::kVisitConsString));
2252 }
2253 }
2254 }
2255
2256
2257 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2258 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2259 MapWord first_word = object->map_word();
2260 SLOW_DCHECK(!first_word.IsForwardingAddress());
2261 Map* map = first_word.ToMap();
2262 map->GetHeap()->DoScavengeObject(map, p, object);
2263 }
2264
2265
2266 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2267 int instance_size) {
2268 Object* result;
2269 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2270 if (!allocation.To(&result)) return allocation;
2271
2272 // Map::cast cannot be used due to uninitialized map field.
2273 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2274 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2275 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2276 reinterpret_cast<Map*>(result)->set_visitor_id(
2277 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2278 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2279 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2280 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2281 reinterpret_cast<Map*>(result)->set_bit_field(0);
2282 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2283 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2284 Map::OwnsDescriptors::encode(true);
2285 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2286 return result;
2287 }
2288
2289
2290 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2291 int instance_size,
2292 ElementsKind elements_kind) {
2293 HeapObject* result;
2294 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2295 if (!allocation.To(&result)) return allocation;
2296
2297 result->set_map_no_write_barrier(meta_map());
2298 Map* map = Map::cast(result);
2299 map->set_instance_type(instance_type);
2300 map->set_visitor_id(
2301 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2302 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2303 map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2304 map->set_instance_size(instance_size);
2305 map->set_inobject_properties(0);
2306 map->set_pre_allocated_property_fields(0);
2307 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2308 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2309 SKIP_WRITE_BARRIER);
2310 map->init_back_pointer(undefined_value());
2311 map->set_unused_property_fields(0);
2312 map->set_instance_descriptors(empty_descriptor_array());
2313 map->set_bit_field(0);
2314 map->set_bit_field2(1 << Map::kIsExtensible);
2315 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2316 Map::OwnsDescriptors::encode(true);
2317 map->set_bit_field3(bit_field3);
2318 map->set_elements_kind(elements_kind);
2319
2320 return map;
2321 }
2322
2323
2324 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2325 AllocationSpace space) {
2326 HeapObject* obj;
2327 {
2328 AllocationResult allocation = AllocateRaw(size, space, space);
2329 if (!allocation.To(&obj)) return allocation;
2330 }
2331 #ifdef DEBUG
2332 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2333 DCHECK(chunk->owner()->identity() == space);
2334 #endif
2335 CreateFillerObjectAt(obj->address(), size);
2336 return obj;
2337 }
2338
2339
2340 const Heap::StringTypeTable Heap::string_type_table[] = {
2341 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2342 { type, size, k##camel_name##MapRootIndex } \
2343 ,
2344 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2345 #undef STRING_TYPE_ELEMENT
2346 };
2347
2348
2349 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2350 #define CONSTANT_STRING_ELEMENT(name, contents) \
2351 { contents, k##name##RootIndex } \
2352 ,
2353 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2354 #undef CONSTANT_STRING_ELEMENT
2355 };
2356
2357
2358 const Heap::StructTable Heap::struct_table[] = {
2359 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2360 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2361 ,
2362 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2363 #undef STRUCT_TABLE_ELEMENT
2364 };
2365
2366
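// Bootstraps the map hierarchy: the meta map is allocated first and made its
// own map, a handful of partial maps follow so that the empty fixed array,
// descriptor array, and constant pool array can be allocated, and those
// partial maps are then patched with proper code caches, descriptors, back
// pointers, and prototypes before the remaining maps are created.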
2367 bool Heap::CreateInitialMaps() {
2368 HeapObject* obj;
2369 {
2370 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2371 if (!allocation.To(&obj)) return false;
2372 }
2373 // Map::cast cannot be used due to uninitialized map field.
2374 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2375 set_meta_map(new_meta_map);
2376 new_meta_map->set_map(new_meta_map);
2377
2378 { // Partial map allocation
2379 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2380 { \
2381 Map* map; \
2382 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2383 set_##field_name##_map(map); \
2384 }
2385
2386 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2387 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2388 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2389 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2390 constant_pool_array);
2391
2392 #undef ALLOCATE_PARTIAL_MAP
2393 }
2394
2395 // Allocate the empty array.
2396 {
2397 AllocationResult allocation = AllocateEmptyFixedArray();
2398 if (!allocation.To(&obj)) return false;
2399 }
2400 set_empty_fixed_array(FixedArray::cast(obj));
2401
2402 {
2403 AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2404 if (!allocation.To(&obj)) return false;
2405 }
2406 set_null_value(Oddball::cast(obj));
2407 Oddball::cast(obj)->set_kind(Oddball::kNull);
2408
2409 {
2410 AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2411 if (!allocation.To(&obj)) return false;
2412 }
2413 set_undefined_value(Oddball::cast(obj));
2414 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2415 DCHECK(!InNewSpace(undefined_value()));
2416
2417 // Set preliminary exception sentinel value before actually initializing it.
2418 set_exception(null_value());
2419
2420 // Allocate the empty descriptor array.
2421 {
2422 AllocationResult allocation = AllocateEmptyFixedArray();
2423 if (!allocation.To(&obj)) return false;
2424 }
2425 set_empty_descriptor_array(DescriptorArray::cast(obj));
2426
2427 // Allocate the constant pool array.
2428 {
2429 AllocationResult allocation = AllocateEmptyConstantPoolArray();
2430 if (!allocation.To(&obj)) return false;
2431 }
2432 set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2433
2434 // Fix the instance_descriptors for the existing maps.
2435 meta_map()->set_code_cache(empty_fixed_array());
2436 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2437 meta_map()->init_back_pointer(undefined_value());
2438 meta_map()->set_instance_descriptors(empty_descriptor_array());
2439
2440 fixed_array_map()->set_code_cache(empty_fixed_array());
2441 fixed_array_map()->set_dependent_code(
2442 DependentCode::cast(empty_fixed_array()));
2443 fixed_array_map()->init_back_pointer(undefined_value());
2444 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2445
2446 undefined_map()->set_code_cache(empty_fixed_array());
2447 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2448 undefined_map()->init_back_pointer(undefined_value());
2449 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2450
2451 null_map()->set_code_cache(empty_fixed_array());
2452 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2453 null_map()->init_back_pointer(undefined_value());
2454 null_map()->set_instance_descriptors(empty_descriptor_array());
2455
2456 constant_pool_array_map()->set_code_cache(empty_fixed_array());
2457 constant_pool_array_map()->set_dependent_code(
2458 DependentCode::cast(empty_fixed_array()));
2459 constant_pool_array_map()->init_back_pointer(undefined_value());
2460 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2461
2462 // Fix prototype object for existing maps.
2463 meta_map()->set_prototype(null_value());
2464 meta_map()->set_constructor(null_value());
2465
2466 fixed_array_map()->set_prototype(null_value());
2467 fixed_array_map()->set_constructor(null_value());
2468
2469 undefined_map()->set_prototype(null_value());
2470 undefined_map()->set_constructor(null_value());
2471
2472 null_map()->set_prototype(null_value());
2473 null_map()->set_constructor(null_value());
2474
2475 constant_pool_array_map()->set_prototype(null_value());
2476 constant_pool_array_map()->set_constructor(null_value());
2477
2478 { // Map allocation
2479 #define ALLOCATE_MAP(instance_type, size, field_name) \
2480 { \
2481 Map* map; \
2482 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2483 set_##field_name##_map(map); \
2484 }
2485
2486 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2487 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2488
2489 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2490 DCHECK(fixed_array_map() != fixed_cow_array_map());
2491
2492 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2493 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2494 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2495 mutable_heap_number)
2496 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2497 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2498
2499 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2500 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2501 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2502 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2503 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2504 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2505 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2506
2507 for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2508 const StringTypeTable& entry = string_type_table[i];
2509 {
2510 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2511 if (!allocation.To(&obj)) return false;
2512 }
2513 // Mark cons string maps as unstable, because their objects can change
2514 // maps during GC.
2515 Map* map = Map::cast(obj);
2516 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2517 roots_[entry.index] = map;
2518 }
2519
2520 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2521 undetectable_string_map()->set_is_undetectable();
2522
2523 ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
2524 undetectable_one_byte_string_map()->set_is_undetectable();
2525
2526 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2527 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2528 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2529
2530 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2531 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2532 external_##type##_array)
2533
2534 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2535 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2536
2537 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2538 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2539
2540 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2541 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2542
2543 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2544
2545 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2546
2547 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2548 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2549 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2550 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2551
2552
2553 for (unsigned i = 0; i < arraysize(struct_table); i++) {
2554 const StructTable& entry = struct_table[i];
2555 Map* map;
2556 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2557 roots_[entry.index] = map;
2558 }
2559
2560 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2561 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2562
2563 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2564 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2565 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2566 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2567 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2568 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2569
2570 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2571 native_context_map()->set_dictionary_map(true);
2572 native_context_map()->set_visitor_id(
2573 StaticVisitorBase::kVisitNativeContext);
2574
2575 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2576 shared_function_info)
2577
2578 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2579 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2580 external_map()->set_is_extensible(false);
2581 #undef ALLOCATE_VARSIZE_MAP
2582 #undef ALLOCATE_MAP
2583 }
2584
2585 { // Empty arrays
2586 {
2587 ByteArray* byte_array;
2588 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2589 set_empty_byte_array(byte_array);
2590 }
2591
2592 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2593 { \
2594 ExternalArray* obj; \
2595 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2596 return false; \
2597 set_empty_external_##type##_array(obj); \
2598 }
2599
2600 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2601 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2602
2603 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2604 { \
2605 FixedTypedArrayBase* obj; \
2606 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2607 return false; \
2608 set_empty_fixed_##type##_array(obj); \
2609 }
2610
2611 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2612 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2613 }
2614 DCHECK(!InNewSpace(empty_fixed_array()));
2615 return true;
2616 }
2617
2618
2619 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2620 PretenureFlag pretenure) {
2621 // Statically ensure that it is safe to allocate heap numbers in paged
2622 // spaces.
2623 int size = HeapNumber::kSize;
2624 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2625
2626 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2627
2628 HeapObject* result;
2629 {
2630 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2631 if (!allocation.To(&result)) return allocation;
2632 }
2633
2634 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2635 HeapObject::cast(result)->set_map_no_write_barrier(map);
2636 HeapNumber::cast(result)->set_value(value);
2637 return result;
2638 }
2639
2640
2641 AllocationResult Heap::AllocateCell(Object* value) {
2642 int size = Cell::kSize;
2643 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2644
2645 HeapObject* result;
2646 {
2647 AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2648 if (!allocation.To(&result)) return allocation;
2649 }
2650 result->set_map_no_write_barrier(cell_map());
2651 Cell::cast(result)->set_value(value);
2652 return result;
2653 }
2654
2655
2656 AllocationResult Heap::AllocatePropertyCell() {
2657 int size = PropertyCell::kSize;
2658 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2659
2660 HeapObject* result;
2661 AllocationResult allocation =
2662 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2663 if (!allocation.To(&result)) return allocation;
2664
2665 result->set_map_no_write_barrier(global_property_cell_map());
2666 PropertyCell* cell = PropertyCell::cast(result);
2667 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2668 SKIP_WRITE_BARRIER);
2669 cell->set_value(the_hole_value());
2670 cell->set_type(HeapType::None());
2671 return result;
2672 }
2673
2674
2675 void Heap::CreateApiObjects() {
2676 HandleScope scope(isolate());
2677 Factory* factory = isolate()->factory();
2678 Handle<Map> new_neander_map =
2679 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2680
2681 // Don't use Smi-only elements optimizations for objects with the neander
2682 // map. There are too many cases where element values are set directly with a
2683 // bottleneck to trap the Smi-only -> fast elements transition, and there
2684 // appears to be no benefit in optimizing this case.
2685 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2686 set_neander_map(*new_neander_map);
2687
2688 Handle<JSObject> listeners = factory->NewNeanderObject();
2689 Handle<FixedArray> elements = factory->NewFixedArray(2);
2690 elements->set(0, Smi::FromInt(0));
2691 listeners->set_elements(*elements);
2692 set_message_listeners(*listeners);
2693 }
2694
2695
2696 void Heap::CreateJSEntryStub() {
2697 JSEntryStub stub(isolate(), StackFrame::ENTRY);
2698 set_js_entry_code(*stub.GetCode());
2699 }
2700
2701
2702 void Heap::CreateJSConstructEntryStub() {
2703 JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2704 set_js_construct_entry_code(*stub.GetCode());
2705 }
2706
2707
2708 void Heap::CreateFixedStubs() {
2709 // Here we create roots for fixed stubs. They are needed at GC
2710 // for cooking and uncooking (check out frames.cc).
2711 // This eliminates the need for doing a dictionary lookup in the
2712 // stub cache for these stubs.
2713 HandleScope scope(isolate());
2714
2715 // Create stubs that should be there, so we don't unexpectedly have to
2716 // create them if we need them during the creation of another stub.
2717 // Stub creation mixes raw pointers and handles in an unsafe manner so
2718 // we cannot create stubs while we are creating stubs.
2719 CodeStub::GenerateStubsAheadOfTime(isolate());
2720
2721 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2722 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2723 // is created.
2724
2725 // gcc-4.4 has a problem generating correct code for the following snippet:
2726 // { JSEntryStub stub;
2727 // js_entry_code_ = *stub.GetCode();
2728 // }
2729 // { JSConstructEntryStub stub;
2730 // js_construct_entry_code_ = *stub.GetCode();
2731 // }
2732 // To work around the problem, make separate functions without inlining.
2733 Heap::CreateJSEntryStub();
2734 Heap::CreateJSConstructEntryStub();
2735 }
2736
2737
2738 void Heap::CreateInitialObjects() {
2739 HandleScope scope(isolate());
2740 Factory* factory = isolate()->factory();
2741
2742 // The -0 value must be set before NewNumber works.
2743 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2744 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2745
2746 set_nan_value(
2747 *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
2748 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2749
2750 // The hole has not been created yet, but we want to put something
2751 // predictable in the gaps in the string table, so let's make that Smi zero.
2752 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2753
2754 // Allocate initial string table.
2755 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2756
2757 // Finish initializing oddballs after creating the string table.
2758 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2759 factory->nan_value(), Oddball::kUndefined);
2760
2761 // Initialize the null_value.
2762 Oddball::Initialize(isolate(), factory->null_value(), "null",
2763 handle(Smi::FromInt(0), isolate()), Oddball::kNull);
2764
2765 set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2766 handle(Smi::FromInt(1), isolate()),
2767 Oddball::kTrue));
2768
2769 set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2770 handle(Smi::FromInt(0), isolate()),
2771 Oddball::kFalse));
2772
2773 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2774 handle(Smi::FromInt(-1), isolate()),
2775 Oddball::kTheHole));
2776
2777 set_uninitialized_value(*factory->NewOddball(
2778 factory->uninitialized_map(), "uninitialized",
2779 handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
2780
2781 set_arguments_marker(*factory->NewOddball(
2782 factory->arguments_marker_map(), "arguments_marker",
2783 handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
2784
2785 set_no_interceptor_result_sentinel(*factory->NewOddball(
2786 factory->no_interceptor_result_sentinel_map(),
2787 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2788 Oddball::kOther));
2789
2790 set_termination_exception(*factory->NewOddball(
2791 factory->termination_exception_map(), "termination_exception",
2792 handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
2793
2794 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2795 handle(Smi::FromInt(-5), isolate()),
2796 Oddball::kException));
2797
2798 for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2799 Handle<String> str =
2800 factory->InternalizeUtf8String(constant_string_table[i].contents);
2801 roots_[constant_string_table[i].index] = *str;
2802 }
2803
2804 // Allocate the hidden string which is used to identify the hidden properties
2805 // in JSObjects. The hash code has a special value so that it will not match
2806 // the empty string when searching for the property. It cannot be part of the
2807 // loop above because it needs to be allocated manually with the special
2808 // hash code in place. The hash code for the hidden_string is zero to ensure
2809 // that it will always be at the first entry in property descriptors.
2810 hidden_string_ = *factory->NewOneByteInternalizedString(
2811 OneByteVector("", 0), String::kEmptyStringHash);
2812
2813 // Create the code_stubs dictionary. The initial size is set to avoid
2814 // expanding the dictionary during bootstrapping.
2815 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2816
2817 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2818 // is set to avoid expanding the dictionary during bootstrapping.
2819 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2820
2821 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2822 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2823
2824 set_instanceof_cache_function(Smi::FromInt(0));
2825 set_instanceof_cache_map(Smi::FromInt(0));
2826 set_instanceof_cache_answer(Smi::FromInt(0));
2827
2828 CreateFixedStubs();
2829
2830 // Allocate the dictionary of intrinsic function names.
2831 Handle<NameDictionary> intrinsic_names =
2832 NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
2833 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2834 set_intrinsic_function_names(*intrinsic_names);
2835
2836 set_number_string_cache(
2837 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2838
2839 // Allocate cache for single character one byte strings.
2840 set_single_character_string_cache(
2841 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2842
2843 // Allocate cache for string split and regexp-multiple.
2844 set_string_split_cache(*factory->NewFixedArray(
2845 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2846 set_regexp_multiple_cache(*factory->NewFixedArray(
2847 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2848
2849 // Allocate cache for external strings pointing to native source code.
2850 set_natives_source_cache(
2851 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2852
2853 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2854
2855 // The symbol registry is initialized lazily.
2856 set_symbol_registry(undefined_value());
2857
2858 // Allocate object to hold object observation state.
2859 set_observation_state(*factory->NewJSObjectFromMap(
2860 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2861
2862 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2863 // Number of queued microtasks stored in Isolate::pending_microtask_count().
2864 set_microtask_queue(empty_fixed_array());
2865
2866 set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2867 set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
2868 set_frozen_symbol(*factory->NewPrivateOwnSymbol());
2869 set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
2870 set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
2871 set_generic_symbol(*factory->NewPrivateOwnSymbol());
2872 set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
2873 set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
2874 set_observed_symbol(*factory->NewPrivateOwnSymbol());
2875 set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2876 set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
2877 set_home_object_symbol(*factory->NewPrivateOwnSymbol());
2878
2879 Handle<SeededNumberDictionary> slow_element_dictionary =
2880 SeededNumberDictionary::New(isolate(), 0, TENURED);
2881 slow_element_dictionary->set_requires_slow_elements();
2882 set_empty_slow_element_dictionary(*slow_element_dictionary);
2883
2884 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2885
2886 // Handling of script id generation is in Factory::NewScript.
2887 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2888
2889 set_allocation_sites_scratchpad(
2890 *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
2891 InitializeAllocationSitesScratchpad();
2892
2893 // Initialize keyed lookup cache.
2894 isolate_->keyed_lookup_cache()->Clear();
2895
2896 // Initialize context slot cache.
2897 isolate_->context_slot_cache()->Clear();
2898
2899 // Initialize descriptor cache.
2900 isolate_->descriptor_lookup_cache()->Clear();
2901
2902 // Initialize compilation cache.
2903 isolate_->compilation_cache()->Clear();
2904 }
2905
2906
2907 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2908 RootListIndex writable_roots[] = {
2909 kStoreBufferTopRootIndex,
2910 kStackLimitRootIndex,
2911 kNumberStringCacheRootIndex,
2912 kInstanceofCacheFunctionRootIndex,
2913 kInstanceofCacheMapRootIndex,
2914 kInstanceofCacheAnswerRootIndex,
2915 kCodeStubsRootIndex,
2916 kNonMonomorphicCacheRootIndex,
2917 kPolymorphicCodeCacheRootIndex,
2918 kLastScriptIdRootIndex,
2919 kEmptyScriptRootIndex,
2920 kRealStackLimitRootIndex,
2921 kArgumentsAdaptorDeoptPCOffsetRootIndex,
2922 kConstructStubDeoptPCOffsetRootIndex,
2923 kGetterStubDeoptPCOffsetRootIndex,
2924 kSetterStubDeoptPCOffsetRootIndex,
2925 kStringTableRootIndex,
2926 };
2927
2928 for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
2929 if (root_index == writable_roots[i]) return true;
2930 }
2931 return false;
2932 }
2933
2934
2935 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2936 return !RootCanBeWrittenAfterInitialization(root_index) &&
2937 !InNewSpace(roots_array_start()[root_index]);
2938 }
2939
2940
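// The results cache is effectively two-way set-associative: the string hash,
// masked to the cache size and rounded down to an entry boundary, gives the
// primary slot, and the slot one entry further (wrapping around) serves as
// the secondary probe. Lookup checks both; Enter fills the first free slot,
// and otherwise clears the secondary slot and overwrites the primary one with
// the new entry.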
2941 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
2942 Object* key_pattern, ResultsCacheType type) {
2943 FixedArray* cache;
2944 if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2945 if (type == STRING_SPLIT_SUBSTRINGS) {
2946 DCHECK(key_pattern->IsString());
2947 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2948 cache = heap->string_split_cache();
2949 } else {
2950 DCHECK(type == REGEXP_MULTIPLE_INDICES);
2951 DCHECK(key_pattern->IsFixedArray());
2952 cache = heap->regexp_multiple_cache();
2953 }
2954
2955 uint32_t hash = key_string->Hash();
2956 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2957 ~(kArrayEntriesPerCacheEntry - 1));
2958 if (cache->get(index + kStringOffset) == key_string &&
2959 cache->get(index + kPatternOffset) == key_pattern) {
2960 return cache->get(index + kArrayOffset);
2961 }
2962 index =
2963 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2964 if (cache->get(index + kStringOffset) == key_string &&
2965 cache->get(index + kPatternOffset) == key_pattern) {
2966 return cache->get(index + kArrayOffset);
2967 }
2968 return Smi::FromInt(0);
2969 }
2970
2971
2972 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
2973 Handle<Object> key_pattern,
2974 Handle<FixedArray> value_array,
2975 ResultsCacheType type) {
2976 Factory* factory = isolate->factory();
2977 Handle<FixedArray> cache;
2978 if (!key_string->IsInternalizedString()) return;
2979 if (type == STRING_SPLIT_SUBSTRINGS) {
2980 DCHECK(key_pattern->IsString());
2981 if (!key_pattern->IsInternalizedString()) return;
2982 cache = factory->string_split_cache();
2983 } else {
2984 DCHECK(type == REGEXP_MULTIPLE_INDICES);
2985 DCHECK(key_pattern->IsFixedArray());
2986 cache = factory->regexp_multiple_cache();
2987 }
2988
2989 uint32_t hash = key_string->Hash();
2990 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2991 ~(kArrayEntriesPerCacheEntry - 1));
2992 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2993 cache->set(index + kStringOffset, *key_string);
2994 cache->set(index + kPatternOffset, *key_pattern);
2995 cache->set(index + kArrayOffset, *value_array);
2996 } else {
2997 uint32_t index2 =
2998 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
2999 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3000 cache->set(index2 + kStringOffset, *key_string);
3001 cache->set(index2 + kPatternOffset, *key_pattern);
3002 cache->set(index2 + kArrayOffset, *value_array);
3003 } else {
3004 cache->set(index2 + kStringOffset, Smi::FromInt(0));
3005 cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3006 cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3007 cache->set(index + kStringOffset, *key_string);
3008 cache->set(index + kPatternOffset, *key_pattern);
3009 cache->set(index + kArrayOffset, *value_array);
3010 }
3011 }
3012 // If the array is a reasonably short list of substrings, convert it into a
3013 // list of internalized strings.
3014 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3015 for (int i = 0; i < value_array->length(); i++) {
3016 Handle<String> str(String::cast(value_array->get(i)), isolate);
3017 Handle<String> internalized_str = factory->InternalizeString(str);
3018 value_array->set(i, *internalized_str);
3019 }
3020 }
3021 // Convert backing store to a copy-on-write array.
3022 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3023 }
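// Replacement policy sketch: the primary bucket is used if empty; otherwise
// the secondary bucket is used if empty; otherwise the secondary bucket is
// cleared and the new entry overwrites the primary bucket. This bounds every
// lookup to at most two probes, at the cost of occasionally evicting a live
// entry.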
3024
3025
3026 void RegExpResultsCache::Clear(FixedArray* cache) {
3027 for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3028 cache->set(i, Smi::FromInt(0));
3029 }
3030 }
3031
3032
3033 int Heap::FullSizeNumberStringCacheLength() {
3034 // Compute the size of the number string cache based on the max new space size.
3035 // The number string cache has a minimum size based on twice the initial cache
3036 // size to ensure that it is bigger after being made 'full size'.
3037 int number_string_cache_size = max_semi_space_size_ / 512;
3038 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3039 Min(0x4000, number_string_cache_size));
3040 // There is a string and a number per entry so the length is twice the number
3041 // of entries.
3042 return number_string_cache_size * 2;
3043 }
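// Worked example (values assumed for illustration): with a 16 MB maximum
// semispace, 16 MB / 512 == 32768 entries, clamped by Min(0x4000, ...) to
// 16384 entries (the Max() with twice the initial cache size only matters for
// tiny semispaces), so the returned length is 32768 slots.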
3044
3045
3046 void Heap::FlushNumberStringCache() {
3047 // Flush the number to string cache.
3048 int len = number_string_cache()->length();
3049 for (int i = 0; i < len; i++) {
3050 number_string_cache()->set_undefined(i);
3051 }
3052 }
3053
3054
3055 void Heap::FlushAllocationSitesScratchpad() {
3056 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3057 allocation_sites_scratchpad()->set_undefined(i);
3058 }
3059 allocation_sites_scratchpad_length_ = 0;
3060 }
3061
3062
3063 void Heap::InitializeAllocationSitesScratchpad() {
3064 DCHECK(allocation_sites_scratchpad()->length() ==
3065 kAllocationSiteScratchpadSize);
3066 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3067 allocation_sites_scratchpad()->set_undefined(i);
3068 }
3069 }
3070
3071
3072 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3073 ScratchpadSlotMode mode) {
3074 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3075 // We cannot use the normal write-barrier because slots need to be
3076 // recorded with non-incremental marking as well. We have to explicitly
3077 // record the slot to take evacuation candidates into account.
3078 allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3079 site, SKIP_WRITE_BARRIER);
3080 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3081 allocation_sites_scratchpad_length_);
3082
3083 if (mode == RECORD_SCRATCHPAD_SLOT) {
3084 // We need to allow slots buffer overflow here since the evacuation
3085 // candidates are not part of the global list of old space pages and
3086 // releasing an evacuation candidate due to a slots buffer overflow
3087 // results in lost pages.
3088 mark_compact_collector()->RecordSlot(slot, slot, *slot,
3089 SlotsBuffer::IGNORE_OVERFLOW);
3090 }
3091 allocation_sites_scratchpad_length_++;
3092 }
3093 }
3094
3095
3096 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3097 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3098 }
3099
3100
3101 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3102 ExternalArrayType array_type) {
3103 switch (array_type) {
3104 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3105 case kExternal##Type##Array: \
3106 return kExternal##Type##ArrayMapRootIndex;
3107
3108 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3109 #undef ARRAY_TYPE_TO_ROOT_INDEX
3110
3111 default:
3112 UNREACHABLE();
3113 return kUndefinedValueRootIndex;
3114 }
3115 }
3116
3117
3118 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3119 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3120 }
3121
3122
3123 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3124 ExternalArrayType array_type) {
3125 switch (array_type) {
3126 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3127 case kExternal##Type##Array: \
3128 return kFixed##Type##ArrayMapRootIndex;
3129
3130 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3131 #undef ARRAY_TYPE_TO_ROOT_INDEX
3132
3133 default:
3134 UNREACHABLE();
3135 return kUndefinedValueRootIndex;
3136 }
3137 }
3138
3139
3140 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3141 ElementsKind elementsKind) {
3142 switch (elementsKind) {
3143 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3144 case EXTERNAL_##TYPE##_ELEMENTS: \
3145 return kEmptyExternal##Type##ArrayRootIndex;
3146
3147 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3148 #undef ELEMENT_KIND_TO_ROOT_INDEX
3149
3150 default:
3151 UNREACHABLE();
3152 return kUndefinedValueRootIndex;
3153 }
3154 }
3155
3156
3157 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3158 ElementsKind elementsKind) {
3159 switch (elementsKind) {
3160 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3161 case TYPE##_ELEMENTS: \
3162 return kEmptyFixed##Type##ArrayRootIndex;
3163
3164 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3165 #undef ELEMENT_KIND_TO_ROOT_INDEX
3166 default:
3167 UNREACHABLE();
3168 return kUndefinedValueRootIndex;
3169 }
3170 }
3171
3172
3173 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3174 return ExternalArray::cast(
3175 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3176 }
3177
3178
3179 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3180 return FixedTypedArrayBase::cast(
3181 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3182 }
3183
3184
3185 AllocationResult Heap::AllocateForeign(Address address,
3186 PretenureFlag pretenure) {
3187 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3188 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3189 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3190 Foreign* result;
3191 AllocationResult allocation = Allocate(foreign_map(), space);
3192 if (!allocation.To(&result)) return allocation;
3193 result->set_foreign_address(address);
3194 return result;
3195 }
3196
3197
3198 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3199 if (length < 0 || length > ByteArray::kMaxLength) {
3200 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3201 }
3202 int size = ByteArray::SizeFor(length);
3203 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3204 HeapObject* result;
3205 {
3206 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3207 if (!allocation.To(&result)) return allocation;
3208 }
3209
3210 result->set_map_no_write_barrier(byte_array_map());
3211 ByteArray::cast(result)->set_length(length);
3212 return result;
3213 }
3214
3215
3216 void Heap::CreateFillerObjectAt(Address addr, int size) {
3217 if (size == 0) return;
3218 HeapObject* filler = HeapObject::FromAddress(addr);
3219 if (size == kPointerSize) {
3220 filler->set_map_no_write_barrier(one_pointer_filler_map());
3221 } else if (size == 2 * kPointerSize) {
3222 filler->set_map_no_write_barrier(two_pointer_filler_map());
3223 } else {
3224 filler->set_map_no_write_barrier(free_space_map());
3225 FreeSpace::cast(filler)->set_size(size);
3226 }
3227 }
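// Filler sizing sketch: a one-pointer gap gets the one-pointer filler map, a
// two-pointer gap gets the two-pointer filler map, and anything larger (e.g.
// a 3 * kPointerSize hole) becomes a FreeSpace object whose size field records
// the full extent of the hole, so heap iteration can step over it.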
3228
3229
3230 bool Heap::CanMoveObjectStart(HeapObject* object) {
3231 Address address = object->address();
3232 bool is_in_old_pointer_space = InOldPointerSpace(address);
3233 bool is_in_old_data_space = InOldDataSpace(address);
3234
3235 if (lo_space()->Contains(object)) return false;
3236
3237 Page* page = Page::FromAddress(address);
3238 // We can move the object start if:
3239 // (1) the object is not in old pointer or old data space,
3240 // (2) the page of the object was already swept,
3241 // (3) the page was already concurrently swept. This case is an optimization
3242 // for concurrent sweeping. The WasSwept predicate for concurrently swept
3243 // pages is set after sweeping all pages.
3244 return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3245 page->WasSwept() || page->SweepingCompleted();
3246 }
3247
3248
3249 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3250 if (incremental_marking()->IsMarking() &&
3251 Marking::IsBlack(Marking::MarkBitFrom(address))) {
3252 if (mode == FROM_GC) {
3253 MemoryChunk::IncrementLiveBytesFromGC(address, by);
3254 } else {
3255 MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3256 }
3257 }
3258 }
3259
3260
3261 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3262 int elements_to_trim) {
3263 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3264 const int bytes_to_trim = elements_to_trim * element_size;
3265 Map* map = object->map();
3266
3267 // For now this trick is only applied to objects in new and paged space.
3268 // In large object space the object's start must coincide with the chunk's
3269 // start, and thus the trick is simply not applicable there.
3270 DCHECK(!lo_space()->Contains(object));
3271 DCHECK(object->map() != fixed_cow_array_map());
3272
3273 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3274 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3275 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3276
3277 const int len = object->length();
3278 DCHECK(elements_to_trim <= len);
3279
3280 // Calculate location of new array start.
3281 Address new_start = object->address() + bytes_to_trim;
3282
3283 // Technically in new space this write might be omitted (except for
3284 // debug mode, which iterates through the heap), but to be on the safe side
3285 // we still do it.
3286 CreateFillerObjectAt(object->address(), bytes_to_trim);
3287
3288 // Initialize header of the trimmed array. Since left trimming is only
3289 // performed on pages which are not concurrently swept creating a filler
3290 // object does not require synchronization.
3291 DCHECK(CanMoveObjectStart(object));
3292 Object** former_start = HeapObject::RawField(object, 0);
3293 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3294 former_start[new_start_index] = map;
3295 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3296 FixedArrayBase* new_object =
3297 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3298
3299 // Maintain consistency of live bytes during incremental marking
3300 marking()->TransferMark(object->address(), new_start);
3301 AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3302
3303 // Notify the heap profiler of change in object layout.
3304 OnMoveEvent(new_object, object, new_object->Size());
3305 return new_object;
3306 }
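// Left-trim layout sketch (assuming a FixedArray and elements_to_trim == 2):
// the first 2 * kPointerSize bytes of the old object become a two-pointer
// filler, and the map plus the new length (len - 2) are written where elements
// 0 and 1 used to live, so the trimmed array now starts at
// old_start + 2 * kPointerSize.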
3307
3308
3309 // Force instantiation of templatized method.
3310 template
3311 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3312 template
3313 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3314
3315
3316 template<Heap::InvocationMode mode>
3317 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3318 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3319 const int bytes_to_trim = elements_to_trim * element_size;
3320
3321 // For now this trick is only applied to objects in new and paged space.
3322 DCHECK(object->map() != fixed_cow_array_map());
3323
3324 const int len = object->length();
3325 DCHECK(elements_to_trim < len);
3326
3327 // Calculate location of new array end.
3328 Address new_end = object->address() + object->Size() - bytes_to_trim;
3329
3330 // Technically in new space this write might be omitted (except for
3331 // debug mode, which iterates through the heap), but to be on the safe side
3332 // we still do it.
3333 // We do not create a filler for objects in large object space.
3334 // TODO(hpayer): We should shrink the large object page if the size
3335 // of the object changed significantly.
3336 if (!lo_space()->Contains(object)) {
3337 CreateFillerObjectAt(new_end, bytes_to_trim);
3338 }
3339
3340 // Initialize header of the trimmed array. We are storing the new length
3341 // using release store after creating a filler for the left-over space to
3342 // avoid races with the sweeper thread.
3343 object->synchronized_set_length(len - elements_to_trim);
3344
3345 // Maintain consistency of live bytes during incremental marking
3346 AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3347
3348 // Notify the heap profiler of the change in object layout. The array may not
3349 // be moved during GC, but its size still has to be adjusted.
3350 HeapProfiler* profiler = isolate()->heap_profiler();
3351 if (profiler->is_tracking_allocations()) {
3352 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3353 }
3354 }
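// Right-trim sketch (assuming a FixedArray of length 10 and
// elements_to_trim == 3): a filler covering the last 3 * kPointerSize bytes is
// written after the new end (unless the array lives in large object space),
// and the length field is release-stored as 7 so concurrent sweepers never
// observe a length that extends into the filler.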
3355
3356
3357 AllocationResult Heap::AllocateExternalArray(int length,
3358 ExternalArrayType array_type,
3359 void* external_pointer,
3360 PretenureFlag pretenure) {
3361 int size = ExternalArray::kAlignedSize;
3362 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3363 HeapObject* result;
3364 {
3365 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3366 if (!allocation.To(&result)) return allocation;
3367 }
3368
3369 result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3370 ExternalArray::cast(result)->set_length(length);
3371 ExternalArray::cast(result)->set_external_pointer(external_pointer);
3372 return result;
3373 }
3374
3375 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3376 ElementsKind* element_kind) {
3377 switch (array_type) {
3378 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3379 case kExternal##Type##Array: \
3380 *element_size = size; \
3381 *element_kind = TYPE##_ELEMENTS; \
3382 return;
3383
3384 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3385 #undef TYPED_ARRAY_CASE
3386
3387 default:
3388 *element_size = 0; // Bogus
3389 *element_kind = UINT8_ELEMENTS; // Bogus
3390 UNREACHABLE();
3391 }
3392 }
3393
3394
3395 AllocationResult Heap::AllocateFixedTypedArray(int length,
3396 ExternalArrayType array_type,
3397 PretenureFlag pretenure) {
3398 int element_size;
3399 ElementsKind elements_kind;
3400 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3401 int size = OBJECT_POINTER_ALIGN(length * element_size +
3402 FixedTypedArrayBase::kDataOffset);
3403 #ifndef V8_HOST_ARCH_64_BIT
3404 if (array_type == kExternalFloat64Array) {
3405 size += kPointerSize;
3406 }
3407 #endif
3408 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3409
3410 HeapObject* object;
3411 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3412 if (!allocation.To(&object)) return allocation;
3413
3414 if (array_type == kExternalFloat64Array) {
3415 object = EnsureDoubleAligned(this, object, size);
3416 }
3417
3418 object->set_map(MapForFixedTypedArray(array_type));
3419 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3420 elements->set_length(length);
3421 memset(elements->DataPtr(), 0, elements->DataSize());
3422 return elements;
3423 }
3424
3425
3426 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3427 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3428 AllocationResult allocation =
3429 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3430
3431 HeapObject* result;
3432 if (!allocation.To(&result)) return allocation;
3433
3434 if (immovable) {
3435 Address address = result->address();
3436 // Code objects which should stay at a fixed address are allocated either
3437 // in the first page of code space (objects on the first page of each space
3438 // are never moved) or in large object space.
3439 if (!code_space_->FirstPage()->Contains(address) &&
3440 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3441 // Discard the first code allocation, which was on a page where it could
3442 // be moved.
3443 CreateFillerObjectAt(result->address(), object_size);
3444 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3445 if (!allocation.To(&result)) return allocation;
3446 OnAllocationEvent(result, object_size);
3447 }
3448 }
3449
3450 result->set_map_no_write_barrier(code_map());
3451 Code* code = Code::cast(result);
3452 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3453 isolate_->code_range()->contains(code->address()));
3454 code->set_gc_metadata(Smi::FromInt(0));
3455 code->set_ic_age(global_ic_age_);
3456 return code;
3457 }
3458
3459
3460 AllocationResult Heap::CopyCode(Code* code) {
3461 AllocationResult allocation;
3462 HeapObject* new_constant_pool;
3463 if (FLAG_enable_ool_constant_pool &&
3464 code->constant_pool() != empty_constant_pool_array()) {
3465 // Copy the constant pool, since edits to the copied code may modify
3466 // the constant pool.
3467 allocation = CopyConstantPoolArray(code->constant_pool());
3468 if (!allocation.To(&new_constant_pool)) return allocation;
3469 } else {
3470 new_constant_pool = empty_constant_pool_array();
3471 }
3472
3473 HeapObject* result;
3474 // Allocate an object the same size as the code object.
3475 int obj_size = code->Size();
3476 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3477 if (!allocation.To(&result)) return allocation;
3478
3479 // Copy code object.
3480 Address old_addr = code->address();
3481 Address new_addr = result->address();
3482 CopyBlock(new_addr, old_addr, obj_size);
3483 Code* new_code = Code::cast(result);
3484
3485 // Update the constant pool.
3486 new_code->set_constant_pool(new_constant_pool);
3487
3488 // Relocate the copy.
3489 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3490 isolate_->code_range()->contains(code->address()));
3491 new_code->Relocate(new_addr - old_addr);
3492 return new_code;
3493 }
3494
3495
3496 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3497 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3498 // do not risk leaving an uninitialized Code object (and breaking the heap).
3499 ByteArray* reloc_info_array;
3500 {
3501 AllocationResult allocation =
3502 AllocateByteArray(reloc_info.length(), TENURED);
3503 if (!allocation.To(&reloc_info_array)) return allocation;
3504 }
3505 HeapObject* new_constant_pool;
3506 if (FLAG_enable_ool_constant_pool &&
3507 code->constant_pool() != empty_constant_pool_array()) {
3508 // Copy the constant pool, since edits to the copied code may modify
3509 // the constant pool.
3510 AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3511 if (!allocation.To(&new_constant_pool)) return allocation;
3512 } else {
3513 new_constant_pool = empty_constant_pool_array();
3514 }
3515
3516 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3517
3518 int new_obj_size = Code::SizeFor(new_body_size);
3519
3520 Address old_addr = code->address();
3521
3522 size_t relocation_offset =
3523 static_cast<size_t>(code->instruction_end() - old_addr);
3524
3525 HeapObject* result;
3526 AllocationResult allocation =
3527 AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3528 if (!allocation.To(&result)) return allocation;
3529
3530 // Copy code object.
3531 Address new_addr = result->address();
3532
3533 // Copy header and instructions.
3534 CopyBytes(new_addr, old_addr, relocation_offset);
3535
3536 Code* new_code = Code::cast(result);
3537 new_code->set_relocation_info(reloc_info_array);
3538
3539 // Update constant pool.
3540 new_code->set_constant_pool(new_constant_pool);
3541
3542 // Copy patched rinfo.
3543 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3544 static_cast<size_t>(reloc_info.length()));
3545
3546 // Relocate the copy.
3547 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3548 isolate_->code_range()->contains(code->address()));
3549 new_code->Relocate(new_addr - old_addr);
3550
3551 #ifdef VERIFY_HEAP
3552 if (FLAG_verify_heap) code->ObjectVerify();
3553 #endif
3554 return new_code;
3555 }
3556
3557
3558 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3559 AllocationSite* allocation_site) {
3560 memento->set_map_no_write_barrier(allocation_memento_map());
3561 DCHECK(allocation_site->map() == allocation_site_map());
3562 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3563 if (FLAG_allocation_site_pretenuring) {
3564 allocation_site->IncrementMementoCreateCount();
3565 }
3566 }
3567
3568
3569 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3570 AllocationSite* allocation_site) {
3571 DCHECK(gc_state_ == NOT_IN_GC);
3572 DCHECK(map->instance_type() != MAP_TYPE);
3573 // If allocation failures are disallowed, we may allocate in a different
3574 // space when new space is full and the object is not a large object.
3575 AllocationSpace retry_space =
3576 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3577 int size = map->instance_size();
3578 if (allocation_site != NULL) {
3579 size += AllocationMemento::kSize;
3580 }
3581 HeapObject* result;
3582 AllocationResult allocation = AllocateRaw(size, space, retry_space);
3583 if (!allocation.To(&result)) return allocation;
3584 // No need for write barrier since object is white and map is in old space.
3585 result->set_map_no_write_barrier(map);
3586 if (allocation_site != NULL) {
3587 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3588 reinterpret_cast<Address>(result) + map->instance_size());
3589 InitializeAllocationMemento(alloc_memento, allocation_site);
3590 }
3591 return result;
3592 }
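// Layout sketch when an allocation site is passed in:
//
//   result                          -> object body (map->instance_size() bytes)
//   result + map->instance_size()   -> AllocationMemento (AllocationMemento::kSize)
//
// The memento is placed contiguously after the object so that it can later be
// found by looking just past the object's end.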
3593
3594
3595 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3596 Map* map) {
3597 obj->set_properties(properties);
3598 obj->initialize_elements();
3599 // TODO(1240798): Initialize the object's body using valid initial values
3600 // according to the object's initial map. For example, if the map's
3601 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3602 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3603 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3604 // verification code has to cope with (temporarily) invalid objects. See,
3605 // for example, JSArray::JSArrayVerify.
3606 Object* filler;
3607 // We cannot always fill with one_pointer_filler_map because objects
3608 // created from API functions expect their internal fields to be initialized
3609 // with undefined_value.
3610 // Pre-allocated fields need to be initialized with undefined_value as well
3611 // so that object accesses before the constructor completes (e.g. in the
3612 // debugger) will not cause a crash.
3613 if (map->constructor()->IsJSFunction() &&
3614 JSFunction::cast(map->constructor())
3615 ->IsInobjectSlackTrackingInProgress()) {
3616 // We might want to shrink the object later.
3617 DCHECK(obj->GetInternalFieldCount() == 0);
3618 filler = Heap::one_pointer_filler_map();
3619 } else {
3620 filler = Heap::undefined_value();
3621 }
3622 obj->InitializeBody(map, Heap::undefined_value(), filler);
3623 }
3624
3625
3626 AllocationResult Heap::AllocateJSObjectFromMap(
3627 Map* map, PretenureFlag pretenure, bool allocate_properties,
3628 AllocationSite* allocation_site) {
3629 // JSFunctions should be allocated using AllocateFunction to be
3630 // properly initialized.
3631 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3632
3633 // Both types of global objects should be allocated using
3634 // AllocateGlobalObject to be properly initialized.
3635 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3636 DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3637
3638 // Allocate the backing storage for the properties.
3639 FixedArray* properties;
3640 if (allocate_properties) {
3641 int prop_size = map->InitialPropertiesLength();
3642 DCHECK(prop_size >= 0);
3643 {
3644 AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3645 if (!allocation.To(&properties)) return allocation;
3646 }
3647 } else {
3648 properties = empty_fixed_array();
3649 }
3650
3651 // Allocate the JSObject.
3652 int size = map->instance_size();
3653 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3654 JSObject* js_obj;
3655 AllocationResult allocation = Allocate(map, space, allocation_site);
3656 if (!allocation.To(&js_obj)) return allocation;
3657
3658 // Initialize the JSObject.
3659 InitializeJSObjectFromMap(js_obj, properties, map);
3660 DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3661 js_obj->HasFixedTypedArrayElements());
3662 return js_obj;
3663 }
3664
3665
3666 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3667 PretenureFlag pretenure,
3668 AllocationSite* allocation_site) {
3669 DCHECK(constructor->has_initial_map());
3670
3671 // Allocate the object based on the constructor's initial map.
3672 AllocationResult allocation = AllocateJSObjectFromMap(
3673 constructor->initial_map(), pretenure, true, allocation_site);
3674 #ifdef DEBUG
3675 // Make sure result is NOT a global object if valid.
3676 HeapObject* obj;
3677 DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3678 #endif
3679 return allocation;
3680 }
3681
3682
3683 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3684 // Never used to copy functions. If functions need to be copied we
3685 // have to be careful to clear the literals array.
3686 SLOW_DCHECK(!source->IsJSFunction());
3687
3688 // Make the clone.
3689 Map* map = source->map();
3690 int object_size = map->instance_size();
3691 HeapObject* clone;
3692
3693 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3694
3695 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3696
3697 // If we're forced to always allocate, we use the general allocation
3698 // functions which may leave us with an object in old space.
3699 if (always_allocate()) {
3700 {
3701 AllocationResult allocation =
3702 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3703 if (!allocation.To(&clone)) return allocation;
3704 }
3705 Address clone_address = clone->address();
3706 CopyBlock(clone_address, source->address(), object_size);
3707 // Update write barrier for all fields that lie beyond the header.
3708 RecordWrites(clone_address, JSObject::kHeaderSize,
3709 (object_size - JSObject::kHeaderSize) / kPointerSize);
3710 } else {
3711 wb_mode = SKIP_WRITE_BARRIER;
3712
3713 {
3714 int adjusted_object_size =
3715 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3716 AllocationResult allocation =
3717 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3718 if (!allocation.To(&clone)) return allocation;
3719 }
3720 SLOW_DCHECK(InNewSpace(clone));
3721 // Since we know the clone is allocated in new space, we can copy
3722 // the contents without worrying about updating the write barrier.
3723 CopyBlock(clone->address(), source->address(), object_size);
3724
3725 if (site != NULL) {
3726 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3727 reinterpret_cast<Address>(clone) + object_size);
3728 InitializeAllocationMemento(alloc_memento, site);
3729 }
3730 }
3731
3732 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3733 source->GetElementsKind());
3734 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3735 FixedArray* properties = FixedArray::cast(source->properties());
3736 // Update elements if necessary.
3737 if (elements->length() > 0) {
3738 FixedArrayBase* elem;
3739 {
3740 AllocationResult allocation;
3741 if (elements->map() == fixed_cow_array_map()) {
3742 allocation = FixedArray::cast(elements);
3743 } else if (source->HasFastDoubleElements()) {
3744 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3745 } else {
3746 allocation = CopyFixedArray(FixedArray::cast(elements));
3747 }
3748 if (!allocation.To(&elem)) return allocation;
3749 }
3750 JSObject::cast(clone)->set_elements(elem, wb_mode);
3751 }
3752 // Update properties if necessary.
3753 if (properties->length() > 0) {
3754 FixedArray* prop;
3755 {
3756 AllocationResult allocation = CopyFixedArray(properties);
3757 if (!allocation.To(&prop)) return allocation;
3758 }
3759 JSObject::cast(clone)->set_properties(prop, wb_mode);
3760 }
3761 // Return the new clone.
3762 return clone;
3763 }
3764
3765
3766 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3767 int len) {
3768 // Only works for one-byte strings.
3769 DCHECK(vector.length() == len);
3770 MemCopy(chars, vector.start(), len);
3771 }
3772
3773 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3774 int len) {
3775 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3776 unsigned stream_length = vector.length();
3777 while (stream_length != 0) {
3778 unsigned consumed = 0;
3779 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3780 DCHECK(c != unibrow::Utf8::kBadChar);
3781 DCHECK(consumed <= stream_length);
3782 stream_length -= consumed;
3783 stream += consumed;
3784 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3785 len -= 2;
3786 if (len < 0) break;
3787 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3788 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3789 } else {
3790 len -= 1;
3791 if (len < 0) break;
3792 *chars++ = c;
3793 }
3794 }
3795 DCHECK(stream_length == 0);
3796 DCHECK(len == 0);
3797 }
3798
3799
3800 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3801 DCHECK(s->length() == len);
3802 String::WriteToFlat(s, chars, 0, len);
3803 }
3804
3805
3806 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3807 DCHECK(s->length() == len);
3808 String::WriteToFlat(s, chars, 0, len);
3809 }
3810
3811
3812 template <bool is_one_byte, typename T>
3813 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3814 uint32_t hash_field) {
3815 DCHECK(chars >= 0);
3816 // Compute map and object size.
3817 int size;
3818 Map* map;
3819
3820 DCHECK_LE(0, chars);
3821 DCHECK_GE(String::kMaxLength, chars);
3822 if (is_one_byte) {
3823 map = one_byte_internalized_string_map();
3824 size = SeqOneByteString::SizeFor(chars);
3825 } else {
3826 map = internalized_string_map();
3827 size = SeqTwoByteString::SizeFor(chars);
3828 }
3829 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3830
3831 // Allocate string.
3832 HeapObject* result;
3833 {
3834 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3835 if (!allocation.To(&result)) return allocation;
3836 }
3837
3838 result->set_map_no_write_barrier(map);
3839 // Set length and hash fields of the allocated string.
3840 String* answer = String::cast(result);
3841 answer->set_length(chars);
3842 answer->set_hash_field(hash_field);
3843
3844 DCHECK_EQ(size, answer->Size());
3845
3846 if (is_one_byte) {
3847 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3848 } else {
3849 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3850 }
3851 return answer;
3852 }
3853
3854
3855 // Need explicit instantiations.
3856 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3857 int,
3858 uint32_t);
3859 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3860 int,
3861 uint32_t);
3862 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3863 Vector<const char>, int, uint32_t);
3864
3865
3866 AllocationResult Heap::AllocateRawOneByteString(int length,
3867 PretenureFlag pretenure) {
3868 DCHECK_LE(0, length);
3869 DCHECK_GE(String::kMaxLength, length);
3870 int size = SeqOneByteString::SizeFor(length);
3871 DCHECK(size <= SeqOneByteString::kMaxSize);
3872 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3873
3874 HeapObject* result;
3875 {
3876 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3877 if (!allocation.To(&result)) return allocation;
3878 }
3879
3880 // Partially initialize the object.
3881 result->set_map_no_write_barrier(one_byte_string_map());
3882 String::cast(result)->set_length(length);
3883 String::cast(result)->set_hash_field(String::kEmptyHashField);
3884 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3885
3886 return result;
3887 }
3888
3889
3890 AllocationResult Heap::AllocateRawTwoByteString(int length,
3891 PretenureFlag pretenure) {
3892 DCHECK_LE(0, length);
3893 DCHECK_GE(String::kMaxLength, length);
3894 int size = SeqTwoByteString::SizeFor(length);
3895 DCHECK(size <= SeqTwoByteString::kMaxSize);
3896 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3897
3898 HeapObject* result;
3899 {
3900 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3901 if (!allocation.To(&result)) return allocation;
3902 }
3903
3904 // Partially initialize the object.
3905 result->set_map_no_write_barrier(string_map());
3906 String::cast(result)->set_length(length);
3907 String::cast(result)->set_hash_field(String::kEmptyHashField);
3908 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3909 return result;
3910 }
3911
3912
3913 AllocationResult Heap::AllocateEmptyFixedArray() {
3914 int size = FixedArray::SizeFor(0);
3915 HeapObject* result;
3916 {
3917 AllocationResult allocation =
3918 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3919 if (!allocation.To(&result)) return allocation;
3920 }
3921 // Initialize the object.
3922 result->set_map_no_write_barrier(fixed_array_map());
3923 FixedArray::cast(result)->set_length(0);
3924 return result;
3925 }
3926
3927
3928 AllocationResult Heap::AllocateEmptyExternalArray(
3929 ExternalArrayType array_type) {
3930 return AllocateExternalArray(0, array_type, NULL, TENURED);
3931 }
3932
3933
3934 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3935 if (!InNewSpace(src)) {
3936 return src;
3937 }
3938
3939 int len = src->length();
3940 HeapObject* obj;
3941 {
3942 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3943 if (!allocation.To(&obj)) return allocation;
3944 }
3945 obj->set_map_no_write_barrier(fixed_array_map());
3946 FixedArray* result = FixedArray::cast(obj);
3947 result->set_length(len);
3948
3949 // Copy the content
3950 DisallowHeapAllocation no_gc;
3951 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3952 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3953
3954 // TODO(mvstanton): The map is set twice because of protection against calling
3955 // set() on a COW FixedArray. Issue v8:3221 was created to track this; once
3956 // it is resolved we might be able to remove this whole method.
3957 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3958 return result;
3959 }
3960
3961
3962 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3963 ExternalArrayType array_type) {
3964 return AllocateFixedTypedArray(0, array_type, TENURED);
3965 }
3966
3967
3968 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3969 int len = src->length();
3970 HeapObject* obj;
3971 {
3972 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
3973 if (!allocation.To(&obj)) return allocation;
3974 }
3975 if (InNewSpace(obj)) {
3976 obj->set_map_no_write_barrier(map);
3977 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
3978 FixedArray::SizeFor(len) - kPointerSize);
3979 return obj;
3980 }
3981 obj->set_map_no_write_barrier(map);
3982 FixedArray* result = FixedArray::cast(obj);
3983 result->set_length(len);
3984
3985 // Copy the content
3986 DisallowHeapAllocation no_gc;
3987 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3988 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3989 return result;
3990 }
3991
3992
3993 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3994 Map* map) {
3995 int len = src->length();
3996 HeapObject* obj;
3997 {
3998 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
3999 if (!allocation.To(&obj)) return allocation;
4000 }
4001 obj->set_map_no_write_barrier(map);
4002 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4003 src->address() + FixedDoubleArray::kLengthOffset,
4004 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4005 return obj;
4006 }
4007
4008
4009 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4010 Map* map) {
4011 HeapObject* obj;
4012 if (src->is_extended_layout()) {
4013 ConstantPoolArray::NumberOfEntries small(src,
4014 ConstantPoolArray::SMALL_SECTION);
4015 ConstantPoolArray::NumberOfEntries extended(
4016 src, ConstantPoolArray::EXTENDED_SECTION);
4017 AllocationResult allocation =
4018 AllocateExtendedConstantPoolArray(small, extended);
4019 if (!allocation.To(&obj)) return allocation;
4020 } else {
4021 ConstantPoolArray::NumberOfEntries small(src,
4022 ConstantPoolArray::SMALL_SECTION);
4023 AllocationResult allocation = AllocateConstantPoolArray(small);
4024 if (!allocation.To(&obj)) return allocation;
4025 }
4026 obj->set_map_no_write_barrier(map);
4027 CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4028 src->address() + ConstantPoolArray::kFirstEntryOffset,
4029 src->size() - ConstantPoolArray::kFirstEntryOffset);
4030 return obj;
4031 }
4032
4033
4034 AllocationResult Heap::AllocateRawFixedArray(int length,
4035 PretenureFlag pretenure) {
4036 if (length < 0 || length > FixedArray::kMaxLength) {
4037 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4038 }
4039 int size = FixedArray::SizeFor(length);
4040 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4041
4042 return AllocateRaw(size, space, OLD_POINTER_SPACE);
4043 }
4044
4045
4046 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4047 PretenureFlag pretenure,
4048 Object* filler) {
4049 DCHECK(length >= 0);
4050 DCHECK(empty_fixed_array()->IsFixedArray());
4051 if (length == 0) return empty_fixed_array();
4052
4053 DCHECK(!InNewSpace(filler));
4054 HeapObject* result;
4055 {
4056 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4057 if (!allocation.To(&result)) return allocation;
4058 }
4059
4060 result->set_map_no_write_barrier(fixed_array_map());
4061 FixedArray* array = FixedArray::cast(result);
4062 array->set_length(length);
4063 MemsetPointer(array->data_start(), filler, length);
4064 return array;
4065 }
4066
4067
4068 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4069 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4070 }
4071
4072
4073 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4074 if (length == 0) return empty_fixed_array();
4075
4076 HeapObject* obj;
4077 {
4078 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4079 if (!allocation.To(&obj)) return allocation;
4080 }
4081
4082 obj->set_map_no_write_barrier(fixed_array_map());
4083 FixedArray::cast(obj)->set_length(length);
4084 return obj;
4085 }
4086
4087
4088 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4089 int length, PretenureFlag pretenure) {
4090 if (length == 0) return empty_fixed_array();
4091
4092 HeapObject* elements;
4093 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4094 if (!allocation.To(&elements)) return allocation;
4095
4096 elements->set_map_no_write_barrier(fixed_double_array_map());
4097 FixedDoubleArray::cast(elements)->set_length(length);
4098 return elements;
4099 }
4100
4101
4102 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4103 PretenureFlag pretenure) {
4104 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4105 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4106 }
4107 int size = FixedDoubleArray::SizeFor(length);
4108 #ifndef V8_HOST_ARCH_64_BIT
4109 size += kPointerSize;
4110 #endif
4111 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4112
4113 HeapObject* object;
4114 {
4115 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4116 if (!allocation.To(&object)) return allocation;
4117 }
4118
4119 return EnsureDoubleAligned(this, object, size);
4120 }
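// Alignment sketch for 32-bit hosts: kPointerSize extra bytes are requested so
// that EnsureDoubleAligned can, if necessary, shift the array onto an 8-byte
// boundary and turn the spare word into a one-pointer filler. On 64-bit hosts
// pointers are already 8-byte aligned, so no slack is needed.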
4121
4122
4123 AllocationResult Heap::AllocateConstantPoolArray(
4124 const ConstantPoolArray::NumberOfEntries& small) {
4125 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4126 int size = ConstantPoolArray::SizeFor(small);
4127 #ifndef V8_HOST_ARCH_64_BIT
4128 size += kPointerSize;
4129 #endif
4130 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4131
4132 HeapObject* object;
4133 {
4134 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4135 if (!allocation.To(&object)) return allocation;
4136 }
4137 object = EnsureDoubleAligned(this, object, size);
4138 object->set_map_no_write_barrier(constant_pool_array_map());
4139
4140 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4141 constant_pool->Init(small);
4142 constant_pool->ClearPtrEntries(isolate());
4143 return constant_pool;
4144 }
4145
4146
4147 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4148 const ConstantPoolArray::NumberOfEntries& small,
4149 const ConstantPoolArray::NumberOfEntries& extended) {
4150 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4151 CHECK(extended.are_in_range(0, kMaxInt));
4152 int size = ConstantPoolArray::SizeForExtended(small, extended);
4153 #ifndef V8_HOST_ARCH_64_BIT
4154 size += kPointerSize;
4155 #endif
4156 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4157
4158 HeapObject* object;
4159 {
4160 AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4161 if (!allocation.To(&object)) return allocation;
4162 }
4163 object = EnsureDoubleAligned(this, object, size);
4164 object->set_map_no_write_barrier(constant_pool_array_map());
4165
4166 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4167 constant_pool->InitExtended(small, extended);
4168 constant_pool->ClearPtrEntries(isolate());
4169 return constant_pool;
4170 }
4171
4172
4173 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4174 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4175 int size = ConstantPoolArray::SizeFor(small);
4176 HeapObject* result;
4177 {
4178 AllocationResult allocation =
4179 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4180 if (!allocation.To(&result)) return allocation;
4181 }
4182 result->set_map_no_write_barrier(constant_pool_array_map());
4183 ConstantPoolArray::cast(result)->Init(small);
4184 return result;
4185 }
4186
4187
4188 AllocationResult Heap::AllocateSymbol() {
4189 // Statically ensure that it is safe to allocate symbols in paged spaces.
4190 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4191
4192 HeapObject* result;
4193 AllocationResult allocation =
4194 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4195 if (!allocation.To(&result)) return allocation;
4196
4197 result->set_map_no_write_barrier(symbol_map());
4198
4199 // Generate a random hash value.
4200 int hash;
4201 int attempts = 0;
4202 do {
4203 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4204 attempts++;
4205 } while (hash == 0 && attempts < 30);
4206 if (hash == 0) hash = 1; // never return 0
4207
4208 Symbol::cast(result)
4209 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4210 Symbol::cast(result)->set_name(undefined_value());
4211 Symbol::cast(result)->set_flags(Smi::FromInt(0));
4212
4213 DCHECK(!Symbol::cast(result)->is_private());
4214 return result;
4215 }
4216
4217
4218 AllocationResult Heap::AllocateStruct(InstanceType type) {
4219 Map* map;
4220 switch (type) {
4221 #define MAKE_CASE(NAME, Name, name) \
4222 case NAME##_TYPE: \
4223 map = name##_map(); \
4224 break;
4225 STRUCT_LIST(MAKE_CASE)
4226 #undef MAKE_CASE
4227 default:
4228 UNREACHABLE();
4229 return exception();
4230 }
4231 int size = map->instance_size();
4232 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4233 Struct* result;
4234 {
4235 AllocationResult allocation = Allocate(map, space);
4236 if (!allocation.To(&result)) return allocation;
4237 }
4238 result->InitializeBody(size);
4239 return result;
4240 }
4241
4242
4243 bool Heap::IsHeapIterable() {
4244 // TODO(hpayer): This function is not correct. Allocation folding in old
4245 // space breaks the iterability.
4246 return new_space_top_after_last_gc_ == new_space()->top();
4247 }
4248
4249
4250 void Heap::MakeHeapIterable() {
4251 DCHECK(AllowHeapAllocation::IsAllowed());
4252 if (!IsHeapIterable()) {
4253 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4254 }
4255 if (mark_compact_collector()->sweeping_in_progress()) {
4256 mark_compact_collector()->EnsureSweepingCompleted();
4257 }
4258 DCHECK(IsHeapIterable());
4259 }
4260
4261
4262 void Heap::IdleMarkCompact(const char* message) {
4263 bool uncommit = false;
4264 if (gc_count_at_last_idle_gc_ == gc_count_) {
4265 // No GC since the last full GC, the mutator is probably not active.
4266 isolate_->compilation_cache()->Clear();
4267 uncommit = true;
4268 }
4269 CollectAllGarbage(kReduceMemoryFootprintMask, message);
4270 gc_idle_time_handler_.NotifyIdleMarkCompact();
4271 gc_count_at_last_idle_gc_ = gc_count_;
4272 if (uncommit) {
4273 new_space_.Shrink();
4274 UncommitFromSpace();
4275 }
4276 }
4277
4278
4279 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4280 incremental_marking()->Step(step_size,
4281 IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
4282
4283 if (incremental_marking()->IsComplete()) {
4284 IdleMarkCompact("idle notification: finalize incremental");
4285 }
4286 }
4287
4288
4289 bool Heap::WorthActivatingIncrementalMarking() {
4290 return incremental_marking()->IsStopped() &&
4291 incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
4292 }
4293
4294
4295 bool Heap::IdleNotification(int idle_time_in_ms) {
4296 // If incremental marking is off, we do not perform idle notification.
4297 if (!FLAG_incremental_marking) return true;
4298 base::ElapsedTimer timer;
4299 timer.Start();
4300 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4301 idle_time_in_ms);
4302 HistogramTimerScope idle_notification_scope(
4303 isolate_->counters()->gc_idle_notification());
4304
4305 GCIdleTimeHandler::HeapState heap_state;
4306 heap_state.contexts_disposed = contexts_disposed_;
4307 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4308 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4309 // TODO(ulan): Start incremental marking only for large heaps.
4310 heap_state.can_start_incremental_marking =
4311 incremental_marking()->ShouldActivate();
4312 heap_state.sweeping_in_progress =
4313 mark_compact_collector()->sweeping_in_progress();
4314 heap_state.mark_compact_speed_in_bytes_per_ms =
4315 static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4316 heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4317 tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
4318 heap_state.scavenge_speed_in_bytes_per_ms =
4319 static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4320 heap_state.available_new_space_memory = new_space_.Available();
4321 heap_state.new_space_capacity = new_space_.Capacity();
4322 heap_state.new_space_allocation_throughput_in_bytes_per_ms =
4323 static_cast<size_t>(
4324 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4325
4326 GCIdleTimeAction action =
4327 gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
4328
4329 bool result = false;
4330 switch (action.type) {
4331 case DONE:
4332 result = true;
4333 break;
4334 case DO_INCREMENTAL_MARKING:
4335 if (incremental_marking()->IsStopped()) {
4336 incremental_marking()->Start();
4337 }
4338 AdvanceIdleIncrementalMarking(action.parameter);
4339 break;
4340 case DO_FULL_GC: {
4341 HistogramTimerScope scope(isolate_->counters()->gc_context());
4342 if (contexts_disposed_) {
4343 CollectAllGarbage(kReduceMemoryFootprintMask,
4344 "idle notification: contexts disposed");
4345 gc_idle_time_handler_.NotifyIdleMarkCompact();
4346 gc_count_at_last_idle_gc_ = gc_count_;
4347 } else {
4348 IdleMarkCompact("idle notification: finalize idle round");
4349 }
4350 break;
4351 }
4352 case DO_SCAVENGE:
4353 CollectGarbage(NEW_SPACE, "idle notification: scavenge");
4354 break;
4355 case DO_FINALIZE_SWEEPING:
4356 mark_compact_collector()->EnsureSweepingCompleted();
4357 break;
4358 case DO_NOTHING:
4359 break;
4360 }
4361
4362 int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
4363 if (actual_time_ms <= idle_time_in_ms) {
4364 isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4365 idle_time_in_ms - actual_time_ms);
4366 } else {
4367 isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4368 actual_time_ms - idle_time_in_ms);
4369 }
4370
4371 if (FLAG_trace_idle_notification) {
4372 PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
4373 idle_time_in_ms, actual_time_ms);
4374 action.Print();
4375 PrintF("]\n");
4376 }
4377
4378 contexts_disposed_ = 0;
4379 return result;
4380 }
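// Embedder-facing sketch (the exact public wrapper is assumed here, not quoted
// from this file): embedders forward their idle time through the v8::Isolate
// idle-notification API, e.g.
//
//   bool done = isolate->IdleNotification(10);  // roughly 10 ms of idle time
//
// A true result means V8 considers the current idle round finished.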
4381
4382
4383 #ifdef DEBUG
4384
4385 void Heap::Print() {
4386 if (!HasBeenSetUp()) return;
4387 isolate()->PrintStack(stdout);
4388 AllSpaces spaces(this);
4389 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4390 space->Print();
4391 }
4392 }
4393
4394
4395 void Heap::ReportCodeStatistics(const char* title) {
4396 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4397 PagedSpace::ResetCodeStatistics(isolate());
4398 // We do not look for code in new space, map space, or old space. If code
4399 // somehow ends up in those spaces, we would miss it here.
4400 code_space_->CollectCodeStatistics();
4401 lo_space_->CollectCodeStatistics();
4402 PagedSpace::ReportCodeStatistics(isolate());
4403 }
4404
4405
4406 // This function expects that NewSpace's allocated objects histogram is
4407 // populated (via a call to CollectStatistics or else as a side effect of a
4408 // just-completed scavenge collection).
4409 void Heap::ReportHeapStatistics(const char* title) {
4410 USE(title);
4411 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4412 gc_count_);
4413 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4414 old_generation_allocation_limit_);
4415
4416 PrintF("\n");
4417 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4418 isolate_->global_handles()->PrintStats();
4419 PrintF("\n");
4420
4421 PrintF("Heap statistics : ");
4422 isolate_->memory_allocator()->ReportStatistics();
4423 PrintF("To space : ");
4424 new_space_.ReportStatistics();
4425 PrintF("Old pointer space : ");
4426 old_pointer_space_->ReportStatistics();
4427 PrintF("Old data space : ");
4428 old_data_space_->ReportStatistics();
4429 PrintF("Code space : ");
4430 code_space_->ReportStatistics();
4431 PrintF("Map space : ");
4432 map_space_->ReportStatistics();
4433 PrintF("Cell space : ");
4434 cell_space_->ReportStatistics();
4435 PrintF("PropertyCell space : ");
4436 property_cell_space_->ReportStatistics();
4437 PrintF("Large object space : ");
4438 lo_space_->ReportStatistics();
4439 PrintF(">>>>>> ========================================= >>>>>>\n");
4440 }
4441
4442 #endif // DEBUG
4443
4444 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4445
4446
4447 bool Heap::Contains(Address addr) {
4448 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4449 return HasBeenSetUp() &&
4450 (new_space_.ToSpaceContains(addr) ||
4451 old_pointer_space_->Contains(addr) ||
4452 old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4453 map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4454 property_cell_space_->Contains(addr) ||
4455 lo_space_->SlowContains(addr));
4456 }
4457
4458
4459 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4460 return InSpace(value->address(), space);
4461 }
4462
4463
4464 bool Heap::InSpace(Address addr, AllocationSpace space) {
4465 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4466 if (!HasBeenSetUp()) return false;
4467
4468 switch (space) {
4469 case NEW_SPACE:
4470 return new_space_.ToSpaceContains(addr);
4471 case OLD_POINTER_SPACE:
4472 return old_pointer_space_->Contains(addr);
4473 case OLD_DATA_SPACE:
4474 return old_data_space_->Contains(addr);
4475 case CODE_SPACE:
4476 return code_space_->Contains(addr);
4477 case MAP_SPACE:
4478 return map_space_->Contains(addr);
4479 case CELL_SPACE:
4480 return cell_space_->Contains(addr);
4481 case PROPERTY_CELL_SPACE:
4482 return property_cell_space_->Contains(addr);
4483 case LO_SPACE:
4484 return lo_space_->SlowContains(addr);
4485 case INVALID_SPACE:
4486 break;
4487 }
4488 UNREACHABLE();
4489 return false;
4490 }
4491
4492
4493 #ifdef VERIFY_HEAP
4494 void Heap::Verify() {
4495 CHECK(HasBeenSetUp());
4496 HandleScope scope(isolate());
4497
4498 store_buffer()->Verify();
4499
4500 if (mark_compact_collector()->sweeping_in_progress()) {
4501 // We have to wait here for the sweeper threads to have an iterable heap.
4502 mark_compact_collector()->EnsureSweepingCompleted();
4503 }
4504
4505 VerifyPointersVisitor visitor;
4506 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4507
4508 VerifySmisVisitor smis_visitor;
4509 IterateSmiRoots(&smis_visitor);
4510
4511 new_space_.Verify();
4512
4513 old_pointer_space_->Verify(&visitor);
4514 map_space_->Verify(&visitor);
4515
4516 VerifyPointersVisitor no_dirty_regions_visitor;
4517 old_data_space_->Verify(&no_dirty_regions_visitor);
4518 code_space_->Verify(&no_dirty_regions_visitor);
4519 cell_space_->Verify(&no_dirty_regions_visitor);
4520 property_cell_space_->Verify(&no_dirty_regions_visitor);
4521
4522 lo_space_->Verify();
4523 }
4524 #endif
4525
4526
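// Fill all of from-space with the kFromSpaceZapValue pattern so that stale
// references into from-space are easy to recognize when inspecting memory.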
4527 void Heap::ZapFromSpace() {
4528 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4529 new_space_.FromSpaceEnd());
4530 while (it.has_next()) {
4531 NewSpacePage* page = it.next();
4532 for (Address cursor = page->area_start(), limit = page->area_end();
4533 cursor < limit; cursor += kPointerSize) {
4534 Memory::Address_at(cursor) = kFromSpaceZapValue;
4535 }
4536 }
4537 }
4538
4539
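// Visits every pointer-size slot in [start, end). For slots that point into
// from-space the callback is invoked (it typically updates the slot); if the
// updated value still points into new space the slot is entered into the
// store buffer. If incremental compaction is in progress and the object being
// scanned is black, slots pointing at evacuation candidates are also recorded.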
4540 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
4541 ObjectSlotCallback callback) {
4542 Address slot_address = start;
4543
4544 // We are not collecting slots on new space objects during mutation,
4545 // thus we have to scan for pointers to evacuation candidates when we
4546 // promote objects. But we should not record any slots in non-black
4547 // objects. Grey objects' slots would be rescanned anyway.
4548 // A white object might not survive until the end of the collection, so
4549 // it would be a violation of the invariant to record its slots.
4550 bool record_slots = false;
4551 if (incremental_marking()->IsCompacting()) {
4552 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4553 record_slots = Marking::IsBlack(mark_bit);
4554 }
4555
4556 while (slot_address < end) {
4557 Object** slot = reinterpret_cast<Object**>(slot_address);
4558 Object* object = *slot;
4559 // If the store buffer becomes overfull we mark pages as being exempt from
4560 // the store buffer. These pages are scanned to find pointers that point
4561 // to the new space. In that case we may hit newly promoted objects and
4562 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4563 if (object->IsHeapObject()) {
4564 if (Heap::InFromSpace(object)) {
4565 callback(reinterpret_cast<HeapObject**>(slot),
4566 HeapObject::cast(object));
4567 Object* new_object = *slot;
4568 if (InNewSpace(new_object)) {
4569 SLOW_DCHECK(Heap::InToSpace(new_object));
4570 SLOW_DCHECK(new_object->IsHeapObject());
4571 store_buffer_.EnterDirectlyIntoStoreBuffer(
4572 reinterpret_cast<Address>(slot));
4573 }
4574 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4575 } else if (record_slots &&
4576 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4577 mark_compact_collector()->RecordSlot(slot, slot, object);
4578 }
4579 }
4580 slot_address += kPointerSize;
4581 }
4582 }
4583
4584
4585 #ifdef DEBUG
4586 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4587
4588
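// Returns true if addr lies on one of the pointer fields of a Map, assuming
// Map objects are laid out back-to-back at Map::kSize-aligned offsets (as in
// the map space).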
4589 bool IsAMapPointerAddress(Object** addr) {
4590 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4591 int mod = a % Map::kSize;
4592 return mod >= Map::kPointerFieldsBeginOffset &&
4593 mod < Map::kPointerFieldsEndOffset;
4594 }
4595
4596
4597 bool EverythingsAPointer(Object** addr) { return true; }
4598
4599
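// Walks the words in [current, limit) and checks that every slot holding a
// pointer into new space (and accepted by the filter) has a matching entry in
// the sorted store buffer [*store_buffer_position, store_buffer_top). Free
// space and the current linear allocation area ("special garbage") are skipped.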
4600 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4601 Object**** store_buffer_position,
4602 Object*** store_buffer_top,
4603 CheckStoreBufferFilter filter,
4604 Address special_garbage_start,
4605 Address special_garbage_end) {
4606 Map* free_space_map = heap->free_space_map();
4607 for (; current < limit; current++) {
4608 Object* o = *current;
4609 Address current_address = reinterpret_cast<Address>(current);
4610 // Skip free space.
4611 if (o == free_space_map) {
4612 Address current_address = reinterpret_cast<Address>(current);
4613 FreeSpace* free_space =
4614 FreeSpace::cast(HeapObject::FromAddress(current_address));
4615 int skip = free_space->Size();
4616 DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4617 DCHECK(skip > 0);
4618 current_address += skip - kPointerSize;
4619 current = reinterpret_cast<Object**>(current_address);
4620 continue;
4621 }
4622 // Skip the current linear allocation space between top and limit, which is
4623 // not marked with the free space map but can contain junk.
4624 if (current_address == special_garbage_start &&
4625 special_garbage_end != special_garbage_start) {
4626 current_address = special_garbage_end - kPointerSize;
4627 current = reinterpret_cast<Object**>(current_address);
4628 continue;
4629 }
4630 if (!(*filter)(current)) continue;
4631 DCHECK(current_address < special_garbage_start ||
4632 current_address >= special_garbage_end);
4633 DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4634 // We have to check that the pointer does not point into new space
4635 // without trying to cast it to a heap object since the hash field of
4636 // a string can contain values like 1 and 3 which are tagged null
4637 // pointers.
4638 if (!heap->InNewSpace(o)) continue;
4639 while (**store_buffer_position < current &&
4640 *store_buffer_position < store_buffer_top) {
4641 (*store_buffer_position)++;
4642 }
4643 if (**store_buffer_position != current ||
4644 *store_buffer_position == store_buffer_top) {
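// The slot is missing from the store buffer: walk back to the start of the
// enclosing object so it can be inspected in a debugger, then die.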
4645 Object** obj_start = current;
4646 while (!(*obj_start)->IsMap()) obj_start--;
4647 UNREACHABLE();
4648 }
4649 }
4650 }
4651
4652
4653 // Check that the store buffer contains all intergenerational pointers by
4654 // scanning a page and ensuring that all pointers to young space are in the
4655 // store buffer.
4656 void Heap::OldPointerSpaceCheckStoreBuffer() {
4657 OldSpace* space = old_pointer_space();
4658 PageIterator pages(space);
4659
4660 store_buffer()->SortUniq();
4661
4662 while (pages.has_next()) {
4663 Page* page = pages.next();
4664 Object** current = reinterpret_cast<Object**>(page->area_start());
4665
4666 Address end = page->area_end();
4667
4668 Object*** store_buffer_position = store_buffer()->Start();
4669 Object*** store_buffer_top = store_buffer()->Top();
4670
4671 Object** limit = reinterpret_cast<Object**>(end);
4672 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4673 store_buffer_top, &EverythingsAPointer, space->top(),
4674 space->limit());
4675 }
4676 }
4677
4678
4679 void Heap::MapSpaceCheckStoreBuffer() {
4680 MapSpace* space = map_space();
4681 PageIterator pages(space);
4682
4683 store_buffer()->SortUniq();
4684
4685 while (pages.has_next()) {
4686 Page* page = pages.next();
4687 Object** current = reinterpret_cast<Object**>(page->area_start());
4688
4689 Address end = page->area_end();
4690
4691 Object*** store_buffer_position = store_buffer()->Start();
4692 Object*** store_buffer_top = store_buffer()->Top();
4693
4694 Object** limit = reinterpret_cast<Object**>(end);
4695 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4696 store_buffer_top, &IsAMapPointerAddress, space->top(),
4697 space->limit());
4698 }
4699 }
4700
4701
4702 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4703 LargeObjectIterator it(lo_space());
4704 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4705 // We only have code, sequential strings, or fixed arrays in large
4706 // object space, and only fixed arrays can possibly contain pointers to
4707 // the young generation.
4708 if (object->IsFixedArray()) {
4709 Object*** store_buffer_position = store_buffer()->Start();
4710 Object*** store_buffer_top = store_buffer()->Top();
4711 Object** current = reinterpret_cast<Object**>(object->address());
4712 Object** limit =
4713 reinterpret_cast<Object**>(object->address() + object->Size());
4714 CheckStoreBuffer(this, current, limit, &store_buffer_position,
4715 store_buffer_top, &EverythingsAPointer, NULL, NULL);
4716 }
4717 }
4718 }
4719 #endif
4720
4721
4722 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4723 IterateStrongRoots(v, mode);
4724 IterateWeakRoots(v, mode);
4725 }
4726
4727
4728 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4729 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4730 v->Synchronize(VisitorSynchronization::kStringTable);
4731 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4732 // Scavenge collections have special processing for this.
4733 external_string_table_.Iterate(v);
4734 }
4735 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4736 }
4737
4738
4739 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4740 // Acquire execution access since we are going to read stack limit values.
4741 ExecutionAccess access(isolate());
4742 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4743 v->Synchronize(VisitorSynchronization::kSmiRootList);
4744 }
4745
4746
4747 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4748 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4749 v->Synchronize(VisitorSynchronization::kStrongRootList);
4750
4751 v->VisitPointer(bit_cast<Object**>(&hidden_string_));
4752 v->Synchronize(VisitorSynchronization::kInternalizedString);
4753
4754 isolate_->bootstrapper()->Iterate(v);
4755 v->Synchronize(VisitorSynchronization::kBootstrapper);
4756 isolate_->Iterate(v);
4757 v->Synchronize(VisitorSynchronization::kTop);
4758 Relocatable::Iterate(isolate_, v);
4759 v->Synchronize(VisitorSynchronization::kRelocatable);
4760
4761 if (isolate_->deoptimizer_data() != NULL) {
4762 isolate_->deoptimizer_data()->Iterate(v);
4763 }
4764 v->Synchronize(VisitorSynchronization::kDebug);
4765 isolate_->compilation_cache()->Iterate(v);
4766 v->Synchronize(VisitorSynchronization::kCompilationCache);
4767
4768 // Iterate over local handles in handle scopes.
4769 isolate_->handle_scope_implementer()->Iterate(v);
4770 isolate_->IterateDeferredHandles(v);
4771 v->Synchronize(VisitorSynchronization::kHandleScope);
4772
4773 // Iterate over the builtin code objects and code stubs in the
4774 // heap. Note that it is not necessary to iterate over code objects
4775 // on scavenge collections.
4776 if (mode != VISIT_ALL_IN_SCAVENGE) {
4777 isolate_->builtins()->IterateBuiltins(v);
4778 }
4779 v->Synchronize(VisitorSynchronization::kBuiltins);
4780
4781 // Iterate over global handles.
4782 switch (mode) {
4783 case VISIT_ONLY_STRONG:
4784 isolate_->global_handles()->IterateStrongRoots(v);
4785 break;
4786 case VISIT_ALL_IN_SCAVENGE:
4787 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4788 break;
4789 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4790 case VISIT_ALL:
4791 isolate_->global_handles()->IterateAllRoots(v);
4792 break;
4793 }
4794 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4795
4796 // Iterate over eternal handles.
4797 if (mode == VISIT_ALL_IN_SCAVENGE) {
4798 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4799 } else {
4800 isolate_->eternal_handles()->IterateAllRoots(v);
4801 }
4802 v->Synchronize(VisitorSynchronization::kEternalHandles);
4803
4804 // Iterate over pointers being held by inactive threads.
4805 isolate_->thread_manager()->Iterate(v);
4806 v->Synchronize(VisitorSynchronization::kThreadManager);
4807
4808 // Iterate over the pointers the Serialization/Deserialization code is
4809 // holding.
4810 // During garbage collection this keeps the partial snapshot cache alive.
4811 // During deserialization of the startup snapshot this creates the partial
4812 // snapshot cache and deserializes the objects it refers to. During
4813 // serialization this does nothing, since the partial snapshot cache is
4814 // empty. However the next thing we do is create the partial snapshot,
4815 // filling up the partial snapshot cache with objects it needs as we go.
4816 SerializerDeserializer::Iterate(isolate_, v);
4817 // We don't do a v->Synchronize call here, because in debug mode that will
4818 // output a flag to the snapshot. However at this point the serializer and
4819 // deserializer are deliberately a little unsynchronized (see above) so the
4820 // checking of the sync flag in the snapshot would fail.
4821 }
4822
4823
4824 // TODO(1236194): Since the heap size is configurable on the command line
4825 // and through the API, we should gracefully handle the case that the heap
4826 // size is not big enough to fit all the initial objects.
4827 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4828 int max_executable_size, size_t code_range_size) {
4829 if (HasBeenSetUp()) return false;
4830
4831 // Overwrite default configuration.
4832 if (max_semi_space_size > 0) {
4833 max_semi_space_size_ = max_semi_space_size * MB;
4834 }
4835 if (max_old_space_size > 0) {
4836 max_old_generation_size_ = max_old_space_size * MB;
4837 }
4838 if (max_executable_size > 0) {
4839 max_executable_size_ = max_executable_size * MB;
4840 }
4841
4842 // If max space size flags are specified, overwrite the configuration.
4843 if (FLAG_max_semi_space_size > 0) {
4844 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4845 }
4846 if (FLAG_max_old_space_size > 0) {
4847 max_old_generation_size_ = FLAG_max_old_space_size * MB;
4848 }
4849 if (FLAG_max_executable_size > 0) {
4850 max_executable_size_ = FLAG_max_executable_size * MB;
4851 }
4852
4853 if (FLAG_stress_compaction) {
4854 // This will cause more frequent GCs when stressing.
4855 max_semi_space_size_ = Page::kPageSize;
4856 }
4857
4858 if (Snapshot::HaveASnapshotToStartFrom()) {
4859 // If we are using a snapshot we always reserve the default amount
4860 // of memory for each semispace because code in the snapshot has
4861 // write-barrier code that relies on the size and alignment of new
4862 // space. We therefore cannot use a larger max semispace size
4863 // than the default reserved semispace size.
4864 if (max_semi_space_size_ > reserved_semispace_size_) {
4865 max_semi_space_size_ = reserved_semispace_size_;
4866 if (FLAG_trace_gc) {
4867 PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4868 reserved_semispace_size_ >> 10);
4869 }
4870 }
4871 } else {
4872 // If we are not using snapshots we reserve space for the actual
4873 // max semispace size.
4874 reserved_semispace_size_ = max_semi_space_size_;
4875 }
4876
4877 // The max executable size must be less than or equal to the max old
4878 // generation size.
4879 if (max_executable_size_ > max_old_generation_size_) {
4880 max_executable_size_ = max_old_generation_size_;
4881 }
4882
4883 // The new space size must be a power of two to support single-bit testing
4884 // for containment.
4885 max_semi_space_size_ =
4886 base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
4887 reserved_semispace_size_ =
4888 base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
4889
4890 if (FLAG_min_semi_space_size > 0) {
4891 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4892 if (initial_semispace_size > max_semi_space_size_) {
4893 initial_semispace_size_ = max_semi_space_size_;
4894 if (FLAG_trace_gc) {
4895 PrintPID(
4896 "Min semi-space size cannot be more than the maximum "
4897 "semi-space size of %d MB\n",
4898 max_semi_space_size_);
4899 }
4900 } else {
4901 initial_semispace_size_ = initial_semispace_size;
4902 }
4903 }
4904
4905 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4906
4907 // The old generation is paged and needs at least one page for each space.
4908 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4909 max_old_generation_size_ =
4910 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4911 max_old_generation_size_);
4912
4913 // We rely on being able to allocate new arrays in paged spaces.
4914 DCHECK(Page::kMaxRegularHeapObjectSize >=
4915 (JSArray::kSize +
4916 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
4917 AllocationMemento::kSize));
4918
4919 code_range_size_ = code_range_size * MB;
4920
4921 configured_ = true;
4922 return true;
4923 }
4924
4925
4926 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
4927
4928
4929 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4930 *stats->start_marker = HeapStats::kStartMarker;
4931 *stats->end_marker = HeapStats::kEndMarker;
4932 *stats->new_space_size = new_space_.SizeAsInt();
4933 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4934 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
4935 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4936 *stats->old_data_space_size = old_data_space_->SizeOfObjects();
4937 *stats->old_data_space_capacity = old_data_space_->Capacity();
4938 *stats->code_space_size = code_space_->SizeOfObjects();
4939 *stats->code_space_capacity = code_space_->Capacity();
4940 *stats->map_space_size = map_space_->SizeOfObjects();
4941 *stats->map_space_capacity = map_space_->Capacity();
4942 *stats->cell_space_size = cell_space_->SizeOfObjects();
4943 *stats->cell_space_capacity = cell_space_->Capacity();
4944 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
4945 *stats->property_cell_space_capacity = property_cell_space_->Capacity();
4946 *stats->lo_space_size = lo_space_->Size();
4947 isolate_->global_handles()->RecordStats(stats);
4948 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4949 *stats->memory_allocator_capacity =
4950 isolate()->memory_allocator()->Size() +
4951 isolate()->memory_allocator()->Available();
4952 *stats->os_error = base::OS::GetLastError();
4954 if (take_snapshot) {
4955 HeapIterator iterator(this);
4956 for (HeapObject* obj = iterator.next(); obj != NULL;
4957 obj = iterator.next()) {
4958 InstanceType type = obj->map()->instance_type();
4959 DCHECK(0 <= type && type <= LAST_TYPE);
4960 stats->objects_per_type[type]++;
4961 stats->size_per_type[type] += obj->Size();
4962 }
4963 }
4964 }
4965
4966
4967 intptr_t Heap::PromotedSpaceSizeOfObjects() {
4968 return old_pointer_space_->SizeOfObjects() +
4969 old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
4970 map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
4971 property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
4972 }
4973
4974
4975 int64_t Heap::PromotedExternalMemorySize() {
4976 if (amount_of_external_allocated_memory_ <=
4977 amount_of_external_allocated_memory_at_last_global_gc_)
4978 return 0;
4979 return amount_of_external_allocated_memory_ -
4980 amount_of_external_allocated_memory_at_last_global_gc_;
4981 }
4982
4983
4984 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
4985 int freed_global_handles) {
4986 const int kMaxHandles = 1000;
4987 const int kMinHandles = 100;
4988 double min_factor = 1.1;
4989 double max_factor = 4;
4990 // We set the old generation growing factor to 2 to grow the heap slower on
4991 // memory-constrained devices.
4992 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
4993 max_factor = 2;
4994 }
4995 // If there are many freed global handles, then the next full GC will
4996 // likely collect a lot of garbage. Choose the heap growing factor
4997 // depending on freed global handles.
4998 // TODO(ulan, hpayer): Take into account mutator utilization.
4999 double factor;
5000 if (freed_global_handles <= kMinHandles) {
5001 factor = max_factor;
5002 } else if (freed_global_handles >= kMaxHandles) {
5003 factor = min_factor;
5004 } else {
5005 // Compute factor using linear interpolation between points
5006 // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
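// For example, with min_factor = 1.1, max_factor = 4 and
// freed_global_handles = 550 this yields
// factor = 4 - 450 * 2.9 / 900 = 2.55.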
5007 factor = max_factor -
5008 (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5009 (kMaxHandles - kMinHandles);
5010 }
5011
5012 if (FLAG_stress_compaction ||
5013 mark_compact_collector()->reduce_memory_footprint_) {
5014 factor = min_factor;
5015 }
5016
5017 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5018 limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5019 limit += new_space_.Capacity();
5020 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5021 return Min(limit, halfway_to_the_max);
5022 }
5023
5024
5025 void Heap::EnableInlineAllocation() {
5026 if (!inline_allocation_disabled_) return;
5027 inline_allocation_disabled_ = false;
5028
5029 // Update inline allocation limit for new space.
5030 new_space()->UpdateInlineAllocationLimit(0);
5031 }
5032
5033
5034 void Heap::DisableInlineAllocation() {
5035 if (inline_allocation_disabled_) return;
5036 inline_allocation_disabled_ = true;
5037
5038 // Update inline allocation limit for new space.
5039 new_space()->UpdateInlineAllocationLimit(0);
5040
5041 // Update inline allocation limit for old spaces.
5042 PagedSpaces spaces(this);
5043 for (PagedSpace* space = spaces.next(); space != NULL;
5044 space = spaces.next()) {
5045 space->EmptyAllocationInfo();
5046 }
5047 }
5048
5049
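// Process-wide one-time initialization of the static GC visitor tables used
// by the scavenger and the mark-compact collector; base::CallOnce in
// Heap::SetUp ensures this runs only once even with multiple isolates.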
5050 V8_DECLARE_ONCE(initialize_gc_once);
5051
5052 static void InitializeGCOnce() {
5053 InitializeScavengingVisitorsTables();
5054 NewSpaceScavenger::Initialize();
5055 MarkCompactCollector::Initialize();
5056 }
5057
5058
5059 bool Heap::SetUp() {
5060 #ifdef DEBUG
5061 allocation_timeout_ = FLAG_gc_interval;
5062 #endif
5063
5064 // Initialize heap spaces and initial maps and objects. Whenever something
5065 // goes wrong, just return false. The caller should check the results and
5066 // call Heap::TearDown() to release allocated memory.
5067 //
5068 // If the heap is not yet configured (e.g. through the API), configure it.
5069 // Configuration is based on the flags new-space-size (really the semispace
5070 // size) and old-space-size if set or the initial values of semispace_size_
5071 // and old_generation_size_ otherwise.
5072 if (!configured_) {
5073 if (!ConfigureHeapDefault()) return false;
5074 }
5075
5076 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5077
5078 MarkMapPointersAsEncoded(false);
5079
5080 // Set up memory allocator.
5081 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5082 return false;
5083
5084 // Set up new space.
5085 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5086 return false;
5087 }
5088 new_space_top_after_last_gc_ = new_space()->top();
5089
5090 // Initialize old pointer space.
5091 old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5092 OLD_POINTER_SPACE, NOT_EXECUTABLE);
5093 if (old_pointer_space_ == NULL) return false;
5094 if (!old_pointer_space_->SetUp()) return false;
5095
5096 // Initialize old data space.
5097 old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5098 NOT_EXECUTABLE);
5099 if (old_data_space_ == NULL) return false;
5100 if (!old_data_space_->SetUp()) return false;
5101
5102 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5103
5104 // Initialize the code space, set its maximum capacity to the old
5105 // generation size. It needs executable memory.
5106 code_space_ =
5107 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5108 if (code_space_ == NULL) return false;
5109 if (!code_space_->SetUp()) return false;
5110
5111 // Initialize map space.
5112 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5113 if (map_space_ == NULL) return false;
5114 if (!map_space_->SetUp()) return false;
5115
5116 // Initialize simple cell space.
5117 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5118 if (cell_space_ == NULL) return false;
5119 if (!cell_space_->SetUp()) return false;
5120
5121 // Initialize global property cell space.
5122 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5123 PROPERTY_CELL_SPACE);
5124 if (property_cell_space_ == NULL) return false;
5125 if (!property_cell_space_->SetUp()) return false;
5126
5127 // The large object code space may contain code or data. We set the memory
5128 // to be non-executable here for safety, but this means we need to enable it
5129 // explicitly when allocating large code objects.
5130 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5131 if (lo_space_ == NULL) return false;
5132 if (!lo_space_->SetUp()) return false;
5133
5134 // Set up the seed that is used to randomize the string hash function.
5135 DCHECK(hash_seed() == 0);
5136 if (FLAG_randomize_hashes) {
5137 if (FLAG_hash_seed == 0) {
5138 int rnd = isolate()->random_number_generator()->NextInt();
5139 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5140 } else {
5141 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5142 }
5143 }
5144
5145 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5146 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5147
5148 store_buffer()->SetUp();
5149
5150 mark_compact_collector()->SetUp();
5151
5152 return true;
5153 }
5154
5155
5156 bool Heap::CreateHeapObjects() {
5157 // Create initial maps.
5158 if (!CreateInitialMaps()) return false;
5159 CreateApiObjects();
5160
5161 // Create initial objects
5162 CreateInitialObjects();
5163 CHECK_EQ(0, gc_count_);
5164
5165 set_native_contexts_list(undefined_value());
5166 set_array_buffers_list(undefined_value());
5167 set_allocation_sites_list(undefined_value());
5168 weak_object_to_code_table_ = undefined_value();
5169 return true;
5170 }
5171
5172
5173 void Heap::SetStackLimits() {
5174 DCHECK(isolate_ != NULL);
5175 DCHECK(isolate_ == isolate());
5176 // On 64 bit machines, pointers are generally out of range of Smis. We write
5177 // something that looks like an out of range Smi to the GC.
5178
5179 // Set up the special root array entries containing the stack limits.
5180 // These are actually addresses, but the tag makes the GC ignore it.
5181 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5182 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5183 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5184 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5185 }
5186
5187
5188 void Heap::TearDown() {
5189 #ifdef VERIFY_HEAP
5190 if (FLAG_verify_heap) {
5191 Verify();
5192 }
5193 #endif
5194
5195 UpdateMaximumCommitted();
5196
5197 if (FLAG_print_cumulative_gc_stat) {
5198 PrintF("\n");
5199 PrintF("gc_count=%d ", gc_count_);
5200 PrintF("mark_sweep_count=%d ", ms_count_);
5201 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5202 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5203 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5204 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5205 PrintF("total_marking_time=%.1f ", tracer_.cumulative_marking_duration());
5206 PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5207 PrintF("\n\n");
5208 }
5209
5210 if (FLAG_print_max_heap_committed) {
5211 PrintF("\n");
5212 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5213 MaximumCommittedMemory());
5214 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5215 new_space_.MaximumCommittedMemory());
5216 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5217 old_pointer_space_->MaximumCommittedMemory());
5218 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5219 old_data_space_->MaximumCommittedMemory());
5222 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5223 code_space_->MaximumCommittedMemory());
5224 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5225 map_space_->MaximumCommittedMemory());
5226 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5227 cell_space_->MaximumCommittedMemory());
5228 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5229 property_cell_space_->MaximumCommittedMemory());
5230 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5231 lo_space_->MaximumCommittedMemory());
5232 PrintF("\n\n");
5233 }
5234
5235 if (FLAG_verify_predictable) {
5236 PrintAlloctionsHash();
5237 }
5238
5239 TearDownArrayBuffers();
5240
5241 isolate_->global_handles()->TearDown();
5242
5243 external_string_table_.TearDown();
5244
5245 mark_compact_collector()->TearDown();
5246
5247 new_space_.TearDown();
5248
5249 if (old_pointer_space_ != NULL) {
5250 old_pointer_space_->TearDown();
5251 delete old_pointer_space_;
5252 old_pointer_space_ = NULL;
5253 }
5254
5255 if (old_data_space_ != NULL) {
5256 old_data_space_->TearDown();
5257 delete old_data_space_;
5258 old_data_space_ = NULL;
5259 }
5260
5261 if (code_space_ != NULL) {
5262 code_space_->TearDown();
5263 delete code_space_;
5264 code_space_ = NULL;
5265 }
5266
5267 if (map_space_ != NULL) {
5268 map_space_->TearDown();
5269 delete map_space_;
5270 map_space_ = NULL;
5271 }
5272
5273 if (cell_space_ != NULL) {
5274 cell_space_->TearDown();
5275 delete cell_space_;
5276 cell_space_ = NULL;
5277 }
5278
5279 if (property_cell_space_ != NULL) {
5280 property_cell_space_->TearDown();
5281 delete property_cell_space_;
5282 property_cell_space_ = NULL;
5283 }
5284
5285 if (lo_space_ != NULL) {
5286 lo_space_->TearDown();
5287 delete lo_space_;
5288 lo_space_ = NULL;
5289 }
5290
5291 store_buffer()->TearDown();
5292 incremental_marking()->TearDown();
5293
5294 isolate_->memory_allocator()->TearDown();
5295 }
5296
5297
5298 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5299 GCType gc_type, bool pass_isolate) {
5300 DCHECK(callback != NULL);
5301 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5302 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5303 return gc_prologue_callbacks_.Add(pair);
5304 }
5305
5306
5307 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5308 DCHECK(callback != NULL);
5309 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5310 if (gc_prologue_callbacks_[i].callback == callback) {
5311 gc_prologue_callbacks_.Remove(i);
5312 return;
5313 }
5314 }
5315 UNREACHABLE();
5316 }
5317
5318
5319 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5320 GCType gc_type, bool pass_isolate) {
5321 DCHECK(callback != NULL);
5322 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5323 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5324 return gc_epilogue_callbacks_.Add(pair);
5325 }
5326
5327
5328 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5329 DCHECK(callback != NULL);
5330 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5331 if (gc_epilogue_callbacks_[i].callback == callback) {
5332 gc_epilogue_callbacks_.Remove(i);
5333 return;
5334 }
5335 }
5336 UNREACHABLE();
5337 }
5338
5339
5340 // TODO(ishell): Find a better place for this.
5341 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
5342 Handle<DependentCode> dep) {
5343 DCHECK(!InNewSpace(*obj));
5344 DCHECK(!InNewSpace(*dep));
5345 // This handle scope keeps the table handle local to this function, which
5346 // allows us to safely skip write barriers in table update operations.
5347 HandleScope scope(isolate());
5348 Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
5349 isolate());
5350 table = WeakHashTable::Put(table, obj, dep);
5351
5352 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5353 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5354 }
5355 set_weak_object_to_code_table(*table);
5356 DCHECK_EQ(*dep, table->Lookup(obj));
5357 }
5358
5359
5360 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
5361 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5362 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5363 return DependentCode::cast(empty_fixed_array());
5364 }
5365
5366
5367 void Heap::EnsureWeakObjectToCodeTable() {
5368 if (!weak_object_to_code_table()->IsHashTable()) {
5369 set_weak_object_to_code_table(
5370 *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
5371 TENURED));
5372 }
5373 }
5374
5375
5376 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5377 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5378 }
5379
5380 #ifdef DEBUG
5381
5382 class PrintHandleVisitor : public ObjectVisitor {
5383 public:
5384 void VisitPointers(Object** start, Object** end) {
5385 for (Object** p = start; p < end; p++)
5386 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5387 reinterpret_cast<void*>(*p));
5388 }
5389 };
5390
5391
5392 void Heap::PrintHandles() {
5393 PrintF("Handles:\n");
5394 PrintHandleVisitor v;
5395 isolate_->handle_scope_implementer()->Iterate(&v);
5396 }
5397
5398 #endif
5399
5400
5401 Space* AllSpaces::next() {
5402 switch (counter_++) {
5403 case NEW_SPACE:
5404 return heap_->new_space();
5405 case OLD_POINTER_SPACE:
5406 return heap_->old_pointer_space();
5407 case OLD_DATA_SPACE:
5408 return heap_->old_data_space();
5409 case CODE_SPACE:
5410 return heap_->code_space();
5411 case MAP_SPACE:
5412 return heap_->map_space();
5413 case CELL_SPACE:
5414 return heap_->cell_space();
5415 case PROPERTY_CELL_SPACE:
5416 return heap_->property_cell_space();
5417 case LO_SPACE:
5418 return heap_->lo_space();
5419 default:
5420 return NULL;
5421 }
5422 }
5423
5424
5425 PagedSpace* PagedSpaces::next() {
5426 switch (counter_++) {
5427 case OLD_POINTER_SPACE:
5428 return heap_->old_pointer_space();
5429 case OLD_DATA_SPACE:
5430 return heap_->old_data_space();
5431 case CODE_SPACE:
5432 return heap_->code_space();
5433 case MAP_SPACE:
5434 return heap_->map_space();
5435 case CELL_SPACE:
5436 return heap_->cell_space();
5437 case PROPERTY_CELL_SPACE:
5438 return heap_->property_cell_space();
5439 default:
5440 return NULL;
5441 }
5442 }
5443
5444
5445 OldSpace* OldSpaces::next() {
5446 switch (counter_++) {
5447 case OLD_POINTER_SPACE:
5448 return heap_->old_pointer_space();
5449 case OLD_DATA_SPACE:
5450 return heap_->old_data_space();
5451 case CODE_SPACE:
5452 return heap_->code_space();
5453 default:
5454 return NULL;
5455 }
5456 }
5457
5458
5459 SpaceIterator::SpaceIterator(Heap* heap)
5460 : heap_(heap),
5461 current_space_(FIRST_SPACE),
5462 iterator_(NULL),
5463 size_func_(NULL) {}
5464
5465
5466 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5467 : heap_(heap),
5468 current_space_(FIRST_SPACE),
5469 iterator_(NULL),
5470 size_func_(size_func) {}
5471
5472
5473 SpaceIterator::~SpaceIterator() {
5474 // Delete active iterator if any.
5475 delete iterator_;
5476 }
5477
5478
5479 bool SpaceIterator::has_next() {
5480 // Iterate until no more spaces.
5481 return current_space_ != LAST_SPACE;
5482 }
5483
5484
5485 ObjectIterator* SpaceIterator::next() {
5486 if (iterator_ != NULL) {
5487 delete iterator_;
5488 iterator_ = NULL;
5489 // Move to the next space
5490 current_space_++;
5491 if (current_space_ > LAST_SPACE) {
5492 return NULL;
5493 }
5494 }
5495
5496 // Return iterator for the new current space.
5497 return CreateIterator();
5498 }
5499
5500
5501 // Create an iterator for the space to iterate.
5502 ObjectIterator* SpaceIterator::CreateIterator() {
5503 DCHECK(iterator_ == NULL);
5504
5505 switch (current_space_) {
5506 case NEW_SPACE:
5507 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5508 break;
5509 case OLD_POINTER_SPACE:
5510 iterator_ =
5511 new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5512 break;
5513 case OLD_DATA_SPACE:
5514 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5515 break;
5516 case CODE_SPACE:
5517 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5518 break;
5519 case MAP_SPACE:
5520 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5521 break;
5522 case CELL_SPACE:
5523 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5524 break;
5525 case PROPERTY_CELL_SPACE:
5526 iterator_ =
5527 new HeapObjectIterator(heap_->property_cell_space(), size_func_);
5528 break;
5529 case LO_SPACE:
5530 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
5531 break;
5532 }
5533
5534 // Return the newly allocated iterator;
5535 DCHECK(iterator_ != NULL);
5536 return iterator_;
5537 }
5538
5539
5540 class HeapObjectsFilter {
5541 public:
5542 virtual ~HeapObjectsFilter() {}
5543 virtual bool SkipObject(HeapObject* object) = 0;
5544 };
5545
5546
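// Filter used by HeapIterator: marks everything reachable from the roots up
// front and then reports unmarked (i.e. unreachable) objects as skippable.
// The mark bits are cleared again when the filter is destroyed.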
5547 class UnreachableObjectsFilter : public HeapObjectsFilter {
5548 public:
5549 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5550 MarkReachableObjects();
5551 }
5552
5553 ~UnreachableObjectsFilter() {
5554 heap_->mark_compact_collector()->ClearMarkbits();
5555 }
5556
5557 bool SkipObject(HeapObject* object) {
5558 MarkBit mark_bit = Marking::MarkBitFrom(object);
5559 return !mark_bit.Get();
5560 }
5561
5562 private:
5563 class MarkingVisitor : public ObjectVisitor {
5564 public:
5565 MarkingVisitor() : marking_stack_(10) {}
5566
5567 void VisitPointers(Object** start, Object** end) {
5568 for (Object** p = start; p < end; p++) {
5569 if (!(*p)->IsHeapObject()) continue;
5570 HeapObject* obj = HeapObject::cast(*p);
5571 MarkBit mark_bit = Marking::MarkBitFrom(obj);
5572 if (!mark_bit.Get()) {
5573 mark_bit.Set();
5574 marking_stack_.Add(obj);
5575 }
5576 }
5577 }
5578
5579 void TransitiveClosure() {
5580 while (!marking_stack_.is_empty()) {
5581 HeapObject* obj = marking_stack_.RemoveLast();
5582 obj->Iterate(this);
5583 }
5584 }
5585
5586 private:
5587 List<HeapObject*> marking_stack_;
5588 };
5589
5590 void MarkReachableObjects() {
5591 MarkingVisitor visitor;
5592 heap_->IterateRoots(&visitor, VISIT_ALL);
5593 visitor.TransitiveClosure();
5594 }
5595
5596 Heap* heap_;
5597 DisallowHeapAllocation no_allocation_;
5598 };
5599
5600
5601 HeapIterator::HeapIterator(Heap* heap)
5602 : make_heap_iterable_helper_(heap),
5603 no_heap_allocation_(),
5604 heap_(heap),
5605 filtering_(HeapIterator::kNoFiltering),
5606 filter_(NULL) {
5607 Init();
5608 }
5609
5610
5611 HeapIterator::HeapIterator(Heap* heap,
5612 HeapIterator::HeapObjectsFiltering filtering)
5613 : make_heap_iterable_helper_(heap),
5614 no_heap_allocation_(),
5615 heap_(heap),
5616 filtering_(filtering),
5617 filter_(NULL) {
5618 Init();
5619 }
5620
5621
5622 HeapIterator::~HeapIterator() { Shutdown(); }
5623
5624
5625 void HeapIterator::Init() {
5626 // Start the iteration.
5627 space_iterator_ = new SpaceIterator(heap_);
5628 switch (filtering_) {
5629 case kFilterUnreachable:
5630 filter_ = new UnreachableObjectsFilter(heap_);
5631 break;
5632 default:
5633 break;
5634 }
5635 object_iterator_ = space_iterator_->next();
5636 }
5637
5638
5639 void HeapIterator::Shutdown() {
5640 #ifdef DEBUG
5641 // Assert that in filtering mode we have iterated through all
5642 // objects. Otherwise, heap will be left in an inconsistent state.
5643 if (filtering_ != kNoFiltering) {
5644 DCHECK(object_iterator_ == NULL);
5645 }
5646 #endif
5647 // Make sure the last iterator is deallocated.
5648 delete space_iterator_;
5649 space_iterator_ = NULL;
5650 object_iterator_ = NULL;
5651 delete filter_;
5652 filter_ = NULL;
5653 }
5654
5655
5656 HeapObject* HeapIterator::next() {
5657 if (filter_ == NULL) return NextObject();
5658
5659 HeapObject* obj = NextObject();
5660 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5661 return obj;
5662 }
5663
5664
5665 HeapObject* HeapIterator::NextObject() {
5666 // No iterator means we are done.
5667 if (object_iterator_ == NULL) return NULL;
5668
5669 if (HeapObject* obj = object_iterator_->next_object()) {
5670 // If the current iterator has more objects we are fine.
5671 return obj;
5672 } else {
5673 // Go through the spaces looking for one that has objects.
5674 while (space_iterator_->has_next()) {
5675 object_iterator_ = space_iterator_->next();
5676 if (HeapObject* obj = object_iterator_->next_object()) {
5677 return obj;
5678 }
5679 }
5680 }
5681 // Done with the last space.
5682 object_iterator_ = NULL;
5683 return NULL;
5684 }
5685
5686
5687 void HeapIterator::reset() {
5688 // Restart the iterator.
5689 Shutdown();
5690 Init();
5691 }
5692
5693
5694 #ifdef DEBUG
5695
5696 Object* const PathTracer::kAnyGlobalObject = NULL;
5697
5698 class PathTracer::MarkVisitor : public ObjectVisitor {
5699 public:
5700 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5701 void VisitPointers(Object** start, Object** end) {
5702 // Scan all HeapObject pointers in [start, end)
5703 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5704 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5705 }
5706 }
5707
5708 private:
5709 PathTracer* tracer_;
5710 };
5711
5712
5713 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5714 public:
5715 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5716 void VisitPointers(Object** start, Object** end) {
5717 // Scan all HeapObject pointers in [start, end)
5718 for (Object** p = start; p < end; p++) {
5719 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5720 }
5721 }
5722
5723 private:
5724 PathTracer* tracer_;
5725 };
5726
5727
5728 void PathTracer::VisitPointers(Object** start, Object** end) {
5729 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5730 // Visit all HeapObject pointers in [start, end)
5731 for (Object** p = start; !done && (p < end); p++) {
5732 if ((*p)->IsHeapObject()) {
5733 TracePathFrom(p);
5734 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5735 }
5736 }
5737 }
5738
5739
5740 void PathTracer::Reset() {
5741 found_target_ = false;
5742 object_stack_.Clear();
5743 }
5744
5745
5746 void PathTracer::TracePathFrom(Object** root) {
5747 DCHECK((search_target_ == kAnyGlobalObject) ||
5748 search_target_->IsHeapObject());
5749 found_target_in_trace_ = false;
5750 Reset();
5751
5752 MarkVisitor mark_visitor(this);
5753 MarkRecursively(root, &mark_visitor);
5754
5755 UnmarkVisitor unmark_visitor(this);
5756 UnmarkRecursively(root, &unmark_visitor);
5757
5758 ProcessResults();
5759 }
5760
5761
5762 static bool SafeIsNativeContext(HeapObject* obj) {
5763 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5764 }
5765
5766
5767 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5768 if (!(*p)->IsHeapObject()) return;
5769
5770 HeapObject* obj = HeapObject::cast(*p);
5771
5772 MapWord map_word = obj->map_word();
5773 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
5774
5775 if (found_target_in_trace_) return; // stop if target found
5776 object_stack_.Add(obj);
5777 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5778 (obj == search_target_)) {
5779 found_target_in_trace_ = true;
5780 found_target_ = true;
5781 return;
5782 }
5783
5784 bool is_native_context = SafeIsNativeContext(obj);
5785
5786 // not visited yet
5787 Map* map = Map::cast(map_word.ToMap());
5788
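// Mark the object as visited by tagging its map word; UnmarkRecursively
// undoes this by subtracting kMarkTag again.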
5789 MapWord marked_map_word =
5790 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5791 obj->set_map_word(marked_map_word);
5792
5793 // Scan the object body.
5794 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5795 // This is specialized to scan Contexts properly.
5796 Object** start =
5797 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
5798 Object** end =
5799 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
5800 Context::FIRST_WEAK_SLOT * kPointerSize);
5801 mark_visitor->VisitPointers(start, end);
5802 } else {
5803 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5804 }
5805
5806 // Scan the map after the body because the body is a lot more interesting
5807 // when doing leak detection.
5808 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5809
5810 if (!found_target_in_trace_) { // don't pop if found the target
5811 object_stack_.RemoveLast();
5812 }
5813 }
5814
5815
5816 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5817 if (!(*p)->IsHeapObject()) return;
5818
5819 HeapObject* obj = HeapObject::cast(*p);
5820
5821 MapWord map_word = obj->map_word();
5822 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
5823
5824 MapWord unmarked_map_word =
5825 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5826 obj->set_map_word(unmarked_map_word);
5827
5828 Map* map = Map::cast(unmarked_map_word.ToMap());
5829
5830 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5831
5832 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5833 }
5834
5835
5836 void PathTracer::ProcessResults() {
5837 if (found_target_) {
5838 OFStream os(stdout);
5839 os << "=====================================\n"
5840 << "==== Path to object ====\n"
5841 << "=====================================\n\n";
5842
5843 DCHECK(!object_stack_.is_empty());
5844 for (int i = 0; i < object_stack_.length(); i++) {
5845 if (i > 0) os << "\n |\n |\n V\n\n";
5846 object_stack_[i]->Print(os);
5847 }
5848 os << "=====================================\n";
5849 }
5850 }
5851
5852
5853 // Triggers a depth-first traversal of reachable objects from one
5854 // given root object and finds a path to a specific heap object and
5855 // prints it.
5856 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5857 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5858 tracer.VisitPointer(&root);
5859 }
5860
5861
5862 // Triggers a depth-first traversal of reachable objects from roots
5863 // and finds a path to a specific heap object and prints it.
5864 void Heap::TracePathToObject(Object* target) {
5865 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5866 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5867 }
5868
5869
5870 // Triggers a depth-first traversal of reachable objects from roots
5871 // and finds a path to any global object and prints it. Useful for
5872 // determining the source for leaks of global objects.
5873 void Heap::TracePathToGlobal() {
5874 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
5875 VISIT_ALL);
5876 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5877 }
5878 #endif
5879
5880
5881 void Heap::UpdateCumulativeGCStatistics(double duration,
5882 double spent_in_mutator,
5883 double marking_time) {
5884 if (FLAG_print_cumulative_gc_stat) {
5885 total_gc_time_ms_ += duration;
5886 max_gc_pause_ = Max(max_gc_pause_, duration);
5887 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
5888 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
5889 } else if (FLAG_trace_gc_verbose) {
5890 total_gc_time_ms_ += duration;
5891 }
5892
5893 marking_time_ += marking_time;
5894 }
5895
5896
5897 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
5898 DisallowHeapAllocation no_gc;
5899 // Uses only lower 32 bits if pointers are larger.
5900 uintptr_t addr_hash =
5901 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
5902 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
5903 }
5904
5905
5906 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
5907 DisallowHeapAllocation no_gc;
5908 int index = (Hash(map, name) & kHashMask);
5909 for (int i = 0; i < kEntriesPerBucket; i++) {
5910 Key& key = keys_[index + i];
5911 if ((key.map == *map) && key.name->Equals(*name)) {
5912 return field_offsets_[index + i];
5913 }
5914 }
5915 return kNotFound;
5916 }
5917
5918
5919 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
5920 int field_offset) {
5921 DisallowHeapAllocation no_gc;
5922 if (!name->IsUniqueName()) {
5923 if (!StringTable::InternalizeStringIfExists(
5924 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
5925 return;
5926 }
5927 }
5928 // This cache is cleared only between mark compact passes, so we expect the
5929 // cache to only contain old space names.
5930 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
5931
5932 int index = (Hash(map, name) & kHashMask);
5933 // After a GC there will be free slots, so we use them in order (this may
5934 // help to get the most frequently used one in position 0).
5935 for (int i = 0; i < kEntriesPerBucket; i++) {
5936 Key& key = keys_[index + i];
5937 Object* free_entry_indicator = NULL;
5938 if (key.map == free_entry_indicator) {
5939 key.map = *map;
5940 key.name = *name;
5941 field_offsets_[index + i] = field_offset;
5942 return;
5943 }
5944 }
5945 // No free entry found in this bucket, so we move them all down one and
5946 // put the new entry at position zero.
5947 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
5948 Key& key = keys_[index + i];
5949 Key& key2 = keys_[index + i - 1];
5950 key = key2;
5951 field_offsets_[index + i] = field_offsets_[index + i - 1];
5952 }
5953
5954 // Write the new first entry.
5955 Key& key = keys_[index];
5956 key.map = *map;
5957 key.name = *name;
5958 field_offsets_[index] = field_offset;
5959 }
5960
5961
5962 void KeyedLookupCache::Clear() {
5963 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5964 }
5965
5966
5967 void DescriptorLookupCache::Clear() {
5968 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
5969 }
5970
5971
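// Drops entries that have been cleared (replaced by the hole value) and moves
// strings that were promoted out of new space from new_space_strings_ to
// old_space_strings_.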
5972 void ExternalStringTable::CleanUp() {
5973 int last = 0;
5974 for (int i = 0; i < new_space_strings_.length(); ++i) {
5975 if (new_space_strings_[i] == heap_->the_hole_value()) {
5976 continue;
5977 }
5978 DCHECK(new_space_strings_[i]->IsExternalString());
5979 if (heap_->InNewSpace(new_space_strings_[i])) {
5980 new_space_strings_[last++] = new_space_strings_[i];
5981 } else {
5982 old_space_strings_.Add(new_space_strings_[i]);
5983 }
5984 }
5985 new_space_strings_.Rewind(last);
5986 new_space_strings_.Trim();
5987
5988 last = 0;
5989 for (int i = 0; i < old_space_strings_.length(); ++i) {
5990 if (old_space_strings_[i] == heap_->the_hole_value()) {
5991 continue;
5992 }
5993 DCHECK(old_space_strings_[i]->IsExternalString());
5994 DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
5995 old_space_strings_[last++] = old_space_strings_[i];
5996 }
5997 old_space_strings_.Rewind(last);
5998 old_space_strings_.Trim();
5999 #ifdef VERIFY_HEAP
6000 if (FLAG_verify_heap) {
6001 Verify();
6002 }
6003 #endif
6004 }
6005
6006
6007 void ExternalStringTable::TearDown() {
6008 for (int i = 0; i < new_space_strings_.length(); ++i) {
6009 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6010 }
6011 new_space_strings_.Free();
6012 for (int i = 0; i < old_space_strings_.length(); ++i) {
6013 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6014 }
6015 old_space_strings_.Free();
6016 }
6017
6018
6019 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6020 chunk->set_next_chunk(chunks_queued_for_free_);
6021 chunks_queued_for_free_ = chunk;
6022 }
6023
6024
6025 void Heap::FreeQueuedChunks() {
6026 if (chunks_queued_for_free_ == NULL) return;
6027 MemoryChunk* next;
6028 MemoryChunk* chunk;
6029 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6030 next = chunk->next_chunk();
6031 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6032
6033 if (chunk->owner()->identity() == LO_SPACE) {
6034 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6035 // If FromAnyPointerAddress encounters a slot that belongs to a large
6036 // chunk queued for deletion, it will fail to find the chunk because
6037 // it tries to search the list of pages owned by the large object space,
6038 // while the queued chunks were detached from that list.
6039 // To work around this we split the large chunk into normal kPageSize
6040 // aligned pieces and initialize the size, owner and flags of every piece.
6041 // If FromAnyPointerAddress encounters a slot that belongs to one of
6042 // these smaller pieces it will treat it as a slot on a normal Page.
6043 Address chunk_end = chunk->address() + chunk->size();
6044 MemoryChunk* inner =
6045 MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6046 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6047 while (inner <= inner_last) {
6048 // Size of a large chunk is always a multiple of
6049 // OS::AllocateAlignment() so there is always
6050 // enough space for a fake MemoryChunk header.
6051 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6052 // Guard against overflow.
6053 if (area_end < inner->address()) area_end = chunk_end;
6054 inner->SetArea(inner->address(), area_end);
6055 inner->set_size(Page::kPageSize);
6056 inner->set_owner(lo_space());
6057 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6058 inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6059 }
6060 }
6061 }
6062 isolate_->heap()->store_buffer()->Compact();
6063 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6064 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6065 next = chunk->next_chunk();
6066 isolate_->memory_allocator()->Free(chunk);
6067 }
6068 chunks_queued_for_free_ = NULL;
6069 }
6070
6071
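// Records a tagged copy of |page| in a small ring buffer so that recently
// unmapped pages can still be identified in a dump file.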
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


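// Zeroes the per-GC object count and size histograms; when
// |clear_last_time_stats| is true the checkpointed copies are reset as well.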
void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


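// Guards CheckpointObjectStats() below; the mutex is process-wide, so
// concurrent checkpoints (e.g. from different isolates) are serialized.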
static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


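// Publishes the difference between the current object statistics and the
// statistics recorded at the previous checkpoint to the isolate's counters,
// then saves the current values as the new baseline.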
void Heap::CheckpointObjectStats() {
  base::LockGuard<base::Mutex> lock_guard(
      checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  counters->count_of_##name()->Increment(                 \
      static_cast<int>(object_counts_[name]));            \
  counters->count_of_##name()->Decrement(                 \
      static_cast<int>(object_counts_last_time_[name]));  \
  counters->size_of_##name()->Increment(                  \
      static_cast<int>(object_sizes_[name]));             \
  counters->size_of_##name()->Decrement(                  \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;           \
  counters->count_of_CODE_TYPE_##name()->Increment(        \
      static_cast<int>(object_counts_[index]));            \
  counters->count_of_CODE_TYPE_##name()->Decrement(        \
      static_cast<int>(object_counts_last_time_[index]));  \
  counters->size_of_CODE_TYPE_##name()->Increment(         \
      static_cast<int>(object_sizes_[index]));             \
  counters->size_of_CODE_TYPE_##name()->Decrement(         \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;               \
  counters->count_of_FIXED_ARRAY_##name()->Increment(      \
      static_cast<int>(object_counts_[index]));            \
  counters->count_of_FIXED_ARRAY_##name()->Decrement(      \
      static_cast<int>(object_counts_last_time_[index]));  \
  counters->size_of_FIXED_ARRAY_##name()->Increment(       \
      static_cast<int>(object_sizes_[index]));             \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(       \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
  index =                                                                     \
      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
  counters->count_of_CODE_AGE_##name()->Increment(                           \
      static_cast<int>(object_counts_[index]));                              \
  counters->count_of_CODE_AGE_##name()->Decrement(                           \
      static_cast<int>(object_counts_last_time_[index]));                    \
  counters->size_of_CODE_AGE_##name()->Increment(                            \
      static_cast<int>(object_sizes_[index]));                               \
  counters->size_of_CODE_AGE_##name()->Decrement(                            \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
  ClearObjectStats();
}
}
}  // namespace v8::internal