1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "isolate-inl.h"
42 #include "mark-compact.h"
43 #include "natives.h"
44 #include "objects-visiting.h"
45 #include "objects-visiting-inl.h"
46 #include "once.h"
47 #include "runtime-profiler.h"
48 #include "scopeinfo.h"
49 #include "snapshot.h"
50 #include "store-buffer.h"
51 #include "utils/random-number-generator.h"
52 #include "v8threads.h"
53 #include "v8utils.h"
54 #include "vm-state-inl.h"
55 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
56 #include "regexp-macro-assembler.h"
57 #include "arm/regexp-macro-assembler-arm.h"
58 #endif
59 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
60 #include "regexp-macro-assembler.h"
61 #include "mips/regexp-macro-assembler-mips.h"
62 #endif
63 
64 namespace v8 {
65 namespace internal {
66 
67 
68 Heap::Heap()
69     : isolate_(NULL),
70       code_range_size_(kIs64BitArch ? 512 * MB : 0),
71 // semispace_size_ should be a power of 2 and old_generation_size_ should be
72 // a multiple of Page::kPageSize.
73       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
74       max_semispace_size_(8 * (kPointerSize / 4) * MB),
75       initial_semispace_size_(Page::kPageSize),
76       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
77       max_executable_size_(256ul * (kPointerSize / 4) * MB),
78 // Variables set based on semispace_size_ and old_generation_size_ in
79 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
80 // Will be 4 * reserved_semispace_size_ to ensure that young
81 // generation can be aligned to its size.
82       maximum_committed_(0),
83       survived_since_last_expansion_(0),
84       sweep_generation_(0),
85       always_allocate_scope_depth_(0),
86       linear_allocation_scope_depth_(0),
87       contexts_disposed_(0),
88       global_ic_age_(0),
89       flush_monomorphic_ics_(false),
90       scan_on_scavenge_pages_(0),
91       new_space_(this),
92       old_pointer_space_(NULL),
93       old_data_space_(NULL),
94       code_space_(NULL),
95       map_space_(NULL),
96       cell_space_(NULL),
97       property_cell_space_(NULL),
98       lo_space_(NULL),
99       gc_state_(NOT_IN_GC),
100       gc_post_processing_depth_(0),
101       ms_count_(0),
102       gc_count_(0),
103       remembered_unmapped_pages_index_(0),
104       unflattened_strings_length_(0),
105 #ifdef DEBUG
106       allocation_timeout_(0),
107       disallow_allocation_failure_(false),
108 #endif  // DEBUG
109       new_space_high_promotion_mode_active_(false),
110       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
111       size_of_old_gen_at_last_old_space_gc_(0),
112       external_allocation_limit_(0),
113       amount_of_external_allocated_memory_(0),
114       amount_of_external_allocated_memory_at_last_global_gc_(0),
115       old_gen_exhausted_(false),
116       inline_allocation_disabled_(false),
117       store_buffer_rebuilder_(store_buffer()),
118       hidden_string_(NULL),
119       gc_safe_size_of_old_object_(NULL),
120       total_regexp_code_generated_(0),
121       tracer_(NULL),
122       young_survivors_after_last_gc_(0),
123       high_survival_rate_period_length_(0),
124       low_survival_rate_period_length_(0),
125       survival_rate_(0),
126       previous_survival_rate_trend_(Heap::STABLE),
127       survival_rate_trend_(Heap::STABLE),
128       max_gc_pause_(0.0),
129       total_gc_time_ms_(0.0),
130       max_alive_after_gc_(0),
131       min_in_mutator_(kMaxInt),
132       alive_after_last_gc_(0),
133       last_gc_end_timestamp_(0.0),
134       marking_time_(0.0),
135       sweeping_time_(0.0),
136       store_buffer_(this),
137       marking_(this),
138       incremental_marking_(this),
139       number_idle_notifications_(0),
140       last_idle_notification_gc_count_(0),
141       last_idle_notification_gc_count_init_(false),
142       mark_sweeps_since_idle_round_started_(0),
143       gc_count_at_last_idle_gc_(0),
144       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
145       full_codegen_bytes_generated_(0),
146       crankshaft_codegen_bytes_generated_(0),
147       gcs_since_last_deopt_(0),
148 #ifdef VERIFY_HEAP
149       no_weak_object_verification_scope_depth_(0),
150 #endif
151       promotion_queue_(this),
152       configured_(false),
153       chunks_queued_for_free_(NULL),
154       relocation_mutex_(NULL) {
155   // Allow build-time customization of the max semispace size. Building
156   // V8 with snapshots and a non-default max semispace size is much
157   // easier if you can define it as part of the build environment.
158 #if defined(V8_MAX_SEMISPACE_SIZE)
159   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
160 #endif
161 
162   // Ensure old_generation_size_ is a multiple of kPageSize.
163   ASSERT(MB >= Page::kPageSize);
164 
165   intptr_t max_virtual = OS::MaxVirtualMemory();
166 
167   if (max_virtual > 0) {
168     if (code_range_size_ > 0) {
169       // Reserve no more than 1/8 of the memory for the code range.
170       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
171     }
172   }
173 
174   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
175   native_contexts_list_ = NULL;
176   array_buffers_list_ = Smi::FromInt(0);
177   allocation_sites_list_ = Smi::FromInt(0);
178   mark_compact_collector_.heap_ = this;
179   external_string_table_.heap_ = this;
180   // Put a dummy entry in the remembered pages so we can find the list in
181   // the minidump even if there are no real unmapped pages.
182   RememberUnmappedPage(NULL, false);
183 
184   ClearObjectStats(true);
185 }
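// Note on the pointer-size-scaled defaults above: kPointerSize / 4 is 1 on
// 32-bit targets and 2 on 64-bit targets, so the reserved/max semispace
// defaults work out to 8 MB vs. 16 MB, the max old generation size to
// 700 MB vs. 1400 MB, and the max executable size to 256 MB vs. 512 MB.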
186 
187 
188 intptr_t Heap::Capacity() {
189   if (!HasBeenSetUp()) return 0;
190 
191   return new_space_.Capacity() +
192       old_pointer_space_->Capacity() +
193       old_data_space_->Capacity() +
194       code_space_->Capacity() +
195       map_space_->Capacity() +
196       cell_space_->Capacity() +
197       property_cell_space_->Capacity();
198 }
199 
200 
201 intptr_t Heap::CommittedMemory() {
202   if (!HasBeenSetUp()) return 0;
203 
204   return new_space_.CommittedMemory() +
205       old_pointer_space_->CommittedMemory() +
206       old_data_space_->CommittedMemory() +
207       code_space_->CommittedMemory() +
208       map_space_->CommittedMemory() +
209       cell_space_->CommittedMemory() +
210       property_cell_space_->CommittedMemory() +
211       lo_space_->Size();
212 }
213 
214 
215 size_t Heap::CommittedPhysicalMemory() {
216   if (!HasBeenSetUp()) return 0;
217 
218   return new_space_.CommittedPhysicalMemory() +
219       old_pointer_space_->CommittedPhysicalMemory() +
220       old_data_space_->CommittedPhysicalMemory() +
221       code_space_->CommittedPhysicalMemory() +
222       map_space_->CommittedPhysicalMemory() +
223       cell_space_->CommittedPhysicalMemory() +
224       property_cell_space_->CommittedPhysicalMemory() +
225       lo_space_->CommittedPhysicalMemory();
226 }
227 
228 
229 intptr_t Heap::CommittedMemoryExecutable() {
230   if (!HasBeenSetUp()) return 0;
231 
232   return isolate()->memory_allocator()->SizeExecutable();
233 }
234 
235 
236 void Heap::UpdateMaximumCommitted() {
237   if (!HasBeenSetUp()) return;
238 
239   intptr_t current_committed_memory = CommittedMemory();
240   if (current_committed_memory > maximum_committed_) {
241     maximum_committed_ = current_committed_memory;
242   }
243 }
244 
245 
246 intptr_t Heap::Available() {
247   if (!HasBeenSetUp()) return 0;
248 
249   return new_space_.Available() +
250       old_pointer_space_->Available() +
251       old_data_space_->Available() +
252       code_space_->Available() +
253       map_space_->Available() +
254       cell_space_->Available() +
255       property_cell_space_->Available();
256 }
257 
258 
259 bool Heap::HasBeenSetUp() {
260   return old_pointer_space_ != NULL &&
261          old_data_space_ != NULL &&
262          code_space_ != NULL &&
263          map_space_ != NULL &&
264          cell_space_ != NULL &&
265          property_cell_space_ != NULL &&
266          lo_space_ != NULL;
267 }
268 
269 
270 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
271   if (IntrusiveMarking::IsMarked(object)) {
272     return IntrusiveMarking::SizeOfMarkedObject(object);
273   }
274   return object->SizeFromMap(object->map());
275 }
276 
277 
278 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
279                                               const char** reason) {
280   // Is global GC requested?
281   if (space != NEW_SPACE) {
282     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
283     *reason = "GC in old space requested";
284     return MARK_COMPACTOR;
285   }
286 
287   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
288     *reason = "GC in old space forced by flags";
289     return MARK_COMPACTOR;
290   }
291 
292   // Is enough data promoted to justify a global GC?
293   if (OldGenerationAllocationLimitReached()) {
294     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
295     *reason = "promotion limit reached";
296     return MARK_COMPACTOR;
297   }
298 
299   // Have allocation in OLD and LO failed?
300   if (old_gen_exhausted_) {
301     isolate_->counters()->
302         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
303     *reason = "old generations exhausted";
304     return MARK_COMPACTOR;
305   }
306 
307   // Is there enough space left in OLD to guarantee that a scavenge can
308   // succeed?
309   //
310   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
311   // for object promotion. It counts only the bytes that the memory
312   // allocator has not yet allocated from the OS and assigned to any space,
313   // and does not count available bytes already in the old space or code
314   // space.  Undercounting is safe---we may get an unrequested full GC when
315   // a scavenge would have succeeded.
316   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
317     isolate_->counters()->
318         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
319     *reason = "scavenge might not succeed";
320     return MARK_COMPACTOR;
321   }
322 
323   // Default
324   *reason = NULL;
325   return SCAVENGER;
326 }
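// Illustration of the undercounting note above (hypothetical numbers): if the
// memory allocator still has 4 MB unassigned to any space while the old-space
// free lists hold another 6 MB, MaxAvailable() reports only 4 MB.  A new space
// holding 5 MB of objects then selects MARK_COMPACTOR, even though a scavenge
// promoting everything would in fact have fit.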
327 
328 
329 // TODO(1238405): Combine the infrastructure for --heap-stats and
330 // --log-gc to avoid the complicated preprocessor and flag testing.
331 void Heap::ReportStatisticsBeforeGC() {
332   // Heap::ReportHeapStatistics will also log NewSpace statistics when
333   // --log-gc is set.  The following logic is used to avoid double
334   // logging.
335 #ifdef DEBUG
336   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
337   if (FLAG_heap_stats) {
338     ReportHeapStatistics("Before GC");
339   } else if (FLAG_log_gc) {
340     new_space_.ReportStatistics();
341   }
342   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
343 #else
344   if (FLAG_log_gc) {
345     new_space_.CollectStatistics();
346     new_space_.ReportStatistics();
347     new_space_.ClearHistograms();
348   }
349 #endif  // DEBUG
350 }
351 
352 
353 void Heap::PrintShortHeapStatistics() {
354   if (!FLAG_trace_gc_verbose) return;
355   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
356                ", available: %6" V8_PTR_PREFIX "d KB\n",
357            isolate_->memory_allocator()->Size() / KB,
358            isolate_->memory_allocator()->Available() / KB);
359   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
360                ", available: %6" V8_PTR_PREFIX "d KB"
361                ", committed: %6" V8_PTR_PREFIX "d KB\n",
362            new_space_.Size() / KB,
363            new_space_.Available() / KB,
364            new_space_.CommittedMemory() / KB);
365   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
366                ", available: %6" V8_PTR_PREFIX "d KB"
367                ", committed: %6" V8_PTR_PREFIX "d KB\n",
368            old_pointer_space_->SizeOfObjects() / KB,
369            old_pointer_space_->Available() / KB,
370            old_pointer_space_->CommittedMemory() / KB);
371   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
372                ", available: %6" V8_PTR_PREFIX "d KB"
373                ", committed: %6" V8_PTR_PREFIX "d KB\n",
374            old_data_space_->SizeOfObjects() / KB,
375            old_data_space_->Available() / KB,
376            old_data_space_->CommittedMemory() / KB);
377   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
378                ", available: %6" V8_PTR_PREFIX "d KB"
379                ", committed: %6" V8_PTR_PREFIX "d KB\n",
380            code_space_->SizeOfObjects() / KB,
381            code_space_->Available() / KB,
382            code_space_->CommittedMemory() / KB);
383   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
384                ", available: %6" V8_PTR_PREFIX "d KB"
385                ", committed: %6" V8_PTR_PREFIX "d KB\n",
386            map_space_->SizeOfObjects() / KB,
387            map_space_->Available() / KB,
388            map_space_->CommittedMemory() / KB);
389   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
390                ", available: %6" V8_PTR_PREFIX "d KB"
391                ", committed: %6" V8_PTR_PREFIX "d KB\n",
392            cell_space_->SizeOfObjects() / KB,
393            cell_space_->Available() / KB,
394            cell_space_->CommittedMemory() / KB);
395   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
396                ", available: %6" V8_PTR_PREFIX "d KB"
397                ", committed: %6" V8_PTR_PREFIX "d KB\n",
398            property_cell_space_->SizeOfObjects() / KB,
399            property_cell_space_->Available() / KB,
400            property_cell_space_->CommittedMemory() / KB);
401   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
402                ", available: %6" V8_PTR_PREFIX "d KB"
403                ", committed: %6" V8_PTR_PREFIX "d KB\n",
404            lo_space_->SizeOfObjects() / KB,
405            lo_space_->Available() / KB,
406            lo_space_->CommittedMemory() / KB);
407   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
408                ", available: %6" V8_PTR_PREFIX "d KB"
409                ", committed: %6" V8_PTR_PREFIX "d KB\n",
410            this->SizeOfObjects() / KB,
411            this->Available() / KB,
412            this->CommittedMemory() / KB);
413   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
414            static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
415   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
416 }
417 
418 
419 // TODO(1238405): Combine the infrastructure for --heap-stats and
420 // --log-gc to avoid the complicated preprocessor and flag testing.
421 void Heap::ReportStatisticsAfterGC() {
422   // As with the pre-GC report, we use some complicated logic to ensure that
423   // NewSpace statistics are logged exactly once when --log-gc is turned on.
424 #if defined(DEBUG)
425   if (FLAG_heap_stats) {
426     new_space_.CollectStatistics();
427     ReportHeapStatistics("After GC");
428   } else if (FLAG_log_gc) {
429     new_space_.ReportStatistics();
430   }
431 #else
432   if (FLAG_log_gc) new_space_.ReportStatistics();
433 #endif  // DEBUG
434 }
435 
436 
437 void Heap::GarbageCollectionPrologue() {
438   {  AllowHeapAllocation for_the_first_part_of_prologue;
439     isolate_->transcendental_cache()->Clear();
440     ClearJSFunctionResultCaches();
441     gc_count_++;
442     unflattened_strings_length_ = 0;
443 
444     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
445       mark_compact_collector()->EnableCodeFlushing(true);
446     }
447 
448 #ifdef VERIFY_HEAP
449     if (FLAG_verify_heap) {
450       Verify();
451     }
452 #endif
453   }
454 
455   UpdateMaximumCommitted();
456 
457 #ifdef DEBUG
458   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
459 
460   if (FLAG_gc_verbose) Print();
461 
462   ReportStatisticsBeforeGC();
463 #endif  // DEBUG
464 
465   store_buffer()->GCPrologue();
466 
467   if (isolate()->concurrent_osr_enabled()) {
468     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
469   }
470 }
471 
472 
473 intptr_t Heap::SizeOfObjects() {
474   intptr_t total = 0;
475   AllSpaces spaces(this);
476   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
477     total += space->SizeOfObjects();
478   }
479   return total;
480 }
481 
482 
483 void Heap::ClearAllICsByKind(Code::Kind kind) {
484   HeapObjectIterator it(code_space());
485 
486   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
487     Code* code = Code::cast(object);
488     Code::Kind current_kind = code->kind();
489     if (current_kind == Code::FUNCTION ||
490         current_kind == Code::OPTIMIZED_FUNCTION) {
491       code->ClearInlineCaches(kind);
492     }
493   }
494 }
495 
496 
497 void Heap::RepairFreeListsAfterBoot() {
498   PagedSpaces spaces(this);
499   for (PagedSpace* space = spaces.next();
500        space != NULL;
501        space = spaces.next()) {
502     space->RepairFreeListsAfterBoot();
503   }
504 }
505 
506 
507 void Heap::GarbageCollectionEpilogue() {
508   if (FLAG_allocation_site_pretenuring) {
509     int tenure_decisions = 0;
510     int dont_tenure_decisions = 0;
511     int allocation_mementos_found = 0;
512 
513     Object* cur = allocation_sites_list();
514     while (cur->IsAllocationSite()) {
515       AllocationSite* casted = AllocationSite::cast(cur);
516       allocation_mementos_found += casted->memento_found_count()->value();
517       if (casted->DigestPretenuringFeedback()) {
518         if (casted->GetPretenureMode() == TENURED) {
519           tenure_decisions++;
520         } else {
521           dont_tenure_decisions++;
522         }
523       }
524       cur = casted->weak_next();
525     }
526 
527     // TODO(mvstanton): Pretenure decisions are only made once for an allocation
528     // site. Find a sane way to decide about revisiting the decision later.
529 
530     if (FLAG_trace_track_allocation_sites &&
531         (allocation_mementos_found > 0 ||
532          tenure_decisions > 0 ||
533          dont_tenure_decisions > 0)) {
534       PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
535              "(%d, %d, %d)\n",
536              allocation_mementos_found,
537              tenure_decisions,
538              dont_tenure_decisions);
539     }
540   }
541 
542   store_buffer()->GCEpilogue();
543 
544   // In release mode, we only zap the from space under heap verification.
545   if (Heap::ShouldZapGarbage()) {
546     ZapFromSpace();
547   }
548 
549 #ifdef VERIFY_HEAP
550   if (FLAG_verify_heap) {
551     Verify();
552   }
553 #endif
554 
555   AllowHeapAllocation for_the_rest_of_the_epilogue;
556 
557 #ifdef DEBUG
558   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
559   if (FLAG_print_handles) PrintHandles();
560   if (FLAG_gc_verbose) Print();
561   if (FLAG_code_stats) ReportCodeStatistics("After GC");
562 #endif
563   if (FLAG_deopt_every_n_garbage_collections > 0) {
564     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
565       Deoptimizer::DeoptimizeAll(isolate());
566       gcs_since_last_deopt_ = 0;
567     }
568   }
569 
570   UpdateMaximumCommitted();
571 
572   isolate_->counters()->alive_after_last_gc()->Set(
573       static_cast<int>(SizeOfObjects()));
574 
575   isolate_->counters()->string_table_capacity()->Set(
576       string_table()->Capacity());
577   isolate_->counters()->number_of_symbols()->Set(
578       string_table()->NumberOfElements());
579 
580   if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
581     isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
582         static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
583             (crankshaft_codegen_bytes_generated_
584             + full_codegen_bytes_generated_)));
585   }
586 
587   if (CommittedMemory() > 0) {
588     isolate_->counters()->external_fragmentation_total()->AddSample(
589         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
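    // For example (hypothetical numbers): 60 MB of live objects in 100 MB of
    // committed memory yields a fragmentation sample of 100 - 60 = 40.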
590 
591     isolate_->counters()->heap_fraction_new_space()->
592         AddSample(static_cast<int>(
593             (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
594     isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
595         static_cast<int>(
596             (old_pointer_space()->CommittedMemory() * 100.0) /
597             CommittedMemory()));
598     isolate_->counters()->heap_fraction_old_data_space()->AddSample(
599         static_cast<int>(
600             (old_data_space()->CommittedMemory() * 100.0) /
601             CommittedMemory()));
602     isolate_->counters()->heap_fraction_code_space()->
603         AddSample(static_cast<int>(
604             (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
605     isolate_->counters()->heap_fraction_map_space()->AddSample(
606         static_cast<int>(
607             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
608     isolate_->counters()->heap_fraction_cell_space()->AddSample(
609         static_cast<int>(
610             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
611     isolate_->counters()->heap_fraction_property_cell_space()->
612         AddSample(static_cast<int>(
613             (property_cell_space()->CommittedMemory() * 100.0) /
614             CommittedMemory()));
615     isolate_->counters()->heap_fraction_lo_space()->
616         AddSample(static_cast<int>(
617             (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
618 
619     isolate_->counters()->heap_sample_total_committed()->AddSample(
620         static_cast<int>(CommittedMemory() / KB));
621     isolate_->counters()->heap_sample_total_used()->AddSample(
622         static_cast<int>(SizeOfObjects() / KB));
623     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
624         static_cast<int>(map_space()->CommittedMemory() / KB));
625     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
626         static_cast<int>(cell_space()->CommittedMemory() / KB));
627     isolate_->counters()->
628         heap_sample_property_cell_space_committed()->
629             AddSample(static_cast<int>(
630                 property_cell_space()->CommittedMemory() / KB));
631     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
632         static_cast<int>(code_space()->CommittedMemory() / KB));
633 
634     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
635         static_cast<int>(MaximumCommittedMemory() / KB));
636   }
637 
638 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
639   isolate_->counters()->space##_bytes_available()->Set(                        \
640       static_cast<int>(space()->Available()));                                 \
641   isolate_->counters()->space##_bytes_committed()->Set(                        \
642       static_cast<int>(space()->CommittedMemory()));                           \
643   isolate_->counters()->space##_bytes_used()->Set(                             \
644       static_cast<int>(space()->SizeOfObjects()));
645 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
646   if (space()->CommittedMemory() > 0) {                                        \
647     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
648         static_cast<int>(100 -                                                 \
649             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
650   }
651 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
652   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
653   UPDATE_FRAGMENTATION_FOR_SPACE(space)
654 
655   UPDATE_COUNTERS_FOR_SPACE(new_space)
656   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
657   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
658   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
659   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
660   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
661   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
662   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
663 #undef UPDATE_COUNTERS_FOR_SPACE
664 #undef UPDATE_FRAGMENTATION_FOR_SPACE
665 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
666 
667 #if defined(DEBUG)
668   ReportStatisticsAfterGC();
669 #endif  // DEBUG
670 #ifdef ENABLE_DEBUGGER_SUPPORT
671   isolate_->debug()->AfterGarbageCollection();
672 #endif  // ENABLE_DEBUGGER_SUPPORT
673 }
674 
675 
676 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
677   // Since we are ignoring the return value, the exact choice of space does
678   // not matter, so long as we do not specify NEW_SPACE, which would not
679   // cause a full GC.
680   mark_compact_collector_.SetFlags(flags);
681   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
682   mark_compact_collector_.SetFlags(kNoGCFlags);
683 }
684 
685 
686 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
687   // Since we are ignoring the return value, the exact choice of space does
688   // not matter, so long as we do not specify NEW_SPACE, which would not
689   // cause a full GC.
690   // Major GC would invoke weak handle callbacks on weakly reachable
691   // handles, but won't collect weakly reachable objects until next
692   // major GC.  Therefore if we collect aggressively and weak handle callback
693   // has been invoked, we rerun major GC to release objects which become
694   // garbage.
695   // Note: as weak callbacks can execute arbitrary code, we cannot
696   // hope that eventually there will be no weak callbacks invocations.
697   // Therefore stop recollecting after several attempts.
698   if (isolate()->concurrent_recompilation_enabled()) {
699     // The optimizing compiler may be unnecessarily holding on to memory.
700     DisallowHeapAllocation no_recursive_gc;
701     isolate()->optimizing_compiler_thread()->Flush();
702   }
703   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
704                                      kReduceMemoryFootprintMask);
705   isolate_->compilation_cache()->Clear();
706   const int kMaxNumberOfAttempts = 7;
707   const int kMinNumberOfAttempts = 2;
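  // The loop below always performs at least kMinNumberOfAttempts collections,
  // stops early once a collection reports that the next GC is unlikely to
  // free more memory, and caps out at kMaxNumberOfAttempts in total.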
708   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
709     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
710         attempt + 1 >= kMinNumberOfAttempts) {
711       break;
712     }
713   }
714   mark_compact_collector()->SetFlags(kNoGCFlags);
715   new_space_.Shrink();
716   UncommitFromSpace();
717   incremental_marking()->UncommitMarkingDeque();
718 }
719 
720 
721 bool Heap::CollectGarbage(AllocationSpace space,
722                           GarbageCollector collector,
723                           const char* gc_reason,
724                           const char* collector_reason) {
725   // The VM is in the GC state until exiting this function.
726   VMState<GC> state(isolate_);
727 
728 #ifdef DEBUG
729   // Reset the allocation timeout to the GC interval, but make sure to
730   // allow at least a few allocations after a collection. The reason
731   // for this is that we have a lot of allocation sequences and we
732   // assume that a garbage collection will allow the subsequent
733   // allocation attempts to go through.
734   allocation_timeout_ = Max(6, FLAG_gc_interval);
735 #endif
736 
737   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
738     if (FLAG_trace_incremental_marking) {
739       PrintF("[IncrementalMarking] Scavenge during marking.\n");
740     }
741   }
742 
743   if (collector == MARK_COMPACTOR &&
744       !mark_compact_collector()->abort_incremental_marking() &&
745       !incremental_marking()->IsStopped() &&
746       !incremental_marking()->should_hurry() &&
747       FLAG_incremental_marking_steps) {
748     // Make progress in incremental marking.
749     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
750     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
751                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
752     if (!incremental_marking()->IsComplete()) {
753       if (FLAG_trace_incremental_marking) {
754         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
755       }
756       collector = SCAVENGER;
757       collector_reason = "incremental marking delaying mark-sweep";
758     }
759   }
760 
761   bool next_gc_likely_to_collect_more = false;
762 
763   { GCTracer tracer(this, gc_reason, collector_reason);
764     ASSERT(AllowHeapAllocation::IsAllowed());
765     DisallowHeapAllocation no_allocation_during_gc;
766     GarbageCollectionPrologue();
767     // The GC count was incremented in the prologue.  Tell the tracer about
768     // it.
769     tracer.set_gc_count(gc_count_);
770 
771     // Tell the tracer which collector we've selected.
772     tracer.set_collector(collector);
773 
774     {
775       HistogramTimerScope histogram_timer_scope(
776           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
777                                    : isolate_->counters()->gc_compactor());
778       next_gc_likely_to_collect_more =
779           PerformGarbageCollection(collector, &tracer);
780     }
781 
782     GarbageCollectionEpilogue();
783   }
784 
785   // Start incremental marking for the next cycle. The heap snapshot
786   // generator needs incremental marking to stay off after it aborted.
787   if (!mark_compact_collector()->abort_incremental_marking() &&
788       incremental_marking()->IsStopped() &&
789       incremental_marking()->WorthActivating() &&
790       NextGCIsLikelyToBeFull()) {
791     incremental_marking()->Start();
792   }
793 
794   return next_gc_likely_to_collect_more;
795 }
796 
797 
798 int Heap::NotifyContextDisposed() {
799   if (isolate()->concurrent_recompilation_enabled()) {
800     // Flush the queued recompilation tasks.
801     isolate()->optimizing_compiler_thread()->Flush();
802   }
803   flush_monomorphic_ics_ = true;
804   AgeInlineCaches();
805   return ++contexts_disposed_;
806 }
807 
808 
809 void Heap::PerformScavenge() {
810   GCTracer tracer(this, NULL, NULL);
811   if (incremental_marking()->IsStopped()) {
812     PerformGarbageCollection(SCAVENGER, &tracer);
813   } else {
814     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
815   }
816 }
817 
818 
819 void Heap::MoveElements(FixedArray* array,
820                         int dst_index,
821                         int src_index,
822                         int len) {
823   if (len == 0) return;
824 
825   ASSERT(array->map() != fixed_cow_array_map());
826   Object** dst_objects = array->data_start() + dst_index;
827   OS::MemMove(dst_objects,
828               array->data_start() + src_index,
829               len * kPointerSize);
830   if (!InNewSpace(array)) {
831     for (int i = 0; i < len; i++) {
832       // TODO(hpayer): check store buffer for entries
833       if (InNewSpace(dst_objects[i])) {
834         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
835       }
836     }
837   }
838   incremental_marking()->RecordWrites(array);
839 }
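// Note on the loop above: when the array itself lives in old space but a
// moved slot now refers to a new-space object, RecordWrite re-registers that
// slot in the store buffer so the next scavenge still finds the old-to-new
// pointer.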
840 
841 
842 #ifdef VERIFY_HEAP
843 // Helper class for verifying the string table.
844 class StringTableVerifier : public ObjectVisitor {
845  public:
846   void VisitPointers(Object** start, Object** end) {
847     // Visit all HeapObject pointers in [start, end).
848     for (Object** p = start; p < end; p++) {
849       if ((*p)->IsHeapObject()) {
850         // Check that the string is actually internalized.
851         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
852               (*p)->IsInternalizedString());
853       }
854     }
855   }
856 };
857 
858 
859 static void VerifyStringTable(Heap* heap) {
860   StringTableVerifier verifier;
861   heap->string_table()->IterateElements(&verifier);
862 }
863 #endif  // VERIFY_HEAP
864 
865 
866 static bool AbortIncrementalMarkingAndCollectGarbage(
867     Heap* heap,
868     AllocationSpace space,
869     const char* gc_reason = NULL) {
870   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
871   bool result = heap->CollectGarbage(space, gc_reason);
872   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
873   return result;
874 }
875 
876 
877 void Heap::ReserveSpace(int *sizes, Address *locations_out) {
878   bool gc_performed = true;
879   int counter = 0;
880   static const int kThreshold = 20;
881   while (gc_performed && counter++ < kThreshold) {
882     gc_performed = false;
883     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
884     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
885       if (sizes[space] != 0) {
886         MaybeObject* allocation;
887         if (space == NEW_SPACE) {
888           allocation = new_space()->AllocateRaw(sizes[space]);
889         } else {
890           allocation = paged_space(space)->AllocateRaw(sizes[space]);
891         }
892         FreeListNode* node;
893         if (!allocation->To<FreeListNode>(&node)) {
894           if (space == NEW_SPACE) {
895             Heap::CollectGarbage(NEW_SPACE,
896                                  "failed to reserve space in the new space");
897           } else {
898             AbortIncrementalMarkingAndCollectGarbage(
899                 this,
900                 static_cast<AllocationSpace>(space),
901                 "failed to reserve space in paged space");
902           }
903           gc_performed = true;
904           break;
905         } else {
906           // Mark with a free list node, in case we have a GC before
907           // deserializing.
908           node->set_size(this, sizes[space]);
909           locations_out[space] = node->address();
910         }
911       }
912     }
913   }
914 
915   if (gc_performed) {
916     // Failed to reserve the space after several attempts.
917     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
918   }
919 }
920 
921 
922 void Heap::EnsureFromSpaceIsCommitted() {
923   if (new_space_.CommitFromSpaceIfNeeded()) return;
924 
925   // Committing memory to from space failed.
926   // Memory is exhausted and we will die.
927   V8::FatalProcessOutOfMemory("Committing semi space failed.");
928 }
929 
930 
931 void Heap::ClearJSFunctionResultCaches() {
932   if (isolate_->bootstrapper()->IsActive()) return;
933 
934   Object* context = native_contexts_list_;
935   while (!context->IsUndefined()) {
936     // Get the caches for this context. GC can happen when the context
937     // is not fully initialized, so the caches can be undefined.
938     Object* caches_or_undefined =
939         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
940     if (!caches_or_undefined->IsUndefined()) {
941       FixedArray* caches = FixedArray::cast(caches_or_undefined);
942       // Clear the caches:
943       int length = caches->length();
944       for (int i = 0; i < length; i++) {
945         JSFunctionResultCache::cast(caches->get(i))->Clear();
946       }
947     }
948     // Get the next context:
949     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
950   }
951 }
952 
953 
954 void Heap::ClearNormalizedMapCaches() {
955   if (isolate_->bootstrapper()->IsActive() &&
956       !incremental_marking()->IsMarking()) {
957     return;
958   }
959 
960   Object* context = native_contexts_list_;
961   while (!context->IsUndefined()) {
962     // GC can happen when the context is not fully initialized,
963     // so the cache can be undefined.
964     Object* cache =
965         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
966     if (!cache->IsUndefined()) {
967       NormalizedMapCache::cast(cache)->Clear();
968     }
969     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
970   }
971 }
972 
973 
974 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
975   if (start_new_space_size == 0) return;
976 
977   double survival_rate =
978       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
979       start_new_space_size;
980 
981   if (survival_rate > kYoungSurvivalRateHighThreshold) {
982     high_survival_rate_period_length_++;
983   } else {
984     high_survival_rate_period_length_ = 0;
985   }
986 
987   if (survival_rate < kYoungSurvivalRateLowThreshold) {
988     low_survival_rate_period_length_++;
989   } else {
990     low_survival_rate_period_length_ = 0;
991   }
992 
993   double survival_rate_diff = survival_rate_ - survival_rate;
994 
995   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
996     set_survival_rate_trend(DECREASING);
997   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
998     set_survival_rate_trend(INCREASING);
999   } else {
1000     set_survival_rate_trend(STABLE);
1001   }
1002 
1003   survival_rate_ = survival_rate;
1004 }
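// Worked example (hypothetical numbers): if the new space held 100 KB before
// a scavenge and 30 KB of that survived, survival_rate is 30.  With a
// previous survival_rate_ of 60 the difference of +30 exceeds the allowed
// deviation, so the trend is classified as DECREASING.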
1005 
1006 bool Heap::PerformGarbageCollection(GarbageCollector collector,
1007                                     GCTracer* tracer) {
1008   bool next_gc_likely_to_collect_more = false;
1009 
1010   if (collector != SCAVENGER) {
1011     PROFILE(isolate_, CodeMovingGCEvent());
1012   }
1013 
1014 #ifdef VERIFY_HEAP
1015   if (FLAG_verify_heap) {
1016     VerifyStringTable(this);
1017   }
1018 #endif
1019 
1020   GCType gc_type =
1021       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1022 
1023   {
1024     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1025     VMState<EXTERNAL> state(isolate_);
1026     HandleScope handle_scope(isolate_);
1027     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1028   }
1029 
1030   EnsureFromSpaceIsCommitted();
1031 
1032   int start_new_space_size = Heap::new_space()->SizeAsInt();
1033 
1034   if (IsHighSurvivalRate()) {
1035     // We speed up the incremental marker if it is running so that it
1036     // does not fall behind the rate of promotion, which would cause a
1037     // constantly growing old space.
1038     incremental_marking()->NotifyOfHighPromotionRate();
1039   }
1040 
1041   if (collector == MARK_COMPACTOR) {
1042     // Perform mark-sweep with optional compaction.
1043     MarkCompact(tracer);
1044     sweep_generation_++;
1045 
1046     UpdateSurvivalRateTrend(start_new_space_size);
1047 
1048     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
1049 
1050     old_generation_allocation_limit_ =
1051         OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
1052 
1053     old_gen_exhausted_ = false;
1054   } else {
1055     tracer_ = tracer;
1056     Scavenge();
1057     tracer_ = NULL;
1058 
1059     UpdateSurvivalRateTrend(start_new_space_size);
1060   }
1061 
1062   if (!new_space_high_promotion_mode_active_ &&
1063       new_space_.Capacity() == new_space_.MaximumCapacity() &&
1064       IsStableOrIncreasingSurvivalTrend() &&
1065       IsHighSurvivalRate()) {
1066     // Stable high survival rates even though young generation is at
1067     // maximum capacity indicates that most objects will be promoted.
1068     // To decrease scavenger pauses and final mark-sweep pauses, we
1069     // have to limit maximal capacity of the young generation.
1070     SetNewSpaceHighPromotionModeActive(true);
1071     if (FLAG_trace_gc) {
1072       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
1073                new_space_.InitialCapacity() / MB);
1074     }
1075     // Support for global pre-tenuring uses the high promotion mode as a
1076     // heuristic indicator of whether to pretenure or not, we trigger
1077     // deoptimization here to take advantage of pre-tenuring as soon as
1078     // possible.
1079     if (FLAG_pretenuring) {
1080       isolate_->stack_guard()->FullDeopt();
1081     }
1082   } else if (new_space_high_promotion_mode_active_ &&
1083       IsStableOrDecreasingSurvivalTrend() &&
1084       IsLowSurvivalRate()) {
1085     // Decreasing low survival rates might indicate that the above high
1086     // promotion mode is over and we should allow the young generation
1087     // to grow again.
1088     SetNewSpaceHighPromotionModeActive(false);
1089     if (FLAG_trace_gc) {
1090       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
1091                new_space_.MaximumCapacity() / MB);
1092     }
1093     // Trigger deoptimization here to turn off pre-tenuring as soon as
1094     // possible.
1095     if (FLAG_pretenuring) {
1096       isolate_->stack_guard()->FullDeopt();
1097     }
1098   }
1099 
1100   if (new_space_high_promotion_mode_active_ &&
1101       new_space_.Capacity() > new_space_.InitialCapacity()) {
1102     new_space_.Shrink();
1103   }
1104 
1105   isolate_->counters()->objs_since_last_young()->Set(0);
1106 
1107   // Callbacks that fire after this point might trigger nested GCs and
1108   // restart incremental marking, the assertion can't be moved down.
1109   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1110 
1111   gc_post_processing_depth_++;
1112   { AllowHeapAllocation allow_allocation;
1113     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1114     next_gc_likely_to_collect_more =
1115         isolate_->global_handles()->PostGarbageCollectionProcessing(
1116             collector, tracer);
1117   }
1118   gc_post_processing_depth_--;
1119 
1120   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1121 
1122   // Update relocatables.
1123   Relocatable::PostGarbageCollectionProcessing(isolate_);
1124 
1125   if (collector == MARK_COMPACTOR) {
1126     // Register the amount of external allocated memory.
1127     amount_of_external_allocated_memory_at_last_global_gc_ =
1128         amount_of_external_allocated_memory_;
1129   }
1130 
1131   {
1132     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1133     VMState<EXTERNAL> state(isolate_);
1134     HandleScope handle_scope(isolate_);
1135     CallGCEpilogueCallbacks(gc_type);
1136   }
1137 
1138 #ifdef VERIFY_HEAP
1139   if (FLAG_verify_heap) {
1140     VerifyStringTable(this);
1141   }
1142 #endif
1143 
1144   return next_gc_likely_to_collect_more;
1145 }
1146 
1147 
1148 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1149   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1150     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1151       if (!gc_prologue_callbacks_[i].pass_isolate_) {
1152         v8::GCPrologueCallback callback =
1153             reinterpret_cast<v8::GCPrologueCallback>(
1154                 gc_prologue_callbacks_[i].callback);
1155         callback(gc_type, flags);
1156       } else {
1157         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1158         gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1159       }
1160     }
1161   }
1162 }
1163 
1164 
1165 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1166   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1167     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1168       if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1169         v8::GCPrologueCallback callback =
1170             reinterpret_cast<v8::GCPrologueCallback>(
1171                 gc_epilogue_callbacks_[i].callback);
1172         callback(gc_type, kNoGCCallbackFlags);
1173       } else {
1174         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1175         gc_epilogue_callbacks_[i].callback(
1176             isolate, gc_type, kNoGCCallbackFlags);
1177       }
1178     }
1179   }
1180 }
1181 
1182 
1183 void Heap::MarkCompact(GCTracer* tracer) {
1184   gc_state_ = MARK_COMPACT;
1185   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1186 
1187   mark_compact_collector_.Prepare(tracer);
1188 
1189   ms_count_++;
1190   tracer->set_full_gc_count(ms_count_);
1191 
1192   MarkCompactPrologue();
1193 
1194   mark_compact_collector_.CollectGarbage();
1195 
1196   LOG(isolate_, ResourceEvent("markcompact", "end"));
1197 
1198   gc_state_ = NOT_IN_GC;
1199 
1200   isolate_->counters()->objs_since_last_full()->Set(0);
1201 
1202   flush_monomorphic_ics_ = false;
1203 }
1204 
1205 
1206 void Heap::MarkCompactPrologue() {
1207   // At any old GC clear the keyed lookup cache to enable collection of unused
1208   // maps.
1209   isolate_->keyed_lookup_cache()->Clear();
1210   isolate_->context_slot_cache()->Clear();
1211   isolate_->descriptor_lookup_cache()->Clear();
1212   RegExpResultsCache::Clear(string_split_cache());
1213   RegExpResultsCache::Clear(regexp_multiple_cache());
1214 
1215   isolate_->compilation_cache()->MarkCompactPrologue();
1216 
1217   CompletelyClearInstanceofCache();
1218 
1219   FlushNumberStringCache();
1220   if (FLAG_cleanup_code_caches_at_gc) {
1221     polymorphic_code_cache()->set_cache(undefined_value());
1222   }
1223 
1224   ClearNormalizedMapCaches();
1225 }
1226 
1227 
1228 // Helper class for copying HeapObjects
1229 class ScavengeVisitor: public ObjectVisitor {
1230  public:
1231   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1232 
1233   void VisitPointer(Object** p) { ScavengePointer(p); }
1234 
1235   void VisitPointers(Object** start, Object** end) {
1236     // Copy all HeapObject pointers in [start, end)
1237     for (Object** p = start; p < end; p++) ScavengePointer(p);
1238   }
1239 
1240  private:
1241   void ScavengePointer(Object** p) {
1242     Object* object = *p;
1243     if (!heap_->InNewSpace(object)) return;
1244     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1245                          reinterpret_cast<HeapObject*>(object));
1246   }
1247 
1248   Heap* heap_;
1249 };
1250 
1251 
1252 #ifdef VERIFY_HEAP
1253 // Visitor class to verify pointers in code or data space do not point into
1254 // new space.
1255 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1256  public:
1257   explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1258   void VisitPointers(Object** start, Object** end) {
1259     for (Object** current = start; current < end; current++) {
1260       if ((*current)->IsHeapObject()) {
1261         CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1262       }
1263     }
1264   }
1265 
1266  private:
1267   Heap* heap_;
1268 };
1269 
1270 
1271 static void VerifyNonPointerSpacePointers(Heap* heap) {
1272   // Verify that there are no pointers to new space in spaces where we
1273   // do not expect them.
1274   VerifyNonPointerSpacePointersVisitor v(heap);
1275   HeapObjectIterator code_it(heap->code_space());
1276   for (HeapObject* object = code_it.Next();
1277        object != NULL; object = code_it.Next())
1278     object->Iterate(&v);
1279 
1280   // The old data space was normally swept conservatively so that the iterator
1281   // doesn't work, so we normally skip the next bit.
1282   if (!heap->old_data_space()->was_swept_conservatively()) {
1283     HeapObjectIterator data_it(heap->old_data_space());
1284     for (HeapObject* object = data_it.Next();
1285          object != NULL; object = data_it.Next())
1286       object->Iterate(&v);
1287   }
1288 }
1289 #endif  // VERIFY_HEAP
1290 
1291 
1292 void Heap::CheckNewSpaceExpansionCriteria() {
1293   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1294       survived_since_last_expansion_ > new_space_.Capacity() &&
1295       !new_space_high_promotion_mode_active_) {
1296     // Grow the size of new space if there is room to grow, enough data
1297     // has survived scavenge since the last expansion and we are not in
1298     // high promotion mode.
1299     new_space_.Grow();
1300     survived_since_last_expansion_ = 0;
1301   }
1302 }
1303 
1304 
1305 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1306   return heap->InNewSpace(*p) &&
1307       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1308 }
1309 
1310 
1311 void Heap::ScavengeStoreBufferCallback(
1312     Heap* heap,
1313     MemoryChunk* page,
1314     StoreBufferEvent event) {
1315   heap->store_buffer_rebuilder_.Callback(page, event);
1316 }
1317 
1318 
1319 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1320   if (event == kStoreBufferStartScanningPagesEvent) {
1321     start_of_current_page_ = NULL;
1322     current_page_ = NULL;
1323   } else if (event == kStoreBufferScanningPageEvent) {
1324     if (current_page_ != NULL) {
1325       // If this page already overflowed the store buffer during this iteration.
1326       if (current_page_->scan_on_scavenge()) {
1327         // Then we should wipe out the entries that have been added for it.
1328         store_buffer_->SetTop(start_of_current_page_);
1329       } else if (store_buffer_->Top() - start_of_current_page_ >=
1330                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1331         // Did we find too many pointers in the previous page?  The heuristic is
1332         // that no page can take more than 1/5 of the remaining slots in the store
1333         // buffer.
1334         current_page_->set_scan_on_scavenge(true);
1335         store_buffer_->SetTop(start_of_current_page_);
1336       } else {
1337         // In this case the page we scanned took a reasonable number of slots in
1338         // the store buffer.  It has now been rehabilitated and is no longer
1339         // marked scan_on_scavenge.
1340         ASSERT(!current_page_->scan_on_scavenge());
1341       }
1342     }
1343     start_of_current_page_ = store_buffer_->Top();
1344     current_page_ = page;
1345   } else if (event == kStoreBufferFullEvent) {
1346     // The current page overflowed the store buffer again.  Wipe out its entries
1347     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1348     // several times while scanning.
1349     if (current_page_ == NULL) {
1350       // Store Buffer overflowed while scanning promoted objects.  These are not
1351       // in any particular page, though they are likely to be clustered by the
1352       // allocation routines.
1353       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1354     } else {
1355       // Store Buffer overflowed while scanning a particular old space page for
1356       // pointers to new space.
1357       ASSERT(current_page_ == page);
1358       ASSERT(page != NULL);
1359       current_page_->set_scan_on_scavenge(true);
1360       ASSERT(start_of_current_page_ != store_buffer_->Top());
1361       store_buffer_->SetTop(start_of_current_page_);
1362     }
1363   } else {
1364     UNREACHABLE();
1365   }
1366 }
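// Note on the 1/5 heuristic above: a page is flipped to scan-on-scavenge once
// the entries recorded for it reach a quarter of the slots still free behind
// them (the >> 2 test), i.e. once the page accounts for at least roughly one
// fifth of the store-buffer capacity that remained when its scan began.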
1367 
1368 
1369 void PromotionQueue::Initialize() {
1370   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1371   // entries (where each is a pair of intptr_t). This allows us to simplify
1372   // the test for when to switch pages.
1373   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1374          == 0);
1375   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1376   front_ = rear_ =
1377       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1378   emergency_stack_ = NULL;
1379   guard_ = false;
1380 }
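// Note: the promotion queue shares to-space with the scavenger's copy area.
// Queue entries grow downward from ToSpaceEnd() (front_/rear_) while objects
// are copied upward from ToSpaceStart(); RelocateQueueHead() below moves the
// queued entries on the page being reached into emergency_stack_ so the two
// regions do not overwrite each other.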
1381 
1382 
1383 void PromotionQueue::RelocateQueueHead() {
1384   ASSERT(emergency_stack_ == NULL);
1385 
1386   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1387   intptr_t* head_start = rear_;
1388   intptr_t* head_end =
1389       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1390 
1391   int entries_count =
1392       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1393 
1394   emergency_stack_ = new List<Entry>(2 * entries_count);
1395 
1396   while (head_start != head_end) {
1397     int size = static_cast<int>(*(head_start++));
1398     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1399     emergency_stack_->Add(Entry(obj, size));
1400   }
1401   rear_ = head_end;
1402 }
1403 
1404 
1405 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1406  public:
1407   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1408 
1409   virtual Object* RetainAs(Object* object) {
1410     if (!heap_->InFromSpace(object)) {
1411       return object;
1412     }
1413 
1414     MapWord map_word = HeapObject::cast(object)->map_word();
1415     if (map_word.IsForwardingAddress()) {
1416       return map_word.ToForwardingAddress();
1417     }
1418     return NULL;
1419   }
1420 
1421  private:
1422   Heap* heap_;
1423 };
1424 
1425 
1426 void Heap::Scavenge() {
1427   RelocationLock relocation_lock(this);
1428 
1429 #ifdef VERIFY_HEAP
1430   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1431 #endif
1432 
1433   gc_state_ = SCAVENGE;
1434 
1435   // Implements Cheney's copying algorithm
1436   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1437 
1438   // Clear descriptor cache.
1439   isolate_->descriptor_lookup_cache()->Clear();
1440 
1441   // Used for updating survived_since_last_expansion_ at function end.
1442   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1443 
1444   CheckNewSpaceExpansionCriteria();
1445 
1446   SelectScavengingVisitorsTable();
1447 
1448   incremental_marking()->PrepareForScavenge();
1449 
1450   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1451   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1452 
1453   // Flip the semispaces.  After flipping, to space is empty, from space has
1454   // live objects.
1455   new_space_.Flip();
1456   new_space_.ResetAllocationInfo();
1457 
1458   // We need to sweep newly copied objects which can be either in the
1459   // to space or promoted to the old generation.  For to-space
1460   // objects, we treat the bottom of the to space as a queue.  Newly
1461   // copied and unswept objects lie between a 'front' mark and the
1462   // allocation pointer.
1463   //
1464   // Promoted objects can go into various old-generation spaces, and
1465   // can be allocated internally in the spaces (from the free list).
1466   // We treat the top of the to space as a queue of addresses of
1467   // promoted objects.  The addresses of newly promoted and unswept
1468   // objects lie between a 'front' mark and a 'rear' mark that is
1469   // updated as a side effect of promoting an object.
1470   //
1471   // There is guaranteed to be enough room at the top of the to space
1472   // for the addresses of promoted objects: every object promoted
1473   // frees up its size in bytes from the top of the new space, and
1474   // objects are at least one pointer in size.
1475   Address new_space_front = new_space_.ToSpaceStart();
1476   promotion_queue_.Initialize();
1477 
1478 #ifdef DEBUG
1479   store_buffer()->Clean();
1480 #endif
1481 
1482   ScavengeVisitor scavenge_visitor(this);
1483   // Copy roots.
1484   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1485 
1486   // Copy objects reachable from the old generation.
1487   {
1488     StoreBufferRebuildScope scope(this,
1489                                   store_buffer(),
1490                                   &ScavengeStoreBufferCallback);
1491     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1492   }
1493 
1494   // Copy objects reachable from simple cells by scavenging cell values
1495   // directly.
1496   HeapObjectIterator cell_iterator(cell_space_);
1497   for (HeapObject* heap_object = cell_iterator.Next();
1498        heap_object != NULL;
1499        heap_object = cell_iterator.Next()) {
1500     if (heap_object->IsCell()) {
1501       Cell* cell = Cell::cast(heap_object);
1502       Address value_address = cell->ValueAddress();
1503       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1504     }
1505   }
1506 
1507   // Copy objects reachable from global property cells by scavenging global
1508   // property cell values directly.
1509   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1510   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1511        heap_object != NULL;
1512        heap_object = js_global_property_cell_iterator.Next()) {
1513     if (heap_object->IsPropertyCell()) {
1514       PropertyCell* cell = PropertyCell::cast(heap_object);
1515       Address value_address = cell->ValueAddress();
1516       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1517       Address type_address = cell->TypeAddress();
1518       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1519     }
1520   }
1521 
1522   // Copy objects reachable from the code flushing candidates list.
1523   MarkCompactCollector* collector = mark_compact_collector();
1524   if (collector->is_code_flushing_enabled()) {
1525     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1526   }
1527 
1528   // Scavenge objects reachable from the native contexts list directly.
1529   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1530 
1531   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1532 
1533   while (isolate()->global_handles()->IterateObjectGroups(
1534       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1535     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1536   }
1537   isolate()->global_handles()->RemoveObjectGroups();
1538   isolate()->global_handles()->RemoveImplicitRefGroups();
1539 
1540   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1541       &IsUnscavengedHeapObject);
1542   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1543       &scavenge_visitor);
1544   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1545 
1546   UpdateNewSpaceReferencesInExternalStringTable(
1547       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1548 
1549   promotion_queue_.Destroy();
1550 
1551   if (!FLAG_watch_ic_patching) {
1552     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1553   }
1554   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1555 
1556   ScavengeWeakObjectRetainer weak_object_retainer(this);
1557   ProcessWeakReferences(&weak_object_retainer);
1558 
1559   ASSERT(new_space_front == new_space_.top());
1560 
1561   // Set age mark.
1562   new_space_.set_age_mark(new_space_.top());
1563 
1564   new_space_.LowerInlineAllocationLimit(
1565       new_space_.inline_allocation_limit_step());
1566 
1567   // Update how much has survived scavenge.
1568   IncrementYoungSurvivorsCounter(static_cast<int>(
1569       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1570 
1571   LOG(isolate_, ResourceEvent("scavenge", "end"));
1572 
1573   gc_state_ = NOT_IN_GC;
1574 
1575   scavenges_since_last_idle_round_++;
1576 }
1577 
1578 
1579 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1580                                                                 Object** p) {
1581   MapWord first_word = HeapObject::cast(*p)->map_word();
1582 
1583   if (!first_word.IsForwardingAddress()) {
1584     // Unreachable external string can be finalized.
1585     heap->FinalizeExternalString(String::cast(*p));
1586     return NULL;
1587   }
1588 
1589   // String is still reachable.
1590   return String::cast(first_word.ToForwardingAddress());
1591 }
1592 
1593 
1594 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1595     ExternalStringTableUpdaterCallback updater_func) {
1596 #ifdef VERIFY_HEAP
1597   if (FLAG_verify_heap) {
1598     external_string_table_.Verify();
1599   }
1600 #endif
1601 
1602   if (external_string_table_.new_space_strings_.is_empty()) return;
1603 
1604   Object** start = &external_string_table_.new_space_strings_[0];
1605   Object** end = start + external_string_table_.new_space_strings_.length();
1606   Object** last = start;
1607 
1608   for (Object** p = start; p < end; ++p) {
1609     ASSERT(InFromSpace(*p));
1610     String* target = updater_func(this, p);
1611 
1612     if (target == NULL) continue;
1613 
1614     ASSERT(target->IsExternalString());
1615 
1616     if (InNewSpace(target)) {
1617       // String is still in new space.  Update the table entry.
1618       *last = target;
1619       ++last;
1620     } else {
1621       // String got promoted.  Move it to the old string list.
1622       external_string_table_.AddOldString(target);
1623     }
1624   }
1625 
1626   ASSERT(last <= end);
1627   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1628 }
1629 
1630 
1631 void Heap::UpdateReferencesInExternalStringTable(
1632     ExternalStringTableUpdaterCallback updater_func) {
1633 
1634   // Update old space string references.
1635   if (external_string_table_.old_space_strings_.length() > 0) {
1636     Object** start = &external_string_table_.old_space_strings_[0];
1637     Object** end = start + external_string_table_.old_space_strings_.length();
1638     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1639   }
1640 
1641   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1642 }
1643 
1644 
1645 template <class T>
1646 struct WeakListVisitor;
1647 
1648 
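// Walks a weak list threaded through objects of type T via the next-link
// described by WeakListVisitor<T>. Elements the retainer reports as dead are
// unlinked; survivors are rewired into a new list whose head is returned.
// When record_slots is true, every updated next-link is also reported to the
// mark-compact collector.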
1649 template <class T>
1650 static Object* VisitWeakList(Heap* heap,
1651                              Object* list,
1652                              WeakObjectRetainer* retainer,
1653                              bool record_slots) {
1654   Object* undefined = heap->undefined_value();
1655   Object* head = undefined;
1656   T* tail = NULL;
1657   MarkCompactCollector* collector = heap->mark_compact_collector();
1658   while (list != undefined) {
1659     // Check whether to keep the candidate in the list.
1660     T* candidate = reinterpret_cast<T*>(list);
1661     Object* retained = retainer->RetainAs(list);
1662     if (retained != NULL) {
1663       if (head == undefined) {
1664         // First element in the list.
1665         head = retained;
1666       } else {
1667         // Subsequent elements in the list.
1668         ASSERT(tail != NULL);
1669         WeakListVisitor<T>::SetWeakNext(tail, retained);
1670         if (record_slots) {
1671           Object** next_slot =
1672             HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1673           collector->RecordSlot(next_slot, next_slot, retained);
1674         }
1675       }
1676       // Retained object is new tail.
1677       ASSERT(!retained->IsUndefined());
1678       candidate = reinterpret_cast<T*>(retained);
1679       tail = candidate;
1680 
1681 
1682       // tail is a live object, visit it.
1683       WeakListVisitor<T>::VisitLiveObject(
1684           heap, tail, retainer, record_slots);
1685     } else {
1686       WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1687     }
1688 
1689     // Move to next element in the list.
1690     list = WeakListVisitor<T>::WeakNext(candidate);
1691   }
1692 
1693   // Terminate the list if there are one or more elements.
1694   if (tail != NULL) {
1695     WeakListVisitor<T>::SetWeakNext(tail, undefined);
1696   }
1697   return head;
1698 }
1699 
1700 
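// Each WeakListVisitor specialization describes, for one weakly linked type,
// where its weak "next" pointer is stored and how to treat live and phantom
// (dead) elements during list visiting.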
1701 template<>
1702 struct WeakListVisitor<JSFunction> {
1703   static void SetWeakNext(JSFunction* function, Object* next) {
1704     function->set_next_function_link(next);
1705   }
1706 
1707   static Object* WeakNext(JSFunction* function) {
1708     return function->next_function_link();
1709   }
1710 
1711   static int WeakNextOffset() {
1712     return JSFunction::kNextFunctionLinkOffset;
1713   }
1714 
1715   static void VisitLiveObject(Heap*, JSFunction*,
1716                               WeakObjectRetainer*, bool) {
1717   }
1718 
1719   static void VisitPhantomObject(Heap*, JSFunction*) {
1720   }
1721 };
1722 
1723 
1724 template<>
1725 struct WeakListVisitor<Code> {
1726   static void SetWeakNext(Code* code, Object* next) {
1727     code->set_next_code_link(next);
1728   }
1729 
1730   static Object* WeakNext(Code* code) {
1731     return code->next_code_link();
1732   }
1733 
1734   static int WeakNextOffset() {
1735     return Code::kNextCodeLinkOffset;
1736   }
1737 
1738   static void VisitLiveObject(Heap*, Code*,
1739                               WeakObjectRetainer*, bool) {
1740   }
1741 
1742   static void VisitPhantomObject(Heap*, Code*) {
1743   }
1744 };
1745 
1746 
1747 template<>
1748 struct WeakListVisitor<Context> {
1749   static void SetWeakNext(Context* context, Object* next) {
1750     context->set(Context::NEXT_CONTEXT_LINK,
1751                  next,
1752                  UPDATE_WRITE_BARRIER);
1753   }
1754 
1755   static Object* WeakNext(Context* context) {
1756     return context->get(Context::NEXT_CONTEXT_LINK);
1757   }
1758 
1759   static void VisitLiveObject(Heap* heap,
1760                               Context* context,
1761                               WeakObjectRetainer* retainer,
1762                               bool record_slots) {
1763     // Process the three weak lists linked off the context.
1764     DoWeakList<JSFunction>(heap, context, retainer, record_slots,
1765         Context::OPTIMIZED_FUNCTIONS_LIST);
1766     DoWeakList<Code>(heap, context, retainer, record_slots,
1767         Context::OPTIMIZED_CODE_LIST);
1768     DoWeakList<Code>(heap, context, retainer, record_slots,
1769         Context::DEOPTIMIZED_CODE_LIST);
1770   }
1771 
1772   template<class T>
1773   static void DoWeakList(Heap* heap,
1774                          Context* context,
1775                          WeakObjectRetainer* retainer,
1776                          bool record_slots,
1777                          int index) {
1778     // Visit the weak list, removing dead intermediate elements.
1779     Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
1780         record_slots);
1781 
1782     // Update the list head.
1783     context->set(index, list_head, UPDATE_WRITE_BARRIER);
1784 
1785     if (record_slots) {
1786       // Record the updated slot if necessary.
1787       Object** head_slot = HeapObject::RawField(
1788           context, FixedArray::SizeFor(index));
1789       heap->mark_compact_collector()->RecordSlot(
1790           head_slot, head_slot, list_head);
1791     }
1792   }
1793 
1794   static void VisitPhantomObject(Heap*, Context*) {
1795   }
1796 
1797   static int WeakNextOffset() {
1798     return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1799   }
1800 };
1801 
1802 
1803 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1804   // We don't record weak slots during marking or scavenges.
1805   // Instead we do it once when we complete the mark-compact cycle.
1806   // Note that the write barrier has no effect if we are already in the middle
1807   // of a compacting mark-sweep cycle and we have to record slots manually.
1808   bool record_slots =
1809       gc_state() == MARK_COMPACT &&
1810       mark_compact_collector()->is_compacting();
1811   ProcessArrayBuffers(retainer, record_slots);
1812   ProcessNativeContexts(retainer, record_slots);
1813   // TODO(mvstanton): AllocationSites only need to be processed during
1814   // MARK_COMPACT, as they live in old space. Verify and address.
1815   ProcessAllocationSites(retainer, record_slots);
1816 }
1817 
1818 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1819                                  bool record_slots) {
1820   Object* head =
1821       VisitWeakList<Context>(
1822           this, native_contexts_list(), retainer, record_slots);
1823   // Update the head of the list of contexts.
1824   native_contexts_list_ = head;
1825 }
1826 
1827 
1828 template<>
1829 struct WeakListVisitor<JSArrayBufferView> {
1830   static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1831     obj->set_weak_next(next);
1832   }
1833 
1834   static Object* WeakNext(JSArrayBufferView* obj) {
1835     return obj->weak_next();
1836   }
1837 
1838   static void VisitLiveObject(Heap*,
1839                               JSArrayBufferView* obj,
1840                               WeakObjectRetainer* retainer,
1841                               bool record_slots) {}
1842 
1843   static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1844 
1845   static int WeakNextOffset() {
1846     return JSArrayBufferView::kWeakNextOffset;
1847   }
1848 };
1849 
1850 
1851 template<>
1852 struct WeakListVisitor<JSArrayBuffer> {
1853   static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1854     obj->set_weak_next(next);
1855   }
1856 
1857   static Object* WeakNext(JSArrayBuffer* obj) {
1858     return obj->weak_next();
1859   }
1860 
1861   static void VisitLiveObject(Heap* heap,
1862                               JSArrayBuffer* array_buffer,
1863                               WeakObjectRetainer* retainer,
1864                               bool record_slots) {
1865     Object* typed_array_obj =
1866         VisitWeakList<JSArrayBufferView>(
1867             heap,
1868             array_buffer->weak_first_view(),
1869             retainer, record_slots);
1870     array_buffer->set_weak_first_view(typed_array_obj);
1871     if (typed_array_obj != heap->undefined_value() && record_slots) {
1872       Object** slot = HeapObject::RawField(
1873           array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1874       heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1875     }
1876   }
1877 
1878   static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1879     Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1880   }
1881 
1882   static int WeakNextOffset() {
1883     return JSArrayBuffer::kWeakNextOffset;
1884   }
1885 };
1886 
1887 
1888 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1889                                bool record_slots) {
1890   Object* array_buffer_obj =
1891       VisitWeakList<JSArrayBuffer>(this,
1892                                    array_buffers_list(),
1893                                    retainer, record_slots);
1894   set_array_buffers_list(array_buffer_obj);
1895 }
1896 
1897 
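// Walks the entire array buffer list, releasing each remaining
// JSArrayBuffer's backing store via Runtime::FreeArrayBuffer, and then
// resets the list to undefined.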
1898 void Heap::TearDownArrayBuffers() {
1899   Object* undefined = undefined_value();
1900   for (Object* o = array_buffers_list(); o != undefined;) {
1901     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1902     Runtime::FreeArrayBuffer(isolate(), buffer);
1903     o = buffer->weak_next();
1904   }
1905   array_buffers_list_ = undefined;
1906 }
1907 
1908 
1909 template<>
1910 struct WeakListVisitor<AllocationSite> {
1911   static void SetWeakNext(AllocationSite* obj, Object* next) {
1912     obj->set_weak_next(next);
1913   }
1914 
1915   static Object* WeakNext(AllocationSite* obj) {
1916     return obj->weak_next();
1917   }
1918 
1919   static void VisitLiveObject(Heap* heap,
1920                               AllocationSite* site,
1921                               WeakObjectRetainer* retainer,
1922                               bool record_slots) {}
1923 
1924   static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1925 
1926   static int WeakNextOffset() {
1927     return AllocationSite::kWeakNextOffset;
1928   }
1929 };
1930 
1931 
1932 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1933                                   bool record_slots) {
1934   Object* allocation_site_obj =
1935       VisitWeakList<AllocationSite>(this,
1936                                     allocation_sites_list(),
1937                                     retainer, record_slots);
1938   set_allocation_sites_list(allocation_site_obj);
1939 }
1940 
1941 
1942 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1943   DisallowHeapAllocation no_allocation;
1944 
1945   // Both the external string table and the string table may contain
1946   // external strings, but neither lists them exhaustively, nor is the
1947   // intersection set empty.  Therefore we iterate over the external string
1948   // table first, ignoring internalized strings, and then over the
1949   // internalized string table.
1950 
1951   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1952    public:
1953     explicit ExternalStringTableVisitorAdapter(
1954         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1955     virtual void VisitPointers(Object** start, Object** end) {
1956       for (Object** p = start; p < end; p++) {
1957         // Visit non-internalized external strings,
1958         // since internalized strings are listed in the string table.
1959         if (!(*p)->IsInternalizedString()) {
1960           ASSERT((*p)->IsExternalString());
1961           visitor_->VisitExternalString(Utils::ToLocal(
1962               Handle<String>(String::cast(*p))));
1963         }
1964       }
1965     }
1966    private:
1967     v8::ExternalResourceVisitor* visitor_;
1968   } external_string_table_visitor(visitor);
1969 
1970   external_string_table_.Iterate(&external_string_table_visitor);
1971 
1972   class StringTableVisitorAdapter : public ObjectVisitor {
1973    public:
1974     explicit StringTableVisitorAdapter(
1975         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1976     virtual void VisitPointers(Object** start, Object** end) {
1977       for (Object** p = start; p < end; p++) {
1978         if ((*p)->IsExternalString()) {
1979           ASSERT((*p)->IsInternalizedString());
1980           visitor_->VisitExternalString(Utils::ToLocal(
1981               Handle<String>(String::cast(*p))));
1982         }
1983       }
1984     }
1985    private:
1986     v8::ExternalResourceVisitor* visitor_;
1987   } string_table_visitor(visitor);
1988 
1989   string_table()->IterateElements(&string_table_visitor);
1990 }
1991 
1992 
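// Static visitor used by DoScavenge to iterate the bodies of freshly copied
// objects; each field that still points into new space is scavenged in place.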
1993 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1994  public:
1995   static inline void VisitPointer(Heap* heap, Object** p) {
1996     Object* object = *p;
1997     if (!heap->InNewSpace(object)) return;
1998     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1999                          reinterpret_cast<HeapObject*>(object));
2000   }
2001 };
2002 
2003 
2004 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
2005                          Address new_space_front) {
2006   do {
2007     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
2008     // The addresses new_space_front and new_space_.top() define a
2009     // queue of unprocessed copied objects.  Process them until the
2010     // queue is empty.
2011     while (new_space_front != new_space_.top()) {
2012       if (!NewSpacePage::IsAtEnd(new_space_front)) {
2013         HeapObject* object = HeapObject::FromAddress(new_space_front);
2014         new_space_front +=
2015           NewSpaceScavenger::IterateBody(object->map(), object);
2016       } else {
2017         new_space_front =
2018             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
2019       }
2020     }
2021 
2022     // Promote and process all the to-be-promoted objects.
2023     {
2024       StoreBufferRebuildScope scope(this,
2025                                     store_buffer(),
2026                                     &ScavengeStoreBufferCallback);
2027       while (!promotion_queue()->is_empty()) {
2028         HeapObject* target;
2029         int size;
2030         promotion_queue()->remove(&target, &size);
2031 
2032         // Promoted objects might already be partially visited
2033         // during old space pointer iteration. Thus we search specifically
2034         // for pointers to the from semispace instead of looking for pointers
2035         // to new space.
2036         ASSERT(!target->IsMap());
2037         IterateAndMarkPointersToFromSpace(target->address(),
2038                                           target->address() + size,
2039                                           &ScavengeObject);
2040       }
2041     }
2042 
2043     // Take another spin if there are now unswept objects in new space
2044     // (there are currently no more unswept promoted objects).
2045   } while (new_space_front != new_space_.top());
2046 
2047   return new_space_front;
2048 }
2049 
2050 
2051 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2052 STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2053 
2054 
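// Callers that need a double-aligned object (see EvacuateObject) allocate one
// extra pointer-sized word. If the raw address is not double aligned, a
// one-word filler is placed in front and the object starts one word later;
// otherwise the filler goes at the end. For example, with 4-byte pointers an
// allocation at an address ending in 0x4 is misaligned, so the object is
// shifted to the next address ending in 0x8, which is 8-byte aligned.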
2055 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
2056                                               HeapObject* object,
2057                                               int size));
2058 
2059 static HeapObject* EnsureDoubleAligned(Heap* heap,
2060                                        HeapObject* object,
2061                                        int size) {
2062   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
2063     heap->CreateFillerObjectAt(object->address(), kPointerSize);
2064     return HeapObject::FromAddress(object->address() + kPointerSize);
2065   } else {
2066     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
2067                                kPointerSize);
2068     return object;
2069   }
2070 }
2071 
2072 
2073 enum LoggingAndProfiling {
2074   LOGGING_AND_PROFILING_ENABLED,
2075   LOGGING_AND_PROFILING_DISABLED
2076 };
2077 
2078 
2079 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
2080 
2081 
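// Dispatch table of per-type evacuation callbacks used during a scavenge.
// marks_handling selects whether incremental-marking mark bits are
// transferred to the copied object (TRANSFER_MARKS) or ignored (IGNORE_MARKS);
// logging_and_profiling_mode selects whether object moves are reported to the
// logger and profilers. All four combinations are instantiated in
// InitializeScavengingVisitorsTables below.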
2082 template<MarksHandling marks_handling,
2083          LoggingAndProfiling logging_and_profiling_mode>
2084 class ScavengingVisitor : public StaticVisitorBase {
2085  public:
2086   static void Initialize() {
2087     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
2088     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
2089     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
2090     table_.Register(kVisitByteArray, &EvacuateByteArray);
2091     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
2092     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
2093 
2094     table_.Register(kVisitNativeContext,
2095                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2096                         template VisitSpecialized<Context::kSize>);
2097 
2098     table_.Register(kVisitConsString,
2099                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2100                         template VisitSpecialized<ConsString::kSize>);
2101 
2102     table_.Register(kVisitSlicedString,
2103                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2104                         template VisitSpecialized<SlicedString::kSize>);
2105 
2106     table_.Register(kVisitSymbol,
2107                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2108                         template VisitSpecialized<Symbol::kSize>);
2109 
2110     table_.Register(kVisitSharedFunctionInfo,
2111                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2112                         template VisitSpecialized<SharedFunctionInfo::kSize>);
2113 
2114     table_.Register(kVisitJSWeakMap,
2115                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2116                     Visit);
2117 
2118     table_.Register(kVisitJSWeakSet,
2119                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2120                     Visit);
2121 
2122     table_.Register(kVisitJSArrayBuffer,
2123                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2124                     Visit);
2125 
2126     table_.Register(kVisitJSTypedArray,
2127                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2128                     Visit);
2129 
2130     table_.Register(kVisitJSDataView,
2131                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2132                     Visit);
2133 
2134     table_.Register(kVisitJSRegExp,
2135                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2136                     Visit);
2137 
2138     if (marks_handling == IGNORE_MARKS) {
2139       table_.Register(kVisitJSFunction,
2140                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
2141                           template VisitSpecialized<JSFunction::kSize>);
2142     } else {
2143       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2144     }
2145 
2146     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2147                                    kVisitDataObject,
2148                                    kVisitDataObjectGeneric>();
2149 
2150     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2151                                    kVisitJSObject,
2152                                    kVisitJSObjectGeneric>();
2153 
2154     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2155                                    kVisitStruct,
2156                                    kVisitStructGeneric>();
2157   }
2158 
2159   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2160     return &table_;
2161   }
2162 
2163  private:
2164   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
2165 
2166   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2167     bool should_record = false;
2168 #ifdef DEBUG
2169     should_record = FLAG_heap_stats;
2170 #endif
2171     should_record = should_record || FLAG_log_gc;
2172     if (should_record) {
2173       if (heap->new_space()->Contains(obj)) {
2174         heap->new_space()->RecordAllocation(obj);
2175       } else {
2176         heap->new_space()->RecordPromotion(obj);
2177       }
2178     }
2179   }
2180 
2181   // Helper function used by CopyObject to copy a source object to an
2182   // allocated target object and update the forwarding pointer in the source
2183   // object.  Returns the target object.
2184   INLINE(static void MigrateObject(Heap* heap,
2185                                    HeapObject* source,
2186                                    HeapObject* target,
2187                                    int size)) {
2188     // Copy the content of source to target.
2189     heap->CopyBlock(target->address(), source->address(), size);
2190 
2191     // Set the forwarding address.
2192     source->set_map_word(MapWord::FromForwardingAddress(target));
2193 
2194     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2195       // Update NewSpace stats if necessary.
2196       RecordCopiedObject(heap, target);
2197       Isolate* isolate = heap->isolate();
2198       HeapProfiler* heap_profiler = isolate->heap_profiler();
2199       if (heap_profiler->is_tracking_object_moves()) {
2200         heap_profiler->ObjectMoveEvent(source->address(), target->address(),
2201                                        size);
2202       }
2203       if (isolate->logger()->is_logging_code_events() ||
2204           isolate->cpu_profiler()->is_profiling()) {
2205         if (target->IsSharedFunctionInfo()) {
2206           PROFILE(isolate, SharedFunctionInfoMoveEvent(
2207               source->address(), target->address()));
2208         }
2209       }
2210     }
2211 
2212     if (marks_handling == TRANSFER_MARKS) {
2213       if (Marking::TransferColor(source, target)) {
2214         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2215       }
2216     }
2217   }
2218 
2219 
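  // Core copy step for a scavenged object: objects that should be promoted
  // are allocated in old data or old pointer space, and pointer-containing
  // objects are additionally pushed onto the promotion queue so their fields
  // are scanned later; everything else is copied within new space. When
  // double alignment is requested, one extra word is allocated and
  // EnsureDoubleAligned decides where the filler ends up.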
2220   template<ObjectContents object_contents, int alignment>
2221   static inline void EvacuateObject(Map* map,
2222                                     HeapObject** slot,
2223                                     HeapObject* object,
2224                                     int object_size) {
2225     SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
2226     SLOW_ASSERT(object->Size() == object_size);
2227 
2228     int allocation_size = object_size;
2229     if (alignment != kObjectAlignment) {
2230       ASSERT(alignment == kDoubleAlignment);
2231       allocation_size += kPointerSize;
2232     }
2233 
2234     Heap* heap = map->GetHeap();
2235     if (heap->ShouldBePromoted(object->address(), object_size)) {
2236       MaybeObject* maybe_result;
2237 
2238       if (object_contents == DATA_OBJECT) {
2239         ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2240         maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2241       } else {
2242         ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2243         maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
2244       }
2245 
2246       Object* result = NULL;  // Initialization to please compiler.
2247       if (maybe_result->ToObject(&result)) {
2248         HeapObject* target = HeapObject::cast(result);
2249 
2250         if (alignment != kObjectAlignment) {
2251           target = EnsureDoubleAligned(heap, target, allocation_size);
2252         }
2253 
2254         // Order is important: slot might be inside of the target if target
2255         // was allocated over a dead object and slot comes from the store
2256         // buffer.
2257         *slot = target;
2258         MigrateObject(heap, object, target, object_size);
2259 
2260         if (object_contents == POINTER_OBJECT) {
2261           if (map->instance_type() == JS_FUNCTION_TYPE) {
2262             heap->promotion_queue()->insert(
2263                 target, JSFunction::kNonWeakFieldsEndOffset);
2264           } else {
2265             heap->promotion_queue()->insert(target, object_size);
2266           }
2267         }
2268 
2269         heap->tracer()->increment_promoted_objects_size(object_size);
2270         return;
2271       }
2272     }
2273     ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
2274     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2275     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2276     Object* result = allocation->ToObjectUnchecked();
2277     HeapObject* target = HeapObject::cast(result);
2278 
2279     if (alignment != kObjectAlignment) {
2280       target = EnsureDoubleAligned(heap, target, allocation_size);
2281     }
2282 
2283     // Order is important: slot might be inside of the target if target
2284     // was allocated over a dead object and slot comes from the store
2285     // buffer.
2286     *slot = target;
2287     MigrateObject(heap, object, target, object_size);
2288     return;
2289   }
2290 
2291 
2292   static inline void EvacuateJSFunction(Map* map,
2293                                         HeapObject** slot,
2294                                         HeapObject* object) {
2295     ObjectEvacuationStrategy<POINTER_OBJECT>::
2296         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2297 
2298     HeapObject* target = *slot;
2299     MarkBit mark_bit = Marking::MarkBitFrom(target);
2300     if (Marking::IsBlack(mark_bit)) {
2301       // This object is black and it might not be rescanned by marker.
2302       // We should explicitly record code entry slot for compaction because
2303       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2304       // miss it as it is not HeapObject-tagged.
2305       Address code_entry_slot =
2306           target->address() + JSFunction::kCodeEntryOffset;
2307       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2308       map->GetHeap()->mark_compact_collector()->
2309           RecordCodeEntrySlot(code_entry_slot, code);
2310     }
2311   }
2312 
2313 
2314   static inline void EvacuateFixedArray(Map* map,
2315                                         HeapObject** slot,
2316                                         HeapObject* object) {
2317     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2318     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2319         map, slot, object, object_size);
2320   }
2321 
2322 
2323   static inline void EvacuateFixedDoubleArray(Map* map,
2324                                               HeapObject** slot,
2325                                               HeapObject* object) {
2326     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2327     int object_size = FixedDoubleArray::SizeFor(length);
2328     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2329         map, slot, object, object_size);
2330   }
2331 
2332 
2333   static inline void EvacuateByteArray(Map* map,
2334                                        HeapObject** slot,
2335                                        HeapObject* object) {
2336     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2337     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2338         map, slot, object, object_size);
2339   }
2340 
2341 
2342   static inline void EvacuateSeqOneByteString(Map* map,
2343                                             HeapObject** slot,
2344                                             HeapObject* object) {
2345     int object_size = SeqOneByteString::cast(object)->
2346         SeqOneByteStringSize(map->instance_type());
2347     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2348         map, slot, object, object_size);
2349   }
2350 
2351 
2352   static inline void EvacuateSeqTwoByteString(Map* map,
2353                                               HeapObject** slot,
2354                                               HeapObject* object) {
2355     int object_size = SeqTwoByteString::cast(object)->
2356         SeqTwoByteStringSize(map->instance_type());
2357     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2358         map, slot, object, object_size);
2359   }
2360 
2361 
2362   static inline bool IsShortcutCandidate(int type) {
2363     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2364   }
2365 
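  // Cons strings whose second component is the empty string are
  // short-circuited: the slot is updated to point directly at the first
  // component (scavenging it if necessary) instead of copying the wrapper.
  // This only happens when marks are ignored; otherwise the cons string is
  // evacuated like any other pointer object.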
2366   static inline void EvacuateShortcutCandidate(Map* map,
2367                                                HeapObject** slot,
2368                                                HeapObject* object) {
2369     ASSERT(IsShortcutCandidate(map->instance_type()));
2370 
2371     Heap* heap = map->GetHeap();
2372 
2373     if (marks_handling == IGNORE_MARKS &&
2374         ConsString::cast(object)->unchecked_second() ==
2375         heap->empty_string()) {
2376       HeapObject* first =
2377           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2378 
2379       *slot = first;
2380 
2381       if (!heap->InNewSpace(first)) {
2382         object->set_map_word(MapWord::FromForwardingAddress(first));
2383         return;
2384       }
2385 
2386       MapWord first_word = first->map_word();
2387       if (first_word.IsForwardingAddress()) {
2388         HeapObject* target = first_word.ToForwardingAddress();
2389 
2390         *slot = target;
2391         object->set_map_word(MapWord::FromForwardingAddress(target));
2392         return;
2393       }
2394 
2395       heap->DoScavengeObject(first->map(), slot, first);
2396       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2397       return;
2398     }
2399 
2400     int object_size = ConsString::kSize;
2401     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2402         map, slot, object, object_size);
2403   }
2404 
2405   template<ObjectContents object_contents>
2406   class ObjectEvacuationStrategy {
2407    public:
2408     template<int object_size>
2409     static inline void VisitSpecialized(Map* map,
2410                                         HeapObject** slot,
2411                                         HeapObject* object) {
2412       EvacuateObject<object_contents, kObjectAlignment>(
2413           map, slot, object, object_size);
2414     }
2415 
2416     static inline void Visit(Map* map,
2417                              HeapObject** slot,
2418                              HeapObject* object) {
2419       int object_size = map->instance_size();
2420       EvacuateObject<object_contents, kObjectAlignment>(
2421           map, slot, object, object_size);
2422     }
2423   };
2424 
2425   static VisitorDispatchTable<ScavengingCallback> table_;
2426 };
2427 
2428 
2429 template<MarksHandling marks_handling,
2430          LoggingAndProfiling logging_and_profiling_mode>
2431 VisitorDispatchTable<ScavengingCallback>
2432     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2433 
2434 
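// Instantiates the dispatch tables for all four ScavengingVisitor
// configurations so that SelectScavengingVisitorsTable can pick the right
// one at the start of each scavenge.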
2435 static void InitializeScavengingVisitorsTables() {
2436   ScavengingVisitor<TRANSFER_MARKS,
2437                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2438   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2439   ScavengingVisitor<TRANSFER_MARKS,
2440                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2441   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2442 }
2443 
2444 
2445 void Heap::SelectScavengingVisitorsTable() {
2446   bool logging_and_profiling =
2447       isolate()->logger()->is_logging() ||
2448       isolate()->cpu_profiler()->is_profiling() ||
2449       (isolate()->heap_profiler() != NULL &&
2450        isolate()->heap_profiler()->is_tracking_object_moves());
2451 
2452   if (!incremental_marking()->IsMarking()) {
2453     if (!logging_and_profiling) {
2454       scavenging_visitors_table_.CopyFrom(
2455           ScavengingVisitor<IGNORE_MARKS,
2456                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2457     } else {
2458       scavenging_visitors_table_.CopyFrom(
2459           ScavengingVisitor<IGNORE_MARKS,
2460                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2461     }
2462   } else {
2463     if (!logging_and_profiling) {
2464       scavenging_visitors_table_.CopyFrom(
2465           ScavengingVisitor<TRANSFER_MARKS,
2466                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2467     } else {
2468       scavenging_visitors_table_.CopyFrom(
2469           ScavengingVisitor<TRANSFER_MARKS,
2470                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2471     }
2472 
2473     if (incremental_marking()->IsCompacting()) {
2474       // When compacting, forbid short-circuiting of cons-strings.
2475       // Scavenging code relies on the fact that a new space object
2476       // can't be evacuated into an evacuation candidate, but
2477       // short-circuiting violates this assumption.
2478       scavenging_visitors_table_.Register(
2479           StaticVisitorBase::kVisitShortcutCandidate,
2480           scavenging_visitors_table_.GetVisitorById(
2481               StaticVisitorBase::kVisitConsString));
2482     }
2483   }
2484 }
2485 
2486 
2487 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2488   SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
2489   MapWord first_word = object->map_word();
2490   SLOW_ASSERT(!first_word.IsForwardingAddress());
2491   Map* map = first_word.ToMap();
2492   map->GetHeap()->DoScavengeObject(map, p, object);
2493 }
2494 
2495 
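// Allocates a map with only the fields needed during bootstrapping filled in;
// prototype, constructor, descriptors and the code cache are patched up later
// in CreateInitialMaps once the corresponding root objects exist.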
2496 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2497                                       int instance_size) {
2498   Object* result;
2499   MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2500   if (!maybe_result->ToObject(&result)) return maybe_result;
2501 
2502   // Map::cast cannot be used due to uninitialized map field.
2503   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2504   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2505   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2506   reinterpret_cast<Map*>(result)->set_visitor_id(
2507         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2508   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2509   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2510   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2511   reinterpret_cast<Map*>(result)->set_bit_field(0);
2512   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2513   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2514                    Map::OwnsDescriptors::encode(true);
2515   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2516   return result;
2517 }
2518 
2519 
2520 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2521                                int instance_size,
2522                                ElementsKind elements_kind) {
2523   Object* result;
2524   MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2525   if (!maybe_result->To(&result)) return maybe_result;
2526 
2527   Map* map = reinterpret_cast<Map*>(result);
2528   map->set_map_no_write_barrier(meta_map());
2529   map->set_instance_type(instance_type);
2530   map->set_visitor_id(
2531       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2532   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2533   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2534   map->set_instance_size(instance_size);
2535   map->set_inobject_properties(0);
2536   map->set_pre_allocated_property_fields(0);
2537   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2538   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2539                           SKIP_WRITE_BARRIER);
2540   map->init_back_pointer(undefined_value());
2541   map->set_unused_property_fields(0);
2542   map->set_instance_descriptors(empty_descriptor_array());
2543   map->set_bit_field(0);
2544   map->set_bit_field2(1 << Map::kIsExtensible);
2545   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2546                    Map::OwnsDescriptors::encode(true);
2547   map->set_bit_field3(bit_field3);
2548   map->set_elements_kind(elements_kind);
2549 
2550   return map;
2551 }
2552 
2553 
2554 MaybeObject* Heap::AllocateCodeCache() {
2555   CodeCache* code_cache;
2556   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2557     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2558   }
2559   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2560   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2561   return code_cache;
2562 }
2563 
2564 
2565 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2566   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2567 }
2568 
2569 
2570 MaybeObject* Heap::AllocateAccessorPair() {
2571   AccessorPair* accessors;
2572   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2573     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2574   }
2575   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2576   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2577   accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
2578   return accessors;
2579 }
2580 
2581 
2582 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2583   TypeFeedbackInfo* info;
2584   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2585     if (!maybe_info->To(&info)) return maybe_info;
2586   }
2587   info->initialize_storage();
2588   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2589                                 SKIP_WRITE_BARRIER);
2590   return info;
2591 }
2592 
2593 
2594 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2595   AliasedArgumentsEntry* entry;
2596   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2597     if (!maybe_entry->To(&entry)) return maybe_entry;
2598   }
2599   entry->set_aliased_context_slot(aliased_context_slot);
2600   return entry;
2601 }
2602 
2603 
2604 const Heap::StringTypeTable Heap::string_type_table[] = {
2605 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2606   {type, size, k##camel_name##MapRootIndex},
2607   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2608 #undef STRING_TYPE_ELEMENT
2609 };
2610 
2611 
2612 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2613 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2614   {contents, k##name##RootIndex},
2615   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2616 #undef CONSTANT_STRING_ELEMENT
2617 };
2618 
2619 
2620 const Heap::StructTable Heap::struct_table[] = {
2621 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2622   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2623   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2624 #undef STRUCT_TABLE_ELEMENT
2625 };
2626 
2627 
2628 bool Heap::CreateInitialMaps() {
2629   Object* obj;
2630   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2631     if (!maybe_obj->ToObject(&obj)) return false;
2632   }
2633   // Map::cast cannot be used due to uninitialized map field.
2634   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2635   set_meta_map(new_meta_map);
2636   new_meta_map->set_map(new_meta_map);
2637 
2638   { MaybeObject* maybe_obj =
2639         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2640     if (!maybe_obj->ToObject(&obj)) return false;
2641   }
2642   set_fixed_array_map(Map::cast(obj));
2643 
2644   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2645     if (!maybe_obj->ToObject(&obj)) return false;
2646   }
2647   set_oddball_map(Map::cast(obj));
2648 
2649   // Allocate the empty array.
2650   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2651     if (!maybe_obj->ToObject(&obj)) return false;
2652   }
2653   set_empty_fixed_array(FixedArray::cast(obj));
2654 
2655   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2656     if (!maybe_obj->ToObject(&obj)) return false;
2657   }
2658   set_null_value(Oddball::cast(obj));
2659   Oddball::cast(obj)->set_kind(Oddball::kNull);
2660 
2661   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2662     if (!maybe_obj->ToObject(&obj)) return false;
2663   }
2664   set_undefined_value(Oddball::cast(obj));
2665   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2666   ASSERT(!InNewSpace(undefined_value()));
2667 
2668   // Allocate the empty descriptor array.
2669   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2670     if (!maybe_obj->ToObject(&obj)) return false;
2671   }
2672   set_empty_descriptor_array(DescriptorArray::cast(obj));
2673 
2674   // Fix the instance_descriptors for the existing maps.
2675   meta_map()->set_code_cache(empty_fixed_array());
2676   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2677   meta_map()->init_back_pointer(undefined_value());
2678   meta_map()->set_instance_descriptors(empty_descriptor_array());
2679 
2680   fixed_array_map()->set_code_cache(empty_fixed_array());
2681   fixed_array_map()->set_dependent_code(
2682       DependentCode::cast(empty_fixed_array()));
2683   fixed_array_map()->init_back_pointer(undefined_value());
2684   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2685 
2686   oddball_map()->set_code_cache(empty_fixed_array());
2687   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2688   oddball_map()->init_back_pointer(undefined_value());
2689   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2690 
2691   // Fix prototype object for existing maps.
2692   meta_map()->set_prototype(null_value());
2693   meta_map()->set_constructor(null_value());
2694 
2695   fixed_array_map()->set_prototype(null_value());
2696   fixed_array_map()->set_constructor(null_value());
2697 
2698   oddball_map()->set_prototype(null_value());
2699   oddball_map()->set_constructor(null_value());
2700 
2701   { MaybeObject* maybe_obj =
2702         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2703     if (!maybe_obj->ToObject(&obj)) return false;
2704   }
2705   set_fixed_cow_array_map(Map::cast(obj));
2706   ASSERT(fixed_array_map() != fixed_cow_array_map());
2707 
2708   { MaybeObject* maybe_obj =
2709         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2710     if (!maybe_obj->ToObject(&obj)) return false;
2711   }
2712   set_scope_info_map(Map::cast(obj));
2713 
2714   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2715     if (!maybe_obj->ToObject(&obj)) return false;
2716   }
2717   set_heap_number_map(Map::cast(obj));
2718 
2719   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2720     if (!maybe_obj->ToObject(&obj)) return false;
2721   }
2722   set_symbol_map(Map::cast(obj));
2723 
2724   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2725     if (!maybe_obj->ToObject(&obj)) return false;
2726   }
2727   set_foreign_map(Map::cast(obj));
2728 
2729   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2730     const StringTypeTable& entry = string_type_table[i];
2731     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2732       if (!maybe_obj->ToObject(&obj)) return false;
2733     }
2734     roots_[entry.index] = Map::cast(obj);
2735   }
2736 
2737   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2738     if (!maybe_obj->ToObject(&obj)) return false;
2739   }
2740   set_undetectable_string_map(Map::cast(obj));
2741   Map::cast(obj)->set_is_undetectable();
2742 
2743   { MaybeObject* maybe_obj =
2744         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2745     if (!maybe_obj->ToObject(&obj)) return false;
2746   }
2747   set_undetectable_ascii_string_map(Map::cast(obj));
2748   Map::cast(obj)->set_is_undetectable();
2749 
2750   { MaybeObject* maybe_obj =
2751         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2752     if (!maybe_obj->ToObject(&obj)) return false;
2753   }
2754   set_fixed_double_array_map(Map::cast(obj));
2755 
2756   { MaybeObject* maybe_obj =
2757         AllocateMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
2758     if (!maybe_obj->ToObject(&obj)) return false;
2759   }
2760   set_constant_pool_array_map(Map::cast(obj));
2761 
2762   { MaybeObject* maybe_obj =
2763         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2764     if (!maybe_obj->ToObject(&obj)) return false;
2765   }
2766   set_byte_array_map(Map::cast(obj));
2767 
2768   { MaybeObject* maybe_obj =
2769         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2770     if (!maybe_obj->ToObject(&obj)) return false;
2771   }
2772   set_free_space_map(Map::cast(obj));
2773 
2774   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2775     if (!maybe_obj->ToObject(&obj)) return false;
2776   }
2777   set_empty_byte_array(ByteArray::cast(obj));
2778 
2779   { MaybeObject* maybe_obj =
2780         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2781     if (!maybe_obj->ToObject(&obj)) return false;
2782   }
2783   set_external_pixel_array_map(Map::cast(obj));
2784 
2785   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2786                                          ExternalArray::kAlignedSize);
2787     if (!maybe_obj->ToObject(&obj)) return false;
2788   }
2789   set_external_byte_array_map(Map::cast(obj));
2790 
2791   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2792                                          ExternalArray::kAlignedSize);
2793     if (!maybe_obj->ToObject(&obj)) return false;
2794   }
2795   set_external_unsigned_byte_array_map(Map::cast(obj));
2796 
2797   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2798                                          ExternalArray::kAlignedSize);
2799     if (!maybe_obj->ToObject(&obj)) return false;
2800   }
2801   set_external_short_array_map(Map::cast(obj));
2802 
2803   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2804                                          ExternalArray::kAlignedSize);
2805     if (!maybe_obj->ToObject(&obj)) return false;
2806   }
2807   set_external_unsigned_short_array_map(Map::cast(obj));
2808 
2809   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2810                                          ExternalArray::kAlignedSize);
2811     if (!maybe_obj->ToObject(&obj)) return false;
2812   }
2813   set_external_int_array_map(Map::cast(obj));
2814 
2815   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2816                                          ExternalArray::kAlignedSize);
2817     if (!maybe_obj->ToObject(&obj)) return false;
2818   }
2819   set_external_unsigned_int_array_map(Map::cast(obj));
2820 
2821   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2822                                          ExternalArray::kAlignedSize);
2823     if (!maybe_obj->ToObject(&obj)) return false;
2824   }
2825   set_external_float_array_map(Map::cast(obj));
2826 
2827   { MaybeObject* maybe_obj =
2828         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2829     if (!maybe_obj->ToObject(&obj)) return false;
2830   }
2831   set_non_strict_arguments_elements_map(Map::cast(obj));
2832 
2833   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2834                                          ExternalArray::kAlignedSize);
2835     if (!maybe_obj->ToObject(&obj)) return false;
2836   }
2837   set_external_double_array_map(Map::cast(obj));
2838 
2839   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2840     if (!maybe_obj->ToObject(&obj)) return false;
2841   }
2842   set_empty_external_byte_array(ExternalArray::cast(obj));
2843 
2844   { MaybeObject* maybe_obj =
2845         AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2846     if (!maybe_obj->ToObject(&obj)) return false;
2847   }
2848   set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2849 
2850   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2851     if (!maybe_obj->ToObject(&obj)) return false;
2852   }
2853   set_empty_external_short_array(ExternalArray::cast(obj));
2854 
2855   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2856       kExternalUnsignedShortArray);
2857     if (!maybe_obj->ToObject(&obj)) return false;
2858   }
2859   set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2860 
2861   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2862     if (!maybe_obj->ToObject(&obj)) return false;
2863   }
2864   set_empty_external_int_array(ExternalArray::cast(obj));
2865 
2866   { MaybeObject* maybe_obj =
2867         AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2868     if (!maybe_obj->ToObject(&obj)) return false;
2869   }
2870   set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2871 
2872   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2873     if (!maybe_obj->ToObject(&obj)) return false;
2874   }
2875   set_empty_external_float_array(ExternalArray::cast(obj));
2876 
2877   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2878     if (!maybe_obj->ToObject(&obj)) return false;
2879   }
2880   set_empty_external_double_array(ExternalArray::cast(obj));
2881 
2882   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2883     if (!maybe_obj->ToObject(&obj)) return false;
2884   }
2885   set_empty_external_pixel_array(ExternalArray::cast(obj));
2886 
2887   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2888     if (!maybe_obj->ToObject(&obj)) return false;
2889   }
2890   set_code_map(Map::cast(obj));
2891 
2892   { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2893     if (!maybe_obj->ToObject(&obj)) return false;
2894   }
2895   set_cell_map(Map::cast(obj));
2896 
2897   { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2898                                          PropertyCell::kSize);
2899     if (!maybe_obj->ToObject(&obj)) return false;
2900   }
2901   set_global_property_cell_map(Map::cast(obj));
2902 
2903   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2904     if (!maybe_obj->ToObject(&obj)) return false;
2905   }
2906   set_one_pointer_filler_map(Map::cast(obj));
2907 
2908   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2909     if (!maybe_obj->ToObject(&obj)) return false;
2910   }
2911   set_two_pointer_filler_map(Map::cast(obj));
2912 
2913   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2914     const StructTable& entry = struct_table[i];
2915     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2916       if (!maybe_obj->ToObject(&obj)) return false;
2917     }
2918     roots_[entry.index] = Map::cast(obj);
2919   }
2920 
2921   { MaybeObject* maybe_obj =
2922         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2923     if (!maybe_obj->ToObject(&obj)) return false;
2924   }
2925   set_hash_table_map(Map::cast(obj));
2926 
2927   { MaybeObject* maybe_obj =
2928         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2929     if (!maybe_obj->ToObject(&obj)) return false;
2930   }
2931   set_function_context_map(Map::cast(obj));
2932 
2933   { MaybeObject* maybe_obj =
2934         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2935     if (!maybe_obj->ToObject(&obj)) return false;
2936   }
2937   set_catch_context_map(Map::cast(obj));
2938 
2939   { MaybeObject* maybe_obj =
2940         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2941     if (!maybe_obj->ToObject(&obj)) return false;
2942   }
2943   set_with_context_map(Map::cast(obj));
2944 
2945   { MaybeObject* maybe_obj =
2946         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2947     if (!maybe_obj->ToObject(&obj)) return false;
2948   }
2949   set_block_context_map(Map::cast(obj));
2950 
2951   { MaybeObject* maybe_obj =
2952         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2953     if (!maybe_obj->ToObject(&obj)) return false;
2954   }
2955   set_module_context_map(Map::cast(obj));
2956 
2957   { MaybeObject* maybe_obj =
2958         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2959     if (!maybe_obj->ToObject(&obj)) return false;
2960   }
2961   set_global_context_map(Map::cast(obj));
2962 
2963   { MaybeObject* maybe_obj =
2964         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2965     if (!maybe_obj->ToObject(&obj)) return false;
2966   }
2967   Map* native_context_map = Map::cast(obj);
2968   native_context_map->set_dictionary_map(true);
2969   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2970   set_native_context_map(native_context_map);
2971 
2972   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2973                                          SharedFunctionInfo::kAlignedSize);
2974     if (!maybe_obj->ToObject(&obj)) return false;
2975   }
2976   set_shared_function_info_map(Map::cast(obj));
2977 
2978   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2979                                          JSMessageObject::kSize);
2980     if (!maybe_obj->ToObject(&obj)) return false;
2981   }
2982   set_message_object_map(Map::cast(obj));
2983 
2984   Map* external_map;
2985   { MaybeObject* maybe_obj =
2986         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2987     if (!maybe_obj->To(&external_map)) return false;
2988   }
2989   external_map->set_is_extensible(false);
2990   set_external_map(external_map);
2991 
2992   ASSERT(!InNewSpace(empty_fixed_array()));
2993   return true;
2994 }
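
// Every block in the map-creation code above follows the same allocate-or-bail
// idiom: an allocation yields a MaybeObject*, the caller unwraps it on the
// spot, and any failure aborts the whole bootstrap step by returning false so
// that the caller can retry after a GC.  Below is a minimal, self-contained
// sketch of that idiom; the Maybe/TryAllocate/CreateEverything names are
// hypothetical stand-ins, not the real MaybeObject machinery.
#include <cstddef>

namespace sketch_allocate_or_bail {

struct Maybe {
  void* value;                        // NULL signals an allocation failure.
  bool To(void** out) const {         // Mirrors MaybeObject::ToObject().
    if (value == NULL) return false;
    *out = value;
    return true;
  }
};

static int dummy_storage;
static Maybe TryAllocate(bool fail) {
  Maybe m = { fail ? NULL : &dummy_storage };
  return m;
}

static bool CreateEverything() {
  void* obj;
  { Maybe maybe = TryAllocate(false);    // one root object per scoped block
    if (!maybe.To(&obj)) return false;   // bail out; the caller retries later
  }
  { Maybe maybe = TryAllocate(false);
    if (!maybe.To(&obj)) return false;
  }
  return true;                           // all roots were installed
}

}  // namespace sketch_allocate_or_bail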
2995 
2996 
2997 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2998   // Statically ensure that it is safe to allocate heap numbers in paged
2999   // spaces.
3000   int size = HeapNumber::kSize;
3001   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
3002   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3003 
3004   Object* result;
3005   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3006     if (!maybe_result->ToObject(&result)) return maybe_result;
3007   }
3008 
3009   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
3010   HeapNumber::cast(result)->set_value(value);
3011   return result;
3012 }
3013 
3014 
3015 MaybeObject* Heap::AllocateCell(Object* value) {
3016   int size = Cell::kSize;
3017   STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize);
3018 
3019   Object* result;
3020   { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
3021     if (!maybe_result->ToObject(&result)) return maybe_result;
3022   }
3023   HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
3024   Cell::cast(result)->set_value(value);
3025   return result;
3026 }
3027 
3028 
3029 MaybeObject* Heap::AllocatePropertyCell() {
3030   int size = PropertyCell::kSize;
3031   STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize);
3032 
3033   Object* result;
3034   MaybeObject* maybe_result =
3035       AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
3036   if (!maybe_result->ToObject(&result)) return maybe_result;
3037 
3038   HeapObject::cast(result)->set_map_no_write_barrier(
3039       global_property_cell_map());
3040   PropertyCell* cell = PropertyCell::cast(result);
3041   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
3042                            SKIP_WRITE_BARRIER);
3043   cell->set_value(the_hole_value());
3044   cell->set_type(Type::None());
3045   return result;
3046 }
3047 
3048 
3049 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
3050   Box* result;
3051   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
3052   if (!maybe_result->To(&result)) return maybe_result;
3053   result->set_value(value);
3054   return result;
3055 }
3056 
3057 
3058 MaybeObject* Heap::AllocateAllocationSite() {
3059   AllocationSite* site;
3060   MaybeObject* maybe_result = Allocate(allocation_site_map(),
3061                                        OLD_POINTER_SPACE);
3062   if (!maybe_result->To(&site)) return maybe_result;
3063   site->Initialize();
3064 
3065   // Link the site
3066   site->set_weak_next(allocation_sites_list());
3067   set_allocation_sites_list(site);
3068   return site;
3069 }
3070 
3071 
3072 MaybeObject* Heap::CreateOddball(const char* to_string,
3073                                  Object* to_number,
3074                                  byte kind) {
3075   Object* result;
3076   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
3077     if (!maybe_result->ToObject(&result)) return maybe_result;
3078   }
3079   return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
3080 }
3081 
3082 
3083 bool Heap::CreateApiObjects() {
3084   Object* obj;
3085 
3086   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3087     if (!maybe_obj->ToObject(&obj)) return false;
3088   }
3089   // Don't use Smi-only elements optimizations for objects with the neander
3090   // map. There are too many cases where element values are set directly with a
3091   // bottleneck to trap the Smi-only -> fast elements transition, and there
3092   // appears to be no benefit in optimizing this case.
3093   Map* new_neander_map = Map::cast(obj);
3094   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
3095   set_neander_map(new_neander_map);
3096 
3097   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
3098     if (!maybe_obj->ToObject(&obj)) return false;
3099   }
3100   Object* elements;
3101   { MaybeObject* maybe_elements = AllocateFixedArray(2);
3102     if (!maybe_elements->ToObject(&elements)) return false;
3103   }
3104   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
3105   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
3106   set_message_listeners(JSObject::cast(obj));
3107 
3108   return true;
3109 }
3110 
3111 
3112 void Heap::CreateJSEntryStub() {
3113   JSEntryStub stub;
3114   set_js_entry_code(*stub.GetCode(isolate()));
3115 }
3116 
3117 
3118 void Heap::CreateJSConstructEntryStub() {
3119   JSConstructEntryStub stub;
3120   set_js_construct_entry_code(*stub.GetCode(isolate()));
3121 }
3122 
3123 
3124 void Heap::CreateFixedStubs() {
3125   // Here we create roots for fixed stubs. They are needed at GC
3126   // for cooking and uncooking (check out frames.cc).
3127   // This eliminates the need for a dictionary lookup in the
3128   // stub cache for these stubs.
3129   HandleScope scope(isolate());
3130   // gcc-4.4 has problem generating correct code of following snippet:
3131   // {  JSEntryStub stub;
3132   //    js_entry_code_ = *stub.GetCode();
3133   // }
3134   // {  JSConstructEntryStub stub;
3135   //    js_construct_entry_code_ = *stub.GetCode();
3136   // }
3137   // To work around the problem, make separate functions without inlining.
3138   Heap::CreateJSEntryStub();
3139   Heap::CreateJSConstructEntryStub();
3140 
3141   // Create stubs that should be there, so we don't unexpectedly have to
3142   // create them if we need them during the creation of another stub.
3143   // Stub creation mixes raw pointers and handles in an unsafe manner so
3144   // we cannot create stubs while we are creating stubs.
3145   CodeStub::GenerateStubsAheadOfTime(isolate());
3146 }
3147 
3148 
3149 void Heap::CreateStubsRequiringBuiltins() {
3150   HandleScope scope(isolate());
3151   CodeStub::GenerateStubsRequiringBuiltinsAheadOfTime(isolate());
3152 }
3153 
3154 
3155 bool Heap::CreateInitialObjects() {
3156   Object* obj;
3157 
3158   // The -0 value must be set before NumberFromDouble works.
3159   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3160     if (!maybe_obj->ToObject(&obj)) return false;
3161   }
3162   set_minus_zero_value(HeapNumber::cast(obj));
3163   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3164 
3165   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3166     if (!maybe_obj->ToObject(&obj)) return false;
3167   }
3168   set_nan_value(HeapNumber::cast(obj));
3169 
3170   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3171     if (!maybe_obj->ToObject(&obj)) return false;
3172   }
3173   set_infinity_value(HeapNumber::cast(obj));
3174 
3175   // The hole has not been created yet, but we want to put something
3176   // predictable in the gaps in the string table, so let's make that Smi zero.
3177   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3178 
3179   // Allocate initial string table.
3180   { MaybeObject* maybe_obj =
3181         StringTable::Allocate(this, kInitialStringTableSize);
3182     if (!maybe_obj->ToObject(&obj)) return false;
3183   }
3184   // Don't use set_string_table() due to asserts.
3185   roots_[kStringTableRootIndex] = obj;
3186 
3187   // Finish initializing oddballs after creating the string table.
3188   { MaybeObject* maybe_obj =
3189         undefined_value()->Initialize(this,
3190                                       "undefined",
3191                                       nan_value(),
3192                                       Oddball::kUndefined);
3193     if (!maybe_obj->ToObject(&obj)) return false;
3194   }
3195 
3196   // Initialize the null_value.
3197   { MaybeObject* maybe_obj = null_value()->Initialize(
3198       this, "null", Smi::FromInt(0), Oddball::kNull);
3199     if (!maybe_obj->ToObject(&obj)) return false;
3200   }
3201 
3202   { MaybeObject* maybe_obj = CreateOddball("true",
3203                                            Smi::FromInt(1),
3204                                            Oddball::kTrue);
3205     if (!maybe_obj->ToObject(&obj)) return false;
3206   }
3207   set_true_value(Oddball::cast(obj));
3208 
3209   { MaybeObject* maybe_obj = CreateOddball("false",
3210                                            Smi::FromInt(0),
3211                                            Oddball::kFalse);
3212     if (!maybe_obj->ToObject(&obj)) return false;
3213   }
3214   set_false_value(Oddball::cast(obj));
3215 
3216   { MaybeObject* maybe_obj = CreateOddball("hole",
3217                                            Smi::FromInt(-1),
3218                                            Oddball::kTheHole);
3219     if (!maybe_obj->ToObject(&obj)) return false;
3220   }
3221   set_the_hole_value(Oddball::cast(obj));
3222 
3223   { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3224                                            Smi::FromInt(-1),
3225                                            Oddball::kUninitialized);
3226     if (!maybe_obj->ToObject(&obj)) return false;
3227   }
3228   set_uninitialized_value(Oddball::cast(obj));
3229 
3230   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3231                                            Smi::FromInt(-4),
3232                                            Oddball::kArgumentMarker);
3233     if (!maybe_obj->ToObject(&obj)) return false;
3234   }
3235   set_arguments_marker(Oddball::cast(obj));
3236 
3237   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3238                                            Smi::FromInt(-2),
3239                                            Oddball::kOther);
3240     if (!maybe_obj->ToObject(&obj)) return false;
3241   }
3242   set_no_interceptor_result_sentinel(obj);
3243 
3244   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3245                                            Smi::FromInt(-3),
3246                                            Oddball::kOther);
3247     if (!maybe_obj->ToObject(&obj)) return false;
3248   }
3249   set_termination_exception(obj);
3250 
3251   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3252     { MaybeObject* maybe_obj =
3253           InternalizeUtf8String(constant_string_table[i].contents);
3254       if (!maybe_obj->ToObject(&obj)) return false;
3255     }
3256     roots_[constant_string_table[i].index] = String::cast(obj);
3257   }
3258 
3259   // Allocate the hidden string which is used to identify the hidden properties
3260   // in JSObjects. The hash code has a special value so that it will not match
3261   // the empty string when searching for the property. It cannot be part of the
3262   // loop above because it needs to be allocated manually with the special
3263   // hash code in place. The hash code for the hidden_string is zero to ensure
3264   // that it will always be at the first entry in property descriptors.
3265   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3266       OneByteVector("", 0), String::kEmptyStringHash);
3267     if (!maybe_obj->ToObject(&obj)) return false;
3268   }
3269   hidden_string_ = String::cast(obj);
3270 
3271   // Allocate the code_stubs dictionary. The initial size is set to avoid
3272   // expanding the dictionary during bootstrapping.
3273   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3274     if (!maybe_obj->ToObject(&obj)) return false;
3275   }
3276   set_code_stubs(UnseededNumberDictionary::cast(obj));
3277 
3278 
3279   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3280   // is set to avoid expanding the dictionary during bootstrapping.
3281   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3282     if (!maybe_obj->ToObject(&obj)) return false;
3283   }
3284   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3285 
3286   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3287     if (!maybe_obj->ToObject(&obj)) return false;
3288   }
3289   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3290 
3291   set_instanceof_cache_function(Smi::FromInt(0));
3292   set_instanceof_cache_map(Smi::FromInt(0));
3293   set_instanceof_cache_answer(Smi::FromInt(0));
3294 
3295   CreateFixedStubs();
3296 
3297   // Allocate the dictionary of intrinsic function names.
3298   { MaybeObject* maybe_obj =
3299         NameDictionary::Allocate(this, Runtime::kNumFunctions);
3300     if (!maybe_obj->ToObject(&obj)) return false;
3301   }
3302   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3303                                                                        obj);
3304     if (!maybe_obj->ToObject(&obj)) return false;
3305   }
3306   set_intrinsic_function_names(NameDictionary::cast(obj));
3307 
3308   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3309     if (!maybe_obj->ToObject(&obj)) return false;
3310   }
3311   set_number_string_cache(FixedArray::cast(obj));
3312 
3313   // Allocate cache for single character one byte strings.
3314   { MaybeObject* maybe_obj =
3315         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3316     if (!maybe_obj->ToObject(&obj)) return false;
3317   }
3318   set_single_character_string_cache(FixedArray::cast(obj));
3319 
3320   // Allocate cache for string split.
3321   { MaybeObject* maybe_obj = AllocateFixedArray(
3322       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3323     if (!maybe_obj->ToObject(&obj)) return false;
3324   }
3325   set_string_split_cache(FixedArray::cast(obj));
3326 
3327   { MaybeObject* maybe_obj = AllocateFixedArray(
3328       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3329     if (!maybe_obj->ToObject(&obj)) return false;
3330   }
3331   set_regexp_multiple_cache(FixedArray::cast(obj));
3332 
3333   // Allocate cache for external strings pointing to native source code.
3334   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3335     if (!maybe_obj->ToObject(&obj)) return false;
3336   }
3337   set_natives_source_cache(FixedArray::cast(obj));
3338 
3339   // Allocate object to hold object observation state.
3340   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3341     if (!maybe_obj->ToObject(&obj)) return false;
3342   }
3343   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3344     if (!maybe_obj->ToObject(&obj)) return false;
3345   }
3346   set_observation_state(JSObject::cast(obj));
3347 
3348   { MaybeObject* maybe_obj = AllocateSymbol();
3349     if (!maybe_obj->ToObject(&obj)) return false;
3350   }
3351   Symbol::cast(obj)->set_is_private(true);
3352   set_frozen_symbol(Symbol::cast(obj));
3353 
3354   { MaybeObject* maybe_obj = AllocateSymbol();
3355     if (!maybe_obj->ToObject(&obj)) return false;
3356   }
3357   Symbol::cast(obj)->set_is_private(true);
3358   set_elements_transition_symbol(Symbol::cast(obj));
3359 
3360   { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3361     if (!maybe_obj->ToObject(&obj)) return false;
3362   }
3363   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3364   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3365 
3366   { MaybeObject* maybe_obj = AllocateSymbol();
3367     if (!maybe_obj->ToObject(&obj)) return false;
3368   }
3369   Symbol::cast(obj)->set_is_private(true);
3370   set_observed_symbol(Symbol::cast(obj));
3371 
3372   // Handling of script id generation is in Factory::NewScript.
3373   set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3374 
3375   // Initialize keyed lookup cache.
3376   isolate_->keyed_lookup_cache()->Clear();
3377 
3378   // Initialize context slot cache.
3379   isolate_->context_slot_cache()->Clear();
3380 
3381   // Initialize descriptor cache.
3382   isolate_->descriptor_lookup_cache()->Clear();
3383 
3384   // Initialize compilation cache.
3385   isolate_->compilation_cache()->Clear();
3386 
3387   return true;
3388 }
3389 
3390 
3391 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3392   RootListIndex writable_roots[] = {
3393     kStoreBufferTopRootIndex,
3394     kStackLimitRootIndex,
3395     kNumberStringCacheRootIndex,
3396     kInstanceofCacheFunctionRootIndex,
3397     kInstanceofCacheMapRootIndex,
3398     kInstanceofCacheAnswerRootIndex,
3399     kCodeStubsRootIndex,
3400     kNonMonomorphicCacheRootIndex,
3401     kPolymorphicCodeCacheRootIndex,
3402     kLastScriptIdRootIndex,
3403     kEmptyScriptRootIndex,
3404     kRealStackLimitRootIndex,
3405     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3406     kConstructStubDeoptPCOffsetRootIndex,
3407     kGetterStubDeoptPCOffsetRootIndex,
3408     kSetterStubDeoptPCOffsetRootIndex,
3409     kStringTableRootIndex,
3410   };
3411 
3412   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3413     if (root_index == writable_roots[i])
3414       return true;
3415   }
3416   return false;
3417 }
3418 
3419 
3420 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3421   return !RootCanBeWrittenAfterInitialization(root_index) &&
3422       !InNewSpace(roots_array_start()[root_index]);
3423 }
3424 
3425 
3426 Object* RegExpResultsCache::Lookup(Heap* heap,
3427                                    String* key_string,
3428                                    Object* key_pattern,
3429                                    ResultsCacheType type) {
3430   FixedArray* cache;
3431   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3432   if (type == STRING_SPLIT_SUBSTRINGS) {
3433     ASSERT(key_pattern->IsString());
3434     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3435     cache = heap->string_split_cache();
3436   } else {
3437     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3438     ASSERT(key_pattern->IsFixedArray());
3439     cache = heap->regexp_multiple_cache();
3440   }
3441 
3442   uint32_t hash = key_string->Hash();
3443   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3444       ~(kArrayEntriesPerCacheEntry - 1));
3445   if (cache->get(index + kStringOffset) == key_string &&
3446       cache->get(index + kPatternOffset) == key_pattern) {
3447     return cache->get(index + kArrayOffset);
3448   }
3449   index =
3450       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3451   if (cache->get(index + kStringOffset) == key_string &&
3452       cache->get(index + kPatternOffset) == key_pattern) {
3453     return cache->get(index + kArrayOffset);
3454   }
3455   return Smi::FromInt(0);
3456 }
3457 
3458 
3459 void RegExpResultsCache::Enter(Heap* heap,
3460                                String* key_string,
3461                                Object* key_pattern,
3462                                FixedArray* value_array,
3463                                ResultsCacheType type) {
3464   FixedArray* cache;
3465   if (!key_string->IsInternalizedString()) return;
3466   if (type == STRING_SPLIT_SUBSTRINGS) {
3467     ASSERT(key_pattern->IsString());
3468     if (!key_pattern->IsInternalizedString()) return;
3469     cache = heap->string_split_cache();
3470   } else {
3471     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3472     ASSERT(key_pattern->IsFixedArray());
3473     cache = heap->regexp_multiple_cache();
3474   }
3475 
3476   uint32_t hash = key_string->Hash();
3477   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3478       ~(kArrayEntriesPerCacheEntry - 1));
3479   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3480     cache->set(index + kStringOffset, key_string);
3481     cache->set(index + kPatternOffset, key_pattern);
3482     cache->set(index + kArrayOffset, value_array);
3483   } else {
3484     uint32_t index2 =
3485         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3486     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3487       cache->set(index2 + kStringOffset, key_string);
3488       cache->set(index2 + kPatternOffset, key_pattern);
3489       cache->set(index2 + kArrayOffset, value_array);
3490     } else {
3491       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3492       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3493       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3494       cache->set(index + kStringOffset, key_string);
3495       cache->set(index + kPatternOffset, key_pattern);
3496       cache->set(index + kArrayOffset, value_array);
3497     }
3498   }
3499   // If the array is a reasonably short list of substrings, convert it into a
3500   // list of internalized strings.
3501   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3502     for (int i = 0; i < value_array->length(); i++) {
3503       String* str = String::cast(value_array->get(i));
3504       Object* internalized_str;
3505       MaybeObject* maybe_string = heap->InternalizeString(str);
3506       if (maybe_string->ToObject(&internalized_str)) {
3507         value_array->set(i, internalized_str);
3508       }
3509     }
3510   }
3511   // Convert backing store to a copy-on-write array.
3512   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3513 }
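
// Lookup() and Enter() above implement a tiny two-way cache: the hash picks a
// primary entry aligned to kArrayEntriesPerCacheEntry, and a single secondary
// probe one entry further on (modulo the table size) is tried before anything
// is evicted.  A standalone sketch of the same probing scheme over plain
// integers follows; the names and sizes are hypothetical, chosen only so that
// both sizes stay powers of two, which the masking requires.
#include <stdint.h>

namespace sketch_two_way_cache {

static const uint32_t kCacheSize = 256;   // must be a power of two
static const uint32_t kEntrySize = 4;     // fields per entry, also a power of two
static uint32_t keys[kCacheSize];         // 0 means "empty", like Smi zero above
static uint32_t values[kCacheSize];

static uint32_t PrimarySlot(uint32_t hash) {
  return (hash & (kCacheSize - 1)) & ~(kEntrySize - 1);
}

static uint32_t SecondarySlot(uint32_t primary) {
  return (primary + kEntrySize) & (kCacheSize - 1);
}

static bool Lookup(uint32_t hash, uint32_t key, uint32_t* value_out) {
  uint32_t index = PrimarySlot(hash);
  if (keys[index] == key) { *value_out = values[index]; return true; }
  index = SecondarySlot(index);
  if (keys[index] == key) { *value_out = values[index]; return true; }
  return false;
}

static void Enter(uint32_t hash, uint32_t key, uint32_t value) {
  uint32_t primary = PrimarySlot(hash);
  uint32_t secondary = SecondarySlot(primary);
  if (keys[primary] == 0) {
    keys[primary] = key;
    values[primary] = value;
  } else if (keys[secondary] == 0) {
    keys[secondary] = key;
    values[secondary] = value;
  } else {
    keys[secondary] = 0;          // evict the secondary entry...
    values[secondary] = 0;
    keys[primary] = key;          // ...and take over the primary one
    values[primary] = value;
  }
}

}  // namespace sketch_two_way_cache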
3514 
3515 
3516 void RegExpResultsCache::Clear(FixedArray* cache) {
3517   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3518     cache->set(i, Smi::FromInt(0));
3519   }
3520 }
3521 
3522 
3523 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3524   MaybeObject* maybe_obj =
3525       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3526   return maybe_obj;
3527 }
3528 
3529 
3530 int Heap::FullSizeNumberStringCacheLength() {
3531   // Compute the size of the number string cache based on the max newspace size.
3532   // The number string cache has a minimum size based on twice the initial cache
3533   // size to ensure that it is bigger after being made 'full size'.
3534   int number_string_cache_size = max_semispace_size_ / 512;
3535   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3536                                  Min(0x4000, number_string_cache_size));
3537   // There is a string and a number per entry so the length is twice the number
3538   // of entries.
3539   return number_string_cache_size * 2;
3540 }
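
// The full-size cache length computed above is just a clamp: one 512th of the
// maximum semispace size, kept between twice the initial cache size and
// 0x4000 entries, then doubled because each entry is a number/string pair.
// A short standalone sketch of that arithmetic, with a worked example for an
// assumed 16 MB semispace:
namespace sketch_number_cache_size {

static int FullCacheLength(int max_semispace_size, int initial_cache_size) {
  int entries = max_semispace_size / 512;
  if (entries > 0x4000) entries = 0x4000;                  // upper clamp
  if (entries < initial_cache_size * 2) {
    entries = initial_cache_size * 2;                      // lower clamp
  }
  return entries * 2;                // two slots (number, string) per entry
}

// For a 16 MB semispace: 16777216 / 512 = 32768, clamped to 0x4000 = 16384
// entries, so the resulting FixedArray length is 32768.

}  // namespace sketch_number_cache_size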
3541 
3542 
3543 void Heap::AllocateFullSizeNumberStringCache() {
3544   // The idea is to have a small number string cache in the snapshot to keep
3545   // boot-time memory usage down.  If we have already expanded the number
3546   // string cache while creating the snapshot, that plan has failed.
3547   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3548   MaybeObject* maybe_obj =
3549       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3550   Object* new_cache;
3551   if (maybe_obj->ToObject(&new_cache)) {
3552     // We don't bother to repopulate the cache with entries from the old cache.
3553     // It will be repopulated soon enough with new strings.
3554     set_number_string_cache(FixedArray::cast(new_cache));
3555   }
3556   // If allocation fails then we just return without doing anything.  It is only
3557   // a cache, so best effort is OK here.
3558 }
3559 
3560 
3561 void Heap::FlushNumberStringCache() {
3562   // Flush the number to string cache.
3563   int len = number_string_cache()->length();
3564   for (int i = 0; i < len; i++) {
3565     number_string_cache()->set_undefined(i);
3566   }
3567 }
3568 
3569 
3570 static inline int double_get_hash(double d) {
3571   DoubleRepresentation rep(d);
3572   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3573 }
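
// double_get_hash() folds the 64-bit pattern of the double into an int by
// XOR-ing its two 32-bit halves.  A standalone sketch of the same folding,
// reading the bits with memcpy instead of the DoubleRepresentation helper:
#include <stdint.h>
#include <string.h>

namespace sketch_double_hash {

static int HashDoubleBits(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));   // hash the bit pattern, not the numeric value
  return static_cast<int>(bits) ^ static_cast<int>(bits >> 32);
}

}  // namespace sketch_double_hash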
3574 
3575 
3576 static inline int smi_get_hash(Smi* smi) {
3577   return smi->value();
3578 }
3579 
3580 
3581 Object* Heap::GetNumberStringCache(Object* number) {
3582   int hash;
3583   int mask = (number_string_cache()->length() >> 1) - 1;
3584   if (number->IsSmi()) {
3585     hash = smi_get_hash(Smi::cast(number)) & mask;
3586   } else {
3587     hash = double_get_hash(number->Number()) & mask;
3588   }
3589   Object* key = number_string_cache()->get(hash * 2);
3590   if (key == number) {
3591     return String::cast(number_string_cache()->get(hash * 2 + 1));
3592   } else if (key->IsHeapNumber() &&
3593              number->IsHeapNumber() &&
3594              key->Number() == number->Number()) {
3595     return String::cast(number_string_cache()->get(hash * 2 + 1));
3596   }
3597   return undefined_value();
3598 }
3599 
3600 
3601 void Heap::SetNumberStringCache(Object* number, String* string) {
3602   int hash;
3603   int mask = (number_string_cache()->length() >> 1) - 1;
3604   if (number->IsSmi()) {
3605     hash = smi_get_hash(Smi::cast(number)) & mask;
3606   } else {
3607     hash = double_get_hash(number->Number()) & mask;
3608   }
3609   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3610       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3611     // The first time we have a hash collision, we move to the full sized
3612     // number string cache.
3613     AllocateFullSizeNumberStringCache();
3614     return;
3615   }
3616   number_string_cache()->set(hash * 2, number);
3617   number_string_cache()->set(hash * 2 + 1, string);
3618 }
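
// The number string cache above stores key/value pairs side by side in one
// FixedArray: entry i occupies slots 2*i (the number) and 2*i + 1 (its
// string), and the entry index is the hash masked by the power-of-two entry
// count.  A standalone sketch of that layout follows, simplified to double
// keys and C strings, with a hypothetical size; the grow-to-full-size step
// taken on the first collision is omitted and a collision simply overwrites.
#include <stdint.h>
#include <string.h>
#include <stddef.h>

namespace sketch_number_string_cache {

static const int kEntries = 128;             // power of two, so & works as modulo
static double number_slots[kEntries];        // slot 2*i of the real FixedArray
static const char* string_slots[kEntries];   // slot 2*i + 1

static int Slot(double number) {
  uint64_t bits;                             // fold the bit pattern, as above
  memcpy(&bits, &number, sizeof(bits));
  uint32_t hash =
      static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  return static_cast<int>(hash & (kEntries - 1));
}

static const char* Get(double number) {
  int slot = Slot(number);
  if (string_slots[slot] != NULL && number_slots[slot] == number) {
    return string_slots[slot];               // hit: the same number is cached here
  }
  return NULL;                               // miss
}

static void Set(double number, const char* string) {
  int slot = Slot(number);                   // the real code grows the cache on a
  number_slots[slot] = number;               // collision; this sketch overwrites
  string_slots[slot] = string;
}

}  // namespace sketch_number_string_cache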
3619 
3620 
3621 MaybeObject* Heap::NumberToString(Object* number,
3622                                   bool check_number_string_cache,
3623                                   PretenureFlag pretenure) {
3624   isolate_->counters()->number_to_string_runtime()->Increment();
3625   if (check_number_string_cache) {
3626     Object* cached = GetNumberStringCache(number);
3627     if (cached != undefined_value()) {
3628       return cached;
3629     }
3630   }
3631 
3632   char arr[100];
3633   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3634   const char* str;
3635   if (number->IsSmi()) {
3636     int num = Smi::cast(number)->value();
3637     str = IntToCString(num, buffer);
3638   } else {
3639     double num = HeapNumber::cast(number)->value();
3640     str = DoubleToCString(num, buffer);
3641   }
3642 
3643   Object* js_string;
3644   MaybeObject* maybe_js_string =
3645       AllocateStringFromOneByte(CStrVector(str), pretenure);
3646   if (maybe_js_string->ToObject(&js_string)) {
3647     SetNumberStringCache(number, String::cast(js_string));
3648   }
3649   return maybe_js_string;
3650 }
3651 
3652 
3653 MaybeObject* Heap::Uint32ToString(uint32_t value,
3654                                   bool check_number_string_cache) {
3655   Object* number;
3656   MaybeObject* maybe = NumberFromUint32(value);
3657   if (!maybe->To<Object>(&number)) return maybe;
3658   return NumberToString(number, check_number_string_cache);
3659 }
3660 
3661 
3662 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3663   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3664 }
3665 
3666 
3667 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3668     ExternalArrayType array_type) {
3669   switch (array_type) {
3670     case kExternalByteArray:
3671       return kExternalByteArrayMapRootIndex;
3672     case kExternalUnsignedByteArray:
3673       return kExternalUnsignedByteArrayMapRootIndex;
3674     case kExternalShortArray:
3675       return kExternalShortArrayMapRootIndex;
3676     case kExternalUnsignedShortArray:
3677       return kExternalUnsignedShortArrayMapRootIndex;
3678     case kExternalIntArray:
3679       return kExternalIntArrayMapRootIndex;
3680     case kExternalUnsignedIntArray:
3681       return kExternalUnsignedIntArrayMapRootIndex;
3682     case kExternalFloatArray:
3683       return kExternalFloatArrayMapRootIndex;
3684     case kExternalDoubleArray:
3685       return kExternalDoubleArrayMapRootIndex;
3686     case kExternalPixelArray:
3687       return kExternalPixelArrayMapRootIndex;
3688     default:
3689       UNREACHABLE();
3690       return kUndefinedValueRootIndex;
3691   }
3692 }
3693 
3694 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3695     ElementsKind elementsKind) {
3696   switch (elementsKind) {
3697     case EXTERNAL_BYTE_ELEMENTS:
3698       return kEmptyExternalByteArrayRootIndex;
3699     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3700       return kEmptyExternalUnsignedByteArrayRootIndex;
3701     case EXTERNAL_SHORT_ELEMENTS:
3702       return kEmptyExternalShortArrayRootIndex;
3703     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3704       return kEmptyExternalUnsignedShortArrayRootIndex;
3705     case EXTERNAL_INT_ELEMENTS:
3706       return kEmptyExternalIntArrayRootIndex;
3707     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3708       return kEmptyExternalUnsignedIntArrayRootIndex;
3709     case EXTERNAL_FLOAT_ELEMENTS:
3710       return kEmptyExternalFloatArrayRootIndex;
3711     case EXTERNAL_DOUBLE_ELEMENTS:
3712       return kEmptyExternalDoubleArrayRootIndex;
3713     case EXTERNAL_PIXEL_ELEMENTS:
3714       return kEmptyExternalPixelArrayRootIndex;
3715     default:
3716       UNREACHABLE();
3717       return kUndefinedValueRootIndex;
3718   }
3719 }
3720 
3721 
3722 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3723   return ExternalArray::cast(
3724       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3725 }
3726 
3727 
3728 
3729 
3730 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3731   // We need to distinguish the minus zero value and this cannot be
3732   // done after conversion to int. Doing this by comparing bit
3733   // patterns is faster than using fpclassify() et al.
3734   static const DoubleRepresentation minus_zero(-0.0);
3735 
3736   DoubleRepresentation rep(value);
3737   if (rep.bits == minus_zero.bits) {
3738     return AllocateHeapNumber(-0.0, pretenure);
3739   }
3740 
3741   int int_value = FastD2I(value);
3742   if (value == int_value && Smi::IsValid(int_value)) {
3743     return Smi::FromInt(int_value);
3744   }
3745 
3746   // Materialize the value in the heap.
3747   return AllocateHeapNumber(value, pretenure);
3748 }
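
// NumberFromDouble() must keep -0.0 as a heap number because a Smi cannot
// represent it, and comparing raw bit patterns is the cheapest way to tell
// -0.0 from +0.0 (they compare equal as doubles).  A standalone sketch of the
// two checks follows; the 31-bit Smi range is an assumption standing in for
// Smi::IsValid().
#include <stdint.h>
#include <string.h>

namespace sketch_number_from_double {

static bool IsMinusZero(double value) {
  uint64_t bits;
  uint64_t minus_zero_bits;
  double minus_zero = -0.0;
  memcpy(&bits, &value, sizeof(bits));
  memcpy(&minus_zero_bits, &minus_zero, sizeof(minus_zero_bits));
  return bits == minus_zero_bits;     // -0.0 == 0.0 is true, so compare the bits
}

static bool FitsInSmi(double value) {
  if (value != value) return false;             // NaN never fits
  if (IsMinusZero(value)) return false;         // must stay a heap number
  // Assumed 31-bit Smi range (32-bit targets); the real check is Smi::IsValid().
  if (value < -(1 << 30) || value > (1 << 30) - 1) return false;
  int int_value = static_cast<int>(value);
  return value == static_cast<double>(int_value);
}

}  // namespace sketch_number_from_double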
3749 
3750 
3751 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3752   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3753   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3754   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3755   Foreign* result;
3756   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3757   if (!maybe_result->To(&result)) return maybe_result;
3758   result->set_foreign_address(address);
3759   return result;
3760 }
3761 
3762 
3763 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3764   SharedFunctionInfo* share;
3765   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3766   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3767 
3768   // Set pointer fields.
3769   share->set_name(name);
3770   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3771   share->set_code(illegal);
3772   share->set_optimized_code_map(Smi::FromInt(0));
3773   share->set_scope_info(ScopeInfo::Empty(isolate_));
3774   Code* construct_stub =
3775       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3776   share->set_construct_stub(construct_stub);
3777   share->set_instance_class_name(Object_string());
3778   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3779   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3780   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3781   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3782   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3783   share->set_ast_node_count(0);
3784   share->set_counters(0);
3785 
3786   // Set integer fields (smi or int, depending on the architecture).
3787   share->set_length(0);
3788   share->set_formal_parameter_count(0);
3789   share->set_expected_nof_properties(0);
3790   share->set_num_literals(0);
3791   share->set_start_position_and_type(0);
3792   share->set_end_position(0);
3793   share->set_function_token_position(0);
3794   // All compiler hints default to false or 0.
3795   share->set_compiler_hints(0);
3796   share->set_opt_count_and_bailout_reason(0);
3797 
3798   return share;
3799 }
3800 
3801 
3802 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3803                                            JSArray* arguments,
3804                                            int start_position,
3805                                            int end_position,
3806                                            Object* script,
3807                                            Object* stack_trace,
3808                                            Object* stack_frames) {
3809   Object* result;
3810   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3811     if (!maybe_result->ToObject(&result)) return maybe_result;
3812   }
3813   JSMessageObject* message = JSMessageObject::cast(result);
3814   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3815   message->initialize_elements();
3816   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3817   message->set_type(type);
3818   message->set_arguments(arguments);
3819   message->set_start_position(start_position);
3820   message->set_end_position(end_position);
3821   message->set_script(script);
3822   message->set_stack_trace(stack_trace);
3823   message->set_stack_frames(stack_frames);
3824   return result;
3825 }
3826 
3827 
3828 
3829 // Returns true for a character in a range.  Both limits are inclusive.
3830 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3831   // This makes use of the unsigned wraparound.
3832   return character - from <= to - from;
3833 }
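
// The single unsigned comparison in Between() covers both bounds at once:
// when character < from, the subtraction wraps around to a huge unsigned
// value and the <= test fails.  A standalone sketch of the trick next to the
// equivalent two-comparison form it replaces:
#include <stdint.h>

namespace sketch_unsigned_range_check {

static bool InRangeWrap(uint32_t c, uint32_t from, uint32_t to) {
  return c - from <= to - from;      // one compare; wraparound handles c < from
}

static bool InRangeNaive(uint32_t c, uint32_t from, uint32_t to) {
  return from <= c && c <= to;       // same result with two compares
}

}  // namespace sketch_unsigned_range_check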
3834 
3835 
3836 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3837     Heap* heap,
3838     uint16_t c1,
3839     uint16_t c2) {
3840   String* result;
3841   // Numeric strings have a different hash algorithm not known by
3842   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3843   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3844       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3845     return result;
3846   // Now that we know the length is 2, we might as well make use of that
3847   // fact when building the new string.
3848   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3849     // We can do this.
3850     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
3851     Object* result;
3852     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3853       if (!maybe_result->ToObject(&result)) return maybe_result;
3854     }
3855     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3856     dest[0] = static_cast<uint8_t>(c1);
3857     dest[1] = static_cast<uint8_t>(c2);
3858     return result;
3859   } else {
3860     Object* result;
3861     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3862       if (!maybe_result->ToObject(&result)) return maybe_result;
3863     }
3864     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3865     dest[0] = c1;
3866     dest[1] = c2;
3867     return result;
3868   }
3869 }
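
// MakeOrFindTwoCharacterString() picks a one-byte string whenever both
// character codes fit in a byte, and it can test both at once because
// (c1 | c2) only stays within kMaxOneByteCharCode when each code does; that
// is what the IsPowerOf2(kMaxOneByteCharCodeU + 1) assertion guarantees.  A
// standalone sketch of that test with the one-byte limit written out
// explicitly (0xFF, assuming Latin-1 one-byte strings):
#include <stdint.h>

namespace sketch_two_char_encoding {

static const unsigned kMaxOneByteCharCode = 0xFF;

static bool BothFitOneByte(uint16_t c1, uint16_t c2) {
  // Equivalent to (c1 <= 0xFF && c2 <= 0xFF) because 0xFF + 1 is a power of two.
  return static_cast<unsigned>(c1 | c2) <= kMaxOneByteCharCode;
}

}  // namespace sketch_two_char_encoding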
3870 
3871 
3872 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3873   int first_length = first->length();
3874   if (first_length == 0) {
3875     return second;
3876   }
3877 
3878   int second_length = second->length();
3879   if (second_length == 0) {
3880     return first;
3881   }
3882 
3883   int length = first_length + second_length;
3884 
3885   // Optimization for 2-byte strings often used as keys in a decompression
3886   // dictionary.  Check whether we already have the string in the string
3887   // table to prevent creation of many unnecessary strings.
3888   if (length == 2) {
3889     uint16_t c1 = first->Get(0);
3890     uint16_t c2 = second->Get(0);
3891     return MakeOrFindTwoCharacterString(this, c1, c2);
3892   }
3893 
3894   bool first_is_one_byte = first->IsOneByteRepresentation();
3895   bool second_is_one_byte = second->IsOneByteRepresentation();
3896   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3897   // Make sure that an out of memory exception is thrown if the length
3898   // of the new cons string is too large.
3899   if (length > String::kMaxLength || length < 0) {
3900     isolate()->context()->mark_out_of_memory();
3901     return Failure::OutOfMemoryException(0x4);
3902   }
3903 
3904   bool is_one_byte_data_in_two_byte_string = false;
3905   if (!is_one_byte) {
3906     // At least one of the strings uses two-byte representation so we
3907     // can't use the fast case code for short ASCII strings below, but
3908     // we can try to save memory if all chars actually fit in ASCII.
3909     is_one_byte_data_in_two_byte_string =
3910         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3911     if (is_one_byte_data_in_two_byte_string) {
3912       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3913     }
3914   }
3915 
3916   // If the resulting string is small make a flat string.
3917   if (length < ConsString::kMinLength) {
3918     // Note that neither of the two inputs can be a slice because:
3919     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3920     ASSERT(first->IsFlat());
3921     ASSERT(second->IsFlat());
3922     if (is_one_byte) {
3923       Object* result;
3924       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3925         if (!maybe_result->ToObject(&result)) return maybe_result;
3926       }
3927       // Copy the characters into the new object.
3928       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3929       // Copy first part.
3930       const uint8_t* src;
3931       if (first->IsExternalString()) {
3932         src = ExternalAsciiString::cast(first)->GetChars();
3933       } else {
3934         src = SeqOneByteString::cast(first)->GetChars();
3935       }
3936       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3937       // Copy second part.
3938       if (second->IsExternalString()) {
3939         src = ExternalAsciiString::cast(second)->GetChars();
3940       } else {
3941         src = SeqOneByteString::cast(second)->GetChars();
3942       }
3943       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3944       return result;
3945     } else {
3946       if (is_one_byte_data_in_two_byte_string) {
3947         Object* result;
3948         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3949           if (!maybe_result->ToObject(&result)) return maybe_result;
3950         }
3951         // Copy the characters into the new object.
3952         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3953         String::WriteToFlat(first, dest, 0, first_length);
3954         String::WriteToFlat(second, dest + first_length, 0, second_length);
3955         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3956         return result;
3957       }
3958 
3959       Object* result;
3960       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3961         if (!maybe_result->ToObject(&result)) return maybe_result;
3962       }
3963       // Copy the characters into the new object.
3964       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3965       String::WriteToFlat(first, dest, 0, first_length);
3966       String::WriteToFlat(second, dest + first_length, 0, second_length);
3967       return result;
3968     }
3969   }
3970 
3971   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3972       cons_ascii_string_map() : cons_string_map();
3973 
3974   Object* result;
3975   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3976     if (!maybe_result->ToObject(&result)) return maybe_result;
3977   }
3978 
3979   DisallowHeapAllocation no_gc;
3980   ConsString* cons_string = ConsString::cast(result);
3981   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3982   cons_string->set_length(length);
3983   cons_string->set_hash_field(String::kEmptyHashField);
3984   cons_string->set_first(first, mode);
3985   cons_string->set_second(second, mode);
3986   return result;
3987 }
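
// AllocateConsString() only builds a real ConsString once the combined length
// reaches ConsString::kMinLength; shorter results are copied flat, and the
// target encoding is one byte unless a two-byte input actually contains
// two-byte data.  A standalone sketch of that decision follows; the
// kMinConsLength value is an assumed threshold used only for illustration.
namespace sketch_cons_decision {

enum Representation { FLAT_ONE_BYTE, FLAT_TWO_BYTE, CONS_ONE_BYTE, CONS_TWO_BYTE };

static const int kMinConsLength = 13;   // assumed stand-in for ConsString::kMinLength

static Representation Choose(int total_length,
                             bool both_inputs_one_byte,
                             bool two_byte_inputs_hold_only_one_byte_data) {
  bool one_byte =
      both_inputs_one_byte || two_byte_inputs_hold_only_one_byte_data;
  if (total_length < kMinConsLength) {
    // Short result: copy both parts into one flat sequential string.
    return one_byte ? FLAT_ONE_BYTE : FLAT_TWO_BYTE;
  }
  // Long result: build a cons node that merely points at the two parts.
  return one_byte ? CONS_ONE_BYTE : CONS_TWO_BYTE;
}

}  // namespace sketch_cons_decision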
3988 
3989 
3990 MaybeObject* Heap::AllocateSubString(String* buffer,
3991                                      int start,
3992                                      int end,
3993                                      PretenureFlag pretenure) {
3994   int length = end - start;
3995   if (length <= 0) {
3996     return empty_string();
3997   }
3998 
3999   // Make an attempt to flatten the buffer to reduce access time.
4000   buffer = buffer->TryFlattenGetString();
4001 
4002   if (length == 1) {
4003     return LookupSingleCharacterStringFromCode(buffer->Get(start));
4004   } else if (length == 2) {
4005     // Optimization for 2-byte strings often used as keys in a decompression
4006     // dictionary.  Check whether we already have the string in the string
4007     // table to prevent creation of many unnecessary strings.
4008     uint16_t c1 = buffer->Get(start);
4009     uint16_t c2 = buffer->Get(start + 1);
4010     return MakeOrFindTwoCharacterString(this, c1, c2);
4011   }
4012 
4013   if (!FLAG_string_slices ||
4014       !buffer->IsFlat() ||
4015       length < SlicedString::kMinLength ||
4016       pretenure == TENURED) {
4017     Object* result;
4018     // WriteToFlat takes care of the case when an indirect string has a
4019     // different encoding from its underlying string.  These encodings may
4020     // differ because of externalization.
4021     bool is_one_byte = buffer->IsOneByteRepresentation();
4022     { MaybeObject* maybe_result = is_one_byte
4023                                   ? AllocateRawOneByteString(length, pretenure)
4024                                   : AllocateRawTwoByteString(length, pretenure);
4025       if (!maybe_result->ToObject(&result)) return maybe_result;
4026     }
4027     String* string_result = String::cast(result);
4028     // Copy the characters into the new object.
4029     if (is_one_byte) {
4030       ASSERT(string_result->IsOneByteRepresentation());
4031       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
4032       String::WriteToFlat(buffer, dest, start, end);
4033     } else {
4034       ASSERT(string_result->IsTwoByteRepresentation());
4035       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
4036       String::WriteToFlat(buffer, dest, start, end);
4037     }
4038     return result;
4039   }
4040 
4041   ASSERT(buffer->IsFlat());
4042 #ifdef VERIFY_HEAP
4043   if (FLAG_verify_heap) {
4044     buffer->StringVerify();
4045   }
4046 #endif
4047 
4048   Object* result;
4049   // When slicing an indirect string we use its encoding for a newly created
4050   // slice and don't check the encoding of the underlying string.  This is safe
4051   // even if the encodings are different because of externalization.  If an
4052   // indirect ASCII string is pointing to a two-byte string, the two-byte char
4053   // codes of the underlying string must still fit into ASCII (because
4054   // externalization must not change char codes).
4055   { Map* map = buffer->IsOneByteRepresentation()
4056                  ? sliced_ascii_string_map()
4057                  : sliced_string_map();
4058     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4059     if (!maybe_result->ToObject(&result)) return maybe_result;
4060   }
4061 
4062   DisallowHeapAllocation no_gc;
4063   SlicedString* sliced_string = SlicedString::cast(result);
4064   sliced_string->set_length(length);
4065   sliced_string->set_hash_field(String::kEmptyHashField);
4066   if (buffer->IsConsString()) {
4067     ConsString* cons = ConsString::cast(buffer);
4068     ASSERT(cons->second()->length() == 0);
4069     sliced_string->set_parent(cons->first());
4070     sliced_string->set_offset(start);
4071   } else if (buffer->IsSlicedString()) {
4072     // Prevent nesting sliced strings.
4073     SlicedString* parent_slice = SlicedString::cast(buffer);
4074     sliced_string->set_parent(parent_slice->parent());
4075     sliced_string->set_offset(start + parent_slice->offset());
4076   } else {
4077     sliced_string->set_parent(buffer);
4078     sliced_string->set_offset(start);
4079   }
4080   ASSERT(sliced_string->parent()->IsSeqString() ||
4081          sliced_string->parent()->IsExternalString());
4082   return result;
4083 }
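
// AllocateSubString() never lets a slice point at another slice: slicing a
// SlicedString re-targets the new slice at the original parent and adds the
// offsets, so every slice is at most one hop away from a sequential or
// external string.  A standalone sketch of that offset composition with a
// hypothetical Slice struct backed by std::string:
#include <string>

namespace sketch_string_slice {

struct Slice {
  const std::string* parent;   // always a flat backing string, never a Slice
  int offset;
  int length;
};

static Slice MakeSlice(const std::string& flat, int start, int length) {
  Slice result = { &flat, start, length };
  return result;
}

static Slice MakeSlice(const Slice& buffer, int start, int length) {
  // Slicing an existing slice composes the offsets instead of nesting slices.
  Slice result = { buffer.parent, buffer.offset + start, length };
  return result;
}

}  // namespace sketch_string_slice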
4084 
4085 
4086 MaybeObject* Heap::AllocateExternalStringFromAscii(
4087     const ExternalAsciiString::Resource* resource) {
4088   size_t length = resource->length();
4089   if (length > static_cast<size_t>(String::kMaxLength)) {
4090     isolate()->context()->mark_out_of_memory();
4091     return Failure::OutOfMemoryException(0x5);
4092   }
4093 
4094   Map* map = external_ascii_string_map();
4095   Object* result;
4096   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4097     if (!maybe_result->ToObject(&result)) return maybe_result;
4098   }
4099 
4100   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
4101   external_string->set_length(static_cast<int>(length));
4102   external_string->set_hash_field(String::kEmptyHashField);
4103   external_string->set_resource(resource);
4104 
4105   return result;
4106 }
4107 
4108 
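// Allocates an external two-byte string backed by |resource|.  For short
// resources the characters are scanned, and if they all fit in one byte the
// external-string-with-one-byte-data map is used instead of the plain
// external string map.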
4109 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
4110     const ExternalTwoByteString::Resource* resource) {
4111   size_t length = resource->length();
4112   if (length > static_cast<size_t>(String::kMaxLength)) {
4113     isolate()->context()->mark_out_of_memory();
4114     return Failure::OutOfMemoryException(0x6);
4115   }
4116 
4117   // For small strings we check whether the resource contains only
4118   // one-byte characters.  If so, we use a different string map.
4119   static const size_t kOneByteCheckLengthLimit = 32;
4120   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
4121       String::IsOneByte(resource->data(), static_cast<int>(length));
4122   Map* map = is_one_byte ?
4123       external_string_with_one_byte_data_map() : external_string_map();
4124   Object* result;
4125   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4126     if (!maybe_result->ToObject(&result)) return maybe_result;
4127   }
4128 
4129   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
4130   external_string->set_length(static_cast<int>(length));
4131   external_string->set_hash_field(String::kEmptyHashField);
4132   external_string->set_resource(resource);
4133 
4134   return result;
4135 }
4136 
4137 
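// Returns a string of length one for the given character code.  One-byte
// codes are served from (and added to) the single character string cache;
// larger codes get a freshly allocated two-byte string.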
4138 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
4139   if (code <= String::kMaxOneByteCharCode) {
4140     Object* value = single_character_string_cache()->get(code);
4141     if (value != undefined_value()) return value;
4142 
4143     uint8_t buffer[1];
4144     buffer[0] = static_cast<uint8_t>(code);
4145     Object* result;
4146     MaybeObject* maybe_result =
4147         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
4148 
4149     if (!maybe_result->ToObject(&result)) return maybe_result;
4150     single_character_string_cache()->set(code, result);
4151     return result;
4152   }
4153 
4154   SeqTwoByteString* result;
4155   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
4156     if (!maybe_result->To<SeqTwoByteString>(&result)) return maybe_result;
4157   }
4158   result->SeqTwoByteStringSet(0, code);
4159   return result;
4160 }
4161 
4162 
4163 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
4164   if (length < 0 || length > ByteArray::kMaxLength) {
4165     return Failure::OutOfMemoryException(0x7);
4166   }
4167   int size = ByteArray::SizeFor(length);
4168   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4169   Object* result;
4170   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4171     if (!maybe_result->ToObject(&result)) return maybe_result;
4172   }
4173 
4174   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4175       byte_array_map());
4176   reinterpret_cast<ByteArray*>(result)->set_length(length);
4177   return result;
4178 }
4179 
4180 
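// Overwrites the range [addr, addr + size) with a filler object so that the
// heap remains iterable.  One- and two-pointer gaps use dedicated filler
// maps; larger gaps become FreeSpace objects that record their own size.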
4181 void Heap::CreateFillerObjectAt(Address addr, int size) {
4182   if (size == 0) return;
4183   HeapObject* filler = HeapObject::FromAddress(addr);
4184   if (size == kPointerSize) {
4185     filler->set_map_no_write_barrier(one_pointer_filler_map());
4186   } else if (size == 2 * kPointerSize) {
4187     filler->set_map_no_write_barrier(two_pointer_filler_map());
4188   } else {
4189     filler->set_map_no_write_barrier(free_space_map());
4190     FreeSpace::cast(filler)->set_size(size);
4191   }
4192 }
4193 
4194 
4195 MaybeObject* Heap::AllocateExternalArray(int length,
4196                                          ExternalArrayType array_type,
4197                                          void* external_pointer,
4198                                          PretenureFlag pretenure) {
4199   int size = ExternalArray::kAlignedSize;
4200   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4201   Object* result;
4202   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4203     if (!maybe_result->ToObject(&result)) return maybe_result;
4204   }
4205 
4206   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4207       MapForExternalArrayType(array_type));
4208   reinterpret_cast<ExternalArray*>(result)->set_length(length);
4209   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4210       external_pointer);
4211 
4212   return result;
4213 }
4214 
4215 
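// Allocates and initializes a Code object from |desc|.  The relocation info
// ByteArray is allocated first so that an allocation failure cannot leave a
// partially initialized Code object behind.  Oversized or immovable code is
// placed in large object space, where objects are never relocated.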
4216 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4217                               Code::Flags flags,
4218                               Handle<Object> self_reference,
4219                               bool immovable,
4220                               bool crankshafted,
4221                               int prologue_offset) {
4222   // Allocate ByteArray before the Code object, so that we do not risk
4223   // leaving uninitialized Code object (and breaking the heap).
4224   ByteArray* reloc_info;
4225   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4226   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4227 
4228   // Compute size.
4229   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4230   int obj_size = Code::SizeFor(body_size);
4231   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4232   MaybeObject* maybe_result;
4233   // Large code objects and code objects which should stay at a fixed address
4234   // are allocated in large object space.
4235   HeapObject* result;
4236   bool force_lo_space = obj_size > code_space()->AreaSize();
4237   if (force_lo_space) {
4238     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4239   } else {
4240     maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4241   }
4242   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4243 
4244   if (immovable && !force_lo_space &&
4245       // Objects on the first page of each space are never moved.
4246       !code_space_->FirstPage()->Contains(result->address())) {
4247     // Discard the first code allocation, which was on a page where it could be
4248     // moved.
4249     CreateFillerObjectAt(result->address(), obj_size);
4250     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4251     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4252   }
4253 
4254   // Initialize the object
4255   result->set_map_no_write_barrier(code_map());
4256   Code* code = Code::cast(result);
4257   ASSERT(!isolate_->code_range()->exists() ||
4258       isolate_->code_range()->contains(code->address()));
4259   code->set_instruction_size(desc.instr_size);
4260   code->set_relocation_info(reloc_info);
4261   code->set_flags(flags);
4262   code->set_raw_kind_specific_flags1(0);
4263   code->set_raw_kind_specific_flags2(0);
4264   if (code->is_call_stub() || code->is_keyed_call_stub()) {
4265     code->set_check_type(RECEIVER_MAP_CHECK);
4266   }
4267   code->set_is_crankshafted(crankshafted);
4268   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4269   code->set_raw_type_feedback_info(undefined_value());
4270   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4271   code->set_gc_metadata(Smi::FromInt(0));
4272   code->set_ic_age(global_ic_age_);
4273   code->set_prologue_offset(prologue_offset);
4274   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4275     code->set_marked_for_deoptimization(false);
4276   }
4277 
4278 #ifdef ENABLE_DEBUGGER_SUPPORT
4279   if (code->kind() == Code::FUNCTION) {
4280     code->set_has_debug_break_slots(
4281         isolate_->debugger()->IsDebuggerActive());
4282   }
4283 #endif
4284 
4285   // Allow self references to created code object by patching the handle to
4286   // point to the newly allocated Code object.
4287   if (!self_reference.is_null()) {
4288     *(self_reference.location()) = code;
4289   }
4290   // Migrate generated code.
4291   // The generated code can contain Object** values (typically from handles)
4292   // that are dereferenced during the copy to point directly to the actual heap
4293   // objects. These pointers can include references to the code object itself,
4294   // through the self_reference parameter.
4295   code->CopyFrom(desc);
4296 
4297 #ifdef VERIFY_HEAP
4298   if (FLAG_verify_heap) {
4299     code->Verify();
4300   }
4301 #endif
4302   return code;
4303 }
4304 
4305 
4306 MaybeObject* Heap::CopyCode(Code* code) {
4307   // Allocate an object the same size as the code object.
4308   int obj_size = code->Size();
4309   MaybeObject* maybe_result;
4310   if (obj_size > code_space()->AreaSize()) {
4311     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4312   } else {
4313     maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4314   }
4315 
4316   Object* result;
4317   if (!maybe_result->ToObject(&result)) return maybe_result;
4318 
4319   // Copy code object.
4320   Address old_addr = code->address();
4321   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4322   CopyBlock(new_addr, old_addr, obj_size);
4323   // Relocate the copy.
4324   Code* new_code = Code::cast(result);
4325   ASSERT(!isolate_->code_range()->exists() ||
4326       isolate_->code_range()->contains(code->address()));
4327   new_code->Relocate(new_addr - old_addr);
4328   return new_code;
4329 }
4330 
4331 
4332 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4333   // Allocate ByteArray before the Code object, so that we do not risk
4334   // leaving uninitialized Code object (and breaking the heap).
4335   Object* reloc_info_array;
4336   { MaybeObject* maybe_reloc_info_array =
4337         AllocateByteArray(reloc_info.length(), TENURED);
4338     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4339       return maybe_reloc_info_array;
4340     }
4341   }
4342 
4343   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4344 
4345   int new_obj_size = Code::SizeFor(new_body_size);
4346 
4347   Address old_addr = code->address();
4348 
4349   size_t relocation_offset =
4350       static_cast<size_t>(code->instruction_end() - old_addr);
4351 
4352   MaybeObject* maybe_result;
4353   if (new_obj_size > code_space()->AreaSize()) {
4354     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4355   } else {
4356     maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
4357   }
4358 
4359   Object* result;
4360   if (!maybe_result->ToObject(&result)) return maybe_result;
4361 
4362   // Copy code object.
4363   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4364 
4365   // Copy header and instructions.
4366   CopyBytes(new_addr, old_addr, relocation_offset);
4367 
4368   Code* new_code = Code::cast(result);
4369   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4370 
4371   // Copy patched rinfo.
4372   CopyBytes(new_code->relocation_start(),
4373             reloc_info.start(),
4374             static_cast<size_t>(reloc_info.length()));
4375 
4376   // Relocate the copy.
4377   ASSERT(!isolate_->code_range()->exists() ||
4378       isolate_->code_range()->contains(code->address()));
4379   new_code->Relocate(new_addr - old_addr);
4380 
4381 #ifdef VERIFY_HEAP
4382   if (FLAG_verify_heap) {
4383     code->Verify();
4384   }
4385 #endif
4386   return new_code;
4387 }
4388 
4389 
4390 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
4391                                        AllocationSite* allocation_site) {
4392   memento->set_map_no_write_barrier(allocation_memento_map());
4393   ASSERT(allocation_site->map() == allocation_site_map());
4394   memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
4395   if (FLAG_allocation_site_pretenuring) {
4396     allocation_site->IncrementMementoCreateCount();
4397   }
4398 }
4399 
4400 
4401 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4402     Handle<AllocationSite> allocation_site) {
4403   ASSERT(gc_state_ == NOT_IN_GC);
4404   ASSERT(map->instance_type() != MAP_TYPE);
4405   // If allocation failures are disallowed, we may allocate in a different
4406   // space when new space is full and the object is not a large object.
4407   AllocationSpace retry_space =
4408       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4409   int size = map->instance_size() + AllocationMemento::kSize;
4410   Object* result;
4411   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4412   if (!maybe_result->ToObject(&result)) return maybe_result;
4413   // No need for write barrier since object is white and map is in old space.
4414   HeapObject::cast(result)->set_map_no_write_barrier(map);
4415   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4416       reinterpret_cast<Address>(result) + map->instance_size());
4417   InitializeAllocationMemento(alloc_memento, *allocation_site);
4418   return result;
4419 }
4420 
4421 
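// Generic allocator for an object described by |map|.  On success the result
// has its map installed but no other fields initialized; on failure a Failure
// is returned that the caller must propagate.  Callers in this file unpack
// the result with the usual pattern, e.g.:
//
//   Object* result;
//   { MaybeObject* maybe_result = Allocate(map, space);
//     if (!maybe_result->ToObject(&result)) return maybe_result;
//   }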
4422 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4423   ASSERT(gc_state_ == NOT_IN_GC);
4424   ASSERT(map->instance_type() != MAP_TYPE);
4425   // If allocation failures are disallowed, we may allocate in a different
4426   // space when new space is full and the object is not a large object.
4427   AllocationSpace retry_space =
4428       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4429   int size = map->instance_size();
4430   Object* result;
4431   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4432   if (!maybe_result->ToObject(&result)) return maybe_result;
4433   // No need for write barrier since object is white and map is in old space.
4434   HeapObject::cast(result)->set_map_no_write_barrier(map);
4435   return result;
4436 }
4437 
4438 
4439 void Heap::InitializeFunction(JSFunction* function,
4440                               SharedFunctionInfo* shared,
4441                               Object* prototype) {
4442   ASSERT(!prototype->IsMap());
4443   function->initialize_properties();
4444   function->initialize_elements();
4445   function->set_shared(shared);
4446   function->set_code(shared->code());
4447   function->set_prototype_or_initial_map(prototype);
4448   function->set_context(undefined_value());
4449   function->set_literals_or_bindings(empty_fixed_array());
4450   function->set_next_function_link(undefined_value());
4451 }
4452 
4453 
4454 MaybeObject* Heap::AllocateFunction(Map* function_map,
4455                                     SharedFunctionInfo* shared,
4456                                     Object* prototype,
4457                                     PretenureFlag pretenure) {
4458   AllocationSpace space =
4459       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4460   Object* result;
4461   { MaybeObject* maybe_result = Allocate(function_map, space);
4462     if (!maybe_result->ToObject(&result)) return maybe_result;
4463   }
4464   InitializeFunction(JSFunction::cast(result), shared, prototype);
4465   return result;
4466 }
4467 
4468 
4469 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4470   // To get fast allocation and map sharing for arguments objects we
4471   // allocate them based on an arguments boilerplate.
4472 
4473   JSObject* boilerplate;
4474   int arguments_object_size;
4475   bool strict_mode_callee = callee->IsJSFunction() &&
4476       !JSFunction::cast(callee)->shared()->is_classic_mode();
4477   if (strict_mode_callee) {
4478     boilerplate =
4479         isolate()->context()->native_context()->
4480             strict_mode_arguments_boilerplate();
4481     arguments_object_size = kArgumentsObjectSizeStrict;
4482   } else {
4483     boilerplate =
4484         isolate()->context()->native_context()->arguments_boilerplate();
4485     arguments_object_size = kArgumentsObjectSize;
4486   }
4487 
4488   // Check that the size of the boilerplate matches our
4489   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4490   // on the size being a known constant.
4491   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4492 
4493   // Do the allocation.
4494   Object* result;
4495   { MaybeObject* maybe_result =
4496         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4497     if (!maybe_result->ToObject(&result)) return maybe_result;
4498   }
4499 
4500   // Copy the content. The arguments boilerplate doesn't have any
4501   // fields that point to new space so it's safe to skip the write
4502   // barrier here.
4503   CopyBlock(HeapObject::cast(result)->address(),
4504             boilerplate->address(),
4505             JSObject::kHeaderSize);
4506 
4507   // Set the length property.
4508   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4509                                                 Smi::FromInt(length),
4510                                                 SKIP_WRITE_BARRIER);
4511   // Set the callee property for non-strict mode arguments object only.
4512   if (!strict_mode_callee) {
4513     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4514                                                   callee);
4515   }
4516 
4517   // Check the state of the object
4518   ASSERT(JSObject::cast(result)->HasFastProperties());
4519   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4520 
4521   return result;
4522 }
4523 
4524 
4525 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4526                                      FixedArray* properties,
4527                                      Map* map) {
4528   obj->set_properties(properties);
4529   obj->initialize_elements();
4530   // TODO(1240798): Initialize the object's body using valid initial values
4531   // according to the object's initial map.  For example, if the map's
4532   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4533   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4534   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4535   // verification code has to cope with (temporarily) invalid objects.  See
4536   // for example, JSArray::JSArrayVerify.
4537   Object* filler;
4538   // We cannot always fill with one_pointer_filler_map because objects
4539   // created from API functions expect their internal fields to be initialized
4540   // with undefined_value.
4541   // Pre-allocated fields need to be initialized with undefined_value as well
4542   // so that object accesses before the constructor completes (e.g. in the
4543   // debugger) will not cause a crash.
4544   if (map->constructor()->IsJSFunction() &&
4545       JSFunction::cast(map->constructor())->shared()->
4546           IsInobjectSlackTrackingInProgress()) {
4547     // We might want to shrink the object later.
4548     ASSERT(obj->GetInternalFieldCount() == 0);
4549     filler = Heap::one_pointer_filler_map();
4550   } else {
4551     filler = Heap::undefined_value();
4552   }
4553   obj->InitializeBody(map, Heap::undefined_value(), filler);
4554 }
4555 
4556 
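// Allocates a JSObject for the given map.  The properties backing store is
// either a fresh FixedArray sized from the map's initial properties length
// or, when |allocate_properties| is false, the shared empty_fixed_array.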
4557 MaybeObject* Heap::AllocateJSObjectFromMap(
4558     Map* map, PretenureFlag pretenure, bool allocate_properties) {
4559   // JSFunctions should be allocated using AllocateFunction to be
4560   // properly initialized.
4561   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4562 
4563   // Both types of global objects should be allocated using
4564   // AllocateGlobalObject to be properly initialized.
4565   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4566   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4567 
4568   // Allocate the backing storage for the properties.
4569   FixedArray* properties;
4570   if (allocate_properties) {
4571     int prop_size = map->InitialPropertiesLength();
4572     ASSERT(prop_size >= 0);
4573     { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4574       if (!maybe_properties->To(&properties)) return maybe_properties;
4575     }
4576   } else {
4577     properties = empty_fixed_array();
4578   }
4579 
4580   // Allocate the JSObject.
4581   int size = map->instance_size();
4582   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4583   Object* obj;
4584   MaybeObject* maybe_obj = Allocate(map, space);
4585   if (!maybe_obj->To(&obj)) return maybe_obj;
4586 
4587   // Initialize the JSObject.
4588   InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4589   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4590          JSObject::cast(obj)->HasExternalArrayElements());
4591   return obj;
4592 }
4593 
4594 
4595 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
4596     Map* map, Handle<AllocationSite> allocation_site) {
4597   // JSFunctions should be allocated using AllocateFunction to be
4598   // properly initialized.
4599   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4600 
4601   // Both types of global objects should be allocated using
4602   // AllocateGlobalObject to be properly initialized.
4603   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4604   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4605 
4606   // Allocate the backing storage for the properties.
4607   int prop_size = map->InitialPropertiesLength();
4608   ASSERT(prop_size >= 0);
4609   FixedArray* properties;
4610   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4611     if (!maybe_properties->To(&properties)) return maybe_properties;
4612   }
4613 
4614   // Allocate the JSObject.
4615   int size = map->instance_size();
4616   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
4617   Object* obj;
4618   MaybeObject* maybe_obj =
4619       AllocateWithAllocationSite(map, space, allocation_site);
4620   if (!maybe_obj->To(&obj)) return maybe_obj;
4621 
4622   // Initialize the JSObject.
4623   InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4624   ASSERT(JSObject::cast(obj)->HasFastElements());
4625   return obj;
4626 }
4627 
4628 
4629 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4630                                     PretenureFlag pretenure) {
4631   ASSERT(constructor->has_initial_map());
4632   // Allocate the object based on the constructor's initial map.
4633   MaybeObject* result = AllocateJSObjectFromMap(
4634       constructor->initial_map(), pretenure);
4635 #ifdef DEBUG
4636   // Make sure result is NOT a global object if valid.
4637   Object* non_failure;
4638   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4639 #endif
4640   return result;
4641 }
4642 
4643 
4644 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4645     Handle<AllocationSite> allocation_site) {
4646   ASSERT(constructor->has_initial_map());
4647   // Allocate the object based on the constructor's initial map, or the
4648   // elements kind advice recorded in the allocation site.
4649   Map* initial_map = constructor->initial_map();
4650 
4651   ElementsKind to_kind = allocation_site->GetElementsKind();
4652   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4653   if (to_kind != initial_map->elements_kind()) {
4654     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4655     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4656     // Possibly alter the mode, since we found an updated elements kind
4657     // in the type info cell.
4658     mode = AllocationSite::GetMode(to_kind);
4659   }
4660 
4661   MaybeObject* result;
4662   if (mode == TRACK_ALLOCATION_SITE) {
4663     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4664         allocation_site);
4665   } else {
4666     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4667   }
4668 #ifdef DEBUG
4669   // Make sure result is NOT a global object if valid.
4670   Object* non_failure;
4671   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4672 #endif
4673   return result;
4674 }
4675 
4676 
4677 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4678   // Allocate a fresh map. Modules do not have a prototype.
4679   Map* map;
4680   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4681   if (!maybe_map->To(&map)) return maybe_map;
4682   // Allocate the object based on the map.
4683   JSModule* module;
4684   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4685   if (!maybe_module->To(&module)) return maybe_module;
4686   module->set_context(context);
4687   module->set_scope_info(scope_info);
4688   return module;
4689 }
4690 
4691 
4692 MaybeObject* Heap::AllocateJSArrayAndStorage(
4693     ElementsKind elements_kind,
4694     int length,
4695     int capacity,
4696     ArrayStorageAllocationMode mode,
4697     PretenureFlag pretenure) {
4698   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4699   JSArray* array;
4700   if (!maybe_array->To(&array)) return maybe_array;
4701 
4702   // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4703   // for performance reasons.
4704   ASSERT(capacity >= length);
4705 
4706   if (capacity == 0) {
4707     array->set_length(Smi::FromInt(0));
4708     array->set_elements(empty_fixed_array());
4709     return array;
4710   }
4711 
4712   FixedArrayBase* elms;
4713   MaybeObject* maybe_elms = NULL;
4714   if (IsFastDoubleElementsKind(elements_kind)) {
4715     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4716       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4717     } else {
4718       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4719       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4720     }
4721   } else {
4722     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4723     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4724       maybe_elms = AllocateUninitializedFixedArray(capacity);
4725     } else {
4726       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4727       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4728     }
4729   }
4730   if (!maybe_elms->To(&elms)) return maybe_elms;
4731 
4732   array->set_elements(elms);
4733   array->set_length(Smi::FromInt(length));
4734   return array;
4735 }
4736 
4737 
4738 MaybeObject* Heap::AllocateJSArrayStorage(
4739     JSArray* array,
4740     int length,
4741     int capacity,
4742     ArrayStorageAllocationMode mode) {
4743   ASSERT(capacity >= length);
4744 
4745   if (capacity == 0) {
4746     array->set_length(Smi::FromInt(0));
4747     array->set_elements(empty_fixed_array());
4748     return array;
4749   }
4750 
4751   FixedArrayBase* elms;
4752   MaybeObject* maybe_elms = NULL;
4753   ElementsKind elements_kind = array->GetElementsKind();
4754   if (IsFastDoubleElementsKind(elements_kind)) {
4755     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4756       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4757     } else {
4758       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4759       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4760     }
4761   } else {
4762     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4763     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4764       maybe_elms = AllocateUninitializedFixedArray(capacity);
4765     } else {
4766       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4767       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4768     }
4769   }
4770   if (!maybe_elms->To(&elms)) return maybe_elms;
4771 
4772   array->set_elements(elms);
4773   array->set_length(Smi::FromInt(length));
4774   return array;
4775 }
4776 
4777 
4778 MaybeObject* Heap::AllocateJSArrayWithElements(
4779     FixedArrayBase* elements,
4780     ElementsKind elements_kind,
4781     int length,
4782     PretenureFlag pretenure) {
4783   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4784   JSArray* array;
4785   if (!maybe_array->To(&array)) return maybe_array;
4786 
4787   array->set_elements(elements);
4788   array->set_length(Smi::FromInt(length));
4789   array->ValidateElements();
4790   return array;
4791 }
4792 
4793 
4794 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4795   // Allocate map.
4796   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4797   // maps. Will probably depend on the identity of the handler object, too.
4798   Map* map;
4799   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4800   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4801   map->set_prototype(prototype);
4802 
4803   // Allocate the proxy object.
4804   JSProxy* result;
4805   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4806   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4807   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4808   result->set_handler(handler);
4809   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4810   return result;
4811 }
4812 
4813 
4814 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4815                                            Object* call_trap,
4816                                            Object* construct_trap,
4817                                            Object* prototype) {
4818   // Allocate map.
4819   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4820   // maps. Will probably depend on the identity of the handler object, too.
4821   Map* map;
4822   MaybeObject* maybe_map_obj =
4823       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4824   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4825   map->set_prototype(prototype);
4826 
4827   // Allocate the proxy object.
4828   JSFunctionProxy* result;
4829   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4830   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4831   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4832   result->set_handler(handler);
4833   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4834   result->set_call_trap(call_trap);
4835   result->set_construct_trap(construct_trap);
4836   return result;
4837 }
4838 
4839 
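// Makes a shallow clone of |source|.  When the clone is allocated in new
// space and |site| is non-NULL, an AllocationMemento pointing at the site is
// placed right behind the clone.  The elements and properties stores are
// copied as well, except for copy-on-write elements, which stay shared.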
4840 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
4841   // Never used to copy functions.  If functions need to be copied we
4842   // have to be careful to clear the literals array.
4843   SLOW_ASSERT(!source->IsJSFunction());
4844 
4845   // Make the clone.
4846   Map* map = source->map();
4847   int object_size = map->instance_size();
4848   Object* clone;
4849 
4850   ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
4851 
4852   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4853 
4854   // If we're forced to always allocate, we use the general allocation
4855   // functions which may leave us with an object in old space.
4856   if (always_allocate()) {
4857     { MaybeObject* maybe_clone =
4858           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4859       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4860     }
4861     Address clone_address = HeapObject::cast(clone)->address();
4862     CopyBlock(clone_address,
4863               source->address(),
4864               object_size);
4865     // Update write barrier for all fields that lie beyond the header.
4866     RecordWrites(clone_address,
4867                  JSObject::kHeaderSize,
4868                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4869   } else {
4870     wb_mode = SKIP_WRITE_BARRIER;
4871 
4872     { int adjusted_object_size = site != NULL
4873           ? object_size + AllocationMemento::kSize
4874           : object_size;
4875       MaybeObject* maybe_clone =
4876           AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
4877       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4878     }
4879     SLOW_ASSERT(InNewSpace(clone));
4880     // Since we know the clone is allocated in new space, we can copy
4881     // the contents without worrying about updating the write barrier.
4882     CopyBlock(HeapObject::cast(clone)->address(),
4883               source->address(),
4884               object_size);
4885 
4886     if (site != NULL) {
4887       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4888           reinterpret_cast<Address>(clone) + object_size);
4889       InitializeAllocationMemento(alloc_memento, site);
4890     }
4891   }
4892 
4893   SLOW_ASSERT(
4894       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4895   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4896   FixedArray* properties = FixedArray::cast(source->properties());
4897   // Update elements if necessary.
4898   if (elements->length() > 0) {
4899     Object* elem;
4900     { MaybeObject* maybe_elem;
4901       if (elements->map() == fixed_cow_array_map()) {
4902         maybe_elem = FixedArray::cast(elements);
4903       } else if (source->HasFastDoubleElements()) {
4904         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4905       } else {
4906         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4907       }
4908       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4909     }
4910     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4911   }
4912   // Update properties if necessary.
4913   if (properties->length() > 0) {
4914     Object* prop;
4915     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4916       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4917     }
4918     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4919   }
4920   // Return the new clone.
4921   return clone;
4922 }
4923 
4924 
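// Converts |object| in place to a fresh map with the given instance type and
// size.  Every allocation is performed before the map is swapped, because
// once the type has changed the conversion must not fail (the caller may
// retry the whole operation after an allocation failure).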
4925 MaybeObject* Heap::ReinitializeJSReceiver(
4926     JSReceiver* object, InstanceType type, int size) {
4927   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4928 
4929   // Allocate fresh map.
4930   // TODO(rossberg): Once we optimize proxies, cache these maps.
4931   Map* map;
4932   MaybeObject* maybe = AllocateMap(type, size);
4933   if (!maybe->To<Map>(&map)) return maybe;
4934 
4935   // Check that the receiver has at least the size of the fresh object.
4936   int size_difference = object->map()->instance_size() - map->instance_size();
4937   ASSERT(size_difference >= 0);
4938 
4939   map->set_prototype(object->map()->prototype());
4940 
4941   // Allocate the backing storage for the properties.
4942   int prop_size = map->unused_property_fields() - map->inobject_properties();
4943   Object* properties;
4944   maybe = AllocateFixedArray(prop_size, TENURED);
4945   if (!maybe->ToObject(&properties)) return maybe;
4946 
4947   // Functions require some allocation, which might fail here.
4948   SharedFunctionInfo* shared = NULL;
4949   if (type == JS_FUNCTION_TYPE) {
4950     String* name;
4951     maybe =
4952         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
4953     if (!maybe->To<String>(&name)) return maybe;
4954     maybe = AllocateSharedFunctionInfo(name);
4955     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4956   }
4957 
4958   // Because of possible retries of this function after failure,
4959   // we must NOT fail after this point, where we have changed the type!
4960 
4961   // Reset the map for the object.
4962   object->set_map(map);
4963   JSObject* jsobj = JSObject::cast(object);
4964 
4965   // Reinitialize the object from the constructor map.
4966   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4967 
4968   // Functions require some minimal initialization.
4969   if (type == JS_FUNCTION_TYPE) {
4970     map->set_function_with_prototype(true);
4971     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4972     JSFunction::cast(object)->set_context(
4973         isolate()->context()->native_context());
4974   }
4975 
4976   // Put in filler if the new object is smaller than the old.
4977   if (size_difference > 0) {
4978     CreateFillerObjectAt(
4979         object->address() + map->instance_size(), size_difference);
4980   }
4981 
4982   return object;
4983 }
4984 
4985 
4986 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4987                                              JSGlobalProxy* object) {
4988   ASSERT(constructor->has_initial_map());
4989   Map* map = constructor->initial_map();
4990 
4991   // Check that the already allocated object has the same size and type as
4992   // objects allocated using the constructor.
4993   ASSERT(map->instance_size() == object->map()->instance_size());
4994   ASSERT(map->instance_type() == object->map()->instance_type());
4995 
4996   // Allocate the backing storage for the properties.
4997   int prop_size = map->unused_property_fields() - map->inobject_properties();
4998   Object* properties;
4999   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5000     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5001   }
5002 
5003   // Reset the map for the object.
5004   object->set_map(constructor->initial_map());
5005 
5006   // Reinitialize the object from the constructor map.
5007   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5008   return object;
5009 }
5010 
5011 
5012 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5013                                              PretenureFlag pretenure) {
5014   int length = string.length();
5015   if (length == 1) {
5016     return Heap::LookupSingleCharacterStringFromCode(string[0]);
5017   }
5018   Object* result;
5019   { MaybeObject* maybe_result =
5020         AllocateRawOneByteString(string.length(), pretenure);
5021     if (!maybe_result->ToObject(&result)) return maybe_result;
5022   }
5023 
5024   // Copy the characters into the new object.
5025   CopyChars(SeqOneByteString::cast(result)->GetChars(),
5026             string.start(),
5027             length);
5028   return result;
5029 }
5030 
5031 
5032 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5033                                               int non_ascii_start,
5034                                               PretenureFlag pretenure) {
5035   // Continue counting the number of characters in the UTF-8 string, starting
5036   // from the first non-ascii character or word.
5037   Access<UnicodeCache::Utf8Decoder>
5038       decoder(isolate_->unicode_cache()->utf8_decoder());
5039   decoder->Reset(string.start() + non_ascii_start,
5040                  string.length() - non_ascii_start);
5041   int utf16_length = decoder->Utf16Length();
5042   ASSERT(utf16_length > 0);
5043   // Allocate string.
5044   Object* result;
5045   {
5046     int chars = non_ascii_start + utf16_length;
5047     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5048     if (!maybe_result->ToObject(&result)) return maybe_result;
5049   }
5050   // Convert and copy the characters into the new object.
5051   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5052   // Copy ascii portion.
5053   uint16_t* data = twobyte->GetChars();
5054   if (non_ascii_start != 0) {
5055     const char* ascii_data = string.start();
5056     for (int i = 0; i < non_ascii_start; i++) {
5057       *data++ = *ascii_data++;
5058     }
5059   }
5060   // Now write the remainder.
5061   decoder->WriteUtf16(data, utf16_length);
5062   return result;
5063 }
5064 
5065 
5066 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5067                                              PretenureFlag pretenure) {
5068   // Check if the string is an ASCII string.
5069   Object* result;
5070   int length = string.length();
5071   const uc16* start = string.start();
5072 
5073   if (String::IsOneByte(start, length)) {
5074     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5075     if (!maybe_result->ToObject(&result)) return maybe_result;
5076     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5077   } else {  // It's not a one byte string.
5078     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5079     if (!maybe_result->ToObject(&result)) return maybe_result;
5080     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5081   }
5082   return result;
5083 }
5084 
5085 
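// Returns the internalized-string map corresponding to the string's current
// map, or NULL if the string is in new space or has no in-place internalized
// counterpart.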
5086 Map* Heap::InternalizedStringMapForString(String* string) {
5087   // If the string is in new space it cannot be used as internalized.
5088   if (InNewSpace(string)) return NULL;
5089 
5090   // Find the corresponding internalized string map for strings.
5091   switch (string->map()->instance_type()) {
5092     case STRING_TYPE: return internalized_string_map();
5093     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5094     case CONS_STRING_TYPE: return cons_internalized_string_map();
5095     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5096     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5097     case EXTERNAL_ASCII_STRING_TYPE:
5098       return external_ascii_internalized_string_map();
5099     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5100       return external_internalized_string_with_one_byte_data_map();
5101     case SHORT_EXTERNAL_STRING_TYPE:
5102       return short_external_internalized_string_map();
5103     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5104       return short_external_ascii_internalized_string_map();
5105     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5106       return short_external_internalized_string_with_one_byte_data_map();
5107     default: return NULL;  // No match found.
5108   }
5109 }
5110 
5111 
5112 static inline void WriteOneByteData(Vector<const char> vector,
5113                                     uint8_t* chars,
5114                                     int len) {
5115   // Only works for ascii.
5116   ASSERT(vector.length() == len);
5117   OS::MemCopy(chars, vector.start(), len);
5118 }
5119 
5120 static inline void WriteTwoByteData(Vector<const char> vector,
5121                                     uint16_t* chars,
5122                                     int len) {
5123   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5124   unsigned stream_length = vector.length();
5125   while (stream_length != 0) {
5126     unsigned consumed = 0;
5127     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5128     ASSERT(c != unibrow::Utf8::kBadChar);
5129     ASSERT(consumed <= stream_length);
5130     stream_length -= consumed;
5131     stream += consumed;
5132     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5133       len -= 2;
5134       if (len < 0) break;
5135       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5136       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5137     } else {
5138       len -= 1;
5139       if (len < 0) break;
5140       *chars++ = c;
5141     }
5142   }
5143   ASSERT(stream_length == 0);
5144   ASSERT(len == 0);
5145 }
5146 
5147 
5148 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5149   ASSERT(s->length() == len);
5150   String::WriteToFlat(s, chars, 0, len);
5151 }
5152 
5153 
5154 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5155   ASSERT(s->length() == len);
5156   String::WriteToFlat(s, chars, 0, len);
5157 }
5158 
5159 
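// Allocates a sequential internalized string of |chars| characters and fills
// it from |t|, which is either UTF-8 data in a Vector<const char> or an
// existing String (see the WriteOneByteData/WriteTwoByteData overloads
// above).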
5160 template<bool is_one_byte, typename T>
5161 MaybeObject* Heap::AllocateInternalizedStringImpl(
5162     T t, int chars, uint32_t hash_field) {
5163   ASSERT(chars >= 0);
5164   // Compute map and object size.
5165   int size;
5166   Map* map;
5167 
5168   if (is_one_byte) {
5169     if (chars > SeqOneByteString::kMaxLength) {
5170       return Failure::OutOfMemoryException(0x9);
5171     }
5172     map = ascii_internalized_string_map();
5173     size = SeqOneByteString::SizeFor(chars);
5174   } else {
5175     if (chars > SeqTwoByteString::kMaxLength) {
5176       return Failure::OutOfMemoryException(0xa);
5177     }
5178     map = internalized_string_map();
5179     size = SeqTwoByteString::SizeFor(chars);
5180   }
5181   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
5182 
5183   // Allocate string.
5184   Object* result;
5185   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5186     if (!maybe_result->ToObject(&result)) return maybe_result;
5187   }
5188 
5189   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5190   // Set length and hash fields of the allocated string.
5191   String* answer = String::cast(result);
5192   answer->set_length(chars);
5193   answer->set_hash_field(hash_field);
5194 
5195   ASSERT_EQ(size, answer->Size());
5196 
5197   if (is_one_byte) {
5198     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5199   } else {
5200     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5201   }
5202   return answer;
5203 }
5204 
5205 
5206 // Need explicit instantiations.
5207 template
5208 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5209 template
5210 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5211     String*, int, uint32_t);
5212 template
5213 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5214     Vector<const char>, int, uint32_t);
5215 
5216 
5217 MaybeObject* Heap::AllocateRawOneByteString(int length,
5218                                             PretenureFlag pretenure) {
5219   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5220     return Failure::OutOfMemoryException(0xb);
5221   }
5222   int size = SeqOneByteString::SizeFor(length);
5223   ASSERT(size <= SeqOneByteString::kMaxSize);
5224   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5225 
5226   Object* result;
5227   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5228     if (!maybe_result->ToObject(&result)) return maybe_result;
5229   }
5230 
5231   // Partially initialize the object.
5232   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5233   String::cast(result)->set_length(length);
5234   String::cast(result)->set_hash_field(String::kEmptyHashField);
5235   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5236 
5237   return result;
5238 }
5239 
5240 
5241 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5242                                             PretenureFlag pretenure) {
5243   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5244     return Failure::OutOfMemoryException(0xc);
5245   }
5246   int size = SeqTwoByteString::SizeFor(length);
5247   ASSERT(size <= SeqTwoByteString::kMaxSize);
5248   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5249 
5250   Object* result;
5251   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5252     if (!maybe_result->ToObject(&result)) return maybe_result;
5253   }
5254 
5255   // Partially initialize the object.
5256   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5257   String::cast(result)->set_length(length);
5258   String::cast(result)->set_hash_field(String::kEmptyHashField);
5259   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5260   return result;
5261 }
5262 
5263 
5264 MaybeObject* Heap::AllocateJSArray(
5265     ElementsKind elements_kind,
5266     PretenureFlag pretenure) {
5267   Context* native_context = isolate()->context()->native_context();
5268   JSFunction* array_function = native_context->array_function();
5269   Map* map = array_function->initial_map();
5270   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5271   if (transition_map != NULL) map = transition_map;
5272   return AllocateJSObjectFromMap(map, pretenure);
5273 }
5274 
5275 
5276 MaybeObject* Heap::AllocateEmptyFixedArray() {
5277   int size = FixedArray::SizeFor(0);
5278   Object* result;
5279   { MaybeObject* maybe_result =
5280         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5281     if (!maybe_result->ToObject(&result)) return maybe_result;
5282   }
5283   // Initialize the object.
5284   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5285       fixed_array_map());
5286   reinterpret_cast<FixedArray*>(result)->set_length(0);
5287   return result;
5288 }
5289 
5290 
5291 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5292   return AllocateExternalArray(0, array_type, NULL, TENURED);
5293 }
5294 
5295 
5296 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5297   int len = src->length();
5298   Object* obj;
5299   { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
5300     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5301   }
5302   if (InNewSpace(obj)) {
5303     HeapObject* dst = HeapObject::cast(obj);
5304     dst->set_map_no_write_barrier(map);
5305     CopyBlock(dst->address() + kPointerSize,
5306               src->address() + kPointerSize,
5307               FixedArray::SizeFor(len) - kPointerSize);
5308     return obj;
5309   }
5310   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5311   FixedArray* result = FixedArray::cast(obj);
5312   result->set_length(len);
5313 
5314   // Copy the content
5315   DisallowHeapAllocation no_gc;
5316   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5317   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5318   return result;
5319 }
5320 
5321 
5322 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5323                                                Map* map) {
5324   int len = src->length();
5325   Object* obj;
5326   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5327     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5328   }
5329   HeapObject* dst = HeapObject::cast(obj);
5330   dst->set_map_no_write_barrier(map);
5331   CopyBlock(
5332       dst->address() + FixedDoubleArray::kLengthOffset,
5333       src->address() + FixedDoubleArray::kLengthOffset,
5334       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5335   return obj;
5336 }
5337 
5338 
5339 MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
5340                                                 Map* map) {
5341   int int64_entries = src->count_of_int64_entries();
5342   int ptr_entries = src->count_of_ptr_entries();
5343   int int32_entries = src->count_of_int32_entries();
5344   Object* obj;
5345   { MaybeObject* maybe_obj =
5346         AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
5347     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5348   }
5349   HeapObject* dst = HeapObject::cast(obj);
5350   dst->set_map_no_write_barrier(map);
5351   CopyBlock(
5352       dst->address() + ConstantPoolArray::kLengthOffset,
5353       src->address() + ConstantPoolArray::kLengthOffset,
5354       ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
5355           - ConstantPoolArray::kLengthOffset);
5356   return obj;
5357 }
5358 
5359 
5360 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5361   if (length < 0 || length > FixedArray::kMaxLength) {
5362     return Failure::OutOfMemoryException(0xe);
5363   }
5364   int size = FixedArray::SizeFor(length);
5365   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
5366 
5367   return AllocateRaw(size, space, OLD_POINTER_SPACE);
5368 }
5369 
5370 
5371 MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
5372                                                 PretenureFlag pretenure,
5373                                                 Object* filler) {
5374   ASSERT(length >= 0);
5375   ASSERT(empty_fixed_array()->IsFixedArray());
5376   if (length == 0) return empty_fixed_array();
5377 
5378   ASSERT(!InNewSpace(filler));
5379   Object* result;
5380   { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
5381     if (!maybe_result->ToObject(&result)) return maybe_result;
5382   }
5383 
5384   HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
5385   FixedArray* array = FixedArray::cast(result);
5386   array->set_length(length);
5387   MemsetPointer(array->data_start(), filler, length);
5388   return array;
5389 }
5390 
5391 
5392 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5393   return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
5394 }
5395 
5396 
5397 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5398                                                PretenureFlag pretenure) {
5399   return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
5400 }
5401 
5402 
5403 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5404   if (length == 0) return empty_fixed_array();
5405 
5406   Object* obj;
5407   { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
5408     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5409   }
5410 
5411   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5412       fixed_array_map());
5413   FixedArray::cast(obj)->set_length(length);
5414   return obj;
5415 }
5416 
5417 
5418 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5419   int size = FixedDoubleArray::SizeFor(0);
5420   Object* result;
5421   { MaybeObject* maybe_result =
5422         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5423     if (!maybe_result->ToObject(&result)) return maybe_result;
5424   }
5425   // Initialize the object.
5426   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5427       fixed_double_array_map());
5428   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5429   return result;
5430 }
5431 
5432 
5433 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5434     int length,
5435     PretenureFlag pretenure) {
5436   if (length == 0) return empty_fixed_array();
5437 
5438   Object* elements_object;
5439   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5440   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5441   FixedDoubleArray* elements =
5442       reinterpret_cast<FixedDoubleArray*>(elements_object);
5443 
5444   elements->set_map_no_write_barrier(fixed_double_array_map());
5445   elements->set_length(length);
5446   return elements;
5447 }
5448 
5449 
5450 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5451     int length,
5452     PretenureFlag pretenure) {
5453   if (length == 0) return empty_fixed_array();
5454 
5455   Object* elements_object;
5456   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5457   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5458   FixedDoubleArray* elements =
5459       reinterpret_cast<FixedDoubleArray*>(elements_object);
5460 
5461   for (int i = 0; i < length; ++i) {
5462     elements->set_the_hole(i);
5463   }
5464 
5465   elements->set_map_no_write_barrier(fixed_double_array_map());
5466   elements->set_length(length);
5467   return elements;
5468 }
5469 
5470 
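// Raw allocation for FixedDoubleArray backing stores. On 32-bit hosts one
// extra pointer is reserved so that EnsureDoubleAligned can shift the
// payload onto an 8-byte boundary.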
5471 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5472                                                PretenureFlag pretenure) {
5473   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5474     return Failure::OutOfMemoryException(0xf);
5475   }
5476   int size = FixedDoubleArray::SizeFor(length);
5477 #ifndef V8_HOST_ARCH_64_BIT
5478   size += kPointerSize;
5479 #endif
5480   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5481 
5482   HeapObject* object;
5483   { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
5484     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5485   }
5486 
5487   return EnsureDoubleAligned(this, object, size);
5488 }
5489 
5490 
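// Allocates a tenured, double-aligned ConstantPoolArray with the requested
// numbers of int64, pointer and int32 entries. Only the pointer section is
// initialized (to undefined); the numeric sections are left uninitialized.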
5491 MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
5492                                              int number_of_ptr_entries,
5493                                              int number_of_int32_entries) {
5494   ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
5495          number_of_int32_entries > 0);
5496   int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
5497                                         number_of_ptr_entries,
5498                                         number_of_int32_entries);
5499 #ifndef V8_HOST_ARCH_64_BIT
5500   size += kPointerSize;
5501 #endif
5502   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5503 
5504   HeapObject* object;
5505   { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
5506     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5507   }
5508   object = EnsureDoubleAligned(this, object, size);
5509   HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
5510 
5511   ConstantPoolArray* constant_pool =
5512       reinterpret_cast<ConstantPoolArray*>(object);
5513   constant_pool->SetEntryCounts(number_of_int64_entries,
5514                                 number_of_ptr_entries,
5515                                 number_of_int32_entries);
5516   MemsetPointer(
5517       HeapObject::RawField(
5518           constant_pool,
5519           constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
5520       undefined_value(),
5521       number_of_ptr_entries);
5522   return constant_pool;
5523 }
5524 
5525 
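// A hash table starts life as a plain FixedArray (so all elements are
// undefined) and is then re-tagged with the hash table map.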
5526 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5527   Object* result;
5528   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5529     if (!maybe_result->ToObject(&result)) return maybe_result;
5530   }
5531   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5532       hash_table_map());
5533   ASSERT(result->IsHashTable());
5534   return result;
5535 }
5536 
5537 
5538 MaybeObject* Heap::AllocateSymbol() {
5539   // Statically ensure that it is safe to allocate symbols in paged spaces.
5540   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5541 
5542   Object* result;
5543   MaybeObject* maybe =
5544       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5545   if (!maybe->ToObject(&result)) return maybe;
5546 
5547   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5548 
5549   // Generate a random hash value.
5550   int hash;
5551   int attempts = 0;
5552   do {
5553     hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
5554     attempts++;
5555   } while (hash == 0 && attempts < 30);
5556   if (hash == 0) hash = 1;  // never return 0
5557 
5558   Symbol::cast(result)->set_hash_field(
5559       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5560   Symbol::cast(result)->set_name(undefined_value());
5561   Symbol::cast(result)->set_flags(Smi::FromInt(0));
5562 
5563   ASSERT(!Symbol::cast(result)->is_private());
5564   return result;
5565 }
5566 
5567 
5568 MaybeObject* Heap::AllocatePrivateSymbol() {
5569   MaybeObject* maybe = AllocateSymbol();
5570   Symbol* symbol;
5571   if (!maybe->To(&symbol)) return maybe;
5572   symbol->set_is_private(true);
5573   return symbol;
5574 }
5575 
5576 
5577 MaybeObject* Heap::AllocateNativeContext() {
5578   Object* result;
5579   { MaybeObject* maybe_result =
5580         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5581     if (!maybe_result->ToObject(&result)) return maybe_result;
5582   }
5583   Context* context = reinterpret_cast<Context*>(result);
5584   context->set_map_no_write_barrier(native_context_map());
5585   context->set_js_array_maps(undefined_value());
5586   ASSERT(context->IsNativeContext());
5587   ASSERT(result->IsContext());
5588   return result;
5589 }
5590 
5591 
5592 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5593                                          ScopeInfo* scope_info) {
5594   Object* result;
5595   { MaybeObject* maybe_result =
5596         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5597     if (!maybe_result->ToObject(&result)) return maybe_result;
5598   }
5599   Context* context = reinterpret_cast<Context*>(result);
5600   context->set_map_no_write_barrier(global_context_map());
5601   context->set_closure(function);
5602   context->set_previous(function->context());
5603   context->set_extension(scope_info);
5604   context->set_global_object(function->context()->global_object());
5605   ASSERT(context->IsGlobalContext());
5606   ASSERT(result->IsContext());
5607   return context;
5608 }
5609 
5610 
5611 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5612   Object* result;
5613   { MaybeObject* maybe_result =
5614         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5615     if (!maybe_result->ToObject(&result)) return maybe_result;
5616   }
5617   Context* context = reinterpret_cast<Context*>(result);
5618   context->set_map_no_write_barrier(module_context_map());
5619   // Instance link will be set later.
5620   context->set_extension(Smi::FromInt(0));
5621   return context;
5622 }
5623 
5624 
5625 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5626   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5627   Object* result;
5628   { MaybeObject* maybe_result = AllocateFixedArray(length);
5629     if (!maybe_result->ToObject(&result)) return maybe_result;
5630   }
5631   Context* context = reinterpret_cast<Context*>(result);
5632   context->set_map_no_write_barrier(function_context_map());
5633   context->set_closure(function);
5634   context->set_previous(function->context());
5635   context->set_extension(Smi::FromInt(0));
5636   context->set_global_object(function->context()->global_object());
5637   return context;
5638 }
5639 
5640 
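// A catch context is one slot larger than the minimal context: the extension
// slot holds the name of the catch variable and the extra slot at
// THROWN_OBJECT_INDEX holds the thrown value.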
5641 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5642                                         Context* previous,
5643                                         String* name,
5644                                         Object* thrown_object) {
5645   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5646   Object* result;
5647   { MaybeObject* maybe_result =
5648         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5649     if (!maybe_result->ToObject(&result)) return maybe_result;
5650   }
5651   Context* context = reinterpret_cast<Context*>(result);
5652   context->set_map_no_write_barrier(catch_context_map());
5653   context->set_closure(function);
5654   context->set_previous(previous);
5655   context->set_extension(name);
5656   context->set_global_object(previous->global_object());
5657   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5658   return context;
5659 }
5660 
5661 
5662 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5663                                        Context* previous,
5664                                        JSReceiver* extension) {
5665   Object* result;
5666   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5667     if (!maybe_result->ToObject(&result)) return maybe_result;
5668   }
5669   Context* context = reinterpret_cast<Context*>(result);
5670   context->set_map_no_write_barrier(with_context_map());
5671   context->set_closure(function);
5672   context->set_previous(previous);
5673   context->set_extension(extension);
5674   context->set_global_object(previous->global_object());
5675   return context;
5676 }
5677 
5678 
5679 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5680                                         Context* previous,
5681                                         ScopeInfo* scope_info) {
5682   Object* result;
5683   { MaybeObject* maybe_result =
5684         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5685     if (!maybe_result->ToObject(&result)) return maybe_result;
5686   }
5687   Context* context = reinterpret_cast<Context*>(result);
5688   context->set_map_no_write_barrier(block_context_map());
5689   context->set_closure(function);
5690   context->set_previous(previous);
5691   context->set_extension(scope_info);
5692   context->set_global_object(previous->global_object());
5693   return context;
5694 }
5695 
5696 
5697 MaybeObject* Heap::AllocateScopeInfo(int length) {
5698   FixedArray* scope_info;
5699   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5700   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5701   scope_info->set_map_no_write_barrier(scope_info_map());
5702   return scope_info;
5703 }
5704 
5705 
5706 MaybeObject* Heap::AllocateExternal(void* value) {
5707   Foreign* foreign;
5708   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5709     if (!maybe_result->To(&foreign)) return maybe_result;
5710   }
5711   JSObject* external;
5712   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5713     if (!maybe_result->To(&external)) return maybe_result;
5714   }
5715   external->SetInternalField(0, foreign);
5716   return external;
5717 }
5718 
5719 
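// Allocates one of the Struct subtypes from STRUCT_LIST, choosing the map by
// instance type. The object is tenured and its body is initialized via
// Struct::InitializeBody.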
5720 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5721   Map* map;
5722   switch (type) {
5723 #define MAKE_CASE(NAME, Name, name) \
5724     case NAME##_TYPE: map = name##_map(); break;
5725 STRUCT_LIST(MAKE_CASE)
5726 #undef MAKE_CASE
5727     default:
5728       UNREACHABLE();
5729       return Failure::InternalError();
5730   }
5731   int size = map->instance_size();
5732   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5733   Object* result;
5734   { MaybeObject* maybe_result = Allocate(map, space);
5735     if (!maybe_result->ToObject(&result)) return maybe_result;
5736   }
5737   Struct::cast(result)->InitializeBody(size);
5738   return result;
5739 }
5740 
5741 
5742 bool Heap::IsHeapIterable() {
5743   return (!old_pointer_space()->was_swept_conservatively() &&
5744           !old_data_space()->was_swept_conservatively());
5745 }
5746 
5747 
5748 void Heap::EnsureHeapIsIterable() {
5749   ASSERT(AllowHeapAllocation::IsAllowed());
5750   if (!IsHeapIterable()) {
5751     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5752   }
5753   ASSERT(IsHeapIterable());
5754 }
5755 
5756 
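// Performs a single incremental marking step of |step_size|. If marking is
// complete afterwards, the cycle is finished with a full GC; when no GC has
// happened since the last idle-triggered one, the compilation cache is also
// cleared and new space is shrunk and uncommitted.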
5757 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5758   incremental_marking()->Step(step_size,
5759                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5760 
5761   if (incremental_marking()->IsComplete()) {
5762     bool uncommit = false;
5763     if (gc_count_at_last_idle_gc_ == gc_count_) {
5764       // No GC since the last full GC, the mutator is probably not active.
5765       isolate_->compilation_cache()->Clear();
5766       uncommit = true;
5767     }
5768     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5769     mark_sweeps_since_idle_round_started_++;
5770     gc_count_at_last_idle_gc_ = gc_count_;
5771     if (uncommit) {
5772       new_space_.Shrink();
5773       UncommitFromSpace();
5774     }
5775   }
5776 }
5777 
5778 
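// Idle-time hook driven by the embedder. The hint (clamped to [20, 1000])
// scales the incremental marking step size. Returns true once the current
// idle round is finished and no further idle work appears useful, false if
// more idle notifications would help.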
5779 bool Heap::IdleNotification(int hint) {
5780   // Hints greater than this value indicate that
5781   // the embedder is requesting a lot of GC work.
5782   const int kMaxHint = 1000;
5783   const int kMinHintForIncrementalMarking = 10;
5784   // Minimal hint that allows a full GC.
5785   const int kMinHintForFullGC = 100;
5786   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5787   // The size factor is in range [5..250]. The numbers here are chosen from
5788   // experiments. If you change them, make sure to test with
5789   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5790   intptr_t step_size =
5791       size_factor * IncrementalMarking::kAllocatedThreshold;
5792 
5793   if (contexts_disposed_ > 0) {
5794     contexts_disposed_ = 0;
5795     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5796     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5797         incremental_marking()->IsStopped()) {
5798       HistogramTimerScope scope(isolate_->counters()->gc_context());
5799       CollectAllGarbage(kReduceMemoryFootprintMask,
5800                         "idle notification: contexts disposed");
5801     } else {
5802       AdvanceIdleIncrementalMarking(step_size);
5803     }
5804 
5805     // After context disposal there is likely a lot of garbage remaining; reset
5806     // the idle notification counters in order to trigger more incremental GCs
5807     // on subsequent idle notifications.
5808     StartIdleRound();
5809     return false;
5810   }
5811 
5812   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5813     return IdleGlobalGC();
5814   }
5815 
5816   // By doing small chunks of GC work in each IdleNotification, we perform
5817   // a round of incremental GCs and after that wait until the mutator
5818   // creates enough garbage to justify a new round.
5819   // An incremental GC progresses as follows:
5820   // 1. many incremental marking steps,
5821   // 2. one old space mark-sweep-compact,
5822   // 3. many lazy sweep steps.
5823   // Use mark-sweep-compact events to count incremental GCs in a round.
5824 
5825   if (incremental_marking()->IsStopped()) {
5826     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5827         !IsSweepingComplete() &&
5828         !AdvanceSweepers(static_cast<int>(step_size))) {
5829       return false;
5830     }
5831   }
5832 
5833   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5834     if (EnoughGarbageSinceLastIdleRound()) {
5835       StartIdleRound();
5836     } else {
5837       return true;
5838     }
5839   }
5840 
5841   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5842                               mark_sweeps_since_idle_round_started_;
5843 
5844   if (incremental_marking()->IsStopped()) {
5845     // If there are no more than two GCs left in this idle round and we are
5846     // allowed to do a full GC, then make those GCs full in order to compact
5847     // the code space.
5848     // TODO(ulan): Once we enable code compaction for incremental marking,
5849     // we can get rid of this special case and always start incremental marking.
5850     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5851       CollectAllGarbage(kReduceMemoryFootprintMask,
5852                         "idle notification: finalize idle round");
5853       mark_sweeps_since_idle_round_started_++;
5854     } else if (hint > kMinHintForIncrementalMarking) {
5855       incremental_marking()->Start();
5856     }
5857   }
5858   if (!incremental_marking()->IsStopped() &&
5859       hint > kMinHintForIncrementalMarking) {
5860     AdvanceIdleIncrementalMarking(step_size);
5861   }
5862 
5863   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5864     FinishIdleRound();
5865     return true;
5866   }
5867 
5868   return false;
5869 }
5870 
5871 
5872 bool Heap::IdleGlobalGC() {
5873   static const int kIdlesBeforeScavenge = 4;
5874   static const int kIdlesBeforeMarkSweep = 7;
5875   static const int kIdlesBeforeMarkCompact = 8;
5876   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5877   static const unsigned int kGCsBetweenCleanup = 4;
5878 
5879   if (!last_idle_notification_gc_count_init_) {
5880     last_idle_notification_gc_count_ = gc_count_;
5881     last_idle_notification_gc_count_init_ = true;
5882   }
5883 
5884   bool uncommit = true;
5885   bool finished = false;
5886 
5887   // Reset the number of idle notifications received when a number of
5888   // GCs have taken place. This allows another round of cleanup based
5889   // on idle notifications if enough work has been carried out to
5890   // provoke a number of garbage collections.
5891   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5892     number_idle_notifications_ =
5893         Min(number_idle_notifications_ + 1, kMaxIdleCount);
5894   } else {
5895     number_idle_notifications_ = 0;
5896     last_idle_notification_gc_count_ = gc_count_;
5897   }
5898 
5899   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5900     CollectGarbage(NEW_SPACE, "idle notification");
5901     new_space_.Shrink();
5902     last_idle_notification_gc_count_ = gc_count_;
5903   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5904     // Before doing the mark-sweep collections we clear the
5905     // compilation cache to avoid hanging on to source code and
5906     // generated code for cached functions.
5907     isolate_->compilation_cache()->Clear();
5908 
5909     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5910     new_space_.Shrink();
5911     last_idle_notification_gc_count_ = gc_count_;
5912 
5913   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5914     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5915     new_space_.Shrink();
5916     last_idle_notification_gc_count_ = gc_count_;
5917     number_idle_notifications_ = 0;
5918     finished = true;
5919   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5920     // If we have received more than kIdlesBeforeMarkCompact idle
5921     // notifications we do not perform any cleanup because we don't
5922     // expect to gain much by doing so.
5923     finished = true;
5924   }
5925 
5926   if (uncommit) UncommitFromSpace();
5927 
5928   return finished;
5929 }
5930 
5931 
5932 #ifdef DEBUG
5933 
5934 void Heap::Print() {
5935   if (!HasBeenSetUp()) return;
5936   isolate()->PrintStack(stdout);
5937   AllSpaces spaces(this);
5938   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5939     space->Print();
5940   }
5941 }
5942 
5943 
5944 void Heap::ReportCodeStatistics(const char* title) {
5945   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5946   PagedSpace::ResetCodeStatistics(isolate());
5947   // We do not look for code in new space, map space, or old space.  If code
5948   // somehow ends up in those spaces, we would miss it here.
5949   code_space_->CollectCodeStatistics();
5950   lo_space_->CollectCodeStatistics();
5951   PagedSpace::ReportCodeStatistics(isolate());
5952 }
5953 
5954 
5955 // This function expects that NewSpace's allocated objects histogram is
5956 // populated (via a call to CollectStatistics or else as a side effect of a
5957 // just-completed scavenge collection).
5958 void Heap::ReportHeapStatistics(const char* title) {
5959   USE(title);
5960   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5961          title, gc_count_);
5962   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5963          old_generation_allocation_limit_);
5964 
5965   PrintF("\n");
5966   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5967   isolate_->global_handles()->PrintStats();
5968   PrintF("\n");
5969 
5970   PrintF("Heap statistics : ");
5971   isolate_->memory_allocator()->ReportStatistics();
5972   PrintF("To space : ");
5973   new_space_.ReportStatistics();
5974   PrintF("Old pointer space : ");
5975   old_pointer_space_->ReportStatistics();
5976   PrintF("Old data space : ");
5977   old_data_space_->ReportStatistics();
5978   PrintF("Code space : ");
5979   code_space_->ReportStatistics();
5980   PrintF("Map space : ");
5981   map_space_->ReportStatistics();
5982   PrintF("Cell space : ");
5983   cell_space_->ReportStatistics();
5984   PrintF("PropertyCell space : ");
5985   property_cell_space_->ReportStatistics();
5986   PrintF("Large object space : ");
5987   lo_space_->ReportStatistics();
5988   PrintF(">>>>>> ========================================= >>>>>>\n");
5989 }
5990 
5991 #endif  // DEBUG
5992 
5993 bool Heap::Contains(HeapObject* value) {
5994   return Contains(value->address());
5995 }
5996 
5997 
5998 bool Heap::Contains(Address addr) {
5999   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
6000   return HasBeenSetUp() &&
6001     (new_space_.ToSpaceContains(addr) ||
6002      old_pointer_space_->Contains(addr) ||
6003      old_data_space_->Contains(addr) ||
6004      code_space_->Contains(addr) ||
6005      map_space_->Contains(addr) ||
6006      cell_space_->Contains(addr) ||
6007      property_cell_space_->Contains(addr) ||
6008      lo_space_->SlowContains(addr));
6009 }
6010 
6011 
6012 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6013   return InSpace(value->address(), space);
6014 }
6015 
6016 
6017 bool Heap::InSpace(Address addr, AllocationSpace space) {
6018   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
6019   if (!HasBeenSetUp()) return false;
6020 
6021   switch (space) {
6022     case NEW_SPACE:
6023       return new_space_.ToSpaceContains(addr);
6024     case OLD_POINTER_SPACE:
6025       return old_pointer_space_->Contains(addr);
6026     case OLD_DATA_SPACE:
6027       return old_data_space_->Contains(addr);
6028     case CODE_SPACE:
6029       return code_space_->Contains(addr);
6030     case MAP_SPACE:
6031       return map_space_->Contains(addr);
6032     case CELL_SPACE:
6033       return cell_space_->Contains(addr);
6034     case PROPERTY_CELL_SPACE:
6035       return property_cell_space_->Contains(addr);
6036     case LO_SPACE:
6037       return lo_space_->SlowContains(addr);
6038   }
6039 
6040   return false;
6041 }
6042 
6043 
6044 #ifdef VERIFY_HEAP
6045 void Heap::Verify() {
6046   CHECK(HasBeenSetUp());
6047 
6048   store_buffer()->Verify();
6049 
6050   VerifyPointersVisitor visitor;
6051   IterateRoots(&visitor, VISIT_ONLY_STRONG);
6052 
6053   new_space_.Verify();
6054 
6055   old_pointer_space_->Verify(&visitor);
6056   map_space_->Verify(&visitor);
6057 
6058   VerifyPointersVisitor no_dirty_regions_visitor;
6059   old_data_space_->Verify(&no_dirty_regions_visitor);
6060   code_space_->Verify(&no_dirty_regions_visitor);
6061   cell_space_->Verify(&no_dirty_regions_visitor);
6062   property_cell_space_->Verify(&no_dirty_regions_visitor);
6063 
6064   lo_space_->Verify();
6065 }
6066 #endif
6067 
6068 
6069 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6070   Object* result = NULL;
6071   Object* new_table;
6072   { MaybeObject* maybe_new_table =
6073         string_table()->LookupUtf8String(string, &result);
6074     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6075   }
6076   // Can't use set_string_table because StringTable::cast knows that
6077   // StringTable is a singleton and checks for identity.
6078   roots_[kStringTableRootIndex] = new_table;
6079   ASSERT(result != NULL);
6080   return result;
6081 }
6082 
6083 
6084 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6085   Object* result = NULL;
6086   Object* new_table;
6087   { MaybeObject* maybe_new_table =
6088         string_table()->LookupOneByteString(string, &result);
6089     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6090   }
6091   // Can't use set_string_table because StringTable::cast knows that
6092   // StringTable is a singleton and checks for identity.
6093   roots_[kStringTableRootIndex] = new_table;
6094   ASSERT(result != NULL);
6095   return result;
6096 }
6097 
6098 
6099 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6100                                      int from,
6101                                      int length) {
6102   Object* result = NULL;
6103   Object* new_table;
6104   { MaybeObject* maybe_new_table =
6105         string_table()->LookupSubStringOneByteString(string,
6106                                                    from,
6107                                                    length,
6108                                                    &result);
6109     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6110   }
6111   // Can't use set_string_table because StringTable::cast knows that
6112   // StringTable is a singleton and checks for identity.
6113   roots_[kStringTableRootIndex] = new_table;
6114   ASSERT(result != NULL);
6115   return result;
6116 }
6117 
6118 
6119 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6120   Object* result = NULL;
6121   Object* new_table;
6122   { MaybeObject* maybe_new_table =
6123         string_table()->LookupTwoByteString(string, &result);
6124     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6125   }
6126   // Can't use set_string_table because StringTable::cast knows that
6127   // StringTable is a singleton and checks for identity.
6128   roots_[kStringTableRootIndex] = new_table;
6129   ASSERT(result != NULL);
6130   return result;
6131 }
6132 
6133 
6134 MaybeObject* Heap::InternalizeString(String* string) {
6135   if (string->IsInternalizedString()) return string;
6136   Object* result = NULL;
6137   Object* new_table;
6138   { MaybeObject* maybe_new_table =
6139         string_table()->LookupString(string, &result);
6140     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6141   }
6142   // Can't use set_string_table because StringTable::cast knows that
6143   // StringTable is a singleton and checks for identity.
6144   roots_[kStringTableRootIndex] = new_table;
6145   ASSERT(result != NULL);
6146   return result;
6147 }
6148 
6149 
6150 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6151   if (string->IsInternalizedString()) {
6152     *result = string;
6153     return true;
6154   }
6155   return string_table()->LookupStringIfExists(string, result);
6156 }
6157 
6158 
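// Overwrites every word of the from-space pages with kFromSpaceZapValue so
// that stale references into from space are easy to recognize.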
6159 void Heap::ZapFromSpace() {
6160   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6161                           new_space_.FromSpaceEnd());
6162   while (it.has_next()) {
6163     NewSpacePage* page = it.next();
6164     for (Address cursor = page->area_start(), limit = page->area_end();
6165          cursor < limit;
6166          cursor += kPointerSize) {
6167       Memory::Address_at(cursor) = kFromSpaceZapValue;
6168     }
6169   }
6170 }
6171 
6172 
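// Visits every pointer-sized slot in [start, end). Slots pointing into from
// space are handed to |callback|; if the slot still refers to new space
// afterwards it is re-registered in the store buffer. While compacting,
// slots in black objects that point at evacuation candidates are recorded
// for the mark-compact collector.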
6173 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6174                                              Address end,
6175                                              ObjectSlotCallback callback) {
6176   Address slot_address = start;
6177 
6178   // We are not collecting slots on new space objects during mutation,
6179   // thus we have to scan for pointers to evacuation candidates when we
6180   // promote objects. But we should not record any slots in non-black
6181   // objects. A grey object's slots would be rescanned anyway, and a
6182   // white object might not survive until the end of the collection, so
6183   // it would be a violation of the invariant to record its slots.
6184   bool record_slots = false;
6185   if (incremental_marking()->IsCompacting()) {
6186     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6187     record_slots = Marking::IsBlack(mark_bit);
6188   }
6189 
6190   while (slot_address < end) {
6191     Object** slot = reinterpret_cast<Object**>(slot_address);
6192     Object* object = *slot;
6193     // If the store buffer becomes overfull we mark pages as being exempt from
6194     // the store buffer.  These pages are scanned to find pointers that point
6195     // to the new space.  In that case we may hit newly promoted objects and
6196     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6197     if (object->IsHeapObject()) {
6198       if (Heap::InFromSpace(object)) {
6199         callback(reinterpret_cast<HeapObject**>(slot),
6200                  HeapObject::cast(object));
6201         Object* new_object = *slot;
6202         if (InNewSpace(new_object)) {
6203           SLOW_ASSERT(Heap::InToSpace(new_object));
6204           SLOW_ASSERT(new_object->IsHeapObject());
6205           store_buffer_.EnterDirectlyIntoStoreBuffer(
6206               reinterpret_cast<Address>(slot));
6207         }
6208         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6209       } else if (record_slots &&
6210                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6211         mark_compact_collector()->RecordSlot(slot, slot, object);
6212       }
6213     }
6214     slot_address += kPointerSize;
6215   }
6216 }
6217 
6218 
6219 #ifdef DEBUG
6220 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6221 
6222 
6223 bool IsAMapPointerAddress(Object** addr) {
6224   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6225   int mod = a % Map::kSize;
6226   return mod >= Map::kPointerFieldsBeginOffset &&
6227          mod < Map::kPointerFieldsEndOffset;
6228 }
6229 
6230 
6231 bool EverythingsAPointer(Object** addr) {
6232   return true;
6233 }
6234 
6235 
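// Debug-only helper: walks the slots in [current, limit) and checks that
// every pointer into new space has a matching entry in the (already sorted)
// store buffer, skipping free-space blocks and the current linear allocation
// area between top and limit.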
6236 static void CheckStoreBuffer(Heap* heap,
6237                              Object** current,
6238                              Object** limit,
6239                              Object**** store_buffer_position,
6240                              Object*** store_buffer_top,
6241                              CheckStoreBufferFilter filter,
6242                              Address special_garbage_start,
6243                              Address special_garbage_end) {
6244   Map* free_space_map = heap->free_space_map();
6245   for ( ; current < limit; current++) {
6246     Object* o = *current;
6247     Address current_address = reinterpret_cast<Address>(current);
6248     // Skip free space.
6249     if (o == free_space_map) {
6250       Address current_address = reinterpret_cast<Address>(current);
6251       FreeSpace* free_space =
6252           FreeSpace::cast(HeapObject::FromAddress(current_address));
6253       int skip = free_space->Size();
6254       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6255       ASSERT(skip > 0);
6256       current_address += skip - kPointerSize;
6257       current = reinterpret_cast<Object**>(current_address);
6258       continue;
6259     }
6260     // Skip the current linear allocation area between top and limit, which is
6261     // not marked with the free space map but can contain junk.
6262     if (current_address == special_garbage_start &&
6263         special_garbage_end != special_garbage_start) {
6264       current_address = special_garbage_end - kPointerSize;
6265       current = reinterpret_cast<Object**>(current_address);
6266       continue;
6267     }
6268     if (!(*filter)(current)) continue;
6269     ASSERT(current_address < special_garbage_start ||
6270            current_address >= special_garbage_end);
6271     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6272     // We have to check that the pointer does not point into new space
6273     // without trying to cast it to a heap object since the hash field of
6274     // a string can contain values like 1 and 3 which are tagged null
6275     // pointers.
6276     if (!heap->InNewSpace(o)) continue;
6277     while (**store_buffer_position < current &&
6278            *store_buffer_position < store_buffer_top) {
6279       (*store_buffer_position)++;
6280     }
6281     if (**store_buffer_position != current ||
6282         *store_buffer_position == store_buffer_top) {
6283       Object** obj_start = current;
6284       while (!(*obj_start)->IsMap()) obj_start--;
6285       UNREACHABLE();
6286     }
6287   }
6288 }
6289 
6290 
6291 // Check that the store buffer contains all intergenerational pointers by
6292 // scanning a page and ensuring that all pointers to young space are in the
6293 // store buffer.
6294 void Heap::OldPointerSpaceCheckStoreBuffer() {
6295   OldSpace* space = old_pointer_space();
6296   PageIterator pages(space);
6297 
6298   store_buffer()->SortUniq();
6299 
6300   while (pages.has_next()) {
6301     Page* page = pages.next();
6302     Object** current = reinterpret_cast<Object**>(page->area_start());
6303 
6304     Address end = page->area_end();
6305 
6306     Object*** store_buffer_position = store_buffer()->Start();
6307     Object*** store_buffer_top = store_buffer()->Top();
6308 
6309     Object** limit = reinterpret_cast<Object**>(end);
6310     CheckStoreBuffer(this,
6311                      current,
6312                      limit,
6313                      &store_buffer_position,
6314                      store_buffer_top,
6315                      &EverythingsAPointer,
6316                      space->top(),
6317                      space->limit());
6318   }
6319 }
6320 
6321 
6322 void Heap::MapSpaceCheckStoreBuffer() {
6323   MapSpace* space = map_space();
6324   PageIterator pages(space);
6325 
6326   store_buffer()->SortUniq();
6327 
6328   while (pages.has_next()) {
6329     Page* page = pages.next();
6330     Object** current = reinterpret_cast<Object**>(page->area_start());
6331 
6332     Address end = page->area_end();
6333 
6334     Object*** store_buffer_position = store_buffer()->Start();
6335     Object*** store_buffer_top = store_buffer()->Top();
6336 
6337     Object** limit = reinterpret_cast<Object**>(end);
6338     CheckStoreBuffer(this,
6339                      current,
6340                      limit,
6341                      &store_buffer_position,
6342                      store_buffer_top,
6343                      &IsAMapPointerAddress,
6344                      space->top(),
6345                      space->limit());
6346   }
6347 }
6348 
6349 
6350 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6351   LargeObjectIterator it(lo_space());
6352   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6353     // We only have code, sequential strings, or fixed arrays in large
6354     // object space, and only fixed arrays can possibly contain pointers to
6355     // the young generation.
6356     if (object->IsFixedArray()) {
6357       Object*** store_buffer_position = store_buffer()->Start();
6358       Object*** store_buffer_top = store_buffer()->Top();
6359       Object** current = reinterpret_cast<Object**>(object->address());
6360       Object** limit =
6361           reinterpret_cast<Object**>(object->address() + object->Size());
6362       CheckStoreBuffer(this,
6363                        current,
6364                        limit,
6365                        &store_buffer_position,
6366                        store_buffer_top,
6367                        &EverythingsAPointer,
6368                        NULL,
6369                        NULL);
6370     }
6371   }
6372 }
6373 #endif
6374 
6375 
6376 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6377   IterateStrongRoots(v, mode);
6378   IterateWeakRoots(v, mode);
6379 }
6380 
6381 
6382 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6383   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6384   v->Synchronize(VisitorSynchronization::kStringTable);
6385   if (mode != VISIT_ALL_IN_SCAVENGE &&
6386       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6387     // Scavenge collections have special processing for this.
6388     external_string_table_.Iterate(v);
6389   }
6390   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6391 }
6392 
6393 
6394 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6395   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6396   v->Synchronize(VisitorSynchronization::kStrongRootList);
6397 
6398   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6399   v->Synchronize(VisitorSynchronization::kInternalizedString);
6400 
6401   isolate_->bootstrapper()->Iterate(v);
6402   v->Synchronize(VisitorSynchronization::kBootstrapper);
6403   isolate_->Iterate(v);
6404   v->Synchronize(VisitorSynchronization::kTop);
6405   Relocatable::Iterate(isolate_, v);
6406   v->Synchronize(VisitorSynchronization::kRelocatable);
6407 
6408 #ifdef ENABLE_DEBUGGER_SUPPORT
6409   isolate_->debug()->Iterate(v);
6410   if (isolate_->deoptimizer_data() != NULL) {
6411     isolate_->deoptimizer_data()->Iterate(v);
6412   }
6413 #endif
6414   v->Synchronize(VisitorSynchronization::kDebug);
6415   isolate_->compilation_cache()->Iterate(v);
6416   v->Synchronize(VisitorSynchronization::kCompilationCache);
6417 
6418   // Iterate over local handles in handle scopes.
6419   isolate_->handle_scope_implementer()->Iterate(v);
6420   isolate_->IterateDeferredHandles(v);
6421   v->Synchronize(VisitorSynchronization::kHandleScope);
6422 
6423   // Iterate over the builtin code objects and code stubs in the
6424   // heap. Note that it is not necessary to iterate over code objects
6425   // on scavenge collections.
6426   if (mode != VISIT_ALL_IN_SCAVENGE) {
6427     isolate_->builtins()->IterateBuiltins(v);
6428   }
6429   v->Synchronize(VisitorSynchronization::kBuiltins);
6430 
6431   // Iterate over global handles.
6432   switch (mode) {
6433     case VISIT_ONLY_STRONG:
6434       isolate_->global_handles()->IterateStrongRoots(v);
6435       break;
6436     case VISIT_ALL_IN_SCAVENGE:
6437       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6438       break;
6439     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6440     case VISIT_ALL:
6441       isolate_->global_handles()->IterateAllRoots(v);
6442       break;
6443   }
6444   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6445 
6446   // Iterate over eternal handles.
6447   if (mode == VISIT_ALL_IN_SCAVENGE) {
6448     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6449   } else {
6450     isolate_->eternal_handles()->IterateAllRoots(v);
6451   }
6452   v->Synchronize(VisitorSynchronization::kEternalHandles);
6453 
6454   // Iterate over pointers being held by inactive threads.
6455   isolate_->thread_manager()->Iterate(v);
6456   v->Synchronize(VisitorSynchronization::kThreadManager);
6457 
6458   // Iterate over the pointers the Serialization/Deserialization code is
6459   // holding.
6460   // During garbage collection this keeps the partial snapshot cache alive.
6461   // During deserialization of the startup snapshot this creates the partial
6462   // snapshot cache and deserializes the objects it refers to.  During
6463   // serialization this does nothing, since the partial snapshot cache is
6464   // empty.  However the next thing we do is create the partial snapshot,
6465   // filling up the partial snapshot cache with objects it needs as we go.
6466   SerializerDeserializer::Iterate(isolate_, v);
6467   // We don't do a v->Synchronize call here, because in debug mode that will
6468   // output a flag to the snapshot.  However at this point the serializer and
6469   // deserializer are deliberately a little unsynchronized (see above) so the
6470   // checking of the sync flag in the snapshot would fail.
6471 }
6472 
6473 
6474 // TODO(1236194): Since the heap size is configurable on the command line
6475 // and through the API, we should gracefully handle the case that the heap
6476 // size is not big enough to fit all the initial objects.
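// The requested sizes are sanitized here: the semispace size is raised to at
// least one page and rounded up to a power of two, the executable size is
// capped by the old generation size, and when running from a snapshot the
// semispace cannot grow beyond the reserved default. Returns false if the
// heap has already been set up.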
6477 bool Heap::ConfigureHeap(int max_semispace_size,
6478                          intptr_t max_old_gen_size,
6479                          intptr_t max_executable_size) {
6480   if (HasBeenSetUp()) return false;
6481 
6482   if (FLAG_stress_compaction) {
6483     // This will cause more frequent GCs when stressing.
6484     max_semispace_size_ = Page::kPageSize;
6485   }
6486 
6487   if (max_semispace_size > 0) {
6488     if (max_semispace_size < Page::kPageSize) {
6489       max_semispace_size = Page::kPageSize;
6490       if (FLAG_trace_gc) {
6491         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6492                  Page::kPageSize >> 10);
6493       }
6494     }
6495     max_semispace_size_ = max_semispace_size;
6496   }
6497 
6498   if (Snapshot::IsEnabled()) {
6499     // If we are using a snapshot we always reserve the default amount
6500     // of memory for each semispace because code in the snapshot has
6501     // write-barrier code that relies on the size and alignment of new
6502     // space.  We therefore cannot use a larger max semispace size
6503     // than the default reserved semispace size.
6504     if (max_semispace_size_ > reserved_semispace_size_) {
6505       max_semispace_size_ = reserved_semispace_size_;
6506       if (FLAG_trace_gc) {
6507         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6508                  reserved_semispace_size_ >> 10);
6509       }
6510     }
6511   } else {
6512     // If we are not using snapshots we reserve space for the actual
6513     // max semispace size.
6514     reserved_semispace_size_ = max_semispace_size_;
6515   }
6516 
6517   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6518   if (max_executable_size > 0) {
6519     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6520   }
6521 
6522   // The max executable size must be less than or equal to the max old
6523   // generation size.
6524   if (max_executable_size_ > max_old_generation_size_) {
6525     max_executable_size_ = max_old_generation_size_;
6526   }
6527 
6528   // The new space size must be a power of two to support single-bit testing
6529   // for containment.
6530   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6531   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6532   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6533 
6534   // The external allocation limit should be below 256 MB on all architectures
6535   // to avoid unnecessary low memory notifications, as that is the threshold
6536   // for some embedders.
6537   external_allocation_limit_ = 12 * max_semispace_size_;
6538   ASSERT(external_allocation_limit_ <= 256 * MB);
6539 
6540   // The old generation is paged and needs at least one page for each space.
6541   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6542   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6543                                                        Page::kPageSize),
6544                                  RoundUp(max_old_generation_size_,
6545                                          Page::kPageSize));
6546 
6547   // We rely on being able to allocate new arrays in paged spaces.
6548   ASSERT(MaxRegularSpaceAllocationSize() >=
6549          (JSArray::kSize +
6550           FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
6551           AllocationMemento::kSize));
6552 
6553   configured_ = true;
6554   return true;
6555 }
6556 
6557 
6558 bool Heap::ConfigureHeapDefault() {
6559   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6560                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6561                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6562 }
6563 
6564 
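// Fills |stats| with the current size and capacity of every space. When
// |take_snapshot| is set, the heap is additionally walked to tally object
// counts and bytes per instance type.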
6565 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6566   *stats->start_marker = HeapStats::kStartMarker;
6567   *stats->end_marker = HeapStats::kEndMarker;
6568   *stats->new_space_size = new_space_.SizeAsInt();
6569   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6570   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6571   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6572   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6573   *stats->old_data_space_capacity = old_data_space_->Capacity();
6574   *stats->code_space_size = code_space_->SizeOfObjects();
6575   *stats->code_space_capacity = code_space_->Capacity();
6576   *stats->map_space_size = map_space_->SizeOfObjects();
6577   *stats->map_space_capacity = map_space_->Capacity();
6578   *stats->cell_space_size = cell_space_->SizeOfObjects();
6579   *stats->cell_space_capacity = cell_space_->Capacity();
6580   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6581   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6582   *stats->lo_space_size = lo_space_->Size();
6583   isolate_->global_handles()->RecordStats(stats);
6584   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6585   *stats->memory_allocator_capacity =
6586       isolate()->memory_allocator()->Size() +
6587       isolate()->memory_allocator()->Available();
6588   *stats->os_error = OS::GetLastError();
6590   if (take_snapshot) {
6591     HeapIterator iterator(this);
6592     for (HeapObject* obj = iterator.next();
6593          obj != NULL;
6594          obj = iterator.next()) {
6595       InstanceType type = obj->map()->instance_type();
6596       ASSERT(0 <= type && type <= LAST_TYPE);
6597       stats->objects_per_type[type]++;
6598       stats->size_per_type[type] += obj->Size();
6599     }
6600   }
6601 }
6602 
6603 
6604 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6605   return old_pointer_space_->SizeOfObjects()
6606       + old_data_space_->SizeOfObjects()
6607       + code_space_->SizeOfObjects()
6608       + map_space_->SizeOfObjects()
6609       + cell_space_->SizeOfObjects()
6610       + property_cell_space_->SizeOfObjects()
6611       + lo_space_->SizeOfObjects();
6612 }
6613 
6614 
6615 bool Heap::AdvanceSweepers(int step_size) {
6616   ASSERT(isolate()->num_sweeper_threads() == 0);
6617   bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
6618   sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
6619   return sweeping_complete;
6620 }
6621 
6622 
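// Returns how much externally allocated memory has been registered since the
// last global GC, never a negative amount.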
6623 int64_t Heap::PromotedExternalMemorySize() {
6624   if (amount_of_external_allocated_memory_
6625       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6626   return amount_of_external_allocated_memory_
6627       - amount_of_external_allocated_memory_at_last_global_gc_;
6628 }
6629 
6630 
6631 void Heap::EnableInlineAllocation() {
6632   ASSERT(inline_allocation_disabled_);
6633   inline_allocation_disabled_ = false;
6634 
6635   // Update inline allocation limit for new space.
6636   new_space()->UpdateInlineAllocationLimit(0);
6637 }
6638 
6639 
6640 void Heap::DisableInlineAllocation() {
6641   ASSERT(!inline_allocation_disabled_);
6642   inline_allocation_disabled_ = true;
6643 
6644   // Update inline allocation limit for new space.
6645   new_space()->UpdateInlineAllocationLimit(0);
6646 
6647   // Update inline allocation limit for old spaces.
6648   PagedSpaces spaces(this);
6649   for (PagedSpace* space = spaces.next();
6650        space != NULL;
6651        space = spaces.next()) {
6652     space->EmptyAllocationInfo();
6653   }
6654 }
6655 
6656 
6657 V8_DECLARE_ONCE(initialize_gc_once);
6658 
6659 static void InitializeGCOnce() {
6660   InitializeScavengingVisitorsTables();
6661   NewSpaceScavenger::Initialize();
6662   MarkCompactCollector::Initialize();
6663 }
6664 
6665 
6666 bool Heap::SetUp() {
6667 #ifdef DEBUG
6668   allocation_timeout_ = FLAG_gc_interval;
6669 #endif
6670 
6671   // Initialize heap spaces and initial maps and objects. Whenever something
6672   // goes wrong, just return false. The caller should check the results and
6673   // call Heap::TearDown() to release allocated memory.
6674   //
6675   // If the heap is not yet configured (e.g. through the API), configure it.
6676   // Configuration is based on the flags new-space-size (really the semispace
6677   // size) and old-space-size if set or the initial values of semispace_size_
6678   // and old_generation_size_ otherwise.
6679   if (!configured_) {
6680     if (!ConfigureHeapDefault()) return false;
6681   }
6682 
6683   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6684 
6685   MarkMapPointersAsEncoded(false);
6686 
6687   // Set up memory allocator.
6688   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6689       return false;
6690 
6691   // Set up new space.
6692   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6693     return false;
6694   }
6695 
6696   // Initialize old pointer space.
6697   old_pointer_space_ =
6698       new OldSpace(this,
6699                    max_old_generation_size_,
6700                    OLD_POINTER_SPACE,
6701                    NOT_EXECUTABLE);
6702   if (old_pointer_space_ == NULL) return false;
6703   if (!old_pointer_space_->SetUp()) return false;
6704 
6705   // Initialize old data space.
6706   old_data_space_ =
6707       new OldSpace(this,
6708                    max_old_generation_size_,
6709                    OLD_DATA_SPACE,
6710                    NOT_EXECUTABLE);
6711   if (old_data_space_ == NULL) return false;
6712   if (!old_data_space_->SetUp()) return false;
6713 
6714   // Initialize the code space, set its maximum capacity to the old
6715   // generation size. It needs executable memory.
6716   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6717   // virtual address space, so that they can call each other with near calls.
6718   if (code_range_size_ > 0) {
6719     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6720       return false;
6721     }
6722   }
6723 
6724   code_space_ =
6725       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6726   if (code_space_ == NULL) return false;
6727   if (!code_space_->SetUp()) return false;
6728 
6729   // Initialize map space.
6730   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6731   if (map_space_ == NULL) return false;
6732   if (!map_space_->SetUp()) return false;
6733 
6734   // Initialize simple cell space.
6735   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6736   if (cell_space_ == NULL) return false;
6737   if (!cell_space_->SetUp()) return false;
6738 
6739   // Initialize global property cell space.
6740   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6741                                                PROPERTY_CELL_SPACE);
6742   if (property_cell_space_ == NULL) return false;
6743   if (!property_cell_space_->SetUp()) return false;
6744 
6745   // The large object code space may contain code or data.  We set the memory
6746   // to be non-executable here for safety, but this means we need to enable it
6747   // explicitly when allocating large code objects.
6748   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6749   if (lo_space_ == NULL) return false;
6750   if (!lo_space_->SetUp()) return false;
6751 
6752   // Set up the seed that is used to randomize the string hash function.
6753   ASSERT(hash_seed() == 0);
6754   if (FLAG_randomize_hashes) {
6755     if (FLAG_hash_seed == 0) {
6756       int rnd = isolate()->random_number_generator()->NextInt();
6757       set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
6758     } else {
6759       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6760     }
6761   }
6762 
6763   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6764   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6765 
6766   store_buffer()->SetUp();
6767 
6768   if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
6769 
6770   return true;
6771 }
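
// Summary of the SetUp() sequence above (descriptive note, no new behavior):
// the memory allocator comes first, then the semispace-based new space, the
// paged spaces (old pointer, old data, code, map, cell, property cell), the
// large object space, the string hash seed, and finally the store buffer and
// the optional relocation mutex. Every step reports failure by returning
// false, so a partially constructed heap is surfaced to the caller instead of
// being used.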
6772 
6773 
6774 bool Heap::CreateHeapObjects() {
6775   // Create initial maps.
6776   if (!CreateInitialMaps()) return false;
6777   if (!CreateApiObjects()) return false;
6778 
6779   // Create initial objects
6780   if (!CreateInitialObjects()) return false;
6781 
6782   native_contexts_list_ = undefined_value();
6783   array_buffers_list_ = undefined_value();
6784   allocation_sites_list_ = undefined_value();
6785   weak_object_to_code_table_ = undefined_value();
6786   return true;
6787 }
6788 
6789 
6790 void Heap::SetStackLimits() {
6791   ASSERT(isolate_ != NULL);
6792   ASSERT(isolate_ == isolate());
6793   // On 64 bit machines, pointers are generally out of range of Smis.  We write
6794   // something that looks like an out of range Smi to the GC.
6795 
6796   // Set up the special root array entries containing the stack limits.
6797   // These are actually addresses, but the tag makes the GC ignore it.
6798   roots_[kStackLimitRootIndex] =
6799       reinterpret_cast<Object*>(
6800           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6801   roots_[kRealStackLimitRootIndex] =
6802       reinterpret_cast<Object*>(
6803           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6804 }
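
// A small worked example of the tagging above (illustrative address only):
// with kSmiTag == 0 and kSmiTagMask == 1, a jslimit of 0x7fff5a20 is stored
// as (0x7fff5a20 & ~1) | 0 == 0x7fff5a20, an even word whose low bit carries
// the smi tag. The GC therefore treats the stack-limit roots as integers and
// never tries to follow them as heap object pointers.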
6805 
6806 
6807 void Heap::TearDown() {
6808 #ifdef VERIFY_HEAP
6809   if (FLAG_verify_heap) {
6810     Verify();
6811   }
6812 #endif
6813 
6814   UpdateMaximumCommitted();
6815 
6816   if (FLAG_print_cumulative_gc_stat) {
6817     PrintF("\n");
6818     PrintF("gc_count=%d ", gc_count_);
6819     PrintF("mark_sweep_count=%d ", ms_count_);
6820     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6821     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6822     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6823     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6824            get_max_alive_after_gc());
6825     PrintF("total_marking_time=%.1f ", marking_time());
6826     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6827     PrintF("\n\n");
6828   }
6829 
6830   if (FLAG_print_max_heap_committed) {
6831     PrintF("\n");
6832     PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
6833       MaximumCommittedMemory());
6834     PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
6835       new_space_.MaximumCommittedMemory());
6836     PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
6837       old_pointer_space_->MaximumCommittedMemory());
6838     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
6839       old_data_space_->MaximumCommittedMemory());
6842     PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
6843       code_space_->MaximumCommittedMemory());
6844     PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
6845       map_space_->MaximumCommittedMemory());
6846     PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
6847       cell_space_->MaximumCommittedMemory());
6848     PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
6849       property_cell_space_->MaximumCommittedMemory());
6850     PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
6851       lo_space_->MaximumCommittedMemory());
6852     PrintF("\n\n");
6853   }
6854 
6855   TearDownArrayBuffers();
6856 
6857   isolate_->global_handles()->TearDown();
6858 
6859   external_string_table_.TearDown();
6860 
6861   mark_compact_collector()->TearDown();
6862 
6863   new_space_.TearDown();
6864 
6865   if (old_pointer_space_ != NULL) {
6866     old_pointer_space_->TearDown();
6867     delete old_pointer_space_;
6868     old_pointer_space_ = NULL;
6869   }
6870 
6871   if (old_data_space_ != NULL) {
6872     old_data_space_->TearDown();
6873     delete old_data_space_;
6874     old_data_space_ = NULL;
6875   }
6876 
6877   if (code_space_ != NULL) {
6878     code_space_->TearDown();
6879     delete code_space_;
6880     code_space_ = NULL;
6881   }
6882 
6883   if (map_space_ != NULL) {
6884     map_space_->TearDown();
6885     delete map_space_;
6886     map_space_ = NULL;
6887   }
6888 
6889   if (cell_space_ != NULL) {
6890     cell_space_->TearDown();
6891     delete cell_space_;
6892     cell_space_ = NULL;
6893   }
6894 
6895   if (property_cell_space_ != NULL) {
6896     property_cell_space_->TearDown();
6897     delete property_cell_space_;
6898     property_cell_space_ = NULL;
6899   }
6900 
6901   if (lo_space_ != NULL) {
6902     lo_space_->TearDown();
6903     delete lo_space_;
6904     lo_space_ = NULL;
6905   }
6906 
6907   store_buffer()->TearDown();
6908   incremental_marking()->TearDown();
6909 
6910   isolate_->memory_allocator()->TearDown();
6911 
6912   delete relocation_mutex_;
6913   relocation_mutex_ = NULL;
6914 }
6915 
6916 
6917 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
6918                                  GCType gc_type,
6919                                  bool pass_isolate) {
6920   ASSERT(callback != NULL);
6921   GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
6922   ASSERT(!gc_prologue_callbacks_.Contains(pair));
6923   return gc_prologue_callbacks_.Add(pair);
6924 }
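
// A minimal usage sketch of this mechanism through the public API (assumes an
// initialized v8::Isolate* named isolate; OnGCStart is a hypothetical embedder
// function, not part of this file):
//
//   static void OnGCStart(v8::Isolate* isolate, v8::GCType type,
//                         v8::GCCallbackFlags flags) {
//     // E.g. record a timestamp or bump an embedder-side counter.
//   }
//   ...
//   isolate->AddGCPrologueCallback(OnGCStart, v8::kGCTypeAll);
//
// The pass_isolate flag stored in the pair above determines whether the
// isolate argument is forwarded when the callback is eventually invoked.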
6925 
6926 
6927 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
6928   ASSERT(callback != NULL);
6929   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6930     if (gc_prologue_callbacks_[i].callback == callback) {
6931       gc_prologue_callbacks_.Remove(i);
6932       return;
6933     }
6934   }
6935   UNREACHABLE();
6936 }
6937 
6938 
6939 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
6940                                  GCType gc_type,
6941                                  bool pass_isolate) {
6942   ASSERT(callback != NULL);
6943   GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
6944   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6945   return gc_epilogue_callbacks_.Add(pair);
6946 }
6947 
6948 
6949 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
6950   ASSERT(callback != NULL);
6951   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6952     if (gc_epilogue_callbacks_[i].callback == callback) {
6953       gc_epilogue_callbacks_.Remove(i);
6954       return;
6955     }
6956   }
6957   UNREACHABLE();
6958 }
6959 
6960 
6961 MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
6962                                                  DependentCode* dep) {
6963   ASSERT(!InNewSpace(obj));
6964   ASSERT(!InNewSpace(dep));
6965   MaybeObject* maybe_obj =
6966       WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
6967   WeakHashTable* table;
6968   if (!maybe_obj->To(&table)) return maybe_obj;
6969   if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
6970     WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
6971   }
6972   set_weak_object_to_code_table(table);
6973   ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
6974   return weak_object_to_code_table_;
6975 }
6976 
6977 
6978 DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
6979   Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
6980   if (dep->IsDependentCode()) return DependentCode::cast(dep);
6981   return DependentCode::cast(empty_fixed_array());
6982 }
6983 
6984 
6985 void Heap::EnsureWeakObjectToCodeTable() {
6986   if (!weak_object_to_code_table()->IsHashTable()) {
6987     set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
6988   }
6989 }
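
// How the three functions above cooperate (descriptive note): the table
// starts out as undefined_value() (see CreateHeapObjects()),
// EnsureWeakObjectToCodeTable() lazily replaces it with a real WeakHashTable,
// AddWeakObjectToCodeDependency() inserts an entry and stores the table back
// because Put() may have reallocated it, and
// LookupWeakObjectToCodeDependency() falls back to an empty DependentCode
// list when no entry exists for the object.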
6990 
6991 
6992 #ifdef DEBUG
6993 
6994 class PrintHandleVisitor: public ObjectVisitor {
6995  public:
6996   void VisitPointers(Object** start, Object** end) {
6997     for (Object** p = start; p < end; p++)
6998       PrintF("  handle %p to %p\n",
6999              reinterpret_cast<void*>(p),
7000              reinterpret_cast<void*>(*p));
7001   }
7002 };
7003 
7004 
7005 void Heap::PrintHandles() {
7006   PrintF("Handles:\n");
7007   PrintHandleVisitor v;
7008   isolate_->handle_scope_implementer()->Iterate(&v);
7009 }
7010 
7011 #endif
7012 
7013 
7014 Space* AllSpaces::next() {
7015   switch (counter_++) {
7016     case NEW_SPACE:
7017       return heap_->new_space();
7018     case OLD_POINTER_SPACE:
7019       return heap_->old_pointer_space();
7020     case OLD_DATA_SPACE:
7021       return heap_->old_data_space();
7022     case CODE_SPACE:
7023       return heap_->code_space();
7024     case MAP_SPACE:
7025       return heap_->map_space();
7026     case CELL_SPACE:
7027       return heap_->cell_space();
7028     case PROPERTY_CELL_SPACE:
7029       return heap_->property_cell_space();
7030     case LO_SPACE:
7031       return heap_->lo_space();
7032     default:
7033       return NULL;
7034   }
7035 }
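
// Typical use of these cursor-style space iterators (a sketch; heap and total
// are placeholders): next() returns NULL once the spaces are exhausted, so
// callers loop the same way CountTotalHolesSize() below loops over OldSpaces:
//
//   AllSpaces spaces(heap);
//   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
//     total += space->Size();
//   }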
7036 
7037 
7038 PagedSpace* PagedSpaces::next() {
7039   switch (counter_++) {
7040     case OLD_POINTER_SPACE:
7041       return heap_->old_pointer_space();
7042     case OLD_DATA_SPACE:
7043       return heap_->old_data_space();
7044     case CODE_SPACE:
7045       return heap_->code_space();
7046     case MAP_SPACE:
7047       return heap_->map_space();
7048     case CELL_SPACE:
7049       return heap_->cell_space();
7050     case PROPERTY_CELL_SPACE:
7051       return heap_->property_cell_space();
7052     default:
7053       return NULL;
7054   }
7055 }
7056 
7057 
7058 
7059 OldSpace* OldSpaces::next() {
7060   switch (counter_++) {
7061     case OLD_POINTER_SPACE:
7062       return heap_->old_pointer_space();
7063     case OLD_DATA_SPACE:
7064       return heap_->old_data_space();
7065     case CODE_SPACE:
7066       return heap_->code_space();
7067     default:
7068       return NULL;
7069   }
7070 }
7071 
7072 
7073 SpaceIterator::SpaceIterator(Heap* heap)
7074     : heap_(heap),
7075       current_space_(FIRST_SPACE),
7076       iterator_(NULL),
7077       size_func_(NULL) {
7078 }
7079 
7080 
7081 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7082     : heap_(heap),
7083       current_space_(FIRST_SPACE),
7084       iterator_(NULL),
7085       size_func_(size_func) {
7086 }
7087 
7088 
7089 SpaceIterator::~SpaceIterator() {
7090   // Delete active iterator if any.
7091   delete iterator_;
7092 }
7093 
7094 
7095 bool SpaceIterator::has_next() {
7096   // Iterate until no more spaces.
7097   return current_space_ != LAST_SPACE;
7098 }
7099 
7100 
7101 ObjectIterator* SpaceIterator::next() {
7102   if (iterator_ != NULL) {
7103     delete iterator_;
7104     iterator_ = NULL;
7105     // Move to the next space
7106     current_space_++;
7107     if (current_space_ > LAST_SPACE) {
7108       return NULL;
7109     }
7110   }
7111 
7112   // Return iterator for the new current space.
7113   return CreateIterator();
7114 }
7115 
7116 
7117 // Create an iterator for the space to iterate.
7118 ObjectIterator* SpaceIterator::CreateIterator() {
7119   ASSERT(iterator_ == NULL);
7120 
7121   switch (current_space_) {
7122     case NEW_SPACE:
7123       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7124       break;
7125     case OLD_POINTER_SPACE:
7126       iterator_ =
7127           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7128       break;
7129     case OLD_DATA_SPACE:
7130       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7131       break;
7132     case CODE_SPACE:
7133       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7134       break;
7135     case MAP_SPACE:
7136       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7137       break;
7138     case CELL_SPACE:
7139       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7140       break;
7141     case PROPERTY_CELL_SPACE:
7142       iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7143                                          size_func_);
7144       break;
7145     case LO_SPACE:
7146       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7147       break;
7148   }
7149 
7150   // Return the newly allocated iterator.
7151   ASSERT(iterator_ != NULL);
7152   return iterator_;
7153 }
7154 
7155 
7156 class HeapObjectsFilter {
7157  public:
7158   virtual ~HeapObjectsFilter() {}
7159   virtual bool SkipObject(HeapObject* object) = 0;
7160 };
7161 
7162 
7163 class UnreachableObjectsFilter : public HeapObjectsFilter {
7164  public:
7165   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
7166     MarkReachableObjects();
7167   }
7168 
7169   ~UnreachableObjectsFilter() {
7170     heap_->mark_compact_collector()->ClearMarkbits();
7171   }
7172 
7173   bool SkipObject(HeapObject* object) {
7174     MarkBit mark_bit = Marking::MarkBitFrom(object);
7175     return !mark_bit.Get();
7176   }
7177 
7178  private:
7179   class MarkingVisitor : public ObjectVisitor {
7180    public:
7181     MarkingVisitor() : marking_stack_(10) {}
7182 
7183     void VisitPointers(Object** start, Object** end) {
7184       for (Object** p = start; p < end; p++) {
7185         if (!(*p)->IsHeapObject()) continue;
7186         HeapObject* obj = HeapObject::cast(*p);
7187         MarkBit mark_bit = Marking::MarkBitFrom(obj);
7188         if (!mark_bit.Get()) {
7189           mark_bit.Set();
7190           marking_stack_.Add(obj);
7191         }
7192       }
7193     }
7194 
7195     void TransitiveClosure() {
7196       while (!marking_stack_.is_empty()) {
7197         HeapObject* obj = marking_stack_.RemoveLast();
7198         obj->Iterate(this);
7199       }
7200     }
7201 
7202    private:
7203     List<HeapObject*> marking_stack_;
7204   };
7205 
7206   void MarkReachableObjects() {
7207     MarkingVisitor visitor;
7208     heap_->IterateRoots(&visitor, VISIT_ALL);
7209     visitor.TransitiveClosure();
7210   }
7211 
7212   Heap* heap_;
7213   DisallowHeapAllocation no_allocation_;
7214 };
7215 
7216 
7217 HeapIterator::HeapIterator(Heap* heap)
7218     : heap_(heap),
7219       filtering_(HeapIterator::kNoFiltering),
7220       filter_(NULL) {
7221   Init();
7222 }
7223 
7224 
7225 HeapIterator::HeapIterator(Heap* heap,
7226                            HeapIterator::HeapObjectsFiltering filtering)
7227     : heap_(heap),
7228       filtering_(filtering),
7229       filter_(NULL) {
7230   Init();
7231 }
7232 
7233 
7234 HeapIterator::~HeapIterator() {
7235   Shutdown();
7236 }
7237 
7238 
7239 void HeapIterator::Init() {
7240   // Start the iteration.
7241   space_iterator_ = new SpaceIterator(heap_);
7242   switch (filtering_) {
7243     case kFilterUnreachable:
7244       filter_ = new UnreachableObjectsFilter(heap_);
7245       break;
7246     default:
7247       break;
7248   }
7249   object_iterator_ = space_iterator_->next();
7250 }
7251 
7252 
7253 void HeapIterator::Shutdown() {
7254 #ifdef DEBUG
7255   // Assert that in filtering mode we have iterated through all
7256   // objects. Otherwise, heap will be left in an inconsistent state.
7257   if (filtering_ != kNoFiltering) {
7258     ASSERT(object_iterator_ == NULL);
7259   }
7260 #endif
7261   // Make sure the last iterator is deallocated.
7262   delete space_iterator_;
7263   space_iterator_ = NULL;
7264   object_iterator_ = NULL;
7265   delete filter_;
7266   filter_ = NULL;
7267 }
7268 
7269 
7270 HeapObject* HeapIterator::next() {
7271   if (filter_ == NULL) return NextObject();
7272 
7273   HeapObject* obj = NextObject();
7274   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7275   return obj;
7276 }
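
// A usage sketch for HeapIterator (assuming a Heap* named heap): it walks the
// spaces produced by SpaceIterator and yields one HeapObject at a time; with
// kFilterUnreachable it first marks the transitive closure of the roots and
// then skips every unmarked object:
//
//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // Only objects reachable from the roots show up here.
//   }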
7277 
7278 
7279 HeapObject* HeapIterator::NextObject() {
7280   // No iterator means we are done.
7281   if (object_iterator_ == NULL) return NULL;
7282 
7283   if (HeapObject* obj = object_iterator_->next_object()) {
7284     // If the current iterator has more objects we are fine.
7285     return obj;
7286   } else {
7287     // Go through the spaces looking for one that has objects.
7288     while (space_iterator_->has_next()) {
7289       object_iterator_ = space_iterator_->next();
7290       if (HeapObject* obj = object_iterator_->next_object()) {
7291         return obj;
7292       }
7293     }
7294   }
7295   // Done with the last space.
7296   object_iterator_ = NULL;
7297   return NULL;
7298 }
7299 
7300 
7301 void HeapIterator::reset() {
7302   // Restart the iterator.
7303   Shutdown();
7304   Init();
7305 }
7306 
7307 
7308 #ifdef DEBUG
7309 
7310 Object* const PathTracer::kAnyGlobalObject = NULL;
7311 
7312 class PathTracer::MarkVisitor: public ObjectVisitor {
7313  public:
7314   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7315   void VisitPointers(Object** start, Object** end) {
7316     // Scan all HeapObject pointers in [start, end)
7317     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7318       if ((*p)->IsHeapObject())
7319         tracer_->MarkRecursively(p, this);
7320     }
7321   }
7322 
7323  private:
7324   PathTracer* tracer_;
7325 };
7326 
7327 
7328 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7329  public:
7330   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7331   void VisitPointers(Object** start, Object** end) {
7332     // Scan all HeapObject pointers in [start, end)
7333     for (Object** p = start; p < end; p++) {
7334       if ((*p)->IsHeapObject())
7335         tracer_->UnmarkRecursively(p, this);
7336     }
7337   }
7338 
7339  private:
7340   PathTracer* tracer_;
7341 };
7342 
7343 
7344 void PathTracer::VisitPointers(Object** start, Object** end) {
7345   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7346   // Visit all HeapObject pointers in [start, end)
7347   for (Object** p = start; !done && (p < end); p++) {
7348     if ((*p)->IsHeapObject()) {
7349       TracePathFrom(p);
7350       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7351     }
7352   }
7353 }
7354 
7355 
7356 void PathTracer::Reset() {
7357   found_target_ = false;
7358   object_stack_.Clear();
7359 }
7360 
7361 
7362 void PathTracer::TracePathFrom(Object** root) {
7363   ASSERT((search_target_ == kAnyGlobalObject) ||
7364          search_target_->IsHeapObject());
7365   found_target_in_trace_ = false;
7366   Reset();
7367 
7368   MarkVisitor mark_visitor(this);
7369   MarkRecursively(root, &mark_visitor);
7370 
7371   UnmarkVisitor unmark_visitor(this);
7372   UnmarkRecursively(root, &unmark_visitor);
7373 
7374   ProcessResults();
7375 }
7376 
7377 
7378 static bool SafeIsNativeContext(HeapObject* obj) {
7379   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7380 }
7381 
7382 
7383 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7384   if (!(*p)->IsHeapObject()) return;
7385 
7386   HeapObject* obj = HeapObject::cast(*p);
7387 
7388   Object* map = obj->map();
7389 
7390   if (!map->IsHeapObject()) return;  // visited before
7391 
7392   if (found_target_in_trace_) return;  // stop if target found
7393   object_stack_.Add(obj);
7394   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7395       (obj == search_target_)) {
7396     found_target_in_trace_ = true;
7397     found_target_ = true;
7398     return;
7399   }
7400 
7401   bool is_native_context = SafeIsNativeContext(obj);
7402 
7403   // not visited yet
7404   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7405 
7406   Address map_addr = map_p->address();
7407 
7408   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7409 
7410   // Scan the object body.
7411   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7412     // This is specialized to scan Contexts properly.
7413     Object** start = reinterpret_cast<Object**>(obj->address() +
7414                                                 Context::kHeaderSize);
7415     Object** end = reinterpret_cast<Object**>(obj->address() +
7416         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7417     mark_visitor->VisitPointers(start, end);
7418   } else {
7419     obj->IterateBody(map_p->instance_type(),
7420                      obj->SizeFromMap(map_p),
7421                      mark_visitor);
7422   }
7423 
7424   // Scan the map after the body because the body is a lot more interesting
7425   // when doing leak detection.
7426   MarkRecursively(&map, mark_visitor);
7427 
7428   if (!found_target_in_trace_)  // don't pop if found the target
7429     object_stack_.RemoveLast();
7430 }
7431 
7432 
7433 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7434   if (!(*p)->IsHeapObject()) return;
7435 
7436   HeapObject* obj = HeapObject::cast(*p);
7437 
7438   Object* map = obj->map();
7439 
7440   if (map->IsHeapObject()) return;  // unmarked already
7441 
7442   Address map_addr = reinterpret_cast<Address>(map);
7443 
7444   map_addr -= kMarkTag;
7445 
7446   ASSERT_TAG_ALIGNED(map_addr);
7447 
7448   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7449 
7450   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7451 
7452   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7453 
7454   obj->IterateBody(Map::cast(map_p)->instance_type(),
7455                    obj->SizeFromMap(Map::cast(map_p)),
7456                    unmark_visitor);
7457 }
7458 
7459 
7460 void PathTracer::ProcessResults() {
7461   if (found_target_) {
7462     PrintF("=====================================\n");
7463     PrintF("====        Path to object       ====\n");
7464     PrintF("=====================================\n\n");
7465 
7466     ASSERT(!object_stack_.is_empty());
7467     for (int i = 0; i < object_stack_.length(); i++) {
7468       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7469       Object* obj = object_stack_[i];
7470       obj->Print();
7471     }
7472     PrintF("=====================================\n");
7473   }
7474 }
7475 
7476 
7477 // Triggers a depth-first traversal of reachable objects from one
7478 // given root object and finds a path to a specific heap object and
7479 // prints it.
7480 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7481   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7482   tracer.VisitPointer(&root);
7483 }
7484 
7485 
7486 // Triggers a depth-first traversal of reachable objects from roots
7487 // and finds a path to a specific heap object and prints it.
7488 void Heap::TracePathToObject(Object* target) {
7489   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7490   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7491 }
7492 
7493 
7494 // Triggers a depth-first traversal of reachable objects from roots
7495 // and finds a path to any global object and prints it. Useful for
7496 // determining the source for leaks of global objects.
7497 void Heap::TracePathToGlobal() {
7498   PathTracer tracer(PathTracer::kAnyGlobalObject,
7499                     PathTracer::FIND_ALL,
7500                     VISIT_ALL);
7501   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7502 }
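
// These helpers are debug-only aids. A typical way to use them while hunting
// a leaked object (a sketch; heap and suspect stand for whatever Heap* and
// Object* are under investigation, e.g. from a debugger):
//
//   heap->TracePathToObject(suspect);   // path from the strong roots
//   heap->TracePathToGlobal();          // path to any global object
//
// The discovered path is printed by PathTracer::ProcessResults() above.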
7503 #endif
7504 
7505 
7506 static intptr_t CountTotalHolesSize(Heap* heap) {
7507   intptr_t holes_size = 0;
7508   OldSpaces spaces(heap);
7509   for (OldSpace* space = spaces.next();
7510        space != NULL;
7511        space = spaces.next()) {
7512     holes_size += space->Waste() + space->Available();
7513   }
7514   return holes_size;
7515 }
7516 
7517 
7518 GCTracer::GCTracer(Heap* heap,
7519                    const char* gc_reason,
7520                    const char* collector_reason)
7521     : start_time_(0.0),
7522       start_object_size_(0),
7523       start_memory_size_(0),
7524       gc_count_(0),
7525       full_gc_count_(0),
7526       allocated_since_last_gc_(0),
7527       spent_in_mutator_(0),
7528       promoted_objects_size_(0),
7529       nodes_died_in_new_space_(0),
7530       nodes_copied_in_new_space_(0),
7531       nodes_promoted_(0),
7532       heap_(heap),
7533       gc_reason_(gc_reason),
7534       collector_reason_(collector_reason) {
7535   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7536   start_time_ = OS::TimeCurrentMillis();
7537   start_object_size_ = heap_->SizeOfObjects();
7538   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7539 
7540   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7541     scopes_[i] = 0;
7542   }
7543 
7544   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7545 
7546   allocated_since_last_gc_ =
7547       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7548 
7549   if (heap_->last_gc_end_timestamp_ > 0) {
7550     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7551   }
7552 
7553   steps_count_ = heap_->incremental_marking()->steps_count();
7554   steps_took_ = heap_->incremental_marking()->steps_took();
7555   longest_step_ = heap_->incremental_marking()->longest_step();
7556   steps_count_since_last_gc_ =
7557       heap_->incremental_marking()->steps_count_since_last_gc();
7558   steps_took_since_last_gc_ =
7559       heap_->incremental_marking()->steps_took_since_last_gc();
7560 }
7561 
7562 
7563 GCTracer::~GCTracer() {
7564   // Printf ONE line iff flag is set.
7565   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7566 
7567   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7568 
7569   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7570   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7571 
7572   double time = heap_->last_gc_end_timestamp_ - start_time_;
7573 
7574   // Update cumulative GC statistics if required.
7575   if (FLAG_print_cumulative_gc_stat) {
7576     heap_->total_gc_time_ms_ += time;
7577     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7578     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7579                                      heap_->alive_after_last_gc_);
7580     if (!first_gc) {
7581       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7582                                    spent_in_mutator_);
7583     }
7584   } else if (FLAG_trace_gc_verbose) {
7585     heap_->total_gc_time_ms_ += time;
7586   }
7587 
7588   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7589 
7590   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7591 
7592   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7593   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7594 
7595   if (!FLAG_trace_gc_nvp) {
7596     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7597 
7598     double end_memory_size_mb =
7599         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7600 
7601     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7602            CollectorString(),
7603            static_cast<double>(start_object_size_) / MB,
7604            static_cast<double>(start_memory_size_) / MB,
7605            SizeOfHeapObjects(),
7606            end_memory_size_mb);
7607 
7608     if (external_time > 0) PrintF("%d / ", external_time);
7609     PrintF("%.1f ms", time);
7610     if (steps_count_ > 0) {
7611       if (collector_ == SCAVENGER) {
7612         PrintF(" (+ %.1f ms in %d steps since last GC)",
7613                steps_took_since_last_gc_,
7614                steps_count_since_last_gc_);
7615       } else {
7616         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7617                    "biggest step %.1f ms)",
7618                steps_took_,
7619                steps_count_,
7620                longest_step_);
7621       }
7622     }
7623 
7624     if (gc_reason_ != NULL) {
7625       PrintF(" [%s]", gc_reason_);
7626     }
7627 
7628     if (collector_reason_ != NULL) {
7629       PrintF(" [%s]", collector_reason_);
7630     }
7631 
7632     PrintF(".\n");
7633   } else {
7634     PrintF("pause=%.1f ", time);
7635     PrintF("mutator=%.1f ", spent_in_mutator_);
7636     PrintF("gc=");
7637     switch (collector_) {
7638       case SCAVENGER:
7639         PrintF("s");
7640         break;
7641       case MARK_COMPACTOR:
7642         PrintF("ms");
7643         break;
7644       default:
7645         UNREACHABLE();
7646     }
7647     PrintF(" ");
7648 
7649     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7650     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7651     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7652     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7653     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7654     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7655     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7656     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7657     PrintF("compaction_ptrs=%.1f ",
7658         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7659     PrintF("intracompaction_ptrs=%.1f ",
7660         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7661     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7662     PrintF("weakcollection_process=%.1f ",
7663         scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7664     PrintF("weakcollection_clear=%.1f ",
7665         scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7666 
7667     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7668     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7669     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7670            in_free_list_or_wasted_before_gc_);
7671     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7672 
7673     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7674     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7675     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7676     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7677     PrintF("nodes_promoted=%d ", nodes_promoted_);
7678 
7679     if (collector_ == SCAVENGER) {
7680       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7681       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7682     } else {
7683       PrintF("stepscount=%d ", steps_count_);
7684       PrintF("stepstook=%.1f ", steps_took_);
7685       PrintF("longeststep=%.1f ", longest_step_);
7686     }
7687 
7688     PrintF("\n");
7689   }
7690 
7691   heap_->PrintShortHeapStatistics();
7692 }
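
// Timeline recap for the statistics computed above: if t0 is the end of the
// previous GC (heap_->last_gc_end_timestamp_), t1 the construction of this
// tracer (start_time_) and t2 its destruction, then spent_in_mutator_ is
// max(t1 - t0, 0), the reported pause is t2 - t1, and max_gc_pause_ /
// min_in_mutator_ track the extremes of those two values across the run when
// cumulative GC statistics are enabled.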
7693 
7694 
7695 const char* GCTracer::CollectorString() {
7696   switch (collector_) {
7697     case SCAVENGER:
7698       return "Scavenge";
7699     case MARK_COMPACTOR:
7700       return "Mark-sweep";
7701   }
7702   return "Unknown GC";
7703 }
7704 
7705 
7706 int KeyedLookupCache::Hash(Map* map, Name* name) {
7707   // Uses only lower 32 bits if pointers are larger.
7708   uintptr_t addr_hash =
7709       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7710   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7711 }
7712 
7713 
7714 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7715   int index = (Hash(map, name) & kHashMask);
7716   for (int i = 0; i < kEntriesPerBucket; i++) {
7717     Key& key = keys_[index + i];
7718     if ((key.map == map) && key.name->Equals(name)) {
7719       return field_offsets_[index + i];
7720     }
7721   }
7722   return kNotFound;
7723 }
7724 
7725 
7726 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7727   if (!name->IsUniqueName()) {
7728     String* internalized_string;
7729     if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
7730             String::cast(name), &internalized_string)) {
7731       return;
7732     }
7733     name = internalized_string;
7734   }
7735   // This cache is cleared only between mark compact passes, so we expect the
7736   // cache to only contain old space names.
7737   ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));
7738 
7739   int index = (Hash(map, name) & kHashMask);
7740   // After a GC there will be free slots, so we use them in order (this may
7741   // help to get the most frequently used one in position 0).
7742   for (int i = 0; i < kEntriesPerBucket; i++) {
7743     Key& key = keys_[index + i];
7744     Object* free_entry_indicator = NULL;
7745     if (key.map == free_entry_indicator) {
7746       key.map = map;
7747       key.name = name;
7748       field_offsets_[index + i] = field_offset;
7749       return;
7750     }
7751   }
7752   // No free entry found in this bucket, so we move them all down one and
7753   // put the new entry at position zero.
7754   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7755     Key& key = keys_[index + i];
7756     Key& key2 = keys_[index + i - 1];
7757     key = key2;
7758     field_offsets_[index + i] = field_offsets_[index + i - 1];
7759   }
7760 
7761   // Write the new first entry.
7762   Key& key = keys_[index];
7763   key.map = map;
7764   key.name = name;
7765   field_offsets_[index] = field_offset;
7766 }
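
// Cache layout implied by Hash(), Lookup() and Update() (descriptive note):
// Hash() mixes the map address with the name hash, kHashMask rounds the
// result down to the start of a bucket, and each bucket holds
// kEntriesPerBucket (map, name) -> field_offset entries that are probed
// linearly. Update() prefers a free slot within the bucket and otherwise
// shifts the existing entries down one position so that the newest entry
// lands at position zero.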
7767 
7768 
7769 void KeyedLookupCache::Clear() {
7770   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7771 }
7772 
7773 
7774 void DescriptorLookupCache::Clear() {
7775   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7776 }
7777 
7778 
7779 #ifdef DEBUG
7780 void Heap::GarbageCollectionGreedyCheck() {
7781   ASSERT(FLAG_gc_greedy);
7782   if (isolate_->bootstrapper()->IsActive()) return;
7783   if (disallow_allocation_failure()) return;
7784   CollectGarbage(NEW_SPACE);
7785 }
7786 #endif
7787 
7788 
7789 TranscendentalCache::SubCache::SubCache(Isolate* isolate, Type t)
7790   : type_(t),
7791     isolate_(isolate) {
7792   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7793   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7794   for (int i = 0; i < kCacheSize; i++) {
7795     elements_[i].in[0] = in0;
7796     elements_[i].in[1] = in1;
7797     elements_[i].output = NULL;
7798   }
7799 }
7800 
7801 
7802 void TranscendentalCache::Clear() {
7803   for (int i = 0; i < kNumberOfCaches; i++) {
7804     if (caches_[i] != NULL) {
7805       delete caches_[i];
7806       caches_[i] = NULL;
7807     }
7808   }
7809 }
7810 
7811 
7812 void ExternalStringTable::CleanUp() {
7813   int last = 0;
7814   for (int i = 0; i < new_space_strings_.length(); ++i) {
7815     if (new_space_strings_[i] == heap_->the_hole_value()) {
7816       continue;
7817     }
7818     ASSERT(new_space_strings_[i]->IsExternalString());
7819     if (heap_->InNewSpace(new_space_strings_[i])) {
7820       new_space_strings_[last++] = new_space_strings_[i];
7821     } else {
7822       old_space_strings_.Add(new_space_strings_[i]);
7823     }
7824   }
7825   new_space_strings_.Rewind(last);
7826   new_space_strings_.Trim();
7827 
7828   last = 0;
7829   for (int i = 0; i < old_space_strings_.length(); ++i) {
7830     if (old_space_strings_[i] == heap_->the_hole_value()) {
7831       continue;
7832     }
7833     ASSERT(old_space_strings_[i]->IsExternalString());
7834     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7835     old_space_strings_[last++] = old_space_strings_[i];
7836   }
7837   old_space_strings_.Rewind(last);
7838   old_space_strings_.Trim();
7839 #ifdef VERIFY_HEAP
7840   if (FLAG_verify_heap) {
7841     Verify();
7842   }
7843 #endif
7844 }
7845 
7846 
7847 void ExternalStringTable::TearDown() {
7848   for (int i = 0; i < new_space_strings_.length(); ++i) {
7849     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
7850   }
7851   new_space_strings_.Free();
7852   for (int i = 0; i < old_space_strings_.length(); ++i) {
7853     heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
7854   }
7855   old_space_strings_.Free();
7856 }
7857 
7858 
7859 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7860   chunk->set_next_chunk(chunks_queued_for_free_);
7861   chunks_queued_for_free_ = chunk;
7862 }
7863 
7864 
7865 void Heap::FreeQueuedChunks() {
7866   if (chunks_queued_for_free_ == NULL) return;
7867   MemoryChunk* next;
7868   MemoryChunk* chunk;
7869   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7870     next = chunk->next_chunk();
7871     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7872 
7873     if (chunk->owner()->identity() == LO_SPACE) {
7874       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7875       // If FromAnyPointerAddress encounters a slot that belongs to a large
7876       // chunk queued for deletion, it will fail to find the chunk because
7877       // it tries to search the list of pages owned by the large object
7878       // space, and queued chunks were detached from that list.
7879       // To work around this we split the large chunk into normal kPageSize
7880       // aligned pieces and initialize the size, owner and flags of each piece.
7881       // If FromAnyPointerAddress encounters a slot that belongs to one of
7882       // these smaller pieces it will treat it as a slot on a normal Page.
7883       Address chunk_end = chunk->address() + chunk->size();
7884       MemoryChunk* inner = MemoryChunk::FromAddress(
7885           chunk->address() + Page::kPageSize);
7886       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7887       while (inner <= inner_last) {
7888         // Size of a large chunk is always a multiple of
7889         // OS::AllocateAlignment() so there is always
7890         // enough space for a fake MemoryChunk header.
7891         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7892         // Guard against overflow.
7893         if (area_end < inner->address()) area_end = chunk_end;
7894         inner->SetArea(inner->address(), area_end);
7895         inner->set_size(Page::kPageSize);
7896         inner->set_owner(lo_space());
7897         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7898         inner = MemoryChunk::FromAddress(
7899             inner->address() + Page::kPageSize);
7900       }
7901     }
7902   }
7903   isolate_->heap()->store_buffer()->Compact();
7904   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7905   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7906     next = chunk->next_chunk();
7907     isolate_->memory_allocator()->Free(chunk);
7908   }
7909   chunks_queued_for_free_ = NULL;
7910 }
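
// Illustration of the large-chunk splitting above (hypothetical sizes): for a
// queued LO_SPACE chunk of 2.5 * Page::kPageSize starting at address A, fake
// MemoryChunk headers are stamped at A + 1 page and A + 2 pages; the first
// fake piece spans a full page and the last one is clamped to the real
// chunk_end. Any interior address still sitting in the store buffer then
// resolves to a chunk flagged ABOUT_TO_BE_FREED rather than to stale large
// object metadata.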
7911 
7912 
7913 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7914   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7915   // Tag the page pointer to make it findable in the dump file.
7916   if (compacted) {
7917     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7918   } else {
7919     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7920   }
7921   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7922       reinterpret_cast<Address>(p);
7923   remembered_unmapped_pages_index_++;
7924   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7925 }
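
// Note on the markers above: page addresses are page-aligned, so their low
// bits are zero and the XOR simply writes the (masked) 0xc1ead or 0x1d1ed
// pattern into them. Compacted-and-unmapped pages therefore end in the
// "cleared" pattern in a crash dump and other unmapped pages in the "I died"
// pattern, which makes the remembered_unmapped_pages_ ring buffer easy to
// read.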
7926 
7927 
7928 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7929   memset(object_counts_, 0, sizeof(object_counts_));
7930   memset(object_sizes_, 0, sizeof(object_sizes_));
7931   if (clear_last_time_stats) {
7932     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7933     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7934   }
7935 }
7936 
7937 
7938 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7939 
7940 
7941 void Heap::CheckpointObjectStats() {
7942   LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
7943   Counters* counters = isolate()->counters();
7944 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
7945   counters->count_of_##name()->Increment(                                      \
7946       static_cast<int>(object_counts_[name]));                                 \
7947   counters->count_of_##name()->Decrement(                                      \
7948       static_cast<int>(object_counts_last_time_[name]));                       \
7949   counters->size_of_##name()->Increment(                                       \
7950       static_cast<int>(object_sizes_[name]));                                  \
7951   counters->size_of_##name()->Decrement(                                       \
7952       static_cast<int>(object_sizes_last_time_[name]));
7953   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7954 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7955   int index;
7956 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7957   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
7958   counters->count_of_CODE_TYPE_##name()->Increment(       \
7959       static_cast<int>(object_counts_[index]));           \
7960   counters->count_of_CODE_TYPE_##name()->Decrement(       \
7961       static_cast<int>(object_counts_last_time_[index])); \
7962   counters->size_of_CODE_TYPE_##name()->Increment(        \
7963       static_cast<int>(object_sizes_[index]));            \
7964   counters->size_of_CODE_TYPE_##name()->Decrement(        \
7965       static_cast<int>(object_sizes_last_time_[index]));
7966   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7967 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7968 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7969   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
7970   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
7971       static_cast<int>(object_counts_[index]));           \
7972   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
7973       static_cast<int>(object_counts_last_time_[index])); \
7974   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
7975       static_cast<int>(object_sizes_[index]));            \
7976   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
7977       static_cast<int>(object_sizes_last_time_[index]));
7978   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7979 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7980 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
7981   index =                                                                     \
7982       FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
7983   counters->count_of_CODE_AGE_##name()->Increment(                            \
7984       static_cast<int>(object_counts_[index]));                               \
7985   counters->count_of_CODE_AGE_##name()->Decrement(                            \
7986       static_cast<int>(object_counts_last_time_[index]));                     \
7987   counters->size_of_CODE_AGE_##name()->Increment(                             \
7988       static_cast<int>(object_sizes_[index]));                                \
7989   counters->size_of_CODE_AGE_##name()->Decrement(                             \
7990       static_cast<int>(object_sizes_last_time_[index]));
7991   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
7992 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7993 
7994   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7995   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7996   ClearObjectStats();
7997 }
7998 
7999 } }  // namespace v8::internal
8000