1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen-inl.h"
34 #include "compilation-cache.h"
35 #include "debug.h"
36 #include "heap-profiler.h"
37 #include "global-handles.h"
38 #include "mark-compact.h"
39 #include "natives.h"
40 #include "scanner.h"
41 #include "scopeinfo.h"
42 #include "snapshot.h"
43 #include "v8threads.h"
44 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
45 #include "regexp-macro-assembler.h"
46 #include "arm/regexp-macro-assembler-arm.h"
47 #endif
48
49 namespace v8 {
50 namespace internal {
51
52
53 String* Heap::hidden_symbol_;
54 Object* Heap::roots_[Heap::kRootListLength];
55
56
57 NewSpace Heap::new_space_;
58 OldSpace* Heap::old_pointer_space_ = NULL;
59 OldSpace* Heap::old_data_space_ = NULL;
60 OldSpace* Heap::code_space_ = NULL;
61 MapSpace* Heap::map_space_ = NULL;
62 CellSpace* Heap::cell_space_ = NULL;
63 LargeObjectSpace* Heap::lo_space_ = NULL;
64
65 static const int kMinimumPromotionLimit = 2*MB;
66 static const int kMinimumAllocationLimit = 8*MB;
67
68 int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
69 int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
70
71 int Heap::old_gen_exhausted_ = false;
72
73 int Heap::amount_of_external_allocated_memory_ = 0;
74 int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
75
76 // semispace_size_ should be a power of 2 and old_generation_size_ should be
77 // a multiple of Page::kPageSize.
78 #if defined(ANDROID)
79 int Heap::max_semispace_size_ = 2*MB;
80 int Heap::max_old_generation_size_ = 192*MB;
81 int Heap::initial_semispace_size_ = 128*KB;
82 size_t Heap::code_range_size_ = 0;
83 #elif defined(V8_TARGET_ARCH_X64)
84 int Heap::max_semispace_size_ = 16*MB;
85 int Heap::max_old_generation_size_ = 1*GB;
86 int Heap::initial_semispace_size_ = 1*MB;
87 size_t Heap::code_range_size_ = 512*MB;
88 #else
89 int Heap::max_semispace_size_ = 8*MB;
90 int Heap::max_old_generation_size_ = 512*MB;
91 int Heap::initial_semispace_size_ = 512*KB;
92 size_t Heap::code_range_size_ = 0;
93 #endif
94
95 // The snapshot semispace size will be the default semispace size if
96 // snapshotting is used and will be the requested semispace size as
97 // set up by ConfigureHeap otherwise.
98 int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
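
// Illustrative sketch (not part of the original file): the comment at the top
// of this block requires the semispace size to be a power of two and the old
// generation size to be a multiple of Page::kPageSize. A configuration could
// be sanity-checked along these lines; the function name and the guard macro
// are hypothetical.
#ifdef HEAP_CONFIG_SANITY_EXAMPLE
static bool IsValidHeapConfiguration(int semispace_size,
                                     int old_generation_size) {
  bool semispace_is_power_of_two =
      semispace_size > 0 && (semispace_size & (semispace_size - 1)) == 0;
  bool old_generation_is_page_aligned =
      (old_generation_size % Page::kPageSize) == 0;
  return semispace_is_power_of_two && old_generation_is_page_aligned;
}
#endif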
99
100 GCCallback Heap::global_gc_prologue_callback_ = NULL;
101 GCCallback Heap::global_gc_epilogue_callback_ = NULL;
102
103 // Variables set based on semispace_size_ and old_generation_size_ in
104 // ConfigureHeap.
105
106 // Will be 4 * reserved_semispace_size_ to ensure that young
107 // generation can be aligned to its size.
108 int Heap::survived_since_last_expansion_ = 0;
109 int Heap::external_allocation_limit_ = 0;
110
111 Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
112
113 int Heap::mc_count_ = 0;
114 int Heap::gc_count_ = 0;
115
116 int Heap::always_allocate_scope_depth_ = 0;
117 int Heap::linear_allocation_scope_depth_ = 0;
118 bool Heap::context_disposed_pending_ = false;
119
120 #ifdef DEBUG
121 bool Heap::allocation_allowed_ = true;
122
123 int Heap::allocation_timeout_ = 0;
124 bool Heap::disallow_allocation_failure_ = false;
125 #endif // DEBUG
126
127
128 int Heap::Capacity() {
129 if (!HasBeenSetup()) return 0;
130
131 return new_space_.Capacity() +
132 old_pointer_space_->Capacity() +
133 old_data_space_->Capacity() +
134 code_space_->Capacity() +
135 map_space_->Capacity() +
136 cell_space_->Capacity();
137 }
138
139
140 int Heap::CommittedMemory() {
141 if (!HasBeenSetup()) return 0;
142
143 return new_space_.CommittedMemory() +
144 old_pointer_space_->CommittedMemory() +
145 old_data_space_->CommittedMemory() +
146 code_space_->CommittedMemory() +
147 map_space_->CommittedMemory() +
148 cell_space_->CommittedMemory() +
149 lo_space_->Size();
150 }
151
152
153 int Heap::Available() {
154 if (!HasBeenSetup()) return 0;
155
156 return new_space_.Available() +
157 old_pointer_space_->Available() +
158 old_data_space_->Available() +
159 code_space_->Available() +
160 map_space_->Available() +
161 cell_space_->Available();
162 }
163
164
165 bool Heap::HasBeenSetup() {
166 return old_pointer_space_ != NULL &&
167 old_data_space_ != NULL &&
168 code_space_ != NULL &&
169 map_space_ != NULL &&
170 cell_space_ != NULL &&
171 lo_space_ != NULL;
172 }
173
174
175 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
176 // Is global GC requested?
177 if (space != NEW_SPACE || FLAG_gc_global) {
178 Counters::gc_compactor_caused_by_request.Increment();
179 return MARK_COMPACTOR;
180 }
181
182 // Is enough data promoted to justify a global GC?
183 if (OldGenerationPromotionLimitReached()) {
184 Counters::gc_compactor_caused_by_promoted_data.Increment();
185 return MARK_COMPACTOR;
186 }
187
188 // Have allocation in OLD and LO failed?
189 if (old_gen_exhausted_) {
190 Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
191 return MARK_COMPACTOR;
192 }
193
194 // Is there enough space left in OLD to guarantee that a scavenge can
195 // succeed?
196 //
197 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
198 // for object promotion. It counts only the bytes that the memory
199 // allocator has not yet allocated from the OS and assigned to any space,
200 // and does not count available bytes already in the old space or code
201 // space. Undercounting is safe---we may get an unrequested full GC when
202 // a scavenge would have succeeded.
203 if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
204 Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
205 return MARK_COMPACTOR;
206 }
207
208 // Default
209 return SCAVENGER;
210 }
211
212
213 // TODO(1238405): Combine the infrastructure for --heap-stats and
214 // --log-gc to avoid the complicated preprocessor and flag testing.
215 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
216 void Heap::ReportStatisticsBeforeGC() {
217 // Heap::ReportHeapStatistics will also log NewSpace statistics when
218 // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
219 // following logic is used to avoid double logging.
220 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
221 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
222 if (FLAG_heap_stats) {
223 ReportHeapStatistics("Before GC");
224 } else if (FLAG_log_gc) {
225 new_space_.ReportStatistics();
226 }
227 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
228 #elif defined(DEBUG)
229 if (FLAG_heap_stats) {
230 new_space_.CollectStatistics();
231 ReportHeapStatistics("Before GC");
232 new_space_.ClearHistograms();
233 }
234 #elif defined(ENABLE_LOGGING_AND_PROFILING)
235 if (FLAG_log_gc) {
236 new_space_.CollectStatistics();
237 new_space_.ReportStatistics();
238 new_space_.ClearHistograms();
239 }
240 #endif
241 }
242
243
244 #if defined(ENABLE_LOGGING_AND_PROFILING)
245 void Heap::PrintShortHeapStatistics() {
246 if (!FLAG_trace_gc_verbose) return;
247 PrintF("Memory allocator, used: %8d, available: %8d\n",
248 MemoryAllocator::Size(),
249 MemoryAllocator::Available());
250 PrintF("New space, used: %8d, available: %8d\n",
251 Heap::new_space_.Size(),
252 new_space_.Available());
253 PrintF("Old pointers, used: %8d, available: %8d, waste: %8d\n",
254 old_pointer_space_->Size(),
255 old_pointer_space_->Available(),
256 old_pointer_space_->Waste());
257 PrintF("Old data space, used: %8d, available: %8d, waste: %8d\n",
258 old_data_space_->Size(),
259 old_data_space_->Available(),
260 old_data_space_->Waste());
261 PrintF("Code space, used: %8d, available: %8d, waste: %8d\n",
262 code_space_->Size(),
263 code_space_->Available(),
264 code_space_->Waste());
265 PrintF("Map space, used: %8d, available: %8d, waste: %8d\n",
266 map_space_->Size(),
267 map_space_->Available(),
268 map_space_->Waste());
269 PrintF("Cell space, used: %8d, available: %8d, waste: %8d\n",
270 cell_space_->Size(),
271 cell_space_->Available(),
272 cell_space_->Waste());
273   PrintF("Large object space, used: %8d, available: %8d\n",
274 lo_space_->Size(),
275 lo_space_->Available());
276 }
277 #endif
278
279
280 // TODO(1238405): Combine the infrastructure for --heap-stats and
281 // --log-gc to avoid the complicated preprocessor and flag testing.
282 void Heap::ReportStatisticsAfterGC() {
283   // As before the GC, we use some complicated logic to ensure that
284 // NewSpace statistics are logged exactly once when --log-gc is turned on.
285 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
286 if (FLAG_heap_stats) {
287 new_space_.CollectStatistics();
288 ReportHeapStatistics("After GC");
289 } else if (FLAG_log_gc) {
290 new_space_.ReportStatistics();
291 }
292 #elif defined(DEBUG)
293 if (FLAG_heap_stats) ReportHeapStatistics("After GC");
294 #elif defined(ENABLE_LOGGING_AND_PROFILING)
295 if (FLAG_log_gc) new_space_.ReportStatistics();
296 #endif
297 }
298 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
299
300
301 void Heap::GarbageCollectionPrologue() {
302 TranscendentalCache::Clear();
303 gc_count_++;
304 #ifdef DEBUG
305 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
306 allow_allocation(false);
307
308 if (FLAG_verify_heap) {
309 Verify();
310 }
311
312 if (FLAG_gc_verbose) Print();
313
314 if (FLAG_print_rset) {
315 // Not all spaces have remembered set bits that we care about.
316 old_pointer_space_->PrintRSet();
317 map_space_->PrintRSet();
318 lo_space_->PrintRSet();
319 }
320 #endif
321
322 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
323 ReportStatisticsBeforeGC();
324 #endif
325 }
326
327 int Heap::SizeOfObjects() {
328 int total = 0;
329 AllSpaces spaces;
330 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
331 total += space->Size();
332 }
333 return total;
334 }
335
336 void Heap::GarbageCollectionEpilogue() {
337 #ifdef DEBUG
338 allow_allocation(true);
339 ZapFromSpace();
340
341 if (FLAG_verify_heap) {
342 Verify();
343 }
344
345 if (FLAG_print_global_handles) GlobalHandles::Print();
346 if (FLAG_print_handles) PrintHandles();
347 if (FLAG_gc_verbose) Print();
348 if (FLAG_code_stats) ReportCodeStatistics("After GC");
349 #endif
350
351 Counters::alive_after_last_gc.Set(SizeOfObjects());
352
353 Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
354 Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
355 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
356 ReportStatisticsAfterGC();
357 #endif
358 #ifdef ENABLE_DEBUGGER_SUPPORT
359 Debug::AfterGarbageCollection();
360 #endif
361 }
362
363
364 void Heap::CollectAllGarbage(bool force_compaction) {
365 // Since we are ignoring the return value, the exact choice of space does
366 // not matter, so long as we do not specify NEW_SPACE, which would not
367 // cause a full GC.
368 MarkCompactCollector::SetForceCompaction(force_compaction);
369 CollectGarbage(0, OLD_POINTER_SPACE);
370 MarkCompactCollector::SetForceCompaction(false);
371 }
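
// Usage sketch (illustration only, not from the original file): a caller that
// wants a full mark-compact collection that also compacts old space would
// invoke the function above with force_compaction set. The guard macro is
// hypothetical.
#ifdef HEAP_FULL_GC_EXAMPLE
static void ForceCompactingCollection() {
  // Passing true makes the mark-compact collector relocate live objects
  // instead of only sweeping, reclaiming fragmented old-space pages.
  Heap::CollectAllGarbage(true);
}
#endif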
372
373
374 void Heap::CollectAllGarbageIfContextDisposed() {
375 // If the garbage collector interface is exposed through the global
376 // gc() function, we avoid being clever about forcing GCs when
377 // contexts are disposed and leave it to the embedder to make
378 // informed decisions about when to force a collection.
379 if (!FLAG_expose_gc && context_disposed_pending_) {
380 HistogramTimerScope scope(&Counters::gc_context);
381 CollectAllGarbage(false);
382 }
383 context_disposed_pending_ = false;
384 }
385
386
387 void Heap::NotifyContextDisposed() {
388 context_disposed_pending_ = true;
389 }
390
391
392 bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
393 // The VM is in the GC state until exiting this function.
394 VMState state(GC);
395
396 #ifdef DEBUG
397 // Reset the allocation timeout to the GC interval, but make sure to
398 // allow at least a few allocations after a collection. The reason
399 // for this is that we have a lot of allocation sequences and we
400 // assume that a garbage collection will allow the subsequent
401 // allocation attempts to go through.
402 allocation_timeout_ = Max(6, FLAG_gc_interval);
403 #endif
404
405 { GCTracer tracer;
406 GarbageCollectionPrologue();
407 // The GC count was incremented in the prologue. Tell the tracer about
408 // it.
409 tracer.set_gc_count(gc_count_);
410
411 GarbageCollector collector = SelectGarbageCollector(space);
412 // Tell the tracer which collector we've selected.
413 tracer.set_collector(collector);
414
415 HistogramTimer* rate = (collector == SCAVENGER)
416 ? &Counters::gc_scavenger
417 : &Counters::gc_compactor;
418 rate->Start();
419 PerformGarbageCollection(space, collector, &tracer);
420 rate->Stop();
421
422 GarbageCollectionEpilogue();
423 }
424
425
426 #ifdef ENABLE_LOGGING_AND_PROFILING
427 if (FLAG_log_gc) HeapProfiler::WriteSample();
428 #endif
429
430 switch (space) {
431 case NEW_SPACE:
432 return new_space_.Available() >= requested_size;
433 case OLD_POINTER_SPACE:
434 return old_pointer_space_->Available() >= requested_size;
435 case OLD_DATA_SPACE:
436 return old_data_space_->Available() >= requested_size;
437 case CODE_SPACE:
438 return code_space_->Available() >= requested_size;
439 case MAP_SPACE:
440 return map_space_->Available() >= requested_size;
441 case CELL_SPACE:
442 return cell_space_->Available() >= requested_size;
443 case LO_SPACE:
444 return lo_space_->Available() >= requested_size;
445 }
446 return false;
447 }
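
// Illustrative sketch (not part of the original file): the boolean returned
// above reports whether the requested number of bytes is now available in the
// given space, so a caller can decide whether a retry is worthwhile. The
// helper name and guard macro are hypothetical; real callers use the shared
// retry-after-GC machinery rather than a hand-written retry like this.
#ifdef HEAP_RETRY_EXAMPLE
static Object* AllocateInOldPointerSpaceWithRetry(int size_in_bytes) {
  Object* result = Heap::old_pointer_space()->AllocateRaw(size_in_bytes);
  if (!result->IsFailure()) return result;
  // First attempt failed: collect, and retry only if enough space was freed.
  if (Heap::CollectGarbage(size_in_bytes, OLD_POINTER_SPACE)) {
    result = Heap::old_pointer_space()->AllocateRaw(size_in_bytes);
  }
  return result;
}
#endif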
448
449
450 void Heap::PerformScavenge() {
451 GCTracer tracer;
452 PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
453 }
454
455
456 #ifdef DEBUG
457 // Helper class for verifying the symbol table.
458 class SymbolTableVerifier : public ObjectVisitor {
459 public:
460   SymbolTableVerifier() { }
461   void VisitPointers(Object** start, Object** end) {
462 // Visit all HeapObject pointers in [start, end).
463 for (Object** p = start; p < end; p++) {
464 if ((*p)->IsHeapObject()) {
465 // Check that the symbol is actually a symbol.
466 ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
467 }
468 }
469 }
470 };
471 #endif // DEBUG
472
473
474 static void VerifySymbolTable() {
475 #ifdef DEBUG
476 SymbolTableVerifier verifier;
477 Heap::symbol_table()->IterateElements(&verifier);
478 #endif // DEBUG
479 }
480
481
482 void Heap::ReserveSpace(
483 int new_space_size,
484 int pointer_space_size,
485 int data_space_size,
486 int code_space_size,
487 int map_space_size,
488 int cell_space_size,
489 int large_object_size) {
490 NewSpace* new_space = Heap::new_space();
491 PagedSpace* old_pointer_space = Heap::old_pointer_space();
492 PagedSpace* old_data_space = Heap::old_data_space();
493 PagedSpace* code_space = Heap::code_space();
494 PagedSpace* map_space = Heap::map_space();
495 PagedSpace* cell_space = Heap::cell_space();
496 LargeObjectSpace* lo_space = Heap::lo_space();
497 bool gc_performed = true;
498 while (gc_performed) {
499 gc_performed = false;
500 if (!new_space->ReserveSpace(new_space_size)) {
501 Heap::CollectGarbage(new_space_size, NEW_SPACE);
502 gc_performed = true;
503 }
504 if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
505 Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
506 gc_performed = true;
507 }
508 if (!(old_data_space->ReserveSpace(data_space_size))) {
509 Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
510 gc_performed = true;
511 }
512 if (!(code_space->ReserveSpace(code_space_size))) {
513 Heap::CollectGarbage(code_space_size, CODE_SPACE);
514 gc_performed = true;
515 }
516 if (!(map_space->ReserveSpace(map_space_size))) {
517 Heap::CollectGarbage(map_space_size, MAP_SPACE);
518 gc_performed = true;
519 }
520 if (!(cell_space->ReserveSpace(cell_space_size))) {
521 Heap::CollectGarbage(cell_space_size, CELL_SPACE);
522 gc_performed = true;
523 }
524 // We add a slack-factor of 2 in order to have space for the remembered
525 // set and a series of large-object allocations that are only just larger
526 // than the page size.
527 large_object_size *= 2;
528 // The ReserveSpace method on the large object space checks how much
529 // we can expand the old generation. This includes expansion caused by
530 // allocation in the other spaces.
531 large_object_size += cell_space_size + map_space_size + code_space_size +
532 data_space_size + pointer_space_size;
533 if (!(lo_space->ReserveSpace(large_object_size))) {
534 Heap::CollectGarbage(large_object_size, LO_SPACE);
535 gc_performed = true;
536 }
537 }
538 }
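
// Worked example (illustration only, not from the original file): with a
// requested large_object_size of 1 MB and 512 KB requested for each of the
// five paged spaces, the loop above checks the large object space for
//   2 * 1 MB + 5 * 512 KB = 4.5 MB
// covering both the doubled slack factor and the old-generation expansion
// that allocation in the other spaces may cause.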
539
540
541 void Heap::EnsureFromSpaceIsCommitted() {
542 if (new_space_.CommitFromSpaceIfNeeded()) return;
543
544 // Committing memory to from space failed.
545 // Try shrinking and try again.
546 Shrink();
547 if (new_space_.CommitFromSpaceIfNeeded()) return;
548
549 // Committing memory to from space failed again.
550 // Memory is exhausted and we will die.
551 V8::FatalProcessOutOfMemory("Committing semi space failed.");
552 }
553
554
555 void Heap::PerformGarbageCollection(AllocationSpace space,
556 GarbageCollector collector,
557 GCTracer* tracer) {
558 VerifySymbolTable();
559 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
560 ASSERT(!allocation_allowed_);
561 global_gc_prologue_callback_();
562 }
563 EnsureFromSpaceIsCommitted();
564 if (collector == MARK_COMPACTOR) {
565 MarkCompact(tracer);
566
567 int old_gen_size = PromotedSpaceSize();
568 old_gen_promotion_limit_ =
569 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
570 old_gen_allocation_limit_ =
571 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
572 old_gen_exhausted_ = false;
573 }
574 Scavenge();
575
576 Counters::objs_since_last_young.Set(0);
577
578 if (collector == MARK_COMPACTOR) {
579 DisableAssertNoAllocation allow_allocation;
580 GlobalHandles::PostGarbageCollectionProcessing();
581 }
582
583 // Update relocatables.
584 Relocatable::PostGarbageCollectionProcessing();
585
586 if (collector == MARK_COMPACTOR) {
587 // Register the amount of external allocated memory.
588 amount_of_external_allocated_memory_at_last_global_gc_ =
589 amount_of_external_allocated_memory_;
590 }
591
592 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
593 ASSERT(!allocation_allowed_);
594 global_gc_epilogue_callback_();
595 }
596 VerifySymbolTable();
597 }
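
// Worked example (illustration only, not from the original file): if a
// mark-compact collection leaves old_gen_size at 60 MB, the limits set above
// become
//   old_gen_promotion_limit_  = 60 MB + Max(2 MB, 60 MB / 3) = 80 MB
//   old_gen_allocation_limit_ = 60 MB + Max(8 MB, 60 MB / 2) = 90 MB
// so the next full collection is not triggered until promotion or old-space
// allocation grows the old generation by a third or a half, respectively.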
598
599
600 void Heap::MarkCompact(GCTracer* tracer) {
601 gc_state_ = MARK_COMPACT;
602 mc_count_++;
603 tracer->set_full_gc_count(mc_count_);
604 LOG(ResourceEvent("markcompact", "begin"));
605
606 MarkCompactCollector::Prepare(tracer);
607
608 bool is_compacting = MarkCompactCollector::IsCompacting();
609
610 MarkCompactPrologue(is_compacting);
611
612 MarkCompactCollector::CollectGarbage();
613
614 MarkCompactEpilogue(is_compacting);
615
616 LOG(ResourceEvent("markcompact", "end"));
617
618 gc_state_ = NOT_IN_GC;
619
620 Shrink();
621
622 Counters::objs_since_last_full.Set(0);
623 context_disposed_pending_ = false;
624 }
625
626
627 void Heap::MarkCompactPrologue(bool is_compacting) {
628 // At any old GC clear the keyed lookup cache to enable collection of unused
629 // maps.
630 KeyedLookupCache::Clear();
631 ContextSlotCache::Clear();
632 DescriptorLookupCache::Clear();
633
634 CompilationCache::MarkCompactPrologue();
635
636 Top::MarkCompactPrologue(is_compacting);
637 ThreadManager::MarkCompactPrologue(is_compacting);
638
639 if (is_compacting) FlushNumberStringCache();
640 }
641
642
643 void Heap::MarkCompactEpilogue(bool is_compacting) {
644 Top::MarkCompactEpilogue(is_compacting);
645 ThreadManager::MarkCompactEpilogue(is_compacting);
646 }
647
648
649 Object* Heap::FindCodeObject(Address a) {
650 Object* obj = code_space_->FindObject(a);
651 if (obj->IsFailure()) {
652 obj = lo_space_->FindObject(a);
653 }
654 ASSERT(!obj->IsFailure());
655 return obj;
656 }
657
658
659 // Helper class for copying HeapObjects
660 class ScavengeVisitor: public ObjectVisitor {
661 public:
662
663   void VisitPointer(Object** p) { ScavengePointer(p); }
664
665   void VisitPointers(Object** start, Object** end) {
666 // Copy all HeapObject pointers in [start, end)
667 for (Object** p = start; p < end; p++) ScavengePointer(p);
668 }
669
670 private:
671   void ScavengePointer(Object** p) {
672 Object* object = *p;
673 if (!Heap::InNewSpace(object)) return;
674 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
675 reinterpret_cast<HeapObject*>(object));
676 }
677 };
678
679
680 // A queue of pointers and maps of to-be-promoted objects during a
681 // scavenge collection.
682 class PromotionQueue {
683 public:
684   void Initialize(Address start_address) {
685 front_ = rear_ = reinterpret_cast<HeapObject**>(start_address);
686 }
687
688   bool is_empty() { return front_ <= rear_; }
689
690   void insert(HeapObject* object, Map* map) {
691 *(--rear_) = object;
692 *(--rear_) = map;
693 // Assert no overflow into live objects.
694 ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
695 }
696
697   void remove(HeapObject** object, Map** map) {
698 *object = *(--front_);
699 *map = Map::cast(*(--front_));
700 // Assert no underflow.
701 ASSERT(front_ >= rear_);
702 }
703
704 private:
705 // The front of the queue is higher in memory than the rear.
706 HeapObject** front_;
707 HeapObject** rear_;
708 };
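
// Illustrative sketch (not part of the original file, hypothetical names):
// the promotion queue is a FIFO laid out at the top of to space and growing
// downwards: both insert and remove walk towards lower addresses. The same
// discipline applied to a plain int buffer looks like this:
#ifdef PROMOTION_QUEUE_TOY_EXAMPLE
static bool PromotionQueueToy() {
  int buffer[8];
  int* front = buffer + 8;  // Highest address; removal starts here.
  int* rear = buffer + 8;   // Moves downwards as entries are inserted.
  *(--rear) = 1;            // insert() writes at decreasing addresses.
  *(--rear) = 2;
  int first = *(--front);   // remove() reads entries back in insertion order.
  int second = *(--front);
  // The queue is empty again once front has caught up with rear, which is
  // exactly the is_empty() condition used above.
  return first == 1 && second == 2 && front <= rear;
}
#endif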
709
710
711 // Shared state read by the scavenge collector and set by ScavengeObject.
712 static PromotionQueue promotion_queue;
713
714
715 #ifdef DEBUG
716 // Visitor class to verify pointers in code or data space do not point into
717 // new space.
718 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
719 public:
720   void VisitPointers(Object** start, Object** end) {
721 for (Object** current = start; current < end; current++) {
722 if ((*current)->IsHeapObject()) {
723 ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
724 }
725 }
726 }
727 };
728
729
730 static void VerifyNonPointerSpacePointers() {
731 // Verify that there are no pointers to new space in spaces where we
732 // do not expect them.
733 VerifyNonPointerSpacePointersVisitor v;
734 HeapObjectIterator code_it(Heap::code_space());
735 for (HeapObject* object = code_it.next();
736 object != NULL; object = code_it.next())
737 object->Iterate(&v);
738
739 HeapObjectIterator data_it(Heap::old_data_space());
740 for (HeapObject* object = data_it.next();
741 object != NULL; object = data_it.next())
742 object->Iterate(&v);
743 }
744 #endif
745
746
747 void Heap::Scavenge() {
748 #ifdef DEBUG
749 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
750 #endif
751
752 gc_state_ = SCAVENGE;
753
754 // Implements Cheney's copying algorithm
755 LOG(ResourceEvent("scavenge", "begin"));
756
757 // Clear descriptor cache.
758 DescriptorLookupCache::Clear();
759
760 // Used for updating survived_since_last_expansion_ at function end.
761 int survived_watermark = PromotedSpaceSize();
762
763 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
764 survived_since_last_expansion_ > new_space_.Capacity()) {
765 // Grow the size of new space if there is room to grow and enough
766 // data has survived scavenge since the last expansion.
767 new_space_.Grow();
768 survived_since_last_expansion_ = 0;
769 }
770
771 // Flip the semispaces. After flipping, to space is empty, from space has
772 // live objects.
773 new_space_.Flip();
774 new_space_.ResetAllocationInfo();
775
776 // We need to sweep newly copied objects which can be either in the
777 // to space or promoted to the old generation. For to-space
778 // objects, we treat the bottom of the to space as a queue. Newly
779 // copied and unswept objects lie between a 'front' mark and the
780 // allocation pointer.
781 //
782 // Promoted objects can go into various old-generation spaces, and
783 // can be allocated internally in the spaces (from the free list).
784 // We treat the top of the to space as a queue of addresses of
785 // promoted objects. The addresses of newly promoted and unswept
786 // objects lie between a 'front' mark and a 'rear' mark that is
787 // updated as a side effect of promoting an object.
788 //
789 // There is guaranteed to be enough room at the top of the to space
790 // for the addresses of promoted objects: every object promoted
791 // frees up its size in bytes from the top of the new space, and
792 // objects are at least one pointer in size.
793 Address new_space_front = new_space_.ToSpaceLow();
794 promotion_queue.Initialize(new_space_.ToSpaceHigh());
795
796 ScavengeVisitor scavenge_visitor;
797 // Copy roots.
798 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
799
800 // Copy objects reachable from the old generation. By definition,
801 // there are no intergenerational pointers in code or data spaces.
802 IterateRSet(old_pointer_space_, &ScavengePointer);
803 IterateRSet(map_space_, &ScavengePointer);
804 lo_space_->IterateRSet(&ScavengePointer);
805
806 // Copy objects reachable from cells by scavenging cell values directly.
807 HeapObjectIterator cell_iterator(cell_space_);
808 for (HeapObject* cell = cell_iterator.next();
809 cell != NULL; cell = cell_iterator.next()) {
810 if (cell->IsJSGlobalPropertyCell()) {
811 Address value_address =
812 reinterpret_cast<Address>(cell) +
813 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
814 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
815 }
816 }
817
818 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
819
820 ScavengeExternalStringTable();
821 ASSERT(new_space_front == new_space_.top());
822
823 // Set age mark.
824 new_space_.set_age_mark(new_space_.top());
825
826 // Update how much has survived scavenge.
827 survived_since_last_expansion_ +=
828 (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
829
830 LOG(ResourceEvent("scavenge", "end"));
831
832 gc_state_ = NOT_IN_GC;
833 }
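
// Toy illustration (not part of the original file, hypothetical names): the
// scavenger above is Cheney's copying algorithm. In miniature, for objects
// with a single outgoing pointer, the copy-roots-then-scan structure of
// Scavenge and DoScavenge looks like this:
#ifdef CHENEY_TOY_EXAMPLE
struct ToyObject {
  ToyObject* child;       // Single outgoing pointer, NULL if none.
  ToyObject* forwarding;  // Set once the object has been copied.
};

// Copies the graph reachable from *root into to_space and returns the new
// allocation top. The region between scan and top plays the role of the
// queue of copied but unswept objects.
static int CheneyToy(ToyObject** root, ToyObject* to_space) {
  int top = 0;
  // "Scavenge" the root slot: copy its target if it has not been copied yet,
  // then update the slot through the forwarding address.
  if (*root != NULL) {
    if ((*root)->forwarding == NULL) {
      to_space[top] = **root;
      (*root)->forwarding = &to_space[top];
      top++;
    }
    *root = (*root)->forwarding;
  }
  // Two-pointer scan, as in DoScavenge: process copied objects until the
  // scan pointer catches up with the allocation pointer.
  for (int scan = 0; scan < top; scan++) {
    ToyObject** slot = &to_space[scan].child;
    if (*slot == NULL) continue;
    if ((*slot)->forwarding == NULL) {
      to_space[top] = **slot;
      (*slot)->forwarding = &to_space[top];
      top++;
    }
    *slot = (*slot)->forwarding;
  }
  return top;
}
#endif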
834
835
836 void Heap::ScavengeExternalStringTable() {
837 ExternalStringTable::Verify();
838
839 if (ExternalStringTable::new_space_strings_.is_empty()) return;
840
841 Object** start = &ExternalStringTable::new_space_strings_[0];
842 Object** end = start + ExternalStringTable::new_space_strings_.length();
843 Object** last = start;
844
845 for (Object** p = start; p < end; ++p) {
846 ASSERT(Heap::InFromSpace(*p));
847 MapWord first_word = HeapObject::cast(*p)->map_word();
848
849 if (!first_word.IsForwardingAddress()) {
850 // Unreachable external string can be finalized.
851 FinalizeExternalString(String::cast(*p));
852 continue;
853 }
854
855 // String is still reachable.
856 String* target = String::cast(first_word.ToForwardingAddress());
857 ASSERT(target->IsExternalString());
858
859 if (Heap::InNewSpace(target)) {
860 // String is still in new space. Update the table entry.
861 *last = target;
862 ++last;
863 } else {
864 // String got promoted. Move it to the old string list.
865 ExternalStringTable::AddOldString(target);
866 }
867 }
868
869 ASSERT(last <= end);
870 ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
871 }
872
873
874 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
875 Address new_space_front) {
876 do {
877 ASSERT(new_space_front <= new_space_.top());
878
879 // The addresses new_space_front and new_space_.top() define a
880 // queue of unprocessed copied objects. Process them until the
881 // queue is empty.
882 while (new_space_front < new_space_.top()) {
883 HeapObject* object = HeapObject::FromAddress(new_space_front);
884 object->Iterate(scavenge_visitor);
885 new_space_front += object->Size();
886 }
887
888 // Promote and process all the to-be-promoted objects.
889 while (!promotion_queue.is_empty()) {
890 HeapObject* source;
891 Map* map;
892 promotion_queue.remove(&source, &map);
893 // Copy the from-space object to its new location (given by the
894 // forwarding address) and fix its map.
895 HeapObject* target = source->map_word().ToForwardingAddress();
896 CopyBlock(reinterpret_cast<Object**>(target->address()),
897 reinterpret_cast<Object**>(source->address()),
898 source->SizeFromMap(map));
899 target->set_map(map);
900
901 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
902 // Update NewSpace stats if necessary.
903 RecordCopiedObject(target);
904 #endif
905 // Visit the newly copied object for pointers to new space.
906 target->Iterate(scavenge_visitor);
907 UpdateRSet(target);
908 }
909
910 // Take another spin if there are now unswept objects in new space
911 // (there are currently no more unswept promoted objects).
912 } while (new_space_front < new_space_.top());
913
914 return new_space_front;
915 }
916
917
918 void Heap::ClearRSetRange(Address start, int size_in_bytes) {
919 uint32_t start_bit;
920 Address start_word_address =
921 Page::ComputeRSetBitPosition(start, 0, &start_bit);
922 uint32_t end_bit;
923 Address end_word_address =
924 Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
925 0,
926 &end_bit);
927
928 // We want to clear the bits in the starting word starting with the
929 // first bit, and in the ending word up to and including the last
930 // bit. Build a pair of bitmasks to do that.
931 uint32_t start_bitmask = start_bit - 1;
932 uint32_t end_bitmask = ~((end_bit << 1) - 1);
933
934 // If the start address and end address are the same, we mask that
935 // word once, otherwise mask the starting and ending word
936 // separately and all the ones in between.
937 if (start_word_address == end_word_address) {
938 Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
939 } else {
940 Memory::uint32_at(start_word_address) &= start_bitmask;
941 Memory::uint32_at(end_word_address) &= end_bitmask;
942 start_word_address += kIntSize;
943 memset(start_word_address, 0, end_word_address - start_word_address);
944 }
945 }
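
// Worked example (illustration only, not from the original file): if the
// first remembered-set bit to clear is bit 3 and the last is bit 5 of the
// same word, then with the bit positions expressed as single-bit masks (as
// the arithmetic above implies) the code computes
//   start_bitmask = (1 << 3) - 1            = 0x00000007  // keep bits 0..2
//   end_bitmask   = ~(((1 << 5) << 1) - 1)  = 0xFFFFFFC0  // keep bits 6..31
// and masking with (start_bitmask | end_bitmask) = 0xFFFFFFC7 clears exactly
// bits 3 through 5.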
946
947
948 class UpdateRSetVisitor: public ObjectVisitor {
949 public:
950
951   void VisitPointer(Object** p) {
952 UpdateRSet(p);
953 }
954
955   void VisitPointers(Object** start, Object** end) {
956 // Update a store into slots [start, end), used (a) to update remembered
957 // set when promoting a young object to old space or (b) to rebuild
958 // remembered sets after a mark-compact collection.
959 for (Object** p = start; p < end; p++) UpdateRSet(p);
960 }
961 private:
962
963   void UpdateRSet(Object** p) {
964 // The remembered set should not be set. It should be clear for objects
965 // newly copied to old space, and it is cleared before rebuilding in the
966 // mark-compact collector.
967 ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
968 if (Heap::InNewSpace(*p)) {
969 Page::SetRSet(reinterpret_cast<Address>(p), 0);
970 }
971 }
972 };
973
974
975 int Heap::UpdateRSet(HeapObject* obj) {
976 ASSERT(!InNewSpace(obj));
977 // Special handling of fixed arrays to iterate the body based on the start
978 // address and offset. Just iterating the pointers as in UpdateRSetVisitor
979 // will not work because Page::SetRSet needs to have the start of the
980 // object for large object pages.
981 if (obj->IsFixedArray()) {
982 FixedArray* array = FixedArray::cast(obj);
983 int length = array->length();
984 for (int i = 0; i < length; i++) {
985 int offset = FixedArray::kHeaderSize + i * kPointerSize;
986 ASSERT(!Page::IsRSetSet(obj->address(), offset));
987 if (Heap::InNewSpace(array->get(i))) {
988 Page::SetRSet(obj->address(), offset);
989 }
990 }
991 } else if (!obj->IsCode()) {
992 // Skip code object, we know it does not contain inter-generational
993 // pointers.
994 UpdateRSetVisitor v;
995 obj->Iterate(&v);
996 }
997 return obj->Size();
998 }
999
1000
1001 void Heap::RebuildRSets() {
1002 // By definition, we do not care about remembered set bits in code,
1003 // data, or cell spaces.
1004 map_space_->ClearRSet();
1005 RebuildRSets(map_space_);
1006
1007 old_pointer_space_->ClearRSet();
1008 RebuildRSets(old_pointer_space_);
1009
1010 Heap::lo_space_->ClearRSet();
1011 RebuildRSets(lo_space_);
1012 }
1013
1014
1015 void Heap::RebuildRSets(PagedSpace* space) {
1016 HeapObjectIterator it(space);
1017 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1018 Heap::UpdateRSet(obj);
1019 }
1020
1021
1022 void Heap::RebuildRSets(LargeObjectSpace* space) {
1023 LargeObjectIterator it(space);
1024 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1025 Heap::UpdateRSet(obj);
1026 }
1027
1028
1029 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1030 void Heap::RecordCopiedObject(HeapObject* obj) {
1031 bool should_record = false;
1032 #ifdef DEBUG
1033 should_record = FLAG_heap_stats;
1034 #endif
1035 #ifdef ENABLE_LOGGING_AND_PROFILING
1036 should_record = should_record || FLAG_log_gc;
1037 #endif
1038 if (should_record) {
1039 if (new_space_.Contains(obj)) {
1040 new_space_.RecordAllocation(obj);
1041 } else {
1042 new_space_.RecordPromotion(obj);
1043 }
1044 }
1045 }
1046 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1047
1048
1049
1050 HeapObject* Heap::MigrateObject(HeapObject* source,
1051 HeapObject* target,
1052 int size) {
1053 // Copy the content of source to target.
1054 CopyBlock(reinterpret_cast<Object**>(target->address()),
1055 reinterpret_cast<Object**>(source->address()),
1056 size);
1057
1058 // Set the forwarding address.
1059 source->set_map_word(MapWord::FromForwardingAddress(target));
1060
1061 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1062 // Update NewSpace stats if necessary.
1063 RecordCopiedObject(target);
1064 #endif
1065
1066 return target;
1067 }
1068
1069
1070 static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
1071 STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0);
1072 ASSERT(object->map() == map);
1073 InstanceType type = map->instance_type();
1074 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return false;
1075 ASSERT(object->IsString() && !object->IsSymbol());
1076 return ConsString::cast(object)->unchecked_second() == Heap::empty_string();
1077 }
1078
1079
1080 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1081 ASSERT(InFromSpace(object));
1082 MapWord first_word = object->map_word();
1083 ASSERT(!first_word.IsForwardingAddress());
1084
1085 // Optimization: Bypass flattened ConsString objects.
1086 if (IsShortcutCandidate(object, first_word.ToMap())) {
1087 object = HeapObject::cast(ConsString::cast(object)->unchecked_first());
1088 *p = object;
1089 // After patching *p we have to repeat the checks that object is in the
1090 // active semispace of the young generation and not already copied.
1091 if (!InNewSpace(object)) return;
1092 first_word = object->map_word();
1093 if (first_word.IsForwardingAddress()) {
1094 *p = first_word.ToForwardingAddress();
1095 return;
1096 }
1097 }
1098
1099 int object_size = object->SizeFromMap(first_word.ToMap());
1100 // We rely on live objects in new space to be at least two pointers,
1101 // so we can store the from-space address and map pointer of promoted
1102 // objects in the to space.
1103 ASSERT(object_size >= 2 * kPointerSize);
1104
1105 // If the object should be promoted, we try to copy it to old space.
1106 if (ShouldBePromoted(object->address(), object_size)) {
1107 Object* result;
1108 if (object_size > MaxObjectSizeInPagedSpace()) {
1109 result = lo_space_->AllocateRawFixedArray(object_size);
1110 if (!result->IsFailure()) {
1111 // Save the from-space object pointer and its map pointer at the
1112 // top of the to space to be swept and copied later. Write the
1113 // forwarding address over the map word of the from-space
1114 // object.
1115 HeapObject* target = HeapObject::cast(result);
1116 promotion_queue.insert(object, first_word.ToMap());
1117 object->set_map_word(MapWord::FromForwardingAddress(target));
1118
1119 // Give the space allocated for the result a proper map by
1120 // treating it as a free list node (not linked into the free
1121 // list).
1122 FreeListNode* node = FreeListNode::FromAddress(target->address());
1123 node->set_size(object_size);
1124
1125 *p = target;
1126 return;
1127 }
1128 } else {
1129 OldSpace* target_space = Heap::TargetSpace(object);
1130 ASSERT(target_space == Heap::old_pointer_space_ ||
1131 target_space == Heap::old_data_space_);
1132 result = target_space->AllocateRaw(object_size);
1133 if (!result->IsFailure()) {
1134 HeapObject* target = HeapObject::cast(result);
1135 if (target_space == Heap::old_pointer_space_) {
1136 // Save the from-space object pointer and its map pointer at the
1137 // top of the to space to be swept and copied later. Write the
1138 // forwarding address over the map word of the from-space
1139 // object.
1140 promotion_queue.insert(object, first_word.ToMap());
1141 object->set_map_word(MapWord::FromForwardingAddress(target));
1142
1143 // Give the space allocated for the result a proper map by
1144 // treating it as a free list node (not linked into the free
1145 // list).
1146 FreeListNode* node = FreeListNode::FromAddress(target->address());
1147 node->set_size(object_size);
1148
1149 *p = target;
1150 } else {
1151 // Objects promoted to the data space can be copied immediately
1152 // and not revisited---we will never sweep that space for
1153 // pointers and the copied objects do not contain pointers to
1154 // new space objects.
1155 *p = MigrateObject(object, target, object_size);
1156 #ifdef DEBUG
1157 VerifyNonPointerSpacePointersVisitor v;
1158 (*p)->Iterate(&v);
1159 #endif
1160 }
1161 return;
1162 }
1163 }
1164 }
1165 // The object should remain in new space or the old space allocation failed.
1166 Object* result = new_space_.AllocateRaw(object_size);
1167 // Failed allocation at this point is utterly unexpected.
1168 ASSERT(!result->IsFailure());
1169 *p = MigrateObject(object, HeapObject::cast(result), object_size);
1170 }
1171
1172
1173 void Heap::ScavengePointer(HeapObject** p) {
1174 ScavengeObject(p, *p);
1175 }
1176
1177
1178 Object* Heap::AllocatePartialMap(InstanceType instance_type,
1179 int instance_size) {
1180 Object* result = AllocateRawMap();
1181 if (result->IsFailure()) return result;
1182
1183 // Map::cast cannot be used due to uninitialized map field.
1184 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1185 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1186 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
1187 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
1188 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
1189 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
1190 reinterpret_cast<Map*>(result)->set_bit_field(0);
1191 reinterpret_cast<Map*>(result)->set_bit_field2(0);
1192 return result;
1193 }
1194
1195
1196 Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1197 Object* result = AllocateRawMap();
1198 if (result->IsFailure()) return result;
1199
1200 Map* map = reinterpret_cast<Map*>(result);
1201 map->set_map(meta_map());
1202 map->set_instance_type(instance_type);
1203 map->set_prototype(null_value());
1204 map->set_constructor(null_value());
1205 map->set_instance_size(instance_size);
1206 map->set_inobject_properties(0);
1207 map->set_pre_allocated_property_fields(0);
1208 map->set_instance_descriptors(empty_descriptor_array());
1209 map->set_code_cache(empty_fixed_array());
1210 map->set_unused_property_fields(0);
1211 map->set_bit_field(0);
1212 map->set_bit_field2(1 << Map::kIsExtensible);
1213
1214 // If the map object is aligned fill the padding area with Smi 0 objects.
1215 if (Map::kPadStart < Map::kSize) {
1216 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1217 0,
1218 Map::kSize - Map::kPadStart);
1219 }
1220 return map;
1221 }
1222
1223
1224 const Heap::StringTypeTable Heap::string_type_table[] = {
1225 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1226 {type, size, k##camel_name##MapRootIndex},
1227 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1228 #undef STRING_TYPE_ELEMENT
1229 };
1230
1231
1232 const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1233 #define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1234 {contents, k##name##RootIndex},
1235 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1236 #undef CONSTANT_SYMBOL_ELEMENT
1237 };
1238
1239
1240 const Heap::StructTable Heap::struct_table[] = {
1241 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1242 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1243 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1244 #undef STRUCT_TABLE_ELEMENT
1245 };
1246
1247
1248 bool Heap::CreateInitialMaps() {
1249 Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1250 if (obj->IsFailure()) return false;
1251 // Map::cast cannot be used due to uninitialized map field.
1252 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1253 set_meta_map(new_meta_map);
1254 new_meta_map->set_map(new_meta_map);
1255
1256 obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
1257 if (obj->IsFailure()) return false;
1258 set_fixed_array_map(Map::cast(obj));
1259
1260 obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1261 if (obj->IsFailure()) return false;
1262 set_oddball_map(Map::cast(obj));
1263
1264 // Allocate the empty array
1265 obj = AllocateEmptyFixedArray();
1266 if (obj->IsFailure()) return false;
1267 set_empty_fixed_array(FixedArray::cast(obj));
1268
1269 obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1270 if (obj->IsFailure()) return false;
1271 set_null_value(obj);
1272
1273 // Allocate the empty descriptor array.
1274 obj = AllocateEmptyFixedArray();
1275 if (obj->IsFailure()) return false;
1276 set_empty_descriptor_array(DescriptorArray::cast(obj));
1277
1278 // Fix the instance_descriptors for the existing maps.
1279 meta_map()->set_instance_descriptors(empty_descriptor_array());
1280 meta_map()->set_code_cache(empty_fixed_array());
1281
1282 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1283 fixed_array_map()->set_code_cache(empty_fixed_array());
1284
1285 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1286 oddball_map()->set_code_cache(empty_fixed_array());
1287
1288 // Fix prototype object for existing maps.
1289 meta_map()->set_prototype(null_value());
1290 meta_map()->set_constructor(null_value());
1291
1292 fixed_array_map()->set_prototype(null_value());
1293 fixed_array_map()->set_constructor(null_value());
1294
1295 oddball_map()->set_prototype(null_value());
1296 oddball_map()->set_constructor(null_value());
1297
1298 obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1299 if (obj->IsFailure()) return false;
1300 set_heap_number_map(Map::cast(obj));
1301
1302 obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1303 if (obj->IsFailure()) return false;
1304 set_proxy_map(Map::cast(obj));
1305
1306 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1307 const StringTypeTable& entry = string_type_table[i];
1308 obj = AllocateMap(entry.type, entry.size);
1309 if (obj->IsFailure()) return false;
1310 roots_[entry.index] = Map::cast(obj);
1311 }
1312
1313 obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize);
1314 if (obj->IsFailure()) return false;
1315 set_undetectable_string_map(Map::cast(obj));
1316 Map::cast(obj)->set_is_undetectable();
1317
1318 obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
1319 if (obj->IsFailure()) return false;
1320 set_undetectable_ascii_string_map(Map::cast(obj));
1321 Map::cast(obj)->set_is_undetectable();
1322
1323 obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
1324 if (obj->IsFailure()) return false;
1325 set_byte_array_map(Map::cast(obj));
1326
1327 obj = AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
1328 if (obj->IsFailure()) return false;
1329 set_pixel_array_map(Map::cast(obj));
1330
1331 obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1332 ExternalArray::kAlignedSize);
1333 if (obj->IsFailure()) return false;
1334 set_external_byte_array_map(Map::cast(obj));
1335
1336 obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1337 ExternalArray::kAlignedSize);
1338 if (obj->IsFailure()) return false;
1339 set_external_unsigned_byte_array_map(Map::cast(obj));
1340
1341 obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1342 ExternalArray::kAlignedSize);
1343 if (obj->IsFailure()) return false;
1344 set_external_short_array_map(Map::cast(obj));
1345
1346 obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1347 ExternalArray::kAlignedSize);
1348 if (obj->IsFailure()) return false;
1349 set_external_unsigned_short_array_map(Map::cast(obj));
1350
1351 obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1352 ExternalArray::kAlignedSize);
1353 if (obj->IsFailure()) return false;
1354 set_external_int_array_map(Map::cast(obj));
1355
1356 obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1357 ExternalArray::kAlignedSize);
1358 if (obj->IsFailure()) return false;
1359 set_external_unsigned_int_array_map(Map::cast(obj));
1360
1361 obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1362 ExternalArray::kAlignedSize);
1363 if (obj->IsFailure()) return false;
1364 set_external_float_array_map(Map::cast(obj));
1365
1366 obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
1367 if (obj->IsFailure()) return false;
1368 set_code_map(Map::cast(obj));
1369
1370 obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1371 JSGlobalPropertyCell::kSize);
1372 if (obj->IsFailure()) return false;
1373 set_global_property_cell_map(Map::cast(obj));
1374
1375 obj = AllocateMap(FILLER_TYPE, kPointerSize);
1376 if (obj->IsFailure()) return false;
1377 set_one_pointer_filler_map(Map::cast(obj));
1378
1379 obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1380 if (obj->IsFailure()) return false;
1381 set_two_pointer_filler_map(Map::cast(obj));
1382
1383 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1384 const StructTable& entry = struct_table[i];
1385 obj = AllocateMap(entry.type, entry.size);
1386 if (obj->IsFailure()) return false;
1387 roots_[entry.index] = Map::cast(obj);
1388 }
1389
1390 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1391 if (obj->IsFailure()) return false;
1392 set_hash_table_map(Map::cast(obj));
1393
1394 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1395 if (obj->IsFailure()) return false;
1396 set_context_map(Map::cast(obj));
1397
1398 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1399 if (obj->IsFailure()) return false;
1400 set_catch_context_map(Map::cast(obj));
1401
1402 obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
1403 if (obj->IsFailure()) return false;
1404 set_global_context_map(Map::cast(obj));
1405
1406 obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
1407 if (obj->IsFailure()) return false;
1408 set_boilerplate_function_map(Map::cast(obj));
1409
1410 obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
1411 if (obj->IsFailure()) return false;
1412 set_shared_function_info_map(Map::cast(obj));
1413
1414 ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
1415 return true;
1416 }
1417
1418
1419 Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
1420 // Statically ensure that it is safe to allocate heap numbers in paged
1421 // spaces.
1422 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1423 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1424
1425 Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1426 if (result->IsFailure()) return result;
1427
1428 HeapObject::cast(result)->set_map(heap_number_map());
1429 HeapNumber::cast(result)->set_value(value);
1430 return result;
1431 }
1432
1433
1434 Object* Heap::AllocateHeapNumber(double value) {
1435 // Use general version, if we're forced to always allocate.
1436 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1437
1438 // This version of AllocateHeapNumber is optimized for
1439 // allocation in new space.
1440 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1441 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
1442 Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
1443 if (result->IsFailure()) return result;
1444 HeapObject::cast(result)->set_map(heap_number_map());
1445 HeapNumber::cast(result)->set_value(value);
1446 return result;
1447 }
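
// Usage sketch (illustration only, not from the original file): allocators in
// this file return either the new object or a Failure, so callers must check
// IsFailure() before using the result. The helper name and guard macro are
// hypothetical; production callers rely on the shared retry-after-GC
// machinery instead of retrying by hand.
#ifdef HEAP_NUMBER_USAGE_EXAMPLE
static Object* AllocateHeapNumberOrRetry(double value) {
  Object* result = Heap::AllocateHeapNumber(value);
  if (result->IsFailure()) {
    // Give the scavenger a chance to free new space, then try once more.
    Heap::CollectGarbage(HeapNumber::kSize, NEW_SPACE);
    result = Heap::AllocateHeapNumber(value);
  }
  return result;
}
#endif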
1448
1449
1450 Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1451 Object* result = AllocateRawCell();
1452 if (result->IsFailure()) return result;
1453 HeapObject::cast(result)->set_map(global_property_cell_map());
1454 JSGlobalPropertyCell::cast(result)->set_value(value);
1455 return result;
1456 }
1457
1458
1459 Object* Heap::CreateOddball(Map* map,
1460 const char* to_string,
1461 Object* to_number) {
1462 Object* result = Allocate(map, OLD_DATA_SPACE);
1463 if (result->IsFailure()) return result;
1464 return Oddball::cast(result)->Initialize(to_string, to_number);
1465 }
1466
1467
1468 bool Heap::CreateApiObjects() {
1469 Object* obj;
1470
1471 obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1472 if (obj->IsFailure()) return false;
1473 set_neander_map(Map::cast(obj));
1474
1475 obj = Heap::AllocateJSObjectFromMap(neander_map());
1476 if (obj->IsFailure()) return false;
1477 Object* elements = AllocateFixedArray(2);
1478 if (elements->IsFailure()) return false;
1479 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1480 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1481 set_message_listeners(JSObject::cast(obj));
1482
1483 return true;
1484 }
1485
1486
1487 void Heap::CreateCEntryStub() {
1488 CEntryStub stub(1);
1489 set_c_entry_code(*stub.GetCode());
1490 }
1491
1492
1493 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
1494 void Heap::CreateRegExpCEntryStub() {
1495 RegExpCEntryStub stub;
1496 set_re_c_entry_code(*stub.GetCode());
1497 }
1498 #endif
1499
1500
1501 void Heap::CreateJSEntryStub() {
1502 JSEntryStub stub;
1503 set_js_entry_code(*stub.GetCode());
1504 }
1505
1506
1507 void Heap::CreateJSConstructEntryStub() {
1508 JSConstructEntryStub stub;
1509 set_js_construct_entry_code(*stub.GetCode());
1510 }
1511
1512
1513 void Heap::CreateFixedStubs() {
1514 // Here we create roots for fixed stubs. They are needed at GC
1515 // for cooking and uncooking (check out frames.cc).
1516 // This eliminates the need for doing dictionary lookup in the
1517 // stub cache for these stubs.
1518 HandleScope scope;
1519 // gcc-4.4 has a problem generating correct code for the following snippet:
1520 // { CEntryStub stub;
1521 // c_entry_code_ = *stub.GetCode();
1522 // }
1523 // { DebuggerStatementStub stub;
1524 // debugger_statement_code_ = *stub.GetCode();
1525 // }
1526 // To work around the problem, make separate functions without inlining.
1527 Heap::CreateCEntryStub();
1528 Heap::CreateJSEntryStub();
1529 Heap::CreateJSConstructEntryStub();
1530 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
1531 Heap::CreateRegExpCEntryStub();
1532 #endif
1533 }
1534
1535
1536 bool Heap::CreateInitialObjects() {
1537 Object* obj;
1538
1539 // The -0 value must be set before NumberFromDouble works.
1540 obj = AllocateHeapNumber(-0.0, TENURED);
1541 if (obj->IsFailure()) return false;
1542 set_minus_zero_value(obj);
1543 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1544
1545 obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1546 if (obj->IsFailure()) return false;
1547 set_nan_value(obj);
1548
1549 obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1550 if (obj->IsFailure()) return false;
1551 set_undefined_value(obj);
1552 ASSERT(!InNewSpace(undefined_value()));
1553
1554 // Allocate initial symbol table.
1555 obj = SymbolTable::Allocate(kInitialSymbolTableSize);
1556 if (obj->IsFailure()) return false;
1557 // Don't use set_symbol_table() due to asserts.
1558 roots_[kSymbolTableRootIndex] = obj;
1559
1560   // Assign the print strings for oddballs after creating the symbol table.
1561 Object* symbol = LookupAsciiSymbol("undefined");
1562 if (symbol->IsFailure()) return false;
1563 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
1564 Oddball::cast(undefined_value())->set_to_number(nan_value());
1565
1566   // Assign the print strings for oddballs after creating the symbol table.
1567 symbol = LookupAsciiSymbol("null");
1568 if (symbol->IsFailure()) return false;
1569 Oddball::cast(null_value())->set_to_string(String::cast(symbol));
1570 Oddball::cast(null_value())->set_to_number(Smi::FromInt(0));
1571
1572 // Allocate the null_value
1573 obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
1574 if (obj->IsFailure()) return false;
1575
1576 obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
1577 if (obj->IsFailure()) return false;
1578 set_true_value(obj);
1579
1580 obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
1581 if (obj->IsFailure()) return false;
1582 set_false_value(obj);
1583
1584 obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
1585 if (obj->IsFailure()) return false;
1586 set_the_hole_value(obj);
1587
1588 obj = CreateOddball(
1589 oddball_map(), "no_interceptor_result_sentinel", Smi::FromInt(-2));
1590 if (obj->IsFailure()) return false;
1591 set_no_interceptor_result_sentinel(obj);
1592
1593 obj = CreateOddball(oddball_map(), "termination_exception", Smi::FromInt(-3));
1594 if (obj->IsFailure()) return false;
1595 set_termination_exception(obj);
1596
1597 // Allocate the empty string.
1598 obj = AllocateRawAsciiString(0, TENURED);
1599 if (obj->IsFailure()) return false;
1600 set_empty_string(String::cast(obj));
1601
1602 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
1603 obj = LookupAsciiSymbol(constant_symbol_table[i].contents);
1604 if (obj->IsFailure()) return false;
1605 roots_[constant_symbol_table[i].index] = String::cast(obj);
1606 }
1607
1608 // Allocate the hidden symbol which is used to identify the hidden properties
1609 // in JSObjects. The hash code has a special value so that it will not match
1610 // the empty string when searching for the property. It cannot be part of the
1611 // loop above because it needs to be allocated manually with the special
1612 // hash code in place. The hash code for the hidden_symbol is zero to ensure
1613 // that it will always be at the first entry in property descriptors.
1614 obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
1615 if (obj->IsFailure()) return false;
1616 hidden_symbol_ = String::cast(obj);
1617
1618 // Allocate the proxy for __proto__.
1619 obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
1620 if (obj->IsFailure()) return false;
1621 set_prototype_accessors(Proxy::cast(obj));
1622
1623 // Allocate the code_stubs dictionary. The initial size is set to avoid
1624 // expanding the dictionary during bootstrapping.
1625 obj = NumberDictionary::Allocate(128);
1626 if (obj->IsFailure()) return false;
1627 set_code_stubs(NumberDictionary::cast(obj));
1628
1629 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
1630 // is set to avoid expanding the dictionary during bootstrapping.
1631 obj = NumberDictionary::Allocate(64);
1632 if (obj->IsFailure()) return false;
1633 set_non_monomorphic_cache(NumberDictionary::cast(obj));
1634
1635 CreateFixedStubs();
1636
1637 if (InitializeNumberStringCache()->IsFailure()) return false;
1638
1639 // Allocate cache for single character strings.
1640 obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
1641 if (obj->IsFailure()) return false;
1642 set_single_character_string_cache(FixedArray::cast(obj));
1643
1644 // Allocate cache for external strings pointing to native source code.
1645 obj = AllocateFixedArray(Natives::GetBuiltinsCount());
1646 if (obj->IsFailure()) return false;
1647 set_natives_source_cache(FixedArray::cast(obj));
1648
1649 // Handling of script id generation is in Factory::NewScript.
1650 set_last_script_id(undefined_value());
1651
1652 // Initialize keyed lookup cache.
1653 KeyedLookupCache::Clear();
1654
1655 // Initialize context slot cache.
1656 ContextSlotCache::Clear();
1657
1658 // Initialize descriptor cache.
1659 DescriptorLookupCache::Clear();
1660
1661 // Initialize compilation cache.
1662 CompilationCache::Clear();
1663
1664 return true;
1665 }
1666
1667
1668 Object* Heap::InitializeNumberStringCache() {
1669   // Compute the size of the number string cache based on the max semispace size.
1670   // max_semispace_size_ == 512 KB => number_string_cache_size = 1K.
1671 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
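  // E.g. with max_semispace_size_ == 8*MB this evaluates to 16*KB entries, so
  // the FixedArray allocated below holds 2 * 16*KB (key, value) slots.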
1672 int number_string_cache_size = max_semispace_size_ / 512;
1673 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
1674 Object* obj = AllocateFixedArray(number_string_cache_size * 2);
1675 if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
1676 return obj;
1677 }
1678
1679
1680 void Heap::FlushNumberStringCache() {
1681 // Flush the number to string cache.
1682 int len = number_string_cache()->length();
1683 for (int i = 0; i < len; i++) {
1684 number_string_cache()->set_undefined(i);
1685 }
1686 }
1687
1688
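// Hashes a double by XOR-ing the low and high 32 bits of its bit pattern;
// together with smi_get_hash below it is used to index the number string cache.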
1689 static inline int double_get_hash(double d) {
1690 DoubleRepresentation rep(d);
1691 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
1692 }
1693
1694
1695 static inline int smi_get_hash(Smi* smi) {
1696 return smi->value();
1697 }
1698
1699
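// The number string cache is a flat FixedArray of (number, string) pairs:
// entry i occupies slots 2 * i and 2 * i + 1. The mask arithmetic below relies
// on the entry count (half the array length) being a power of two.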
1700 Object* Heap::GetNumberStringCache(Object* number) {
1701 int hash;
1702 int mask = (number_string_cache()->length() >> 1) - 1;
1703 if (number->IsSmi()) {
1704 hash = smi_get_hash(Smi::cast(number)) & mask;
1705 } else {
1706 hash = double_get_hash(number->Number()) & mask;
1707 }
1708 Object* key = number_string_cache()->get(hash * 2);
1709 if (key == number) {
1710 return String::cast(number_string_cache()->get(hash * 2 + 1));
1711 } else if (key->IsHeapNumber() &&
1712 number->IsHeapNumber() &&
1713 key->Number() == number->Number()) {
1714 return String::cast(number_string_cache()->get(hash * 2 + 1));
1715 }
1716 return undefined_value();
1717 }
1718
1719
1720 void Heap::SetNumberStringCache(Object* number, String* string) {
1721 int hash;
1722 int mask = (number_string_cache()->length() >> 1) - 1;
1723 if (number->IsSmi()) {
1724 hash = smi_get_hash(Smi::cast(number)) & mask;
1725 number_string_cache()->set(hash * 2, Smi::cast(number));
1726 } else {
1727 hash = double_get_hash(number->Number()) & mask;
1728 number_string_cache()->set(hash * 2, number);
1729 }
1730 number_string_cache()->set(hash * 2 + 1, string);
1731 }
1732
1733
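// Returns the most compact heap representation of a double: the preallocated
// -0 and NaN values (or fresh copies when new_object is true), a Smi when the
// value is a small integer, and a newly allocated HeapNumber otherwise.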
1734 Object* Heap::SmiOrNumberFromDouble(double value,
1735 bool new_object,
1736 PretenureFlag pretenure) {
1737 // We need to distinguish the minus zero value and this cannot be
1738 // done after conversion to int. Doing this by comparing bit
1739 // patterns is faster than using fpclassify() et al.
1740 static const DoubleRepresentation plus_zero(0.0);
1741 static const DoubleRepresentation minus_zero(-0.0);
1742 static const DoubleRepresentation nan(OS::nan_value());
1743 ASSERT(minus_zero_value() != NULL);
1744 ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));
1745
1746 DoubleRepresentation rep(value);
1747 if (rep.bits == plus_zero.bits) return Smi::FromInt(0); // not uncommon
1748 if (rep.bits == minus_zero.bits) {
1749 return new_object ? AllocateHeapNumber(-0.0, pretenure)
1750 : minus_zero_value();
1751 }
1752 if (rep.bits == nan.bits) {
1753 return new_object
1754 ? AllocateHeapNumber(OS::nan_value(), pretenure)
1755 : nan_value();
1756 }
1757
1758 // Try to represent the value as a tagged small integer.
1759 int int_value = FastD2I(value);
1760 if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
1761 return Smi::FromInt(int_value);
1762 }
1763
1764 // Materialize the value in the heap.
1765 return AllocateHeapNumber(value, pretenure);
1766 }
1767
1768
1769 Object* Heap::NumberToString(Object* number) {
1770 Counters::number_to_string_runtime.Increment();
1771 Object* cached = GetNumberStringCache(number);
1772 if (cached != undefined_value()) {
1773 return cached;
1774 }
1775
1776 char arr[100];
1777 Vector<char> buffer(arr, ARRAY_SIZE(arr));
1778 const char* str;
1779 if (number->IsSmi()) {
1780 int num = Smi::cast(number)->value();
1781 str = IntToCString(num, buffer);
1782 } else {
1783 double num = HeapNumber::cast(number)->value();
1784 str = DoubleToCString(num, buffer);
1785 }
1786 Object* result = AllocateStringFromAscii(CStrVector(str));
1787
1788 if (!result->IsFailure()) {
1789 SetNumberStringCache(number, String::cast(result));
1790 }
1791 return result;
1792 }
1793
1794
1795 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
1796 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
1797 }
1798
1799
1800 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
1801 ExternalArrayType array_type) {
1802 switch (array_type) {
1803 case kExternalByteArray:
1804 return kExternalByteArrayMapRootIndex;
1805 case kExternalUnsignedByteArray:
1806 return kExternalUnsignedByteArrayMapRootIndex;
1807 case kExternalShortArray:
1808 return kExternalShortArrayMapRootIndex;
1809 case kExternalUnsignedShortArray:
1810 return kExternalUnsignedShortArrayMapRootIndex;
1811 case kExternalIntArray:
1812 return kExternalIntArrayMapRootIndex;
1813 case kExternalUnsignedIntArray:
1814 return kExternalUnsignedIntArrayMapRootIndex;
1815 case kExternalFloatArray:
1816 return kExternalFloatArrayMapRootIndex;
1817 default:
1818 UNREACHABLE();
1819 return kUndefinedValueRootIndex;
1820 }
1821 }
1822
1823
1824 Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
1825 return SmiOrNumberFromDouble(value,
1826 true /* number object must be new */,
1827 pretenure);
1828 }
1829
1830
1831 Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
1832 return SmiOrNumberFromDouble(value,
1833 false /* use preallocated NaN, -0.0 */,
1834 pretenure);
1835 }
1836
1837
1838 Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
1839 // Statically ensure that it is safe to allocate proxies in paged spaces.
1840 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
1841 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1842 Object* result = Allocate(proxy_map(), space);
1843 if (result->IsFailure()) return result;
1844
1845 Proxy::cast(result)->set_proxy(proxy);
1846 return result;
1847 }
1848
1849
1850 Object* Heap::AllocateSharedFunctionInfo(Object* name) {
1851 Object* result = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
1852 if (result->IsFailure()) return result;
1853
1854 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
1855 share->set_name(name);
1856 Code* illegal = Builtins::builtin(Builtins::Illegal);
1857 share->set_code(illegal);
1858 Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
1859 share->set_construct_stub(construct_stub);
1860 share->set_expected_nof_properties(0);
1861 share->set_length(0);
1862 share->set_formal_parameter_count(0);
1863 share->set_instance_class_name(Object_symbol());
1864 share->set_function_data(undefined_value());
1865 share->set_script(undefined_value());
1866 share->set_start_position_and_type(0);
1867 share->set_debug_info(undefined_value());
1868 share->set_inferred_name(empty_string());
1869 share->set_compiler_hints(0);
1870 share->set_this_property_assignments_count(0);
1871 share->set_this_property_assignments(undefined_value());
1872 return result;
1873 }
1874
1875
1876 // Returns true for a character in a range. Both limits are inclusive.
1877 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
1878   // This makes use of the unsigned wraparound.
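  // E.g. Between('5', '0', '9') compares 5 <= 9, so true; Between('/', '0', '9')
  // wraps '/' - '0' around to 0xFFFFFFFF, which exceeds 9, so false.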
1879 return character - from <= to - from;
1880 }
1881
1882
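// Shared helper for AllocateConsString and AllocateSubString: for a
// two-character result, reuse an existing two-character symbol when one is
// already in the symbol table instead of allocating a new string.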
1883 static inline Object* MakeOrFindTwoCharacterString(uint32_t c1, uint32_t c2) {
1884 String* symbol;
1885 // Numeric strings have a different hash algorithm not known by
1886 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
1887 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
1888 Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
1889 return symbol;
1890 // Now we know the length is 2, we might as well make use of that fact
1891 // when building the new string.
1892 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
1893 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
1894 Object* result = Heap::AllocateRawAsciiString(2);
1895 if (result->IsFailure()) return result;
1896 char* dest = SeqAsciiString::cast(result)->GetChars();
1897 dest[0] = c1;
1898 dest[1] = c2;
1899 return result;
1900 } else {
1901 Object* result = Heap::AllocateRawTwoByteString(2);
1902 if (result->IsFailure()) return result;
1903 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
1904 dest[0] = c1;
1905 dest[1] = c2;
1906 return result;
1907 }
1908 }
1909
1910
1911 Object* Heap::AllocateConsString(String* first, String* second) {
1912 int first_length = first->length();
1913 if (first_length == 0) {
1914 return second;
1915 }
1916
1917 int second_length = second->length();
1918 if (second_length == 0) {
1919 return first;
1920 }
1921
1922 int length = first_length + second_length;
1923
1924 // Optimization for 2-byte strings often used as keys in a decompression
1925 // dictionary. Check whether we already have the string in the symbol
1926   // table to prevent creation of many unnecessary strings.
1927 if (length == 2) {
1928 unsigned c1 = first->Get(0);
1929 unsigned c2 = second->Get(0);
1930 return MakeOrFindTwoCharacterString(c1, c2);
1931 }
1932
1933 bool is_ascii = first->IsAsciiRepresentation()
1934 && second->IsAsciiRepresentation();
1935
1936 // Make sure that an out of memory exception is thrown if the length
1937 // of the new cons string is too large.
1938 if (length > String::kMaxLength || length < 0) {
1939 Top::context()->mark_out_of_memory();
1940 return Failure::OutOfMemoryException();
1941 }
1942
1943 // If the resulting string is small make a flat string.
1944 if (length < String::kMinNonFlatLength) {
1945 ASSERT(first->IsFlat());
1946 ASSERT(second->IsFlat());
1947 if (is_ascii) {
1948 Object* result = AllocateRawAsciiString(length);
1949 if (result->IsFailure()) return result;
1950 // Copy the characters into the new object.
1951 char* dest = SeqAsciiString::cast(result)->GetChars();
1952 // Copy first part.
1953 const char* src;
1954 if (first->IsExternalString()) {
1955 src = ExternalAsciiString::cast(first)->resource()->data();
1956 } else {
1957 src = SeqAsciiString::cast(first)->GetChars();
1958 }
1959 for (int i = 0; i < first_length; i++) *dest++ = src[i];
1960 // Copy second part.
1961 if (second->IsExternalString()) {
1962 src = ExternalAsciiString::cast(second)->resource()->data();
1963 } else {
1964 src = SeqAsciiString::cast(second)->GetChars();
1965 }
1966 for (int i = 0; i < second_length; i++) *dest++ = src[i];
1967 return result;
1968 } else {
1969 Object* result = AllocateRawTwoByteString(length);
1970 if (result->IsFailure()) return result;
1971 // Copy the characters into the new object.
1972 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
1973 String::WriteToFlat(first, dest, 0, first_length);
1974 String::WriteToFlat(second, dest + first_length, 0, second_length);
1975 return result;
1976 }
1977 }
1978
1979 Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
1980
1981 Object* result = Allocate(map, NEW_SPACE);
1982 if (result->IsFailure()) return result;
1983
1984 AssertNoAllocation no_gc;
1985 ConsString* cons_string = ConsString::cast(result);
1986 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
1987 cons_string->set_length(length);
1988 cons_string->set_hash_field(String::kEmptyHashField);
1989 cons_string->set_first(first, mode);
1990 cons_string->set_second(second, mode);
1991 return result;
1992 }
1993
1994
1995 Object* Heap::AllocateSubString(String* buffer,
1996 int start,
1997 int end) {
1998 int length = end - start;
1999
2000 if (length == 1) {
2001 return Heap::LookupSingleCharacterStringFromCode(
2002 buffer->Get(start));
2003 } else if (length == 2) {
2004 // Optimization for 2-byte strings often used as keys in a decompression
2005 // dictionary. Check whether we already have the string in the symbol
2006     // table to prevent creation of many unnecessary strings.
2007 unsigned c1 = buffer->Get(start);
2008 unsigned c2 = buffer->Get(start + 1);
2009 return MakeOrFindTwoCharacterString(c1, c2);
2010 }
2011
2012 // Make an attempt to flatten the buffer to reduce access time.
2013 if (!buffer->IsFlat()) {
2014 buffer->TryFlatten();
2015 }
2016
2017 Object* result = buffer->IsAsciiRepresentation()
2018 ? AllocateRawAsciiString(length)
2019 : AllocateRawTwoByteString(length);
2020 if (result->IsFailure()) return result;
2021 String* string_result = String::cast(result);
2022
2023 // Copy the characters into the new object.
2024 if (buffer->IsAsciiRepresentation()) {
2025 ASSERT(string_result->IsAsciiRepresentation());
2026 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2027 String::WriteToFlat(buffer, dest, start, end);
2028 } else {
2029 ASSERT(string_result->IsTwoByteRepresentation());
2030 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2031 String::WriteToFlat(buffer, dest, start, end);
2032 }
2033
2034 return result;
2035 }
2036
2037
2038 Object* Heap::AllocateExternalStringFromAscii(
2039 ExternalAsciiString::Resource* resource) {
2040 size_t length = resource->length();
2041 if (length > static_cast<size_t>(String::kMaxLength)) {
2042 Top::context()->mark_out_of_memory();
2043 return Failure::OutOfMemoryException();
2044 }
2045
2046 Map* map = external_ascii_string_map();
2047 Object* result = Allocate(map, NEW_SPACE);
2048 if (result->IsFailure()) return result;
2049
2050 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
2051 external_string->set_length(static_cast<int>(length));
2052 external_string->set_hash_field(String::kEmptyHashField);
2053 external_string->set_resource(resource);
2054
2055 return result;
2056 }
2057
2058
2059 Object* Heap::AllocateExternalStringFromTwoByte(
2060 ExternalTwoByteString::Resource* resource) {
2061 size_t length = resource->length();
2062 if (length > static_cast<size_t>(String::kMaxLength)) {
2063 Top::context()->mark_out_of_memory();
2064 return Failure::OutOfMemoryException();
2065 }
2066
2067 Map* map = Heap::external_string_map();
2068 Object* result = Allocate(map, NEW_SPACE);
2069 if (result->IsFailure()) return result;
2070
2071 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
2072 external_string->set_length(static_cast<int>(length));
2073 external_string->set_hash_field(String::kEmptyHashField);
2074 external_string->set_resource(resource);
2075
2076 return result;
2077 }
2078
2079
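// Returns a one-character string for the given character code. ASCII codes are
// served from (and added to) the single character string cache as symbols;
// codes above the ASCII range get a fresh two-byte string each time.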
2080 Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
2081 if (code <= String::kMaxAsciiCharCode) {
2082 Object* value = Heap::single_character_string_cache()->get(code);
2083 if (value != Heap::undefined_value()) return value;
2084
2085 char buffer[1];
2086 buffer[0] = static_cast<char>(code);
2087 Object* result = LookupSymbol(Vector<const char>(buffer, 1));
2088
2089 if (result->IsFailure()) return result;
2090 Heap::single_character_string_cache()->set(code, result);
2091 return result;
2092 }
2093
2094 Object* result = Heap::AllocateRawTwoByteString(1);
2095 if (result->IsFailure()) return result;
2096 String* answer = String::cast(result);
2097 answer->Set(0, code);
2098 return answer;
2099 }
2100
2101
2102 Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
2103 if (length < 0 || length > ByteArray::kMaxLength) {
2104 return Failure::OutOfMemoryException();
2105 }
2106 if (pretenure == NOT_TENURED) {
2107 return AllocateByteArray(length);
2108 }
2109 int size = ByteArray::SizeFor(length);
2110 Object* result = (size <= MaxObjectSizeInPagedSpace())
2111 ? old_data_space_->AllocateRaw(size)
2112 : lo_space_->AllocateRaw(size);
2113 if (result->IsFailure()) return result;
2114
2115 reinterpret_cast<Array*>(result)->set_map(byte_array_map());
2116 reinterpret_cast<Array*>(result)->set_length(length);
2117 return result;
2118 }
2119
2120
2121 Object* Heap::AllocateByteArray(int length) {
2122 if (length < 0 || length > ByteArray::kMaxLength) {
2123 return Failure::OutOfMemoryException();
2124 }
2125 int size = ByteArray::SizeFor(length);
2126 AllocationSpace space =
2127 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
2128 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
2129 if (result->IsFailure()) return result;
2130
2131 reinterpret_cast<Array*>(result)->set_map(byte_array_map());
2132 reinterpret_cast<Array*>(result)->set_length(length);
2133 return result;
2134 }
2135
2136
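// Overwrites the region [addr, addr + size) with a dummy object so that heap
// iteration stays valid: a one-pointer filler for a single word, otherwise a
// ByteArray spanning the whole gap.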
2137 void Heap::CreateFillerObjectAt(Address addr, int size) {
2138 if (size == 0) return;
2139 HeapObject* filler = HeapObject::FromAddress(addr);
2140 if (size == kPointerSize) {
2141 filler->set_map(Heap::one_pointer_filler_map());
2142 } else {
2143 filler->set_map(Heap::byte_array_map());
2144 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2145 }
2146 }
2147
2148
2149 Object* Heap::AllocatePixelArray(int length,
2150 uint8_t* external_pointer,
2151 PretenureFlag pretenure) {
2152 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2153 Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
2154 if (result->IsFailure()) return result;
2155
2156 reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
2157 reinterpret_cast<PixelArray*>(result)->set_length(length);
2158 reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
2159
2160 return result;
2161 }
2162
2163
2164 Object* Heap::AllocateExternalArray(int length,
2165 ExternalArrayType array_type,
2166 void* external_pointer,
2167 PretenureFlag pretenure) {
2168 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2169 Object* result = AllocateRaw(ExternalArray::kAlignedSize,
2170 space,
2171 OLD_DATA_SPACE);
2172 if (result->IsFailure()) return result;
2173
2174 reinterpret_cast<ExternalArray*>(result)->set_map(
2175 MapForExternalArrayType(array_type));
2176 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2177 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2178 external_pointer);
2179
2180 return result;
2181 }
2182
2183
2184 Object* Heap::CreateCode(const CodeDesc& desc,
2185 ZoneScopeInfo* sinfo,
2186 Code::Flags flags,
2187 Handle<Object> self_reference) {
2188 // Compute size
2189 int body_size = RoundUp(desc.instr_size + desc.reloc_size, kObjectAlignment);
2190 int sinfo_size = 0;
2191 if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
2192 int obj_size = Code::SizeFor(body_size, sinfo_size);
2193 ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
2194 Object* result;
2195 if (obj_size > MaxObjectSizeInPagedSpace()) {
2196 result = lo_space_->AllocateRawCode(obj_size);
2197 } else {
2198 result = code_space_->AllocateRaw(obj_size);
2199 }
2200
2201 if (result->IsFailure()) return result;
2202
2203 // Initialize the object
2204 HeapObject::cast(result)->set_map(code_map());
2205 Code* code = Code::cast(result);
2206 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2207 code->set_instruction_size(desc.instr_size);
2208 code->set_relocation_size(desc.reloc_size);
2209 code->set_sinfo_size(sinfo_size);
2210 code->set_flags(flags);
2211   // Allow self references to the created code object by patching the handle to
2212 // point to the newly allocated Code object.
2213 if (!self_reference.is_null()) {
2214 *(self_reference.location()) = code;
2215 }
2216 // Migrate generated code.
2217 // The generated code can contain Object** values (typically from handles)
2218 // that are dereferenced during the copy to point directly to the actual heap
2219 // objects. These pointers can include references to the code object itself,
2220 // through the self_reference parameter.
2221 code->CopyFrom(desc);
2222 if (sinfo != NULL) sinfo->Serialize(code); // write scope info
2223
2224 #ifdef DEBUG
2225 code->Verify();
2226 #endif
2227 return code;
2228 }
2229
2230
2231 Object* Heap::CopyCode(Code* code) {
2232 // Allocate an object the same size as the code object.
2233 int obj_size = code->Size();
2234 Object* result;
2235 if (obj_size > MaxObjectSizeInPagedSpace()) {
2236 result = lo_space_->AllocateRawCode(obj_size);
2237 } else {
2238 result = code_space_->AllocateRaw(obj_size);
2239 }
2240
2241 if (result->IsFailure()) return result;
2242
2243 // Copy code object.
2244 Address old_addr = code->address();
2245 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2246 CopyBlock(reinterpret_cast<Object**>(new_addr),
2247 reinterpret_cast<Object**>(old_addr),
2248 obj_size);
2249 // Relocate the copy.
2250 Code* new_code = Code::cast(result);
2251 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2252 new_code->Relocate(new_addr - old_addr);
2253 return new_code;
2254 }
2255
2256
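// Generic allocation of a fixed-size object with the given map. For new-space
// requests the retry space is derived from the instance type, so a full new
// space falls back to the matching old space.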
2257 Object* Heap::Allocate(Map* map, AllocationSpace space) {
2258 ASSERT(gc_state_ == NOT_IN_GC);
2259 ASSERT(map->instance_type() != MAP_TYPE);
2260 // If allocation failures are disallowed, we may allocate in a different
2261 // space when new space is full and the object is not a large object.
2262 AllocationSpace retry_space =
2263 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
2264 Object* result =
2265 AllocateRaw(map->instance_size(), space, retry_space);
2266 if (result->IsFailure()) return result;
2267 HeapObject::cast(result)->set_map(map);
2268 #ifdef ENABLE_LOGGING_AND_PROFILING
2269 ProducerHeapProfile::RecordJSObjectAllocation(result);
2270 #endif
2271 return result;
2272 }
2273
2274
2275 Object* Heap::InitializeFunction(JSFunction* function,
2276 SharedFunctionInfo* shared,
2277 Object* prototype) {
2278 ASSERT(!prototype->IsMap());
2279 function->initialize_properties();
2280 function->initialize_elements();
2281 function->set_shared(shared);
2282 function->set_prototype_or_initial_map(prototype);
2283 function->set_context(undefined_value());
2284 function->set_literals(empty_fixed_array());
2285 return function;
2286 }
2287
2288
2289 Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
2290 // Allocate the prototype. Make sure to use the object function
2291 // from the function's context, since the function can be from a
2292 // different context.
2293 JSFunction* object_function =
2294 function->context()->global_context()->object_function();
2295 Object* prototype = AllocateJSObject(object_function);
2296 if (prototype->IsFailure()) return prototype;
2297 // When creating the prototype for the function we must set its
2298 // constructor to the function.
2299 Object* result =
2300 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
2301 function,
2302 DONT_ENUM);
2303 if (result->IsFailure()) return result;
2304 return prototype;
2305 }
2306
2307
2308 Object* Heap::AllocateFunction(Map* function_map,
2309 SharedFunctionInfo* shared,
2310 Object* prototype,
2311 PretenureFlag pretenure) {
2312 AllocationSpace space =
2313 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2314 Object* result = Allocate(function_map, space);
2315 if (result->IsFailure()) return result;
2316 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2317 }
2318
2319
2320 Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
2321 // To get fast allocation and map sharing for arguments objects we
2322 // allocate them based on an arguments boilerplate.
2323
2324 // This calls Copy directly rather than using Heap::AllocateRaw so we
2325 // duplicate the check here.
2326 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2327
2328 JSObject* boilerplate =
2329 Top::context()->global_context()->arguments_boilerplate();
2330
2331 // Check that the size of the boilerplate matches our
2332 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2333 // on the size being a known constant.
2334 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2335
2336 // Do the allocation.
2337 Object* result =
2338 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
2339 if (result->IsFailure()) return result;
2340
2341 // Copy the content. The arguments boilerplate doesn't have any
2342 // fields that point to new space so it's safe to skip the write
2343 // barrier here.
2344 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
2345 reinterpret_cast<Object**>(boilerplate->address()),
2346 kArgumentsObjectSize);
2347
2348 // Set the two properties.
2349 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2350 callee);
2351 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2352 Smi::FromInt(length),
2353 SKIP_WRITE_BARRIER);
2354
2355 // Check the state of the object
2356 ASSERT(JSObject::cast(result)->HasFastProperties());
2357 ASSERT(JSObject::cast(result)->HasFastElements());
2358
2359 return result;
2360 }
2361
2362
2363 Object* Heap::AllocateInitialMap(JSFunction* fun) {
2364 ASSERT(!fun->has_initial_map());
2365
2366 // First create a new map with the size and number of in-object properties
2367 // suggested by the function.
2368 int instance_size = fun->shared()->CalculateInstanceSize();
2369 int in_object_properties = fun->shared()->CalculateInObjectProperties();
2370 Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
2371 if (map_obj->IsFailure()) return map_obj;
2372
2373 // Fetch or allocate prototype.
2374 Object* prototype;
2375 if (fun->has_instance_prototype()) {
2376 prototype = fun->instance_prototype();
2377 } else {
2378 prototype = AllocateFunctionPrototype(fun);
2379 if (prototype->IsFailure()) return prototype;
2380 }
2381 Map* map = Map::cast(map_obj);
2382 map->set_inobject_properties(in_object_properties);
2383 map->set_unused_property_fields(in_object_properties);
2384 map->set_prototype(prototype);
2385
2386 // If the function has only simple this property assignments add
2387 // field descriptors for these to the initial map as the object
2388 // cannot be constructed without having these properties. Guard by
2389 // the inline_new flag so we only change the map if we generate a
2390 // specialized construct stub.
2391 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
2392 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
2393 int count = fun->shared()->this_property_assignments_count();
2394 if (count > in_object_properties) {
2395 count = in_object_properties;
2396 }
2397 Object* descriptors_obj = DescriptorArray::Allocate(count);
2398 if (descriptors_obj->IsFailure()) return descriptors_obj;
2399 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
2400 for (int i = 0; i < count; i++) {
2401 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
2402 ASSERT(name->IsSymbol());
2403 FieldDescriptor field(name, i, NONE);
2404 field.SetEnumerationIndex(i);
2405 descriptors->Set(i, &field);
2406 }
2407 descriptors->SetNextEnumerationIndex(count);
2408 descriptors->Sort();
2409 map->set_instance_descriptors(descriptors);
2410 map->set_pre_allocated_property_fields(count);
2411 map->set_unused_property_fields(in_object_properties - count);
2412 }
2413 return map;
2414 }
2415
2416
2417 void Heap::InitializeJSObjectFromMap(JSObject* obj,
2418 FixedArray* properties,
2419 Map* map) {
2420 obj->set_properties(properties);
2421 obj->initialize_elements();
2422 // TODO(1240798): Initialize the object's body using valid initial values
2423 // according to the object's initial map. For example, if the map's
2424 // instance type is JS_ARRAY_TYPE, the length field should be initialized
2425   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
2426   // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
2427   // verification code has to cope with (temporarily) invalid objects. See,
2428   // for example, JSArray::JSArrayVerify.
2429 obj->InitializeBody(map->instance_size());
2430 }
2431
2432
2433 Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
2434 // JSFunctions should be allocated using AllocateFunction to be
2435 // properly initialized.
2436 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
2437
2438   // Both types of global objects should be allocated using
2439   // AllocateGlobalObject to be properly initialized.
2440 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
2441 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
2442
2443 // Allocate the backing storage for the properties.
2444 int prop_size =
2445 map->pre_allocated_property_fields() +
2446 map->unused_property_fields() -
2447 map->inobject_properties();
2448 ASSERT(prop_size >= 0);
2449 Object* properties = AllocateFixedArray(prop_size, pretenure);
2450 if (properties->IsFailure()) return properties;
2451
2452 // Allocate the JSObject.
2453 AllocationSpace space =
2454 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2455 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
2456 Object* obj = Allocate(map, space);
2457 if (obj->IsFailure()) return obj;
2458
2459 // Initialize the JSObject.
2460 InitializeJSObjectFromMap(JSObject::cast(obj),
2461 FixedArray::cast(properties),
2462 map);
2463 return obj;
2464 }
2465
2466
2467 Object* Heap::AllocateJSObject(JSFunction* constructor,
2468 PretenureFlag pretenure) {
2469 // Allocate the initial map if absent.
2470 if (!constructor->has_initial_map()) {
2471 Object* initial_map = AllocateInitialMap(constructor);
2472 if (initial_map->IsFailure()) return initial_map;
2473 constructor->set_initial_map(Map::cast(initial_map));
2474 Map::cast(initial_map)->set_constructor(constructor);
2475 }
2476   // Allocate the object based on the constructor's initial map.
2477 Object* result =
2478 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
2479 // Make sure result is NOT a global object if valid.
2480 ASSERT(result->IsFailure() || !result->IsGlobalObject());
2481 return result;
2482 }
2483
2484
2485 Object* Heap::AllocateGlobalObject(JSFunction* constructor) {
2486 ASSERT(constructor->has_initial_map());
2487 Map* map = constructor->initial_map();
2488
2489 // Make sure no field properties are described in the initial map.
2490 // This guarantees us that normalizing the properties does not
2491 // require us to change property values to JSGlobalPropertyCells.
2492 ASSERT(map->NextFreePropertyIndex() == 0);
2493
2494 // Make sure we don't have a ton of pre-allocated slots in the
2495 // global objects. They will be unused once we normalize the object.
2496 ASSERT(map->unused_property_fields() == 0);
2497 ASSERT(map->inobject_properties() == 0);
2498
2499 // Initial size of the backing store to avoid resize of the storage during
2500   // bootstrapping. The size differs between the JS global object and the
2501 // builtins object.
2502 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
2503
2504 // Allocate a dictionary object for backing storage.
2505 Object* obj =
2506 StringDictionary::Allocate(
2507 map->NumberOfDescribedProperties() * 2 + initial_size);
2508 if (obj->IsFailure()) return obj;
2509 StringDictionary* dictionary = StringDictionary::cast(obj);
2510
2511 // The global object might be created from an object template with accessors.
2512 // Fill these accessors into the dictionary.
2513 DescriptorArray* descs = map->instance_descriptors();
2514 for (int i = 0; i < descs->number_of_descriptors(); i++) {
2515 PropertyDetails details = descs->GetDetails(i);
2516 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
2517 PropertyDetails d =
2518 PropertyDetails(details.attributes(), CALLBACKS, details.index());
2519 Object* value = descs->GetCallbacksObject(i);
2520 value = Heap::AllocateJSGlobalPropertyCell(value);
2521 if (value->IsFailure()) return value;
2522
2523 Object* result = dictionary->Add(descs->GetKey(i), value, d);
2524 if (result->IsFailure()) return result;
2525 dictionary = StringDictionary::cast(result);
2526 }
2527
2528 // Allocate the global object and initialize it with the backing store.
2529 obj = Allocate(map, OLD_POINTER_SPACE);
2530 if (obj->IsFailure()) return obj;
2531 JSObject* global = JSObject::cast(obj);
2532 InitializeJSObjectFromMap(global, dictionary, map);
2533
2534 // Create a new map for the global object.
2535 obj = map->CopyDropDescriptors();
2536 if (obj->IsFailure()) return obj;
2537 Map* new_map = Map::cast(obj);
2538
2539   // Set up the global object as a normalized object.
2540 global->set_map(new_map);
2541 global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
2542 global->set_properties(dictionary);
2543
2544 // Make sure result is a global object with properties in dictionary.
2545 ASSERT(global->IsGlobalObject());
2546 ASSERT(!global->HasFastProperties());
2547 return global;
2548 }
2549
2550
2551 Object* Heap::CopyJSObject(JSObject* source) {
2552 // Never used to copy functions. If functions need to be copied we
2553 // have to be careful to clear the literals array.
2554 ASSERT(!source->IsJSFunction());
2555
2556 // Make the clone.
2557 Map* map = source->map();
2558 int object_size = map->instance_size();
2559 Object* clone;
2560
2561 // If we're forced to always allocate, we use the general allocation
2562 // functions which may leave us with an object in old space.
2563 if (always_allocate()) {
2564 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
2565 if (clone->IsFailure()) return clone;
2566 Address clone_address = HeapObject::cast(clone)->address();
2567 CopyBlock(reinterpret_cast<Object**>(clone_address),
2568 reinterpret_cast<Object**>(source->address()),
2569 object_size);
2570 // Update write barrier for all fields that lie beyond the header.
2571 for (int offset = JSObject::kHeaderSize;
2572 offset < object_size;
2573 offset += kPointerSize) {
2574 RecordWrite(clone_address, offset);
2575 }
2576 } else {
2577 clone = new_space_.AllocateRaw(object_size);
2578 if (clone->IsFailure()) return clone;
2579 ASSERT(Heap::InNewSpace(clone));
2580 // Since we know the clone is allocated in new space, we can copy
2581 // the contents without worrying about updating the write barrier.
2582 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
2583 reinterpret_cast<Object**>(source->address()),
2584 object_size);
2585 }
2586
2587 FixedArray* elements = FixedArray::cast(source->elements());
2588 FixedArray* properties = FixedArray::cast(source->properties());
2589 // Update elements if necessary.
2590   if (elements->length() > 0) {
2591 Object* elem = CopyFixedArray(elements);
2592 if (elem->IsFailure()) return elem;
2593 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
2594 }
2595 // Update properties if necessary.
2596 if (properties->length() > 0) {
2597 Object* prop = CopyFixedArray(properties);
2598 if (prop->IsFailure()) return prop;
2599 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
2600 }
2601 // Return the new clone.
2602 #ifdef ENABLE_LOGGING_AND_PROFILING
2603 ProducerHeapProfile::RecordJSObjectAllocation(clone);
2604 #endif
2605 return clone;
2606 }
2607
2608
2609 Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
2610 JSGlobalProxy* object) {
2611 // Allocate initial map if absent.
2612 if (!constructor->has_initial_map()) {
2613 Object* initial_map = AllocateInitialMap(constructor);
2614 if (initial_map->IsFailure()) return initial_map;
2615 constructor->set_initial_map(Map::cast(initial_map));
2616 Map::cast(initial_map)->set_constructor(constructor);
2617 }
2618
2619 Map* map = constructor->initial_map();
2620
2621 // Check that the already allocated object has the same size as
2622 // objects allocated using the constructor.
2623 ASSERT(map->instance_size() == object->map()->instance_size());
2624
2625 // Allocate the backing storage for the properties.
2626 int prop_size = map->unused_property_fields() - map->inobject_properties();
2627 Object* properties = AllocateFixedArray(prop_size, TENURED);
2628 if (properties->IsFailure()) return properties;
2629
2630 // Reset the map for the object.
2631 object->set_map(constructor->initial_map());
2632
2633 // Reinitialize the object from the constructor map.
2634 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
2635 return object;
2636 }
2637
2638
2639 Object* Heap::AllocateStringFromAscii(Vector<const char> string,
2640 PretenureFlag pretenure) {
2641 Object* result = AllocateRawAsciiString(string.length(), pretenure);
2642 if (result->IsFailure()) return result;
2643
2644 // Copy the characters into the new object.
2645 SeqAsciiString* string_result = SeqAsciiString::cast(result);
2646 for (int i = 0; i < string.length(); i++) {
2647 string_result->SeqAsciiStringSet(i, string[i]);
2648 }
2649 return result;
2650 }
2651
2652
2653 Object* Heap::AllocateStringFromUtf8(Vector<const char> string,
2654 PretenureFlag pretenure) {
2655 // Count the number of characters in the UTF-8 string and check if
2656 // it is an ASCII string.
2657 Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
2658 decoder->Reset(string.start(), string.length());
2659 int chars = 0;
2660 bool is_ascii = true;
2661 while (decoder->has_more()) {
2662 uc32 r = decoder->GetNext();
2663 if (r > String::kMaxAsciiCharCode) is_ascii = false;
2664 chars++;
2665 }
2666
2667 // If the string is ascii, we do not need to convert the characters
2668 // since UTF8 is backwards compatible with ascii.
2669 if (is_ascii) return AllocateStringFromAscii(string, pretenure);
2670
2671 Object* result = AllocateRawTwoByteString(chars, pretenure);
2672 if (result->IsFailure()) return result;
2673
2674 // Convert and copy the characters into the new object.
2675 String* string_result = String::cast(result);
2676 decoder->Reset(string.start(), string.length());
2677 for (int i = 0; i < chars; i++) {
2678 uc32 r = decoder->GetNext();
2679 string_result->Set(i, r);
2680 }
2681 return result;
2682 }
2683
2684
2685 Object* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
2686 PretenureFlag pretenure) {
2687 // Check if the string is an ASCII string.
2688 int i = 0;
2689 while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
2690
2691 Object* result;
2692 if (i == string.length()) { // It's an ASCII string.
2693 result = AllocateRawAsciiString(string.length(), pretenure);
2694 } else { // It's not an ASCII string.
2695 result = AllocateRawTwoByteString(string.length(), pretenure);
2696 }
2697 if (result->IsFailure()) return result;
2698
2699 // Copy the characters into the new object, which may be either ASCII or
2700 // UTF-16.
2701 String* string_result = String::cast(result);
2702 for (int i = 0; i < string.length(); i++) {
2703 string_result->Set(i, string[i]);
2704 }
2705 return result;
2706 }
2707
2708
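// Returns the symbol map corresponding to the given string's map, so the
// string can be converted to a symbol in place, or NULL if it cannot (the
// string is in new space or its map has no symbol counterpart).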
2709 Map* Heap::SymbolMapForString(String* string) {
2710 // If the string is in new space it cannot be used as a symbol.
2711 if (InNewSpace(string)) return NULL;
2712
2713 // Find the corresponding symbol map for strings.
2714 Map* map = string->map();
2715 if (map == ascii_string_map()) return ascii_symbol_map();
2716 if (map == string_map()) return symbol_map();
2717 if (map == cons_string_map()) return cons_symbol_map();
2718 if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
2719 if (map == external_string_map()) return external_symbol_map();
2720 if (map == external_ascii_string_map()) return external_ascii_symbol_map();
2721
2722 // No match found.
2723 return NULL;
2724 }
2725
2726
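// Allocates a sequential symbol in old data space (or large object space if it
// does not fit on a regular page). The buffer is scanned once to choose between
// an ASCII and a two-byte representation, rewound, and scanned again to copy
// the characters.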
2727 Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
2728 int chars,
2729 uint32_t hash_field) {
2730 ASSERT(chars >= 0);
2731   // Ensure that chars matches the number of characters in the buffer.
2732 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
2733 // Determine whether the string is ascii.
2734 bool is_ascii = true;
2735 while (buffer->has_more()) {
2736 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
2737 is_ascii = false;
2738 break;
2739 }
2740 }
2741 buffer->Rewind();
2742
2743 // Compute map and object size.
2744 int size;
2745 Map* map;
2746
2747 if (is_ascii) {
2748 if (chars > SeqAsciiString::kMaxLength) {
2749 return Failure::OutOfMemoryException();
2750 }
2751 map = ascii_symbol_map();
2752 size = SeqAsciiString::SizeFor(chars);
2753 } else {
2754 if (chars > SeqTwoByteString::kMaxLength) {
2755 return Failure::OutOfMemoryException();
2756 }
2757 map = symbol_map();
2758 size = SeqTwoByteString::SizeFor(chars);
2759 }
2760
2761 // Allocate string.
2762 Object* result = (size > MaxObjectSizeInPagedSpace())
2763 ? lo_space_->AllocateRaw(size)
2764 : old_data_space_->AllocateRaw(size);
2765 if (result->IsFailure()) return result;
2766
2767 reinterpret_cast<HeapObject*>(result)->set_map(map);
2768 // Set length and hash fields of the allocated string.
2769 String* answer = String::cast(result);
2770 answer->set_length(chars);
2771 answer->set_hash_field(hash_field);
2772
2773 ASSERT_EQ(size, answer->Size());
2774
2775 // Fill in the characters.
2776 for (int i = 0; i < chars; i++) {
2777 answer->Set(i, buffer->GetNext());
2778 }
2779 return answer;
2780 }
2781
2782
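// Space selection below (mirrored in AllocateRawTwoByteString): new space by
// default, old data space when TENURED, and large object space either directly
// for strings too big for the chosen space or as the retry space for large
// new-space requests.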
2783 Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
2784 if (length < 0 || length > SeqAsciiString::kMaxLength) {
2785 return Failure::OutOfMemoryException();
2786 }
2787
2788 int size = SeqAsciiString::SizeFor(length);
2789 ASSERT(size <= SeqAsciiString::kMaxSize);
2790
2791 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2792 AllocationSpace retry_space = OLD_DATA_SPACE;
2793
2794 if (space == NEW_SPACE) {
2795 if (size > kMaxObjectSizeInNewSpace) {
2796 // Allocate in large object space, retry space will be ignored.
2797 space = LO_SPACE;
2798 } else if (size > MaxObjectSizeInPagedSpace()) {
2799 // Allocate in new space, retry in large object space.
2800 retry_space = LO_SPACE;
2801 }
2802 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
2803 space = LO_SPACE;
2804 }
2805 Object* result = AllocateRaw(size, space, retry_space);
2806 if (result->IsFailure()) return result;
2807
2808 // Partially initialize the object.
2809 HeapObject::cast(result)->set_map(ascii_string_map());
2810 String::cast(result)->set_length(length);
2811 String::cast(result)->set_hash_field(String::kEmptyHashField);
2812 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2813 return result;
2814 }
2815
2816
2817 Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
2818 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
2819 return Failure::OutOfMemoryException();
2820 }
2821 int size = SeqTwoByteString::SizeFor(length);
2822 ASSERT(size <= SeqTwoByteString::kMaxSize);
2823 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2824 AllocationSpace retry_space = OLD_DATA_SPACE;
2825
2826 if (space == NEW_SPACE) {
2827 if (size > kMaxObjectSizeInNewSpace) {
2828 // Allocate in large object space, retry space will be ignored.
2829 space = LO_SPACE;
2830 } else if (size > MaxObjectSizeInPagedSpace()) {
2831 // Allocate in new space, retry in large object space.
2832 retry_space = LO_SPACE;
2833 }
2834 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
2835 space = LO_SPACE;
2836 }
2837 Object* result = AllocateRaw(size, space, retry_space);
2838 if (result->IsFailure()) return result;
2839
2840 // Partially initialize the object.
2841 HeapObject::cast(result)->set_map(string_map());
2842 String::cast(result)->set_length(length);
2843 String::cast(result)->set_hash_field(String::kEmptyHashField);
2844 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2845 return result;
2846 }
2847
2848
2849 Object* Heap::AllocateEmptyFixedArray() {
2850 int size = FixedArray::SizeFor(0);
2851 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
2852 if (result->IsFailure()) return result;
2853 // Initialize the object.
2854 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2855 reinterpret_cast<Array*>(result)->set_length(0);
2856 return result;
2857 }
2858
2859
2860 Object* Heap::AllocateRawFixedArray(int length) {
2861 if (length < 0 || length > FixedArray::kMaxLength) {
2862 return Failure::OutOfMemoryException();
2863 }
2864 // Use the general function if we're forced to always allocate.
2865 if (always_allocate()) return AllocateFixedArray(length, TENURED);
2866 // Allocate the raw data for a fixed array.
2867 int size = FixedArray::SizeFor(length);
2868 return size <= kMaxObjectSizeInNewSpace
2869 ? new_space_.AllocateRaw(size)
2870 : lo_space_->AllocateRawFixedArray(size);
2871 }
2872
2873
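// Copies a FixedArray. A copy that lands in new space can be block-copied
// without write barriers; otherwise the header is set up first and the elements
// are copied one by one with the appropriate write barrier mode.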
2874 Object* Heap::CopyFixedArray(FixedArray* src) {
2875 int len = src->length();
2876 Object* obj = AllocateRawFixedArray(len);
2877 if (obj->IsFailure()) return obj;
2878 if (Heap::InNewSpace(obj)) {
2879 HeapObject* dst = HeapObject::cast(obj);
2880 CopyBlock(reinterpret_cast<Object**>(dst->address()),
2881 reinterpret_cast<Object**>(src->address()),
2882 FixedArray::SizeFor(len));
2883 return obj;
2884 }
2885 HeapObject::cast(obj)->set_map(src->map());
2886 FixedArray* result = FixedArray::cast(obj);
2887 result->set_length(len);
2888
2889 // Copy the content
2890 AssertNoAllocation no_gc;
2891 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
2892 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
2893 return result;
2894 }
2895
2896
2897 Object* Heap::AllocateFixedArray(int length) {
2898 ASSERT(length >= 0);
2899 if (length == 0) return empty_fixed_array();
2900 Object* result = AllocateRawFixedArray(length);
2901 if (!result->IsFailure()) {
2902 // Initialize header.
2903 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2904 FixedArray* array = FixedArray::cast(result);
2905 array->set_length(length);
2906 Object* value = undefined_value();
2907 // Initialize body.
2908 for (int index = 0; index < length; index++) {
2909 ASSERT(!Heap::InNewSpace(value)); // value = undefined
2910 array->set(index, value, SKIP_WRITE_BARRIER);
2911 }
2912 }
2913 return result;
2914 }
2915
2916
2917 Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
2918 ASSERT(length >= 0);
2919 ASSERT(empty_fixed_array()->IsFixedArray());
2920 if (length < 0 || length > FixedArray::kMaxLength) {
2921 return Failure::OutOfMemoryException();
2922 }
2923 if (length == 0) return empty_fixed_array();
2924
2925 AllocationSpace space =
2926 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2927 int size = FixedArray::SizeFor(length);
2928 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
2929 // Too big for new space.
2930 space = LO_SPACE;
2931 } else if (space == OLD_POINTER_SPACE &&
2932 size > MaxObjectSizeInPagedSpace()) {
2933 // Too big for old pointer space.
2934 space = LO_SPACE;
2935 }
2936
2937 // Specialize allocation for the space.
2938 Object* result = Failure::OutOfMemoryException();
2939 if (space == NEW_SPACE) {
2940 // We cannot use Heap::AllocateRaw() because it will not properly
2941 // allocate extra remembered set bits if always_allocate() is true and
2942 // new space allocation fails.
2943 result = new_space_.AllocateRaw(size);
2944 if (result->IsFailure() && always_allocate()) {
2945 if (size <= MaxObjectSizeInPagedSpace()) {
2946 result = old_pointer_space_->AllocateRaw(size);
2947 } else {
2948 result = lo_space_->AllocateRawFixedArray(size);
2949 }
2950 }
2951 } else if (space == OLD_POINTER_SPACE) {
2952 result = old_pointer_space_->AllocateRaw(size);
2953 } else {
2954 ASSERT(space == LO_SPACE);
2955 result = lo_space_->AllocateRawFixedArray(size);
2956 }
2957 if (result->IsFailure()) return result;
2958
2959 // Initialize the object.
2960 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2961 FixedArray* array = FixedArray::cast(result);
2962 array->set_length(length);
2963 Object* value = undefined_value();
2964 for (int index = 0; index < length; index++) {
2965 ASSERT(!Heap::InNewSpace(value)); // value = undefined
2966 array->set(index, value, SKIP_WRITE_BARRIER);
2967 }
2968 return array;
2969 }
2970
2971
2972 Object* Heap::AllocateFixedArrayWithHoles(int length) {
2973 if (length == 0) return empty_fixed_array();
2974 Object* result = AllocateRawFixedArray(length);
2975 if (!result->IsFailure()) {
2976 // Initialize header.
2977 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2978 FixedArray* array = FixedArray::cast(result);
2979 array->set_length(length);
2980 // Initialize body.
2981 Object* value = the_hole_value();
2982 for (int index = 0; index < length; index++) {
2983 ASSERT(!Heap::InNewSpace(value)); // value = the hole
2984 array->set(index, value, SKIP_WRITE_BARRIER);
2985 }
2986 }
2987 return result;
2988 }
2989
2990
2991 Object* Heap::AllocateHashTable(int length) {
2992 Object* result = Heap::AllocateFixedArray(length);
2993 if (result->IsFailure()) return result;
2994 reinterpret_cast<Array*>(result)->set_map(hash_table_map());
2995 ASSERT(result->IsHashTable());
2996 return result;
2997 }
2998
2999
3000 Object* Heap::AllocateGlobalContext() {
3001 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3002 if (result->IsFailure()) return result;
3003 Context* context = reinterpret_cast<Context*>(result);
3004 context->set_map(global_context_map());
3005 ASSERT(context->IsGlobalContext());
3006 ASSERT(result->IsContext());
3007 return result;
3008 }
3009
3010
3011 Object* Heap::AllocateFunctionContext(int length, JSFunction* function) {
3012 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
3013 Object* result = Heap::AllocateFixedArray(length);
3014 if (result->IsFailure()) return result;
3015 Context* context = reinterpret_cast<Context*>(result);
3016 context->set_map(context_map());
3017 context->set_closure(function);
3018 context->set_fcontext(context);
3019 context->set_previous(NULL);
3020 context->set_extension(NULL);
3021 context->set_global(function->context()->global());
3022 ASSERT(!context->IsGlobalContext());
3023 ASSERT(context->is_function_context());
3024 ASSERT(result->IsContext());
3025 return result;
3026 }
3027
3028
3029 Object* Heap::AllocateWithContext(Context* previous,
3030 JSObject* extension,
3031 bool is_catch_context) {
3032 Object* result = Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3033 if (result->IsFailure()) return result;
3034 Context* context = reinterpret_cast<Context*>(result);
3035 context->set_map(is_catch_context ? catch_context_map() : context_map());
3036 context->set_closure(previous->closure());
3037 context->set_fcontext(previous->fcontext());
3038 context->set_previous(previous);
3039 context->set_extension(extension);
3040 context->set_global(previous->global());
3041 ASSERT(!context->IsGlobalContext());
3042 ASSERT(!context->is_function_context());
3043 ASSERT(result->IsContext());
3044 return result;
3045 }
3046
3047
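// Maps each struct instance type to its map via the STRUCT_LIST macro; for
// example, the expansion below turns ACCESSOR_INFO_TYPE into a lookup of
// accessor_info_map(). Oversized structs go straight to large object space.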
3048 Object* Heap::AllocateStruct(InstanceType type) {
3049 Map* map;
3050 switch (type) {
3051 #define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3052 STRUCT_LIST(MAKE_CASE)
3053 #undef MAKE_CASE
3054 default:
3055 UNREACHABLE();
3056 return Failure::InternalError();
3057 }
3058 int size = map->instance_size();
3059 AllocationSpace space =
3060 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
3061 Object* result = Heap::Allocate(map, space);
3062 if (result->IsFailure()) return result;
3063 Struct::cast(result)->InitializeBody(size);
3064 return result;
3065 }
3066
3067
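// Called when the embedder reports that the VM is idle. Consecutive idle
// notifications without an intervening GC escalate the work done: a scavenge
// after kIdlesBeforeScavenge notifications, a full collection (with the
// compilation cache cleared first) after kIdlesBeforeMarkSweep, and a full
// collection with forced compaction after kIdlesBeforeMarkCompact, after
// which the counter resets and true is returned to indicate that no further
// idle cleanup is expected.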
3068 bool Heap::IdleNotification() {
3069 static const int kIdlesBeforeScavenge = 4;
3070 static const int kIdlesBeforeMarkSweep = 7;
3071 static const int kIdlesBeforeMarkCompact = 8;
3072 static int number_idle_notifications = 0;
3073 static int last_gc_count = gc_count_;
3074
3075 bool finished = false;
3076
3077 if (last_gc_count == gc_count_) {
3078 number_idle_notifications++;
3079 } else {
3080 number_idle_notifications = 0;
3081 last_gc_count = gc_count_;
3082 }
3083
3084 if (number_idle_notifications == kIdlesBeforeScavenge) {
3085 CollectGarbage(0, NEW_SPACE);
3086 new_space_.Shrink();
3087 last_gc_count = gc_count_;
3088
3089 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
3090 // Before doing the mark-sweep collections we clear the
3091 // compilation cache to avoid hanging on to source code and
3092 // generated code for cached functions.
3093 CompilationCache::Clear();
3094
3095 CollectAllGarbage(false);
3096 new_space_.Shrink();
3097 last_gc_count = gc_count_;
3098
3099 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3100 CollectAllGarbage(true);
3101 new_space_.Shrink();
3102 last_gc_count = gc_count_;
3103 number_idle_notifications = 0;
3104 finished = true;
3105 }
3106
3107 // Uncommit unused memory in new space.
3108 Heap::UncommitFromSpace();
3109 return finished;
3110 }
3111
3112
3113 #ifdef DEBUG
3114
3115 void Heap::Print() {
3116 if (!HasBeenSetup()) return;
3117 Top::PrintStack();
3118 AllSpaces spaces;
3119 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3120 space->Print();
3121 }
3122
3123
3124 void Heap::ReportCodeStatistics(const char* title) {
3125 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3126 PagedSpace::ResetCodeStatistics();
3127 // We do not look for code in new space, map space, or old space. If code
3128 // somehow ends up in those spaces, we would miss it here.
3129 code_space_->CollectCodeStatistics();
3130 lo_space_->CollectCodeStatistics();
3131 PagedSpace::ReportCodeStatistics();
3132 }
3133
3134
3135 // This function expects that NewSpace's allocated objects histogram is
3136 // populated (via a call to CollectStatistics or else as a side effect of a
3137 // just-completed scavenge collection).
3138 void Heap::ReportHeapStatistics(const char* title) {
3139 USE(title);
3140 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3141 title, gc_count_);
3142 PrintF("mark-compact GC : %d\n", mc_count_);
3143 PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
3144 PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
3145
3146 PrintF("\n");
3147 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3148 GlobalHandles::PrintStats();
3149 PrintF("\n");
3150
3151 PrintF("Heap statistics : ");
3152 MemoryAllocator::ReportStatistics();
3153 PrintF("To space : ");
3154 new_space_.ReportStatistics();
3155 PrintF("Old pointer space : ");
3156 old_pointer_space_->ReportStatistics();
3157 PrintF("Old data space : ");
3158 old_data_space_->ReportStatistics();
3159 PrintF("Code space : ");
3160 code_space_->ReportStatistics();
3161 PrintF("Map space : ");
3162 map_space_->ReportStatistics();
3163 PrintF("Cell space : ");
3164 cell_space_->ReportStatistics();
3165 PrintF("Large object space : ");
3166 lo_space_->ReportStatistics();
3167 PrintF(">>>>>> ========================================= >>>>>>\n");
3168 }
3169
3170 #endif // DEBUG
3171
3172 bool Heap::Contains(HeapObject* value) {
3173 return Contains(value->address());
3174 }
3175
3176
3177 bool Heap::Contains(Address addr) {
3178 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3179 return HasBeenSetup() &&
3180 (new_space_.ToSpaceContains(addr) ||
3181 old_pointer_space_->Contains(addr) ||
3182 old_data_space_->Contains(addr) ||
3183 code_space_->Contains(addr) ||
3184 map_space_->Contains(addr) ||
3185 cell_space_->Contains(addr) ||
3186 lo_space_->SlowContains(addr));
3187 }
3188
3189
3190 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3191 return InSpace(value->address(), space);
3192 }
3193
3194
3195 bool Heap::InSpace(Address addr, AllocationSpace space) {
3196 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3197 if (!HasBeenSetup()) return false;
3198
3199 switch (space) {
3200 case NEW_SPACE:
3201 return new_space_.ToSpaceContains(addr);
3202 case OLD_POINTER_SPACE:
3203 return old_pointer_space_->Contains(addr);
3204 case OLD_DATA_SPACE:
3205 return old_data_space_->Contains(addr);
3206 case CODE_SPACE:
3207 return code_space_->Contains(addr);
3208 case MAP_SPACE:
3209 return map_space_->Contains(addr);
3210 case CELL_SPACE:
3211 return cell_space_->Contains(addr);
3212 case LO_SPACE:
3213 return lo_space_->SlowContains(addr);
3214 }
3215
3216 return false;
3217 }
3218
3219
3220 #ifdef DEBUG
3221 void Heap::Verify() {
3222 ASSERT(HasBeenSetup());
3223
3224 VerifyPointersVisitor visitor;
3225 IterateRoots(&visitor, VISIT_ONLY_STRONG);
3226
3227 new_space_.Verify();
3228
3229 VerifyPointersAndRSetVisitor rset_visitor;
3230 old_pointer_space_->Verify(&rset_visitor);
3231 map_space_->Verify(&rset_visitor);
3232
3233 VerifyPointersVisitor no_rset_visitor;
3234 old_data_space_->Verify(&no_rset_visitor);
3235 code_space_->Verify(&no_rset_visitor);
3236 cell_space_->Verify(&no_rset_visitor);
3237
3238 lo_space_->Verify();
3239 }
3240 #endif // DEBUG
3241
3242
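// Looking up a symbol may grow the symbol table. On success the (possibly
// reallocated) table is written straight into the root list; on allocation
// failure the Failure object is returned to the caller.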
3243 Object* Heap::LookupSymbol(Vector<const char> string) {
3244 Object* symbol = NULL;
3245 Object* new_table = symbol_table()->LookupSymbol(string, &symbol);
3246 if (new_table->IsFailure()) return new_table;
3247 // Can't use set_symbol_table because SymbolTable::cast knows that
3248 // SymbolTable is a singleton and checks for identity.
3249 roots_[kSymbolTableRootIndex] = new_table;
3250 ASSERT(symbol != NULL);
3251 return symbol;
3252 }
3253
3254
3255 Object* Heap::LookupSymbol(String* string) {
3256 if (string->IsSymbol()) return string;
3257 Object* symbol = NULL;
3258 Object* new_table = symbol_table()->LookupString(string, &symbol);
3259 if (new_table->IsFailure()) return new_table;
3260 // Can't use set_symbol_table because SymbolTable::cast knows that
3261 // SymbolTable is a singleton and checks for identity.
3262 roots_[kSymbolTableRootIndex] = new_table;
3263 ASSERT(symbol != NULL);
3264 return symbol;
3265 }
3266
3267
3268 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
3269 if (string->IsSymbol()) {
3270 *symbol = string;
3271 return true;
3272 }
3273 return symbol_table()->LookupSymbolIfExists(string, symbol);
3274 }
3275
3276
3277 #ifdef DEBUG
3278 void Heap::ZapFromSpace() {
3279 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
3280 for (Address a = new_space_.FromSpaceLow();
3281 a < new_space_.FromSpaceHigh();
3282 a += kPointerSize) {
3283 Memory::Address_at(a) = kFromSpaceZapValue;
3284 }
3285 }
3286 #endif // DEBUG
3287
3288
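// The remembered set keeps one bit per pointer-sized slot of the object
// area, packed into 32-bit words. Illustrative example: if bit 5 of the word
// at rset_start is set, the slot at object_start + 5 * kPointerSize may hold
// a pointer into new space and is handed to copy_object_func; bits whose
// slots no longer point into new space are cleared along the way.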
3289 int Heap::IterateRSetRange(Address object_start,
3290 Address object_end,
3291 Address rset_start,
3292 ObjectSlotCallback copy_object_func) {
3293 Address object_address = object_start;
3294 Address rset_address = rset_start;
3295 int set_bits_count = 0;
3296
3297 // Loop over all the pointers in [object_start, object_end).
3298 while (object_address < object_end) {
3299 uint32_t rset_word = Memory::uint32_at(rset_address);
3300 if (rset_word != 0) {
3301 uint32_t result_rset = rset_word;
3302 for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
3303 // Do not dereference pointers at or past object_end.
3304 if ((rset_word & bitmask) != 0 && object_address < object_end) {
3305 Object** object_p = reinterpret_cast<Object**>(object_address);
3306 if (Heap::InNewSpace(*object_p)) {
3307 copy_object_func(reinterpret_cast<HeapObject**>(object_p));
3308 }
3309 // If this pointer does not need to be remembered anymore, clear
3310 // the remembered set bit.
3311 if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
3312 set_bits_count++;
3313 }
3314 object_address += kPointerSize;
3315 }
3316 // Update the remembered set if it has changed.
3317 if (result_rset != rset_word) {
3318 Memory::uint32_at(rset_address) = result_rset;
3319 }
3320 } else {
3321 // No bits in the word were set. This is the common case.
3322 object_address += kPointerSize * kBitsPerInt;
3323 }
3324 rset_address += kIntSize;
3325 }
3326 return set_bits_count;
3327 }
3328
3329
3330 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
3331 ASSERT(Page::is_rset_in_use());
3332 ASSERT(space == old_pointer_space_ || space == map_space_);
3333
3334 static void* paged_rset_histogram = StatsTable::CreateHistogram(
3335 "V8.RSetPaged",
3336 0,
3337 Page::kObjectAreaSize / kPointerSize,
3338 30);
3339
3340 PageIterator it(space, PageIterator::PAGES_IN_USE);
3341 while (it.has_next()) {
3342 Page* page = it.next();
3343 int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
3344 page->RSetStart(), copy_object_func);
3345 if (paged_rset_histogram != NULL) {
3346 StatsTable::AddHistogramSample(paged_rset_histogram, count);
3347 }
3348 }
3349 }
3350
3351
3352 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
3353 IterateStrongRoots(v, mode);
3354 IterateWeakRoots(v, mode);
3355 }
3356
3357
3358 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
3359 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
3360 v->Synchronize("symbol_table");
3361 if (mode != VISIT_ALL_IN_SCAVENGE) {
3362 // Scavenge collections have special processing for this.
3363 ExternalStringTable::Iterate(v);
3364 }
3365 v->Synchronize("external_string_table");
3366 }
3367
3368
3369 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
3370 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
3371 v->Synchronize("strong_root_list");
3372
3373 v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
3374 v->Synchronize("symbol");
3375
3376 Bootstrapper::Iterate(v);
3377 v->Synchronize("bootstrapper");
3378 Top::Iterate(v);
3379 v->Synchronize("top");
3380 Relocatable::Iterate(v);
3381 v->Synchronize("relocatable");
3382
3383 #ifdef ENABLE_DEBUGGER_SUPPORT
3384 Debug::Iterate(v);
3385 #endif
3386 v->Synchronize("debug");
3387 CompilationCache::Iterate(v);
3388 v->Synchronize("compilationcache");
3389
3390 // Iterate over local handles in handle scopes.
3391 HandleScopeImplementer::Iterate(v);
3392 v->Synchronize("handlescope");
3393
3394 // Iterate over the builtin code objects and code stubs in the
3395 // heap. Note that it is not necessary to iterate over code objects
3396 // on scavenge collections.
3397 if (mode != VISIT_ALL_IN_SCAVENGE) {
3398 Builtins::IterateBuiltins(v);
3399 }
3400 v->Synchronize("builtins");
3401
3402 // Iterate over global handles.
3403 if (mode == VISIT_ONLY_STRONG) {
3404 GlobalHandles::IterateStrongRoots(v);
3405 } else {
3406 GlobalHandles::IterateAllRoots(v);
3407 }
3408 v->Synchronize("globalhandles");
3409
3410 // Iterate over pointers being held by inactive threads.
3411 ThreadManager::Iterate(v);
3412 v->Synchronize("threadmanager");
3413
3414 // Iterate over the pointers the Serialization/Deserialization code is
3415 // holding.
3416 // During garbage collection this keeps the partial snapshot cache alive.
3417 // During deserialization of the startup snapshot this creates the partial
3418 // snapshot cache and deserializes the objects it refers to. During
3419 // serialization this does nothing, since the partial snapshot cache is
3420 // empty. However, the next thing we do is create the partial snapshot,
3421 // filling up the partial snapshot cache with objects it needs as we go.
3422 SerializerDeserializer::Iterate(v);
3423 // We don't do a v->Synchronize call here, because in debug mode that will
3424 // output a flag to the snapshot. However at this point the serializer and
3425 // deserializer are deliberately a little unsynchronized (see above) so the
3426 // checking of the sync flag in the snapshot would fail.
3427 }
3428
3429
3430 // Flag is set when the heap has been configured. The heap can be repeatedly
3431 // configured through the API until it is set up.
3432 static bool heap_configured = false;
3433
3434 // TODO(1236194): Since the heap size is configurable on the command line
3435 // and through the API, we should gracefully handle the case that the heap
3436 // size is not big enough to fit all the initial objects.
3437 bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
3438 if (HasBeenSetup()) return false;
3439
3440 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
3441
3442 if (Snapshot::IsEnabled()) {
3443 // If we are using a snapshot we always reserve the default amount
3444 // of memory for each semispace because code in the snapshot has
3445 // write-barrier code that relies on the size and alignment of new
3446 // space. We therefore cannot use a larger max semispace size
3447 // than the default reserved semispace size.
3448 if (max_semispace_size_ > reserved_semispace_size_) {
3449 max_semispace_size_ = reserved_semispace_size_;
3450 }
3451 } else {
3452 // If we are not using snapshots we reserve space for the actual
3453 // max semispace size.
3454 reserved_semispace_size_ = max_semispace_size_;
3455 }
3456
3457 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
3458
3459 // The new space size must be a power of two to support single-bit testing
3460 // for containment.
3461 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
3462 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
3463 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
3464 external_allocation_limit_ = 10 * max_semispace_size_;
3465
3466 // The old generation is paged.
3467 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
3468
3469 heap_configured = true;
3470 return true;
3471 }
3472
3473
3474 bool Heap::ConfigureHeapDefault() {
3475 return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
3476 }
3477
3478
3479 void Heap::RecordStats(HeapStats* stats) {
3480 *stats->start_marker = 0xDECADE00;
3481 *stats->end_marker = 0xDECADE01;
3482 *stats->new_space_size = new_space_.Size();
3483 *stats->new_space_capacity = new_space_.Capacity();
3484 *stats->old_pointer_space_size = old_pointer_space_->Size();
3485 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
3486 *stats->old_data_space_size = old_data_space_->Size();
3487 *stats->old_data_space_capacity = old_data_space_->Capacity();
3488 *stats->code_space_size = code_space_->Size();
3489 *stats->code_space_capacity = code_space_->Capacity();
3490 *stats->map_space_size = map_space_->Size();
3491 *stats->map_space_capacity = map_space_->Capacity();
3492 *stats->cell_space_size = cell_space_->Size();
3493 *stats->cell_space_capacity = cell_space_->Capacity();
3494 *stats->lo_space_size = lo_space_->Size();
3495 GlobalHandles::RecordStats(stats);
3496 }
3497
3498
3499 int Heap::PromotedSpaceSize() {
3500 return old_pointer_space_->Size()
3501 + old_data_space_->Size()
3502 + code_space_->Size()
3503 + map_space_->Size()
3504 + cell_space_->Size()
3505 + lo_space_->Size();
3506 }
3507
3508
3509 int Heap::PromotedExternalMemorySize() {
3510 if (amount_of_external_allocated_memory_
3511 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
3512 return amount_of_external_allocated_memory_
3513 - amount_of_external_allocated_memory_at_last_global_gc_;
3514 }
3515
3516
3517 bool Heap::Setup(bool create_heap_objects) {
3518 // Initialize heap spaces and initial maps and objects. Whenever something
3519 // goes wrong, just return false. The caller should check the results and
3520 // call Heap::TearDown() to release allocated memory.
3521 //
3522 // If the heap is not yet configured (e.g., through the API), configure it.
3523 // Configuration is based on the flags new-space-size (really the semispace
3524 // size) and old-space-size if set or the initial values of semispace_size_
3525 // and old_generation_size_ otherwise.
3526 if (!heap_configured) {
3527 if (!ConfigureHeapDefault()) return false;
3528 }
3529
3530 // Setup memory allocator and reserve a chunk of memory for new
3531 // space. The chunk is double the size of the requested reserved
3532 // new space size to ensure that we can find a pair of semispaces that
3533 // are contiguous and aligned to their size.
3534 if (!MemoryAllocator::Setup(MaxReserved())) return false;
3535 void* chunk =
3536 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
3537 if (chunk == NULL) return false;
3538
3539 // Align the pair of semispaces to their size, which must be a power
3540 // of 2.
3541 Address new_space_start =
3542 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
3543 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
3544 return false;
3545 }
3546
3547 // Initialize old pointer space.
3548 old_pointer_space_ =
3549 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
3550 if (old_pointer_space_ == NULL) return false;
3551 if (!old_pointer_space_->Setup(NULL, 0)) return false;
3552
3553 // Initialize old data space.
3554 old_data_space_ =
3555 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
3556 if (old_data_space_ == NULL) return false;
3557 if (!old_data_space_->Setup(NULL, 0)) return false;
3558
3559 // Initialize the code space, set its maximum capacity to the old
3560 // generation size. It needs executable memory.
3561 // On 64-bit platform(s), we put all code objects in a 2 GB range of
3562 // virtual address space, so that they can call each other with near calls.
3563 if (code_range_size_ > 0) {
3564 if (!CodeRange::Setup(code_range_size_)) {
3565 return false;
3566 }
3567 }
3568
3569 code_space_ =
3570 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
3571 if (code_space_ == NULL) return false;
3572 if (!code_space_->Setup(NULL, 0)) return false;
3573
3574 // Initialize map space.
3575 map_space_ = new MapSpace(FLAG_use_big_map_space
3576 ? max_old_generation_size_
3577 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
3578 FLAG_max_map_space_pages,
3579 MAP_SPACE);
3580 if (map_space_ == NULL) return false;
3581 if (!map_space_->Setup(NULL, 0)) return false;
3582
3583 // Initialize global property cell space.
3584 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
3585 if (cell_space_ == NULL) return false;
3586 if (!cell_space_->Setup(NULL, 0)) return false;
3587
3588 // The large object space may contain code or data. We set the memory
3589 // to be non-executable here for safety, but this means we need to enable it
3590 // explicitly when allocating large code objects.
3591 lo_space_ = new LargeObjectSpace(LO_SPACE);
3592 if (lo_space_ == NULL) return false;
3593 if (!lo_space_->Setup()) return false;
3594
3595 if (create_heap_objects) {
3596 // Create initial maps.
3597 if (!CreateInitialMaps()) return false;
3598 if (!CreateApiObjects()) return false;
3599
3600 // Create initial objects
3601 if (!CreateInitialObjects()) return false;
3602 }
3603
3604 LOG(IntEvent("heap-capacity", Capacity()));
3605 LOG(IntEvent("heap-available", Available()));
3606
3607 #ifdef ENABLE_LOGGING_AND_PROFILING
3608 // This should be called only after initial objects have been created.
3609 ProducerHeapProfile::Setup();
3610 #endif
3611
3612 return true;
3613 }
3614
3615
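// Illustrative example of the tagging below: for any limit address a,
// (a & ~kSmiTagMask) | kSmiTag forces the low-order bits to the smi tag, so
// the stored word looks like a smi and the GC leaves it untouched.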
3616 void Heap::SetStackLimits() {
3617 // On 64-bit machines, pointers are generally out of range of Smis. We write
3618 // something that looks like an out-of-range Smi to the GC.
3619
3620 // Set up the special root array entries containing the stack limits.
3621 // These are actually addresses, but the tag makes the GC ignore it.
3622 roots_[kStackLimitRootIndex] =
3623 reinterpret_cast<Object*>(
3624 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
3625 roots_[kRealStackLimitRootIndex] =
3626 reinterpret_cast<Object*>(
3627 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
3628 }
3629
3630
3631 void Heap::TearDown() {
3632 GlobalHandles::TearDown();
3633
3634 ExternalStringTable::TearDown();
3635
3636 new_space_.TearDown();
3637
3638 if (old_pointer_space_ != NULL) {
3639 old_pointer_space_->TearDown();
3640 delete old_pointer_space_;
3641 old_pointer_space_ = NULL;
3642 }
3643
3644 if (old_data_space_ != NULL) {
3645 old_data_space_->TearDown();
3646 delete old_data_space_;
3647 old_data_space_ = NULL;
3648 }
3649
3650 if (code_space_ != NULL) {
3651 code_space_->TearDown();
3652 delete code_space_;
3653 code_space_ = NULL;
3654 }
3655
3656 if (map_space_ != NULL) {
3657 map_space_->TearDown();
3658 delete map_space_;
3659 map_space_ = NULL;
3660 }
3661
3662 if (cell_space_ != NULL) {
3663 cell_space_->TearDown();
3664 delete cell_space_;
3665 cell_space_ = NULL;
3666 }
3667
3668 if (lo_space_ != NULL) {
3669 lo_space_->TearDown();
3670 delete lo_space_;
3671 lo_space_ = NULL;
3672 }
3673
3674 MemoryAllocator::TearDown();
3675 }
3676
3677
3678 void Heap::Shrink() {
3679 // Try to shrink all paged spaces.
3680 PagedSpaces spaces;
3681 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
3682 space->Shrink();
3683 }
3684
3685
3686 #ifdef ENABLE_HEAP_PROTECTION
3687
3688 void Heap::Protect() {
3689 if (HasBeenSetup()) {
3690 AllSpaces spaces;
3691 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3692 space->Protect();
3693 }
3694 }
3695
3696
3697 void Heap::Unprotect() {
3698 if (HasBeenSetup()) {
3699 AllSpaces spaces;
3700 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3701 space->Unprotect();
3702 }
3703 }
3704
3705 #endif
3706
3707
3708 #ifdef DEBUG
3709
3710 class PrintHandleVisitor: public ObjectVisitor {
3711 public:
3712 void VisitPointers(Object** start, Object** end) {
3713 for (Object** p = start; p < end; p++)
3714 PrintF(" handle %p to %p\n", p, *p);
3715 }
3716 };
3717
3718 void Heap::PrintHandles() {
3719 PrintF("Handles:\n");
3720 PrintHandleVisitor v;
3721 HandleScopeImplementer::Iterate(&v);
3722 }
3723
3724 #endif
3725
3726
3727 Space* AllSpaces::next() {
3728 switch (counter_++) {
3729 case NEW_SPACE:
3730 return Heap::new_space();
3731 case OLD_POINTER_SPACE:
3732 return Heap::old_pointer_space();
3733 case OLD_DATA_SPACE:
3734 return Heap::old_data_space();
3735 case CODE_SPACE:
3736 return Heap::code_space();
3737 case MAP_SPACE:
3738 return Heap::map_space();
3739 case CELL_SPACE:
3740 return Heap::cell_space();
3741 case LO_SPACE:
3742 return Heap::lo_space();
3743 default:
3744 return NULL;
3745 }
3746 }
3747
3748
3749 PagedSpace* PagedSpaces::next() {
3750 switch (counter_++) {
3751 case OLD_POINTER_SPACE:
3752 return Heap::old_pointer_space();
3753 case OLD_DATA_SPACE:
3754 return Heap::old_data_space();
3755 case CODE_SPACE:
3756 return Heap::code_space();
3757 case MAP_SPACE:
3758 return Heap::map_space();
3759 case CELL_SPACE:
3760 return Heap::cell_space();
3761 default:
3762 return NULL;
3763 }
3764 }
3765
3766
3767
3768 OldSpace* OldSpaces::next() {
3769 switch (counter_++) {
3770 case OLD_POINTER_SPACE:
3771 return Heap::old_pointer_space();
3772 case OLD_DATA_SPACE:
3773 return Heap::old_data_space();
3774 case CODE_SPACE:
3775 return Heap::code_space();
3776 default:
3777 return NULL;
3778 }
3779 }
3780
3781
3782 SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
3783 }
3784
3785
3786 SpaceIterator::~SpaceIterator() {
3787 // Delete active iterator if any.
3788 delete iterator_;
3789 }
3790
3791
3792 bool SpaceIterator::has_next() {
3793 // Iterate until no more spaces.
3794 return current_space_ != LAST_SPACE;
3795 }
3796
3797
3798 ObjectIterator* SpaceIterator::next() {
3799 if (iterator_ != NULL) {
3800 delete iterator_;
3801 iterator_ = NULL;
3802 // Move to the next space
3803 current_space_++;
3804 if (current_space_ > LAST_SPACE) {
3805 return NULL;
3806 }
3807 }
3808
3809 // Return iterator for the new current space.
3810 return CreateIterator();
3811 }
3812
3813
3814 // Create an iterator for the space to iterate.
3815 ObjectIterator* SpaceIterator::CreateIterator() {
3816 ASSERT(iterator_ == NULL);
3817
3818 switch (current_space_) {
3819 case NEW_SPACE:
3820 iterator_ = new SemiSpaceIterator(Heap::new_space());
3821 break;
3822 case OLD_POINTER_SPACE:
3823 iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
3824 break;
3825 case OLD_DATA_SPACE:
3826 iterator_ = new HeapObjectIterator(Heap::old_data_space());
3827 break;
3828 case CODE_SPACE:
3829 iterator_ = new HeapObjectIterator(Heap::code_space());
3830 break;
3831 case MAP_SPACE:
3832 iterator_ = new HeapObjectIterator(Heap::map_space());
3833 break;
3834 case CELL_SPACE:
3835 iterator_ = new HeapObjectIterator(Heap::cell_space());
3836 break;
3837 case LO_SPACE:
3838 iterator_ = new LargeObjectIterator(Heap::lo_space());
3839 break;
3840 }
3841
3842 // Return the newly allocated iterator.
3843 ASSERT(iterator_ != NULL);
3844 return iterator_;
3845 }
3846
3847
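// Illustrative usage (not part of this file): visiting every object in the
// heap.
//
//   HeapIterator iterator;
//   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
//     // ... inspect obj ...
//   }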
3848 HeapIterator::HeapIterator() {
3849 Init();
3850 }
3851
3852
3853 HeapIterator::~HeapIterator() {
3854 Shutdown();
3855 }
3856
3857
3858 void HeapIterator::Init() {
3859 // Start the iteration.
3860 space_iterator_ = new SpaceIterator();
3861 object_iterator_ = space_iterator_->next();
3862 }
3863
3864
3865 void HeapIterator::Shutdown() {
3866 // Make sure the last iterator is deallocated.
3867 delete space_iterator_;
3868 space_iterator_ = NULL;
3869 object_iterator_ = NULL;
3870 }
3871
3872
3873 HeapObject* HeapIterator::next() {
3874 // No iterator means we are done.
3875 if (object_iterator_ == NULL) return NULL;
3876
3877 if (HeapObject* obj = object_iterator_->next_object()) {
3878 // If the current iterator has more objects we are fine.
3879 return obj;
3880 } else {
3881 // Go through the spaces looking for one that has objects.
3882 while (space_iterator_->has_next()) {
3883 object_iterator_ = space_iterator_->next();
3884 if (HeapObject* obj = object_iterator_->next_object()) {
3885 return obj;
3886 }
3887 }
3888 }
3889 // Done with the last space.
3890 object_iterator_ = NULL;
3891 return NULL;
3892 }
3893
3894
3895 void HeapIterator::reset() {
3896 // Restart the iterator.
3897 Shutdown();
3898 Init();
3899 }
3900
3901
3902 #ifdef DEBUG
3903
3904 static bool search_for_any_global;
3905 static Object* search_target;
3906 static bool found_target;
3907 static List<Object*> object_stack(20);
3908
3909
3910 // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObjects.
3911 static const int kMarkTag = 2;
3912
3913 static void MarkObjectRecursively(Object** p);
3914 class MarkObjectVisitor : public ObjectVisitor {
3915 public:
3916 void VisitPointers(Object** start, Object** end) {
3917 // Mark all HeapObject pointers in [start, end)
3918 for (Object** p = start; p < end; p++) {
3919 if ((*p)->IsHeapObject())
3920 MarkObjectRecursively(p);
3921 }
3922 }
3923 };
3924
3925 static MarkObjectVisitor mark_visitor;
3926
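// Marks an object by overwriting its map word with the map's address plus
// kMarkTag. The result no longer looks like a HeapObject pointer, which is
// what the early return on !map->IsHeapObject() below relies on to recognize
// objects that were already visited; UnmarkObjectRecursively reverses the
// encoding.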
3927 static void MarkObjectRecursively(Object** p) {
3928 if (!(*p)->IsHeapObject()) return;
3929
3930 HeapObject* obj = HeapObject::cast(*p);
3931
3932 Object* map = obj->map();
3933
3934 if (!map->IsHeapObject()) return; // visited before
3935
3936 if (found_target) return; // stop if target found
3937 object_stack.Add(obj);
3938 if ((search_for_any_global && obj->IsJSGlobalObject()) ||
3939 (!search_for_any_global && (obj == search_target))) {
3940 found_target = true;
3941 return;
3942 }
3943
3944 // not visited yet
3945 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
3946
3947 Address map_addr = map_p->address();
3948
3949 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
3950
3951 MarkObjectRecursively(&map);
3952
3953 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
3954 &mark_visitor);
3955
3956 if (!found_target) // don't pop if found the target
3957 object_stack.RemoveLast();
3958 }
3959
3960
3961 static void UnmarkObjectRecursively(Object** p);
3962 class UnmarkObjectVisitor : public ObjectVisitor {
3963 public:
3964 void VisitPointers(Object** start, Object** end) {
3965 // Unmark all HeapObject pointers in [start, end)
3966 for (Object** p = start; p < end; p++) {
3967 if ((*p)->IsHeapObject())
3968 UnmarkObjectRecursively(p);
3969 }
3970 }
3971 };
3972
3973 static UnmarkObjectVisitor unmark_visitor;
3974
3975 static void UnmarkObjectRecursively(Object** p) {
3976 if (!(*p)->IsHeapObject()) return;
3977
3978 HeapObject* obj = HeapObject::cast(*p);
3979
3980 Object* map = obj->map();
3981
3982 if (map->IsHeapObject()) return; // unmarked already
3983
3984 Address map_addr = reinterpret_cast<Address>(map);
3985
3986 map_addr -= kMarkTag;
3987
3988 ASSERT_TAG_ALIGNED(map_addr);
3989
3990 HeapObject* map_p = HeapObject::FromAddress(map_addr);
3991
3992 obj->set_map(reinterpret_cast<Map*>(map_p));
3993
3994 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
3995
3996 obj->IterateBody(Map::cast(map_p)->instance_type(),
3997 obj->SizeFromMap(Map::cast(map_p)),
3998 &unmark_visitor);
3999 }
4000
4001
4002 static void MarkRootObjectRecursively(Object** root) {
4003 if (search_for_any_global) {
4004 ASSERT(search_target == NULL);
4005 } else {
4006 ASSERT(search_target->IsHeapObject());
4007 }
4008 found_target = false;
4009 object_stack.Clear();
4010
4011 MarkObjectRecursively(root);
4012 UnmarkObjectRecursively(root);
4013
4014 if (found_target) {
4015 PrintF("=====================================\n");
4016 PrintF("==== Path to object ====\n");
4017 PrintF("=====================================\n\n");
4018
4019 ASSERT(!object_stack.is_empty());
4020 for (int i = 0; i < object_stack.length(); i++) {
4021 if (i > 0) PrintF("\n |\n |\n V\n\n");
4022 Object* obj = object_stack[i];
4023 obj->Print();
4024 }
4025 PrintF("=====================================\n");
4026 }
4027 }
4028
4029
4030 // Helper class for visiting HeapObjects recursively.
4031 class MarkRootVisitor: public ObjectVisitor {
4032 public:
4033 void VisitPointers(Object** start, Object** end) {
4034 // Visit all HeapObject pointers in [start, end)
4035 for (Object** p = start; p < end; p++) {
4036 if ((*p)->IsHeapObject())
4037 MarkRootObjectRecursively(p);
4038 }
4039 }
4040 };
4041
4042
4043 // Triggers a depth-first traversal of reachable objects from roots
4044 // and finds a path to a specific heap object and prints it.
4045 void Heap::TracePathToObject(Object* target) {
4046 search_target = target;
4047 search_for_any_global = false;
4048
4049 MarkRootVisitor root_visitor;
4050 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
4051 }
4052
4053
4054 // Triggers a depth-first traversal of reachable objects from roots
4055 // and finds a path to any global object and prints it. Useful for
4056 // determining the source for leaks of global objects.
4057 void Heap::TracePathToGlobal() {
4058 search_target = NULL;
4059 search_for_any_global = true;
4060
4061 MarkRootVisitor root_visitor;
4062 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
4063 }
4064 #endif
4065
4066
4067 GCTracer::GCTracer()
4068 : start_time_(0.0),
4069 start_size_(0.0),
4070 gc_count_(0),
4071 full_gc_count_(0),
4072 is_compacting_(false),
4073 marked_count_(0) {
4074 // These two fields reflect the state of the previous full collection.
4075 // Set them before they are changed by the collector.
4076 previous_has_compacted_ = MarkCompactCollector::HasCompacted();
4077 previous_marked_count_ = MarkCompactCollector::previous_marked_count();
4078 if (!FLAG_trace_gc) return;
4079 start_time_ = OS::TimeCurrentMillis();
4080 start_size_ = SizeOfHeapObjects();
4081 }
4082
4083
4084 GCTracer::~GCTracer() {
4085 if (!FLAG_trace_gc) return;
4086 // Printf ONE line iff flag is set.
4087 PrintF("%s %.1f -> %.1f MB, %d ms.\n",
4088 CollectorString(),
4089 start_size_, SizeOfHeapObjects(),
4090 static_cast<int>(OS::TimeCurrentMillis() - start_time_));
4091
4092 #if defined(ENABLE_LOGGING_AND_PROFILING)
4093 Heap::PrintShortHeapStatistics();
4094 #endif
4095 }
4096
4097
4098 const char* GCTracer::CollectorString() {
4099 switch (collector_) {
4100 case SCAVENGER:
4101 return "Scavenge";
4102 case MARK_COMPACTOR:
4103 return MarkCompactCollector::HasCompacted() ? "Mark-compact"
4104 : "Mark-sweep";
4105 }
4106 return "Unknown GC";
4107 }
4108
4109
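// The cache index combines the receiver's map and the property name:
// roughly ((map_address >> kMapHashShift) ^ name->Hash()) & kCapacityMask,
// so repeated keyed loads with the same map and name hit the same entry.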
4110 int KeyedLookupCache::Hash(Map* map, String* name) {
4111 // Uses only lower 32 bits if pointers are larger.
4112 uintptr_t addr_hash =
4113 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
4114 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
4115 }
4116
4117
4118 int KeyedLookupCache::Lookup(Map* map, String* name) {
4119 int index = Hash(map, name);
4120 Key& key = keys_[index];
4121 if ((key.map == map) && key.name->Equals(name)) {
4122 return field_offsets_[index];
4123 }
4124 return -1;
4125 }
4126
4127
4128 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
4129 String* symbol;
4130 if (Heap::LookupSymbolIfExists(name, &symbol)) {
4131 int index = Hash(map, symbol);
4132 Key& key = keys_[index];
4133 key.map = map;
4134 key.name = symbol;
4135 field_offsets_[index] = field_offset;
4136 }
4137 }
4138
4139
4140 void KeyedLookupCache::Clear() {
4141 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
4142 }
4143
4144
4145 KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
4146
4147
4148 int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
4149
4150
4151 void DescriptorLookupCache::Clear() {
4152 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
4153 }
4154
4155
4156 DescriptorLookupCache::Key
4157 DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
4158
4159 int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
4160
4161
4162 #ifdef DEBUG
4163 bool Heap::GarbageCollectionGreedyCheck() {
4164 ASSERT(FLAG_gc_greedy);
4165 if (Bootstrapper::IsActive()) return true;
4166 if (disallow_allocation_failure()) return true;
4167 return CollectGarbage(0, NEW_SPACE);
4168 }
4169 #endif
4170
4171
4172 TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
4173 : type_(t) {
4174 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
4175 uint32_t in1 = 0xffffffffu; // generated by the FPU.
4176 for (int i = 0; i < kCacheSize; i++) {
4177 elements_[i].in[0] = in0;
4178 elements_[i].in[1] = in1;
4179 elements_[i].output = NULL;
4180 }
4181 }
4182
4183
4184 TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
4185
4186
4187 void TranscendentalCache::Clear() {
4188 for (int i = 0; i < kNumberOfCaches; i++) {
4189 if (caches_[i] != NULL) {
4190 delete caches_[i];
4191 caches_[i] = NULL;
4192 }
4193 }
4194 }
4195
4196
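// Compacts both string lists: entries that were overwritten with the null
// value are dropped, and new-space strings that have been promoted since the
// last clean-up move to the old-space list.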
4197 void ExternalStringTable::CleanUp() {
4198 int last = 0;
4199 for (int i = 0; i < new_space_strings_.length(); ++i) {
4200 if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
4201 if (Heap::InNewSpace(new_space_strings_[i])) {
4202 new_space_strings_[last++] = new_space_strings_[i];
4203 } else {
4204 old_space_strings_.Add(new_space_strings_[i]);
4205 }
4206 }
4207 new_space_strings_.Rewind(last);
4208 last = 0;
4209 for (int i = 0; i < old_space_strings_.length(); ++i) {
4210 if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
4211 ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
4212 old_space_strings_[last++] = old_space_strings_[i];
4213 }
4214 old_space_strings_.Rewind(last);
4215 Verify();
4216 }
4217
4218
4219 void ExternalStringTable::TearDown() {
4220 new_space_strings_.Free();
4221 old_space_strings_.Free();
4222 }
4223
4224
4225 List<Object*> ExternalStringTable::new_space_strings_;
4226 List<Object*> ExternalStringTable::old_space_strings_;
4227
4228 } } // namespace v8::internal
4229