1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "debug.h"
36 #include "heap-profiler.h"
37 #include "global-handles.h"
38 #include "liveobjectlist-inl.h"
39 #include "mark-compact.h"
40 #include "natives.h"
41 #include "objects-visiting.h"
42 #include "runtime-profiler.h"
43 #include "scanner-base.h"
44 #include "scopeinfo.h"
45 #include "snapshot.h"
46 #include "v8threads.h"
47 #include "vm-state-inl.h"
48 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
49 #include "regexp-macro-assembler.h"
50 #include "arm/regexp-macro-assembler-arm.h"
51 #endif
52 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
53 #include "regexp-macro-assembler.h"
54 #include "mips/regexp-macro-assembler-mips.h"
55 #endif
56
57 namespace v8 {
58 namespace internal {
59
60
61 static const intptr_t kMinimumPromotionLimit = 2 * MB;
62 static const intptr_t kMinimumAllocationLimit = 8 * MB;
63
64
65 static Mutex* gc_initializer_mutex = OS::CreateMutex();
66
67
68 Heap::Heap()
69 : isolate_(NULL),
70 // semispace_size_ should be a power of 2 and old_generation_size_ should be
71 // a multiple of Page::kPageSize.
72 #if defined(ANDROID)
73 reserved_semispace_size_(2*MB),
74 max_semispace_size_(2*MB),
75 initial_semispace_size_(128*KB),
76 max_old_generation_size_(192*MB),
77 max_executable_size_(max_old_generation_size_),
78 code_range_size_(0),
79 #elif defined(V8_TARGET_ARCH_X64)
80 reserved_semispace_size_(16*MB),
81 max_semispace_size_(16*MB),
82 initial_semispace_size_(1*MB),
83 max_old_generation_size_(1*GB),
84 max_executable_size_(256*MB),
85 code_range_size_(512*MB),
86 #else
87 reserved_semispace_size_(8*MB),
88 max_semispace_size_(8*MB),
89 initial_semispace_size_(512*KB),
90 max_old_generation_size_(512*MB),
91 max_executable_size_(128*MB),
92 code_range_size_(0),
93 #endif
94 // Variables set based on semispace_size_ and old_generation_size_ in
95 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
96 // Will be 4 * reserved_semispace_size_ to ensure that young
97 // generation can be aligned to its size.
98 survived_since_last_expansion_(0),
99 always_allocate_scope_depth_(0),
100 linear_allocation_scope_depth_(0),
101 contexts_disposed_(0),
102 new_space_(this),
103 old_pointer_space_(NULL),
104 old_data_space_(NULL),
105 code_space_(NULL),
106 map_space_(NULL),
107 cell_space_(NULL),
108 lo_space_(NULL),
109 gc_state_(NOT_IN_GC),
110 mc_count_(0),
111 ms_count_(0),
112 gc_count_(0),
113 unflattened_strings_length_(0),
114 #ifdef DEBUG
115 allocation_allowed_(true),
116 allocation_timeout_(0),
117 disallow_allocation_failure_(false),
118 debug_utils_(NULL),
119 #endif // DEBUG
120 old_gen_promotion_limit_(kMinimumPromotionLimit),
121 old_gen_allocation_limit_(kMinimumAllocationLimit),
122 external_allocation_limit_(0),
123 amount_of_external_allocated_memory_(0),
124 amount_of_external_allocated_memory_at_last_global_gc_(0),
125 old_gen_exhausted_(false),
126 hidden_symbol_(NULL),
127 global_gc_prologue_callback_(NULL),
128 global_gc_epilogue_callback_(NULL),
129 gc_safe_size_of_old_object_(NULL),
130 total_regexp_code_generated_(0),
131 tracer_(NULL),
132 young_survivors_after_last_gc_(0),
133 high_survival_rate_period_length_(0),
134 survival_rate_(0),
135 previous_survival_rate_trend_(Heap::STABLE),
136 survival_rate_trend_(Heap::STABLE),
137 max_gc_pause_(0),
138 max_alive_after_gc_(0),
139 min_in_mutator_(kMaxInt),
140 alive_after_last_gc_(0),
141 last_gc_end_timestamp_(0.0),
142 page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
143 number_idle_notifications_(0),
144 last_idle_notification_gc_count_(0),
145 last_idle_notification_gc_count_init_(false),
146 configured_(false),
147 is_safe_to_read_maps_(true) {
148 // Allow build-time customization of the max semispace size. Building
149 // V8 with snapshots and a non-default max semispace size is much
150 // easier if you can define it as part of the build environment.
151 #if defined(V8_MAX_SEMISPACE_SIZE)
152 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
153 #endif
154
155 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
156 global_contexts_list_ = NULL;
157 mark_compact_collector_.heap_ = this;
158 external_string_table_.heap_ = this;
159 }
160
161
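// Returns the combined capacity of all spaces except the large object space.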
162 intptr_t Heap::Capacity() {
163 if (!HasBeenSetup()) return 0;
164
165 return new_space_.Capacity() +
166 old_pointer_space_->Capacity() +
167 old_data_space_->Capacity() +
168 code_space_->Capacity() +
169 map_space_->Capacity() +
170 cell_space_->Capacity();
171 }
172
173
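// Returns the memory currently committed for the heap, including the size of
// the large object space.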
174 intptr_t Heap::CommittedMemory() {
175 if (!HasBeenSetup()) return 0;
176
177 return new_space_.CommittedMemory() +
178 old_pointer_space_->CommittedMemory() +
179 old_data_space_->CommittedMemory() +
180 code_space_->CommittedMemory() +
181 map_space_->CommittedMemory() +
182 cell_space_->CommittedMemory() +
183 lo_space_->Size();
184 }
185
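// Returns the executable memory currently committed for the heap.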
186 intptr_t Heap::CommittedMemoryExecutable() {
187 if (!HasBeenSetup()) return 0;
188
189 return isolate()->memory_allocator()->SizeExecutable();
190 }
191
192
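// Returns the bytes still available in all spaces except the large object
// space.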
193 intptr_t Heap::Available() {
194 if (!HasBeenSetup()) return 0;
195
196 return new_space_.Available() +
197 old_pointer_space_->Available() +
198 old_data_space_->Available() +
199 code_space_->Available() +
200 map_space_->Available() +
201 cell_space_->Available();
202 }
203
204
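// The heap counts as set up once all of its paged spaces and the large object
// space have been created.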
205 bool Heap::HasBeenSetup() {
206 return old_pointer_space_ != NULL &&
207 old_data_space_ != NULL &&
208 code_space_ != NULL &&
209 map_space_ != NULL &&
210 cell_space_ != NULL &&
211 lo_space_ != NULL;
212 }
213
214
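// Returns the size of an old-space object whose map word may still carry mark
// and overflow bits.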
215 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
216 ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
217 ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
218 MapWord map_word = object->map_word();
219 map_word.ClearMark();
220 map_word.ClearOverflow();
221 return object->SizeFromMap(map_word.ToMap());
222 }
223
224
225 int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
226 ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
227 ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
228 uint32_t marker = Memory::uint32_at(object->address());
229 if (marker == MarkCompactCollector::kSingleFreeEncoding) {
230 return kIntSize;
231 } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
232 return Memory::int_at(object->address() + kIntSize);
233 } else {
234 MapWord map_word = object->map_word();
235 Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
236 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
237 return object->SizeFromMap(map);
238 }
239 }
240
241
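// Chooses the collector for a failed allocation in the given space: a full
// mark-compact collection when a global GC is requested or the old generation
// is under pressure, otherwise a scavenge.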
242 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
243 // Is global GC requested?
244 if (space != NEW_SPACE || FLAG_gc_global) {
245 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
246 return MARK_COMPACTOR;
247 }
248
249 // Is enough data promoted to justify a global GC?
250 if (OldGenerationPromotionLimitReached()) {
251 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
252 return MARK_COMPACTOR;
253 }
254
255 // Have allocation in OLD and LO failed?
256 if (old_gen_exhausted_) {
257 isolate_->counters()->
258 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
259 return MARK_COMPACTOR;
260 }
261
262 // Is there enough space left in OLD to guarantee that a scavenge can
263 // succeed?
264 //
265 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
266 // for object promotion. It counts only the bytes that the memory
267 // allocator has not yet allocated from the OS and assigned to any space,
268 // and does not count available bytes already in the old space or code
269 // space. Undercounting is safe---we may get an unrequested full GC when
270 // a scavenge would have succeeded.
271 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
272 isolate_->counters()->
273 gc_compactor_caused_by_oldspace_exhaustion()->Increment();
274 return MARK_COMPACTOR;
275 }
276
277 // Default
278 return SCAVENGER;
279 }
280
281
282 // TODO(1238405): Combine the infrastructure for --heap-stats and
283 // --log-gc to avoid the complicated preprocessor and flag testing.
284 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
285 void Heap::ReportStatisticsBeforeGC() {
286 // Heap::ReportHeapStatistics will also log NewSpace statistics when
287 // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
288 // following logic is used to avoid double logging.
289 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
290 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
291 if (FLAG_heap_stats) {
292 ReportHeapStatistics("Before GC");
293 } else if (FLAG_log_gc) {
294 new_space_.ReportStatistics();
295 }
296 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
297 #elif defined(DEBUG)
298 if (FLAG_heap_stats) {
299 new_space_.CollectStatistics();
300 ReportHeapStatistics("Before GC");
301 new_space_.ClearHistograms();
302 }
303 #elif defined(ENABLE_LOGGING_AND_PROFILING)
304 if (FLAG_log_gc) {
305 new_space_.CollectStatistics();
306 new_space_.ReportStatistics();
307 new_space_.ClearHistograms();
308 }
309 #endif
310 }
311
312
313 #if defined(ENABLE_LOGGING_AND_PROFILING)
314 void Heap::PrintShortHeapStatistics() {
315 if (!FLAG_trace_gc_verbose) return;
316 PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
317 ", available: %8" V8_PTR_PREFIX "d\n",
318 isolate_->memory_allocator()->Size(),
319 isolate_->memory_allocator()->Available());
320 PrintF("New space, used: %8" V8_PTR_PREFIX "d"
321 ", available: %8" V8_PTR_PREFIX "d\n",
322 Heap::new_space_.Size(),
323 new_space_.Available());
324 PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
325 ", available: %8" V8_PTR_PREFIX "d"
326 ", waste: %8" V8_PTR_PREFIX "d\n",
327 old_pointer_space_->Size(),
328 old_pointer_space_->Available(),
329 old_pointer_space_->Waste());
330 PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
331 ", available: %8" V8_PTR_PREFIX "d"
332 ", waste: %8" V8_PTR_PREFIX "d\n",
333 old_data_space_->Size(),
334 old_data_space_->Available(),
335 old_data_space_->Waste());
336 PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
337 ", available: %8" V8_PTR_PREFIX "d"
338 ", waste: %8" V8_PTR_PREFIX "d\n",
339 code_space_->Size(),
340 code_space_->Available(),
341 code_space_->Waste());
342 PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
343 ", available: %8" V8_PTR_PREFIX "d"
344 ", waste: %8" V8_PTR_PREFIX "d\n",
345 map_space_->Size(),
346 map_space_->Available(),
347 map_space_->Waste());
348 PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
349 ", available: %8" V8_PTR_PREFIX "d"
350 ", waste: %8" V8_PTR_PREFIX "d\n",
351 cell_space_->Size(),
352 cell_space_->Available(),
353 cell_space_->Waste());
354 PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
355 ", available: %8" V8_PTR_PREFIX "d\n",
356 lo_space_->Size(),
357 lo_space_->Available());
358 }
359 #endif
360
361
362 // TODO(1238405): Combine the infrastructure for --heap-stats and
363 // --log-gc to avoid the complicated preprocessor and flag testing.
364 void Heap::ReportStatisticsAfterGC() {
365   // As before the GC, we use some complicated logic to ensure that
366   // NewSpace statistics are logged exactly once when --log-gc is turned on.
367 #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
368 if (FLAG_heap_stats) {
369 new_space_.CollectStatistics();
370 ReportHeapStatistics("After GC");
371 } else if (FLAG_log_gc) {
372 new_space_.ReportStatistics();
373 }
374 #elif defined(DEBUG)
375 if (FLAG_heap_stats) ReportHeapStatistics("After GC");
376 #elif defined(ENABLE_LOGGING_AND_PROFILING)
377 if (FLAG_log_gc) new_space_.ReportStatistics();
378 #endif
379 }
380 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
381
382
383 void Heap::GarbageCollectionPrologue() {
384 isolate_->transcendental_cache()->Clear();
385 ClearJSFunctionResultCaches();
386 gc_count_++;
387 unflattened_strings_length_ = 0;
388 #ifdef DEBUG
389 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
390 allow_allocation(false);
391
392 if (FLAG_verify_heap) {
393 Verify();
394 }
395
396 if (FLAG_gc_verbose) Print();
397 #endif
398
399 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
400 ReportStatisticsBeforeGC();
401 #endif
402
403 LiveObjectList::GCPrologue();
404 }
405
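// Returns the total size of objects allocated in all spaces.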
406 intptr_t Heap::SizeOfObjects() {
407 intptr_t total = 0;
408 AllSpaces spaces;
409 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
410 total += space->SizeOfObjects();
411 }
412 return total;
413 }
414
415 void Heap::GarbageCollectionEpilogue() {
416 LiveObjectList::GCEpilogue();
417 #ifdef DEBUG
418 allow_allocation(true);
419 ZapFromSpace();
420
421 if (FLAG_verify_heap) {
422 Verify();
423 }
424
425 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
426 if (FLAG_print_handles) PrintHandles();
427 if (FLAG_gc_verbose) Print();
428 if (FLAG_code_stats) ReportCodeStatistics("After GC");
429 #endif
430
431 isolate_->counters()->alive_after_last_gc()->Set(
432 static_cast<int>(SizeOfObjects()));
433
434 isolate_->counters()->symbol_table_capacity()->Set(
435 symbol_table()->Capacity());
436 isolate_->counters()->number_of_symbols()->Set(
437 symbol_table()->NumberOfElements());
438 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
439 ReportStatisticsAfterGC();
440 #endif
441 #ifdef ENABLE_DEBUGGER_SUPPORT
442 isolate_->debug()->AfterGarbageCollection();
443 #endif
444 }
445
446
447 void Heap::CollectAllGarbage(bool force_compaction) {
448 // Since we are ignoring the return value, the exact choice of space does
449 // not matter, so long as we do not specify NEW_SPACE, which would not
450 // cause a full GC.
451 mark_compact_collector_.SetForceCompaction(force_compaction);
452 CollectGarbage(OLD_POINTER_SPACE);
453 mark_compact_collector_.SetForceCompaction(false);
454 }
455
456
457 void Heap::CollectAllAvailableGarbage() {
458 // Since we are ignoring the return value, the exact choice of space does
459 // not matter, so long as we do not specify NEW_SPACE, which would not
460 // cause a full GC.
461 mark_compact_collector()->SetForceCompaction(true);
462
463   // A major GC would invoke weak handle callbacks on weakly reachable
464   // handles, but won't collect weakly reachable objects until the next
465   // major GC. Therefore, if we collect aggressively and a weak handle
466   // callback has been invoked, we rerun the major GC to release objects
467   // which became garbage.
468   // Note: as weak callbacks can execute arbitrary code, we cannot
469   // hope that eventually there will be no weak callback invocations.
470   // Therefore we stop recollecting after several attempts.
471 const int kMaxNumberOfAttempts = 7;
472 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
473 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
474 break;
475 }
476 }
477 mark_compact_collector()->SetForceCompaction(false);
478 }
479
480
481 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
482 // The VM is in the GC state until exiting this function.
483 VMState state(isolate_, GC);
484
485 #ifdef DEBUG
486 // Reset the allocation timeout to the GC interval, but make sure to
487 // allow at least a few allocations after a collection. The reason
488 // for this is that we have a lot of allocation sequences and we
489 // assume that a garbage collection will allow the subsequent
490 // allocation attempts to go through.
491 allocation_timeout_ = Max(6, FLAG_gc_interval);
492 #endif
493
494 bool next_gc_likely_to_collect_more = false;
495
496 { GCTracer tracer(this);
497 GarbageCollectionPrologue();
498 // The GC count was incremented in the prologue. Tell the tracer about
499 // it.
500 tracer.set_gc_count(gc_count_);
501
502 // Tell the tracer which collector we've selected.
503 tracer.set_collector(collector);
504
505 HistogramTimer* rate = (collector == SCAVENGER)
506 ? isolate_->counters()->gc_scavenger()
507 : isolate_->counters()->gc_compactor();
508 rate->Start();
509 next_gc_likely_to_collect_more =
510 PerformGarbageCollection(collector, &tracer);
511 rate->Stop();
512
513 GarbageCollectionEpilogue();
514 }
515
516
517 #ifdef ENABLE_LOGGING_AND_PROFILING
518 if (FLAG_log_gc) HeapProfiler::WriteSample();
519 #endif
520
521 return next_gc_likely_to_collect_more;
522 }
523
524
525 void Heap::PerformScavenge() {
526 GCTracer tracer(this);
527 PerformGarbageCollection(SCAVENGER, &tracer);
528 }
529
530
531 #ifdef DEBUG
532 // Helper class for verifying the symbol table.
533 class SymbolTableVerifier : public ObjectVisitor {
534 public:
535   void VisitPointers(Object** start, Object** end) {
536 // Visit all HeapObject pointers in [start, end).
537 for (Object** p = start; p < end; p++) {
538 if ((*p)->IsHeapObject()) {
539 // Check that the symbol is actually a symbol.
540 ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
541 }
542 }
543 }
544 };
545 #endif // DEBUG
546
547
548 static void VerifySymbolTable() {
549 #ifdef DEBUG
550 SymbolTableVerifier verifier;
551 HEAP->symbol_table()->IterateElements(&verifier);
552 #endif // DEBUG
553 }
554
555
556 void Heap::ReserveSpace(
557 int new_space_size,
558 int pointer_space_size,
559 int data_space_size,
560 int code_space_size,
561 int map_space_size,
562 int cell_space_size,
563 int large_object_size) {
564 NewSpace* new_space = Heap::new_space();
565 PagedSpace* old_pointer_space = Heap::old_pointer_space();
566 PagedSpace* old_data_space = Heap::old_data_space();
567 PagedSpace* code_space = Heap::code_space();
568 PagedSpace* map_space = Heap::map_space();
569 PagedSpace* cell_space = Heap::cell_space();
570 LargeObjectSpace* lo_space = Heap::lo_space();
571 bool gc_performed = true;
572 while (gc_performed) {
573 gc_performed = false;
574 if (!new_space->ReserveSpace(new_space_size)) {
575 Heap::CollectGarbage(NEW_SPACE);
576 gc_performed = true;
577 }
578 if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
579 Heap::CollectGarbage(OLD_POINTER_SPACE);
580 gc_performed = true;
581 }
582 if (!(old_data_space->ReserveSpace(data_space_size))) {
583 Heap::CollectGarbage(OLD_DATA_SPACE);
584 gc_performed = true;
585 }
586 if (!(code_space->ReserveSpace(code_space_size))) {
587 Heap::CollectGarbage(CODE_SPACE);
588 gc_performed = true;
589 }
590 if (!(map_space->ReserveSpace(map_space_size))) {
591 Heap::CollectGarbage(MAP_SPACE);
592 gc_performed = true;
593 }
594 if (!(cell_space->ReserveSpace(cell_space_size))) {
595 Heap::CollectGarbage(CELL_SPACE);
596 gc_performed = true;
597 }
598 // We add a slack-factor of 2 in order to have space for a series of
599 // large-object allocations that are only just larger than the page size.
600 large_object_size *= 2;
601 // The ReserveSpace method on the large object space checks how much
602 // we can expand the old generation. This includes expansion caused by
603 // allocation in the other spaces.
604 large_object_size += cell_space_size + map_space_size + code_space_size +
605 data_space_size + pointer_space_size;
606 if (!(lo_space->ReserveSpace(large_object_size))) {
607 Heap::CollectGarbage(LO_SPACE);
608 gc_performed = true;
609 }
610 }
611 }
612
613
614 void Heap::EnsureFromSpaceIsCommitted() {
615 if (new_space_.CommitFromSpaceIfNeeded()) return;
616
617 // Committing memory to from space failed.
618 // Try shrinking and try again.
619 PagedSpaces spaces;
620 for (PagedSpace* space = spaces.next();
621 space != NULL;
622 space = spaces.next()) {
623 space->RelinkPageListInChunkOrder(true);
624 }
625
626 Shrink();
627 if (new_space_.CommitFromSpaceIfNeeded()) return;
628
629 // Committing memory to from space failed again.
630 // Memory is exhausted and we will die.
631 V8::FatalProcessOutOfMemory("Committing semi space failed.");
632 }
633
634
635 void Heap::ClearJSFunctionResultCaches() {
636 if (isolate_->bootstrapper()->IsActive()) return;
637
638 Object* context = global_contexts_list_;
639 while (!context->IsUndefined()) {
640 // Get the caches for this context:
641 FixedArray* caches =
642 Context::cast(context)->jsfunction_result_caches();
643 // Clear the caches:
644 int length = caches->length();
645 for (int i = 0; i < length; i++) {
646 JSFunctionResultCache::cast(caches->get(i))->Clear();
647 }
648 // Get the next context:
649 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
650 }
651 }
652
653
654
655 void Heap::ClearNormalizedMapCaches() {
656 if (isolate_->bootstrapper()->IsActive()) return;
657
658 Object* context = global_contexts_list_;
659 while (!context->IsUndefined()) {
660 Context::cast(context)->normalized_map_cache()->Clear();
661 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
662 }
663 }
664
665
666 #ifdef DEBUG
667
668 enum PageWatermarkValidity {
669 ALL_VALID,
670 ALL_INVALID
671 };
672
673 static void VerifyPageWatermarkValidity(PagedSpace* space,
674 PageWatermarkValidity validity) {
675 PageIterator it(space, PageIterator::PAGES_IN_USE);
676 bool expected_value = (validity == ALL_VALID);
677 while (it.has_next()) {
678 Page* page = it.next();
679 ASSERT(page->IsWatermarkValid() == expected_value);
680 }
681 }
682 #endif
683
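// Recomputes the young generation survival rate from the survivor count of
// the last GC and updates the survival rate trend (INCREASING, STABLE or
// DECREASING).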
684 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
685 double survival_rate =
686 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
687 start_new_space_size;
688
689 if (survival_rate > kYoungSurvivalRateThreshold) {
690 high_survival_rate_period_length_++;
691 } else {
692 high_survival_rate_period_length_ = 0;
693 }
694
695 double survival_rate_diff = survival_rate_ - survival_rate;
696
697 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
698 set_survival_rate_trend(DECREASING);
699 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
700 set_survival_rate_trend(INCREASING);
701 } else {
702 set_survival_rate_trend(STABLE);
703 }
704
705 survival_rate_ = survival_rate;
706 }
707
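// Performs the actual garbage collection: invokes the registered prologue and
// epilogue callbacks, runs the selected collector and, after a mark-compact,
// raises the old generation limits. Returns whether the weak handle
// post-processing indicates that another GC is likely to free more memory.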
708 bool Heap::PerformGarbageCollection(GarbageCollector collector,
709 GCTracer* tracer) {
710 bool next_gc_likely_to_collect_more = false;
711
712 if (collector != SCAVENGER) {
713 PROFILE(isolate_, CodeMovingGCEvent());
714 }
715
716 VerifySymbolTable();
717 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
718 ASSERT(!allocation_allowed_);
719 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
720 global_gc_prologue_callback_();
721 }
722
723 GCType gc_type =
724 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
725
726 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
727 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
728 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
729 }
730 }
731
732 EnsureFromSpaceIsCommitted();
733
734 int start_new_space_size = Heap::new_space()->SizeAsInt();
735
736 if (collector == MARK_COMPACTOR) {
737 // Perform mark-sweep with optional compaction.
738 MarkCompact(tracer);
739
740 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
741 IsStableOrIncreasingSurvivalTrend();
742
743 UpdateSurvivalRateTrend(start_new_space_size);
744
745 intptr_t old_gen_size = PromotedSpaceSize();
746 old_gen_promotion_limit_ =
747 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
748 old_gen_allocation_limit_ =
749 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
750
751 if (high_survival_rate_during_scavenges &&
752 IsStableOrIncreasingSurvivalTrend()) {
753       // Stable high survival rates of young objects both during partial and
754       // full collections indicate that the mutator is either building or
755       // modifying a structure with a long lifetime.
756       // In this case we aggressively raise the old generation memory limits
757       // to postpone the subsequent mark-sweep collection and thus trade
758       // memory space for mutation speed.
759 old_gen_promotion_limit_ *= 2;
760 old_gen_allocation_limit_ *= 2;
761 }
762
763 old_gen_exhausted_ = false;
764 } else {
765 tracer_ = tracer;
766 Scavenge();
767 tracer_ = NULL;
768
769 UpdateSurvivalRateTrend(start_new_space_size);
770 }
771
772 isolate_->counters()->objs_since_last_young()->Set(0);
773
774 if (collector == MARK_COMPACTOR) {
775 DisableAssertNoAllocation allow_allocation;
776 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
777 next_gc_likely_to_collect_more =
778 isolate_->global_handles()->PostGarbageCollectionProcessing();
779 }
780
781 // Update relocatables.
782 Relocatable::PostGarbageCollectionProcessing();
783
784 if (collector == MARK_COMPACTOR) {
785 // Register the amount of external allocated memory.
786 amount_of_external_allocated_memory_at_last_global_gc_ =
787 amount_of_external_allocated_memory_;
788 }
789
790 GCCallbackFlags callback_flags = tracer->is_compacting()
791 ? kGCCallbackFlagCompacted
792 : kNoGCCallbackFlags;
793 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
794 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
795 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
796 }
797 }
798
799 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
800 ASSERT(!allocation_allowed_);
801 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
802 global_gc_epilogue_callback_();
803 }
804 VerifySymbolTable();
805
806 return next_gc_likely_to_collect_more;
807 }
808
809
810 void Heap::MarkCompact(GCTracer* tracer) {
811 gc_state_ = MARK_COMPACT;
812 LOG(isolate_, ResourceEvent("markcompact", "begin"));
813
814 mark_compact_collector_.Prepare(tracer);
815
816 bool is_compacting = mark_compact_collector_.IsCompacting();
817
818 if (is_compacting) {
819 mc_count_++;
820 } else {
821 ms_count_++;
822 }
823 tracer->set_full_gc_count(mc_count_ + ms_count_);
824
825 MarkCompactPrologue(is_compacting);
826
827 is_safe_to_read_maps_ = false;
828 mark_compact_collector_.CollectGarbage();
829 is_safe_to_read_maps_ = true;
830
831 LOG(isolate_, ResourceEvent("markcompact", "end"));
832
833 gc_state_ = NOT_IN_GC;
834
835 Shrink();
836
837 isolate_->counters()->objs_since_last_full()->Set(0);
838
839 contexts_disposed_ = 0;
840 }
841
842
843 void Heap::MarkCompactPrologue(bool is_compacting) {
844 // At any old GC clear the keyed lookup cache to enable collection of unused
845 // maps.
846 isolate_->keyed_lookup_cache()->Clear();
847 isolate_->context_slot_cache()->Clear();
848 isolate_->descriptor_lookup_cache()->Clear();
849
850 isolate_->compilation_cache()->MarkCompactPrologue();
851
852 CompletelyClearInstanceofCache();
853
854 if (is_compacting) FlushNumberStringCache();
855
856 ClearNormalizedMapCaches();
857 }
858
859
860 Object* Heap::FindCodeObject(Address a) {
861 Object* obj = NULL; // Initialization to please compiler.
862 { MaybeObject* maybe_obj = code_space_->FindObject(a);
863 if (!maybe_obj->ToObject(&obj)) {
864 obj = lo_space_->FindObject(a)->ToObjectUnchecked();
865 }
866 }
867 return obj;
868 }
869
870
871 // Helper class for copying HeapObjects
872 class ScavengeVisitor: public ObjectVisitor {
873 public:
874   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
875
876   void VisitPointer(Object** p) { ScavengePointer(p); }
877
878   void VisitPointers(Object** start, Object** end) {
879 // Copy all HeapObject pointers in [start, end)
880 for (Object** p = start; p < end; p++) ScavengePointer(p);
881 }
882
883 private:
884   void ScavengePointer(Object** p) {
885 Object* object = *p;
886 if (!heap_->InNewSpace(object)) return;
887 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
888 reinterpret_cast<HeapObject*>(object));
889 }
890
891 Heap* heap_;
892 };
893
894
895 #ifdef DEBUG
896 // Visitor class to verify pointers in code or data space do not point into
897 // new space.
898 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
899 public:
900   void VisitPointers(Object** start, Object** end) {
901 for (Object** current = start; current < end; current++) {
902 if ((*current)->IsHeapObject()) {
903 ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
904 }
905 }
906 }
907 };
908
909
910 static void VerifyNonPointerSpacePointers() {
911 // Verify that there are no pointers to new space in spaces where we
912 // do not expect them.
913 VerifyNonPointerSpacePointersVisitor v;
914 HeapObjectIterator code_it(HEAP->code_space());
915 for (HeapObject* object = code_it.next();
916 object != NULL; object = code_it.next())
917 object->Iterate(&v);
918
919 HeapObjectIterator data_it(HEAP->old_data_space());
920 for (HeapObject* object = data_it.next();
921 object != NULL; object = data_it.next())
922 object->Iterate(&v);
923 }
924 #endif
925
926
927 void Heap::CheckNewSpaceExpansionCriteria() {
928 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
929 survived_since_last_expansion_ > new_space_.Capacity()) {
930 // Grow the size of new space if there is room to grow and enough
931 // data has survived scavenge since the last expansion.
932 new_space_.Grow();
933 survived_since_last_expansion_ = 0;
934 }
935 }
936
937
938 void Heap::Scavenge() {
939 #ifdef DEBUG
940 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
941 #endif
942
943 gc_state_ = SCAVENGE;
944
945 SwitchScavengingVisitorsTableIfProfilingWasEnabled();
946
947 Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
948 #ifdef DEBUG
949 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
950 VerifyPageWatermarkValidity(map_space_, ALL_VALID);
951 #endif
952
953 // We do not update an allocation watermark of the top page during linear
954 // allocation to avoid overhead. So to maintain the watermark invariant
955 // we have to manually cache the watermark and mark the top page as having an
956 // invalid watermark. This guarantees that dirty regions iteration will use a
957 // correct watermark even if a linear allocation happens.
958 old_pointer_space_->FlushTopPageWatermark();
959 map_space_->FlushTopPageWatermark();
960
961 // Implements Cheney's copying algorithm
962 LOG(isolate_, ResourceEvent("scavenge", "begin"));
963
964 // Clear descriptor cache.
965 isolate_->descriptor_lookup_cache()->Clear();
966
967 // Used for updating survived_since_last_expansion_ at function end.
968 intptr_t survived_watermark = PromotedSpaceSize();
969
970 CheckNewSpaceExpansionCriteria();
971
972 // Flip the semispaces. After flipping, to space is empty, from space has
973 // live objects.
974 new_space_.Flip();
975 new_space_.ResetAllocationInfo();
976
977 // We need to sweep newly copied objects which can be either in the
978 // to space or promoted to the old generation. For to-space
979 // objects, we treat the bottom of the to space as a queue. Newly
980 // copied and unswept objects lie between a 'front' mark and the
981 // allocation pointer.
982 //
983 // Promoted objects can go into various old-generation spaces, and
984 // can be allocated internally in the spaces (from the free list).
985 // We treat the top of the to space as a queue of addresses of
986 // promoted objects. The addresses of newly promoted and unswept
987 // objects lie between a 'front' mark and a 'rear' mark that is
988 // updated as a side effect of promoting an object.
989 //
990 // There is guaranteed to be enough room at the top of the to space
991 // for the addresses of promoted objects: every object promoted
992 // frees up its size in bytes from the top of the new space, and
993 // objects are at least one pointer in size.
994 Address new_space_front = new_space_.ToSpaceLow();
995 promotion_queue_.Initialize(new_space_.ToSpaceHigh());
996
997 is_safe_to_read_maps_ = false;
998 ScavengeVisitor scavenge_visitor(this);
999 // Copy roots.
1000 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1001
1002 // Copy objects reachable from the old generation. By definition,
1003 // there are no intergenerational pointers in code or data spaces.
1004 IterateDirtyRegions(old_pointer_space_,
1005 &Heap::IteratePointersInDirtyRegion,
1006 &ScavengePointer,
1007 WATERMARK_CAN_BE_INVALID);
1008
1009 IterateDirtyRegions(map_space_,
1010 &IteratePointersInDirtyMapsRegion,
1011 &ScavengePointer,
1012 WATERMARK_CAN_BE_INVALID);
1013
1014 lo_space_->IterateDirtyRegions(&ScavengePointer);
1015
1016 // Copy objects reachable from cells by scavenging cell values directly.
1017 HeapObjectIterator cell_iterator(cell_space_);
1018 for (HeapObject* cell = cell_iterator.next();
1019 cell != NULL; cell = cell_iterator.next()) {
1020 if (cell->IsJSGlobalPropertyCell()) {
1021 Address value_address =
1022 reinterpret_cast<Address>(cell) +
1023 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1024 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1025 }
1026 }
1027
1028   // Scavenge objects reachable from the global contexts list directly.
1029 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
1030
1031 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1032
1033 UpdateNewSpaceReferencesInExternalStringTable(
1034 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1035
1036 LiveObjectList::UpdateReferencesForScavengeGC();
1037 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1038
1039 ASSERT(new_space_front == new_space_.top());
1040
1041 is_safe_to_read_maps_ = true;
1042
1043 // Set age mark.
1044 new_space_.set_age_mark(new_space_.top());
1045
1046 // Update how much has survived scavenge.
1047 IncrementYoungSurvivorsCounter(static_cast<int>(
1048 (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
1049
1050 LOG(isolate_, ResourceEvent("scavenge", "end"));
1051
1052 gc_state_ = NOT_IN_GC;
1053 }
1054
1055
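// Returns the post-scavenge location of a new-space external string, or NULL
// if the string was unreachable and has been finalized.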
1056 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1057 Object** p) {
1058 MapWord first_word = HeapObject::cast(*p)->map_word();
1059
1060 if (!first_word.IsForwardingAddress()) {
1061 // Unreachable external string can be finalized.
1062 heap->FinalizeExternalString(String::cast(*p));
1063 return NULL;
1064 }
1065
1066 // String is still reachable.
1067 return String::cast(first_word.ToForwardingAddress());
1068 }
1069
1070
1071 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1072 ExternalStringTableUpdaterCallback updater_func) {
1073 external_string_table_.Verify();
1074
1075 if (external_string_table_.new_space_strings_.is_empty()) return;
1076
1077 Object** start = &external_string_table_.new_space_strings_[0];
1078 Object** end = start + external_string_table_.new_space_strings_.length();
1079 Object** last = start;
1080
1081 for (Object** p = start; p < end; ++p) {
1082 ASSERT(InFromSpace(*p));
1083 String* target = updater_func(this, p);
1084
1085 if (target == NULL) continue;
1086
1087 ASSERT(target->IsExternalString());
1088
1089 if (InNewSpace(target)) {
1090 // String is still in new space. Update the table entry.
1091 *last = target;
1092 ++last;
1093 } else {
1094 // String got promoted. Move it to the old string list.
1095 external_string_table_.AddOldString(target);
1096 }
1097 }
1098
1099 ASSERT(last <= end);
1100 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1101 }
1102
1103
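// Walks the weak list of optimized functions for a context, drops the entries
// the retainer does not keep and returns the new head of the list.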
1104 static Object* ProcessFunctionWeakReferences(Heap* heap,
1105 Object* function,
1106 WeakObjectRetainer* retainer) {
1107 Object* head = heap->undefined_value();
1108 JSFunction* tail = NULL;
1109 Object* candidate = function;
1110 while (candidate != heap->undefined_value()) {
1111 // Check whether to keep the candidate in the list.
1112 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1113 Object* retain = retainer->RetainAs(candidate);
1114 if (retain != NULL) {
1115 if (head == heap->undefined_value()) {
1116 // First element in the list.
1117 head = candidate_function;
1118 } else {
1119 // Subsequent elements in the list.
1120 ASSERT(tail != NULL);
1121 tail->set_next_function_link(candidate_function);
1122 }
1123 // Retained function is new tail.
1124 tail = candidate_function;
1125 }
1126 // Move to next element in the list.
1127 candidate = candidate_function->next_function_link();
1128 }
1129
1130   // Terminate the list if it contains one or more elements.
1131 if (tail != NULL) {
1132 tail->set_next_function_link(heap->undefined_value());
1133 }
1134
1135 return head;
1136 }
1137
1138
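// Walks the weak list of global contexts, drops the contexts the retainer
// does not keep and processes each retained context's weak list of optimized
// functions.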
1139 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1140 Object* head = undefined_value();
1141 Context* tail = NULL;
1142 Object* candidate = global_contexts_list_;
1143 while (candidate != undefined_value()) {
1144 // Check whether to keep the candidate in the list.
1145 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1146 Object* retain = retainer->RetainAs(candidate);
1147 if (retain != NULL) {
1148 if (head == undefined_value()) {
1149 // First element in the list.
1150 head = candidate_context;
1151 } else {
1152 // Subsequent elements in the list.
1153 ASSERT(tail != NULL);
1154 tail->set_unchecked(this,
1155 Context::NEXT_CONTEXT_LINK,
1156 candidate_context,
1157 UPDATE_WRITE_BARRIER);
1158 }
1159 // Retained context is new tail.
1160 tail = candidate_context;
1161
1162 // Process the weak list of optimized functions for the context.
1163 Object* function_list_head =
1164 ProcessFunctionWeakReferences(
1165 this,
1166 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1167 retainer);
1168 candidate_context->set_unchecked(this,
1169 Context::OPTIMIZED_FUNCTIONS_LIST,
1170 function_list_head,
1171 UPDATE_WRITE_BARRIER);
1172 }
1173 // Move to next element in the list.
1174 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1175 }
1176
1177   // Terminate the list if it contains one or more elements.
1178 if (tail != NULL) {
1179 tail->set_unchecked(this,
1180 Context::NEXT_CONTEXT_LINK,
1181 Heap::undefined_value(),
1182 UPDATE_WRITE_BARRIER);
1183 }
1184
1185 // Update the head of the list of contexts.
1186 global_contexts_list_ = head;
1187 }
1188
1189
1190 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1191 public:
1192   static inline void VisitPointer(Heap* heap, Object** p) {
1193 Object* object = *p;
1194 if (!heap->InNewSpace(object)) return;
1195 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1196 reinterpret_cast<HeapObject*>(object));
1197 }
1198 };
1199
1200
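// Drains the scavenger's work queues: iterates newly copied objects in
// to space and newly promoted objects until no unswept objects remain.
// Returns the updated new space front.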
1201 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1202 Address new_space_front) {
1203 do {
1204 ASSERT(new_space_front <= new_space_.top());
1205
1206 // The addresses new_space_front and new_space_.top() define a
1207 // queue of unprocessed copied objects. Process them until the
1208 // queue is empty.
1209 while (new_space_front < new_space_.top()) {
1210 HeapObject* object = HeapObject::FromAddress(new_space_front);
1211 new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
1212 }
1213
1214 // Promote and process all the to-be-promoted objects.
1215 while (!promotion_queue_.is_empty()) {
1216 HeapObject* target;
1217 int size;
1218 promotion_queue_.remove(&target, &size);
1219
1220       // The promoted object might already have been partially visited
1221       // during dirty-region iteration. Thus we search specifically for
1222       // pointers into the from semispace instead of looking for pointers
1223       // into new space.
1224 ASSERT(!target->IsMap());
1225 IterateAndMarkPointersToFromSpace(target->address(),
1226 target->address() + size,
1227 &ScavengePointer);
1228 }
1229
1230 // Take another spin if there are now unswept objects in new space
1231 // (there are currently no more unswept promoted objects).
1232 } while (new_space_front < new_space_.top());
1233
1234 return new_space_front;
1235 }
1236
1237
1238 enum LoggingAndProfiling {
1239 LOGGING_AND_PROFILING_ENABLED,
1240 LOGGING_AND_PROFILING_DISABLED
1241 };
1242
1243
1244 typedef void (*ScavengingCallback)(Map* map,
1245 HeapObject** slot,
1246 HeapObject* object);
1247
1248
1249 static Atomic32 scavenging_visitors_table_mode_;
1250 static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
1251
1252
1253 INLINE(static void DoScavengeObject(Map* map,
1254 HeapObject** slot,
1255 HeapObject* obj));
1256
1257
1258 void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
1259 scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
1260 }
1261
1262
1263 template<LoggingAndProfiling logging_and_profiling_mode>
1264 class ScavengingVisitor : public StaticVisitorBase {
1265 public:
1266   static void Initialize() {
1267 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1268 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1269 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1270 table_.Register(kVisitByteArray, &EvacuateByteArray);
1271 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1272
1273 table_.Register(kVisitGlobalContext,
1274 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1275 template VisitSpecialized<Context::kSize>);
1276
1277 table_.Register(kVisitConsString,
1278 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1279 template VisitSpecialized<ConsString::kSize>);
1280
1281 table_.Register(kVisitSharedFunctionInfo,
1282 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1283 template VisitSpecialized<SharedFunctionInfo::kSize>);
1284
1285 table_.Register(kVisitJSFunction,
1286 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1287 template VisitSpecialized<JSFunction::kSize>);
1288
1289 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1290 kVisitDataObject,
1291 kVisitDataObjectGeneric>();
1292
1293 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1294 kVisitJSObject,
1295 kVisitJSObjectGeneric>();
1296
1297 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1298 kVisitStruct,
1299 kVisitStructGeneric>();
1300 }
1301
1302   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1303 return &table_;
1304 }
1305
1306 private:
1307 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1308 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1309
1310 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1311   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1312 bool should_record = false;
1313 #ifdef DEBUG
1314 should_record = FLAG_heap_stats;
1315 #endif
1316 #ifdef ENABLE_LOGGING_AND_PROFILING
1317 should_record = should_record || FLAG_log_gc;
1318 #endif
1319 if (should_record) {
1320 if (heap->new_space()->Contains(obj)) {
1321 heap->new_space()->RecordAllocation(obj);
1322 } else {
1323 heap->new_space()->RecordPromotion(obj);
1324 }
1325 }
1326 }
1327 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1328
1329 // Helper function used by CopyObject to copy a source object to an
1330 // allocated target object and update the forwarding pointer in the source
1331 // object. Returns the target object.
1332   INLINE(static HeapObject* MigrateObject(Heap* heap,
1333 HeapObject* source,
1334 HeapObject* target,
1335 int size)) {
1336 // Copy the content of source to target.
1337 heap->CopyBlock(target->address(), source->address(), size);
1338
1339 // Set the forwarding address.
1340 source->set_map_word(MapWord::FromForwardingAddress(target));
1341
1342 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1343 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1344 // Update NewSpace stats if necessary.
1345 RecordCopiedObject(heap, target);
1346 #endif
1347 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
1348 #if defined(ENABLE_LOGGING_AND_PROFILING)
1349 Isolate* isolate = heap->isolate();
1350 if (isolate->logger()->is_logging() ||
1351 isolate->cpu_profiler()->is_profiling()) {
1352 if (target->IsSharedFunctionInfo()) {
1353 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1354 source->address(), target->address()));
1355 }
1356 }
1357 #endif
1358 }
1359
1360 return target;
1361 }
1362
1363
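  // Evacuates a single object during a scavenge: promotion candidates are
  // allocated in an old space (or the large object space) and, if they
  // contain pointers, queued for rescanning; objects that are not promoted
  // are copied within new space. MigrateObject installs the forwarding
  // pointer in either case.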
1364 template<ObjectContents object_contents, SizeRestriction size_restriction>
1365   static inline void EvacuateObject(Map* map,
1366 HeapObject** slot,
1367 HeapObject* object,
1368 int object_size) {
1369 ASSERT((size_restriction != SMALL) ||
1370 (object_size <= Page::kMaxHeapObjectSize));
1371 ASSERT(object->Size() == object_size);
1372
1373 Heap* heap = map->heap();
1374 if (heap->ShouldBePromoted(object->address(), object_size)) {
1375 MaybeObject* maybe_result;
1376
1377 if ((size_restriction != SMALL) &&
1378 (object_size > Page::kMaxHeapObjectSize)) {
1379 maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
1380 } else {
1381 if (object_contents == DATA_OBJECT) {
1382 maybe_result = heap->old_data_space()->AllocateRaw(object_size);
1383 } else {
1384 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
1385 }
1386 }
1387
1388 Object* result = NULL; // Initialization to please compiler.
1389 if (maybe_result->ToObject(&result)) {
1390 HeapObject* target = HeapObject::cast(result);
1391         *slot = MigrateObject(heap, object, target, object_size);
1392
1393 if (object_contents == POINTER_OBJECT) {
1394 heap->promotion_queue()->insert(target, object_size);
1395 }
1396
1397 heap->tracer()->increment_promoted_objects_size(object_size);
1398 return;
1399 }
1400 }
1401 Object* result =
1402 heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
1403 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
1404 return;
1405 }
1406
1407
1408   static inline void EvacuateFixedArray(Map* map,
1409 HeapObject** slot,
1410 HeapObject* object) {
1411 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1412 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1413 slot,
1414 object,
1415 object_size);
1416 }
1417
1418
1419   static inline void EvacuateByteArray(Map* map,
1420 HeapObject** slot,
1421 HeapObject* object) {
1422 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1423 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1424 }
1425
1426
1427   static inline void EvacuateSeqAsciiString(Map* map,
1428 HeapObject** slot,
1429 HeapObject* object) {
1430 int object_size = SeqAsciiString::cast(object)->
1431 SeqAsciiStringSize(map->instance_type());
1432 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1433 }
1434
1435
1436   static inline void EvacuateSeqTwoByteString(Map* map,
1437 HeapObject** slot,
1438 HeapObject* object) {
1439 int object_size = SeqTwoByteString::cast(object)->
1440 SeqTwoByteStringSize(map->instance_type());
1441 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1442 }
1443
1444
1445   static inline bool IsShortcutCandidate(int type) {
1446 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1447 }
1448
1449   static inline void EvacuateShortcutCandidate(Map* map,
1450 HeapObject** slot,
1451 HeapObject* object) {
1452 ASSERT(IsShortcutCandidate(map->instance_type()));
1453
1454 if (ConsString::cast(object)->unchecked_second() ==
1455 map->heap()->empty_string()) {
1456 HeapObject* first =
1457 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1458
1459 *slot = first;
1460
1461 if (!map->heap()->InNewSpace(first)) {
1462 object->set_map_word(MapWord::FromForwardingAddress(first));
1463 return;
1464 }
1465
1466 MapWord first_word = first->map_word();
1467 if (first_word.IsForwardingAddress()) {
1468 HeapObject* target = first_word.ToForwardingAddress();
1469
1470 *slot = target;
1471 object->set_map_word(MapWord::FromForwardingAddress(target));
1472 return;
1473 }
1474
1475 DoScavengeObject(first->map(), slot, first);
1476 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1477 return;
1478 }
1479
1480 int object_size = ConsString::kSize;
1481 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
1482 }
1483
1484 template<ObjectContents object_contents>
1485 class ObjectEvacuationStrategy {
1486 public:
1487 template<int object_size>
1488     static inline void VisitSpecialized(Map* map,
1489 HeapObject** slot,
1490 HeapObject* object) {
1491 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1492 }
1493
1494     static inline void Visit(Map* map,
1495 HeapObject** slot,
1496 HeapObject* object) {
1497 int object_size = map->instance_size();
1498 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1499 }
1500 };
1501
1502 static VisitorDispatchTable<ScavengingCallback> table_;
1503 };
1504
1505
1506 template<LoggingAndProfiling logging_and_profiling_mode>
1507 VisitorDispatchTable<ScavengingCallback>
1508 ScavengingVisitor<logging_and_profiling_mode>::table_;
1509
1510
1511 static void InitializeScavengingVisitorsTables() {
1512 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
1513 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
1514 scavenging_visitors_table_.CopyFrom(
1515 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
1516 scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
1517 }
1518
1519
1520 void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
1521 if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
1522 // Table was already updated by some isolate.
1523 return;
1524 }
1525
1526 if (isolate()->logger()->is_logging() ||
1527 isolate()->cpu_profiler()->is_profiling() ||
1528 (isolate()->heap_profiler() != NULL &&
1529 isolate()->heap_profiler()->is_profiling())) {
1530     // If one of the isolates is doing a scavenge at this moment, it
1531     // might see this table in an inconsistent state when some of the
1532     // callbacks point to
1533     // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
1534     // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
1535     // However, this does not lead to any bugs because such an isolate
1536     // does not have profiling enabled, and any isolate with profiling
1537     // enabled is guaranteed to see the table in a consistent state.
1538 scavenging_visitors_table_.CopyFrom(
1539 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
1540
1541 // We use Release_Store to prevent reordering of this write before writes
1542 // to the table.
1543 Release_Store(&scavenging_visitors_table_mode_,
1544 LOGGING_AND_PROFILING_ENABLED);
1545 }
1546 }
1547
1548
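// Slow path of ScavengeObject: looks up the object's map and dispatches to
// the scavenging visitor registered for that map.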
1549 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1550 ASSERT(HEAP->InFromSpace(object));
1551 MapWord first_word = object->map_word();
1552 ASSERT(!first_word.IsForwardingAddress());
1553 Map* map = first_word.ToMap();
1554 DoScavengeObject(map, p, object);
1555 }
1556
1557
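// Allocates a map with only the fields that are needed during bootstrapping;
// the remaining fields are fixed up later by CreateInitialMaps.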
1558 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1559 int instance_size) {
1560 Object* result;
1561 { MaybeObject* maybe_result = AllocateRawMap();
1562 if (!maybe_result->ToObject(&result)) return maybe_result;
1563 }
1564
1565 // Map::cast cannot be used due to uninitialized map field.
1566 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1567 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1568 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
1569 reinterpret_cast<Map*>(result)->set_visitor_id(
1570 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1571 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
1572 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
1573 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
1574 reinterpret_cast<Map*>(result)->set_bit_field(0);
1575 reinterpret_cast<Map*>(result)->set_bit_field2(0);
1576 return result;
1577 }
1578
1579
1580 MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1581 Object* result;
1582 { MaybeObject* maybe_result = AllocateRawMap();
1583 if (!maybe_result->ToObject(&result)) return maybe_result;
1584 }
1585
1586 Map* map = reinterpret_cast<Map*>(result);
1587 map->set_map(meta_map());
1588 map->set_instance_type(instance_type);
1589 map->set_visitor_id(
1590 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1591 map->set_prototype(null_value());
1592 map->set_constructor(null_value());
1593 map->set_instance_size(instance_size);
1594 map->set_inobject_properties(0);
1595 map->set_pre_allocated_property_fields(0);
1596 map->set_instance_descriptors(empty_descriptor_array());
1597 map->set_code_cache(empty_fixed_array());
1598 map->set_prototype_transitions(empty_fixed_array());
1599 map->set_unused_property_fields(0);
1600 map->set_bit_field(0);
1601 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
1602
1603 // If the map object is aligned, fill the padding area with Smi 0 objects.
1604 if (Map::kPadStart < Map::kSize) {
1605 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1606 0,
1607 Map::kSize - Map::kPadStart);
1608 }
1609 return map;
1610 }
1611
1612
1613 MaybeObject* Heap::AllocateCodeCache() {
1614 Object* result;
1615 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1616 if (!maybe_result->ToObject(&result)) return maybe_result;
1617 }
1618 CodeCache* code_cache = CodeCache::cast(result);
1619 code_cache->set_default_cache(empty_fixed_array());
1620 code_cache->set_normal_type_cache(undefined_value());
1621 return code_cache;
1622 }
1623
1624
1625 const Heap::StringTypeTable Heap::string_type_table[] = {
1626 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1627 {type, size, k##camel_name##MapRootIndex},
1628 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1629 #undef STRING_TYPE_ELEMENT
1630 };
1631
1632
1633 const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1634 #define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1635 {contents, k##name##RootIndex},
1636 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1637 #undef CONSTANT_SYMBOL_ELEMENT
1638 };
1639
1640
1641 const Heap::StructTable Heap::struct_table[] = {
1642 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1643 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1644 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1645 #undef STRUCT_TABLE_ELEMENT
1646 };
1647
1648
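// Bootstraps the root map set: first the self-referential meta map, then the
// maps and canonical empty objects (empty fixed array, empty descriptor
// array, null) that all later allocations depend on.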
1649 bool Heap::CreateInitialMaps() {
1650 Object* obj;
1651 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1652 if (!maybe_obj->ToObject(&obj)) return false;
1653 }
1654 // Map::cast cannot be used due to uninitialized map field.
1655 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1656 set_meta_map(new_meta_map);
1657 new_meta_map->set_map(new_meta_map);
1658
1659 { MaybeObject* maybe_obj =
1660 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1661 if (!maybe_obj->ToObject(&obj)) return false;
1662 }
1663 set_fixed_array_map(Map::cast(obj));
1664
1665 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1666 if (!maybe_obj->ToObject(&obj)) return false;
1667 }
1668 set_oddball_map(Map::cast(obj));
1669
1670 // Allocate the empty array.
1671 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1672 if (!maybe_obj->ToObject(&obj)) return false;
1673 }
1674 set_empty_fixed_array(FixedArray::cast(obj));
1675
1676 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1677 if (!maybe_obj->ToObject(&obj)) return false;
1678 }
1679 set_null_value(obj);
1680 Oddball::cast(obj)->set_kind(Oddball::kNull);
1681
1682 // Allocate the empty descriptor array.
1683 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1684 if (!maybe_obj->ToObject(&obj)) return false;
1685 }
1686 set_empty_descriptor_array(DescriptorArray::cast(obj));
1687
1688 // Fix the instance_descriptors for the existing maps.
1689 meta_map()->set_instance_descriptors(empty_descriptor_array());
1690 meta_map()->set_code_cache(empty_fixed_array());
1691 meta_map()->set_prototype_transitions(empty_fixed_array());
1692
1693 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1694 fixed_array_map()->set_code_cache(empty_fixed_array());
1695 fixed_array_map()->set_prototype_transitions(empty_fixed_array());
1696
1697 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1698 oddball_map()->set_code_cache(empty_fixed_array());
1699 oddball_map()->set_prototype_transitions(empty_fixed_array());
1700
1701 // Fix prototype object for existing maps.
1702 meta_map()->set_prototype(null_value());
1703 meta_map()->set_constructor(null_value());
1704
1705 fixed_array_map()->set_prototype(null_value());
1706 fixed_array_map()->set_constructor(null_value());
1707
1708 oddball_map()->set_prototype(null_value());
1709 oddball_map()->set_constructor(null_value());
1710
1711 { MaybeObject* maybe_obj =
1712 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1713 if (!maybe_obj->ToObject(&obj)) return false;
1714 }
1715 set_fixed_cow_array_map(Map::cast(obj));
1716 ASSERT(fixed_array_map() != fixed_cow_array_map());
1717
1718 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1719 if (!maybe_obj->ToObject(&obj)) return false;
1720 }
1721 set_heap_number_map(Map::cast(obj));
1722
1723 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1724 if (!maybe_obj->ToObject(&obj)) return false;
1725 }
1726 set_proxy_map(Map::cast(obj));
1727
1728 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1729 const StringTypeTable& entry = string_type_table[i];
1730 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1731 if (!maybe_obj->ToObject(&obj)) return false;
1732 }
1733 roots_[entry.index] = Map::cast(obj);
1734 }
1735
1736 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1737 if (!maybe_obj->ToObject(&obj)) return false;
1738 }
1739 set_undetectable_string_map(Map::cast(obj));
1740 Map::cast(obj)->set_is_undetectable();
1741
1742 { MaybeObject* maybe_obj =
1743 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1744 if (!maybe_obj->ToObject(&obj)) return false;
1745 }
1746 set_undetectable_ascii_string_map(Map::cast(obj));
1747 Map::cast(obj)->set_is_undetectable();
1748
1749 { MaybeObject* maybe_obj =
1750 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1751 if (!maybe_obj->ToObject(&obj)) return false;
1752 }
1753 set_byte_array_map(Map::cast(obj));
1754
1755 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1756 if (!maybe_obj->ToObject(&obj)) return false;
1757 }
1758 set_empty_byte_array(ByteArray::cast(obj));
1759
1760 { MaybeObject* maybe_obj =
1761 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
1762 if (!maybe_obj->ToObject(&obj)) return false;
1763 }
1764 set_external_pixel_array_map(Map::cast(obj));
1765
1766 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1767 ExternalArray::kAlignedSize);
1768 if (!maybe_obj->ToObject(&obj)) return false;
1769 }
1770 set_external_byte_array_map(Map::cast(obj));
1771
1772 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1773 ExternalArray::kAlignedSize);
1774 if (!maybe_obj->ToObject(&obj)) return false;
1775 }
1776 set_external_unsigned_byte_array_map(Map::cast(obj));
1777
1778 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1779 ExternalArray::kAlignedSize);
1780 if (!maybe_obj->ToObject(&obj)) return false;
1781 }
1782 set_external_short_array_map(Map::cast(obj));
1783
1784 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1785 ExternalArray::kAlignedSize);
1786 if (!maybe_obj->ToObject(&obj)) return false;
1787 }
1788 set_external_unsigned_short_array_map(Map::cast(obj));
1789
1790 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1791 ExternalArray::kAlignedSize);
1792 if (!maybe_obj->ToObject(&obj)) return false;
1793 }
1794 set_external_int_array_map(Map::cast(obj));
1795
1796 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1797 ExternalArray::kAlignedSize);
1798 if (!maybe_obj->ToObject(&obj)) return false;
1799 }
1800 set_external_unsigned_int_array_map(Map::cast(obj));
1801
1802 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1803 ExternalArray::kAlignedSize);
1804 if (!maybe_obj->ToObject(&obj)) return false;
1805 }
1806 set_external_float_array_map(Map::cast(obj));
1807
1808 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1809 if (!maybe_obj->ToObject(&obj)) return false;
1810 }
1811 set_code_map(Map::cast(obj));
1812
1813 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1814 JSGlobalPropertyCell::kSize);
1815 if (!maybe_obj->ToObject(&obj)) return false;
1816 }
1817 set_global_property_cell_map(Map::cast(obj));
1818
1819 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1820 if (!maybe_obj->ToObject(&obj)) return false;
1821 }
1822 set_one_pointer_filler_map(Map::cast(obj));
1823
1824 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1825 if (!maybe_obj->ToObject(&obj)) return false;
1826 }
1827 set_two_pointer_filler_map(Map::cast(obj));
1828
1829 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1830 const StructTable& entry = struct_table[i];
1831 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1832 if (!maybe_obj->ToObject(&obj)) return false;
1833 }
1834 roots_[entry.index] = Map::cast(obj);
1835 }
1836
1837 { MaybeObject* maybe_obj =
1838 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1839 if (!maybe_obj->ToObject(&obj)) return false;
1840 }
1841 set_hash_table_map(Map::cast(obj));
1842
1843 { MaybeObject* maybe_obj =
1844 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1845 if (!maybe_obj->ToObject(&obj)) return false;
1846 }
1847 set_context_map(Map::cast(obj));
1848
1849 { MaybeObject* maybe_obj =
1850 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1851 if (!maybe_obj->ToObject(&obj)) return false;
1852 }
1853 set_catch_context_map(Map::cast(obj));
1854
1855 { MaybeObject* maybe_obj =
1856 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1857 if (!maybe_obj->ToObject(&obj)) return false;
1858 }
1859 Map* global_context_map = Map::cast(obj);
1860 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1861 set_global_context_map(global_context_map);
1862
1863 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1864 SharedFunctionInfo::kAlignedSize);
1865 if (!maybe_obj->ToObject(&obj)) return false;
1866 }
1867 set_shared_function_info_map(Map::cast(obj));
1868
1869 { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
1870 JSMessageObject::kSize);
1871 if (!maybe_obj->ToObject(&obj)) return false;
1872 }
1873 set_message_object_map(Map::cast(obj));
1874
1875 ASSERT(!InNewSpace(empty_fixed_array()));
1876 return true;
1877 }
1878
1879
1880 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
1881 // Statically ensure that it is safe to allocate heap numbers in paged
1882 // spaces.
1883 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1884 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1885
1886 Object* result;
1887 { MaybeObject* maybe_result =
1888 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1889 if (!maybe_result->ToObject(&result)) return maybe_result;
1890 }
1891
1892 HeapObject::cast(result)->set_map(heap_number_map());
1893 HeapNumber::cast(result)->set_value(value);
1894 return result;
1895 }
1896
1897
1898 MaybeObject* Heap::AllocateHeapNumber(double value) {
1899 // Use the general version if we're forced to always allocate.
1900 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1901
1902 // This version of AllocateHeapNumber is optimized for
1903 // allocation in new space.
1904 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1905 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
1906 Object* result;
1907 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1908 if (!maybe_result->ToObject(&result)) return maybe_result;
1909 }
1910 HeapObject::cast(result)->set_map(heap_number_map());
1911 HeapNumber::cast(result)->set_value(value);
1912 return result;
1913 }
1914
1915
1916 MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1917 Object* result;
1918 { MaybeObject* maybe_result = AllocateRawCell();
1919 if (!maybe_result->ToObject(&result)) return maybe_result;
1920 }
1921 HeapObject::cast(result)->set_map(global_property_cell_map());
1922 JSGlobalPropertyCell::cast(result)->set_value(value);
1923 return result;
1924 }
1925
1926
1927 MaybeObject* Heap::CreateOddball(const char* to_string,
1928 Object* to_number,
1929 byte kind) {
1930 Object* result;
1931 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1932 if (!maybe_result->ToObject(&result)) return maybe_result;
1933 }
1934 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
1935 }
1936
1937
1938 bool Heap::CreateApiObjects() {
1939 Object* obj;
1940
1941 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1942 if (!maybe_obj->ToObject(&obj)) return false;
1943 }
1944 set_neander_map(Map::cast(obj));
1945
1946 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
1947 if (!maybe_obj->ToObject(&obj)) return false;
1948 }
1949 Object* elements;
1950 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1951 if (!maybe_elements->ToObject(&elements)) return false;
1952 }
1953 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1954 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1955 set_message_listeners(JSObject::cast(obj));
1956
1957 return true;
1958 }
1959
1960
1961 void Heap::CreateJSEntryStub() {
1962 JSEntryStub stub;
1963 set_js_entry_code(*stub.GetCode());
1964 }
1965
1966
1967 void Heap::CreateJSConstructEntryStub() {
1968 JSConstructEntryStub stub;
1969 set_js_construct_entry_code(*stub.GetCode());
1970 }
1971
1972
1973 void Heap::CreateFixedStubs() {
1974 // Here we create roots for fixed stubs. They are needed at GC
1975 // for cooking and uncooking (check out frames.cc).
1976 // This eliminates the need for doing a dictionary lookup in the
1977 // stub cache for these stubs.
1978 HandleScope scope;
1979 // gcc-4.4 has problems generating correct code for the following snippet:
1980 // { JSEntryStub stub;
1981 // js_entry_code_ = *stub.GetCode();
1982 // }
1983 // { JSConstructEntryStub stub;
1984 // js_construct_entry_code_ = *stub.GetCode();
1985 // }
1986 // To work around the problem, make separate functions without inlining.
1987 Heap::CreateJSEntryStub();
1988 Heap::CreateJSConstructEntryStub();
1989 }
1990
1991
1992 bool Heap::CreateInitialObjects() {
1993 Object* obj;
1994
1995 // The -0 value must be set before NumberFromDouble works.
1996 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1997 if (!maybe_obj->ToObject(&obj)) return false;
1998 }
1999 set_minus_zero_value(obj);
2000 ASSERT(signbit(minus_zero_value()->Number()) != 0);
2001
2002 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2003 if (!maybe_obj->ToObject(&obj)) return false;
2004 }
2005 set_nan_value(obj);
2006
2007 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
2008 if (!maybe_obj->ToObject(&obj)) return false;
2009 }
2010 set_undefined_value(obj);
2011 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2012 ASSERT(!InNewSpace(undefined_value()));
2013
2014 // Allocate initial symbol table.
2015 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2016 if (!maybe_obj->ToObject(&obj)) return false;
2017 }
2018 // Don't use set_symbol_table() due to asserts.
2019 roots_[kSymbolTableRootIndex] = obj;
2020
2021 // Assign the print strings for oddballs after creating the symbol table.
2022 Object* symbol;
2023 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
2024 if (!maybe_symbol->ToObject(&symbol)) return false;
2025 }
2026 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
2027 Oddball::cast(undefined_value())->set_to_number(nan_value());
2028
2029 // Finish initializing the null_value that was allocated in CreateInitialMaps.
2030 { MaybeObject* maybe_obj =
2031 Oddball::cast(null_value())->Initialize("null",
2032 Smi::FromInt(0),
2033 Oddball::kNull);
2034 if (!maybe_obj->ToObject(&obj)) return false;
2035 }
2036
2037 { MaybeObject* maybe_obj = CreateOddball("true",
2038 Smi::FromInt(1),
2039 Oddball::kTrue);
2040 if (!maybe_obj->ToObject(&obj)) return false;
2041 }
2042 set_true_value(obj);
2043
2044 { MaybeObject* maybe_obj = CreateOddball("false",
2045 Smi::FromInt(0),
2046 Oddball::kFalse);
2047 if (!maybe_obj->ToObject(&obj)) return false;
2048 }
2049 set_false_value(obj);
2050
2051 { MaybeObject* maybe_obj = CreateOddball("hole",
2052 Smi::FromInt(-1),
2053 Oddball::kTheHole);
2054 if (!maybe_obj->ToObject(&obj)) return false;
2055 }
2056 set_the_hole_value(obj);
2057
2058 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2059 Smi::FromInt(-4),
2060 Oddball::kArgumentMarker);
2061 if (!maybe_obj->ToObject(&obj)) return false;
2062 }
2063 set_arguments_marker(obj);
2064
2065 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2066 Smi::FromInt(-2),
2067 Oddball::kOther);
2068 if (!maybe_obj->ToObject(&obj)) return false;
2069 }
2070 set_no_interceptor_result_sentinel(obj);
2071
2072 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2073 Smi::FromInt(-3),
2074 Oddball::kOther);
2075 if (!maybe_obj->ToObject(&obj)) return false;
2076 }
2077 set_termination_exception(obj);
2078
2079 // Allocate the empty string.
2080 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2081 if (!maybe_obj->ToObject(&obj)) return false;
2082 }
2083 set_empty_string(String::cast(obj));
2084
2085 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
2086 { MaybeObject* maybe_obj =
2087 LookupAsciiSymbol(constant_symbol_table[i].contents);
2088 if (!maybe_obj->ToObject(&obj)) return false;
2089 }
2090 roots_[constant_symbol_table[i].index] = String::cast(obj);
2091 }
2092
2093 // Allocate the hidden symbol which is used to identify the hidden properties
2094 // in JSObjects. The hash code has a special value so that it will not match
2095 // the empty string when searching for the property. It cannot be part of the
2096 // loop above because it needs to be allocated manually with the special
2097 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2098 // that it will always be at the first entry in property descriptors.
2099 { MaybeObject* maybe_obj =
2100 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2101 if (!maybe_obj->ToObject(&obj)) return false;
2102 }
2103 hidden_symbol_ = String::cast(obj);
2104
2105 // Allocate the proxy for __proto__.
2106 { MaybeObject* maybe_obj =
2107 AllocateProxy((Address) &Accessors::ObjectPrototype);
2108 if (!maybe_obj->ToObject(&obj)) return false;
2109 }
2110 set_prototype_accessors(Proxy::cast(obj));
2111
2112 // Allocate the code_stubs dictionary. The initial size is set to avoid
2113 // expanding the dictionary during bootstrapping.
2114 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2115 if (!maybe_obj->ToObject(&obj)) return false;
2116 }
2117 set_code_stubs(NumberDictionary::cast(obj));
2118
2119 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2120 // is set to avoid expanding the dictionary during bootstrapping.
2121 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2122 if (!maybe_obj->ToObject(&obj)) return false;
2123 }
2124 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2125
2126 set_instanceof_cache_function(Smi::FromInt(0));
2127 set_instanceof_cache_map(Smi::FromInt(0));
2128 set_instanceof_cache_answer(Smi::FromInt(0));
2129
2130 CreateFixedStubs();
2131
2132 // Allocate the dictionary of intrinsic function names.
2133 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2134 if (!maybe_obj->ToObject(&obj)) return false;
2135 }
2136 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
2137 obj);
2138 if (!maybe_obj->ToObject(&obj)) return false;
2139 }
2140 set_intrinsic_function_names(StringDictionary::cast(obj));
2141
2142 if (InitializeNumberStringCache()->IsFailure()) return false;
2143
2144 // Allocate cache for single character ASCII strings.
2145 { MaybeObject* maybe_obj =
2146 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2147 if (!maybe_obj->ToObject(&obj)) return false;
2148 }
2149 set_single_character_string_cache(FixedArray::cast(obj));
2150
2151 // Allocate cache for external strings pointing to native source code.
2152 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2153 if (!maybe_obj->ToObject(&obj)) return false;
2154 }
2155 set_natives_source_cache(FixedArray::cast(obj));
2156
2157 // Handling of script id generation is in FACTORY->NewScript.
2158 set_last_script_id(undefined_value());
2159
2160 // Initialize keyed lookup cache.
2161 isolate_->keyed_lookup_cache()->Clear();
2162
2163 // Initialize context slot cache.
2164 isolate_->context_slot_cache()->Clear();
2165
2166 // Initialize descriptor cache.
2167 isolate_->descriptor_lookup_cache()->Clear();
2168
2169 // Initialize compilation cache.
2170 isolate_->compilation_cache()->Clear();
2171
2172 return true;
2173 }
2174
2175
2176 MaybeObject* Heap::InitializeNumberStringCache() {
2177 // Compute the size of the number string cache based on the max heap size.
2178 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2179 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2180 int number_string_cache_size = max_semispace_size_ / 512;
2181 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
2182 Object* obj;
2183 MaybeObject* maybe_obj =
2184 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2185 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2186 return maybe_obj;
2187 }
2188
2189
2190 void Heap::FlushNumberStringCache() {
2191 // Flush the number to string cache.
2192 int len = number_string_cache()->length();
2193 for (int i = 0; i < len; i++) {
2194 number_string_cache()->set_undefined(this, i);
2195 }
2196 }
2197
2198
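// Hashes a double by xor-ing the upper and lower 32 bits of its bit pattern.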
2199 static inline int double_get_hash(double d) {
2200 DoubleRepresentation rep(d);
2201 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
2202 }
2203
2204
2205 static inline int smi_get_hash(Smi* smi) {
2206 return smi->value();
2207 }
2208
2209
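// The number string cache is a flat FixedArray used as a hash table: entry i
// occupies slot 2 * i (the number used as key) and slot 2 * i + 1 (the cached
// string). The mask computed below relies on the entry count being a power
// of two, which the sizing in InitializeNumberStringCache is meant to ensure.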
2210 Object* Heap::GetNumberStringCache(Object* number) {
2211 int hash;
2212 int mask = (number_string_cache()->length() >> 1) - 1;
2213 if (number->IsSmi()) {
2214 hash = smi_get_hash(Smi::cast(number)) & mask;
2215 } else {
2216 hash = double_get_hash(number->Number()) & mask;
2217 }
2218 Object* key = number_string_cache()->get(hash * 2);
2219 if (key == number) {
2220 return String::cast(number_string_cache()->get(hash * 2 + 1));
2221 } else if (key->IsHeapNumber() &&
2222 number->IsHeapNumber() &&
2223 key->Number() == number->Number()) {
2224 return String::cast(number_string_cache()->get(hash * 2 + 1));
2225 }
2226 return undefined_value();
2227 }
2228
2229
2230 void Heap::SetNumberStringCache(Object* number, String* string) {
2231 int hash;
2232 int mask = (number_string_cache()->length() >> 1) - 1;
2233 if (number->IsSmi()) {
2234 hash = smi_get_hash(Smi::cast(number)) & mask;
2235 number_string_cache()->set(hash * 2, Smi::cast(number));
2236 } else {
2237 hash = double_get_hash(number->Number()) & mask;
2238 number_string_cache()->set(hash * 2, number);
2239 }
2240 number_string_cache()->set(hash * 2 + 1, string);
2241 }
2242
2243
2244 MaybeObject* Heap::NumberToString(Object* number,
2245 bool check_number_string_cache) {
2246 isolate_->counters()->number_to_string_runtime()->Increment();
2247 if (check_number_string_cache) {
2248 Object* cached = GetNumberStringCache(number);
2249 if (cached != undefined_value()) {
2250 return cached;
2251 }
2252 }
2253
2254 char arr[100];
2255 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2256 const char* str;
2257 if (number->IsSmi()) {
2258 int num = Smi::cast(number)->value();
2259 str = IntToCString(num, buffer);
2260 } else {
2261 double num = HeapNumber::cast(number)->value();
2262 str = DoubleToCString(num, buffer);
2263 }
2264
2265 Object* js_string;
2266 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2267 if (maybe_js_string->ToObject(&js_string)) {
2268 SetNumberStringCache(number, String::cast(js_string));
2269 }
2270 return maybe_js_string;
2271 }
2272
2273
2274 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2275 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2276 }
2277
2278
2279 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2280 ExternalArrayType array_type) {
2281 switch (array_type) {
2282 case kExternalByteArray:
2283 return kExternalByteArrayMapRootIndex;
2284 case kExternalUnsignedByteArray:
2285 return kExternalUnsignedByteArrayMapRootIndex;
2286 case kExternalShortArray:
2287 return kExternalShortArrayMapRootIndex;
2288 case kExternalUnsignedShortArray:
2289 return kExternalUnsignedShortArrayMapRootIndex;
2290 case kExternalIntArray:
2291 return kExternalIntArrayMapRootIndex;
2292 case kExternalUnsignedIntArray:
2293 return kExternalUnsignedIntArrayMapRootIndex;
2294 case kExternalFloatArray:
2295 return kExternalFloatArrayMapRootIndex;
2296 case kExternalPixelArray:
2297 return kExternalPixelArrayMapRootIndex;
2298 default:
2299 UNREACHABLE();
2300 return kUndefinedValueRootIndex;
2301 }
2302 }
2303
2304
2305 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
2306 // We need to distinguish the minus zero value and this cannot be
2307 // done after conversion to int. Doing this by comparing bit
2308 // patterns is faster than using fpclassify() et al.
2309 static const DoubleRepresentation minus_zero(-0.0);
2310
2311 DoubleRepresentation rep(value);
2312 if (rep.bits == minus_zero.bits) {
2313 return AllocateHeapNumber(-0.0, pretenure);
2314 }
2315
2316 int int_value = FastD2I(value);
2317 if (value == int_value && Smi::IsValid(int_value)) {
2318 return Smi::FromInt(int_value);
2319 }
2320
2321 // Materialize the value in the heap.
2322 return AllocateHeapNumber(value, pretenure);
2323 }
2324
2325
2326 MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
2327 // Statically ensure that it is safe to allocate proxies in paged spaces.
2328 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2329 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2330 Object* result;
2331 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2332 if (!maybe_result->ToObject(&result)) return maybe_result;
2333 }
2334
2335 Proxy::cast(result)->set_proxy(proxy);
2336 return result;
2337 }
2338
2339
2340 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2341 Object* result;
2342 { MaybeObject* maybe_result =
2343 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2344 if (!maybe_result->ToObject(&result)) return maybe_result;
2345 }
2346
2347 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2348 share->set_name(name);
2349 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
2350 share->set_code(illegal);
2351 share->set_scope_info(SerializedScopeInfo::Empty());
2352 Code* construct_stub = isolate_->builtins()->builtin(
2353 Builtins::kJSConstructStubGeneric);
2354 share->set_construct_stub(construct_stub);
2355 share->set_expected_nof_properties(0);
2356 share->set_length(0);
2357 share->set_formal_parameter_count(0);
2358 share->set_instance_class_name(Object_symbol());
2359 share->set_function_data(undefined_value());
2360 share->set_script(undefined_value());
2361 share->set_start_position_and_type(0);
2362 share->set_debug_info(undefined_value());
2363 share->set_inferred_name(empty_string());
2364 share->set_compiler_hints(0);
2365 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
2366 share->set_initial_map(undefined_value());
2367 share->set_this_property_assignments_count(0);
2368 share->set_this_property_assignments(undefined_value());
2369 share->set_opt_count(0);
2370 share->set_num_literals(0);
2371 share->set_end_position(0);
2372 share->set_function_token_position(0);
2373 return result;
2374 }
2375
2376
2377 MaybeObject* Heap::AllocateJSMessageObject(String* type,
2378 JSArray* arguments,
2379 int start_position,
2380 int end_position,
2381 Object* script,
2382 Object* stack_trace,
2383 Object* stack_frames) {
2384 Object* result;
2385 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2386 if (!maybe_result->ToObject(&result)) return maybe_result;
2387 }
2388 JSMessageObject* message = JSMessageObject::cast(result);
2389 message->set_properties(Heap::empty_fixed_array());
2390 message->set_elements(Heap::empty_fixed_array());
2391 message->set_type(type);
2392 message->set_arguments(arguments);
2393 message->set_start_position(start_position);
2394 message->set_end_position(end_position);
2395 message->set_script(script);
2396 message->set_stack_trace(stack_trace);
2397 message->set_stack_frames(stack_frames);
2398 return result;
2399 }
2400
2401
2402
2403 // Returns true for a character in a range. Both limits are inclusive.
2404 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2405 // This makes use of the unsigned wraparound.
2406 return character - from <= to - from;
2407 }
2408
2409
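// Returns a two-character string, preferring an existing symbol from the
// symbol table; otherwise a fresh sequential string is allocated, ASCII if
// both characters fit, two-byte otherwise.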
2410 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2411 Heap* heap,
2412 uint32_t c1,
2413 uint32_t c2) {
2414 String* symbol;
2415 // Numeric strings have a different hash algorithm not known by
2416 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2417 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2418 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2419 return symbol;
2420 // Now that we know the length is 2, we might as well make use of that
2421 // fact when building the new string.
2422 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2423 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
2424 Object* result;
2425 { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
2426 if (!maybe_result->ToObject(&result)) return maybe_result;
2427 }
2428 char* dest = SeqAsciiString::cast(result)->GetChars();
2429 dest[0] = c1;
2430 dest[1] = c2;
2431 return result;
2432 } else {
2433 Object* result;
2434 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
2435 if (!maybe_result->ToObject(&result)) return maybe_result;
2436 }
2437 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2438 dest[0] = c1;
2439 dest[1] = c2;
2440 return result;
2441 }
2442 }
2443
2444
2445 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
2446 int first_length = first->length();
2447 if (first_length == 0) {
2448 return second;
2449 }
2450
2451 int second_length = second->length();
2452 if (second_length == 0) {
2453 return first;
2454 }
2455
2456 int length = first_length + second_length;
2457
2458 // Optimization for 2-byte strings often used as keys in a decompression
2459 // dictionary. Check whether we already have the string in the symbol
2460 // table to prevent creation of many unnecessary strings.
2461 if (length == 2) {
2462 unsigned c1 = first->Get(0);
2463 unsigned c2 = second->Get(0);
2464 return MakeOrFindTwoCharacterString(this, c1, c2);
2465 }
2466
2467 bool first_is_ascii = first->IsAsciiRepresentation();
2468 bool second_is_ascii = second->IsAsciiRepresentation();
2469 bool is_ascii = first_is_ascii && second_is_ascii;
2470
2471 // Make sure that an out of memory exception is thrown if the length
2472 // of the new cons string is too large.
2473 if (length > String::kMaxLength || length < 0) {
2474 isolate()->context()->mark_out_of_memory();
2475 return Failure::OutOfMemoryException();
2476 }
2477
2478 bool is_ascii_data_in_two_byte_string = false;
2479 if (!is_ascii) {
2480 // At least one of the strings uses two-byte representation so we
2481 // can't use the fast case code for short ascii strings below, but
2482 // we can try to save memory if all chars actually fit in ascii.
2483 is_ascii_data_in_two_byte_string =
2484 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2485 if (is_ascii_data_in_two_byte_string) {
2486 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
2487 }
2488 }
2489
2490 // If the resulting string is small make a flat string.
2491 if (length < String::kMinNonFlatLength) {
2492 ASSERT(first->IsFlat());
2493 ASSERT(second->IsFlat());
2494 if (is_ascii) {
2495 Object* result;
2496 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2497 if (!maybe_result->ToObject(&result)) return maybe_result;
2498 }
2499 // Copy the characters into the new object.
2500 char* dest = SeqAsciiString::cast(result)->GetChars();
2501 // Copy first part.
2502 const char* src;
2503 if (first->IsExternalString()) {
2504 src = ExternalAsciiString::cast(first)->resource()->data();
2505 } else {
2506 src = SeqAsciiString::cast(first)->GetChars();
2507 }
2508 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2509 // Copy second part.
2510 if (second->IsExternalString()) {
2511 src = ExternalAsciiString::cast(second)->resource()->data();
2512 } else {
2513 src = SeqAsciiString::cast(second)->GetChars();
2514 }
2515 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2516 return result;
2517 } else {
2518 if (is_ascii_data_in_two_byte_string) {
2519 Object* result;
2520 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2521 if (!maybe_result->ToObject(&result)) return maybe_result;
2522 }
2523 // Copy the characters into the new object.
2524 char* dest = SeqAsciiString::cast(result)->GetChars();
2525 String::WriteToFlat(first, dest, 0, first_length);
2526 String::WriteToFlat(second, dest + first_length, 0, second_length);
2527 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
2528 return result;
2529 }
2530
2531 Object* result;
2532 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2533 if (!maybe_result->ToObject(&result)) return maybe_result;
2534 }
2535 // Copy the characters into the new object.
2536 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2537 String::WriteToFlat(first, dest, 0, first_length);
2538 String::WriteToFlat(second, dest + first_length, 0, second_length);
2539 return result;
2540 }
2541 }
2542
2543 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2544 cons_ascii_string_map() : cons_string_map();
2545
2546 Object* result;
2547 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2548 if (!maybe_result->ToObject(&result)) return maybe_result;
2549 }
2550
2551 AssertNoAllocation no_gc;
2552 ConsString* cons_string = ConsString::cast(result);
2553 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
2554 cons_string->set_length(length);
2555 cons_string->set_hash_field(String::kEmptyHashField);
2556 cons_string->set_first(first, mode);
2557 cons_string->set_second(second, mode);
2558 return result;
2559 }
2560
2561
2562 MaybeObject* Heap::AllocateSubString(String* buffer,
2563 int start,
2564 int end,
2565 PretenureFlag pretenure) {
2566 int length = end - start;
2567
2568 if (length == 1) {
2569 return LookupSingleCharacterStringFromCode(buffer->Get(start));
2570 } else if (length == 2) {
2571 // Optimization for 2-byte strings often used as keys in a decompression
2572 // dictionary. Check whether we already have the string in the symbol
2573 // table to prevent creation of many unnecessary strings.
2574 unsigned c1 = buffer->Get(start);
2575 unsigned c2 = buffer->Get(start + 1);
2576 return MakeOrFindTwoCharacterString(this, c1, c2);
2577 }
2578
2579 // Make an attempt to flatten the buffer to reduce access time.
2580 buffer = buffer->TryFlattenGetString();
2581
2582 Object* result;
2583 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2584 ? AllocateRawAsciiString(length, pretenure)
2585 : AllocateRawTwoByteString(length, pretenure);
2586 if (!maybe_result->ToObject(&result)) return maybe_result;
2587 }
2588 String* string_result = String::cast(result);
2589 // Copy the characters into the new object.
2590 if (buffer->IsAsciiRepresentation()) {
2591 ASSERT(string_result->IsAsciiRepresentation());
2592 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2593 String::WriteToFlat(buffer, dest, start, end);
2594 } else {
2595 ASSERT(string_result->IsTwoByteRepresentation());
2596 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2597 String::WriteToFlat(buffer, dest, start, end);
2598 }
2599
2600 return result;
2601 }
2602
2603
2604 MaybeObject* Heap::AllocateExternalStringFromAscii(
2605 ExternalAsciiString::Resource* resource) {
2606 size_t length = resource->length();
2607 if (length > static_cast<size_t>(String::kMaxLength)) {
2608 isolate()->context()->mark_out_of_memory();
2609 return Failure::OutOfMemoryException();
2610 }
2611
2612 Map* map = external_ascii_string_map();
2613 Object* result;
2614 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2615 if (!maybe_result->ToObject(&result)) return maybe_result;
2616 }
2617
2618 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
2619 external_string->set_length(static_cast<int>(length));
2620 external_string->set_hash_field(String::kEmptyHashField);
2621 external_string->set_resource(resource);
2622
2623 return result;
2624 }
2625
2626
2627 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
2628 ExternalTwoByteString::Resource* resource) {
2629 size_t length = resource->length();
2630 if (length > static_cast<size_t>(String::kMaxLength)) {
2631 isolate()->context()->mark_out_of_memory();
2632 return Failure::OutOfMemoryException();
2633 }
2634
2635 // For small strings we check whether the resource contains only
2636 // ASCII characters. If yes, we use a different string map.
2637 static const size_t kAsciiCheckLengthLimit = 32;
2638 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2639 String::IsAscii(resource->data(), static_cast<int>(length));
2640 Map* map = is_ascii ?
2641 external_string_with_ascii_data_map() : external_string_map();
2642 Object* result;
2643 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2644 if (!maybe_result->ToObject(&result)) return maybe_result;
2645 }
2646
2647 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
2648 external_string->set_length(static_cast<int>(length));
2649 external_string->set_hash_field(String::kEmptyHashField);
2650 external_string->set_resource(resource);
2651
2652 return result;
2653 }
2654
2655
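// ASCII character codes are served from (and added to) the single character
// string cache; anything above the ASCII range gets a fresh one-character
// two-byte string.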
2656 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
2657 if (code <= String::kMaxAsciiCharCode) {
2658 Object* value = single_character_string_cache()->get(code);
2659 if (value != undefined_value()) return value;
2660
2661 char buffer[1];
2662 buffer[0] = static_cast<char>(code);
2663 Object* result;
2664 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
2665
2666 if (!maybe_result->ToObject(&result)) return maybe_result;
2667 single_character_string_cache()->set(code, result);
2668 return result;
2669 }
2670
2671 Object* result;
2672 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
2673 if (!maybe_result->ToObject(&result)) return maybe_result;
2674 }
2675 String* answer = String::cast(result);
2676 answer->Set(0, code);
2677 return answer;
2678 }
2679
2680
2681 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
2682 if (length < 0 || length > ByteArray::kMaxLength) {
2683 return Failure::OutOfMemoryException();
2684 }
2685 if (pretenure == NOT_TENURED) {
2686 return AllocateByteArray(length);
2687 }
2688 int size = ByteArray::SizeFor(length);
2689 Object* result;
2690 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2691 ? old_data_space_->AllocateRaw(size)
2692 : lo_space_->AllocateRaw(size);
2693 if (!maybe_result->ToObject(&result)) return maybe_result;
2694 }
2695
2696 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2697 reinterpret_cast<ByteArray*>(result)->set_length(length);
2698 return result;
2699 }
2700
2701
2702 MaybeObject* Heap::AllocateByteArray(int length) {
2703 if (length < 0 || length > ByteArray::kMaxLength) {
2704 return Failure::OutOfMemoryException();
2705 }
2706 int size = ByteArray::SizeFor(length);
2707 AllocationSpace space =
2708 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
2709 Object* result;
2710 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2711 if (!maybe_result->ToObject(&result)) return maybe_result;
2712 }
2713
2714 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2715 reinterpret_cast<ByteArray*>(result)->set_length(length);
2716 return result;
2717 }
2718
2719
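// Overwrites a dead region of the heap with a filler object so that heap
// iteration remains valid: one- and two-pointer holes use dedicated filler
// maps, larger holes are disguised as byte arrays of the appropriate length.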
2720 void Heap::CreateFillerObjectAt(Address addr, int size) {
2721 if (size == 0) return;
2722 HeapObject* filler = HeapObject::FromAddress(addr);
2723 if (size == kPointerSize) {
2724 filler->set_map(one_pointer_filler_map());
2725 } else if (size == 2 * kPointerSize) {
2726 filler->set_map(two_pointer_filler_map());
2727 } else {
2728 filler->set_map(byte_array_map());
2729 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2730 }
2731 }
2732
2733
2734 MaybeObject* Heap::AllocateExternalArray(int length,
2735 ExternalArrayType array_type,
2736 void* external_pointer,
2737 PretenureFlag pretenure) {
2738 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2739 Object* result;
2740 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2741 space,
2742 OLD_DATA_SPACE);
2743 if (!maybe_result->ToObject(&result)) return maybe_result;
2744 }
2745
2746 reinterpret_cast<ExternalArray*>(result)->set_map(
2747 MapForExternalArrayType(array_type));
2748 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2749 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2750 external_pointer);
2751
2752 return result;
2753 }
2754
2755
2756 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2757 Code::Flags flags,
2758 Handle<Object> self_reference,
2759 bool immovable) {
2760 // Allocate ByteArray before the Code object, so that we do not risk
2761 // leaving an uninitialized Code object (and breaking the heap).
2762 Object* reloc_info;
2763 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2764 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2765 }
2766
2767 // Compute size.
2768 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
2769 int obj_size = Code::SizeFor(body_size);
2770 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
2771 MaybeObject* maybe_result;
2772 // Large code objects and code objects which should stay at a fixed address
2773 // are allocated in large object space.
2774 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
2775 maybe_result = lo_space_->AllocateRawCode(obj_size);
2776 } else {
2777 maybe_result = code_space_->AllocateRaw(obj_size);
2778 }
2779
2780 Object* result;
2781 if (!maybe_result->ToObject(&result)) return maybe_result;
2782
2783 // Initialize the object
2784 HeapObject::cast(result)->set_map(code_map());
2785 Code* code = Code::cast(result);
2786 ASSERT(!isolate_->code_range()->exists() ||
2787 isolate_->code_range()->contains(code->address()));
2788 code->set_instruction_size(desc.instr_size);
2789 code->set_relocation_info(ByteArray::cast(reloc_info));
2790 code->set_flags(flags);
2791 if (code->is_call_stub() || code->is_keyed_call_stub()) {
2792 code->set_check_type(RECEIVER_MAP_CHECK);
2793 }
2794 code->set_deoptimization_data(empty_fixed_array());
2795 // Allow self references to created code object by patching the handle to
2796 // point to the newly allocated Code object.
2797 if (!self_reference.is_null()) {
2798 *(self_reference.location()) = code;
2799 }
2800 // Migrate generated code.
2801 // The generated code can contain Object** values (typically from handles)
2802 // that are dereferenced during the copy to point directly to the actual heap
2803 // objects. These pointers can include references to the code object itself,
2804 // through the self_reference parameter.
2805 code->CopyFrom(desc);
2806
2807 #ifdef DEBUG
2808 code->Verify();
2809 #endif
2810 return code;
2811 }
2812
2813
2814 MaybeObject* Heap::CopyCode(Code* code) {
2815 // Allocate an object the same size as the code object.
2816 int obj_size = code->Size();
2817 MaybeObject* maybe_result;
2818 if (obj_size > MaxObjectSizeInPagedSpace()) {
2819 maybe_result = lo_space_->AllocateRawCode(obj_size);
2820 } else {
2821 maybe_result = code_space_->AllocateRaw(obj_size);
2822 }
2823
2824 Object* result;
2825 if (!maybe_result->ToObject(&result)) return maybe_result;
2826
2827 // Copy code object.
2828 Address old_addr = code->address();
2829 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2830 CopyBlock(new_addr, old_addr, obj_size);
2831 // Relocate the copy.
2832 Code* new_code = Code::cast(result);
2833 ASSERT(!isolate_->code_range()->exists() ||
2834 isolate_->code_range()->contains(code->address()));
2835 new_code->Relocate(new_addr - old_addr);
2836 return new_code;
2837 }
2838
2839
2840 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
2841 // Allocate ByteArray before the Code object, so that we do not risk
2842 // leaving an uninitialized Code object (and breaking the heap).
2843 Object* reloc_info_array;
2844 { MaybeObject* maybe_reloc_info_array =
2845 AllocateByteArray(reloc_info.length(), TENURED);
2846 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2847 return maybe_reloc_info_array;
2848 }
2849 }
2850
2851 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
2852
2853 int new_obj_size = Code::SizeFor(new_body_size);
2854
2855 Address old_addr = code->address();
2856
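  // Everything up to the start of the relocation info (the header plus the
  // instructions) is copied verbatim below; the relocation info itself is
  // replaced with the patched reloc_info passed in by the caller.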
2857 size_t relocation_offset =
2858 static_cast<size_t>(code->instruction_end() - old_addr);
2859
2860 MaybeObject* maybe_result;
2861 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
2862 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
2863 } else {
2864 maybe_result = code_space_->AllocateRaw(new_obj_size);
2865 }
2866
2867 Object* result;
2868 if (!maybe_result->ToObject(&result)) return maybe_result;
2869
2870 // Copy code object.
2871 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2872
2873 // Copy header and instructions.
2874 memcpy(new_addr, old_addr, relocation_offset);
2875
2876 Code* new_code = Code::cast(result);
2877 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
2878
2879 // Copy patched rinfo.
2880 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
2881
2882 // Relocate the copy.
2883 ASSERT(!isolate_->code_range()->exists() ||
2884 isolate_->code_range()->contains(code->address()));
2885 new_code->Relocate(new_addr - old_addr);
2886
2887 #ifdef DEBUG
2888 code->Verify();
2889 #endif
2890 return new_code;
2891 }
2892
2893
2894 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
2895 ASSERT(gc_state_ == NOT_IN_GC);
2896 ASSERT(map->instance_type() != MAP_TYPE);
2897 // If allocation failures are disallowed, we may allocate in a different
2898 // space when new space is full and the object is not a large object.
2899 AllocationSpace retry_space =
2900 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
2901 Object* result;
2902 { MaybeObject* maybe_result =
2903 AllocateRaw(map->instance_size(), space, retry_space);
2904 if (!maybe_result->ToObject(&result)) return maybe_result;
2905 }
2906 HeapObject::cast(result)->set_map(map);
2907 #ifdef ENABLE_LOGGING_AND_PROFILING
2908 isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
2909 #endif
2910 return result;
2911 }
2912
2913
2914 MaybeObject* Heap::InitializeFunction(JSFunction* function,
2915 SharedFunctionInfo* shared,
2916 Object* prototype) {
2917 ASSERT(!prototype->IsMap());
2918 function->initialize_properties();
2919 function->initialize_elements();
2920 function->set_shared(shared);
2921 function->set_code(shared->code());
2922 function->set_prototype_or_initial_map(prototype);
2923 function->set_context(undefined_value());
2924 function->set_literals(empty_fixed_array());
2925 function->set_next_function_link(undefined_value());
2926 return function;
2927 }
2928
2929
2930 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
2931 // Allocate the prototype. Make sure to use the object function
2932 // from the function's context, since the function can be from a
2933 // different context.
2934 JSFunction* object_function =
2935 function->context()->global_context()->object_function();
2936 Object* prototype;
2937 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2938 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2939 }
2940 // When creating the prototype for the function we must set its
2941 // constructor to the function.
2942 Object* result;
2943 { MaybeObject* maybe_result =
2944 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
2945 constructor_symbol(), function, DONT_ENUM);
2946 if (!maybe_result->ToObject(&result)) return maybe_result;
2947 }
2948 return prototype;
2949 }
2950
2951
2952 MaybeObject* Heap::AllocateFunction(Map* function_map,
2953 SharedFunctionInfo* shared,
2954 Object* prototype,
2955 PretenureFlag pretenure) {
2956 AllocationSpace space =
2957 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2958 Object* result;
2959 { MaybeObject* maybe_result = Allocate(function_map, space);
2960 if (!maybe_result->ToObject(&result)) return maybe_result;
2961 }
2962 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2963 }
2964
2965
2966 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
2967 // To get fast allocation and map sharing for arguments objects we
2968 // allocate them based on an arguments boilerplate.
2969
2970 JSObject* boilerplate;
2971 int arguments_object_size;
2972 bool strict_mode_callee = callee->IsJSFunction() &&
2973 JSFunction::cast(callee)->shared()->strict_mode();
2974 if (strict_mode_callee) {
2975 boilerplate =
2976 isolate()->context()->global_context()->
2977 strict_mode_arguments_boilerplate();
2978 arguments_object_size = kArgumentsObjectSizeStrict;
2979 } else {
2980 boilerplate =
2981 isolate()->context()->global_context()->arguments_boilerplate();
2982 arguments_object_size = kArgumentsObjectSize;
2983 }
2984
2985 // This calls Copy directly rather than using Heap::AllocateRaw so we
2986 // duplicate the check here.
2987 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2988
2989 // Check that the size of the boilerplate matches our
2990 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2991 // on the size being a known constant.
2992 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
2993
2994 // Do the allocation.
2995 Object* result;
2996 { MaybeObject* maybe_result =
2997 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
2998 if (!maybe_result->ToObject(&result)) return maybe_result;
2999 }
3000
3001 // Copy the content. The arguments boilerplate doesn't have any
3002 // fields that point to new space so it's safe to skip the write
3003 // barrier here.
3004 CopyBlock(HeapObject::cast(result)->address(),
3005 boilerplate->address(),
3006 JSObject::kHeaderSize);
3007
3008 // Set the length property.
3009 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
3010 Smi::FromInt(length),
3011 SKIP_WRITE_BARRIER);
3012 // Set the callee property for non-strict mode arguments object only.
3013 if (!strict_mode_callee) {
3014 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3015 callee);
3016 }
3017
3018 // Check the state of the object
3019 ASSERT(JSObject::cast(result)->HasFastProperties());
3020 ASSERT(JSObject::cast(result)->HasFastElements());
3021
3022 return result;
3023 }
3024
3025
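// Assumes the descriptor array has already been sorted by key, so any
// duplicate property names must occupy adjacent slots.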
3026 static bool HasDuplicates(DescriptorArray* descriptors) {
3027 int count = descriptors->number_of_descriptors();
3028 if (count > 1) {
3029 String* prev_key = descriptors->GetKey(0);
3030 for (int i = 1; i != count; i++) {
3031 String* current_key = descriptors->GetKey(i);
3032 if (prev_key == current_key) return true;
3033 prev_key = current_key;
3034 }
3035 }
3036 return false;
3037 }
3038
3039
3040 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
3041 ASSERT(!fun->has_initial_map());
3042
3043 // First create a new map with the size and number of in-object properties
3044 // suggested by the function.
3045 int instance_size = fun->shared()->CalculateInstanceSize();
3046 int in_object_properties = fun->shared()->CalculateInObjectProperties();
3047 Object* map_obj;
3048 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
3049 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3050 }
3051
3052 // Fetch or allocate prototype.
3053 Object* prototype;
3054 if (fun->has_instance_prototype()) {
3055 prototype = fun->instance_prototype();
3056 } else {
3057 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3058 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3059 }
3060 }
3061 Map* map = Map::cast(map_obj);
3062 map->set_inobject_properties(in_object_properties);
3063 map->set_unused_property_fields(in_object_properties);
3064 map->set_prototype(prototype);
3065 ASSERT(map->has_fast_elements());
3066
3067 // If the function has only simple this-property assignments, add
3068 // field descriptors for these to the initial map as the object
3069 // cannot be constructed without having these properties. Guard by
3070 // the inline_new flag so we only change the map if we generate a
3071 // specialized construct stub.
3072 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
3073 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
3074 int count = fun->shared()->this_property_assignments_count();
3075 if (count > in_object_properties) {
3076 // Inline constructor can only handle inobject properties.
3077 fun->shared()->ForbidInlineConstructor();
3078 } else {
3079 Object* descriptors_obj;
3080 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3081 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3082 return maybe_descriptors_obj;
3083 }
3084 }
3085 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3086 for (int i = 0; i < count; i++) {
3087 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3088 ASSERT(name->IsSymbol());
3089 FieldDescriptor field(name, i, NONE);
3090 field.SetEnumerationIndex(i);
3091 descriptors->Set(i, &field);
3092 }
3093 descriptors->SetNextEnumerationIndex(count);
3094 descriptors->SortUnchecked();
3095
3096 // The descriptors may contain duplicates because the compiler does not
3097 // guarantee the uniqueness of property names (it would have required
3098 // quadratic time). Once the descriptors are sorted we can check for
3099 // duplicates in linear time.
3100 if (HasDuplicates(descriptors)) {
3101 fun->shared()->ForbidInlineConstructor();
3102 } else {
3103 map->set_instance_descriptors(descriptors);
3104 map->set_pre_allocated_property_fields(count);
3105 map->set_unused_property_fields(in_object_properties - count);
3106 }
3107 }
3108 }
3109
3110 fun->shared()->StartInobjectSlackTracking(map);
3111
3112 return map;
3113 }
3114
3115
3116 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3117 FixedArray* properties,
3118 Map* map) {
3119 obj->set_properties(properties);
3120 obj->initialize_elements();
3121 // TODO(1240798): Initialize the object's body using valid initial values
3122 // according to the object's initial map. For example, if the map's
3123 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3124 // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
3125 // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
3126 // verification code has to cope with (temporarily) invalid objects. See
3127   // for example, JSArray::JSArrayVerify.
3128 Object* filler;
3129 // We cannot always fill with one_pointer_filler_map because objects
3130 // created from API functions expect their internal fields to be initialized
3131 // with undefined_value.
3132 if (map->constructor()->IsJSFunction() &&
3133 JSFunction::cast(map->constructor())->shared()->
3134 IsInobjectSlackTrackingInProgress()) {
3135 // We might want to shrink the object later.
3136 ASSERT(obj->GetInternalFieldCount() == 0);
3137 filler = Heap::one_pointer_filler_map();
3138 } else {
3139 filler = Heap::undefined_value();
3140 }
3141 obj->InitializeBody(map->instance_size(), filler);
3142 }
3143
3144
3145 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
3146 // JSFunctions should be allocated using AllocateFunction to be
3147 // properly initialized.
3148 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3149
3150 // Both types of global objects should be allocated using
3151 // AllocateGlobalObject to be properly initialized.
3152 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3153 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3154
3155 // Allocate the backing storage for the properties.
3156 int prop_size =
3157 map->pre_allocated_property_fields() +
3158 map->unused_property_fields() -
3159 map->inobject_properties();
3160 ASSERT(prop_size >= 0);
3161 Object* properties;
3162 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3163 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3164 }
3165
3166 // Allocate the JSObject.
3167 AllocationSpace space =
3168 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3169 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
3170 Object* obj;
3171 { MaybeObject* maybe_obj = Allocate(map, space);
3172 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3173 }
3174
3175 // Initialize the JSObject.
3176 InitializeJSObjectFromMap(JSObject::cast(obj),
3177 FixedArray::cast(properties),
3178 map);
3179 ASSERT(JSObject::cast(obj)->HasFastElements());
3180 return obj;
3181 }
3182
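// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// AllocateJSObjectFromMap above sizes the out-of-object property backing store
// as pre_allocated_property_fields + unused_property_fields -
// inobject_properties. The standalone helper below restates that arithmetic;
// the numbers in its comment are hypothetical.
static inline int SketchPropertyBackingStoreSize(int pre_allocated_fields,
                                                 int unused_fields,
                                                 int inobject_fields) {
  // Example: pre_allocated = 4, unused = 2, inobject = 4
  //          => 4 + 2 - 4 = 2 slots must be provided by the properties array.
  return pre_allocated_fields + unused_fields - inobject_fields;
}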
3183
3184 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3185 PretenureFlag pretenure) {
3186 // Allocate the initial map if absent.
3187 if (!constructor->has_initial_map()) {
3188 Object* initial_map;
3189 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3190 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3191 }
3192 constructor->set_initial_map(Map::cast(initial_map));
3193 Map::cast(initial_map)->set_constructor(constructor);
3194 }
3195   // Allocate the object based on the constructor's initial map.
3196 MaybeObject* result =
3197 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
3198 #ifdef DEBUG
3199 // Make sure result is NOT a global object if valid.
3200 Object* non_failure;
3201 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3202 #endif
3203 return result;
3204 }
3205
3206
3207 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
3208 ASSERT(constructor->has_initial_map());
3209 Map* map = constructor->initial_map();
3210
3211 // Make sure no field properties are described in the initial map.
3212 // This guarantees us that normalizing the properties does not
3213 // require us to change property values to JSGlobalPropertyCells.
3214 ASSERT(map->NextFreePropertyIndex() == 0);
3215
3216 // Make sure we don't have a ton of pre-allocated slots in the
3217 // global objects. They will be unused once we normalize the object.
3218 ASSERT(map->unused_property_fields() == 0);
3219 ASSERT(map->inobject_properties() == 0);
3220
3221 // Initial size of the backing store to avoid resize of the storage during
3222   // bootstrapping. The size differs between the JS global object and the
3223 // builtins object.
3224 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3225
3226 // Allocate a dictionary object for backing storage.
3227 Object* obj;
3228 { MaybeObject* maybe_obj =
3229 StringDictionary::Allocate(
3230 map->NumberOfDescribedProperties() * 2 + initial_size);
3231 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3232 }
3233 StringDictionary* dictionary = StringDictionary::cast(obj);
3234
3235 // The global object might be created from an object template with accessors.
3236 // Fill these accessors into the dictionary.
3237 DescriptorArray* descs = map->instance_descriptors();
3238 for (int i = 0; i < descs->number_of_descriptors(); i++) {
3239 PropertyDetails details(descs->GetDetails(i));
3240 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3241 PropertyDetails d =
3242 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3243 Object* value = descs->GetCallbacksObject(i);
3244 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
3245 if (!maybe_value->ToObject(&value)) return maybe_value;
3246 }
3247
3248 Object* result;
3249 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3250 if (!maybe_result->ToObject(&result)) return maybe_result;
3251 }
3252 dictionary = StringDictionary::cast(result);
3253 }
3254
3255 // Allocate the global object and initialize it with the backing store.
3256 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3257 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3258 }
3259 JSObject* global = JSObject::cast(obj);
3260 InitializeJSObjectFromMap(global, dictionary, map);
3261
3262 // Create a new map for the global object.
3263 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3264 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3265 }
3266 Map* new_map = Map::cast(obj);
3267
3268   // Set up the global object as a normalized object.
3269 global->set_map(new_map);
3270 global->map()->set_instance_descriptors(empty_descriptor_array());
3271 global->set_properties(dictionary);
3272
3273 // Make sure result is a global object with properties in dictionary.
3274 ASSERT(global->IsGlobalObject());
3275 ASSERT(!global->HasFastProperties());
3276 return global;
3277 }
3278
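// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// AllocateGlobalObject above requests an initial StringDictionary of
// NumberOfDescribedProperties() * 2 + initial_size entries, where initial_size
// is 64 for the JS global object and 512 for the builtins object. The helper
// below restates that sizing on plain ints; the example count is made up.
static inline int SketchGlobalDictionarySize(int described_properties,
                                             bool is_js_global_object) {
  int initial_size = is_js_global_object ? 64 : 512;
  // Example: 10 accessor descriptors on the JS global object
  //          => 10 * 2 + 64 = 84 requested entries.
  return described_properties * 2 + initial_size;
}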
3279
3280 MaybeObject* Heap::CopyJSObject(JSObject* source) {
3281 // Never used to copy functions. If functions need to be copied we
3282 // have to be careful to clear the literals array.
3283 ASSERT(!source->IsJSFunction());
3284
3285 // Make the clone.
3286 Map* map = source->map();
3287 int object_size = map->instance_size();
3288 Object* clone;
3289
3290 // If we're forced to always allocate, we use the general allocation
3291 // functions which may leave us with an object in old space.
3292 if (always_allocate()) {
3293 { MaybeObject* maybe_clone =
3294 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3295 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3296 }
3297 Address clone_address = HeapObject::cast(clone)->address();
3298 CopyBlock(clone_address,
3299 source->address(),
3300 object_size);
3301 // Update write barrier for all fields that lie beyond the header.
3302 RecordWrites(clone_address,
3303 JSObject::kHeaderSize,
3304 (object_size - JSObject::kHeaderSize) / kPointerSize);
3305 } else {
3306 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3307 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3308 }
3309 ASSERT(InNewSpace(clone));
3310 // Since we know the clone is allocated in new space, we can copy
3311 // the contents without worrying about updating the write barrier.
3312 CopyBlock(HeapObject::cast(clone)->address(),
3313 source->address(),
3314 object_size);
3315 }
3316
3317 FixedArray* elements = FixedArray::cast(source->elements());
3318 FixedArray* properties = FixedArray::cast(source->properties());
3319 // Update elements if necessary.
3320 if (elements->length() > 0) {
3321 Object* elem;
3322 { MaybeObject* maybe_elem =
3323 (elements->map() == fixed_cow_array_map()) ?
3324 elements : CopyFixedArray(elements);
3325 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3326 }
3327 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3328 }
3329 // Update properties if necessary.
3330 if (properties->length() > 0) {
3331 Object* prop;
3332 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3333 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3334 }
3335 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3336 }
3337 // Return the new clone.
3338 #ifdef ENABLE_LOGGING_AND_PROFILING
3339 isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
3340 #endif
3341 return clone;
3342 }
3343
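// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// When CopyJSObject above clones into old space it records write-barrier
// entries for every pointer-sized field past the JSObject header, i.e.
// (object_size - JSObject::kHeaderSize) / kPointerSize slots. The helper below
// mirrors that arithmetic; the header and pointer sizes are passed in because
// the concrete values depend on the target architecture.
static inline int SketchRecordedWriteCount(int object_size,
                                           int header_size,
                                           int pointer_size) {
  // Hypothetical 64-bit example: object_size = 64, header_size = 24,
  // pointer_size = 8  =>  (64 - 24) / 8 = 5 fields get write-barrier records.
  return (object_size - header_size) / pointer_size;
}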
3344
3345 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3346 JSGlobalProxy* object) {
3347 ASSERT(constructor->has_initial_map());
3348 Map* map = constructor->initial_map();
3349
3350 // Check that the already allocated object has the same size and type as
3351 // objects allocated using the constructor.
3352 ASSERT(map->instance_size() == object->map()->instance_size());
3353 ASSERT(map->instance_type() == object->map()->instance_type());
3354
3355 // Allocate the backing storage for the properties.
3356 int prop_size = map->unused_property_fields() - map->inobject_properties();
3357 Object* properties;
3358 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3359 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3360 }
3361
3362 // Reset the map for the object.
3363 object->set_map(constructor->initial_map());
3364
3365 // Reinitialize the object from the constructor map.
3366 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3367 return object;
3368 }
3369
3370
3371 MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3372 PretenureFlag pretenure) {
3373 Object* result;
3374 { MaybeObject* maybe_result =
3375 AllocateRawAsciiString(string.length(), pretenure);
3376 if (!maybe_result->ToObject(&result)) return maybe_result;
3377 }
3378
3379 // Copy the characters into the new object.
3380 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3381 for (int i = 0; i < string.length(); i++) {
3382 string_result->SeqAsciiStringSet(i, string[i]);
3383 }
3384 return result;
3385 }
3386
3387
3388 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3389 PretenureFlag pretenure) {
3390 // V8 only supports characters in the Basic Multilingual Plane.
3391 const uc32 kMaxSupportedChar = 0xFFFF;
3392 // Count the number of characters in the UTF-8 string and check if
3393 // it is an ASCII string.
3394 Access<UnicodeCache::Utf8Decoder>
3395 decoder(isolate_->unicode_cache()->utf8_decoder());
3396 decoder->Reset(string.start(), string.length());
3397 int chars = 0;
3398 while (decoder->has_more()) {
3399 decoder->GetNext();
3400 chars++;
3401 }
3402
3403 Object* result;
3404 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3405 if (!maybe_result->ToObject(&result)) return maybe_result;
3406 }
3407
3408 // Convert and copy the characters into the new object.
3409 String* string_result = String::cast(result);
3410 decoder->Reset(string.start(), string.length());
3411 for (int i = 0; i < chars; i++) {
3412 uc32 r = decoder->GetNext();
3413 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
3414 string_result->Set(i, r);
3415 }
3416 return result;
3417 }
3418
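// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// AllocateStringFromUtf8Slow above clamps every decoded code point outside the
// Basic Multilingual Plane (> 0xFFFF) to unibrow::Utf8::kBadChar before
// storing it in the two-byte result. The helper below models only that clamp;
// kSketchBadChar is a stand-in for the real replacement constant.
static inline unsigned SketchClampToBmp(unsigned code_point) {
  const unsigned kSketchMaxSupportedChar = 0xFFFF;
  const unsigned kSketchBadChar = 0xFFFD;  // Stand-in replacement character.
  return (code_point > kSketchMaxSupportedChar) ? kSketchBadChar : code_point;
}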
3419
3420 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3421 PretenureFlag pretenure) {
3422 // Check if the string is an ASCII string.
3423 MaybeObject* maybe_result;
3424 if (String::IsAscii(string.start(), string.length())) {
3425 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
3426 } else { // It's not an ASCII string.
3427 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
3428 }
3429 Object* result;
3430 if (!maybe_result->ToObject(&result)) return maybe_result;
3431
3432 // Copy the characters into the new object, which may be either ASCII or
3433 // UTF-16.
3434 String* string_result = String::cast(result);
3435 for (int i = 0; i < string.length(); i++) {
3436 string_result->Set(i, string[i]);
3437 }
3438 return result;
3439 }
3440
3441
3442 Map* Heap::SymbolMapForString(String* string) {
3443 // If the string is in new space it cannot be used as a symbol.
3444 if (InNewSpace(string)) return NULL;
3445
3446 // Find the corresponding symbol map for strings.
3447 Map* map = string->map();
3448 if (map == ascii_string_map()) {
3449 return ascii_symbol_map();
3450 }
3451 if (map == string_map()) {
3452 return symbol_map();
3453 }
3454 if (map == cons_string_map()) {
3455 return cons_symbol_map();
3456 }
3457 if (map == cons_ascii_string_map()) {
3458 return cons_ascii_symbol_map();
3459 }
3460 if (map == external_string_map()) {
3461 return external_symbol_map();
3462 }
3463 if (map == external_ascii_string_map()) {
3464 return external_ascii_symbol_map();
3465 }
3466 if (map == external_string_with_ascii_data_map()) {
3467 return external_symbol_with_ascii_data_map();
3468 }
3469
3470 // No match found.
3471 return NULL;
3472 }
3473
3474
3475 MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3476 int chars,
3477 uint32_t hash_field) {
3478 ASSERT(chars >= 0);
3479   // Ensure that chars matches the number of characters in the buffer.
3480 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3481   // Determine whether the string is ASCII.
3482 bool is_ascii = true;
3483 while (buffer->has_more()) {
3484 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3485 is_ascii = false;
3486 break;
3487 }
3488 }
3489 buffer->Rewind();
3490
3491 // Compute map and object size.
3492 int size;
3493 Map* map;
3494
3495 if (is_ascii) {
3496 if (chars > SeqAsciiString::kMaxLength) {
3497 return Failure::OutOfMemoryException();
3498 }
3499 map = ascii_symbol_map();
3500 size = SeqAsciiString::SizeFor(chars);
3501 } else {
3502 if (chars > SeqTwoByteString::kMaxLength) {
3503 return Failure::OutOfMemoryException();
3504 }
3505 map = symbol_map();
3506 size = SeqTwoByteString::SizeFor(chars);
3507 }
3508
3509 // Allocate string.
3510 Object* result;
3511 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3512 ? lo_space_->AllocateRaw(size)
3513 : old_data_space_->AllocateRaw(size);
3514 if (!maybe_result->ToObject(&result)) return maybe_result;
3515 }
3516
3517 reinterpret_cast<HeapObject*>(result)->set_map(map);
3518 // Set length and hash fields of the allocated string.
3519 String* answer = String::cast(result);
3520 answer->set_length(chars);
3521 answer->set_hash_field(hash_field);
3522
3523 ASSERT_EQ(size, answer->Size());
3524
3525 // Fill in the characters.
3526 for (int i = 0; i < chars; i++) {
3527 answer->Set(i, buffer->GetNext());
3528 }
3529 return answer;
3530 }
3531
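// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// AllocateInternalSymbol above chooses the one-byte (ASCII) representation
// only if no character in the stream exceeds unibrow::Utf8::kMaxOneByteChar.
// The helper below shows the same decision over a plain array of code points;
// kSketchMaxOneByteChar is a stand-in for the real constant.
static inline bool SketchFitsOneByteString(const unsigned* chars, int length) {
  const unsigned kSketchMaxOneByteChar = 0x7F;  // Assumed one-byte limit.
  for (int i = 0; i < length; i++) {
    if (chars[i] > kSketchMaxOneByteChar) return false;
  }
  return true;
}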
3532
3533 MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
3534 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3535 return Failure::OutOfMemoryException();
3536 }
3537
3538 int size = SeqAsciiString::SizeFor(length);
3539 ASSERT(size <= SeqAsciiString::kMaxSize);
3540
3541 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3542 AllocationSpace retry_space = OLD_DATA_SPACE;
3543
3544 if (space == NEW_SPACE) {
3545 if (size > kMaxObjectSizeInNewSpace) {
3546       // Allocate in large object space; the retry space will be ignored.
3547 space = LO_SPACE;
3548 } else if (size > MaxObjectSizeInPagedSpace()) {
3549 // Allocate in new space, retry in large object space.
3550 retry_space = LO_SPACE;
3551 }
3552 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3553 space = LO_SPACE;
3554 }
3555 Object* result;
3556 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3557 if (!maybe_result->ToObject(&result)) return maybe_result;
3558 }
3559
3560 // Partially initialize the object.
3561 HeapObject::cast(result)->set_map(ascii_string_map());
3562 String::cast(result)->set_length(length);
3563 String::cast(result)->set_hash_field(String::kEmptyHashField);
3564 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3565 return result;
3566 }
3567
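// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// AllocateRawAsciiString above (and AllocateRawTwoByteString below) pick the
// target space like this: tenured strings go to old data space unless they are
// too big for a paged space, in which case they go to large object space;
// non-tenured strings go to new space, falling back to large object space when
// they exceed the new-space limit. The retry-space handling is omitted here;
// the enum and thresholds are local stand-ins used only for illustration.
enum SketchStringSpace { SKETCH_NEW, SKETCH_OLD_DATA, SKETCH_LO };
static inline SketchStringSpace SketchPickStringSpace(int size,
                                                      bool tenured,
                                                      int max_new_space_size,
                                                      int max_paged_size) {
  SketchStringSpace space = tenured ? SKETCH_OLD_DATA : SKETCH_NEW;
  if (space == SKETCH_NEW && size > max_new_space_size) {
    space = SKETCH_LO;  // Too big for new space.
  } else if (space == SKETCH_OLD_DATA && size > max_paged_size) {
    space = SKETCH_LO;  // Too big for a paged space.
  }
  return space;
}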
3568
3569 MaybeObject* Heap::AllocateRawTwoByteString(int length,
3570 PretenureFlag pretenure) {
3571 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3572 return Failure::OutOfMemoryException();
3573 }
3574 int size = SeqTwoByteString::SizeFor(length);
3575 ASSERT(size <= SeqTwoByteString::kMaxSize);
3576 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3577 AllocationSpace retry_space = OLD_DATA_SPACE;
3578
3579 if (space == NEW_SPACE) {
3580 if (size > kMaxObjectSizeInNewSpace) {
3581       // Allocate in large object space; the retry space will be ignored.
3582 space = LO_SPACE;
3583 } else if (size > MaxObjectSizeInPagedSpace()) {
3584 // Allocate in new space, retry in large object space.
3585 retry_space = LO_SPACE;
3586 }
3587 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3588 space = LO_SPACE;
3589 }
3590 Object* result;
3591 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3592 if (!maybe_result->ToObject(&result)) return maybe_result;
3593 }
3594
3595 // Partially initialize the object.
3596 HeapObject::cast(result)->set_map(string_map());
3597 String::cast(result)->set_length(length);
3598 String::cast(result)->set_hash_field(String::kEmptyHashField);
3599 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3600 return result;
3601 }
3602
3603
3604 MaybeObject* Heap::AllocateEmptyFixedArray() {
3605 int size = FixedArray::SizeFor(0);
3606 Object* result;
3607 { MaybeObject* maybe_result =
3608 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3609 if (!maybe_result->ToObject(&result)) return maybe_result;
3610 }
3611 // Initialize the object.
3612 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3613 reinterpret_cast<FixedArray*>(result)->set_length(0);
3614 return result;
3615 }
3616
3617
3618 MaybeObject* Heap::AllocateRawFixedArray(int length) {
3619 if (length < 0 || length > FixedArray::kMaxLength) {
3620 return Failure::OutOfMemoryException();
3621 }
3622 ASSERT(length > 0);
3623 // Use the general function if we're forced to always allocate.
3624 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3625 // Allocate the raw data for a fixed array.
3626 int size = FixedArray::SizeFor(length);
3627 return size <= kMaxObjectSizeInNewSpace
3628 ? new_space_.AllocateRaw(size)
3629 : lo_space_->AllocateRawFixedArray(size);
3630 }
3631
3632
3633 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3634 int len = src->length();
3635 Object* obj;
3636 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3637 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3638 }
3639 if (InNewSpace(obj)) {
3640 HeapObject* dst = HeapObject::cast(obj);
3641 dst->set_map(map);
3642 CopyBlock(dst->address() + kPointerSize,
3643 src->address() + kPointerSize,
3644 FixedArray::SizeFor(len) - kPointerSize);
3645 return obj;
3646 }
3647 HeapObject::cast(obj)->set_map(map);
3648 FixedArray* result = FixedArray::cast(obj);
3649 result->set_length(len);
3650
3651   // Copy the contents.
3652 AssertNoAllocation no_gc;
3653 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3654 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3655 return result;
3656 }
3657
3658
3659 MaybeObject* Heap::AllocateFixedArray(int length) {
3660 ASSERT(length >= 0);
3661 if (length == 0) return empty_fixed_array();
3662 Object* result;
3663 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3664 if (!maybe_result->ToObject(&result)) return maybe_result;
3665 }
3666 // Initialize header.
3667 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3668 array->set_map(fixed_array_map());
3669 array->set_length(length);
3670 // Initialize body.
3671 ASSERT(!InNewSpace(undefined_value()));
3672 MemsetPointer(array->data_start(), undefined_value(), length);
3673 return result;
3674 }
3675
3676
3677 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
3678 if (length < 0 || length > FixedArray::kMaxLength) {
3679 return Failure::OutOfMemoryException();
3680 }
3681
3682 AllocationSpace space =
3683 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3684 int size = FixedArray::SizeFor(length);
3685 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3686 // Too big for new space.
3687 space = LO_SPACE;
3688 } else if (space == OLD_POINTER_SPACE &&
3689 size > MaxObjectSizeInPagedSpace()) {
3690 // Too big for old pointer space.
3691 space = LO_SPACE;
3692 }
3693
3694 AllocationSpace retry_space =
3695 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3696
3697 return AllocateRaw(size, space, retry_space);
3698 }
3699
3700
3701 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
3702 Heap* heap,
3703 int length,
3704 PretenureFlag pretenure,
3705 Object* filler) {
3706 ASSERT(length >= 0);
3707 ASSERT(heap->empty_fixed_array()->IsFixedArray());
3708 if (length == 0) return heap->empty_fixed_array();
3709
3710 ASSERT(!heap->InNewSpace(filler));
3711 Object* result;
3712 { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
3713 if (!maybe_result->ToObject(&result)) return maybe_result;
3714 }
3715
3716 HeapObject::cast(result)->set_map(heap->fixed_array_map());
3717 FixedArray* array = FixedArray::cast(result);
3718 array->set_length(length);
3719 MemsetPointer(array->data_start(), filler, length);
3720 return array;
3721 }
3722
3723
3724 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
3725 return AllocateFixedArrayWithFiller(this,
3726 length,
3727 pretenure,
3728 undefined_value());
3729 }
3730
3731
3732 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3733 PretenureFlag pretenure) {
3734 return AllocateFixedArrayWithFiller(this,
3735 length,
3736 pretenure,
3737 the_hole_value());
3738 }
3739
3740
3741 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
3742 if (length == 0) return empty_fixed_array();
3743
3744 Object* obj;
3745 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3746 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3747 }
3748
3749 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3750 FixedArray::cast(obj)->set_length(length);
3751 return obj;
3752 }
3753
3754
3755 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3756 Object* result;
3757 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
3758 if (!maybe_result->ToObject(&result)) return maybe_result;
3759 }
3760 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
3761 ASSERT(result->IsHashTable());
3762 return result;
3763 }
3764
3765
3766 MaybeObject* Heap::AllocateGlobalContext() {
3767 Object* result;
3768 { MaybeObject* maybe_result =
3769 AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3770 if (!maybe_result->ToObject(&result)) return maybe_result;
3771 }
3772 Context* context = reinterpret_cast<Context*>(result);
3773 context->set_map(global_context_map());
3774 ASSERT(context->IsGlobalContext());
3775 ASSERT(result->IsContext());
3776 return result;
3777 }
3778
3779
3780 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
3781 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
3782 Object* result;
3783 { MaybeObject* maybe_result = AllocateFixedArray(length);
3784 if (!maybe_result->ToObject(&result)) return maybe_result;
3785 }
3786 Context* context = reinterpret_cast<Context*>(result);
3787 context->set_map(context_map());
3788 context->set_closure(function);
3789 context->set_fcontext(context);
3790 context->set_previous(NULL);
3791 context->set_extension(NULL);
3792 context->set_global(function->context()->global());
3793 ASSERT(!context->IsGlobalContext());
3794 ASSERT(context->is_function_context());
3795 ASSERT(result->IsContext());
3796 return result;
3797 }
3798
3799
3800 MaybeObject* Heap::AllocateWithContext(Context* previous,
3801 JSObject* extension,
3802 bool is_catch_context) {
3803 Object* result;
3804 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3805 if (!maybe_result->ToObject(&result)) return maybe_result;
3806 }
3807 Context* context = reinterpret_cast<Context*>(result);
3808 context->set_map(is_catch_context ? catch_context_map() :
3809 context_map());
3810 context->set_closure(previous->closure());
3811 context->set_fcontext(previous->fcontext());
3812 context->set_previous(previous);
3813 context->set_extension(extension);
3814 context->set_global(previous->global());
3815 ASSERT(!context->IsGlobalContext());
3816 ASSERT(!context->is_function_context());
3817 ASSERT(result->IsContext());
3818 return result;
3819 }
3820
3821
3822 MaybeObject* Heap::AllocateStruct(InstanceType type) {
3823 Map* map;
3824 switch (type) {
3825 #define MAKE_CASE(NAME, Name, name) \
3826 case NAME##_TYPE: map = name##_map(); break;
3827 STRUCT_LIST(MAKE_CASE)
3828 #undef MAKE_CASE
3829 default:
3830 UNREACHABLE();
3831 return Failure::InternalError();
3832 }
3833 int size = map->instance_size();
3834 AllocationSpace space =
3835 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
3836 Object* result;
3837 { MaybeObject* maybe_result = Allocate(map, space);
3838 if (!maybe_result->ToObject(&result)) return maybe_result;
3839 }
3840 Struct::cast(result)->InitializeBody(size);
3841 return result;
3842 }
3843
3844
3845 bool Heap::IdleNotification() {
3846 static const int kIdlesBeforeScavenge = 4;
3847 static const int kIdlesBeforeMarkSweep = 7;
3848 static const int kIdlesBeforeMarkCompact = 8;
3849 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
3850 static const unsigned int kGCsBetweenCleanup = 4;
3851
3852 if (!last_idle_notification_gc_count_init_) {
3853 last_idle_notification_gc_count_ = gc_count_;
3854 last_idle_notification_gc_count_init_ = true;
3855 }
3856
3857 bool uncommit = true;
3858 bool finished = false;
3859
3860 // Reset the number of idle notifications received when a number of
3861 // GCs have taken place. This allows another round of cleanup based
3862 // on idle notifications if enough work has been carried out to
3863 // provoke a number of garbage collections.
3864 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
3865 number_idle_notifications_ =
3866 Min(number_idle_notifications_ + 1, kMaxIdleCount);
3867 } else {
3868 number_idle_notifications_ = 0;
3869 last_idle_notification_gc_count_ = gc_count_;
3870 }
3871
3872 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
3873 if (contexts_disposed_ > 0) {
3874 HistogramTimerScope scope(isolate_->counters()->gc_context());
3875 CollectAllGarbage(false);
3876 } else {
3877 CollectGarbage(NEW_SPACE);
3878 }
3879 new_space_.Shrink();
3880 last_idle_notification_gc_count_ = gc_count_;
3881 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
3882 // Before doing the mark-sweep collections we clear the
3883 // compilation cache to avoid hanging on to source code and
3884 // generated code for cached functions.
3885 isolate_->compilation_cache()->Clear();
3886
3887 CollectAllGarbage(false);
3888 new_space_.Shrink();
3889 last_idle_notification_gc_count_ = gc_count_;
3890
3891 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
3892 CollectAllGarbage(true);
3893 new_space_.Shrink();
3894 last_idle_notification_gc_count_ = gc_count_;
3895 number_idle_notifications_ = 0;
3896 finished = true;
3897 } else if (contexts_disposed_ > 0) {
3898 if (FLAG_expose_gc) {
3899 contexts_disposed_ = 0;
3900 } else {
3901 HistogramTimerScope scope(isolate_->counters()->gc_context());
3902 CollectAllGarbage(false);
3903 last_idle_notification_gc_count_ = gc_count_;
3904 }
3905 // If this is the first idle notification, we reset the
3906 // notification count to avoid letting idle notifications for
3907 // context disposal garbage collections start a potentially too
3908 // aggressive idle GC cycle.
3909 if (number_idle_notifications_ <= 1) {
3910 number_idle_notifications_ = 0;
3911 uncommit = false;
3912 }
3913 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
3914 // If we have received more than kIdlesBeforeMarkCompact idle
3915 // notifications we do not perform any cleanup because we don't
3916 // expect to gain much by doing so.
3917 finished = true;
3918 }
3919
3920 // Make sure that we have no pending context disposals and
3921 // conditionally uncommit from space.
3922 ASSERT(contexts_disposed_ == 0);
3923 if (uncommit) UncommitFromSpace();
3924 return finished;
3925 }
3926
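// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// IdleNotification above escalates its cleanup as consecutive idle rounds
// accumulate: the 4th idle triggers a scavenge (or a full GC if contexts were
// disposed), the 7th clears the compilation cache and runs a mark-sweep, and
// the 8th runs a compacting collection and reports completion. The function
// below is a simplified model of that schedule; the constants mirror the
// kIdlesBefore* values defined in IdleNotification.
enum SketchIdleAction {
  SKETCH_IDLE_NOTHING,
  SKETCH_IDLE_SCAVENGE,
  SKETCH_IDLE_MARK_SWEEP,
  SKETCH_IDLE_MARK_COMPACT
};
static inline SketchIdleAction SketchIdleSchedule(int idle_notifications) {
  if (idle_notifications == 4) return SKETCH_IDLE_SCAVENGE;      // kIdlesBeforeScavenge
  if (idle_notifications == 7) return SKETCH_IDLE_MARK_SWEEP;    // kIdlesBeforeMarkSweep
  if (idle_notifications == 8) return SKETCH_IDLE_MARK_COMPACT;  // kIdlesBeforeMarkCompact
  return SKETCH_IDLE_NOTHING;
}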
3927
3928 #ifdef DEBUG
3929
3930 void Heap::Print() {
3931 if (!HasBeenSetup()) return;
3932 isolate()->PrintStack();
3933 AllSpaces spaces;
3934 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3935 space->Print();
3936 }
3937
3938
3939 void Heap::ReportCodeStatistics(const char* title) {
3940 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3941 PagedSpace::ResetCodeStatistics();
3942 // We do not look for code in new space, map space, or old space. If code
3943 // somehow ends up in those spaces, we would miss it here.
3944 code_space_->CollectCodeStatistics();
3945 lo_space_->CollectCodeStatistics();
3946 PagedSpace::ReportCodeStatistics();
3947 }
3948
3949
3950 // This function expects that NewSpace's allocated objects histogram is
3951 // populated (via a call to CollectStatistics or else as a side effect of a
3952 // just-completed scavenge collection).
3953 void Heap::ReportHeapStatistics(const char* title) {
3954 USE(title);
3955 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3956 title, gc_count_);
3957 PrintF("mark-compact GC : %d\n", mc_count_);
3958 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3959 old_gen_promotion_limit_);
3960 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3961 old_gen_allocation_limit_);
3962
3963 PrintF("\n");
3964 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3965 isolate_->global_handles()->PrintStats();
3966 PrintF("\n");
3967
3968 PrintF("Heap statistics : ");
3969 isolate_->memory_allocator()->ReportStatistics();
3970 PrintF("To space : ");
3971 new_space_.ReportStatistics();
3972 PrintF("Old pointer space : ");
3973 old_pointer_space_->ReportStatistics();
3974 PrintF("Old data space : ");
3975 old_data_space_->ReportStatistics();
3976 PrintF("Code space : ");
3977 code_space_->ReportStatistics();
3978 PrintF("Map space : ");
3979 map_space_->ReportStatistics();
3980 PrintF("Cell space : ");
3981 cell_space_->ReportStatistics();
3982 PrintF("Large object space : ");
3983 lo_space_->ReportStatistics();
3984 PrintF(">>>>>> ========================================= >>>>>>\n");
3985 }
3986
3987 #endif // DEBUG
3988
3989 bool Heap::Contains(HeapObject* value) {
3990 return Contains(value->address());
3991 }
3992
3993
3994 bool Heap::Contains(Address addr) {
3995 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3996 return HasBeenSetup() &&
3997 (new_space_.ToSpaceContains(addr) ||
3998 old_pointer_space_->Contains(addr) ||
3999 old_data_space_->Contains(addr) ||
4000 code_space_->Contains(addr) ||
4001 map_space_->Contains(addr) ||
4002 cell_space_->Contains(addr) ||
4003 lo_space_->SlowContains(addr));
4004 }
4005
4006
4007 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4008 return InSpace(value->address(), space);
4009 }
4010
4011
4012 bool Heap::InSpace(Address addr, AllocationSpace space) {
4013 if (OS::IsOutsideAllocatedSpace(addr)) return false;
4014 if (!HasBeenSetup()) return false;
4015
4016 switch (space) {
4017 case NEW_SPACE:
4018 return new_space_.ToSpaceContains(addr);
4019 case OLD_POINTER_SPACE:
4020 return old_pointer_space_->Contains(addr);
4021 case OLD_DATA_SPACE:
4022 return old_data_space_->Contains(addr);
4023 case CODE_SPACE:
4024 return code_space_->Contains(addr);
4025 case MAP_SPACE:
4026 return map_space_->Contains(addr);
4027 case CELL_SPACE:
4028 return cell_space_->Contains(addr);
4029 case LO_SPACE:
4030 return lo_space_->SlowContains(addr);
4031 }
4032
4033 return false;
4034 }
4035
4036
4037 #ifdef DEBUG
4038 static void DummyScavengePointer(HeapObject** p) {
4039 }
4040
4041
4042 static void VerifyPointersUnderWatermark(
4043 PagedSpace* space,
4044 DirtyRegionCallback visit_dirty_region) {
4045 PageIterator it(space, PageIterator::PAGES_IN_USE);
4046
4047 while (it.has_next()) {
4048 Page* page = it.next();
4049 Address start = page->ObjectAreaStart();
4050 Address end = page->AllocationWatermark();
4051
4052 HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
4053 start,
4054 end,
4055 visit_dirty_region,
4056 &DummyScavengePointer);
4057 }
4058 }
4059
4060
4061 static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
4062 LargeObjectIterator it(space);
4063 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
4064 if (object->IsFixedArray()) {
4065 Address slot_address = object->address();
4066 Address end = object->address() + object->Size();
4067
4068 while (slot_address < end) {
4069 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
4070 // When we are not in GC the Heap::InNewSpace() predicate
4071         // checks that pointers which satisfy the predicate point into
4072 // the active semispace.
4073 HEAP->InNewSpace(*slot);
4074 slot_address += kPointerSize;
4075 }
4076 }
4077 }
4078 }
4079
4080
4081 void Heap::Verify() {
4082 ASSERT(HasBeenSetup());
4083
4084 VerifyPointersVisitor visitor;
4085 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4086
4087 new_space_.Verify();
4088
4089 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
4090 old_pointer_space_->Verify(&dirty_regions_visitor);
4091 map_space_->Verify(&dirty_regions_visitor);
4092
4093 VerifyPointersUnderWatermark(old_pointer_space_,
4094 &IteratePointersInDirtyRegion);
4095 VerifyPointersUnderWatermark(map_space_,
4096 &IteratePointersInDirtyMapsRegion);
4097 VerifyPointersUnderWatermark(lo_space_);
4098
4099 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4100 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4101
4102 VerifyPointersVisitor no_dirty_regions_visitor;
4103 old_data_space_->Verify(&no_dirty_regions_visitor);
4104 code_space_->Verify(&no_dirty_regions_visitor);
4105 cell_space_->Verify(&no_dirty_regions_visitor);
4106
4107 lo_space_->Verify();
4108 }
4109 #endif // DEBUG
4110
4111
4112 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
4113 Object* symbol = NULL;
4114 Object* new_table;
4115 { MaybeObject* maybe_new_table =
4116 symbol_table()->LookupSymbol(string, &symbol);
4117 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4118 }
4119 // Can't use set_symbol_table because SymbolTable::cast knows that
4120 // SymbolTable is a singleton and checks for identity.
4121 roots_[kSymbolTableRootIndex] = new_table;
4122 ASSERT(symbol != NULL);
4123 return symbol;
4124 }
4125
4126
4127 MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4128 Object* symbol = NULL;
4129 Object* new_table;
4130 { MaybeObject* maybe_new_table =
4131 symbol_table()->LookupAsciiSymbol(string, &symbol);
4132 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4133 }
4134 // Can't use set_symbol_table because SymbolTable::cast knows that
4135 // SymbolTable is a singleton and checks for identity.
4136 roots_[kSymbolTableRootIndex] = new_table;
4137 ASSERT(symbol != NULL);
4138 return symbol;
4139 }
4140
4141
4142 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4143 Object* symbol = NULL;
4144 Object* new_table;
4145 { MaybeObject* maybe_new_table =
4146 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4147 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4148 }
4149 // Can't use set_symbol_table because SymbolTable::cast knows that
4150 // SymbolTable is a singleton and checks for identity.
4151 roots_[kSymbolTableRootIndex] = new_table;
4152 ASSERT(symbol != NULL);
4153 return symbol;
4154 }
4155
4156
4157 MaybeObject* Heap::LookupSymbol(String* string) {
4158 if (string->IsSymbol()) return string;
4159 Object* symbol = NULL;
4160 Object* new_table;
4161 { MaybeObject* maybe_new_table =
4162 symbol_table()->LookupString(string, &symbol);
4163 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4164 }
4165 // Can't use set_symbol_table because SymbolTable::cast knows that
4166 // SymbolTable is a singleton and checks for identity.
4167 roots_[kSymbolTableRootIndex] = new_table;
4168 ASSERT(symbol != NULL);
4169 return symbol;
4170 }
4171
4172
4173 bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4174 if (string->IsSymbol()) {
4175 *symbol = string;
4176 return true;
4177 }
4178 return symbol_table()->LookupSymbolIfExists(string, symbol);
4179 }
4180
4181
4182 #ifdef DEBUG
4183 void Heap::ZapFromSpace() {
4184 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
4185 for (Address a = new_space_.FromSpaceLow();
4186 a < new_space_.FromSpaceHigh();
4187 a += kPointerSize) {
4188 Memory::Address_at(a) = kFromSpaceZapValue;
4189 }
4190 }
4191 #endif // DEBUG
4192
4193
4194 bool Heap::IteratePointersInDirtyRegion(Heap* heap,
4195 Address start,
4196 Address end,
4197 ObjectSlotCallback copy_object_func) {
4198 Address slot_address = start;
4199 bool pointers_to_new_space_found = false;
4200
4201 while (slot_address < end) {
4202 Object** slot = reinterpret_cast<Object**>(slot_address);
4203 if (heap->InNewSpace(*slot)) {
4204 ASSERT((*slot)->IsHeapObject());
4205 copy_object_func(reinterpret_cast<HeapObject**>(slot));
4206 if (heap->InNewSpace(*slot)) {
4207 ASSERT((*slot)->IsHeapObject());
4208 pointers_to_new_space_found = true;
4209 }
4210 }
4211 slot_address += kPointerSize;
4212 }
4213 return pointers_to_new_space_found;
4214 }
4215
4216
4217 // Compute start address of the first map following given addr.
4218 static inline Address MapStartAlign(Address addr) {
4219 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4220 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4221 }
4222
4223
4224 // Compute end address of the first map preceding given addr.
4225 static inline Address MapEndAlign(Address addr) {
4226 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4227 return page + ((addr - page) / Map::kSize * Map::kSize);
4228 }
4229
4230
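// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// MapStartAlign rounds an address up to the next Map-sized slot within the
// page's object area and MapEndAlign rounds down. The helpers below repeat
// that arithmetic on plain page offsets; the example numbers (offset 100,
// map size 88) are hypothetical.
static inline int SketchAlignOffsetUpToMap(int offset_in_page, int map_size) {
  // Mirrors MapStartAlign: e.g. (100 + 88 - 1) / 88 * 88 = 176.
  return (offset_in_page + map_size - 1) / map_size * map_size;
}
static inline int SketchAlignOffsetDownToMap(int offset_in_page, int map_size) {
  // Mirrors MapEndAlign: e.g. 100 / 88 * 88 = 88.
  return offset_in_page / map_size * map_size;
}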
4231 static bool IteratePointersInDirtyMaps(Address start,
4232 Address end,
4233 ObjectSlotCallback copy_object_func) {
4234 ASSERT(MapStartAlign(start) == start);
4235 ASSERT(MapEndAlign(end) == end);
4236
4237 Address map_address = start;
4238 bool pointers_to_new_space_found = false;
4239
4240 Heap* heap = HEAP;
4241 while (map_address < end) {
4242 ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
4243 ASSERT(Memory::Object_at(map_address)->IsMap());
4244
4245 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4246 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4247
4248 if (Heap::IteratePointersInDirtyRegion(heap,
4249 pointer_fields_start,
4250 pointer_fields_end,
4251 copy_object_func)) {
4252 pointers_to_new_space_found = true;
4253 }
4254
4255 map_address += Map::kSize;
4256 }
4257
4258 return pointers_to_new_space_found;
4259 }
4260
4261
4262 bool Heap::IteratePointersInDirtyMapsRegion(
4263 Heap* heap,
4264 Address start,
4265 Address end,
4266 ObjectSlotCallback copy_object_func) {
4267 Address map_aligned_start = MapStartAlign(start);
4268 Address map_aligned_end = MapEndAlign(end);
4269
4270 bool contains_pointers_to_new_space = false;
4271
4272 if (map_aligned_start != start) {
4273 Address prev_map = map_aligned_start - Map::kSize;
4274 ASSERT(Memory::Object_at(prev_map)->IsMap());
4275
4276 Address pointer_fields_start =
4277 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4278
4279 Address pointer_fields_end =
4280 Min(prev_map + Map::kPointerFieldsEndOffset, end);
4281
4282 contains_pointers_to_new_space =
4283 IteratePointersInDirtyRegion(heap,
4284 pointer_fields_start,
4285 pointer_fields_end,
4286 copy_object_func)
4287 || contains_pointers_to_new_space;
4288 }
4289
4290 contains_pointers_to_new_space =
4291 IteratePointersInDirtyMaps(map_aligned_start,
4292 map_aligned_end,
4293 copy_object_func)
4294 || contains_pointers_to_new_space;
4295
4296 if (map_aligned_end != end) {
4297 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4298
4299 Address pointer_fields_start =
4300 map_aligned_end + Map::kPointerFieldsBeginOffset;
4301
4302 Address pointer_fields_end =
4303 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
4304
4305 contains_pointers_to_new_space =
4306 IteratePointersInDirtyRegion(heap,
4307 pointer_fields_start,
4308 pointer_fields_end,
4309 copy_object_func)
4310 || contains_pointers_to_new_space;
4311 }
4312
4313 return contains_pointers_to_new_space;
4314 }
4315
4316
4317 void Heap::IterateAndMarkPointersToFromSpace(Address start,
4318 Address end,
4319 ObjectSlotCallback callback) {
4320 Address slot_address = start;
4321 Page* page = Page::FromAddress(start);
4322
4323 uint32_t marks = page->GetRegionMarks();
4324
4325 while (slot_address < end) {
4326 Object** slot = reinterpret_cast<Object**>(slot_address);
4327 if (InFromSpace(*slot)) {
4328 ASSERT((*slot)->IsHeapObject());
4329 callback(reinterpret_cast<HeapObject**>(slot));
4330 if (InNewSpace(*slot)) {
4331 ASSERT((*slot)->IsHeapObject());
4332 marks |= page->GetRegionMaskForAddress(slot_address);
4333 }
4334 }
4335 slot_address += kPointerSize;
4336 }
4337
4338 page->SetRegionMarks(marks);
4339 }
4340
4341
4342 uint32_t Heap::IterateDirtyRegions(
4343 uint32_t marks,
4344 Address area_start,
4345 Address area_end,
4346 DirtyRegionCallback visit_dirty_region,
4347 ObjectSlotCallback copy_object_func) {
4348 uint32_t newmarks = 0;
4349 uint32_t mask = 1;
4350
4351 if (area_start >= area_end) {
4352 return newmarks;
4353 }
4354
4355 Address region_start = area_start;
4356
4357 // area_start does not necessarily coincide with start of the first region.
4358 // Thus to calculate the beginning of the next region we have to align
4359 // area_start by Page::kRegionSize.
4360 Address second_region =
4361 reinterpret_cast<Address>(
4362 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4363 ~Page::kRegionAlignmentMask);
4364
4365 // Next region might be beyond area_end.
4366 Address region_end = Min(second_region, area_end);
4367
4368 if (marks & mask) {
4369 if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
4370 newmarks |= mask;
4371 }
4372 }
4373 mask <<= 1;
4374
4375   // Iterate subsequent regions which lie fully inside [area_start, area_end[.
4376 region_start = region_end;
4377 region_end = region_start + Page::kRegionSize;
4378
4379 while (region_end <= area_end) {
4380 if (marks & mask) {
4381 if (visit_dirty_region(this,
4382 region_start,
4383 region_end,
4384 copy_object_func)) {
4385 newmarks |= mask;
4386 }
4387 }
4388
4389 region_start = region_end;
4390 region_end = region_start + Page::kRegionSize;
4391
4392 mask <<= 1;
4393 }
4394
4395 if (region_start != area_end) {
4396     // A small piece of the area was left unvisited because area_end does not
4397     // coincide with a region end. Check whether the region covering the last
4398     // part of the area is dirty.
4399 if (marks & mask) {
4400 if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
4401 newmarks |= mask;
4402 }
4403 }
4404 }
4405
4406 return newmarks;
4407 }
4408
4409
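// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// IterateDirtyRegions above consumes one bit of the region bitmap per
// Page::kRegionSize chunk: a set bit means "possibly dirty", the visitor
// reports whether the region still contains new-space pointers, and only those
// bits survive into the returned mask. The loop below models that bookkeeping
// over a plain uint32_t with a caller-supplied predicate.
static inline uint32_t SketchRewriteDirtyMarks(uint32_t marks,
                                               int region_count,
                                               bool (*still_dirty)(int)) {
  uint32_t newmarks = 0;
  uint32_t mask = 1;
  for (int region = 0; region < region_count; region++, mask <<= 1) {
    if ((marks & mask) != 0 && still_dirty(region)) newmarks |= mask;
  }
  return newmarks;
}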
4410
4411 void Heap::IterateDirtyRegions(
4412 PagedSpace* space,
4413 DirtyRegionCallback visit_dirty_region,
4414 ObjectSlotCallback copy_object_func,
4415 ExpectedPageWatermarkState expected_page_watermark_state) {
4416
4417 PageIterator it(space, PageIterator::PAGES_IN_USE);
4418
4419 while (it.has_next()) {
4420 Page* page = it.next();
4421 uint32_t marks = page->GetRegionMarks();
4422
4423 if (marks != Page::kAllRegionsCleanMarks) {
4424 Address start = page->ObjectAreaStart();
4425
4426 // Do not try to visit pointers beyond page allocation watermark.
4427 // Page can contain garbage pointers there.
4428 Address end;
4429
4430 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4431 page->IsWatermarkValid()) {
4432 end = page->AllocationWatermark();
4433 } else {
4434 end = page->CachedAllocationWatermark();
4435 }
4436
4437 ASSERT(space == old_pointer_space_ ||
4438 (space == map_space_ &&
4439 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4440
4441 page->SetRegionMarks(IterateDirtyRegions(marks,
4442 start,
4443 end,
4444 visit_dirty_region,
4445 copy_object_func));
4446 }
4447
4448 // Mark page watermark as invalid to maintain watermark validity invariant.
4449 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4450 page->InvalidateWatermark(true);
4451 }
4452 }
4453
4454
4455 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4456 IterateStrongRoots(v, mode);
4457 IterateWeakRoots(v, mode);
4458 }
4459
4460
4461 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4462 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
4463 v->Synchronize("symbol_table");
4464 if (mode != VISIT_ALL_IN_SCAVENGE) {
4465 // Scavenge collections have special processing for this.
4466 external_string_table_.Iterate(v);
4467 }
4468 v->Synchronize("external_string_table");
4469 }
4470
4471
4472 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4473 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4474 v->Synchronize("strong_root_list");
4475
4476 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
4477 v->Synchronize("symbol");
4478
4479 isolate_->bootstrapper()->Iterate(v);
4480 v->Synchronize("bootstrapper");
4481 isolate_->Iterate(v);
4482 v->Synchronize("top");
4483 Relocatable::Iterate(v);
4484 v->Synchronize("relocatable");
4485
4486 #ifdef ENABLE_DEBUGGER_SUPPORT
4487 isolate_->debug()->Iterate(v);
4488 #endif
4489 v->Synchronize("debug");
4490 isolate_->compilation_cache()->Iterate(v);
4491 v->Synchronize("compilationcache");
4492
4493 // Iterate over local handles in handle scopes.
4494 isolate_->handle_scope_implementer()->Iterate(v);
4495 v->Synchronize("handlescope");
4496
4497 // Iterate over the builtin code objects and code stubs in the
4498 // heap. Note that it is not necessary to iterate over code objects
4499 // on scavenge collections.
4500 if (mode != VISIT_ALL_IN_SCAVENGE) {
4501 isolate_->builtins()->IterateBuiltins(v);
4502 }
4503 v->Synchronize("builtins");
4504
4505 // Iterate over global handles.
4506 if (mode == VISIT_ONLY_STRONG) {
4507 isolate_->global_handles()->IterateStrongRoots(v);
4508 } else {
4509 isolate_->global_handles()->IterateAllRoots(v);
4510 }
4511 v->Synchronize("globalhandles");
4512
4513 // Iterate over pointers being held by inactive threads.
4514 isolate_->thread_manager()->Iterate(v);
4515 v->Synchronize("threadmanager");
4516
4517 // Iterate over the pointers the Serialization/Deserialization code is
4518 // holding.
4519 // During garbage collection this keeps the partial snapshot cache alive.
4520 // During deserialization of the startup snapshot this creates the partial
4521 // snapshot cache and deserializes the objects it refers to. During
4522 // serialization this does nothing, since the partial snapshot cache is
4523 // empty. However the next thing we do is create the partial snapshot,
4524 // filling up the partial snapshot cache with objects it needs as we go.
4525 SerializerDeserializer::Iterate(v);
4526 // We don't do a v->Synchronize call here, because in debug mode that will
4527 // output a flag to the snapshot. However at this point the serializer and
4528 // deserializer are deliberately a little unsynchronized (see above) so the
4529 // checking of the sync flag in the snapshot would fail.
4530 }
4531
4532
4533 // TODO(1236194): Since the heap size is configurable on the command line
4534 // and through the API, we should gracefully handle the case that the heap
4535 // size is not big enough to fit all the initial objects.
4536 bool Heap::ConfigureHeap(int max_semispace_size,
4537 int max_old_gen_size,
4538 int max_executable_size) {
4539 if (HasBeenSetup()) return false;
4540
4541 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4542
4543 if (Snapshot::IsEnabled()) {
4544 // If we are using a snapshot we always reserve the default amount
4545 // of memory for each semispace because code in the snapshot has
4546 // write-barrier code that relies on the size and alignment of new
4547 // space. We therefore cannot use a larger max semispace size
4548 // than the default reserved semispace size.
4549 if (max_semispace_size_ > reserved_semispace_size_) {
4550 max_semispace_size_ = reserved_semispace_size_;
4551 }
4552 } else {
4553 // If we are not using snapshots we reserve space for the actual
4554 // max semispace size.
4555 reserved_semispace_size_ = max_semispace_size_;
4556 }
4557
4558 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
4559 if (max_executable_size > 0) {
4560 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4561 }
4562
4563 // The max executable size must be less than or equal to the max old
4564 // generation size.
4565 if (max_executable_size_ > max_old_generation_size_) {
4566 max_executable_size_ = max_old_generation_size_;
4567 }
4568
4569 // The new space size must be a power of two to support single-bit testing
4570 // for containment.
4571 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4572 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4573 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4574 external_allocation_limit_ = 10 * max_semispace_size_;
4575
4576 // The old generation is paged.
4577 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
4578
4579 configured_ = true;
4580 return true;
4581 }
4582
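// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original V8 code).
// ConfigureHeap above forces the semispace size up to a power of two (new
// space containment is tested with single-bit masks) and rounds the old
// generation up to a multiple of Page::kPageSize. The helpers below restate
// those two adjustments on plain integers; RoundUpToPowerOf2 and RoundUp are
// the real utilities used above, these are simplified stand-ins.
static inline int SketchRoundUpToPowerOf2(int value) {
  int result = 1;
  while (result < value) result <<= 1;  // E.g. 3 * MB rounds up to 4 * MB.
  return result;
}
static inline int SketchRoundUpToPageMultiple(int value, int page_size) {
  return (value + page_size - 1) / page_size * page_size;
}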
4583
4584 bool Heap::ConfigureHeapDefault() {
4585 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4586 FLAG_max_old_space_size * MB,
4587 FLAG_max_executable_size * MB);
4588 }
4589
4590
4591 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4592 *stats->start_marker = HeapStats::kStartMarker;
4593 *stats->end_marker = HeapStats::kEndMarker;
4594 *stats->new_space_size = new_space_.SizeAsInt();
4595 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4596 *stats->old_pointer_space_size = old_pointer_space_->Size();
4597 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4598 *stats->old_data_space_size = old_data_space_->Size();
4599 *stats->old_data_space_capacity = old_data_space_->Capacity();
4600 *stats->code_space_size = code_space_->Size();
4601 *stats->code_space_capacity = code_space_->Capacity();
4602 *stats->map_space_size = map_space_->Size();
4603 *stats->map_space_capacity = map_space_->Capacity();
4604 *stats->cell_space_size = cell_space_->Size();
4605 *stats->cell_space_capacity = cell_space_->Capacity();
4606 *stats->lo_space_size = lo_space_->Size();
4607 isolate_->global_handles()->RecordStats(stats);
4608 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4609 *stats->memory_allocator_capacity =
4610 isolate()->memory_allocator()->Size() +
4611 isolate()->memory_allocator()->Available();
4612 *stats->os_error = OS::GetLastError();
4613 isolate()->memory_allocator()->Available();
4614 if (take_snapshot) {
4615 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
4616 for (HeapObject* obj = iterator.next();
4617 obj != NULL;
4618 obj = iterator.next()) {
4619 InstanceType type = obj->map()->instance_type();
4620 ASSERT(0 <= type && type <= LAST_TYPE);
4621 stats->objects_per_type[type]++;
4622 stats->size_per_type[type] += obj->Size();
4623 }
4624 }
4625 }
4626
4627
4628 intptr_t Heap::PromotedSpaceSize() {
4629 return old_pointer_space_->Size()
4630 + old_data_space_->Size()
4631 + code_space_->Size()
4632 + map_space_->Size()
4633 + cell_space_->Size()
4634 + lo_space_->Size();
4635 }
4636
4637
4638 int Heap::PromotedExternalMemorySize() {
4639 if (amount_of_external_allocated_memory_
4640 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4641 return amount_of_external_allocated_memory_
4642 - amount_of_external_allocated_memory_at_last_global_gc_;
4643 }
4644
4645 #ifdef DEBUG
4646
4647 // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
4648 static const int kMarkTag = 2;
4649
4650
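// Debug-only helper for finding a retention path from the roots to a given
// heap object (or to any global object). Visited objects are marked by adding
// kMarkTag to their map word; the marks are removed again after the search.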
4651 class HeapDebugUtils {
4652 public:
4653 explicit HeapDebugUtils(Heap* heap)
4654 : search_for_any_global_(false),
4655 search_target_(NULL),
4656 found_target_(false),
4657 object_stack_(20),
4658 heap_(heap) {
4659 }
4660
4661 class MarkObjectVisitor : public ObjectVisitor {
4662 public:
4663 explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4664
4665 void VisitPointers(Object** start, Object** end) {
4666 // Mark all HeapObject pointers in [start, end)
4667 for (Object** p = start; p < end; p++) {
4668 if ((*p)->IsHeapObject())
4669 utils_->MarkObjectRecursively(p);
4670 }
4671 }
4672
4673 HeapDebugUtils* utils_;
4674 };
4675
4676 void MarkObjectRecursively(Object** p) {
4677 if (!(*p)->IsHeapObject()) return;
4678
4679 HeapObject* obj = HeapObject::cast(*p);
4680
4681 Object* map = obj->map();
4682
4683 if (!map->IsHeapObject()) return; // visited before
4684
4685 if (found_target_) return; // stop if target found
4686 object_stack_.Add(obj);
4687 if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
4688 (!search_for_any_global_ && (obj == search_target_))) {
4689 found_target_ = true;
4690 return;
4691 }
4692
4693 // not visited yet
4694 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
4695
4696 Address map_addr = map_p->address();
4697
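// Tag the map word so this object is treated as visited; the original map
// pointer is restored later by UnmarkObjectRecursively.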
4698 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
4699
4700 MarkObjectRecursively(&map);
4701
4702 MarkObjectVisitor mark_visitor(this);
4703
4704 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
4705 &mark_visitor);
4706
4707 if (!found_target_) // don't pop if found the target
4708 object_stack_.RemoveLast();
4709 }
4710
4711
4712 class UnmarkObjectVisitor : public ObjectVisitor {
4713 public:
4714 explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4715
4716 void VisitPointers(Object** start, Object** end) {
4717 // Unmark all HeapObject pointers in [start, end)
4718 for (Object** p = start; p < end; p++) {
4719 if ((*p)->IsHeapObject())
4720 utils_->UnmarkObjectRecursively(p);
4721 }
4722 }
4723
4724 HeapDebugUtils* utils_;
4725 };
4726
4727
4728 void UnmarkObjectRecursively(Object** p) {
4729 if (!(*p)->IsHeapObject()) return;
4730
4731 HeapObject* obj = HeapObject::cast(*p);
4732
4733 Object* map = obj->map();
4734
4735 if (map->IsHeapObject()) return; // unmarked already
4736
4737 Address map_addr = reinterpret_cast<Address>(map);
4738
4739 map_addr -= kMarkTag;
4740
4741 ASSERT_TAG_ALIGNED(map_addr);
4742
4743 HeapObject* map_p = HeapObject::FromAddress(map_addr);
4744
4745 obj->set_map(reinterpret_cast<Map*>(map_p));
4746
4747 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
4748
4749 UnmarkObjectVisitor unmark_visitor(this);
4750
4751 obj->IterateBody(Map::cast(map_p)->instance_type(),
4752 obj->SizeFromMap(Map::cast(map_p)),
4753 &unmark_visitor);
4754 }
4755
4756
4757 void MarkRootObjectRecursively(Object** root) {
4758 if (search_for_any_global_) {
4759 ASSERT(search_target_ == NULL);
4760 } else {
4761 ASSERT(search_target_->IsHeapObject());
4762 }
4763 found_target_ = false;
4764 object_stack_.Clear();
4765
4766 MarkObjectRecursively(root);
4767 UnmarkObjectRecursively(root);
4768
4769 if (found_target_) {
4770 PrintF("=====================================\n");
4771 PrintF("==== Path to object ====\n");
4772 PrintF("=====================================\n\n");
4773
4774 ASSERT(!object_stack_.is_empty());
4775 for (int i = 0; i < object_stack_.length(); i++) {
4776 if (i > 0) PrintF("\n |\n |\n V\n\n");
4777 Object* obj = object_stack_[i];
4778 obj->Print();
4779 }
4780 PrintF("=====================================\n");
4781 }
4782 }
4783
4784 // Helper class for visiting HeapObjects recursively.
4785 class MarkRootVisitor: public ObjectVisitor {
4786 public:
4787 explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
4788
4789 void VisitPointers(Object** start, Object** end) {
4790 // Visit all HeapObject pointers in [start, end)
4791 for (Object** p = start; p < end; p++) {
4792 if ((*p)->IsHeapObject())
4793 utils_->MarkRootObjectRecursively(p);
4794 }
4795 }
4796
4797 HeapDebugUtils* utils_;
4798 };
4799
4800 bool search_for_any_global_;
4801 Object* search_target_;
4802 bool found_target_;
4803 List<Object*> object_stack_;
4804 Heap* heap_;
4805
4806 friend class Heap;
4807 };
4808
4809 #endif
4810
4811 bool Heap::Setup(bool create_heap_objects) {
4812 #ifdef DEBUG
4813 debug_utils_ = new HeapDebugUtils(this);
4814 #endif
4815
4816 // Initialize heap spaces and initial maps and objects. Whenever something
4817 // goes wrong, just return false. The caller should check the results and
4818 // call Heap::TearDown() to release allocated memory.
4819 //
4820 // If the heap is not yet configured (e.g., through the API), configure it.
4821 // Configuration is based on the flags new-space-size (really the semispace
4822 // size) and old-space-size if set or the initial values of semispace_size_
4823 // and old_generation_size_ otherwise.
4824 if (!configured_) {
4825 if (!ConfigureHeapDefault()) return false;
4826 }
4827
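// The scavenging and mark-compact visitor tables are shared by all isolates,
// so their one-time initialization is guarded by a process-wide mutex.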
4828 gc_initializer_mutex->Lock();
4829 static bool initialized_gc = false;
4830 if (!initialized_gc) {
4831 initialized_gc = true;
4832 InitializeScavengingVisitorsTables();
4833 NewSpaceScavenger::Initialize();
4834 MarkCompactCollector::Initialize();
4835 }
4836 gc_initializer_mutex->Unlock();
4837
4838 MarkMapPointersAsEncoded(false);
4839
4840 // Setup memory allocator and reserve a chunk of memory for new
4841 // space. The chunk is double the size of the requested reserved
4842 // new space size to ensure that we can find a pair of semispaces that
4843 // are contiguous and aligned to their size.
4844 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
4845 return false;
4846 void* chunk =
4847 isolate_->memory_allocator()->ReserveInitialChunk(
4848 4 * reserved_semispace_size_);
4849 if (chunk == NULL) return false;
4850
4851 // Align the pair of semispaces to their size, which must be a power
4852 // of 2.
4853 Address new_space_start =
4854 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4855 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4856 return false;
4857 }
4858
4859 // Initialize old pointer space.
4860 old_pointer_space_ =
4861 new OldSpace(this,
4862 max_old_generation_size_,
4863 OLD_POINTER_SPACE,
4864 NOT_EXECUTABLE);
4865 if (old_pointer_space_ == NULL) return false;
4866 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4867
4868 // Initialize old data space.
4869 old_data_space_ =
4870 new OldSpace(this,
4871 max_old_generation_size_,
4872 OLD_DATA_SPACE,
4873 NOT_EXECUTABLE);
4874 if (old_data_space_ == NULL) return false;
4875 if (!old_data_space_->Setup(NULL, 0)) return false;
4876
4877 // Initialize the code space, set its maximum capacity to the old
4878 // generation size. It needs executable memory.
4879 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4880 // virtual address space, so that they can call each other with near calls.
4881 if (code_range_size_ > 0) {
4882 if (!isolate_->code_range()->Setup(code_range_size_)) {
4883 return false;
4884 }
4885 }
4886
4887 code_space_ =
4888 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
4889 if (code_space_ == NULL) return false;
4890 if (!code_space_->Setup(NULL, 0)) return false;
4891
4892 // Initialize map space.
4893 map_space_ = new MapSpace(this, FLAG_use_big_map_space
4894 ? max_old_generation_size_
4895 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4896 FLAG_max_map_space_pages,
4897 MAP_SPACE);
4898 if (map_space_ == NULL) return false;
4899 if (!map_space_->Setup(NULL, 0)) return false;
4900
4901 // Initialize global property cell space.
4902 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
4903 if (cell_space_ == NULL) return false;
4904 if (!cell_space_->Setup(NULL, 0)) return false;
4905
4906 // The large object space may contain code or data. We set the memory
4907 // to be non-executable here for safety, but this means we need to enable it
4908 // explicitly when allocating large code objects.
4909 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
4910 if (lo_space_ == NULL) return false;
4911 if (!lo_space_->Setup()) return false;
4912
4913 if (create_heap_objects) {
4914 // Create initial maps.
4915 if (!CreateInitialMaps()) return false;
4916 if (!CreateApiObjects()) return false;
4917
4918 // Create initial objects
4919 if (!CreateInitialObjects()) return false;
4920
4921 global_contexts_list_ = undefined_value();
4922 }
4923
4924 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
4925 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
4926
4927 #ifdef ENABLE_LOGGING_AND_PROFILING
4928 // This should be called only after initial objects have been created.
4929 isolate_->producer_heap_profile()->Setup();
4930 #endif
4931
4932 return true;
4933 }
4934
4935
4936 void Heap::SetStackLimits() {
4937 ASSERT(isolate_ != NULL);
4938 ASSERT(isolate_ == isolate());
4939 // On 64-bit machines, pointers are generally out of range of Smis. We write
4940 // something that looks like an out-of-range Smi to the GC.
4941
4942 // Set up the special root array entries containing the stack limits.
4943 // These are actually addresses, but the tag makes the GC ignore it.
4944 roots_[kStackLimitRootIndex] =
4945 reinterpret_cast<Object*>(
4946 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
4947 roots_[kRealStackLimitRootIndex] =
4948 reinterpret_cast<Object*>(
4949 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
4950 }
4951
4952
4953 void Heap::TearDown() {
4954 if (FLAG_print_cumulative_gc_stat) {
4955 PrintF("\n\n");
4956 PrintF("gc_count=%d ", gc_count_);
4957 PrintF("mark_sweep_count=%d ", ms_count_);
4958 PrintF("mark_compact_count=%d ", mc_count_);
4959 PrintF("max_gc_pause=%d ", get_max_gc_pause());
4960 PrintF("min_in_mutator=%d ", get_min_in_mutator());
4961 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
4962 get_max_alive_after_gc());
4963 PrintF("\n\n");
4964 }
4965
4966 isolate_->global_handles()->TearDown();
4967
4968 external_string_table_.TearDown();
4969
4970 new_space_.TearDown();
4971
4972 if (old_pointer_space_ != NULL) {
4973 old_pointer_space_->TearDown();
4974 delete old_pointer_space_;
4975 old_pointer_space_ = NULL;
4976 }
4977
4978 if (old_data_space_ != NULL) {
4979 old_data_space_->TearDown();
4980 delete old_data_space_;
4981 old_data_space_ = NULL;
4982 }
4983
4984 if (code_space_ != NULL) {
4985 code_space_->TearDown();
4986 delete code_space_;
4987 code_space_ = NULL;
4988 }
4989
4990 if (map_space_ != NULL) {
4991 map_space_->TearDown();
4992 delete map_space_;
4993 map_space_ = NULL;
4994 }
4995
4996 if (cell_space_ != NULL) {
4997 cell_space_->TearDown();
4998 delete cell_space_;
4999 cell_space_ = NULL;
5000 }
5001
5002 if (lo_space_ != NULL) {
5003 lo_space_->TearDown();
5004 delete lo_space_;
5005 lo_space_ = NULL;
5006 }
5007
5008 isolate_->memory_allocator()->TearDown();
5009
5010 #ifdef DEBUG
5011 delete debug_utils_;
5012 debug_utils_ = NULL;
5013 #endif
5014 }
5015
5016
5017 void Heap::Shrink() {
5018 // Try to shrink all paged spaces.
5019 PagedSpaces spaces;
5020 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
5021 space->Shrink();
5022 }
5023
5024
5025 #ifdef ENABLE_HEAP_PROTECTION
5026
5027 void Heap::Protect() {
5028 if (HasBeenSetup()) {
5029 AllSpaces spaces;
5030 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5031 space->Protect();
5032 }
5033 }
5034
5035
5036 void Heap::Unprotect() {
5037 if (HasBeenSetup()) {
5038 AllSpaces spaces;
5039 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5040 space->Unprotect();
5041 }
5042 }
5043
5044 #endif
5045
5046
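// GC prologue and epilogue callbacks are stored as (callback, gc_type) pairs;
// registering the same callback twice or removing one that was never added is
// a programming error and is caught by the debug-mode checks below.
// Hypothetical usage sketch (names are illustrative, not from this file):
//   static void OnGCStart(GCType type, GCCallbackFlags flags) { /* ... */ }
//   heap->AddGCPrologueCallback(OnGCStart, kGCTypeAll);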
5047 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
5048 ASSERT(callback != NULL);
5049 GCPrologueCallbackPair pair(callback, gc_type);
5050 ASSERT(!gc_prologue_callbacks_.Contains(pair));
5051 return gc_prologue_callbacks_.Add(pair);
5052 }
5053
5054
5055 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
5056 ASSERT(callback != NULL);
5057 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5058 if (gc_prologue_callbacks_[i].callback == callback) {
5059 gc_prologue_callbacks_.Remove(i);
5060 return;
5061 }
5062 }
5063 UNREACHABLE();
5064 }
5065
5066
5067 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
5068 ASSERT(callback != NULL);
5069 GCEpilogueCallbackPair pair(callback, gc_type);
5070 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
5071 return gc_epilogue_callbacks_.Add(pair);
5072 }
5073
5074
5075 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
5076 ASSERT(callback != NULL);
5077 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5078 if (gc_epilogue_callbacks_[i].callback == callback) {
5079 gc_epilogue_callbacks_.Remove(i);
5080 return;
5081 }
5082 }
5083 UNREACHABLE();
5084 }
5085
5086
5087 #ifdef DEBUG
5088
5089 class PrintHandleVisitor: public ObjectVisitor {
5090 public:
5091 void VisitPointers(Object** start, Object** end) {
5092 for (Object** p = start; p < end; p++)
5093 PrintF(" handle %p to %p\n",
5094 reinterpret_cast<void*>(p),
5095 reinterpret_cast<void*>(*p));
5096 }
5097 };
5098
5099 void Heap::PrintHandles() {
5100 PrintF("Handles:\n");
5101 PrintHandleVisitor v;
5102 isolate_->handle_scope_implementer()->Iterate(&v);
5103 }
5104
5105 #endif
5106
5107
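// AllSpaces, PagedSpaces and OldSpaces below hand out the heap's spaces one
// at a time in enum order and return NULL once all spaces have been visited.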
5108 Space* AllSpaces::next() {
5109 switch (counter_++) {
5110 case NEW_SPACE:
5111 return HEAP->new_space();
5112 case OLD_POINTER_SPACE:
5113 return HEAP->old_pointer_space();
5114 case OLD_DATA_SPACE:
5115 return HEAP->old_data_space();
5116 case CODE_SPACE:
5117 return HEAP->code_space();
5118 case MAP_SPACE:
5119 return HEAP->map_space();
5120 case CELL_SPACE:
5121 return HEAP->cell_space();
5122 case LO_SPACE:
5123 return HEAP->lo_space();
5124 default:
5125 return NULL;
5126 }
5127 }
5128
5129
5130 PagedSpace* PagedSpaces::next() {
5131 switch (counter_++) {
5132 case OLD_POINTER_SPACE:
5133 return HEAP->old_pointer_space();
5134 case OLD_DATA_SPACE:
5135 return HEAP->old_data_space();
5136 case CODE_SPACE:
5137 return HEAP->code_space();
5138 case MAP_SPACE:
5139 return HEAP->map_space();
5140 case CELL_SPACE:
5141 return HEAP->cell_space();
5142 default:
5143 return NULL;
5144 }
5145 }
5146
5147
5148
5149 OldSpace* OldSpaces::next() {
5150 switch (counter_++) {
5151 case OLD_POINTER_SPACE:
5152 return HEAP->old_pointer_space();
5153 case OLD_DATA_SPACE:
5154 return HEAP->old_data_space();
5155 case CODE_SPACE:
5156 return HEAP->code_space();
5157 default:
5158 return NULL;
5159 }
5160 }
5161
5162
5163 SpaceIterator::SpaceIterator()
5164 : current_space_(FIRST_SPACE),
5165 iterator_(NULL),
5166 size_func_(NULL) {
5167 }
5168
5169
5170 SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
5171 : current_space_(FIRST_SPACE),
5172 iterator_(NULL),
5173 size_func_(size_func) {
5174 }
5175
5176
5177 SpaceIterator::~SpaceIterator() {
5178 // Delete active iterator if any.
5179 delete iterator_;
5180 }
5181
5182
5183 bool SpaceIterator::has_next() {
5184 // Iterate until no more spaces.
5185 return current_space_ != LAST_SPACE;
5186 }
5187
5188
5189 ObjectIterator* SpaceIterator::next() {
5190 if (iterator_ != NULL) {
5191 delete iterator_;
5192 iterator_ = NULL;
5193 // Move to the next space
5194 current_space_++;
5195 if (current_space_ > LAST_SPACE) {
5196 return NULL;
5197 }
5198 }
5199
5200 // Return iterator for the new current space.
5201 return CreateIterator();
5202 }
5203
5204
5205 // Create an iterator for the current space.
5206 ObjectIterator* SpaceIterator::CreateIterator() {
5207 ASSERT(iterator_ == NULL);
5208
5209 switch (current_space_) {
5210 case NEW_SPACE:
5211 iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
5212 break;
5213 case OLD_POINTER_SPACE:
5214 iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
5215 break;
5216 case OLD_DATA_SPACE:
5217 iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
5218 break;
5219 case CODE_SPACE:
5220 iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
5221 break;
5222 case MAP_SPACE:
5223 iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
5224 break;
5225 case CELL_SPACE:
5226 iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
5227 break;
5228 case LO_SPACE:
5229 iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
5230 break;
5231 }
5232
5233 // Return the newly allocated iterator.
5234 ASSERT(iterator_ != NULL);
5235 return iterator_;
5236 }
5237
5238
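// Filters used by HeapIterator to skip objects the caller is not interested
// in: free-list nodes, or objects that are unreachable from the roots.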
5239 class HeapObjectsFilter {
5240 public:
5241 virtual ~HeapObjectsFilter() {}
5242 virtual bool SkipObject(HeapObject* object) = 0;
5243 };
5244
5245
5246 class FreeListNodesFilter : public HeapObjectsFilter {
5247 public:
5248 FreeListNodesFilter() {
5249 MarkFreeListNodes();
5250 }
5251
5252 bool SkipObject(HeapObject* object) {
5253 if (object->IsMarked()) {
5254 object->ClearMark();
5255 return true;
5256 } else {
5257 return false;
5258 }
5259 }
5260
5261 private:
5262 void MarkFreeListNodes() {
5263 Heap* heap = HEAP;
5264 heap->old_pointer_space()->MarkFreeListNodes();
5265 heap->old_data_space()->MarkFreeListNodes();
5266 MarkCodeSpaceFreeListNodes(heap);
5267 heap->map_space()->MarkFreeListNodes();
5268 heap->cell_space()->MarkFreeListNodes();
5269 }
5270
5271 void MarkCodeSpaceFreeListNodes(Heap* heap) {
5272 // For code space, using FreeListNode::IsFreeListNode is OK.
5273 HeapObjectIterator iter(heap->code_space());
5274 for (HeapObject* obj = iter.next_object();
5275 obj != NULL;
5276 obj = iter.next_object()) {
5277 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
5278 }
5279 }
5280
5281 AssertNoAllocation no_alloc;
5282 };
5283
5284
5285 class UnreachableObjectsFilter : public HeapObjectsFilter {
5286 public:
5287 UnreachableObjectsFilter() {
5288 MarkUnreachableObjects();
5289 }
5290
5291 bool SkipObject(HeapObject* object) {
5292 if (object->IsMarked()) {
5293 object->ClearMark();
5294 return true;
5295 } else {
5296 return false;
5297 }
5298 }
5299
5300 private:
5301 class UnmarkingVisitor : public ObjectVisitor {
5302 public:
5303 UnmarkingVisitor() : list_(10) {}
5304
5305 void VisitPointers(Object** start, Object** end) {
5306 for (Object** p = start; p < end; p++) {
5307 if (!(*p)->IsHeapObject()) continue;
5308 HeapObject* obj = HeapObject::cast(*p);
5309 if (obj->IsMarked()) {
5310 obj->ClearMark();
5311 list_.Add(obj);
5312 }
5313 }
5314 }
5315
5316 bool can_process() { return !list_.is_empty(); }
5317
5318 void ProcessNext() {
5319 HeapObject* obj = list_.RemoveLast();
5320 obj->Iterate(this);
5321 }
5322
5323 private:
5324 List<HeapObject*> list_;
5325 };
5326
5327 void MarkUnreachableObjects() {
5328 HeapIterator iterator;
5329 for (HeapObject* obj = iterator.next();
5330 obj != NULL;
5331 obj = iterator.next()) {
5332 obj->SetMark();
5333 }
5334 UnmarkingVisitor visitor;
5335 HEAP->IterateRoots(&visitor, VISIT_ALL);
5336 while (visitor.can_process())
5337 visitor.ProcessNext();
5338 }
5339
5340 AssertNoAllocation no_alloc;
5341 };
5342
5343
5344 HeapIterator::HeapIterator()
5345 : filtering_(HeapIterator::kNoFiltering),
5346 filter_(NULL) {
5347 Init();
5348 }
5349
5350
5351 HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
5352 : filtering_(filtering),
5353 filter_(NULL) {
5354 Init();
5355 }
5356
5357
5358 HeapIterator::~HeapIterator() {
5359 Shutdown();
5360 }
5361
5362
5363 void HeapIterator::Init() {
5364 // Start the iteration.
5365 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5366 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
5367 switch (filtering_) {
5368 case kFilterFreeListNodes:
5369 filter_ = new FreeListNodesFilter;
5370 break;
5371 case kFilterUnreachable:
5372 filter_ = new UnreachableObjectsFilter;
5373 break;
5374 default:
5375 break;
5376 }
5377 object_iterator_ = space_iterator_->next();
5378 }
5379
5380
5381 void HeapIterator::Shutdown() {
5382 #ifdef DEBUG
5383 // Assert that in filtering mode we have iterated through all
5384 // objects. Otherwise, heap will be left in an inconsistent state.
5385 if (filtering_ != kNoFiltering) {
5386 ASSERT(object_iterator_ == NULL);
5387 }
5388 #endif
5389 // Make sure the last iterator is deallocated.
5390 delete space_iterator_;
5391 space_iterator_ = NULL;
5392 object_iterator_ = NULL;
5393 delete filter_;
5394 filter_ = NULL;
5395 }
5396
5397
5398 HeapObject* HeapIterator::next() {
5399 if (filter_ == NULL) return NextObject();
5400
5401 HeapObject* obj = NextObject();
5402 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5403 return obj;
5404 }
5405
5406
5407 HeapObject* HeapIterator::NextObject() {
5408 // No iterator means we are done.
5409 if (object_iterator_ == NULL) return NULL;
5410
5411 if (HeapObject* obj = object_iterator_->next_object()) {
5412 // If the current iterator has more objects we are fine.
5413 return obj;
5414 } else {
5415 // Go through the spaces looking for one that has objects.
5416 while (space_iterator_->has_next()) {
5417 object_iterator_ = space_iterator_->next();
5418 if (HeapObject* obj = object_iterator_->next_object()) {
5419 return obj;
5420 }
5421 }
5422 }
5423 // Done with the last space.
5424 object_iterator_ = NULL;
5425 return NULL;
5426 }
5427
5428
5429 void HeapIterator::reset() {
5430 // Restart the iterator.
5431 Shutdown();
5432 Init();
5433 }
5434
5435
5436 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
5437
5438 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
5439
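// PathTracer performs a depth-first search from the visited roots for a
// target object (or for any global object), using the same map-word tagging
// trick as HeapDebugUtils above to keep track of visited objects.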
5440 class PathTracer::MarkVisitor: public ObjectVisitor {
5441 public:
5442 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5443 void VisitPointers(Object** start, Object** end) {
5444 // Scan all HeapObject pointers in [start, end)
5445 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5446 if ((*p)->IsHeapObject())
5447 tracer_->MarkRecursively(p, this);
5448 }
5449 }
5450
5451 private:
5452 PathTracer* tracer_;
5453 };
5454
5455
5456 class PathTracer::UnmarkVisitor: public ObjectVisitor {
5457 public:
5458 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5459 void VisitPointers(Object** start, Object** end) {
5460 // Scan all HeapObject pointers in [start, end)
5461 for (Object** p = start; p < end; p++) {
5462 if ((*p)->IsHeapObject())
5463 tracer_->UnmarkRecursively(p, this);
5464 }
5465 }
5466
5467 private:
5468 PathTracer* tracer_;
5469 };
5470
5471
5472 void PathTracer::VisitPointers(Object** start, Object** end) {
5473 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5474 // Visit all HeapObject pointers in [start, end)
5475 for (Object** p = start; !done && (p < end); p++) {
5476 if ((*p)->IsHeapObject()) {
5477 TracePathFrom(p);
5478 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5479 }
5480 }
5481 }
5482
5483
5484 void PathTracer::Reset() {
5485 found_target_ = false;
5486 object_stack_.Clear();
5487 }
5488
5489
5490 void PathTracer::TracePathFrom(Object** root) {
5491 ASSERT((search_target_ == kAnyGlobalObject) ||
5492 search_target_->IsHeapObject());
5493 found_target_in_trace_ = false;
5494 object_stack_.Clear();
5495
5496 MarkVisitor mark_visitor(this);
5497 MarkRecursively(root, &mark_visitor);
5498
5499 UnmarkVisitor unmark_visitor(this);
5500 UnmarkRecursively(root, &unmark_visitor);
5501
5502 ProcessResults();
5503 }
5504
5505
5506 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5507 if (!(*p)->IsHeapObject()) return;
5508
5509 HeapObject* obj = HeapObject::cast(*p);
5510
5511 Object* map = obj->map();
5512
5513 if (!map->IsHeapObject()) return; // visited before
5514
5515 if (found_target_in_trace_) return; // stop if target found
5516 object_stack_.Add(obj);
5517 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5518 (obj == search_target_)) {
5519 found_target_in_trace_ = true;
5520 found_target_ = true;
5521 return;
5522 }
5523
5524 bool is_global_context = obj->IsGlobalContext();
5525
5526 // not visited yet
5527 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5528
5529 Address map_addr = map_p->address();
5530
5531 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5532
5533 // Scan the object body.
5534 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5535 // This is specialized to scan Context's properly.
5536 Object** start = reinterpret_cast<Object**>(obj->address() +
5537 Context::kHeaderSize);
5538 Object** end = reinterpret_cast<Object**>(obj->address() +
5539 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
5540 mark_visitor->VisitPointers(start, end);
5541 } else {
5542 obj->IterateBody(map_p->instance_type(),
5543 obj->SizeFromMap(map_p),
5544 mark_visitor);
5545 }
5546
5547 // Scan the map after the body because the body is a lot more interesting
5548 // when doing leak detection.
5549 MarkRecursively(&map, mark_visitor);
5550
5551 if (!found_target_in_trace_) // don't pop if found the target
5552 object_stack_.RemoveLast();
5553 }
5554
5555
5556 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5557 if (!(*p)->IsHeapObject()) return;
5558
5559 HeapObject* obj = HeapObject::cast(*p);
5560
5561 Object* map = obj->map();
5562
5563 if (map->IsHeapObject()) return; // unmarked already
5564
5565 Address map_addr = reinterpret_cast<Address>(map);
5566
5567 map_addr -= kMarkTag;
5568
5569 ASSERT_TAG_ALIGNED(map_addr);
5570
5571 HeapObject* map_p = HeapObject::FromAddress(map_addr);
5572
5573 obj->set_map(reinterpret_cast<Map*>(map_p));
5574
5575 UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
5576
5577 obj->IterateBody(Map::cast(map_p)->instance_type(),
5578 obj->SizeFromMap(Map::cast(map_p)),
5579 unmark_visitor);
5580 }
5581
5582
5583 void PathTracer::ProcessResults() {
5584 if (found_target_) {
5585 PrintF("=====================================\n");
5586 PrintF("==== Path to object ====\n");
5587 PrintF("=====================================\n\n");
5588
5589 ASSERT(!object_stack_.is_empty());
5590 for (int i = 0; i < object_stack_.length(); i++) {
5591 if (i > 0) PrintF("\n |\n |\n V\n\n");
5592 Object* obj = object_stack_[i];
5593 #ifdef OBJECT_PRINT
5594 obj->Print();
5595 #else
5596 obj->ShortPrint();
5597 #endif
5598 }
5599 PrintF("=====================================\n");
5600 }
5601 }
5602 #endif // DEBUG || LIVE_OBJECT_LIST
5603
5604
5605 #ifdef DEBUG
5606 // Triggers a depth-first traversal of reachable objects from roots
5607 // and finds a path to a specific heap object and prints it.
5608 void Heap::TracePathToObject(Object* target) {
5609 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5610 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5611 }
5612
5613
5614 // Triggers a depth-first traversal of reachable objects from roots
5615 // and finds a path to any global object and prints it. Useful for
5616 // determining the source for leaks of global objects.
5617 void Heap::TracePathToGlobal() {
5618 PathTracer tracer(PathTracer::kAnyGlobalObject,
5619 PathTracer::FIND_ALL,
5620 VISIT_ALL);
5621 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5622 }
5623 #endif
5624
5625
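// Total number of bytes sitting in free lists or wasted at the end of pages
// in the old spaces; the GC tracer reports this as the size of the heap's
// "holes".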
5626 static intptr_t CountTotalHolesSize() {
5627 intptr_t holes_size = 0;
5628 OldSpaces spaces;
5629 for (OldSpace* space = spaces.next();
5630 space != NULL;
5631 space = spaces.next()) {
5632 holes_size += space->Waste() + space->AvailableFree();
5633 }
5634 return holes_size;
5635 }
5636
5637
5638 GCTracer::GCTracer(Heap* heap)
5639 : start_time_(0.0),
5640 start_size_(0),
5641 gc_count_(0),
5642 full_gc_count_(0),
5643 is_compacting_(false),
5644 marked_count_(0),
5645 allocated_since_last_gc_(0),
5646 spent_in_mutator_(0),
5647 promoted_objects_size_(0),
5648 heap_(heap) {
5649 // These two fields reflect the state of the previous full collection.
5650 // Set them before they are changed by the collector.
5651 previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
5652 previous_marked_count_ =
5653 heap_->mark_compact_collector_.previous_marked_count();
5654 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5655 start_time_ = OS::TimeCurrentMillis();
5656 start_size_ = heap_->SizeOfObjects();
5657
5658 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
5659 scopes_[i] = 0;
5660 }
5661
5662 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
5663
5664 allocated_since_last_gc_ =
5665 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
5666
5667 if (heap_->last_gc_end_timestamp_ > 0) {
5668 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
5669 }
5670 }
5671
5672
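// The destructor prints one trace line per GC: a short human-readable summary
// by default, or name=value pairs when --trace-gc-nvp is set.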
5673 GCTracer::~GCTracer() {
5674 // Print ONE trace line iff one of the tracing flags is set.
5675 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5676
5677 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
5678
5679 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
5680 heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
5681
5682 int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
5683
5684 // Update cumulative GC statistics if required.
5685 if (FLAG_print_cumulative_gc_stat) {
5686 heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
5687 heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
5688 heap_->alive_after_last_gc_);
5689 if (!first_gc) {
5690 heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
5691 static_cast<int>(spent_in_mutator_));
5692 }
5693 }
5694
5695 if (!FLAG_trace_gc_nvp) {
5696 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
5697
5698 PrintF("%s %.1f -> %.1f MB, ",
5699 CollectorString(),
5700 static_cast<double>(start_size_) / MB,
5701 SizeOfHeapObjects());
5702
5703 if (external_time > 0) PrintF("%d / ", external_time);
5704 PrintF("%d ms.\n", time);
5705 } else {
5706 PrintF("pause=%d ", time);
5707 PrintF("mutator=%d ",
5708 static_cast<int>(spent_in_mutator_));
5709
5710 PrintF("gc=");
5711 switch (collector_) {
5712 case SCAVENGER:
5713 PrintF("s");
5714 break;
5715 case MARK_COMPACTOR:
5716 PrintF("%s",
5717 heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
5718 break;
5719 default:
5720 UNREACHABLE();
5721 }
5722 PrintF(" ");
5723
5724 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
5725 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
5726 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
5727 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
5728 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
5729
5730 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
5731 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
5732 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
5733 in_free_list_or_wasted_before_gc_);
5734 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
5735
5736 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
5737 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
5738
5739 PrintF("\n");
5740 }
5741
5742 #if defined(ENABLE_LOGGING_AND_PROFILING)
5743 heap_->PrintShortHeapStatistics();
5744 #endif
5745 }
5746
5747
5748 const char* GCTracer::CollectorString() {
5749 switch (collector_) {
5750 case SCAVENGER:
5751 return "Scavenge";
5752 case MARK_COMPACTOR:
5753 return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
5754 : "Mark-sweep";
5755 }
5756 return "Unknown GC";
5757 }
5758
5759
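// The keyed lookup cache maps (map, symbol name) pairs to field offsets. The
// hash mixes the map pointer with the name's string hash and is masked down
// to the cache capacity.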
5760 int KeyedLookupCache::Hash(Map* map, String* name) {
5761 // Uses only lower 32 bits if pointers are larger.
5762 uintptr_t addr_hash =
5763 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
5764 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
5765 }
5766
5767
5768 int KeyedLookupCache::Lookup(Map* map, String* name) {
5769 int index = Hash(map, name);
5770 Key& key = keys_[index];
5771 if ((key.map == map) && key.name->Equals(name)) {
5772 return field_offsets_[index];
5773 }
5774 return kNotFound;
5775 }
5776
5777
5778 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
5779 String* symbol;
5780 if (HEAP->LookupSymbolIfExists(name, &symbol)) {
5781 int index = Hash(map, symbol);
5782 Key& key = keys_[index];
5783 key.map = map;
5784 key.name = symbol;
5785 field_offsets_[index] = field_offset;
5786 }
5787 }
5788
5789
5790 void KeyedLookupCache::Clear() {
5791 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5792 }
5793
5794
5795 void DescriptorLookupCache::Clear() {
5796 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
5797 }
5798
5799
5800 #ifdef DEBUG
5801 void Heap::GarbageCollectionGreedyCheck() {
5802 ASSERT(FLAG_gc_greedy);
5803 if (isolate_->bootstrapper()->IsActive()) return;
5804 if (disallow_allocation_failure()) return;
5805 CollectGarbage(NEW_SPACE);
5806 }
5807 #endif
5808
5809
5810 TranscendentalCache::SubCache::SubCache(Type t)
5811 : type_(t),
5812 isolate_(Isolate::Current()) {
5813 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
5814 uint32_t in1 = 0xffffffffu; // generated by the FPU.
5815 for (int i = 0; i < kCacheSize; i++) {
5816 elements_[i].in[0] = in0;
5817 elements_[i].in[1] = in1;
5818 elements_[i].output = NULL;
5819 }
5820 }
5821
5822
5823 void TranscendentalCache::Clear() {
5824 for (int i = 0; i < kNumberOfCaches; i++) {
5825 if (caches_[i] != NULL) {
5826 delete caches_[i];
5827 caches_[i] = NULL;
5828 }
5829 }
5830 }
5831
5832
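// Drop entries that were overwritten with the null value during GC, and move
// strings that are no longer in new space over to the old-space list.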
5833 void ExternalStringTable::CleanUp() {
5834 int last = 0;
5835 for (int i = 0; i < new_space_strings_.length(); ++i) {
5836 if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5837 if (heap_->InNewSpace(new_space_strings_[i])) {
5838 new_space_strings_[last++] = new_space_strings_[i];
5839 } else {
5840 old_space_strings_.Add(new_space_strings_[i]);
5841 }
5842 }
5843 new_space_strings_.Rewind(last);
5844 last = 0;
5845 for (int i = 0; i < old_space_strings_.length(); ++i) {
5846 if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
5847 ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
5848 old_space_strings_[last++] = old_space_strings_[i];
5849 }
5850 old_space_strings_.Rewind(last);
5851 Verify();
5852 }
5853
5854
5855 void ExternalStringTable::TearDown() {
5856 new_space_strings_.Free();
5857 old_space_strings_.Free();
5858 }
5859
5860
5861 } } // namespace v8::internal
5862